From ea954ed1767d94ef57c77cdb46af7f14ca1c0f14 Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Fri, 19 Jan 2024 14:34:57 +0100 Subject: [PATCH 01/15] Test --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 75dee7dfac..117d5d770a 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Redis Hugo site template -## Files and folders +# Files and folders * **/archetypes**: A Markdown file needs to have some front matter. An archetype defines which front matter is used when using `hugo new content`. Right now, the only supported archetype is the default one. **Note:** We might want to add additional archetypes in the future because most of our pages contain additional meta data properties like `linkTitle`. * **/content**: This folder contains the markdown files. We will have the subfolders like `/develop`, `/integrate`, and `/operate` From 01356171aa234bf2070e525bb4c52af0a8c42559 Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Fri, 19 Jan 2024 14:35:47 +0100 Subject: [PATCH 02/15] Test --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 117d5d770a..75dee7dfac 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Redis Hugo site template -# Files and folders +## Files and folders * **/archetypes**: A Markdown file needs to have some front matter. An archetype defines which front matter is used when using `hugo new content`. Right now, the only supported archetype is the default one. **Note:** We might want to add additional archetypes in the future because most of our pages contain additional meta data properties like `linkTitle`. * **/content**: This folder contains the markdown files. 
We will have the subfolders like `/develop`, `/integrate`, and `/operate` From 02478c6f080a0d502dfebcbf984e36777c9792ff Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Fri, 19 Jan 2024 18:04:01 +0100 Subject: [PATCH 03/15] Replaced normal links by relRefs --- build/migrate.py | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) mode change 100644 => 100755 build/migrate.py diff --git a/build/migrate.py b/build/migrate.py old mode 100644 new mode 100755 index 5777a51bac..3874c93fcc --- a/build/migrate.py +++ b/build/migrate.py @@ -107,9 +107,9 @@ def replace_links_in_file(file_path, old_prefix, new_prefix): with open(file_path, 'r', encoding='utf-8') as file: file_content = file.read() - link_pattern = re.compile(r'(\[.*?\]\()(' + re.escape(old_prefix) + r')(.*?\))') - updated_content = re.sub(link_pattern, r'\1' + new_prefix + r'\3', file_content) - + link_pattern = re.compile(r'(\[.*?\]\()(' + re.escape(old_prefix) + r')(.*?)' + r'(\))') + #updated_content = re.sub(link_pattern, r'\1' + '{{ relURL "" }}' + new_prefix + r'\3', file_content) + updated_content = re.sub(link_pattern, r'\1' + '{{< relref "' + new_prefix + r'\3' + '" >}}' + r'\4', file_content) with open(file_path, 'w', encoding='utf-8') as file: file.write(updated_content) @@ -407,26 +407,22 @@ def migrate_static_files(repo): print(set_env()) #print("## Fetching temporary development documentation content ...") - #fetch_io() + fetch_io() #print("## Migrating commands to {}".format(DOCS_CMD)) - #migrate_commands() + migrate_commands() #print("## Migrating developer documentation to {} ...".format(DOCS_DEV)) - #migrate_developer_docs() + migrate_developer_docs() #print("## Migrating operator documentation to {} ...".format(DOCS_OPS)) #migrate_oss_ops_docs() print("## Fetching temporary Enterprise documentation content ...") - repo = fetch_docs_redis_com() + #repo = fetch_docs_redis_com() #migrate_enterprise_ops_docs(repo) #migrate_gloassary(repo) - migrate_static_files(repo) 
- delete_folder(repo) - - - - # TODO: Serve the site and check for still broken links + #migrate_static_files(repo) + #delete_folder(repo) From c40d18042e6fedbd6fda9519f6b5cac40b798d3d Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Fri, 19 Jan 2024 18:04:55 +0100 Subject: [PATCH 04/15] The developer docs uses relrefs now --- content/commands/acl-cat/index.md | 10 + content/commands/acl-deluser/index.md | 10 + content/commands/acl-dryrun/index.md | 14 +- content/commands/acl-genpass/index.md | 10 + content/commands/acl-getuser/index.md | 10 + content/commands/acl-help/index.md | 10 + content/commands/acl-list/index.md | 10 + content/commands/acl-load/index.md | 10 + content/commands/acl-log/index.md | 10 + content/commands/acl-save/index.md | 10 + content/commands/acl-setuser/index.md | 10 + content/commands/acl-users/index.md | 10 + content/commands/acl-whoami/index.md | 10 + content/commands/acl/index.md | 10 + content/commands/append/index.md | 10 + content/commands/asking/index.md | 10 + content/commands/auth/index.md | 10 + content/commands/bf.add/index.md | 10 + content/commands/bf.card/index.md | 10 + content/commands/bf.exists/index.md | 10 + content/commands/bf.info/index.md | 10 + content/commands/bf.insert/index.md | 16 +- content/commands/bf.loadchunk/index.md | 10 + content/commands/bf.madd/index.md | 10 + content/commands/bf.mexists/index.md | 10 + content/commands/bf.reserve/index.md | 13 +- content/commands/bf.scandump/index.md | 10 + content/commands/bgrewriteaof/index.md | 10 + content/commands/bgsave/index.md | 10 + content/commands/bitcount/index.md | 10 + content/commands/bitfield/index.md | 22 +- content/commands/bitfield_ro/index.md | 10 + content/commands/bitop/index.md | 10 + content/commands/bitpos/index.md | 10 + content/commands/blmove/index.md | 10 + content/commands/blmpop/index.md | 10 + content/commands/blpop/index.md | 10 + content/commands/brpop/index.md | 10 + content/commands/brpoplpush/index.md | 10 + 
content/commands/bzmpop/index.md | 10 + content/commands/bzpopmax/index.md | 10 + content/commands/bzpopmin/index.md | 10 + content/commands/cf.add/index.md | 10 + content/commands/cf.addnx/index.md | 10 + content/commands/cf.count/index.md | 10 + content/commands/cf.del/index.md | 10 + content/commands/cf.exists/index.md | 10 + content/commands/cf.info/index.md | 10 + content/commands/cf.insert/index.md | 10 + content/commands/cf.insertnx/index.md | 10 + content/commands/cf.loadchunk/index.md | 10 + content/commands/cf.mexists/index.md | 10 + content/commands/cf.reserve/index.md | 38 +- content/commands/cf.scandump/index.md | 10 + content/commands/client-caching/index.md | 10 + content/commands/client-getname/index.md | 10 + content/commands/client-getredir/index.md | 10 + content/commands/client-help/index.md | 10 + content/commands/client-id/index.md | 10 + content/commands/client-info/index.md | 10 + content/commands/client-kill/index.md | 25 +- content/commands/client-list/index.md | 13 +- content/commands/client-no-evict/index.md | 10 + content/commands/client-no-touch/index.md | 10 + content/commands/client-pause/index.md | 10 + content/commands/client-reply/index.md | 10 + content/commands/client-setinfo/index.md | 10 + content/commands/client-setname/index.md | 10 + content/commands/client-tracking/index.md | 13 +- content/commands/client-trackinginfo/index.md | 10 + content/commands/client-unblock/index.md | 10 + content/commands/client-unpause/index.md | 10 + content/commands/client/index.md | 10 + content/commands/cluster-addslots/index.md | 10 + .../commands/cluster-addslotsrange/index.md | 10 + content/commands/cluster-bumpepoch/index.md | 10 + .../cluster-count-failure-reports/index.md | 10 + .../commands/cluster-countkeysinslot/index.md | 10 + content/commands/cluster-delslots/index.md | 10 + .../commands/cluster-delslotsrange/index.md | 10 + content/commands/cluster-failover/index.md | 10 + content/commands/cluster-flushslots/index.md | 10 + 
content/commands/cluster-forget/index.md | 10 + .../commands/cluster-getkeysinslot/index.md | 10 + content/commands/cluster-help/index.md | 10 + content/commands/cluster-info/index.md | 10 + content/commands/cluster-keyslot/index.md | 10 + content/commands/cluster-links/index.md | 10 + content/commands/cluster-meet/index.md | 10 + content/commands/cluster-myid/index.md | 10 + content/commands/cluster-myshardid/index.md | 10 + content/commands/cluster-nodes/index.md | 13 +- content/commands/cluster-replicas/index.md | 10 + content/commands/cluster-replicate/index.md | 10 + content/commands/cluster-reset/index.md | 10 + content/commands/cluster-saveconfig/index.md | 10 + .../cluster-set-config-epoch/index.md | 10 + content/commands/cluster-setslot/index.md | 13 +- content/commands/cluster-shards/index.md | 10 + content/commands/cluster-slaves/index.md | 10 + content/commands/cluster-slots/index.md | 10 + content/commands/cluster/index.md | 10 + content/commands/cms.incrby/index.md | 10 + content/commands/cms.info/index.md | 10 + content/commands/cms.initbydim/index.md | 10 + content/commands/cms.initbyprob/index.md | 10 + content/commands/cms.merge/index.md | 13 +- content/commands/cms.query/index.md | 10 + content/commands/command-count/index.md | 10 + content/commands/command-docs/index.md | 10 + content/commands/command-getkeys/index.md | 10 + .../commands/command-getkeysandflags/index.md | 10 + content/commands/command-help/index.md | 10 + content/commands/command-info/index.md | 10 + content/commands/command-list/index.md | 13 +- content/commands/command/index.md | 10 + content/commands/config-get/index.md | 10 + content/commands/config-help/index.md | 10 + content/commands/config-resetstat/index.md | 10 + content/commands/config-rewrite/index.md | 10 + content/commands/config-set/index.md | 10 + content/commands/config/index.md | 10 + content/commands/copy/index.md | 10 + content/commands/dbsize/index.md | 10 + content/commands/debug/index.md | 10 + 
content/commands/decr/index.md | 10 + content/commands/decrby/index.md | 10 + content/commands/del/index.md | 10 + content/commands/discard/index.md | 10 + content/commands/dump/index.md | 10 + content/commands/echo/index.md | 10 + content/commands/eval/index.md | 10 + content/commands/eval_ro/index.md | 10 + content/commands/evalsha/index.md | 10 + content/commands/evalsha_ro/index.md | 10 + content/commands/exec/index.md | 10 + content/commands/exists/index.md | 10 + content/commands/expire/index.md | 10 + content/commands/expireat/index.md | 10 + content/commands/expiretime/index.md | 10 + content/commands/failover/index.md | 10 + content/commands/fcall/index.md | 10 + content/commands/fcall_ro/index.md | 10 + content/commands/flushall/index.md | 10 + content/commands/flushdb/index.md | 10 + content/commands/ft._list/index.md | 10 + content/commands/ft.aggregate/index.md | 64 +- content/commands/ft.aliasadd/index.md | 10 + content/commands/ft.aliasdel/index.md | 10 + content/commands/ft.aliasupdate/index.md | 10 + content/commands/ft.alter/index.md | 10 + content/commands/ft.config-get/index.md | 10 + content/commands/ft.config-help/index.md | 10 + content/commands/ft.config-set/index.md | 10 + content/commands/ft.create/index.md | 68 +- content/commands/ft.cursor-del/index.md | 10 + content/commands/ft.cursor-read/index.md | 10 + content/commands/ft.dictadd/index.md | 10 + content/commands/ft.dictdel/index.md | 10 + content/commands/ft.dictdump/index.md | 10 + content/commands/ft.dropindex/index.md | 14 +- content/commands/ft.explain/index.md | 14 +- content/commands/ft.explaincli/index.md | 14 +- content/commands/ft.info/index.md | 10 + content/commands/ft.profile/index.md | 10 + content/commands/ft.search/index.md | 93 +- content/commands/ft.spellcheck/index.md | 21 +- content/commands/ft.sugadd/index.md | 15 +- content/commands/ft.sugdel/index.md | 10 + content/commands/ft.sugget/index.md | 19 +- content/commands/ft.suglen/index.md | 10 + 
content/commands/ft.syndump/index.md | 10 + content/commands/ft.synupdate/index.md | 17 +- content/commands/ft.tagvals/index.md | 10 + content/commands/function-delete/index.md | 14 +- content/commands/function-dump/index.md | 10 + content/commands/function-flush/index.md | 10 + content/commands/function-help/index.md | 10 + content/commands/function-kill/index.md | 10 + content/commands/function-list/index.md | 10 + content/commands/function-load/index.md | 10 + content/commands/function-restore/index.md | 10 + content/commands/function-stats/index.md | 10 + content/commands/function/index.md | 10 + content/commands/geoadd/index.md | 13 +- content/commands/geodist/index.md | 10 + content/commands/geohash/index.md | 10 + content/commands/geopos/index.md | 10 + content/commands/georadius/index.md | 16 +- content/commands/georadius_ro/index.md | 13 +- content/commands/georadiusbymember/index.md | 16 +- .../commands/georadiusbymember_ro/index.md | 13 +- content/commands/geosearch/index.md | 19 +- content/commands/geosearchstore/index.md | 19 +- content/commands/get/index.md | 10 + content/commands/getbit/index.md | 10 + content/commands/getdel/index.md | 10 + content/commands/getex/index.md | 13 +- content/commands/getrange/index.md | 10 + content/commands/getset/index.md | 10 + content/commands/hdel/index.md | 10 + content/commands/hello/index.md | 10 + content/commands/hexists/index.md | 10 + content/commands/hget/index.md | 10 + content/commands/hgetall/index.md | 10 + content/commands/hincrby/index.md | 10 + content/commands/hincrbyfloat/index.md | 12 +- content/commands/hkeys/index.md | 10 + content/commands/hlen/index.md | 10 + content/commands/hmget/index.md | 10 + content/commands/hmset/index.md | 10 + content/commands/hrandfield/index.md | 10 + content/commands/hscan/index.md | 10 + content/commands/hset/index.md | 10 + content/commands/hsetnx/index.md | 10 + content/commands/hstrlen/index.md | 10 + content/commands/hvals/index.md | 10 + 
content/commands/incr/index.md | 10 + content/commands/incrby/index.md | 10 + content/commands/incrbyfloat/index.md | 10 + content/commands/info/index.md | 26 +- content/commands/json.arrappend/index.md | 10 + content/commands/json.arrindex/index.md | 12 +- content/commands/json.arrinsert/index.md | 12 +- content/commands/json.arrlen/index.md | 10 + content/commands/json.arrpop/index.md | 10 + content/commands/json.arrtrim/index.md | 10 + content/commands/json.clear/index.md | 10 + content/commands/json.debug-help/index.md | 10 + content/commands/json.debug-memory/index.md | 10 + content/commands/json.debug/index.md | 10 + content/commands/json.del/index.md | 10 + content/commands/json.forget/index.md | 10 + content/commands/json.get/index.md | 13 +- content/commands/json.merge/index.md | 12 +- content/commands/json.mget/index.md | 10 + content/commands/json.mset/index.md | 18 +- content/commands/json.numincrby/index.md | 10 + content/commands/json.nummultby/index.md | 10 + content/commands/json.objkeys/index.md | 10 + content/commands/json.objlen/index.md | 10 + content/commands/json.resp/index.md | 10 + content/commands/json.set/index.md | 10 + content/commands/json.strappend/index.md | 10 + content/commands/json.strlen/index.md | 10 + content/commands/json.toggle/index.md | 10 + content/commands/json.type/index.md | 11 + content/commands/keys/index.md | 10 + content/commands/lastsave/index.md | 10 + content/commands/latency-doctor/index.md | 10 + content/commands/latency-graph/index.md | 10 + content/commands/latency-help/index.md | 10 + content/commands/latency-histogram/index.md | 10 + content/commands/latency-history/index.md | 10 + content/commands/latency-latest/index.md | 10 + content/commands/latency-reset/index.md | 10 + content/commands/latency/index.md | 10 + content/commands/lcs/index.md | 10 + content/commands/lindex/index.md | 10 + content/commands/linsert/index.md | 10 + content/commands/llen/index.md | 10 + content/commands/lmove/index.md | 10 + 
content/commands/lmpop/index.md | 10 + content/commands/lolwut/index.md | 10 + content/commands/lpop/index.md | 10 + content/commands/lpos/index.md | 10 + content/commands/lpush/index.md | 10 + content/commands/lpushx/index.md | 10 + content/commands/lrange/index.md | 10 + content/commands/lrem/index.md | 10 + content/commands/lset/index.md | 10 + content/commands/ltrim/index.md | 10 + content/commands/memory-doctor/index.md | 10 + content/commands/memory-help/index.md | 10 + content/commands/memory-malloc-stats/index.md | 10 + content/commands/memory-purge/index.md | 10 + content/commands/memory-stats/index.md | 13 +- content/commands/memory-usage/index.md | 12 +- content/commands/memory/index.md | 10 + content/commands/mget/index.md | 10 + content/commands/migrate/index.md | 13 +- content/commands/module-help/index.md | 10 + content/commands/module-list/index.md | 10 + content/commands/module-load/index.md | 10 + content/commands/module-loadex/index.md | 13 +- content/commands/module-unload/index.md | 10 + content/commands/module/index.md | 10 + content/commands/monitor/index.md | 10 + content/commands/move/index.md | 10 + content/commands/mset/index.md | 10 + content/commands/msetnx/index.md | 10 + content/commands/multi/index.md | 10 + content/commands/object-encoding/index.md | 10 + content/commands/object-freq/index.md | 10 + content/commands/object-help/index.md | 10 + content/commands/object-idletime/index.md | 10 + content/commands/object-refcount/index.md | 10 + content/commands/object/index.md | 10 + content/commands/persist/index.md | 10 + content/commands/pexpire/index.md | 10 + content/commands/pexpireat/index.md | 10 + content/commands/pexpiretime/index.md | 10 + content/commands/pfadd/index.md | 10 + content/commands/pfcount/index.md | 10 + content/commands/pfdebug/index.md | 10 + content/commands/pfmerge/index.md | 10 + content/commands/pfselftest/index.md | 10 + content/commands/ping/index.md | 10 + content/commands/psetex/index.md | 10 + 
content/commands/psubscribe/index.md | 10 + content/commands/psync/index.md | 10 + content/commands/pttl/index.md | 10 + content/commands/publish/index.md | 10 + content/commands/pubsub-channels/index.md | 10 + content/commands/pubsub-help/index.md | 10 + content/commands/pubsub-numpat/index.md | 10 + content/commands/pubsub-numsub/index.md | 10 + .../commands/pubsub-shardchannels/index.md | 10 + content/commands/pubsub-shardnumsub/index.md | 10 + content/commands/pubsub/index.md | 10 + content/commands/punsubscribe/index.md | 10 + content/commands/quit/index.md | 10 + content/commands/randomkey/index.md | 10 + content/commands/readonly/index.md | 10 + content/commands/readwrite/index.md | 10 + content/commands/rename/index.md | 10 + content/commands/renamenx/index.md | 10 + content/commands/replconf/index.md | 10 + content/commands/replicaof/index.md | 10 + content/commands/reset/index.md | 10 + content/commands/restore-asking/index.md | 13 +- content/commands/restore/index.md | 13 +- content/commands/role/index.md | 10 + content/commands/rpop/index.md | 10 + content/commands/rpoplpush/index.md | 10 + content/commands/rpush/index.md | 10 + content/commands/rpushx/index.md | 10 + content/commands/sadd/index.md | 10 + content/commands/save/index.md | 10 + content/commands/scan/index.md | 10 + content/commands/scard/index.md | 10 + content/commands/script-debug/index.md | 10 + content/commands/script-exists/index.md | 10 + content/commands/script-flush/index.md | 10 + content/commands/script-help/index.md | 10 + content/commands/script-kill/index.md | 10 + content/commands/script-load/index.md | 10 + content/commands/script/index.md | 10 + content/commands/sdiff/index.md | 10 + content/commands/sdiffstore/index.md | 10 + content/commands/select/index.md | 10 + content/commands/set/index.md | 21 +- content/commands/setbit/index.md | 10 + content/commands/setex/index.md | 10 + content/commands/setnx/index.md | 10 + content/commands/setrange/index.md | 10 + 
content/commands/shutdown/index.md | 10 + content/commands/sinter/index.md | 10 + content/commands/sintercard/index.md | 10 + content/commands/sinterstore/index.md | 10 + content/commands/sismember/index.md | 10 + content/commands/slaveof/index.md | 10 + content/commands/slowlog-get/index.md | 10 + content/commands/slowlog-help/index.md | 10 + content/commands/slowlog-len/index.md | 10 + content/commands/slowlog-reset/index.md | 10 + content/commands/slowlog/index.md | 10 + content/commands/smembers/index.md | 10 + content/commands/smismember/index.md | 10 + content/commands/smove/index.md | 10 + content/commands/sort/index.md | 13 +- content/commands/sort_ro/index.md | 13 +- content/commands/spop/index.md | 10 + content/commands/spublish/index.md | 10 + content/commands/srandmember/index.md | 10 + content/commands/srem/index.md | 10 + content/commands/sscan/index.md | 10 + content/commands/ssubscribe/index.md | 10 + content/commands/strlen/index.md | 10 + content/commands/subscribe/index.md | 10 + content/commands/substr/index.md | 10 + content/commands/sunion/index.md | 10 + content/commands/sunionstore/index.md | 10 + content/commands/sunsubscribe/index.md | 10 + content/commands/swapdb/index.md | 10 + content/commands/sync/index.md | 10 + content/commands/tdigest.add/index.md | 10 + content/commands/tdigest.byrank/index.md | 10 + content/commands/tdigest.byrevrank/index.md | 10 + content/commands/tdigest.cdf/index.md | 10 + content/commands/tdigest.create/index.md | 10 + content/commands/tdigest.info/index.md | 10 + content/commands/tdigest.max/index.md | 10 + content/commands/tdigest.merge/index.md | 13 +- content/commands/tdigest.min/index.md | 10 + content/commands/tdigest.quantile/index.md | 10 + content/commands/tdigest.rank/index.md | 10 + content/commands/tdigest.reset/index.md | 10 + content/commands/tdigest.revrank/index.md | 10 + .../commands/tdigest.trimmed_mean/index.md | 10 + content/commands/tfcall/index.md | 10 + 
content/commands/tfcallasync/index.md | 10 + content/commands/tfunction-delete/index.md | 13 +- content/commands/tfunction-list/index.md | 13 +- content/commands/tfunction-load/index.md | 13 +- content/commands/time/index.md | 10 + content/commands/topk.add/index.md | 10 + content/commands/topk.count/index.md | 10 + content/commands/topk.incrby/index.md | 10 + content/commands/topk.info/index.md | 10 + content/commands/topk.list/index.md | 10 + content/commands/topk.query/index.md | 10 + content/commands/topk.reserve/index.md | 10 + content/commands/touch/index.md | 10 + content/commands/ts.add/index.md | 31 +- content/commands/ts.alter/index.md | 25 +- content/commands/ts.create/index.md | 26 +- content/commands/ts.createrule/index.md | 23 +- content/commands/ts.decrby/index.md | 26 +- content/commands/ts.del/index.md | 10 + content/commands/ts.deleterule/index.md | 10 + content/commands/ts.get/index.md | 18 +- content/commands/ts.incrby/index.md | 26 +- content/commands/ts.info/index.md | 14 +- content/commands/ts.madd/index.md | 10 + content/commands/ts.mget/index.md | 46 +- content/commands/ts.mrange/index.md | 81 +- content/commands/ts.mrevrange/index.md | 81 +- content/commands/ts.queryindex/index.md | 37 +- content/commands/ts.range/index.md | 34 +- content/commands/ts.revrange/index.md | 34 +- content/commands/ttl/index.md | 10 + content/commands/type/index.md | 10 + content/commands/unlink/index.md | 10 + content/commands/unsubscribe/index.md | 10 + content/commands/unwatch/index.md | 10 + content/commands/wait/index.md | 10 + content/commands/waitaof/index.md | 15 + content/commands/watch/index.md | 10 + content/commands/xack/index.md | 10 + content/commands/xadd/index.md | 13 +- content/commands/xautoclaim/index.md | 13 +- content/commands/xclaim/index.md | 16 +- content/commands/xdel/index.md | 10 + content/commands/xgroup-create/index.md | 13 +- .../commands/xgroup-createconsumer/index.md | 10 + content/commands/xgroup-delconsumer/index.md | 10 + 
content/commands/xgroup-destroy/index.md | 10 + content/commands/xgroup-help/index.md | 10 + content/commands/xgroup-setid/index.md | 10 + content/commands/xgroup/index.md | 10 + content/commands/xinfo-consumers/index.md | 10 + content/commands/xinfo-groups/index.md | 10 + content/commands/xinfo-help/index.md | 10 + content/commands/xinfo-stream/index.md | 10 + content/commands/xinfo/index.md | 10 + content/commands/xlen/index.md | 10 + content/commands/xpending/index.md | 10 + content/commands/xrange/index.md | 10 + content/commands/xread/index.md | 13 +- content/commands/xreadgroup/index.md | 13 +- content/commands/xrevrange/index.md | 10 + content/commands/xsetid/index.md | 13 +- content/commands/xtrim/index.md | 10 + content/commands/zadd/index.md | 13 +- content/commands/zcard/index.md | 10 + content/commands/zcount/index.md | 10 + content/commands/zdiff/index.md | 10 + content/commands/zdiffstore/index.md | 10 + content/commands/zincrby/index.md | 10 + content/commands/zinter/index.md | 13 +- content/commands/zintercard/index.md | 10 + content/commands/zinterstore/index.md | 13 +- content/commands/zlexcount/index.md | 10 + content/commands/zmpop/index.md | 10 + content/commands/zmscore/index.md | 10 + content/commands/zpopmax/index.md | 10 + content/commands/zpopmin/index.md | 10 + content/commands/zrandmember/index.md | 10 + content/commands/zrange/index.md | 13 +- content/commands/zrangebylex/index.md | 10 + content/commands/zrangebyscore/index.md | 10 + content/commands/zrangestore/index.md | 13 +- content/commands/zrank/index.md | 10 + content/commands/zrem/index.md | 10 + content/commands/zremrangebylex/index.md | 10 + content/commands/zremrangebyrank/index.md | 10 + content/commands/zremrangebyscore/index.md | 10 + content/commands/zrevrange/index.md | 10 + content/commands/zrevrangebylex/index.md | 10 + content/commands/zrevrangebyscore/index.md | 10 + content/commands/zrevrank/index.md | 10 + content/commands/zscan/index.md | 10 + 
content/commands/zscore/index.md | 10 + content/commands/zunion/index.md | 13 +- content/commands/zunionstore/index.md | 13 +- content/develop/_index.md | 10 + content/develop/connect/_index.md | 26 +- content/develop/connect/cli.md | 14 +- content/develop/connect/clients/_index.md | 26 +- content/develop/connect/clients/dotnet.md | 19 +- content/develop/connect/clients/go.md | 14 +- content/develop/connect/clients/java.md | 17 +- content/develop/connect/clients/nodejs.md | 17 +- .../connect/clients/om-clients/_index.md | 18 +- .../clients/om-clients/stack-dotnet.md | 14 +- .../connect/clients/om-clients/stack-node.md | 14 +- .../clients/om-clients/stack-python.md | 14 +- .../clients/om-clients/stack-spring.md | 14 +- content/develop/connect/clients/python.md | 19 +- content/develop/connect/insight/_index.md | 14 +- .../tutorials/insight-stream-consumer.md | 14 +- content/develop/data-types/_index.md | 62 +- content/develop/data-types/bitfields.md | 52 +- content/develop/data-types/bitmaps.md | 57 +- content/develop/data-types/geospatial.md | 10 + content/develop/data-types/hashes.md | 10 + content/develop/data-types/json/_index.md | 18 +- content/develop/data-types/json/developer.md | 12 +- .../develop/data-types/json/indexing_JSON.md | 18 +- content/develop/data-types/json/path.md | 12 +- .../data-types/json/performance/_index.md | 12 +- content/develop/data-types/json/ram.md | 12 +- content/develop/data-types/json/resp3.md | 10 + content/develop/data-types/json/use_cases.md | 10 + content/develop/data-types/lists.md | 14 +- .../data-types/probabilistic/Configuration.md | 14 +- .../data-types/probabilistic/_index.md | 10 + .../data-types/probabilistic/bloom-filter.md | 12 +- .../probabilistic/count-min-sketch.md | 10 + .../data-types/probabilistic/cuckoo-filter.md | 10 + .../data-types/probabilistic/hyperloglogs.md | 12 +- .../data-types/probabilistic/t-digest.md | 10 + .../develop/data-types/probabilistic/top-k.md | 10 + content/develop/data-types/sets.md | 14 
+- content/develop/data-types/sorted-sets.md | 12 +- content/develop/data-types/streams.md | 16 +- content/develop/data-types/strings.md | 14 +- .../develop/data-types/timeseries/_index.md | 12 +- .../develop/data-types/timeseries/clients.md | 12 +- .../data-types/timeseries/configuration.md | 16 +- .../data-types/timeseries/development.md | 12 +- .../data-types/timeseries/quickstart.md | 14 +- .../data-types/timeseries/reference/_index.md | 10 + .../_index.md | 12 +- .../data-types/timeseries/use_cases.md | 13 +- content/develop/get-started/_index.md | 20 +- content/develop/get-started/data-store.md | 22 +- .../develop/get-started/document-database.md | 32 +- content/develop/get-started/faq.md | 14 +- .../develop/get-started/vector-database.md | 30 +- content/develop/interact/_index.md | 10 + .../interact/programmability/_index.md | 15 +- .../interact/programmability/eval-intro.md | 13 +- .../programmability/functions-intro.md | 21 +- .../interact/programmability/lua-api.md | 19 +- .../interact/programmability/lua-debugging.md | 13 +- .../triggers-and-functions/Configuration.md | 10 + .../triggers-and-functions/Debugging.md | 14 +- .../triggers-and-functions/Development.md | 12 +- .../triggers-and-functions/Examples.md | 10 + .../Known_Limitations.md | 10 + .../triggers-and-functions/Quick_Start_CLI.md | 14 +- .../triggers-and-functions/Quick_Start_RI.md | 14 +- .../triggers-and-functions/_index.md | 12 +- .../concepts/Binary_Data.md | 22 +- .../concepts/Cluster_Support.md | 16 +- .../concepts/Function_Flags.md | 10 + .../concepts/JavaScript_API.md | 10 + .../concepts/Library_Configuration.md | 12 +- .../concepts/RESP_JS_Conversion.md | 10 + .../concepts/Sync_Async.md | 14 +- .../triggers-and-functions/concepts/_index.md | 10 + .../concepts/triggers/KeySpace_Triggers.md | 14 +- .../concepts/triggers/Stream_Triggers.md | 14 +- .../concepts/triggers/User_Functions.md | 10 + .../concepts/triggers/_index.md | 10 + content/develop/interact/pubsub.md | 16 +- 
.../interact/search-and-query/_index.md | 20 +- .../search-and-query/administration/_index.md | 12 +- .../search-and-query/administration/design.md | 12 +- .../administration/extensions.md | 13 +- .../search-and-query/administration/gc.md | 12 +- .../administration/indexing.md | 12 +- .../administration/overview.md | 19 +- .../advanced-concepts/_index.md | 32 +- .../advanced-concepts/aggregations.md | 18 +- .../advanced-concepts/chinese.md | 12 +- .../advanced-concepts/dialects.md | 228 +++ .../advanced-concepts/escaping.md | 14 +- .../advanced-concepts/highlight.md | 13 +- .../advanced-concepts/phonetic_matching.md | 14 +- .../advanced-concepts/query_syntax.md | 23 +- .../advanced-concepts/scoring.md | 16 +- .../advanced-concepts/sorting.md | 13 +- .../advanced-concepts/spellcheck.md | 13 +- .../advanced-concepts/stemming.md | 15 +- .../advanced-concepts/stopwords.md | 13 +- .../advanced-concepts/synonyms.md | 12 +- .../advanced-concepts/tags.md | 15 +- .../advanced-concepts/vectors.md | 21 +- .../basic-constructs/_index.md | 12 +- .../configuration-parameters.md | 24 +- .../field-and-type-options.md | 16 +- .../basic-constructs/schema-definition.md | 10 + .../search-and-query/deprecated/_index.md | 10 + .../deprecated/development.md | 12 +- .../search-and-query/deprecated/payloads.md | 12 +- .../search-and-query/indexing/_index.md | 32 +- .../query-use-cases/_index.md | 12 +- .../interact/search-and-query/query/_index.md | 24 +- .../search-and-query/query/aggregation.md | 16 +- .../search-and-query/query/combined.md | 26 +- .../search-and-query/query/exact-match.md | 18 +- .../search-and-query/query/full-text.md | 16 +- .../search-and-query/query/geo-spatial.md | 10 + .../interact/search-and-query/query/range.md | 12 +- .../search-and-query/query/vector-search.md | 20 +- content/develop/interact/transactions.md | 13 +- content/develop/manual/_index.md | 16 + content/develop/manual/client-side-caching.md | 344 +++++ .../develop/manual/keyspace-notifications.md | 
191 +++ content/develop/manual/keyspace.md | 154 ++ content/develop/manual/patterns/_index.md | 18 + .../develop/manual/patterns/bulk-loading.md | 156 ++ .../manual/patterns/distributed-locks.md | 242 ++++ .../manual/patterns/indexes/2idx_0.png | Bin 0 -> 23007 bytes .../manual/patterns/indexes/2idx_1.png | Bin 0 -> 11649 bytes .../manual/patterns/indexes/2idx_2.png | Bin 0 -> 13973 bytes .../develop/manual/patterns/indexes/index.md | 755 ++++++++++ .../develop/manual/patterns/twitter-clone.md | 460 ++++++ content/develop/manual/pipelining/index.md | 186 +++ .../manual/pipelining/pipeline_iops.png | Bin 0 -> 14577 bytes content/develop/reference/_index.md | 10 + content/develop/reference/arm.md | 69 - content/develop/reference/clients.md | 15 +- content/develop/reference/cluster-spec.md | 1280 ----------------- .../develop/reference/command-arguments.md | 12 +- content/develop/reference/command-tips.md | 16 +- content/develop/reference/eviction/index.md | 16 +- content/develop/reference/gopher.md | 12 +- content/develop/reference/key-specs.md | 12 +- content/develop/reference/modules/_index.md | 12 +- .../reference/modules/modules-api-ref.md | 12 +- .../reference/modules/modules-blocking-ops.md | 12 +- .../reference/modules/modules-native-types.md | 12 +- content/develop/reference/protocol-spec.md | 20 +- content/develop/reference/sentinel-clients.md | 12 +- content/develop/reference/signals.md | 89 -- content/develop/use/_index.md | 10 + content/develop/use/client-side-caching.md | 12 +- content/develop/use/keyspace-notifications.md | 12 +- content/develop/use/keyspace.md | 12 +- content/develop/use/manual/_index.md | 10 + .../develop/use/manual/client-side-caching.md | 12 +- .../use/manual/keyspace-notifications.md | 12 +- content/develop/use/manual/keyspace.md | 12 +- content/develop/use/manual/patterns/_index.md | 12 +- .../use/manual/patterns/bulk-loading.md | 13 +- .../use/manual/patterns/distributed-locks.md | 14 +- .../use/manual/patterns/indexes/index.md | 
13 +- .../use/manual/patterns/twitter-clone.md | 12 +- .../develop/use/manual/pipelining/index.md | 12 +- content/develop/use/patterns/_index.md | 12 +- content/develop/use/patterns/bulk-loading.md | 13 +- .../develop/use/patterns/distributed-locks.md | 14 +- content/develop/use/patterns/indexes/index.md | 13 +- content/develop/use/patterns/twitter-clone.md | 12 +- content/develop/use/pipelining/index.md | 12 +- 669 files changed, 10107 insertions(+), 2253 deletions(-) create mode 100644 content/develop/interact/search-and-query/advanced-concepts/dialects.md create mode 100644 content/develop/manual/_index.md create mode 100644 content/develop/manual/client-side-caching.md create mode 100644 content/develop/manual/keyspace-notifications.md create mode 100644 content/develop/manual/keyspace.md create mode 100644 content/develop/manual/patterns/_index.md create mode 100644 content/develop/manual/patterns/bulk-loading.md create mode 100644 content/develop/manual/patterns/distributed-locks.md create mode 100644 content/develop/manual/patterns/indexes/2idx_0.png create mode 100644 content/develop/manual/patterns/indexes/2idx_1.png create mode 100644 content/develop/manual/patterns/indexes/2idx_2.png create mode 100644 content/develop/manual/patterns/indexes/index.md create mode 100644 content/develop/manual/patterns/twitter-clone.md create mode 100644 content/develop/manual/pipelining/index.md create mode 100644 content/develop/manual/pipelining/pipeline_iops.png delete mode 100644 content/develop/reference/arm.md delete mode 100644 content/develop/reference/cluster-spec.md delete mode 100644 content/develop/reference/signals.md diff --git a/content/commands/acl-cat/index.md b/content/commands/acl-cat/index.md index 2f21f31360..78711b8739 100644 --- a/content/commands/acl-cat/index.md +++ b/content/commands/acl-cat/index.md @@ -7,6 +7,16 @@ arguments: optional: true type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes 
+- clients command_flags: - noscript - loading diff --git a/content/commands/acl-deluser/index.md b/content/commands/acl-deluser/index.md index da6b0afb2f..a6f73de458 100644 --- a/content/commands/acl-deluser/index.md +++ b/content/commands/acl-deluser/index.md @@ -9,6 +9,16 @@ arguments: name: username type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/acl-dryrun/index.md b/content/commands/acl-dryrun/index.md index 1dcdfde546..4be989e071 100644 --- a/content/commands/acl-dryrun/index.md +++ b/content/commands/acl-dryrun/index.md @@ -16,6 +16,16 @@ arguments: optional: true type: string arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript @@ -43,6 +53,6 @@ This command can be used to test the permissions of a given user without having "OK" > ACL DRYRUN VIRGINIA SET foo bar "OK" -> ACL DRYRUN VIRGINIA GET foo bar -"This user has no permissions to run the 'GET' command" +> ACL DRYRUN VIRGINIA GET foo +"User VIRGINIA has no permissions to run the 'get' command" ``` diff --git a/content/commands/acl-genpass/index.md b/content/commands/acl-genpass/index.md index 122166b584..0c0115507c 100644 --- a/content/commands/acl-genpass/index.md +++ b/content/commands/acl-genpass/index.md @@ -7,6 +7,16 @@ arguments: optional: true type: integer arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/acl-getuser/index.md b/content/commands/acl-getuser/index.md index 0416c594da..54c3e561c0 100644 --- a/content/commands/acl-getuser/index.md +++ b/content/commands/acl-getuser/index.md @@ -8,6 +8,16 @@ arguments: name: username type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - 
noscript diff --git a/content/commands/acl-help/index.md b/content/commands/acl-help/index.md index d15f834b9b..e98c483a73 100644 --- a/content/commands/acl-help/index.md +++ b/content/commands/acl-help/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/acl-list/index.md b/content/commands/acl-list/index.md index b311b4997f..a00a2be016 100644 --- a/content/commands/acl-list/index.md +++ b/content/commands/acl-list/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/acl-load/index.md b/content/commands/acl-load/index.md index 1ce12d688d..53cca3c692 100644 --- a/content/commands/acl-load/index.md +++ b/content/commands/acl-load/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/acl-log/index.md b/content/commands/acl-log/index.md index 4619a9161b..c30a619f4a 100644 --- a/content/commands/acl-log/index.md +++ b/content/commands/acl-log/index.md @@ -16,6 +16,16 @@ arguments: optional: true type: oneof arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/acl-save/index.md b/content/commands/acl-save/index.md index 640099b334..d136c536d5 100644 --- a/content/commands/acl-save/index.md +++ b/content/commands/acl-save/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git 
a/content/commands/acl-setuser/index.md b/content/commands/acl-setuser/index.md index 76fb97c77d..a683026731 100644 --- a/content/commands/acl-setuser/index.md +++ b/content/commands/acl-setuser/index.md @@ -13,6 +13,16 @@ arguments: optional: true type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/acl-users/index.md b/content/commands/acl-users/index.md index 6a76345f38..4a3fc90907 100644 --- a/content/commands/acl-users/index.md +++ b/content/commands/acl-users/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/acl-whoami/index.md b/content/commands/acl-whoami/index.md index 0e770b6ac0..2624323798 100644 --- a/content/commands/acl-whoami/index.md +++ b/content/commands/acl-whoami/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/acl/index.md b/content/commands/acl/index.md index f5b4272849..bd06cfd25d 100644 --- a/content/commands/acl/index.md +++ b/content/commands/acl/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for Access List Control commands. 
group: server diff --git a/content/commands/append/index.md b/content/commands/append/index.md index 164b26bb03..86b2b53a57 100644 --- a/content/commands/append/index.md +++ b/content/commands/append/index.md @@ -12,6 +12,16 @@ arguments: name: value type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/asking/index.md b/content/commands/asking/index.md index 6723c6b339..9c538b4962 100644 --- a/content/commands/asking/index.md +++ b/content/commands/asking/index.md @@ -3,6 +3,16 @@ acl_categories: - '@fast' - '@connection' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - fast complexity: O(1) diff --git a/content/commands/auth/index.md b/content/commands/auth/index.md index 286ee8bf51..fb08542a99 100644 --- a/content/commands/auth/index.md +++ b/content/commands/auth/index.md @@ -12,6 +12,16 @@ arguments: name: password type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/bf.add/index.md b/content/commands/bf.add/index.md index c50d835073..58d41ee347 100644 --- a/content/commands/bf.add/index.md +++ b/content/commands/bf.add/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k), where k is the number of hash functions used by the last sub-filter description: Adds an item to a Bloom Filter group: bf diff --git a/content/commands/bf.card/index.md b/content/commands/bf.card/index.md index af04ac40c2..dd5a6d7af2 100644 --- a/content/commands/bf.card/index.md +++ b/content/commands/bf.card/index.md @@ -2,6 +2,16 @@ arguments: - name: key type: key +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- 
clients complexity: O(1) description: Returns the cardinality of a Bloom filter group: bf diff --git a/content/commands/bf.exists/index.md b/content/commands/bf.exists/index.md index 325ca3cba4..e104dfecb4 100644 --- a/content/commands/bf.exists/index.md +++ b/content/commands/bf.exists/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k), where k is the number of hash functions used by the last sub-filter description: Checks whether an item exists in a Bloom Filter group: bf diff --git a/content/commands/bf.info/index.md b/content/commands/bf.info/index.md index 06d2f76ae0..2262b25ef1 100644 --- a/content/commands/bf.info/index.md +++ b/content/commands/bf.info/index.md @@ -21,6 +21,16 @@ arguments: name: single_value optional: true type: oneof +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns information about a Bloom Filter group: bf diff --git a/content/commands/bf.insert/index.md b/content/commands/bf.insert/index.md index 264efe8f92..a6a55e893a 100644 --- a/content/commands/bf.insert/index.md +++ b/content/commands/bf.insert/index.md @@ -28,6 +28,16 @@ arguments: - multiple: true name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k * n), where k is the number of hash functions and n is the number of items description: Adds one or more items to a Bloom Filter. A filter will be created if @@ -40,8 +50,10 @@ since: 1.0.0 stack_path: docs/data-types/probabilistic summary: Adds one or more items to a Bloom Filter. 
A filter will be created if it does not exist -syntax_fmt: "BF.INSERT key [CAPACITY\_capacity] [ERROR\_error]\n [EXPANSION\_expansion]\ - \ [NOCREATE] [NONSCALING] ITEMS item [item\n ...]" +syntax_fmt: "BF.INSERT key [CAPACITY\_capacity] [ERROR\_error] + [EXPANSION\_expansion]\ + \ [NOCREATE] [NONSCALING] ITEMS item [item + ...]" syntax_str: "[CAPACITY\_capacity] [ERROR\_error] [EXPANSION\_expansion] [NOCREATE]\ \ [NONSCALING] ITEMS item [item ...]" title: BF.INSERT diff --git a/content/commands/bf.loadchunk/index.md b/content/commands/bf.loadchunk/index.md index 5261a41fd5..c864174e94 100644 --- a/content/commands/bf.loadchunk/index.md +++ b/content/commands/bf.loadchunk/index.md @@ -6,6 +6,16 @@ arguments: type: integer - name: data type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n), where n is the capacity description: Restores a filter previously saved using SCANDUMP group: bf diff --git a/content/commands/bf.madd/index.md b/content/commands/bf.madd/index.md index 5280e14ce1..6d38bcdd66 100644 --- a/content/commands/bf.madd/index.md +++ b/content/commands/bf.madd/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k * n), where k is the number of hash functions and n is the number of items description: Adds one or more items to a Bloom Filter. 
A filter will be created if diff --git a/content/commands/bf.mexists/index.md b/content/commands/bf.mexists/index.md index d1621c96e0..1399dff5da 100644 --- a/content/commands/bf.mexists/index.md +++ b/content/commands/bf.mexists/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k * n), where k is the number of hash functions and n is the number of items description: Checks whether one or more items exist in a Bloom Filter diff --git a/content/commands/bf.reserve/index.md b/content/commands/bf.reserve/index.md index 93b0989581..a85b640a4e 100644 --- a/content/commands/bf.reserve/index.md +++ b/content/commands/bf.reserve/index.md @@ -14,6 +14,16 @@ arguments: optional: true token: NONSCALING type: pure-token +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Creates a new Bloom Filter group: bf @@ -23,7 +33,8 @@ module: Bloom since: 1.0.0 stack_path: docs/data-types/probabilistic summary: Creates a new Bloom Filter -syntax_fmt: "BF.RESERVE key error_rate capacity [EXPANSION\_expansion]\n [NONSCALING]" +syntax_fmt: "BF.RESERVE key error_rate capacity [EXPANSION\_expansion] + [NONSCALING]" syntax_str: "error_rate capacity [EXPANSION\_expansion] [NONSCALING]" title: BF.RESERVE --- diff --git a/content/commands/bf.scandump/index.md b/content/commands/bf.scandump/index.md index 53cdbad931..e5dd9d95bd 100644 --- a/content/commands/bf.scandump/index.md +++ b/content/commands/bf.scandump/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: iterator type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n), where n is the capacity description: Begins an incremental save of the bloom filter group: bf diff --git a/content/commands/bgrewriteaof/index.md b/content/commands/bgrewriteaof/index.md index 
db81589eef..e35b5aab64 100644 --- a/content/commands/bgrewriteaof/index.md +++ b/content/commands/bgrewriteaof/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/bgsave/index.md b/content/commands/bgsave/index.md index 6a8a6c0589..bce771b490 100644 --- a/content/commands/bgsave/index.md +++ b/content/commands/bgsave/index.md @@ -11,6 +11,16 @@ arguments: token: SCHEDULE type: pure-token arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/bitcount/index.md b/content/commands/bitcount/index.md index 7013b0820c..58ce118bcc 100644 --- a/content/commands/bitcount/index.md +++ b/content/commands/bitcount/index.md @@ -32,6 +32,16 @@ arguments: optional: true type: block arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) diff --git a/content/commands/bitfield/index.md b/content/commands/bitfield/index.md index 1eef4c8c73..9bd5260766 100644 --- a/content/commands/bitfield/index.md +++ b/content/commands/bitfield/index.md @@ -73,6 +73,16 @@ arguments: optional: true type: oneof arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -99,10 +109,14 @@ key_specs: linkTitle: BITFIELD since: 3.2.0 summary: Performs arbitrary bitfield integer operations on strings. 
-syntax_fmt: "BITFIELD key [GET\_encoding offset | [OVERFLOW\_]\n\ - \ \n [GET\_encoding\ - \ offset | [OVERFLOW\_]\n \n ...]]" +syntax_fmt: "BITFIELD key [GET\_encoding offset | [OVERFLOW\_] +\ + \ + [GET\_encoding\ + \ offset | [OVERFLOW\_] + + ...]]" syntax_str: "[GET\_encoding offset | [OVERFLOW\_] [GET\_encoding offset | [OVERFLOW\_\ ] \ diff --git a/content/commands/bitfield_ro/index.md b/content/commands/bitfield_ro/index.md index 85037a16f8..6ee9b4a051 100644 --- a/content/commands/bitfield_ro/index.md +++ b/content/commands/bitfield_ro/index.md @@ -22,6 +22,16 @@ arguments: token: GET type: block arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/bitop/index.md b/content/commands/bitop/index.md index d0ee9b2115..b3ced21958 100644 --- a/content/commands/bitop/index.md +++ b/content/commands/bitop/index.md @@ -33,6 +33,16 @@ arguments: name: key type: key arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/bitpos/index.md b/content/commands/bitpos/index.md index a008af2aae..202d315b74 100644 --- a/content/commands/bitpos/index.md +++ b/content/commands/bitpos/index.md @@ -39,6 +39,16 @@ arguments: optional: true type: block arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) diff --git a/content/commands/blmove/index.md b/content/commands/blmove/index.md index 21ebee2b82..098865c454 100644 --- a/content/commands/blmove/index.md +++ b/content/commands/blmove/index.md @@ -39,6 +39,16 @@ arguments: name: timeout type: double arity: 6 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/blmpop/index.md b/content/commands/blmpop/index.md index 
626d911c70..178a81b4fa 100644 --- a/content/commands/blmpop/index.md +++ b/content/commands/blmpop/index.md @@ -33,6 +33,16 @@ arguments: token: COUNT type: integer arity: -5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - blocking diff --git a/content/commands/blpop/index.md b/content/commands/blpop/index.md index 4759cc1d46..ca4bacaba8 100644 --- a/content/commands/blpop/index.md +++ b/content/commands/blpop/index.md @@ -14,6 +14,16 @@ arguments: name: timeout type: double arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - blocking diff --git a/content/commands/brpop/index.md b/content/commands/brpop/index.md index b6512f1779..ae7fa249b7 100644 --- a/content/commands/brpop/index.md +++ b/content/commands/brpop/index.md @@ -14,6 +14,16 @@ arguments: name: timeout type: double arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - blocking diff --git a/content/commands/brpoplpush/index.md b/content/commands/brpoplpush/index.md index 61416d346f..8064848bd9 100644 --- a/content/commands/brpoplpush/index.md +++ b/content/commands/brpoplpush/index.md @@ -17,6 +17,16 @@ arguments: name: timeout type: double arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/bzmpop/index.md b/content/commands/bzmpop/index.md index 62f6b84fae..b7959381f0 100644 --- a/content/commands/bzmpop/index.md +++ b/content/commands/bzmpop/index.md @@ -33,6 +33,16 @@ arguments: token: COUNT type: integer arity: -5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - blocking diff --git a/content/commands/bzpopmax/index.md b/content/commands/bzpopmax/index.md index 7471828518..e33b828439 100644 --- 
a/content/commands/bzpopmax/index.md +++ b/content/commands/bzpopmax/index.md @@ -14,6 +14,16 @@ arguments: name: timeout type: double arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - blocking diff --git a/content/commands/bzpopmin/index.md b/content/commands/bzpopmin/index.md index 99a186dfca..fb4dbb8916 100644 --- a/content/commands/bzpopmin/index.md +++ b/content/commands/bzpopmin/index.md @@ -14,6 +14,16 @@ arguments: name: timeout type: double arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - blocking diff --git a/content/commands/cf.add/index.md b/content/commands/cf.add/index.md index 2df24c676d..1f2fe80edf 100644 --- a/content/commands/cf.add/index.md +++ b/content/commands/cf.add/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k + i), where k is the number of sub-filters and i is maxIterations description: Adds an item to a Cuckoo Filter group: cf diff --git a/content/commands/cf.addnx/index.md b/content/commands/cf.addnx/index.md index ce74c79e75..95d27f071f 100644 --- a/content/commands/cf.addnx/index.md +++ b/content/commands/cf.addnx/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k + i), where k is the number of sub-filters and i is maxIterations description: Adds an item to a Cuckoo Filter if the item did not exist previously. 
group: cf diff --git a/content/commands/cf.count/index.md b/content/commands/cf.count/index.md index a94a662872..2d35e4a17b 100644 --- a/content/commands/cf.count/index.md +++ b/content/commands/cf.count/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k), where k is the number of sub-filters description: Return the number of times an item might be in a Cuckoo Filter group: cf diff --git a/content/commands/cf.del/index.md b/content/commands/cf.del/index.md index fa2864f792..643b5d3bcb 100644 --- a/content/commands/cf.del/index.md +++ b/content/commands/cf.del/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k), where k is the number of sub-filters description: Deletes an item from a Cuckoo Filter group: cf diff --git a/content/commands/cf.exists/index.md b/content/commands/cf.exists/index.md index 3292c67748..60dcace812 100644 --- a/content/commands/cf.exists/index.md +++ b/content/commands/cf.exists/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k), where k is the number of sub-filters description: Checks whether one or more items exist in a Cuckoo Filter group: cf diff --git a/content/commands/cf.info/index.md b/content/commands/cf.info/index.md index 65b7e39aec..e1caecea1d 100644 --- a/content/commands/cf.info/index.md +++ b/content/commands/cf.info/index.md @@ -2,6 +2,16 @@ arguments: - name: key type: key +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns information about a Cuckoo Filter group: cf diff --git a/content/commands/cf.insert/index.md b/content/commands/cf.insert/index.md index 
32a4f5a24b..ead8b2b160 100644 --- a/content/commands/cf.insert/index.md +++ b/content/commands/cf.insert/index.md @@ -16,6 +16,16 @@ arguments: - multiple: true name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n * (k + i)), where n is the number of items, k is the number of sub-filters and i is maxIterations description: Adds one or more items to a Cuckoo Filter. A filter will be created if diff --git a/content/commands/cf.insertnx/index.md b/content/commands/cf.insertnx/index.md index b01ceb2153..3e30fbe4a4 100644 --- a/content/commands/cf.insertnx/index.md +++ b/content/commands/cf.insertnx/index.md @@ -16,6 +16,16 @@ arguments: - multiple: true name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n * (k + i)), where n is the number of items, k is the number of sub-filters and i is maxIterations description: Adds one or more items to a Cuckoo Filter if the items did not exist diff --git a/content/commands/cf.loadchunk/index.md b/content/commands/cf.loadchunk/index.md index fd4bca3a60..3212d97404 100644 --- a/content/commands/cf.loadchunk/index.md +++ b/content/commands/cf.loadchunk/index.md @@ -6,6 +6,16 @@ arguments: type: integer - name: data type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n), where n is the capacity description: Restores a filter previously saved using SCANDUMP group: cf diff --git a/content/commands/cf.mexists/index.md b/content/commands/cf.mexists/index.md index 1c67140049..fd8ca664bc 100644 --- a/content/commands/cf.mexists/index.md +++ b/content/commands/cf.mexists/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k * n), where k is the number of sub-filters and n is the number of 
items description: Checks whether one or more items exist in a Cuckoo Filter diff --git a/content/commands/cf.reserve/index.md b/content/commands/cf.reserve/index.md index 223d16996a..4bfdfd1e7a 100644 --- a/content/commands/cf.reserve/index.md +++ b/content/commands/cf.reserve/index.md @@ -16,6 +16,16 @@ arguments: optional: true token: EXPANSION type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Creates a new Cuckoo Filter group: cf @@ -25,7 +35,8 @@ module: Bloom since: 1.0.0 stack_path: docs/data-types/probabilistic summary: Creates a new Cuckoo Filter -syntax_fmt: "CF.RESERVE key capacity [BUCKETSIZE\_bucketsize]\n [MAXITERATIONS\_\ +syntax_fmt: "CF.RESERVE key capacity [BUCKETSIZE\_bucketsize] + [MAXITERATIONS\_\ maxiterations] [EXPANSION\_expansion]" syntax_str: "capacity [BUCKETSIZE\_bucketsize] [MAXITERATIONS\_maxiterations] [EXPANSION\_\ expansion]" @@ -57,25 +68,40 @@ is key name for the the cuckoo filter to be created.
capacity -Estimated capacity for the filter. Capacity is rounded to the next `2^n` number. The filter will likely not fill up to 100% of it's capacity. -Make sure to reserve extra capacity if you want to avoid expansions. +Estimated capacity for the filter. + +Capacity is rounded to the next `2^n` number. + +The filter will likely not fill up to 100% of it's capacity. Make sure to reserve extra capacity if you want to avoid expansions.
## Optional arguments
BUCKETSIZE bucketsize -Number of items in each bucket. A higher bucket size value improves the fill rate but also causes a higher error rate and slightly slower performance. The default value is 2. +Number of items in each bucket. + +A higher bucket size value improves the fill rate but also causes a higher error rate and slightly slower performance. + +`bucketsize` is an integer between 1 and 255. The default value is 2.
MAXITERATIONS maxiterations -Number of attempts to swap items between buckets before declaring filter as full and creating an additional filter. A low value is better for performance and a higher number is better for filter fill rate. The default value is 20. +Number of attempts to swap items between buckets before declaring filter as full and creating an additional filter. + +A low value is better for performance and a higher number is better for filter fill rate. + +`maxiterations` is an integer between 1 and 65535. The default value is 20.
EXPANSION expansion -When a new filter is created, its size is the size of the current filter multiplied by `expansion`, specified as a non-negative integer. Expansion is rounded to the next `2^n` number. The default value is `1`. +When a new filter is created, its size is the size of the current filter multiplied by `expansion`. + +`expansion` is an integer between 0 and 32768. The default value is 1. + +Expansion is rounded to the next `2^n` number.
## Return value diff --git a/content/commands/cf.scandump/index.md b/content/commands/cf.scandump/index.md index d6ff6ec7f3..3eba4216ec 100644 --- a/content/commands/cf.scandump/index.md +++ b/content/commands/cf.scandump/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: iterator type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n), where n is the capacity description: Begins an incremental save of the bloom filter group: cf diff --git a/content/commands/client-caching/index.md b/content/commands/client-caching/index.md index 3aeacd9573..5ec0ab2dab 100644 --- a/content/commands/client-caching/index.md +++ b/content/commands/client-caching/index.md @@ -15,6 +15,16 @@ arguments: name: mode type: oneof arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/client-getname/index.md b/content/commands/client-getname/index.md index 7a663a06d2..0e0eeacaff 100644 --- a/content/commands/client-getname/index.md +++ b/content/commands/client-getname/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@connection' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/client-getredir/index.md b/content/commands/client-getredir/index.md index 8fe111ad34..31af6b59a5 100644 --- a/content/commands/client-getredir/index.md +++ b/content/commands/client-getredir/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@connection' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/client-help/index.md b/content/commands/client-help/index.md index e76e899e0d..21c2c8e2cb 100644 --- a/content/commands/client-help/index.md +++ b/content/commands/client-help/index.md @@ -3,6 +3,16 
@@ acl_categories: - '@slow' - '@connection' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/client-id/index.md b/content/commands/client-id/index.md index 5e92c2b5c8..88f6704b3d 100644 --- a/content/commands/client-id/index.md +++ b/content/commands/client-id/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@connection' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/client-info/index.md b/content/commands/client-info/index.md index ff3188ca37..cc7416d858 100644 --- a/content/commands/client-info/index.md +++ b/content/commands/client-info/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@connection' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/client-kill/index.md b/content/commands/client-kill/index.md index 3af759b450..8dc236f7ce 100644 --- a/content/commands/client-kill/index.md +++ b/content/commands/client-kill/index.md @@ -80,6 +80,16 @@ arguments: name: filter type: oneof arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript @@ -103,11 +113,16 @@ history: linkTitle: CLIENT KILL since: 2.4.0 summary: Terminates open connections. 
-syntax_fmt: "CLIENT KILL ] | [USER\_username] | [ADDR\_ip:port] |\n [LADDR\_\ - ip:port] | [SKIPME\_] [[ID\_client-id] |\n [TYPE\_] |\n [USER\_username] | [ADDR\_ip:port] | [LADDR\_\ - ip:port] | [SKIPME\_] ...]>>" +syntax_fmt: "CLIENT KILL ] | [USER\_username] | [ADDR\_ip:port] | + [LADDR\_\ + ip:port] | [SKIPME\_] [[ID\_client-id] | + [TYPE\_] | + [USER\_username] | [ADDR\_ip:port] | [LADDR\_\ + ip:port] | [SKIPME\_] ...]>>" syntax_str: '' title: CLIENT KILL --- diff --git a/content/commands/client-list/index.md b/content/commands/client-list/index.md index 368640e66e..cedc759a05 100644 --- a/content/commands/client-list/index.md +++ b/content/commands/client-list/index.md @@ -35,6 +35,16 @@ arguments: token: ID type: integer arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript @@ -63,7 +73,8 @@ history: linkTitle: CLIENT LIST since: 2.4.0 summary: Lists open connections. -syntax_fmt: "CLIENT LIST [TYPE\_]\n [ID\_client-id\ +syntax_fmt: "CLIENT LIST [TYPE\_] + [ID\_client-id\ \ [client-id ...]]" syntax_str: "[ID\_client-id [client-id ...]]" title: CLIENT LIST diff --git a/content/commands/client-no-evict/index.md b/content/commands/client-no-evict/index.md index f59bbeca42..76a289671c 100644 --- a/content/commands/client-no-evict/index.md +++ b/content/commands/client-no-evict/index.md @@ -17,6 +17,16 @@ arguments: name: enabled type: oneof arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/client-no-touch/index.md b/content/commands/client-no-touch/index.md index bcabdc62e2..576fb28fe6 100644 --- a/content/commands/client-no-touch/index.md +++ b/content/commands/client-no-touch/index.md @@ -15,6 +15,16 @@ arguments: name: enabled type: oneof arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - 
loading diff --git a/content/commands/client-pause/index.md b/content/commands/client-pause/index.md index d4f0fc0faa..5c57e5ccc4 100644 --- a/content/commands/client-pause/index.md +++ b/content/commands/client-pause/index.md @@ -22,6 +22,16 @@ arguments: since: 6.2.0 type: oneof arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/client-reply/index.md b/content/commands/client-reply/index.md index 780fbd1fb6..96cc5ed822 100644 --- a/content/commands/client-reply/index.md +++ b/content/commands/client-reply/index.md @@ -19,6 +19,16 @@ arguments: name: action type: oneof arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/client-setinfo/index.md b/content/commands/client-setinfo/index.md index 2892165c71..06174bc904 100644 --- a/content/commands/client-setinfo/index.md +++ b/content/commands/client-setinfo/index.md @@ -15,6 +15,16 @@ arguments: name: attr type: oneof arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/client-setname/index.md b/content/commands/client-setname/index.md index ded7b9dd41..a681401c1e 100644 --- a/content/commands/client-setname/index.md +++ b/content/commands/client-setname/index.md @@ -7,6 +7,16 @@ arguments: name: connection-name type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/client-tracking/index.md b/content/commands/client-tracking/index.md index 093b2e880b..0d192e9983 100644 --- a/content/commands/client-tracking/index.md +++ b/content/commands/client-tracking/index.md @@ -47,6 +47,16 @@ arguments: token: NOLOOP type: pure-token arity: -3 +categories: +- docs +- 
develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading @@ -58,7 +68,8 @@ hidden: false linkTitle: CLIENT TRACKING since: 6.0.0 summary: Controls server-assisted client-side caching for the connection. -syntax_fmt: "CLIENT TRACKING [REDIRECT\_client-id] [PREFIX\_prefix\n [PREFIX\ +syntax_fmt: "CLIENT TRACKING [REDIRECT\_client-id] [PREFIX\_prefix + [PREFIX\ \ prefix ...]] [BCAST] [OPTIN] [OPTOUT] [NOLOOP]" syntax_str: "[REDIRECT\_client-id] [PREFIX\_prefix [PREFIX prefix ...]] [BCAST] [OPTIN]\ \ [OPTOUT] [NOLOOP]" diff --git a/content/commands/client-trackinginfo/index.md b/content/commands/client-trackinginfo/index.md index 05ee78e270..3b0bf1d2fe 100644 --- a/content/commands/client-trackinginfo/index.md +++ b/content/commands/client-trackinginfo/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@connection' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/client-unblock/index.md b/content/commands/client-unblock/index.md index 5084fb78c0..3a96846c09 100644 --- a/content/commands/client-unblock/index.md +++ b/content/commands/client-unblock/index.md @@ -21,6 +21,16 @@ arguments: optional: true type: oneof arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/client-unpause/index.md b/content/commands/client-unpause/index.md index 52afd3c8cd..9e8a782627 100644 --- a/content/commands/client-unpause/index.md +++ b/content/commands/client-unpause/index.md @@ -5,6 +5,16 @@ acl_categories: - '@dangerous' - '@connection' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/client/index.md b/content/commands/client/index.md index 3b6e780481..d76f17802b 100644 --- 
a/content/commands/client/index.md +++ b/content/commands/client/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for client connection commands. group: connection diff --git a/content/commands/cluster-addslots/index.md b/content/commands/cluster-addslots/index.md index 8386ec3b49..fc8e78b680 100644 --- a/content/commands/cluster-addslots/index.md +++ b/content/commands/cluster-addslots/index.md @@ -9,6 +9,16 @@ arguments: name: slot type: integer arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-addslotsrange/index.md b/content/commands/cluster-addslotsrange/index.md index 88ad701590..496d83e80a 100644 --- a/content/commands/cluster-addslotsrange/index.md +++ b/content/commands/cluster-addslotsrange/index.md @@ -15,6 +15,16 @@ arguments: name: range type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-bumpepoch/index.md b/content/commands/cluster-bumpepoch/index.md index 8ae11a8d11..f2b010b788 100644 --- a/content/commands/cluster-bumpepoch/index.md +++ b/content/commands/cluster-bumpepoch/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-count-failure-reports/index.md b/content/commands/cluster-count-failure-reports/index.md index a30c494dbf..cd3dcd09e1 100644 --- a/content/commands/cluster-count-failure-reports/index.md +++ b/content/commands/cluster-count-failure-reports/index.md @@ -8,6 +8,16 @@ arguments: name: node-id type: string arity: 3 +categories: +- docs +- 
develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-countkeysinslot/index.md b/content/commands/cluster-countkeysinslot/index.md index ebced0d295..bd8f30d8db 100644 --- a/content/commands/cluster-countkeysinslot/index.md +++ b/content/commands/cluster-countkeysinslot/index.md @@ -6,6 +6,16 @@ arguments: name: slot type: integer arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - stale complexity: O(1) diff --git a/content/commands/cluster-delslots/index.md b/content/commands/cluster-delslots/index.md index 5d2127a826..1bfcae8f80 100644 --- a/content/commands/cluster-delslots/index.md +++ b/content/commands/cluster-delslots/index.md @@ -9,6 +9,16 @@ arguments: name: slot type: integer arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-delslotsrange/index.md b/content/commands/cluster-delslotsrange/index.md index eb2ea9bff1..3d67f6c95f 100644 --- a/content/commands/cluster-delslotsrange/index.md +++ b/content/commands/cluster-delslotsrange/index.md @@ -15,6 +15,16 @@ arguments: name: range type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-failover/index.md b/content/commands/cluster-failover/index.md index eca355a1eb..6d824dd853 100644 --- a/content/commands/cluster-failover/index.md +++ b/content/commands/cluster-failover/index.md @@ -17,6 +17,16 @@ arguments: optional: true type: oneof arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-flushslots/index.md b/content/commands/cluster-flushslots/index.md index 536150a02e..8b852571cf 100644 --- 
a/content/commands/cluster-flushslots/index.md +++ b/content/commands/cluster-flushslots/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-forget/index.md b/content/commands/cluster-forget/index.md index 0443e66ac1..0172647bbc 100644 --- a/content/commands/cluster-forget/index.md +++ b/content/commands/cluster-forget/index.md @@ -8,6 +8,16 @@ arguments: name: node-id type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-getkeysinslot/index.md b/content/commands/cluster-getkeysinslot/index.md index 97e4abc8a5..c8df07d0a2 100644 --- a/content/commands/cluster-getkeysinslot/index.md +++ b/content/commands/cluster-getkeysinslot/index.md @@ -9,6 +9,16 @@ arguments: name: count type: integer arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - stale complexity: O(N) where N is the number of requested keys diff --git a/content/commands/cluster-help/index.md b/content/commands/cluster-help/index.md index 3168c3ac54..3429264216 100644 --- a/content/commands/cluster-help/index.md +++ b/content/commands/cluster-help/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/cluster-info/index.md b/content/commands/cluster-info/index.md index 74f2aab72a..cd6fdda1af 100644 --- a/content/commands/cluster-info/index.md +++ b/content/commands/cluster-info/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - stale complexity: O(1) diff 
--git a/content/commands/cluster-keyslot/index.md b/content/commands/cluster-keyslot/index.md index 2b1c85c2c8..4190488593 100644 --- a/content/commands/cluster-keyslot/index.md +++ b/content/commands/cluster-keyslot/index.md @@ -6,6 +6,16 @@ arguments: name: key type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - stale complexity: O(N) where N is the number of bytes in the key diff --git a/content/commands/cluster-links/index.md b/content/commands/cluster-links/index.md index caf1c9f461..f764362b4c 100644 --- a/content/commands/cluster-links/index.md +++ b/content/commands/cluster-links/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - stale complexity: O(N) where N is the total number of Cluster nodes diff --git a/content/commands/cluster-meet/index.md b/content/commands/cluster-meet/index.md index a55476285f..e99006d438 100644 --- a/content/commands/cluster-meet/index.md +++ b/content/commands/cluster-meet/index.md @@ -16,6 +16,16 @@ arguments: since: 4.0.0 type: integer arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-myid/index.md b/content/commands/cluster-myid/index.md index 541d622c64..021ebf55e9 100644 --- a/content/commands/cluster-myid/index.md +++ b/content/commands/cluster-myid/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - stale complexity: O(1) diff --git a/content/commands/cluster-myshardid/index.md b/content/commands/cluster-myshardid/index.md index 2c7268b27a..f5dca75a87 100644 --- a/content/commands/cluster-myshardid/index.md +++ b/content/commands/cluster-myshardid/index.md @@ -2,6 +2,16 @@ acl_categories: - 
'@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - stale complexity: O(1) diff --git a/content/commands/cluster-nodes/index.md b/content/commands/cluster-nodes/index.md index 94c6818c20..ff05e9e161 100644 --- a/content/commands/cluster-nodes/index.md +++ b/content/commands/cluster-nodes/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - stale complexity: O(N) where N is the total number of Cluster nodes @@ -57,7 +67,8 @@ Each line is composed of the following fields: The meaning of each field is the following: 1. `id`: The node ID, a 40-character globally unique string generated when a node is created and never changed again (unless `CLUSTER RESET HARD` is used). -2. `ip:port@cport`: The node address that clients should contact to run queries. +2. `ip:port@cport`: The node address that clients should contact to run queries, along with the used cluster bus port. + `:0@0` can be expected when the address is no longer known for this node ID, hence flagged with `noaddr`. 3. `hostname`: A human readable string that can be configured via the `cluster-annouce-hostname` setting. The max length of the string is 256 characters, excluding the null terminator. The name can contain ASCII alphanumeric characters, '-', and '.' only. 5. `flags`: A list of comma separated flags: `myself`, `master`, `slave`, `fail?`, `fail`, `handshake`, `noaddr`, `nofailover`, `noflags`. Flags are explained below. 6. `master`: If the node is a replica, and the primary is known, the primary node ID, otherwise the "-" character. 
diff --git a/content/commands/cluster-replicas/index.md b/content/commands/cluster-replicas/index.md index a1f2c63ef7..14d7d748b4 100644 --- a/content/commands/cluster-replicas/index.md +++ b/content/commands/cluster-replicas/index.md @@ -8,6 +8,16 @@ arguments: name: node-id type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-replicate/index.md b/content/commands/cluster-replicate/index.md index e2ac9b091b..1d667ec6c6 100644 --- a/content/commands/cluster-replicate/index.md +++ b/content/commands/cluster-replicate/index.md @@ -8,6 +8,16 @@ arguments: name: node-id type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-reset/index.md b/content/commands/cluster-reset/index.md index 2e0ed6be80..3b769ce036 100644 --- a/content/commands/cluster-reset/index.md +++ b/content/commands/cluster-reset/index.md @@ -17,6 +17,16 @@ arguments: optional: true type: oneof arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/cluster-saveconfig/index.md b/content/commands/cluster-saveconfig/index.md index babc3eddb5..affdc5ba50 100644 --- a/content/commands/cluster-saveconfig/index.md +++ b/content/commands/cluster-saveconfig/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-set-config-epoch/index.md b/content/commands/cluster-set-config-epoch/index.md index d700be6b95..7773cc86e6 100644 --- a/content/commands/cluster-set-config-epoch/index.md +++ b/content/commands/cluster-set-config-epoch/index.md @@ -8,6 +8,16 @@ arguments: name: 
config-epoch type: integer arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-setslot/index.md b/content/commands/cluster-setslot/index.md index 1e4ad4f109..5b259b90c6 100644 --- a/content/commands/cluster-setslot/index.md +++ b/content/commands/cluster-setslot/index.md @@ -27,6 +27,16 @@ arguments: name: subcommand type: oneof arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale @@ -38,7 +48,8 @@ hidden: false linkTitle: CLUSTER SETSLOT since: 3.0.0 summary: Binds a hash slot to a node. -syntax_fmt: "CLUSTER SETSLOT slot " syntax_str: "" title: CLUSTER SETSLOT diff --git a/content/commands/cluster-shards/index.md b/content/commands/cluster-shards/index.md index 63d12ff1d8..1d410e9aa3 100644 --- a/content/commands/cluster-shards/index.md +++ b/content/commands/cluster-shards/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/cluster-slaves/index.md b/content/commands/cluster-slaves/index.md index 0468204e97..b36adde078 100644 --- a/content/commands/cluster-slaves/index.md +++ b/content/commands/cluster-slaves/index.md @@ -8,6 +8,16 @@ arguments: name: node-id type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - stale diff --git a/content/commands/cluster-slots/index.md b/content/commands/cluster-slots/index.md index 5eeabf7702..1a80e97b69 100644 --- a/content/commands/cluster-slots/index.md +++ b/content/commands/cluster-slots/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git 
a/content/commands/cluster/index.md b/content/commands/cluster/index.md index bcc7304a42..f994e52b15 100644 --- a/content/commands/cluster/index.md +++ b/content/commands/cluster/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for Redis Cluster commands. group: cluster diff --git a/content/commands/cms.incrby/index.md b/content/commands/cms.incrby/index.md index 1da86ecf68..5cb5b8be5d 100644 --- a/content/commands/cms.incrby/index.md +++ b/content/commands/cms.incrby/index.md @@ -10,6 +10,16 @@ arguments: multiple: true name: items type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n) where n is the number of items description: Increases the count of one or more items by increment group: cms diff --git a/content/commands/cms.info/index.md b/content/commands/cms.info/index.md index 861655ac5e..6986308952 100644 --- a/content/commands/cms.info/index.md +++ b/content/commands/cms.info/index.md @@ -2,6 +2,16 @@ arguments: - name: key type: key +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns information about a sketch group: cms diff --git a/content/commands/cms.initbydim/index.md b/content/commands/cms.initbydim/index.md index ceca0d18ac..8bb59510f9 100644 --- a/content/commands/cms.initbydim/index.md +++ b/content/commands/cms.initbydim/index.md @@ -6,6 +6,16 @@ arguments: type: integer - name: depth type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Initializes a Count-Min Sketch to dimensions specified by user group: cms diff --git a/content/commands/cms.initbyprob/index.md b/content/commands/cms.initbyprob/index.md index 0ebf08d534..0fb598239f 100644 --- 
a/content/commands/cms.initbyprob/index.md +++ b/content/commands/cms.initbyprob/index.md @@ -6,6 +6,16 @@ arguments: type: double - name: probability type: double +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Initializes a Count-Min Sketch to accommodate requested tolerances. group: cms diff --git a/content/commands/cms.merge/index.md b/content/commands/cms.merge/index.md index fdc6c1c71a..9152c13510 100644 --- a/content/commands/cms.merge/index.md +++ b/content/commands/cms.merge/index.md @@ -17,6 +17,16 @@ arguments: name: weight optional: true type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n) where n is the number of sketches description: Merges several sketches into one sketch group: cms @@ -26,7 +36,8 @@ module: Bloom since: 2.0.0 stack_path: docs/data-types/probabilistic summary: Merges several sketches into one sketch -syntax_fmt: "CMS.MERGE destination numKeys source [source ...] [WEIGHTS weight\n \ +syntax_fmt: "CMS.MERGE destination numKeys source [source ...] [WEIGHTS weight + \ \ [weight ...]]" syntax_str: numKeys source [source ...] 
[WEIGHTS weight [weight ...]] title: CMS.MERGE diff --git a/content/commands/cms.query/index.md b/content/commands/cms.query/index.md index e5f913f165..a1464be63b 100644 --- a/content/commands/cms.query/index.md +++ b/content/commands/cms.query/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n) where n is the number of items description: Returns the count for one or more items in a sketch group: cms diff --git a/content/commands/command-count/index.md b/content/commands/command-count/index.md index e0321866ba..cd1baf1392 100644 --- a/content/commands/command-count/index.md +++ b/content/commands/command-count/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@connection' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/command-docs/index.md b/content/commands/command-docs/index.md index b046014583..c223f90aa6 100644 --- a/content/commands/command-docs/index.md +++ b/content/commands/command-docs/index.md @@ -9,6 +9,16 @@ arguments: optional: true type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/command-getkeys/index.md b/content/commands/command-getkeys/index.md index d4809ba053..3c0584733a 100644 --- a/content/commands/command-getkeys/index.md +++ b/content/commands/command-getkeys/index.md @@ -12,6 +12,16 @@ arguments: optional: true type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/command-getkeysandflags/index.md b/content/commands/command-getkeysandflags/index.md index a8dd3671d5..eb550228fb 100644 --- a/content/commands/command-getkeysandflags/index.md +++ 
b/content/commands/command-getkeysandflags/index.md @@ -12,6 +12,16 @@ arguments: optional: true type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/command-help/index.md b/content/commands/command-help/index.md index ce07206799..bcf70082c2 100644 --- a/content/commands/command-help/index.md +++ b/content/commands/command-help/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@connection' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/command-info/index.md b/content/commands/command-info/index.md index 9321294dcc..e1d7978f4d 100644 --- a/content/commands/command-info/index.md +++ b/content/commands/command-info/index.md @@ -9,6 +9,16 @@ arguments: optional: true type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/command-list/index.md b/content/commands/command-list/index.md index 6b1fef687c..2a7f3b2960 100644 --- a/content/commands/command-list/index.md +++ b/content/commands/command-list/index.md @@ -21,6 +21,16 @@ arguments: token: FILTERBY type: oneof arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale @@ -33,7 +43,8 @@ hints: linkTitle: COMMAND LIST since: 7.0.0 summary: Returns a list of command names. 
-syntax_fmt: "COMMAND LIST [FILTERBY\_]" syntax_str: '' title: COMMAND LIST diff --git a/content/commands/command/index.md b/content/commands/command/index.md index e66f9e58fe..55c37c68b8 100644 --- a/content/commands/command/index.md +++ b/content/commands/command/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@connection' arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/config-get/index.md b/content/commands/config-get/index.md index 7d2d9513cd..62612b9844 100644 --- a/content/commands/config-get/index.md +++ b/content/commands/config-get/index.md @@ -9,6 +9,16 @@ arguments: name: parameter type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/config-help/index.md b/content/commands/config-help/index.md index 8975ea87e2..5c83bc0442 100644 --- a/content/commands/config-help/index.md +++ b/content/commands/config-help/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/config-resetstat/index.md b/content/commands/config-resetstat/index.md index 985eaf8657..b82b8f5ac8 100644 --- a/content/commands/config-resetstat/index.md +++ b/content/commands/config-resetstat/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/config-rewrite/index.md b/content/commands/config-rewrite/index.md index 4838cb8ca8..e227c62be6 100644 --- a/content/commands/config-rewrite/index.md +++ b/content/commands/config-rewrite/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: 
+- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/config-set/index.md b/content/commands/config-set/index.md index 119d46982a..6db01794ff 100644 --- a/content/commands/config-set/index.md +++ b/content/commands/config-set/index.md @@ -15,6 +15,16 @@ arguments: name: data type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/config/index.md b/content/commands/config/index.md index 6fdc0eddc2..48f7aa0164 100644 --- a/content/commands/config/index.md +++ b/content/commands/config/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for server configuration commands. group: server diff --git a/content/commands/copy/index.md b/content/commands/copy/index.md index bbbd714a29..4c1b57f11b 100644 --- a/content/commands/copy/index.md +++ b/content/commands/copy/index.md @@ -23,6 +23,16 @@ arguments: token: REPLACE type: pure-token arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/dbsize/index.md b/content/commands/dbsize/index.md index 862d246cae..9060bc04e1 100644 --- a/content/commands/dbsize/index.md +++ b/content/commands/dbsize/index.md @@ -4,6 +4,16 @@ acl_categories: - '@read' - '@fast' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/debug/index.md b/content/commands/debug/index.md index ea4d202c84..2dd5003a47 100644 --- a/content/commands/debug/index.md +++ b/content/commands/debug/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: -2 +categories: 
+- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/decr/index.md b/content/commands/decr/index.md index 17e9dd38e7..6c1b313088 100644 --- a/content/commands/decr/index.md +++ b/content/commands/decr/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/decrby/index.md b/content/commands/decrby/index.md index 0e24bdbb0d..28ee08987a 100644 --- a/content/commands/decrby/index.md +++ b/content/commands/decrby/index.md @@ -12,6 +12,16 @@ arguments: name: decrement type: integer arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/del/index.md b/content/commands/del/index.md index aad224839f..1f59c38933 100644 --- a/content/commands/del/index.md +++ b/content/commands/del/index.md @@ -10,6 +10,16 @@ arguments: name: key type: key arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(N) where N is the number of keys that will be removed. 
When a key to diff --git a/content/commands/discard/index.md b/content/commands/discard/index.md index 8092099991..0dbbfcef1b 100644 --- a/content/commands/discard/index.md +++ b/content/commands/discard/index.md @@ -3,6 +3,16 @@ acl_categories: - '@fast' - '@transaction' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/dump/index.md b/content/commands/dump/index.md index daaaf238e0..ce0b6bcc5b 100644 --- a/content/commands/dump/index.md +++ b/content/commands/dump/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) to access the key and additional O(N*M) to serialize it, where N diff --git a/content/commands/echo/index.md b/content/commands/echo/index.md index 19e40a8ec4..5a0c807cf7 100644 --- a/content/commands/echo/index.md +++ b/content/commands/echo/index.md @@ -7,6 +7,16 @@ arguments: name: message type: string arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/eval/index.md b/content/commands/eval/index.md index 734f96a5e3..961845f22a 100644 --- a/content/commands/eval/index.md +++ b/content/commands/eval/index.md @@ -21,6 +21,16 @@ arguments: optional: true type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - stale diff --git a/content/commands/eval_ro/index.md b/content/commands/eval_ro/index.md index b6015b259b..e566843a26 100644 --- a/content/commands/eval_ro/index.md +++ b/content/commands/eval_ro/index.md @@ -21,6 +21,16 @@ arguments: optional: true type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - 
noscript diff --git a/content/commands/evalsha/index.md b/content/commands/evalsha/index.md index 1d5d1248ff..7137b16435 100644 --- a/content/commands/evalsha/index.md +++ b/content/commands/evalsha/index.md @@ -21,6 +21,16 @@ arguments: optional: true type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - stale diff --git a/content/commands/evalsha_ro/index.md b/content/commands/evalsha_ro/index.md index c0e5cc263b..bebce6259c 100644 --- a/content/commands/evalsha_ro/index.md +++ b/content/commands/evalsha_ro/index.md @@ -21,6 +21,16 @@ arguments: optional: true type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - noscript diff --git a/content/commands/exec/index.md b/content/commands/exec/index.md index eb6ab695d5..1343021a3b 100644 --- a/content/commands/exec/index.md +++ b/content/commands/exec/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@transaction' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/exists/index.md b/content/commands/exists/index.md index 2087cc82a0..edf6c05981 100644 --- a/content/commands/exists/index.md +++ b/content/commands/exists/index.md @@ -10,6 +10,16 @@ arguments: name: key type: key arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/expire/index.md b/content/commands/expire/index.md index 8fdd484fbf..e2b260f5c3 100644 --- a/content/commands/expire/index.md +++ b/content/commands/expire/index.md @@ -33,6 +33,16 @@ arguments: since: 7.0.0 type: oneof arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git 
a/content/commands/expireat/index.md b/content/commands/expireat/index.md index b45081a1ef..92bdb7e5c8 100644 --- a/content/commands/expireat/index.md +++ b/content/commands/expireat/index.md @@ -33,6 +33,16 @@ arguments: since: 7.0.0 type: oneof arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/expiretime/index.md b/content/commands/expiretime/index.md index 8e07e6e063..971e400c5d 100644 --- a/content/commands/expiretime/index.md +++ b/content/commands/expiretime/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/failover/index.md b/content/commands/failover/index.md index 225d575cb5..8419678415 100644 --- a/content/commands/failover/index.md +++ b/content/commands/failover/index.md @@ -31,6 +31,16 @@ arguments: token: TIMEOUT type: integer arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/fcall/index.md b/content/commands/fcall/index.md index 7a76fcc6a0..960551afce 100644 --- a/content/commands/fcall/index.md +++ b/content/commands/fcall/index.md @@ -21,6 +21,16 @@ arguments: optional: true type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - stale diff --git a/content/commands/fcall_ro/index.md b/content/commands/fcall_ro/index.md index f1b59b5130..3a71050284 100644 --- a/content/commands/fcall_ro/index.md +++ b/content/commands/fcall_ro/index.md @@ -21,6 +21,16 @@ arguments: optional: true type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - noscript diff --git 
a/content/commands/flushall/index.md b/content/commands/flushall/index.md index bd7a566954..fe10a95fbe 100644 --- a/content/commands/flushall/index.md +++ b/content/commands/flushall/index.md @@ -20,6 +20,16 @@ arguments: optional: true type: oneof arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(N) where N is the total number of keys in all databases diff --git a/content/commands/flushdb/index.md b/content/commands/flushdb/index.md index 1e4b7b89d6..b87dd3fd79 100644 --- a/content/commands/flushdb/index.md +++ b/content/commands/flushdb/index.md @@ -20,6 +20,16 @@ arguments: optional: true type: oneof arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(N) where N is the number of keys in the selected database diff --git a/content/commands/ft._list/index.md b/content/commands/ft._list/index.md index 0b64a3e015..0460d5ce61 100644 --- a/content/commands/ft._list/index.md +++ b/content/commands/ft._list/index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns a list of all existing indexes group: search diff --git a/content/commands/ft.aggregate/index.md b/content/commands/ft.aggregate/index.md index 0da2302c03..41fc62f3b6 100644 --- a/content/commands/ft.aggregate/index.md +++ b/content/commands/ft.aggregate/index.md @@ -144,6 +144,16 @@ arguments: since: 2.4.3 token: DIALECT type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Run a search query on an index and perform aggregate transformations on the results @@ -155,23 +165,45 @@ since: 1.1.0 stack_path: docs/interact/search-and-query summary: Run a search query on an index and perform aggregate transformations on the results -syntax: "FT.AGGREGATE index 
query \n [VERBATIM] \n [LOAD count field [field ...]]\ - \ \n [TIMEOUT timeout] \n [ GROUPBY nargs property [property ...] [ REDUCE function\ +syntax: "FT.AGGREGATE index query + [VERBATIM] + [LOAD count field [field ...]]\ + \ + [TIMEOUT timeout] + [ GROUPBY nargs property [property ...] [ REDUCE function\ \ nargs arg [arg ...] [AS name] [ REDUCE function nargs arg [arg ...] [AS name]\ - \ ...]] ...]] \n [ SORTBY nargs [ property ASC | DESC [ property ASC | DESC ...]]\ - \ [MAX num] [WITHCOUNT] \n [ APPLY expression AS name [ APPLY expression AS name\ - \ ...]] \n [ LIMIT offset num] \n [FILTER filter] \n [ WITHCURSOR [COUNT read_size]\ - \ [MAXIDLE idle_time]] \n [ PARAMS nargs name value [ name value ...]] \n [DIALECT\ - \ dialect]\n" -syntax_fmt: "FT.AGGREGATE index query [VERBATIM] [LOAD\_count field [field ...]]\n\ - \ [TIMEOUT\_timeout] [LOAD *] [GROUPBY\_nargs property [property ...]\n [REDUCE\_\ - function nargs arg [arg ...] [AS\_name] [REDUCE\_function\n nargs arg [arg ...]\ - \ [AS\_name] ...]] [GROUPBY\_nargs property\n [property ...] [REDUCE\_function\ - \ nargs arg [arg ...] [AS\_name]\n [REDUCE\_function nargs arg [arg ...] [AS\_\ - name] ...]] ...]]\n [SORTBY\_nargs [property [property \ - \ ...]]\n [MAX\_num]] [APPLY\_expression AS\_name [APPLY\_expression AS\_name\n\ - \ ...]] [LIMIT offset num] [FILTER\_filter] [WITHCURSOR\n [COUNT\_read_size] [MAXIDLE\_\ - idle_time]] [PARAMS nargs name value\n [name value ...]] [DIALECT\_dialect]" + \ ...]] ...]] + [ SORTBY nargs [ property ASC | DESC [ property ASC | DESC ...]]\ + \ [MAX num] [WITHCOUNT] + [ APPLY expression AS name [ APPLY expression AS name\ + \ ...]] + [ LIMIT offset num] + [FILTER filter] + [ WITHCURSOR [COUNT read_size]\ + \ [MAXIDLE idle_time]] + [ PARAMS nargs name value [ name value ...]] + [DIALECT\ + \ dialect] +" +syntax_fmt: "FT.AGGREGATE index query [VERBATIM] [LOAD\_count field [field ...]] +\ + \ [TIMEOUT\_timeout] [LOAD *] [GROUPBY\_nargs property [property ...] 
+ [REDUCE\_\ + function nargs arg [arg ...] [AS\_name] [REDUCE\_function + nargs arg [arg ...]\ + \ [AS\_name] ...]] [GROUPBY\_nargs property + [property ...] [REDUCE\_function\ + \ nargs arg [arg ...] [AS\_name] + [REDUCE\_function nargs arg [arg ...] [AS\_\ + name] ...]] ...]] + [SORTBY\_nargs [property [property \ + \ ...]] + [MAX\_num]] [APPLY\_expression AS\_name [APPLY\_expression AS\_name +\ + \ ...]] [LIMIT offset num] [FILTER\_filter] [WITHCURSOR + [COUNT\_read_size] [MAXIDLE\_\ + idle_time]] [PARAMS nargs name value + [name value ...]] [DIALECT\_dialect]" syntax_str: "query [VERBATIM] [LOAD\_count field [field ...]] [TIMEOUT\_timeout] [LOAD\ \ *] [GROUPBY\_nargs property [property ...] [REDUCE\_function nargs arg [arg ...]\ \ [AS\_name] [REDUCE\_function nargs arg [arg ...] [AS\_name] ...]] [GROUPBY\_nargs\ diff --git a/content/commands/ft.aliasadd/index.md b/content/commands/ft.aliasadd/index.md index d1ec57bf67..120ba6ddf6 100644 --- a/content/commands/ft.aliasadd/index.md +++ b/content/commands/ft.aliasadd/index.md @@ -4,6 +4,16 @@ arguments: type: string - name: index type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Adds an alias to the index group: search diff --git a/content/commands/ft.aliasdel/index.md b/content/commands/ft.aliasdel/index.md index 338b43345c..c4c00086a4 100644 --- a/content/commands/ft.aliasdel/index.md +++ b/content/commands/ft.aliasdel/index.md @@ -2,6 +2,16 @@ arguments: - name: alias type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Deletes an alias from the index group: search diff --git a/content/commands/ft.aliasupdate/index.md b/content/commands/ft.aliasupdate/index.md index 1f390c354f..4ad7a37599 100644 --- a/content/commands/ft.aliasupdate/index.md +++ b/content/commands/ft.aliasupdate/index.md @@ -4,6 +4,16 @@ arguments: type: string - name: index 
type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Adds or updates an alias to the index group: search diff --git a/content/commands/ft.alter/index.md b/content/commands/ft.alter/index.md index 9b92f3d0c2..4519e2b452 100644 --- a/content/commands/ft.alter/index.md +++ b/content/commands/ft.alter/index.md @@ -16,6 +16,16 @@ arguments: type: string - name: options type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) where N is the number of keys in the keyspace description: Adds a new field to the index group: search diff --git a/content/commands/ft.config-get/index.md b/content/commands/ft.config-get/index.md index b1c293e99d..74359dafdf 100644 --- a/content/commands/ft.config-get/index.md +++ b/content/commands/ft.config-get/index.md @@ -2,6 +2,16 @@ arguments: - name: option type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Retrieves runtime configuration options group: search diff --git a/content/commands/ft.config-help/index.md b/content/commands/ft.config-help/index.md index 024d093434..daa5a2179a 100644 --- a/content/commands/ft.config-help/index.md +++ b/content/commands/ft.config-help/index.md @@ -2,6 +2,16 @@ arguments: - name: option type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Help description of runtime configuration options group: search diff --git a/content/commands/ft.config-set/index.md b/content/commands/ft.config-set/index.md index 6b286dd37b..4fa327765a 100644 --- a/content/commands/ft.config-set/index.md +++ b/content/commands/ft.config-set/index.md @@ -4,6 +4,16 @@ arguments: type: string - name: value type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) 
description: Sets runtime configuration options group: search diff --git a/content/commands/ft.create/index.md b/content/commands/ft.create/index.md index 26fe2166a2..bb5fc61f16 100644 --- a/content/commands/ft.create/index.md +++ b/content/commands/ft.create/index.md @@ -136,6 +136,16 @@ arguments: multiple: true name: field type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(K) at creation where K is the number of fields, O(N) if scanning the keyspace is triggered, where N is the number of keys in the keyspace description: Creates an index with the given spec @@ -152,21 +162,49 @@ module: Search since: 1.0.0 stack_path: docs/interact/search-and-query summary: Creates an index with the given spec -syntax: "FT.CREATE index \n [ON HASH | JSON] \n [PREFIX count prefix [prefix ...]]\ - \ \n [FILTER {filter}]\n [LANGUAGE default_lang] \n [LANGUAGE_FIELD lang_attribute]\ - \ \n [SCORE default_score] \n [SCORE_FIELD score_attribute] \n [PAYLOAD_FIELD\ - \ payload_attribute] \n [MAXTEXTFIELDS] \n [TEMPORARY seconds] \n [NOOFFSETS]\ - \ \n [NOHL] \n [NOFIELDS] \n [NOFREQS] \n [STOPWORDS count [stopword ...]] \n\ - \ [SKIPINITIALSCAN]\n SCHEMA field_name [AS alias] TEXT | TAG | NUMERIC | GEO\ - \ | VECTOR | GEOSHAPE [ SORTABLE [UNF]] \n [NOINDEX] [ field_name [AS alias] TEXT\ - \ | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] ...]\n" -syntax_fmt: "FT.CREATE index [ON\_] [PREFIX\_count prefix [prefix\n \ - \ ...]] [FILTER\_filter] [LANGUAGE\_default_lang]\n [LANGUAGE_FIELD\_lang_attribute]\ - \ [SCORE\_default_score]\n [SCORE_FIELD\_score_attribute] [PAYLOAD_FIELD\_payload_attribute]\n\ - \ [MAXTEXTFIELDS] [TEMPORARY\_seconds] [NOOFFSETS] [NOHL] [NOFIELDS]\n [NOFREQS]\ - \ [STOPWORDS\_count [stopword [stopword ...]]]\n [SKIPINITIALSCAN] SCHEMA field_name\ - \ [AS\_alias] [WITHSUFFIXTRIE] [SORTABLE\ - \ [UNF]]\n [NOINDEX] [field_name [AS\_alias] \ +syntax: "FT.CREATE index + [ON HASH | 
JSON] + [PREFIX count prefix [prefix ...]]\ + \ + [FILTER {filter}] + [LANGUAGE default_lang] + [LANGUAGE_FIELD lang_attribute]\ + \ + [SCORE default_score] + [SCORE_FIELD score_attribute] + [PAYLOAD_FIELD\ + \ payload_attribute] + [MAXTEXTFIELDS] + [TEMPORARY seconds] + [NOOFFSETS]\ + \ + [NOHL] + [NOFIELDS] + [NOFREQS] + [STOPWORDS count [stopword ...]] +\ + \ [SKIPINITIALSCAN] + SCHEMA field_name [AS alias] TEXT | TAG | NUMERIC | GEO\ + \ | VECTOR | GEOSHAPE [ SORTABLE [UNF]] + [NOINDEX] [ field_name [AS alias] TEXT\ + \ | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] ...] +" +syntax_fmt: "FT.CREATE index [ON\_] [PREFIX\_count prefix [prefix + \ + \ ...]] [FILTER\_filter] [LANGUAGE\_default_lang] + [LANGUAGE_FIELD\_lang_attribute]\ + \ [SCORE\_default_score] + [SCORE_FIELD\_score_attribute] [PAYLOAD_FIELD\_payload_attribute] +\ + \ [MAXTEXTFIELDS] [TEMPORARY\_seconds] [NOOFFSETS] [NOHL] [NOFIELDS] + [NOFREQS]\ + \ [STOPWORDS\_count [stopword [stopword ...]]] + [SKIPINITIALSCAN] SCHEMA field_name\ + \ [AS\_alias] [WITHSUFFIXTRIE] [SORTABLE\ + \ [UNF]] + [NOINDEX] [field_name [AS\_alias] \ \ [WITHSUFFIXTRIE] [SORTABLE [UNF]] [NOINDEX] ...]" syntax_str: "[ON\_] [PREFIX\_count prefix [prefix ...]] [FILTER\_filter]\ \ [LANGUAGE\_default_lang] [LANGUAGE_FIELD\_lang_attribute] [SCORE\_default_score]\ diff --git a/content/commands/ft.cursor-del/index.md b/content/commands/ft.cursor-del/index.md index d924488f52..3b2fa0488a 100644 --- a/content/commands/ft.cursor-del/index.md +++ b/content/commands/ft.cursor-del/index.md @@ -4,6 +4,16 @@ arguments: type: string - name: cursor_id type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Deletes a cursor group: search diff --git a/content/commands/ft.cursor-read/index.md b/content/commands/ft.cursor-read/index.md index b395d35f35..4affd41322 100644 --- a/content/commands/ft.cursor-read/index.md +++ 
b/content/commands/ft.cursor-read/index.md @@ -8,6 +8,16 @@ arguments: optional: true token: COUNT type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Reads from a cursor group: search diff --git a/content/commands/ft.dictadd/index.md b/content/commands/ft.dictadd/index.md index 66fa2a6ba1..516b0b92f2 100644 --- a/content/commands/ft.dictadd/index.md +++ b/content/commands/ft.dictadd/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: term type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Adds terms to a dictionary group: search diff --git a/content/commands/ft.dictdel/index.md b/content/commands/ft.dictdel/index.md index 1f3288c3d7..7658bbe8e5 100644 --- a/content/commands/ft.dictdel/index.md +++ b/content/commands/ft.dictdel/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: term type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Deletes terms from a dictionary group: search diff --git a/content/commands/ft.dictdump/index.md b/content/commands/ft.dictdump/index.md index 9a09d5fbe9..a7ec30c875 100644 --- a/content/commands/ft.dictdump/index.md +++ b/content/commands/ft.dictdump/index.md @@ -2,6 +2,16 @@ arguments: - name: dict type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N), where N is the size of the dictionary description: Dumps all terms in the given dictionary group: search diff --git a/content/commands/ft.dropindex/index.md b/content/commands/ft.dropindex/index.md index cbf0048bf5..28d588aa03 100644 --- a/content/commands/ft.dropindex/index.md +++ b/content/commands/ft.dropindex/index.md @@ -9,6 +9,16 @@ arguments: name: delete docs optional: true type: oneof +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- 
oss +- kubernetes +- clients complexity: O(1) or O(N) if documents are deleted, where N is the number of keys in the keyspace description: Deletes the index @@ -19,7 +29,9 @@ module: Search since: 2.0.0 stack_path: docs/interact/search-and-query summary: Deletes the index -syntax: "FT.DROPINDEX index \n [DD]\n" +syntax: "FT.DROPINDEX index + [DD] +" syntax_fmt: FT.DROPINDEX index [DD] syntax_str: '[DD]' title: FT.DROPINDEX diff --git a/content/commands/ft.explain/index.md b/content/commands/ft.explain/index.md index e61a0c8dbe..a4ccbc1ef3 100644 --- a/content/commands/ft.explain/index.md +++ b/content/commands/ft.explain/index.md @@ -9,6 +9,16 @@ arguments: since: 2.4.3 token: DIALECT type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns the execution plan for a complex query group: search @@ -18,7 +28,9 @@ module: Search since: 1.0.0 stack_path: docs/interact/search-and-query summary: Returns the execution plan for a complex query -syntax: "FT.EXPLAIN index query \n [DIALECT dialect]\n" +syntax: "FT.EXPLAIN index query + [DIALECT dialect] +" syntax_fmt: "FT.EXPLAIN index query [DIALECT\_dialect]" syntax_str: "query [DIALECT\_dialect]" title: FT.EXPLAIN diff --git a/content/commands/ft.explaincli/index.md b/content/commands/ft.explaincli/index.md index d57983aee2..7dbc31ac70 100644 --- a/content/commands/ft.explaincli/index.md +++ b/content/commands/ft.explaincli/index.md @@ -9,6 +9,16 @@ arguments: since: 2.4.3 token: DIALECT type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns the execution plan for a complex query group: search @@ -18,7 +28,9 @@ module: Search since: 1.0.0 stack_path: docs/interact/search-and-query summary: Returns the execution plan for a complex query -syntax: "FT.EXPLAINCLI index query \n [DIALECT dialect]\n" +syntax: "FT.EXPLAINCLI index query + [DIALECT dialect] 
+" syntax_fmt: "FT.EXPLAINCLI index query [DIALECT\_dialect]" syntax_str: "query [DIALECT\_dialect]" title: FT.EXPLAINCLI diff --git a/content/commands/ft.info/index.md b/content/commands/ft.info/index.md index 117af6022e..b828bd5fc0 100644 --- a/content/commands/ft.info/index.md +++ b/content/commands/ft.info/index.md @@ -2,6 +2,16 @@ arguments: - name: index type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns information and statistics on the index group: search diff --git a/content/commands/ft.profile/index.md b/content/commands/ft.profile/index.md index d106f965fe..691b4db393 100644 --- a/content/commands/ft.profile/index.md +++ b/content/commands/ft.profile/index.md @@ -20,6 +20,16 @@ arguments: type: pure-token - name: query type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) description: Performs a `FT.SEARCH` or `FT.AGGREGATE` command and collects performance information diff --git a/content/commands/ft.search/index.md b/content/commands/ft.search/index.md index 80ef7abbe7..0ccea8625c 100644 --- a/content/commands/ft.search/index.md +++ b/content/commands/ft.search/index.md @@ -245,6 +245,16 @@ arguments: since: 2.4.3 token: DIALECT type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) description: Searches the index with a textual query, returning either documents or just ids @@ -259,29 +269,66 @@ since: 1.0.0 stack_path: docs/interact/search-and-query summary: Searches the index with a textual query, returning either documents or just ids -syntax: "FT.SEARCH index query \n [NOCONTENT] \n [VERBATIM] [NOSTOPWORDS] \n [WITHSCORES]\ - \ \n [WITHPAYLOADS] \n [WITHSORTKEYS] \n [FILTER numeric_field min max [ FILTER\ - \ numeric_field min max ...]] \n [GEOFILTER geo_field lon lat radius m | km | mi\ - \ | ft [ GEOFILTER geo_field lon 
lat radius m | km | mi | ft ...]] \n [INKEYS count\ - \ key [key ...]] [ INFIELDS count field [field ...]] \n [RETURN count identifier\ - \ [AS property] [ identifier [AS property] ...]] \n [SUMMARIZE [ FIELDS count field\ - \ [field ...]] [FRAGS num] [LEN fragsize] [SEPARATOR separator]] \n [HIGHLIGHT\ - \ [ FIELDS count field [field ...]] [ TAGS open close]] \n [SLOP slop] \n [TIMEOUT\ - \ timeout] \n [INORDER] \n [LANGUAGE language] \n [EXPANDER expander] \n [SCORER\ - \ scorer] \n [EXPLAINSCORE] \n [PAYLOAD payload] \n [SORTBY sortby [ ASC | DESC]\ - \ [WITHCOUNT]] \n [LIMIT offset num] \n [PARAMS nargs name value [ name value\ - \ ...]] \n [DIALECT dialect]\n" -syntax_fmt: "FT.SEARCH index query [NOCONTENT] [VERBATIM] [NOSTOPWORDS]\n [WITHSCORES]\ - \ [WITHPAYLOADS] [WITHSORTKEYS] [FILTER\_numeric_field\n min max [FILTER\_numeric_field\ - \ min max ...]] [GEOFILTER\_geo_field\n lon lat radius [GEOFILTER\_\ - geo_field lon lat\n radius ...]] [INKEYS\_count key [key ...]]\n\ - \ [INFIELDS\_count field [field ...]] [RETURN\_count identifier\n [AS\_property]\ - \ [identifier [AS\_property] ...]] [SUMMARIZE\n [FIELDS\_count field [field ...]]\ - \ [FRAGS\_num] [LEN\_fragsize]\n [SEPARATOR\_separator]] [HIGHLIGHT [FIELDS\_count\ - \ field [field ...]]\n [TAGS open close]] [SLOP\_slop] [TIMEOUT\_timeout] [INORDER]\n\ - \ [LANGUAGE\_language] [EXPANDER\_expander] [SCORER\_scorer]\n [EXPLAINSCORE]\ - \ [PAYLOAD\_payload] [SORTBY\_sortby [ASC | DESC]]\n [LIMIT offset num] [PARAMS\ - \ nargs name value [name value ...]]\n [DIALECT\_dialect]" +syntax: "FT.SEARCH index query + [NOCONTENT] + [VERBATIM] [NOSTOPWORDS] + [WITHSCORES]\ + \ + [WITHPAYLOADS] + [WITHSORTKEYS] + [FILTER numeric_field min max [ FILTER\ + \ numeric_field min max ...]] + [GEOFILTER geo_field lon lat radius m | km | mi\ + \ | ft [ GEOFILTER geo_field lon lat radius m | km | mi | ft ...]] + [INKEYS count\ + \ key [key ...]] [ INFIELDS count field [field ...]] + [RETURN count identifier\ + \ [AS 
property] [ identifier [AS property] ...]] + [SUMMARIZE [ FIELDS count field\ + \ [field ...]] [FRAGS num] [LEN fragsize] [SEPARATOR separator]] + [HIGHLIGHT\ + \ [ FIELDS count field [field ...]] [ TAGS open close]] + [SLOP slop] + [TIMEOUT\ + \ timeout] + [INORDER] + [LANGUAGE language] + [EXPANDER expander] + [SCORER\ + \ scorer] + [EXPLAINSCORE] + [PAYLOAD payload] + [SORTBY sortby [ ASC | DESC]\ + \ [WITHCOUNT]] + [LIMIT offset num] + [PARAMS nargs name value [ name value\ + \ ...]] + [DIALECT dialect] +" +syntax_fmt: "FT.SEARCH index query [NOCONTENT] [VERBATIM] [NOSTOPWORDS] + [WITHSCORES]\ + \ [WITHPAYLOADS] [WITHSORTKEYS] [FILTER\_numeric_field + min max [FILTER\_numeric_field\ + \ min max ...]] [GEOFILTER\_geo_field + lon lat radius [GEOFILTER\_\ + geo_field lon lat + radius ...]] [INKEYS\_count key [key ...]] +\ + \ [INFIELDS\_count field [field ...]] [RETURN\_count identifier + [AS\_property]\ + \ [identifier [AS\_property] ...]] [SUMMARIZE + [FIELDS\_count field [field ...]]\ + \ [FRAGS\_num] [LEN\_fragsize] + [SEPARATOR\_separator]] [HIGHLIGHT [FIELDS\_count\ + \ field [field ...]] + [TAGS open close]] [SLOP\_slop] [TIMEOUT\_timeout] [INORDER] +\ + \ [LANGUAGE\_language] [EXPANDER\_expander] [SCORER\_scorer] + [EXPLAINSCORE]\ + \ [PAYLOAD\_payload] [SORTBY\_sortby [ASC | DESC]] + [LIMIT offset num] [PARAMS\ + \ nargs name value [name value ...]] + [DIALECT\_dialect]" syntax_str: "query [NOCONTENT] [VERBATIM] [NOSTOPWORDS] [WITHSCORES] [WITHPAYLOADS]\ \ [WITHSORTKEYS] [FILTER\_numeric_field min max [FILTER\_numeric_field min max ...]]\ \ [GEOFILTER\_geo_field lon lat radius [GEOFILTER\_geo_field\ diff --git a/content/commands/ft.spellcheck/index.md b/content/commands/ft.spellcheck/index.md index 4124f37184..2fa47a2c3d 100644 --- a/content/commands/ft.spellcheck/index.md +++ b/content/commands/ft.spellcheck/index.md @@ -33,6 +33,16 @@ arguments: since: 2.4.3 token: DIALECT type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss 
+- kubernetes +- clients complexity: O(1) description: Performs spelling correction on a query, returning suggestions for misspelled terms @@ -44,9 +54,14 @@ since: 1.4.0 stack_path: docs/interact/search-and-query summary: Performs spelling correction on a query, returning suggestions for misspelled terms -syntax: "FT.SPELLCHECK index query \n [DISTANCE distance] \n [TERMS INCLUDE | EXCLUDE\ - \ dictionary [terms [terms ...]]] \n [DIALECT dialect]\n" -syntax_fmt: "FT.SPELLCHECK index query [DISTANCE\_distance] [TERMS\_ dictionary [terms [terms ...]]] [DIALECT\_dialect]" syntax_str: "query [DISTANCE\_distance] [TERMS\_ dictionary [terms\ \ [terms ...]]] [DIALECT\_dialect]" diff --git a/content/commands/ft.sugadd/index.md b/content/commands/ft.sugadd/index.md index a09b7c64d9..bce8b34fbd 100644 --- a/content/commands/ft.sugadd/index.md +++ b/content/commands/ft.sugadd/index.md @@ -17,6 +17,16 @@ arguments: optional: true token: PAYLOAD type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Adds a suggestion string to an auto-complete suggestion dictionary group: suggestion @@ -29,7 +39,10 @@ module: Search since: 1.0.0 stack_path: docs/interact/search-and-query summary: Adds a suggestion string to an auto-complete suggestion dictionary -syntax: "FT.SUGADD key string score \n [INCR] \n [PAYLOAD payload]\n" +syntax: "FT.SUGADD key string score + [INCR] + [PAYLOAD payload] +" syntax_fmt: "FT.SUGADD key string score [INCR] [PAYLOAD\_payload]" syntax_str: "string score [INCR] [PAYLOAD\_payload]" title: FT.SUGADD diff --git a/content/commands/ft.sugdel/index.md b/content/commands/ft.sugdel/index.md index c44bf42532..ea839ea1cd 100644 --- a/content/commands/ft.sugdel/index.md +++ b/content/commands/ft.sugdel/index.md @@ -4,6 +4,16 @@ arguments: type: string - name: string type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) 
description: Deletes a string from a suggestion index group: suggestion diff --git a/content/commands/ft.sugget/index.md b/content/commands/ft.sugget/index.md index b5c738d590..7a71106e0f 100644 --- a/content/commands/ft.sugget/index.md +++ b/content/commands/ft.sugget/index.md @@ -20,6 +20,16 @@ arguments: optional: true token: MAX type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Gets completion suggestions for a prefix group: suggestion @@ -32,8 +42,13 @@ module: Search since: 1.0.0 stack_path: docs/interact/search-and-query summary: Gets completion suggestions for a prefix -syntax: "FT.SUGGET key prefix \n [FUZZY] \n [WITHSCORES] \n [WITHPAYLOADS] \n \ - \ [MAX max]\n" +syntax: "FT.SUGGET key prefix + [FUZZY] + [WITHSCORES] + [WITHPAYLOADS] + \ + \ [MAX max] +" syntax_fmt: "FT.SUGGET key prefix [FUZZY] [WITHSCORES] [WITHPAYLOADS] [MAX\_max]" syntax_str: "prefix [FUZZY] [WITHSCORES] [WITHPAYLOADS] [MAX\_max]" title: FT.SUGGET diff --git a/content/commands/ft.suglen/index.md b/content/commands/ft.suglen/index.md index a04bffd53f..c29652f4a6 100644 --- a/content/commands/ft.suglen/index.md +++ b/content/commands/ft.suglen/index.md @@ -2,6 +2,16 @@ arguments: - name: key type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Gets the size of an auto-complete suggestion dictionary group: suggestion diff --git a/content/commands/ft.syndump/index.md b/content/commands/ft.syndump/index.md index f5570d2ebe..a45badbe2a 100644 --- a/content/commands/ft.syndump/index.md +++ b/content/commands/ft.syndump/index.md @@ -2,6 +2,16 @@ arguments: - name: index type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Dumps the contents of a synonym group group: search diff --git a/content/commands/ft.synupdate/index.md 
b/content/commands/ft.synupdate/index.md index 7de01bf528..563351f947 100644 --- a/content/commands/ft.synupdate/index.md +++ b/content/commands/ft.synupdate/index.md @@ -11,6 +11,16 @@ arguments: - multiple: true name: term type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Creates or updates a synonym group with additional terms group: search @@ -20,8 +30,11 @@ module: Search since: 1.2.0 stack_path: docs/interact/search-and-query summary: Creates or updates a synonym group with additional terms -syntax: "FT.SYNUPDATE index synonym_group_id \n [SKIPINITIALSCAN] term [term ...]\n" -syntax_fmt: "FT.SYNUPDATE index synonym_group_id [SKIPINITIALSCAN] term [term\n ...]" +syntax: "FT.SYNUPDATE index synonym_group_id + [SKIPINITIALSCAN] term [term ...] +" +syntax_fmt: "FT.SYNUPDATE index synonym_group_id [SKIPINITIALSCAN] term [term + ...]" syntax_str: synonym_group_id [SKIPINITIALSCAN] term [term ...] title: FT.SYNUPDATE --- diff --git a/content/commands/ft.tagvals/index.md b/content/commands/ft.tagvals/index.md index 97de394b43..dd540613e1 100644 --- a/content/commands/ft.tagvals/index.md +++ b/content/commands/ft.tagvals/index.md @@ -4,6 +4,16 @@ arguments: type: string - name: field_name type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) description: Returns the distinct tags indexed in a Tag field group: search diff --git a/content/commands/function-delete/index.md b/content/commands/function-delete/index.md index 47d77088fe..3fe2338424 100644 --- a/content/commands/function-delete/index.md +++ b/content/commands/function-delete/index.md @@ -8,6 +8,16 @@ arguments: name: library-name type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - noscript @@ -35,8 +45,8 @@ For more information please refer to [Introduction to Redis 
Functions](/topics/f ## Examples ``` -redis> FUNCTION LOAD Lua mylib "redis.register_function('myfunc', function(keys, args) return 'hello' end)" -OK +redis> FUNCTION LOAD "#!lua name=mylib \n redis.register_function('myfunc', function(keys, args) return 'hello' end)" +"mylib" redis> FCALL myfunc 0 "hello" redis> FUNCTION DELETE mylib diff --git a/content/commands/function-dump/index.md b/content/commands/function-dump/index.md index 1644a8d395..4ef37545f4 100644 --- a/content/commands/function-dump/index.md +++ b/content/commands/function-dump/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@scripting' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript complexity: O(N) where N is the number of functions diff --git a/content/commands/function-flush/index.md b/content/commands/function-flush/index.md index d378490b43..8140aa5d2a 100644 --- a/content/commands/function-flush/index.md +++ b/content/commands/function-flush/index.md @@ -17,6 +17,16 @@ arguments: optional: true type: oneof arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - noscript diff --git a/content/commands/function-help/index.md b/content/commands/function-help/index.md index c1b1e7c051..1bd2204564 100644 --- a/content/commands/function-help/index.md +++ b/content/commands/function-help/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@scripting' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/function-kill/index.md b/content/commands/function-kill/index.md index a605471a95..3669fa7f25 100644 --- a/content/commands/function-kill/index.md +++ b/content/commands/function-kill/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@scripting' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- 
kubernetes +- clients command_flags: - noscript - allow_busy diff --git a/content/commands/function-list/index.md b/content/commands/function-list/index.md index 77ebbb36f2..05823222c9 100644 --- a/content/commands/function-list/index.md +++ b/content/commands/function-list/index.md @@ -14,6 +14,16 @@ arguments: token: WITHCODE type: pure-token arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript complexity: O(N) where N is the number of functions diff --git a/content/commands/function-load/index.md b/content/commands/function-load/index.md index 7fb021990c..53e7ea0721 100644 --- a/content/commands/function-load/index.md +++ b/content/commands/function-load/index.md @@ -13,6 +13,16 @@ arguments: name: function-code type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/function-restore/index.md b/content/commands/function-restore/index.md index 4c3b46c014..4a60f0adee 100644 --- a/content/commands/function-restore/index.md +++ b/content/commands/function-restore/index.md @@ -24,6 +24,16 @@ arguments: optional: true type: oneof arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/function-stats/index.md b/content/commands/function-stats/index.md index 1e39401d5b..2614766db5 100644 --- a/content/commands/function-stats/index.md +++ b/content/commands/function-stats/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@scripting' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - allow_busy diff --git a/content/commands/function/index.md b/content/commands/function/index.md index 3f7e269663..490535c7a9 100644 --- a/content/commands/function/index.md +++ 
b/content/commands/function/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for function commands. group: scripting diff --git a/content/commands/geoadd/index.md b/content/commands/geoadd/index.md index ceeec3d27a..ee578f2d9a 100644 --- a/content/commands/geoadd/index.md +++ b/content/commands/geoadd/index.md @@ -41,6 +41,16 @@ arguments: name: data type: block arity: -5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -70,7 +80,8 @@ linkTitle: GEOADD since: 3.2.0 summary: Adds one or more members to a geospatial index. The key is created if it doesn't exist. -syntax_fmt: "GEOADD key [NX | XX] [CH] longitude latitude member [longitude\n latitude\ +syntax_fmt: "GEOADD key [NX | XX] [CH] longitude latitude member [longitude + latitude\ \ member ...]" syntax_str: '[NX | XX] [CH] longitude latitude member [longitude latitude member ...]' title: GEOADD diff --git a/content/commands/geodist/index.md b/content/commands/geodist/index.md index 21db07d9d9..116d1535ab 100644 --- a/content/commands/geodist/index.md +++ b/content/commands/geodist/index.md @@ -35,6 +35,16 @@ arguments: optional: true type: oneof arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) diff --git a/content/commands/geohash/index.md b/content/commands/geohash/index.md index 7ff7b5fa22..c8e329eda7 100644 --- a/content/commands/geohash/index.md +++ b/content/commands/geohash/index.md @@ -14,6 +14,16 @@ arguments: optional: true type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) for each member requested. 
diff --git a/content/commands/geopos/index.md b/content/commands/geopos/index.md index b294895a11..d921e4bb1f 100644 --- a/content/commands/geopos/index.md +++ b/content/commands/geopos/index.md @@ -14,6 +14,16 @@ arguments: optional: true type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) for each member requested. diff --git a/content/commands/georadius/index.md b/content/commands/georadius/index.md index f1698d5108..5382e7b3b7 100644 --- a/content/commands/georadius/index.md +++ b/content/commands/georadius/index.md @@ -92,6 +92,16 @@ arguments: optional: true type: oneof arity: -6 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -156,8 +166,10 @@ replaced_by: '[`GEOSEARCH`](/commands/geosearch) and [`GEOSEARCHSTORE`](/command since: 3.2.0 summary: Queries a geospatial index for members within a distance from a coordinate, optionally stores the result. 
-syntax_fmt: "GEORADIUS key longitude latitude radius \n [WITHCOORD]\ - \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC]\n [STORE\_key | STOREDIST\_\ +syntax_fmt: "GEORADIUS key longitude latitude radius + [WITHCOORD]\ + \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC] + [STORE\_key | STOREDIST\_\ key]" syntax_str: "longitude latitude radius [WITHCOORD] [WITHDIST] [WITHHASH]\ \ [COUNT\_count [ANY]] [ASC | DESC] [STORE\_key | STOREDIST\_key]" diff --git a/content/commands/georadius_ro/index.md b/content/commands/georadius_ro/index.md index a8dd2711dd..a918782054 100644 --- a/content/commands/georadius_ro/index.md +++ b/content/commands/georadius_ro/index.md @@ -78,6 +78,16 @@ arguments: optional: true type: oneof arity: -6 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N+log(M)) where N is the number of elements inside the bounding box @@ -111,7 +121,8 @@ replaced_by: '[`GEOSEARCH`](/commands/geosearch) with the `BYRADIUS` argument' since: 3.2.10 summary: Returns members from a geospatial index that are within a distance from a coordinate. 
-syntax_fmt: "GEORADIUS_RO key longitude latitude radius \n [WITHCOORD]\ +syntax_fmt: "GEORADIUS_RO key longitude latitude radius + [WITHCOORD]\ \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC]" syntax_str: "longitude latitude radius [WITHCOORD] [WITHDIST] [WITHHASH]\ \ [COUNT\_count [ANY]] [ASC | DESC]" diff --git a/content/commands/georadiusbymember/index.md b/content/commands/georadiusbymember/index.md index 86945262d5..f6670bb1ac 100644 --- a/content/commands/georadiusbymember/index.md +++ b/content/commands/georadiusbymember/index.md @@ -88,6 +88,16 @@ arguments: optional: true type: oneof arity: -5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -150,8 +160,10 @@ replaced_by: '[`GEOSEARCH`](/commands/geosearch) and [`GEOSEARCHSTORE`](/command since: 3.2.0 summary: Queries a geospatial index for members within a distance from a member, optionally stores the result. -syntax_fmt: "GEORADIUSBYMEMBER key member radius [WITHCOORD]\n\ - \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC] [STORE\_key\n | STOREDIST\_\ +syntax_fmt: "GEORADIUSBYMEMBER key member radius [WITHCOORD] +\ + \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC] [STORE\_key + | STOREDIST\_\ key]" syntax_str: "member radius [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT\_\ count [ANY]] [ASC | DESC] [STORE\_key | STOREDIST\_key]" diff --git a/content/commands/georadiusbymember_ro/index.md b/content/commands/georadiusbymember_ro/index.md index 62615bd1c2..84a52cbe57 100644 --- a/content/commands/georadiusbymember_ro/index.md +++ b/content/commands/georadiusbymember_ro/index.md @@ -74,6 +74,16 @@ arguments: optional: true type: oneof arity: -5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N+log(M)) where N is the number of elements inside the bounding box @@ -105,7 +115,8 @@ replaced_by: 
'[`GEOSEARCH`](/commands/geosearch) with the `BYRADIUS` and `FROMME since: 3.2.10 summary: Returns members from a geospatial index that are within a distance from a member. -syntax_fmt: "GEORADIUSBYMEMBER_RO key member radius \n [WITHCOORD]\ +syntax_fmt: "GEORADIUSBYMEMBER_RO key member radius + [WITHCOORD]\ \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC]" syntax_str: "member radius [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT\_\ count [ANY]] [ASC | DESC]" diff --git a/content/commands/geosearch/index.md b/content/commands/geosearch/index.md index 3c92277a96..7ab8a294dd 100644 --- a/content/commands/geosearch/index.md +++ b/content/commands/geosearch/index.md @@ -124,6 +124,16 @@ arguments: token: WITHHASH type: pure-token arity: -7 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N+log(M)) where N is the number of elements in the grid-aligned bounding @@ -151,9 +161,12 @@ key_specs: linkTitle: GEOSEARCH since: 6.2.0 summary: Queries a geospatial index for members inside an area of a box or a circle. 
-syntax_fmt: "GEOSEARCH key \n\ - \ | BYBOX\_width height > [ASC | DESC] [COUNT\_count [ANY]] [WITHCOORD] [WITHDIST]\n [WITHHASH]" +syntax_fmt: "GEOSEARCH key +\ + \ | BYBOX\_width height > [ASC | DESC] [COUNT\_count [ANY]] [WITHCOORD] [WITHDIST] + [WITHHASH]" syntax_str: " | BYBOX\_width height > [ASC | DESC] [COUNT\_\ count [ANY]] [WITHCOORD] [WITHDIST] [WITHHASH]" diff --git a/content/commands/geosearchstore/index.md b/content/commands/geosearchstore/index.md index 6899aaf63b..c1269428af 100644 --- a/content/commands/geosearchstore/index.md +++ b/content/commands/geosearchstore/index.md @@ -118,6 +118,16 @@ arguments: token: STOREDIST type: pure-token arity: -8 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -160,9 +170,12 @@ linkTitle: GEOSEARCHSTORE since: 6.2.0 summary: Queries a geospatial index for members inside an area of a box or a circle, optionally stores the result. -syntax_fmt: "GEOSEARCHSTORE destination source \n | BYBOX\_width height\ - \ > [ASC | DESC] [COUNT\_count\n [ANY]] [STOREDIST]" +syntax_fmt: "GEOSEARCHSTORE destination source + | BYBOX\_width height\ + \ > [ASC | DESC] [COUNT\_count + [ANY]] [STOREDIST]" syntax_str: "source | BYBOX\_width height > [ASC | DESC]\ \ [COUNT\_count [ANY]] [STOREDIST]" diff --git a/content/commands/get/index.md b/content/commands/get/index.md index 7450b7ff61..01d2cc8974 100644 --- a/content/commands/get/index.md +++ b/content/commands/get/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/getbit/index.md b/content/commands/getbit/index.md index 44b98a01e1..578cb473a5 100644 --- a/content/commands/getbit/index.md +++ b/content/commands/getbit/index.md @@ -12,6 +12,16 @@ arguments: name: offset type: integer arity: 3 +categories: +- docs +- develop +- stack +- oss 
+- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/getdel/index.md b/content/commands/getdel/index.md index 46ac3dd99b..46f7c0d142 100644 --- a/content/commands/getdel/index.md +++ b/content/commands/getdel/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/getex/index.md b/content/commands/getex/index.md index 3da65dd04e..89cd8cac8b 100644 --- a/content/commands/getex/index.md +++ b/content/commands/getex/index.md @@ -33,6 +33,16 @@ arguments: optional: true type: oneof arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast @@ -58,7 +68,8 @@ key_specs: linkTitle: GETEX since: 6.2.0 summary: Returns the string value of a key after setting its expiration time. -syntax_fmt: "GETEX key [EX\_seconds | PX\_milliseconds | EXAT\_unix-time-seconds |\n\ +syntax_fmt: "GETEX key [EX\_seconds | PX\_milliseconds | EXAT\_unix-time-seconds | +\ \ PXAT\_unix-time-milliseconds | PERSIST]" syntax_str: "[EX\_seconds | PX\_milliseconds | EXAT\_unix-time-seconds | PXAT\_unix-time-milliseconds\ \ | PERSIST]" diff --git a/content/commands/getrange/index.md b/content/commands/getrange/index.md index b6bde8940e..6d5692a7d0 100644 --- a/content/commands/getrange/index.md +++ b/content/commands/getrange/index.md @@ -15,6 +15,16 @@ arguments: name: end type: integer arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the length of the returned string. 
The complexity is ultimately diff --git a/content/commands/getset/index.md b/content/commands/getset/index.md index 9ccfa71d22..04eff68405 100644 --- a/content/commands/getset/index.md +++ b/content/commands/getset/index.md @@ -12,6 +12,16 @@ arguments: name: value type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/hdel/index.md b/content/commands/hdel/index.md index 7981e614ba..f35a70ad48 100644 --- a/content/commands/hdel/index.md +++ b/content/commands/hdel/index.md @@ -13,6 +13,16 @@ arguments: name: field type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/hello/index.md b/content/commands/hello/index.md index 0c966971ea..ce89366e2a 100644 --- a/content/commands/hello/index.md +++ b/content/commands/hello/index.md @@ -27,6 +27,16 @@ arguments: optional: true type: block arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/hexists/index.md b/content/commands/hexists/index.md index 73bbf0f608..b3b5932a3a 100644 --- a/content/commands/hexists/index.md +++ b/content/commands/hexists/index.md @@ -12,6 +12,16 @@ arguments: name: field type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/hget/index.md b/content/commands/hget/index.md index e269d7cf69..87a477cd23 100644 --- a/content/commands/hget/index.md +++ b/content/commands/hget/index.md @@ -12,6 +12,16 @@ arguments: name: field type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/hgetall/index.md 
b/content/commands/hgetall/index.md index 8324468f37..380a76973f 100644 --- a/content/commands/hgetall/index.md +++ b/content/commands/hgetall/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the size of the hash. diff --git a/content/commands/hincrby/index.md b/content/commands/hincrby/index.md index 1b8135a051..5bdcef1572 100644 --- a/content/commands/hincrby/index.md +++ b/content/commands/hincrby/index.md @@ -15,6 +15,16 @@ arguments: name: increment type: integer arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/hincrbyfloat/index.md b/content/commands/hincrbyfloat/index.md index 0ccbde6e5f..dd421366ba 100644 --- a/content/commands/hincrbyfloat/index.md +++ b/content/commands/hincrbyfloat/index.md @@ -15,6 +15,16 @@ arguments: name: increment type: double arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -52,7 +62,7 @@ is negative, the result is to have the hash field value **decremented** instead If the field does not exist, it is set to `0` before performing the operation. An error is returned if one of the following conditions occur: -* The field contains a value of the wrong type (not a string). +* The key contains a value of the wrong type (not a hash). * The current field content or the specified increment are not parsable as a double precision floating point number. 
diff --git a/content/commands/hkeys/index.md b/content/commands/hkeys/index.md index 40580dde88..58621d054c 100644 --- a/content/commands/hkeys/index.md +++ b/content/commands/hkeys/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the size of the hash. diff --git a/content/commands/hlen/index.md b/content/commands/hlen/index.md index 108d8439de..42a37ee52e 100644 --- a/content/commands/hlen/index.md +++ b/content/commands/hlen/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/hmget/index.md b/content/commands/hmget/index.md index 7f0cebd556..cba317b150 100644 --- a/content/commands/hmget/index.md +++ b/content/commands/hmget/index.md @@ -13,6 +13,16 @@ arguments: name: field type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/hmset/index.md b/content/commands/hmset/index.md index 9ef8207872..36cb0e7637 100644 --- a/content/commands/hmset/index.md +++ b/content/commands/hmset/index.md @@ -19,6 +19,16 @@ arguments: name: data type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/hrandfield/index.md b/content/commands/hrandfield/index.md index d78c49ebed..2673e0a25d 100644 --- a/content/commands/hrandfield/index.md +++ b/content/commands/hrandfield/index.md @@ -21,6 +21,16 @@ arguments: optional: true type: block arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the number of fields returned 
diff --git a/content/commands/hscan/index.md b/content/commands/hscan/index.md index a81080fe1e..fb826626ce 100644 --- a/content/commands/hscan/index.md +++ b/content/commands/hscan/index.md @@ -22,6 +22,16 @@ arguments: token: COUNT type: integer arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) for every call. O(N) for a complete iteration, including enough command diff --git a/content/commands/hset/index.md b/content/commands/hset/index.md index 5210a4d109..9d08bcb908 100644 --- a/content/commands/hset/index.md +++ b/content/commands/hset/index.md @@ -19,6 +19,16 @@ arguments: name: data type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/hsetnx/index.md b/content/commands/hsetnx/index.md index 74ceae2450..c7fd5d2b32 100644 --- a/content/commands/hsetnx/index.md +++ b/content/commands/hsetnx/index.md @@ -15,6 +15,16 @@ arguments: name: value type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/hstrlen/index.md b/content/commands/hstrlen/index.md index 3810f1082e..6646e3aeda 100644 --- a/content/commands/hstrlen/index.md +++ b/content/commands/hstrlen/index.md @@ -12,6 +12,16 @@ arguments: name: field type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/hvals/index.md b/content/commands/hvals/index.md index 7141d8cd4c..3c958999be 100644 --- a/content/commands/hvals/index.md +++ b/content/commands/hvals/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) 
where N is the size of the hash. diff --git a/content/commands/incr/index.md b/content/commands/incr/index.md index a28235c74c..dc48810658 100644 --- a/content/commands/incr/index.md +++ b/content/commands/incr/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/incrby/index.md b/content/commands/incrby/index.md index c6823b3aeb..0d40f2ef11 100644 --- a/content/commands/incrby/index.md +++ b/content/commands/incrby/index.md @@ -12,6 +12,16 @@ arguments: name: increment type: integer arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/incrbyfloat/index.md b/content/commands/incrbyfloat/index.md index a1feefc632..1120951f96 100644 --- a/content/commands/incrbyfloat/index.md +++ b/content/commands/incrbyfloat/index.md @@ -12,6 +12,16 @@ arguments: name: increment type: double arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/info/index.md b/content/commands/info/index.md index b210c3ee98..fa66dc9491 100644 --- a/content/commands/info/index.md +++ b/content/commands/info/index.md @@ -9,6 +9,16 @@ arguments: optional: true type: string arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale @@ -116,6 +126,7 @@ Here is the meaning of all fields in the **clients** section: * `blocked_clients`: Number of clients pending on a blocking call ([`BLPOP`](/commands/blpop), [`BRPOP`](/commands/brpop), [`BRPOPLPUSH`](/commands/brpoplpush), [`BLMOVE`](/commands/blmove), [`BZPOPMIN`](/commands/bzpopmin), [`BZPOPMAX`](/commands/bzpopmax)) * `tracking_clients`: Number of clients being tracked ([`CLIENT 
TRACKING`](/commands/client-tracking)) +* `pubsub_clients`: Number of clients in pubsub mode ([`SUBSCRIBE`](/commands/subscribe), [`PSUBSCRIBE`](/commands/psubscribe), [`SSUBSCRIBE`](/commands/ssubscribe)). Added in Redis 8.0 * `clients_in_timeout_table`: Number of clients in the clients timeout table * `total_blocking_keys`: Number of blocking keys. Added in Redis 7.2. * `total_blocking_keys_on_nokey`: Number of blocking keys that one or more clients that would like to be unblocked when the key is deleted. Added in Redis 7.2. @@ -144,9 +155,18 @@ Here is the meaning of all fields in the **memory** section: the net memory usage (`used_memory` minus `used_memory_startup`) * `total_system_memory`: The total amount of memory that the Redis host has * `total_system_memory_human`: Human readable representation of previous value -* `used_memory_lua`: Number of bytes used by the Lua engine -* `used_memory_lua_human`: Human readable representation of previous value -* `used_memory_scripts`: Number of bytes used by cached Lua scripts +* `used_memory_lua`: Number of bytes used by the Lua engine for EVAL scripts. Deprecated in Redis 7.0, renamed to `used_memory_vm_eval` +* `used_memory_vm_eval`: Number of bytes used by the script VM engines for EVAL framework (not part of used_memory). Added in Redis 7.0 +* `used_memory_lua_human`: Human readable representation of previous value. Deprecated in Redis 7.0 +* `used_memory_scripts_eval`: Number of bytes overhead by the EVAL scripts (part of used_memory). Added in Redis 7.0 +* `number_of_cached_scripts`: The number of EVAL scripts cached by the server. Added in Redis 7.0 +* `number_of_functions`: The number of functions. Added in Redis 7.0 +* `number_of_libraries`: The number of libraries. Added in Redis 7.0 +* `used_memory_vm_functions`: Number of bytes used by the script VM engines for Functions framework (not part of used_memory). 
Added in Redis 7.0 +* `used_memory_vm_total`: `used_memory_vm_eval` + `used_memory_vm_functions` (not part of used_memory). Added in Redis 7.0 +* `used_memory_vm_total_human`: Human readable representation of previous value. +* `used_memory_functions`: Number of bytes overhead by Function scripts (part of used_memory). Added in Redis 7.0 +* `used_memory_scripts`: `used_memory_scripts_eval` + `used_memory_functions` (part of used_memory). Added in Redis 7.0 * `used_memory_scripts_human`: Human readable representation of previous value * `maxmemory`: The value of the `maxmemory` configuration directive * `maxmemory_human`: Human readable representation of previous value diff --git a/content/commands/json.arrappend/index.md b/content/commands/json.arrappend/index.md index 48da7b6b44..2311f2187c 100644 --- a/content/commands/json.arrappend/index.md +++ b/content/commands/json.arrappend/index.md @@ -8,6 +8,16 @@ arguments: - multiple: true name: value type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) when path is evaluated to a single value, O(N) when path is evaluated to multiple values, where N is the size of the key description: Append one or more json values into the array at path after the last diff --git a/content/commands/json.arrindex/index.md b/content/commands/json.arrindex/index.md index e88835005d..7c5c30be00 100644 --- a/content/commands/json.arrindex/index.md +++ b/content/commands/json.arrindex/index.md @@ -15,6 +15,16 @@ arguments: name: range optional: true type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value where N is the size of the array, O(N) when path is evaluated to multiple values, where N is the size of the key @@ -102,7 +112,7 @@ redis> JSON.ARRAPPEND item:1 $.colors '"blue"' Return the new length of the `colors` array. 
{{< highlight bash >}} -JSON.GET item:1 +redis> JSON.GET item:1 "{\"name\":\"Noise-cancelling Bluetooth headphones\",\"description\":\"Wireless Bluetooth headphones with noise-cancelling technology\",\"connection\":{\"wireless\":true,\"type\":\"Bluetooth\"},\"price\":99.98,\"stock\":25,\"colors\":[\"black\",\"silver\",\"blue\"]}" {{< / highlight >}} diff --git a/content/commands/json.arrinsert/index.md b/content/commands/json.arrinsert/index.md index 6a179c2742..4c2ff6de9a 100644 --- a/content/commands/json.arrinsert/index.md +++ b/content/commands/json.arrinsert/index.md @@ -9,6 +9,16 @@ arguments: - multiple: true name: value type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value where N is the size of the array, O(N) when path is evaluated to multiple values, where N is the size of the key @@ -84,7 +94,7 @@ redis> JSON.ARRAPPEND item:1 $.colors '"blue"' Return the new length of the `colors` array. 
{{< highlight bash >}} -JSON.GET item:1 +redis> JSON.GET item:1 "{\"name\":\"Noise-cancelling Bluetooth headphones\",\"description\":\"Wireless Bluetooth headphones with noise-cancelling technology\",\"connection\":{\"wireless\":true,\"type\":\"Bluetooth\"},\"price\":99.98,\"stock\":25,\"colors\":[\"black\",\"silver\",\"blue\"]}" {{< / highlight >}} diff --git a/content/commands/json.arrlen/index.md b/content/commands/json.arrlen/index.md index 068525a855..fab415f7a8 100644 --- a/content/commands/json.arrlen/index.md +++ b/content/commands/json.arrlen/index.md @@ -5,6 +5,16 @@ arguments: - name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) where path is evaluated to a single value, O(N) where path is evaluated to multiple values, where N is the size of the key description: Returns the length of the array at path diff --git a/content/commands/json.arrpop/index.md b/content/commands/json.arrpop/index.md index 28214abb77..482fa387e2 100644 --- a/content/commands/json.arrpop/index.md +++ b/content/commands/json.arrpop/index.md @@ -11,6 +11,16 @@ arguments: name: path optional: true type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value where N is the size of the array and the specified index is not the last element, O(1) when path is evaluated to a single value and the specified index is the last element, or O(N) when path diff --git a/content/commands/json.arrtrim/index.md b/content/commands/json.arrtrim/index.md index ff0fd6cd9d..f4befcc3ad 100644 --- a/content/commands/json.arrtrim/index.md +++ b/content/commands/json.arrtrim/index.md @@ -8,6 +8,16 @@ arguments: type: integer - name: stop type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value where N is the 
size of the array, O(N) when path is evaluated to multiple values, where N is the size of the key diff --git a/content/commands/json.clear/index.md b/content/commands/json.clear/index.md index 8ee9daf609..aa6e6545f3 100644 --- a/content/commands/json.clear/index.md +++ b/content/commands/json.clear/index.md @@ -5,6 +5,16 @@ arguments: - name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value where N is the size of the values, O(N) when path is evaluated to multiple values, where N is the size of the key diff --git a/content/commands/json.debug-help/index.md b/content/commands/json.debug-help/index.md index 19063780f5..2b00d972d5 100644 --- a/content/commands/json.debug-help/index.md +++ b/content/commands/json.debug-help/index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: N/A description: Shows helpful information group: json diff --git a/content/commands/json.debug-memory/index.md b/content/commands/json.debug-memory/index.md index 950ba4551c..ff7bdb8452 100644 --- a/content/commands/json.debug-memory/index.md +++ b/content/commands/json.debug-memory/index.md @@ -5,6 +5,16 @@ arguments: - name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value, where N is the size of the value, O(N) when path is evaluated to multiple values, where N is the size of the key diff --git a/content/commands/json.debug/index.md b/content/commands/json.debug/index.md index a5200f8f0b..637e5b4c71 100644 --- a/content/commands/json.debug/index.md +++ b/content/commands/json.debug/index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: N/A description: Debugging container 
command group: json diff --git a/content/commands/json.del/index.md b/content/commands/json.del/index.md index fe03296be8..00833c0579 100644 --- a/content/commands/json.del/index.md +++ b/content/commands/json.del/index.md @@ -5,6 +5,16 @@ arguments: - name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value where N is the size of the deleted value, O(N) when path is evaluated to multiple values, where N is the size of the key diff --git a/content/commands/json.forget/index.md b/content/commands/json.forget/index.md index d97b47ed49..9d79edcb99 100644 --- a/content/commands/json.forget/index.md +++ b/content/commands/json.forget/index.md @@ -5,6 +5,16 @@ arguments: - name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value where N is the size of the deleted value, O(N) when path is evaluated to multiple values, where N is the size of the key diff --git a/content/commands/json.get/index.md b/content/commands/json.get/index.md index b3f6342ff7..075b6de315 100644 --- a/content/commands/json.get/index.md +++ b/content/commands/json.get/index.md @@ -18,6 +18,16 @@ arguments: name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value where N is the size of the value, O(N) when path is evaluated to multiple values, where N is the size of the key @@ -29,7 +39,8 @@ module: JSON since: 1.0.0 stack_path: docs/data-types/json summary: Gets the value at one or more paths in JSON serialized form -syntax_fmt: "JSON.GET key [INDENT\_indent] [NEWLINE\_newline] [SPACE\_space] [path\n\ +syntax_fmt: "JSON.GET key [INDENT\_indent] [NEWLINE\_newline] [SPACE\_space] [path +\ \ [path ...]]" syntax_str: 
"[INDENT\_indent] [NEWLINE\_newline] [SPACE\_space] [path [path ...]]" title: JSON.GET diff --git a/content/commands/json.merge/index.md b/content/commands/json.merge/index.md index 1f74e47501..4df457a8f7 100644 --- a/content/commands/json.merge/index.md +++ b/content/commands/json.merge/index.md @@ -6,6 +6,16 @@ arguments: type: string - name: value type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(M+N) when path is evaluated to a single value where M is the size of the original value (if it exists) and N is the size of the new value, O(M+N) when path is evaluated to multiple values where M is the size of the key and N is the @@ -127,7 +137,7 @@ redis> JSON.SET doc $ '{"f1": {"a":1}, "f2":{"a":2}}' OK redis> JSON.GET doc "{\"f1\":{\"a\":1},\"f2\":{\"a\":2}}" -redis> JSON.MERGE doc $ '{"f1": 'null', "f2":{"a":3, "b":4}, "f3":'[2,4,6]'}' +redis> JSON.MERGE doc $ '{"f1": null, "f2":{"a":3, "b":4}, "f3":[2,4,6]}' OK redis> JSON.GET doc "{\"f2\":{\"a\":3,\"b\":4},\"f3\":[2,4,6]}" diff --git a/content/commands/json.mget/index.md b/content/commands/json.mget/index.md index 71b411af5a..9dc862528d 100644 --- a/content/commands/json.mget/index.md +++ b/content/commands/json.mget/index.md @@ -5,6 +5,16 @@ arguments: type: key - name: path type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(M*N) when path is evaluated to a single value where M is the number of keys and N is the size of the value, O(N1+N2+...+Nm) when path is evaluated to multiple values where m is the number of keys and Ni is the size of the i-th key diff --git a/content/commands/json.mset/index.md b/content/commands/json.mset/index.md index 740ec7a4d4..385e27dd6c 100644 --- a/content/commands/json.mset/index.md +++ b/content/commands/json.mset/index.md @@ -10,6 +10,16 @@ arguments: multiple: true name: triplet type: block +categories: +- docs +- develop +- stack +- oss +- rs 
+- rc +- oss +- kubernetes +- clients complexity: O(K*(M+N)) where k is the number of keys in the command, when path is evaluated to a single value where M is the size of the original value (if it exists) and N is the size of the new value, or O(K*(M+N)) when path is evaluated to multiple @@ -66,14 +76,16 @@ For more information about replies, see [Redis serialization protocol specificat Add a new values in multiple keys {{< highlight bash >}} -redis> JSON.MSET doc1 $ '{"a":2}' doc2 $.f.a '3' doc3 $ '{"f1": {"a":1}, "f2":{"a":2}}' +redis> JSON.MSET doc1 $ '{"a":1}' doc2 $ '{"f":{"a":2}}' doc3 $ '{"f1":{"a":0},"f2":{"a":0}}' +OK +redis> JSON.MSET doc1 $ '{"a":2}' doc2 $.f.a '3' doc3 $ '{"f1":{"a":1},"f2":{"a":2}}' OK redis> JSON.GET doc1 $ "[{\"a\":2}]" redis> JSON.GET doc2 $ -"[{\"f\":{\"a\":3]" +"[{\"f\":{\"a\":3}}]" redis> JSON.GET doc3 -"{\"f1\":{\"a\":3},\"f2\":{\"a\":3}}" +"{\"f1\":{\"a\":1},\"f2\":{\"a\":2}}" {{< / highlight >}} diff --git a/content/commands/json.numincrby/index.md b/content/commands/json.numincrby/index.md index 595b6ea055..a6f092f162 100644 --- a/content/commands/json.numincrby/index.md +++ b/content/commands/json.numincrby/index.md @@ -6,6 +6,16 @@ arguments: type: string - name: value type: double +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) when path is evaluated to a single value, O(N) when path is evaluated to multiple values, where N is the size of the key description: Increments the numeric value at path by a value diff --git a/content/commands/json.nummultby/index.md b/content/commands/json.nummultby/index.md index 9d704f1550..7890e803c2 100644 --- a/content/commands/json.nummultby/index.md +++ b/content/commands/json.nummultby/index.md @@ -6,6 +6,16 @@ arguments: type: string - name: value type: double +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) when path is evaluated to a single value, O(N) when path is 
evaluated to multiple values, where N is the size of the key deprecated_since: '2.0' diff --git a/content/commands/json.objkeys/index.md b/content/commands/json.objkeys/index.md index 221f120e57..2ef51362c7 100644 --- a/content/commands/json.objkeys/index.md +++ b/content/commands/json.objkeys/index.md @@ -5,6 +5,16 @@ arguments: - name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value, where N is the number of keys in the object, O(N) when path is evaluated to multiple values, where N is the size of the key diff --git a/content/commands/json.objlen/index.md b/content/commands/json.objlen/index.md index 33b92af11c..5a796cb6ae 100644 --- a/content/commands/json.objlen/index.md +++ b/content/commands/json.objlen/index.md @@ -5,6 +5,16 @@ arguments: - name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) when path is evaluated to a single value, O(N) when path is evaluated to multiple values, where N is the size of the key description: Returns the number of keys of the object at path diff --git a/content/commands/json.resp/index.md b/content/commands/json.resp/index.md index 27fdde4ba3..bb32a9fb06 100644 --- a/content/commands/json.resp/index.md +++ b/content/commands/json.resp/index.md @@ -5,6 +5,16 @@ arguments: - name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) when path is evaluated to a single value, where N is the size of the value, O(N) when path is evaluated to multiple values, where N is the size of the key diff --git a/content/commands/json.set/index.md b/content/commands/json.set/index.md index aa00ba8666..d500ef2c9c 100644 --- a/content/commands/json.set/index.md +++ b/content/commands/json.set/index.md @@ -16,6 +16,16 @@ arguments: 
name: condition optional: true type: oneof +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(M+N) when path is evaluated to a single value where M is the size of the original value (if it exists) and N is the size of the new value, O(M+N) when path is evaluated to multiple values where M is the size of the key and N is the diff --git a/content/commands/json.strappend/index.md b/content/commands/json.strappend/index.md index adcd602ed9..aad2c04308 100644 --- a/content/commands/json.strappend/index.md +++ b/content/commands/json.strappend/index.md @@ -7,6 +7,16 @@ arguments: type: string - name: value type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) when path is evaluated to a single value, O(N) when path is evaluated to multiple values, where N is the size of the key description: Appends a string to a JSON string value at path diff --git a/content/commands/json.strlen/index.md b/content/commands/json.strlen/index.md index 88dd748d07..56b331d499 100644 --- a/content/commands/json.strlen/index.md +++ b/content/commands/json.strlen/index.md @@ -5,6 +5,16 @@ arguments: - name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) when path is evaluated to a single value, O(N) when path is evaluated to multiple values, where N is the size of the key description: Returns the length of the JSON String at path in key diff --git a/content/commands/json.toggle/index.md b/content/commands/json.toggle/index.md index 3aa7baf7d5..d9654d69d5 100644 --- a/content/commands/json.toggle/index.md +++ b/content/commands/json.toggle/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: path type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) when path is evaluated to a single value, O(N) when path is 
evaluated to multiple values, where N is the size of the key description: Toggles a boolean value diff --git a/content/commands/json.type/index.md b/content/commands/json.type/index.md index b9636c00c6..46f8cb8cc3 100644 --- a/content/commands/json.type/index.md +++ b/content/commands/json.type/index.md @@ -5,6 +5,16 @@ arguments: - name: path optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) when path is evaluated to a single value, O(N) when path is evaluated to multiple values, where N is the size of the key description: Returns the type of the JSON value at path @@ -54,6 +64,7 @@ redis> JSON.TYPE doc $..a 1) "integer" 2) "boolean" redis> JSON.TYPE doc $..dummy +(empty array) {{< / highlight >}} ## See also diff --git a/content/commands/keys/index.md b/content/commands/keys/index.md index 078f1905f7..8a500f1511 100644 --- a/content/commands/keys/index.md +++ b/content/commands/keys/index.md @@ -9,6 +9,16 @@ arguments: name: pattern type: pattern arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) with N being the number of keys in the database, under the assumption diff --git a/content/commands/lastsave/index.md b/content/commands/lastsave/index.md index 52b7cf2179..546e64842e 100644 --- a/content/commands/lastsave/index.md +++ b/content/commands/lastsave/index.md @@ -4,6 +4,16 @@ acl_categories: - '@fast' - '@dangerous' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/latency-doctor/index.md b/content/commands/latency-doctor/index.md index 47134ce4ca..1ef0d7eef8 100644 --- a/content/commands/latency-doctor/index.md +++ b/content/commands/latency-doctor/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- 
oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/latency-graph/index.md b/content/commands/latency-graph/index.md index e7351bb060..4f5f9cfa68 100644 --- a/content/commands/latency-graph/index.md +++ b/content/commands/latency-graph/index.md @@ -8,6 +8,16 @@ arguments: name: event type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/latency-help/index.md b/content/commands/latency-help/index.md index eb8a13f87d..2fb26edf15 100644 --- a/content/commands/latency-help/index.md +++ b/content/commands/latency-help/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/latency-histogram/index.md b/content/commands/latency-histogram/index.md index 8f6dafaa97..afcd39ccc1 100644 --- a/content/commands/latency-histogram/index.md +++ b/content/commands/latency-histogram/index.md @@ -10,6 +10,16 @@ arguments: optional: true type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/latency-history/index.md b/content/commands/latency-history/index.md index cf070e1589..4336ab2c8f 100644 --- a/content/commands/latency-history/index.md +++ b/content/commands/latency-history/index.md @@ -8,6 +8,16 @@ arguments: name: event type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/latency-latest/index.md b/content/commands/latency-latest/index.md index 4eb93fda1b..2dd2e6791d 100644 --- a/content/commands/latency-latest/index.md +++ b/content/commands/latency-latest/index.md @@ -4,6 +4,16 @@ 
acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/latency-reset/index.md b/content/commands/latency-reset/index.md index fbe5f0527a..27211348cc 100644 --- a/content/commands/latency-reset/index.md +++ b/content/commands/latency-reset/index.md @@ -10,6 +10,16 @@ arguments: optional: true type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/latency/index.md b/content/commands/latency/index.md index 2abaac4c92..f60486c7a6 100644 --- a/content/commands/latency/index.md +++ b/content/commands/latency/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for latency diagnostics commands. 
group: server diff --git a/content/commands/lcs/index.md b/content/commands/lcs/index.md index 606cfc7b16..f6b587a880 100644 --- a/content/commands/lcs/index.md +++ b/content/commands/lcs/index.md @@ -33,6 +33,16 @@ arguments: token: WITHMATCHLEN type: pure-token arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N*M) where N and M are the lengths of s1 and s2, respectively diff --git a/content/commands/lindex/index.md b/content/commands/lindex/index.md index 89046c04a2..d17f24afe3 100644 --- a/content/commands/lindex/index.md +++ b/content/commands/lindex/index.md @@ -12,6 +12,16 @@ arguments: name: index type: integer arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the number of elements to traverse to get to the element diff --git a/content/commands/linsert/index.md b/content/commands/linsert/index.md index 14e5973ec2..a543fd4a07 100644 --- a/content/commands/linsert/index.md +++ b/content/commands/linsert/index.md @@ -26,6 +26,16 @@ arguments: name: element type: string arity: 5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/llen/index.md b/content/commands/llen/index.md index fa4a1ebd1a..7da4b3e676 100644 --- a/content/commands/llen/index.md +++ b/content/commands/llen/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/lmove/index.md b/content/commands/lmove/index.md index 44ef1f992a..0bbdbd7a9a 100644 --- a/content/commands/lmove/index.md +++ b/content/commands/lmove/index.md @@ -35,6 +35,16 @@ arguments: name: whereto type: oneof arity: 5 +categories: +- docs +- develop +- stack +- oss +- rs 
+- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/lmpop/index.md b/content/commands/lmpop/index.md index 2fcb6097bc..ddb3d40207 100644 --- a/content/commands/lmpop/index.md +++ b/content/commands/lmpop/index.md @@ -29,6 +29,16 @@ arguments: token: COUNT type: integer arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - movablekeys diff --git a/content/commands/lolwut/index.md b/content/commands/lolwut/index.md index 667426d090..7f12f7255a 100644 --- a/content/commands/lolwut/index.md +++ b/content/commands/lolwut/index.md @@ -9,6 +9,16 @@ arguments: token: VERSION type: integer arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/lpop/index.md b/content/commands/lpop/index.md index 2265e39b1e..09ba05f510 100644 --- a/content/commands/lpop/index.md +++ b/content/commands/lpop/index.md @@ -14,6 +14,16 @@ arguments: since: 6.2.0 type: integer arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/lpos/index.md b/content/commands/lpos/index.md index b2dca010ac..3c4a69f3a0 100644 --- a/content/commands/lpos/index.md +++ b/content/commands/lpos/index.md @@ -27,6 +27,16 @@ arguments: token: MAXLEN type: integer arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the number of elements in the list, for the average case. 
diff --git a/content/commands/lpush/index.md b/content/commands/lpush/index.md index 83d359267e..5ee7bb8e3f 100644 --- a/content/commands/lpush/index.md +++ b/content/commands/lpush/index.md @@ -13,6 +13,16 @@ arguments: name: element type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/lpushx/index.md b/content/commands/lpushx/index.md index 9286102a5f..01c2d4f34a 100644 --- a/content/commands/lpushx/index.md +++ b/content/commands/lpushx/index.md @@ -13,6 +13,16 @@ arguments: name: element type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/lrange/index.md b/content/commands/lrange/index.md index 4547c5df7d..92a6cc832c 100644 --- a/content/commands/lrange/index.md +++ b/content/commands/lrange/index.md @@ -15,6 +15,16 @@ arguments: name: stop type: integer arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(S+N) where S is the distance of start offset from HEAD for small lists, diff --git a/content/commands/lrem/index.md b/content/commands/lrem/index.md index e62eb2937e..83ea8fee63 100644 --- a/content/commands/lrem/index.md +++ b/content/commands/lrem/index.md @@ -15,6 +15,16 @@ arguments: name: element type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(N+M) where N is the length of the list and M is the number of elements diff --git a/content/commands/lset/index.md b/content/commands/lset/index.md index baae7f106a..44a9626cb2 100644 --- a/content/commands/lset/index.md +++ b/content/commands/lset/index.md @@ -15,6 +15,16 @@ arguments: name: element type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc 
+- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/ltrim/index.md b/content/commands/ltrim/index.md index 62f7ee8be7..0d80a5705c 100644 --- a/content/commands/ltrim/index.md +++ b/content/commands/ltrim/index.md @@ -15,6 +15,16 @@ arguments: name: stop type: integer arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(N) where N is the number of elements to be removed by the operation. diff --git a/content/commands/memory-doctor/index.md b/content/commands/memory-doctor/index.md index df338c24c2..fc7bcd0c27 100644 --- a/content/commands/memory-doctor/index.md +++ b/content/commands/memory-doctor/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Outputs a memory problems report. group: server diff --git a/content/commands/memory-help/index.md b/content/commands/memory-help/index.md index 30d41e6c98..79c6cf5f59 100644 --- a/content/commands/memory-help/index.md +++ b/content/commands/memory-help/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/memory-malloc-stats/index.md b/content/commands/memory-malloc-stats/index.md index fa6159c377..ece412fe7b 100644 --- a/content/commands/memory-malloc-stats/index.md +++ b/content/commands/memory-malloc-stats/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on how much memory is allocated, could be slow description: Returns the allocator statistics. 
group: server diff --git a/content/commands/memory-purge/index.md b/content/commands/memory-purge/index.md index 563f9b061a..93fd9757b8 100644 --- a/content/commands/memory-purge/index.md +++ b/content/commands/memory-purge/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on how much memory is allocated, could be slow description: Asks the allocator to release memory. group: server diff --git a/content/commands/memory-stats/index.md b/content/commands/memory-stats/index.md index e2364b01d7..0507e577bb 100644 --- a/content/commands/memory-stats/index.md +++ b/content/commands/memory-stats/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns details about memory usage. group: server @@ -48,8 +58,7 @@ values. The following metrics are reported: Redis keyspace (see [`INFO`](/commands/info)'s `used_memory_overhead`) * `keys.count`: The total number of keys stored across all databases in the server -* `keys.bytes-per-key`: The ratio between **net memory usage** (`total.allocated` - minus `startup.allocated`) and `keys.count` +* `keys.bytes-per-key`: The ratio between `dataset.bytes` and `keys.count` * `dataset.bytes`: The size in bytes of the dataset, i.e. 
`overhead.total` subtracted from `total.allocated` (see [`INFO`](/commands/info)'s `used_memory_dataset`) * `dataset.percentage`: The percentage of `dataset.bytes` out of the total diff --git a/content/commands/memory-usage/index.md b/content/commands/memory-usage/index.md index 6af023b35f..2ca25901f0 100644 --- a/content/commands/memory-usage/index.md +++ b/content/commands/memory-usage/index.md @@ -13,6 +13,16 @@ arguments: token: SAMPLES type: integer arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the number of samples. @@ -42,7 +52,7 @@ The `MEMORY USAGE` command reports the number of bytes that a key and its value require to be stored in RAM. The reported usage is the total of memory allocations for data and -administrative overheads that a key its value require. +administrative overheads that a key and its value require. For nested data types, the optional `SAMPLES` option can be provided, where `count` is the number of sampled nested values. The samples are averaged to estimate the total size. diff --git a/content/commands/memory/index.md b/content/commands/memory/index.md index 0218a03a17..dd4975f16c 100644 --- a/content/commands/memory/index.md +++ b/content/commands/memory/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for memory diagnostics commands. 
group: server diff --git a/content/commands/mget/index.md b/content/commands/mget/index.md index 3105d6e584..f035e3667d 100644 --- a/content/commands/mget/index.md +++ b/content/commands/mget/index.md @@ -10,6 +10,16 @@ arguments: name: key type: key arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/migrate/index.md b/content/commands/migrate/index.md index ffabcda769..43c68b382c 100644 --- a/content/commands/migrate/index.md +++ b/content/commands/migrate/index.md @@ -69,6 +69,16 @@ arguments: token: KEYS type: key arity: -6 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - movablekeys @@ -121,7 +131,8 @@ key_specs: linkTitle: MIGRATE since: 2.6.0 summary: Atomically transfers a key from one Redis instance to another. -syntax_fmt: "MIGRATE host port destination-db timeout [COPY] [REPLACE]\n\ +syntax_fmt: "MIGRATE host port destination-db timeout [COPY] [REPLACE] +\ \ [AUTH\_password | AUTH2\_username password] [KEYS\_key [key ...]]" syntax_str: "port destination-db timeout [COPY] [REPLACE] [AUTH\_password\ \ | AUTH2\_username password] [KEYS\_key [key ...]]" diff --git a/content/commands/module-help/index.md b/content/commands/module-help/index.md index 165f7e3669..12d77981d2 100644 --- a/content/commands/module-help/index.md +++ b/content/commands/module-help/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/module-list/index.md b/content/commands/module-list/index.md index 995a93805d..9f7712fb63 100644 --- a/content/commands/module-list/index.md +++ b/content/commands/module-list/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss 
+- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/module-load/index.md b/content/commands/module-load/index.md index 9282b26eb6..734c291b46 100644 --- a/content/commands/module-load/index.md +++ b/content/commands/module-load/index.md @@ -13,6 +13,16 @@ arguments: optional: true type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/module-loadex/index.md b/content/commands/module-loadex/index.md index 11e3e5f106..1d741d8492 100644 --- a/content/commands/module-loadex/index.md +++ b/content/commands/module-loadex/index.md @@ -27,6 +27,16 @@ arguments: token: ARGS type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript @@ -38,7 +48,8 @@ hidden: false linkTitle: MODULE LOADEX since: 7.0.0 summary: Loads a module using extended parameters. 
-syntax_fmt: "MODULE LOADEX path [CONFIG\_name value [CONFIG name value ...]]\n [ARGS\_\ +syntax_fmt: "MODULE LOADEX path [CONFIG\_name value [CONFIG name value ...]] + [ARGS\_\ args [args ...]]" syntax_str: "[CONFIG\_name value [CONFIG name value ...]] [ARGS\_args [args ...]]" title: MODULE LOADEX diff --git a/content/commands/module-unload/index.md b/content/commands/module-unload/index.md index b3888fe92b..7112bb041c 100644 --- a/content/commands/module-unload/index.md +++ b/content/commands/module-unload/index.md @@ -8,6 +8,16 @@ arguments: name: name type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/module/index.md b/content/commands/module/index.md index b9e575526d..adc0baae3e 100644 --- a/content/commands/module/index.md +++ b/content/commands/module/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for module commands. 
group: server diff --git a/content/commands/monitor/index.md b/content/commands/monitor/index.md index 90222e9505..0f697c10c8 100644 --- a/content/commands/monitor/index.md +++ b/content/commands/monitor/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/move/index.md b/content/commands/move/index.md index eefe01f955..2c0d023039 100644 --- a/content/commands/move/index.md +++ b/content/commands/move/index.md @@ -12,6 +12,16 @@ arguments: name: db type: integer arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/mset/index.md b/content/commands/mset/index.md index e0627d27bb..0c87df51c4 100644 --- a/content/commands/mset/index.md +++ b/content/commands/mset/index.md @@ -16,6 +16,16 @@ arguments: name: data type: block arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/msetnx/index.md b/content/commands/msetnx/index.md index 56b27f8805..61f59e0bec 100644 --- a/content/commands/msetnx/index.md +++ b/content/commands/msetnx/index.md @@ -16,6 +16,16 @@ arguments: name: data type: block arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/multi/index.md b/content/commands/multi/index.md index 7f06d41bf1..96ff277873 100644 --- a/content/commands/multi/index.md +++ b/content/commands/multi/index.md @@ -3,6 +3,16 @@ acl_categories: - '@fast' - '@transaction' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/object-encoding/index.md 
b/content/commands/object-encoding/index.md index 62a96145a2..2e105e40fb 100644 --- a/content/commands/object-encoding/index.md +++ b/content/commands/object-encoding/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) diff --git a/content/commands/object-freq/index.md b/content/commands/object-freq/index.md index afa57e9f3d..5dffe6d3ef 100644 --- a/content/commands/object-freq/index.md +++ b/content/commands/object-freq/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) diff --git a/content/commands/object-help/index.md b/content/commands/object-help/index.md index 32f19a45b2..cf13bcd86a 100644 --- a/content/commands/object-help/index.md +++ b/content/commands/object-help/index.md @@ -3,6 +3,16 @@ acl_categories: - '@keyspace' - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/object-idletime/index.md b/content/commands/object-idletime/index.md index 82cc3f7f50..6c75898811 100644 --- a/content/commands/object-idletime/index.md +++ b/content/commands/object-idletime/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) diff --git a/content/commands/object-refcount/index.md b/content/commands/object-refcount/index.md index fbaf9eb32c..84412f707f 100644 --- a/content/commands/object-refcount/index.md +++ b/content/commands/object-refcount/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: 
- readonly complexity: O(1) diff --git a/content/commands/object/index.md b/content/commands/object/index.md index 0c13540dd0..1136a1ab16 100644 --- a/content/commands/object/index.md +++ b/content/commands/object/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for object introspection commands. group: generic diff --git a/content/commands/persist/index.md b/content/commands/persist/index.md index ac894a7e8f..9b0f61e1dd 100644 --- a/content/commands/persist/index.md +++ b/content/commands/persist/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/pexpire/index.md b/content/commands/pexpire/index.md index 17fff5e4a8..533d5db170 100644 --- a/content/commands/pexpire/index.md +++ b/content/commands/pexpire/index.md @@ -33,6 +33,16 @@ arguments: since: 7.0.0 type: oneof arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/pexpireat/index.md b/content/commands/pexpireat/index.md index 9b69d82416..eac7543001 100644 --- a/content/commands/pexpireat/index.md +++ b/content/commands/pexpireat/index.md @@ -33,6 +33,16 @@ arguments: since: 7.0.0 type: oneof arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/pexpiretime/index.md b/content/commands/pexpiretime/index.md index b661173492..c7683fb53b 100644 --- a/content/commands/pexpiretime/index.md +++ b/content/commands/pexpiretime/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients 
command_flags: - readonly - fast diff --git a/content/commands/pfadd/index.md b/content/commands/pfadd/index.md index 083778b28d..7fb140e49e 100644 --- a/content/commands/pfadd/index.md +++ b/content/commands/pfadd/index.md @@ -14,6 +14,16 @@ arguments: optional: true type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/pfcount/index.md b/content/commands/pfcount/index.md index e1c9ce24b5..8102069009 100644 --- a/content/commands/pfcount/index.md +++ b/content/commands/pfcount/index.md @@ -10,6 +10,16 @@ arguments: name: key type: key arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) with a very small average constant time when called with a single diff --git a/content/commands/pfdebug/index.md b/content/commands/pfdebug/index.md index e0ebb6f39f..82ab7af2b4 100644 --- a/content/commands/pfdebug/index.md +++ b/content/commands/pfdebug/index.md @@ -14,6 +14,16 @@ arguments: name: key type: key arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/pfmerge/index.md b/content/commands/pfmerge/index.md index 24628461a8..0f925e731f 100644 --- a/content/commands/pfmerge/index.md +++ b/content/commands/pfmerge/index.md @@ -15,6 +15,16 @@ arguments: optional: true type: key arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/pfselftest/index.md b/content/commands/pfselftest/index.md index ab9208c13c..9b70ba6c62 100644 --- a/content/commands/pfselftest/index.md +++ b/content/commands/pfselftest/index.md @@ -5,6 +5,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- 
kubernetes +- clients command_flags: - admin complexity: N/A diff --git a/content/commands/ping/index.md b/content/commands/ping/index.md index cdb12d13ac..a65eb05a63 100644 --- a/content/commands/ping/index.md +++ b/content/commands/ping/index.md @@ -8,6 +8,16 @@ arguments: optional: true type: string arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - fast complexity: O(1) diff --git a/content/commands/psetex/index.md b/content/commands/psetex/index.md index f0467dc7bc..d15c0e2f69 100644 --- a/content/commands/psetex/index.md +++ b/content/commands/psetex/index.md @@ -15,6 +15,16 @@ arguments: name: value type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/psubscribe/index.md b/content/commands/psubscribe/index.md index 61913e3923..747c0151ba 100644 --- a/content/commands/psubscribe/index.md +++ b/content/commands/psubscribe/index.md @@ -8,6 +8,16 @@ arguments: name: pattern type: pattern arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - noscript diff --git a/content/commands/psync/index.md b/content/commands/psync/index.md index 7a62b8764a..9c7ba76059 100644 --- a/content/commands/psync/index.md +++ b/content/commands/psync/index.md @@ -11,6 +11,16 @@ arguments: name: offset type: integer arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/pttl/index.md b/content/commands/pttl/index.md index 52db24fc1b..063686d772 100644 --- a/content/commands/pttl/index.md +++ b/content/commands/pttl/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git 
a/content/commands/publish/index.md b/content/commands/publish/index.md index e4acbb0eee..d15b19e5f3 100644 --- a/content/commands/publish/index.md +++ b/content/commands/publish/index.md @@ -10,6 +10,16 @@ arguments: name: message type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - loading diff --git a/content/commands/pubsub-channels/index.md b/content/commands/pubsub-channels/index.md index da79568615..2ff3be97e7 100644 --- a/content/commands/pubsub-channels/index.md +++ b/content/commands/pubsub-channels/index.md @@ -8,6 +8,16 @@ arguments: optional: true type: pattern arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - loading diff --git a/content/commands/pubsub-help/index.md b/content/commands/pubsub-help/index.md index b69bfc5f58..e7c4c96219 100644 --- a/content/commands/pubsub-help/index.md +++ b/content/commands/pubsub-help/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/pubsub-numpat/index.md b/content/commands/pubsub-numpat/index.md index 23f4e05f31..1d192b2f2b 100644 --- a/content/commands/pubsub-numpat/index.md +++ b/content/commands/pubsub-numpat/index.md @@ -3,6 +3,16 @@ acl_categories: - '@pubsub' - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - loading diff --git a/content/commands/pubsub-numsub/index.md b/content/commands/pubsub-numsub/index.md index e99887ed1d..05ce2b7d2b 100644 --- a/content/commands/pubsub-numsub/index.md +++ b/content/commands/pubsub-numsub/index.md @@ -9,6 +9,16 @@ arguments: optional: true type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients 
command_flags: - pubsub - loading diff --git a/content/commands/pubsub-shardchannels/index.md b/content/commands/pubsub-shardchannels/index.md index e9f783d45a..3c95f6838c 100644 --- a/content/commands/pubsub-shardchannels/index.md +++ b/content/commands/pubsub-shardchannels/index.md @@ -8,6 +8,16 @@ arguments: optional: true type: pattern arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - loading diff --git a/content/commands/pubsub-shardnumsub/index.md b/content/commands/pubsub-shardnumsub/index.md index ad2b22b4a1..9d867fb6f8 100644 --- a/content/commands/pubsub-shardnumsub/index.md +++ b/content/commands/pubsub-shardnumsub/index.md @@ -9,6 +9,16 @@ arguments: optional: true type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - loading diff --git a/content/commands/pubsub/index.md b/content/commands/pubsub/index.md index 140e07a124..d774ac1542 100644 --- a/content/commands/pubsub/index.md +++ b/content/commands/pubsub/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for Pub/Sub commands. 
group: pubsub diff --git a/content/commands/punsubscribe/index.md b/content/commands/punsubscribe/index.md index b79ce8c80c..518889a171 100644 --- a/content/commands/punsubscribe/index.md +++ b/content/commands/punsubscribe/index.md @@ -9,6 +9,16 @@ arguments: optional: true type: pattern arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - noscript diff --git a/content/commands/quit/index.md b/content/commands/quit/index.md index e6c403e88d..170e98b33e 100644 --- a/content/commands/quit/index.md +++ b/content/commands/quit/index.md @@ -3,6 +3,16 @@ acl_categories: - '@fast' - '@connection' arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/randomkey/index.md b/content/commands/randomkey/index.md index 1dccc6ab75..5c151cce3c 100644 --- a/content/commands/randomkey/index.md +++ b/content/commands/randomkey/index.md @@ -4,6 +4,16 @@ acl_categories: - '@read' - '@slow' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) diff --git a/content/commands/readonly/index.md b/content/commands/readonly/index.md index b323d7b816..155c610cec 100644 --- a/content/commands/readonly/index.md +++ b/content/commands/readonly/index.md @@ -3,6 +3,16 @@ acl_categories: - '@fast' - '@connection' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/readwrite/index.md b/content/commands/readwrite/index.md index afb79bfc15..6a0012f4c6 100644 --- a/content/commands/readwrite/index.md +++ b/content/commands/readwrite/index.md @@ -3,6 +3,16 @@ acl_categories: - '@fast' - '@connection' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - 
loading - stale diff --git a/content/commands/rename/index.md b/content/commands/rename/index.md index 0471302ad0..7b1131e58b 100644 --- a/content/commands/rename/index.md +++ b/content/commands/rename/index.md @@ -13,6 +13,16 @@ arguments: name: newkey type: key arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(1) diff --git a/content/commands/renamenx/index.md b/content/commands/renamenx/index.md index 8d1ec705c3..1464b7fc5a 100644 --- a/content/commands/renamenx/index.md +++ b/content/commands/renamenx/index.md @@ -13,6 +13,16 @@ arguments: name: newkey type: key arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/replconf/index.md b/content/commands/replconf/index.md index 07fd450947..e3d57e6668 100644 --- a/content/commands/replconf/index.md +++ b/content/commands/replconf/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/replicaof/index.md b/content/commands/replicaof/index.md index 7b0a8a9aa9..ba97a54a89 100644 --- a/content/commands/replicaof/index.md +++ b/content/commands/replicaof/index.md @@ -28,6 +28,16 @@ arguments: name: args type: oneof arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/reset/index.md b/content/commands/reset/index.md index c08f614ec3..d16d4816d0 100644 --- a/content/commands/reset/index.md +++ b/content/commands/reset/index.md @@ -3,6 +3,16 @@ acl_categories: - '@fast' - '@connection' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git 
a/content/commands/restore-asking/index.md b/content/commands/restore-asking/index.md index df910563e8..1ee2a140c1 100644 --- a/content/commands/restore-asking/index.md +++ b/content/commands/restore-asking/index.md @@ -40,6 +40,16 @@ arguments: token: FREQ type: integer arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -77,7 +87,8 @@ key_specs: linkTitle: RESTORE-ASKING since: 3.0.0 summary: An internal command for migrating keys in a cluster. -syntax_fmt: "RESTORE-ASKING key ttl serialized-value [REPLACE] [ABSTTL]\n [IDLETIME\_\ +syntax_fmt: "RESTORE-ASKING key ttl serialized-value [REPLACE] [ABSTTL] + [IDLETIME\_\ seconds] [FREQ\_frequency]" syntax_str: "ttl serialized-value [REPLACE] [ABSTTL] [IDLETIME\_seconds] [FREQ\_frequency]" title: RESTORE-ASKING diff --git a/content/commands/restore/index.md b/content/commands/restore/index.md index a02b3bbb95..6c75a9b03a 100644 --- a/content/commands/restore/index.md +++ b/content/commands/restore/index.md @@ -40,6 +40,16 @@ arguments: token: FREQ type: integer arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -74,7 +84,8 @@ key_specs: linkTitle: RESTORE since: 2.6.0 summary: Creates a key from the serialized representation of a value. 
-syntax_fmt: "RESTORE key ttl serialized-value [REPLACE] [ABSTTL]\n [IDLETIME\_seconds]\ +syntax_fmt: "RESTORE key ttl serialized-value [REPLACE] [ABSTTL] + [IDLETIME\_seconds]\ \ [FREQ\_frequency]" syntax_str: "ttl serialized-value [REPLACE] [ABSTTL] [IDLETIME\_seconds] [FREQ\_frequency]" title: RESTORE diff --git a/content/commands/role/index.md b/content/commands/role/index.md index be44ac9f91..8267e2bef8 100644 --- a/content/commands/role/index.md +++ b/content/commands/role/index.md @@ -4,6 +4,16 @@ acl_categories: - '@fast' - '@dangerous' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - loading diff --git a/content/commands/rpop/index.md b/content/commands/rpop/index.md index 8b830b6b89..1085d2d323 100644 --- a/content/commands/rpop/index.md +++ b/content/commands/rpop/index.md @@ -14,6 +14,16 @@ arguments: since: 6.2.0 type: integer arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/rpoplpush/index.md b/content/commands/rpoplpush/index.md index ede7eac81a..b31325e7f0 100644 --- a/content/commands/rpoplpush/index.md +++ b/content/commands/rpoplpush/index.md @@ -13,6 +13,16 @@ arguments: name: destination type: key arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/rpush/index.md b/content/commands/rpush/index.md index 170729a761..8d5d9cdb13 100644 --- a/content/commands/rpush/index.md +++ b/content/commands/rpush/index.md @@ -13,6 +13,16 @@ arguments: name: element type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/rpushx/index.md b/content/commands/rpushx/index.md index 06af3603b5..be7529733c 100644 --- 
a/content/commands/rpushx/index.md +++ b/content/commands/rpushx/index.md @@ -13,6 +13,16 @@ arguments: name: element type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/sadd/index.md b/content/commands/sadd/index.md index 7a88929237..95f7b932c4 100644 --- a/content/commands/sadd/index.md +++ b/content/commands/sadd/index.md @@ -13,6 +13,16 @@ arguments: name: member type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/save/index.md b/content/commands/save/index.md index 773aaab379..e61711c099 100644 --- a/content/commands/save/index.md +++ b/content/commands/save/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/scan/index.md b/content/commands/scan/index.md index 3e914992c9..75e0cefcc4 100644 --- a/content/commands/scan/index.md +++ b/content/commands/scan/index.md @@ -24,6 +24,16 @@ arguments: token: TYPE type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) for every call. 
O(N) for a complete iteration, including enough command diff --git a/content/commands/scard/index.md b/content/commands/scard/index.md index 9ecc9aaf65..4b840ccedc 100644 --- a/content/commands/scard/index.md +++ b/content/commands/scard/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/script-debug/index.md b/content/commands/script-debug/index.md index 96d0c8853f..be77979572 100644 --- a/content/commands/script-debug/index.md +++ b/content/commands/script-debug/index.md @@ -19,6 +19,16 @@ arguments: name: mode type: oneof arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript complexity: O(1) diff --git a/content/commands/script-exists/index.md b/content/commands/script-exists/index.md index 4506d89c7a..580302fc34 100644 --- a/content/commands/script-exists/index.md +++ b/content/commands/script-exists/index.md @@ -8,6 +8,16 @@ arguments: name: sha1 type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript complexity: O(N) with N being the number of scripts to check (so checking a single diff --git a/content/commands/script-flush/index.md b/content/commands/script-flush/index.md index 8e71988023..b0c276d2cd 100644 --- a/content/commands/script-flush/index.md +++ b/content/commands/script-flush/index.md @@ -17,6 +17,16 @@ arguments: since: 6.2.0 type: oneof arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript complexity: O(N) with N being the number of scripts in cache diff --git a/content/commands/script-help/index.md b/content/commands/script-help/index.md index 326d64ff59..49ce32055b 100644 --- a/content/commands/script-help/index.md +++ 
b/content/commands/script-help/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@scripting' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/script-kill/index.md b/content/commands/script-kill/index.md index 12aaf847e2..c579cd795e 100644 --- a/content/commands/script-kill/index.md +++ b/content/commands/script-kill/index.md @@ -3,6 +3,16 @@ acl_categories: - '@slow' - '@scripting' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - allow_busy diff --git a/content/commands/script-load/index.md b/content/commands/script-load/index.md index bf37569e74..dfab0383dc 100644 --- a/content/commands/script-load/index.md +++ b/content/commands/script-load/index.md @@ -7,6 +7,16 @@ arguments: name: script type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - noscript - stale diff --git a/content/commands/script/index.md b/content/commands/script/index.md index 9e1178feeb..5b37c3a6d1 100644 --- a/content/commands/script/index.md +++ b/content/commands/script/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for Lua scripts management commands. group: scripting diff --git a/content/commands/sdiff/index.md b/content/commands/sdiff/index.md index 51a39bff91..746d401ae6 100644 --- a/content/commands/sdiff/index.md +++ b/content/commands/sdiff/index.md @@ -10,6 +10,16 @@ arguments: name: key type: key arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the total number of elements in all given sets. 
diff --git a/content/commands/sdiffstore/index.md b/content/commands/sdiffstore/index.md index 387b94184b..abee79e310 100644 --- a/content/commands/sdiffstore/index.md +++ b/content/commands/sdiffstore/index.md @@ -14,6 +14,16 @@ arguments: name: key type: key arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/select/index.md b/content/commands/select/index.md index 65bb193e7f..ec25f3f7d1 100644 --- a/content/commands/select/index.md +++ b/content/commands/select/index.md @@ -7,6 +7,16 @@ arguments: name: index type: integer arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/set/index.md b/content/commands/set/index.md index 192b1c5973..0de5bdea01 100644 --- a/content/commands/set/index.md +++ b/content/commands/set/index.md @@ -60,6 +60,16 @@ arguments: optional: true type: oneof arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -97,7 +107,8 @@ linkTitle: SET since: 1.0.0 summary: Sets the string value of a key, ignoring its type. The key is created if it doesn't exist. -syntax_fmt: "SET key value [NX | XX] [GET] [EX\_seconds | PX\_milliseconds |\n EXAT\_\ +syntax_fmt: "SET key value [NX | XX] [GET] [EX\_seconds | PX\_milliseconds | + EXAT\_\ unix-time-seconds | PXAT\_unix-time-milliseconds | KEEPTTL]" syntax_str: "value [NX | XX] [GET] [EX\_seconds | PX\_milliseconds | EXAT\_unix-time-seconds\ \ | PXAT\_unix-time-milliseconds | KEEPTTL]" @@ -111,10 +122,10 @@ Any previous time to live associated with the key is discarded on successful `SE The `SET` command supports a set of options that modify its behavior: -* `EX` *seconds* -- Set the specified expire time, in seconds. -* `PX` *milliseconds* -- Set the specified expire time, in milliseconds. 
-* `EXAT` *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds. -* `PXAT` *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds. +* `EX` *seconds* -- Set the specified expire time, in seconds (a positive integer). +* `PX` *milliseconds* -- Set the specified expire time, in milliseconds (a positive integer). +* `EXAT` *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds (a positive integer). +* `PXAT` *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds (a positive integer). * `NX` -- Only set the key if it does not already exist. * `XX` -- Only set the key if it already exists. * `KEEPTTL` -- Retain the time to live associated with the key. diff --git a/content/commands/setbit/index.md b/content/commands/setbit/index.md index a93aa6edb3..9c4b1f0bb3 100644 --- a/content/commands/setbit/index.md +++ b/content/commands/setbit/index.md @@ -15,6 +15,16 @@ arguments: name: value type: integer arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/setex/index.md b/content/commands/setex/index.md index 33c94f52b4..b5abea9cb2 100644 --- a/content/commands/setex/index.md +++ b/content/commands/setex/index.md @@ -15,6 +15,16 @@ arguments: name: value type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/setnx/index.md b/content/commands/setnx/index.md index d33c931bd7..339850a80b 100644 --- a/content/commands/setnx/index.md +++ b/content/commands/setnx/index.md @@ -12,6 +12,16 @@ arguments: name: value type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git 
a/content/commands/setrange/index.md b/content/commands/setrange/index.md index 0aba2f6ae1..33e9ddc3c3 100644 --- a/content/commands/setrange/index.md +++ b/content/commands/setrange/index.md @@ -15,6 +15,16 @@ arguments: name: value type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/shutdown/index.md b/content/commands/shutdown/index.md index 04e1263729..b22bdcc3ae 100644 --- a/content/commands/shutdown/index.md +++ b/content/commands/shutdown/index.md @@ -35,6 +35,16 @@ arguments: token: ABORT type: pure-token arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/sinter/index.md b/content/commands/sinter/index.md index b80e1d4189..a8025e49d5 100644 --- a/content/commands/sinter/index.md +++ b/content/commands/sinter/index.md @@ -10,6 +10,16 @@ arguments: name: key type: key arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N*M) worst case where N is the cardinality of the smallest set and M diff --git a/content/commands/sintercard/index.md b/content/commands/sintercard/index.md index a401aa48a8..b9016dd073 100644 --- a/content/commands/sintercard/index.md +++ b/content/commands/sintercard/index.md @@ -18,6 +18,16 @@ arguments: token: LIMIT type: integer arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - movablekeys diff --git a/content/commands/sinterstore/index.md b/content/commands/sinterstore/index.md index a568a0e59a..2b04e9d5d8 100644 --- a/content/commands/sinterstore/index.md +++ b/content/commands/sinterstore/index.md @@ -14,6 +14,16 @@ arguments: name: key type: key arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- 
kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/sismember/index.md b/content/commands/sismember/index.md index 0c90eecc68..bc718a20cd 100644 --- a/content/commands/sismember/index.md +++ b/content/commands/sismember/index.md @@ -12,6 +12,16 @@ arguments: name: member type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/slaveof/index.md b/content/commands/slaveof/index.md index 1fb0434dc0..a8a45d0bf0 100644 --- a/content/commands/slaveof/index.md +++ b/content/commands/slaveof/index.md @@ -28,6 +28,16 @@ arguments: name: args type: oneof arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/slowlog-get/index.md b/content/commands/slowlog-get/index.md index 28f0134c8e..297a89cac8 100644 --- a/content/commands/slowlog-get/index.md +++ b/content/commands/slowlog-get/index.md @@ -9,6 +9,16 @@ arguments: optional: true type: integer arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - loading diff --git a/content/commands/slowlog-help/index.md b/content/commands/slowlog-help/index.md index db6d54811c..1de47c8013 100644 --- a/content/commands/slowlog-help/index.md +++ b/content/commands/slowlog-help/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/slowlog-len/index.md b/content/commands/slowlog-len/index.md index 1ae26a566a..b2ed094218 100644 --- a/content/commands/slowlog-len/index.md +++ b/content/commands/slowlog-len/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- 
kubernetes +- clients command_flags: - admin - loading diff --git a/content/commands/slowlog-reset/index.md b/content/commands/slowlog-reset/index.md index 0ceb676f9b..40fa075e88 100644 --- a/content/commands/slowlog-reset/index.md +++ b/content/commands/slowlog-reset/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - admin - loading diff --git a/content/commands/slowlog/index.md b/content/commands/slowlog/index.md index 45ff59aaa2..8942045d6b 100644 --- a/content/commands/slowlog/index.md +++ b/content/commands/slowlog/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for slow log commands. group: server diff --git a/content/commands/smembers/index.md b/content/commands/smembers/index.md index 9ef2601b3a..71c58f5fc7 100644 --- a/content/commands/smembers/index.md +++ b/content/commands/smembers/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the set cardinality. 
diff --git a/content/commands/smismember/index.md b/content/commands/smismember/index.md index 4647c90180..f4c383e10f 100644 --- a/content/commands/smismember/index.md +++ b/content/commands/smismember/index.md @@ -13,6 +13,16 @@ arguments: name: member type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/smove/index.md b/content/commands/smove/index.md index 7f3c880446..86e5b292c3 100644 --- a/content/commands/smove/index.md +++ b/content/commands/smove/index.md @@ -16,6 +16,16 @@ arguments: name: member type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/sort/index.md b/content/commands/sort/index.md index 01045a4d25..c5bc6ad6ed 100644 --- a/content/commands/sort/index.md +++ b/content/commands/sort/index.md @@ -60,6 +60,16 @@ arguments: token: STORE type: key arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -108,7 +118,8 @@ linkTitle: SORT since: 1.0.0 summary: Sorts the elements in a list, a set, or a sorted set, optionally storing the result. 
-syntax_fmt: "SORT key [BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET pattern\n\ +syntax_fmt: "SORT key [BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET pattern +\ \ ...]] [ASC | DESC] [ALPHA] [STORE\_destination]" syntax_str: "[BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET pattern ...]]\ \ [ASC | DESC] [ALPHA] [STORE\_destination]" diff --git a/content/commands/sort_ro/index.md b/content/commands/sort_ro/index.md index ae6939cf82..2b05ec1df4 100644 --- a/content/commands/sort_ro/index.md +++ b/content/commands/sort_ro/index.md @@ -54,6 +54,16 @@ arguments: token: ALPHA type: pure-token arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - movablekeys @@ -89,7 +99,8 @@ key_specs: linkTitle: SORT_RO since: 7.0.0 summary: Returns the sorted elements of a list, a set, or a sorted set. -syntax_fmt: "SORT_RO key [BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET\n\ +syntax_fmt: "SORT_RO key [BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET +\ \ pattern ...]] [ASC | DESC] [ALPHA]" syntax_str: "[BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET pattern ...]]\ \ [ASC | DESC] [ALPHA]" diff --git a/content/commands/spop/index.md b/content/commands/spop/index.md index 18d3a4864b..cd7841ed46 100644 --- a/content/commands/spop/index.md +++ b/content/commands/spop/index.md @@ -14,6 +14,16 @@ arguments: since: 3.2.0 type: integer arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/spublish/index.md b/content/commands/spublish/index.md index eb0692e7f8..9b21371522 100644 --- a/content/commands/spublish/index.md +++ b/content/commands/spublish/index.md @@ -10,6 +10,16 @@ arguments: name: message type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - loading diff --git 
a/content/commands/srandmember/index.md b/content/commands/srandmember/index.md index 96a2ab99e1..ec52e780e6 100644 --- a/content/commands/srandmember/index.md +++ b/content/commands/srandmember/index.md @@ -14,6 +14,16 @@ arguments: since: 2.6.0 type: integer arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: Without the count argument O(1), otherwise O(N) where N is the absolute diff --git a/content/commands/srem/index.md b/content/commands/srem/index.md index 8f5890a169..07364e14e3 100644 --- a/content/commands/srem/index.md +++ b/content/commands/srem/index.md @@ -13,6 +13,16 @@ arguments: name: member type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/sscan/index.md b/content/commands/sscan/index.md index b07be65f65..7c0f7f08b9 100644 --- a/content/commands/sscan/index.md +++ b/content/commands/sscan/index.md @@ -22,6 +22,16 @@ arguments: token: COUNT type: integer arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) for every call. 
O(N) for a complete iteration, including enough command diff --git a/content/commands/ssubscribe/index.md b/content/commands/ssubscribe/index.md index b5584d900e..7f0672dedd 100644 --- a/content/commands/ssubscribe/index.md +++ b/content/commands/ssubscribe/index.md @@ -8,6 +8,16 @@ arguments: name: shardchannel type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - noscript diff --git a/content/commands/strlen/index.md b/content/commands/strlen/index.md index 186180d76b..0dec127270 100644 --- a/content/commands/strlen/index.md +++ b/content/commands/strlen/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/subscribe/index.md b/content/commands/subscribe/index.md index a56cb80ac3..b3e37ef2af 100644 --- a/content/commands/subscribe/index.md +++ b/content/commands/subscribe/index.md @@ -8,6 +8,16 @@ arguments: name: channel type: string arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - noscript diff --git a/content/commands/substr/index.md b/content/commands/substr/index.md index af0031b2da..77206d469d 100644 --- a/content/commands/substr/index.md +++ b/content/commands/substr/index.md @@ -15,6 +15,16 @@ arguments: name: end type: integer arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the length of the returned string. 
The complexity is ultimately diff --git a/content/commands/sunion/index.md b/content/commands/sunion/index.md index 5fd1a27613..d868d83c65 100644 --- a/content/commands/sunion/index.md +++ b/content/commands/sunion/index.md @@ -10,6 +10,16 @@ arguments: name: key type: key arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the total number of elements in all given sets. diff --git a/content/commands/sunionstore/index.md b/content/commands/sunionstore/index.md index 8882860067..c2ae426d2c 100644 --- a/content/commands/sunionstore/index.md +++ b/content/commands/sunionstore/index.md @@ -14,6 +14,16 @@ arguments: name: key type: key arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/sunsubscribe/index.md b/content/commands/sunsubscribe/index.md index d8ab5ef79d..7ce6d75c0b 100644 --- a/content/commands/sunsubscribe/index.md +++ b/content/commands/sunsubscribe/index.md @@ -9,6 +9,16 @@ arguments: optional: true type: string arity: -1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - pubsub - noscript diff --git a/content/commands/swapdb/index.md b/content/commands/swapdb/index.md index 49fb9f55af..1aa86ba233 100644 --- a/content/commands/swapdb/index.md +++ b/content/commands/swapdb/index.md @@ -12,6 +12,16 @@ arguments: name: index2 type: integer arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/sync/index.md b/content/commands/sync/index.md index 154273a04e..3db39c81cd 100644 --- a/content/commands/sync/index.md +++ b/content/commands/sync/index.md @@ -4,6 +4,16 @@ acl_categories: - '@slow' - '@dangerous' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- 
kubernetes +- clients command_flags: - admin - noscript diff --git a/content/commands/tdigest.add/index.md b/content/commands/tdigest.add/index.md index 71e698c8f3..b7441a7908 100644 --- a/content/commands/tdigest.add/index.md +++ b/content/commands/tdigest.add/index.md @@ -8,6 +8,16 @@ arguments: multiple: true name: values type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) , where N is the number of samples to add description: Adds one or more observations to a t-digest sketch group: tdigest diff --git a/content/commands/tdigest.byrank/index.md b/content/commands/tdigest.byrank/index.md index 5461af3934..b2e83ae1d1 100644 --- a/content/commands/tdigest.byrank/index.md +++ b/content/commands/tdigest.byrank/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: rank type: double +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns, for each input rank, an estimation of the value (floating-point) with that rank diff --git a/content/commands/tdigest.byrevrank/index.md b/content/commands/tdigest.byrevrank/index.md index 5b507e01fb..5f1a390a0e 100644 --- a/content/commands/tdigest.byrevrank/index.md +++ b/content/commands/tdigest.byrevrank/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: reverse_rank type: double +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns, for each input reverse rank, an estimation of the value (floating-point) with that reverse rank diff --git a/content/commands/tdigest.cdf/index.md b/content/commands/tdigest.cdf/index.md index d3f59ff50c..a512bdeeaa 100644 --- a/content/commands/tdigest.cdf/index.md +++ b/content/commands/tdigest.cdf/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: value type: double +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients 
complexity: O(1) description: Returns, for each input value, an estimation of the fraction (floating-point) of (observations smaller than the given value + half the observations equal to the diff --git a/content/commands/tdigest.create/index.md b/content/commands/tdigest.create/index.md index 35a84f7e18..16fd1aeb86 100644 --- a/content/commands/tdigest.create/index.md +++ b/content/commands/tdigest.create/index.md @@ -6,6 +6,16 @@ arguments: optional: true token: COMPRESSION type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Allocates memory and initializes a new t-digest sketch group: tdigest diff --git a/content/commands/tdigest.info/index.md b/content/commands/tdigest.info/index.md index 7f02665625..248fd8f623 100644 --- a/content/commands/tdigest.info/index.md +++ b/content/commands/tdigest.info/index.md @@ -2,6 +2,16 @@ arguments: - name: key type: key +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns information and statistics about a t-digest sketch group: tdigest diff --git a/content/commands/tdigest.max/index.md b/content/commands/tdigest.max/index.md index 112f02ee39..4b2322d811 100644 --- a/content/commands/tdigest.max/index.md +++ b/content/commands/tdigest.max/index.md @@ -2,6 +2,16 @@ arguments: - name: key type: key +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns the maximum observation value from a t-digest sketch group: tdigest diff --git a/content/commands/tdigest.merge/index.md b/content/commands/tdigest.merge/index.md index 2930362867..45bdbaddcb 100644 --- a/content/commands/tdigest.merge/index.md +++ b/content/commands/tdigest.merge/index.md @@ -20,6 +20,16 @@ arguments: optional: true token: OVERRIDE type: pure-token +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- 
clients complexity: O(N*K), where N is the number of centroids and K being the number of input sketches description: Merges multiple t-digest sketches into a single sketch @@ -30,7 +40,8 @@ module: Bloom since: 2.4.0 stack_path: docs/data-types/probabilistic summary: Merges multiple t-digest sketches into a single sketch -syntax_fmt: "TDIGEST.MERGE destination-key numkeys source-key [source-key ...]\n \ +syntax_fmt: "TDIGEST.MERGE destination-key numkeys source-key [source-key ...] + \ \ [COMPRESSION compression] [OVERRIDE]" syntax_str: numkeys source-key [source-key ...] [COMPRESSION compression] [OVERRIDE] title: TDIGEST.MERGE diff --git a/content/commands/tdigest.min/index.md b/content/commands/tdigest.min/index.md index 3bb9094eb1..77f4fc8244 100644 --- a/content/commands/tdigest.min/index.md +++ b/content/commands/tdigest.min/index.md @@ -2,6 +2,16 @@ arguments: - name: key type: key +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns the minimum observation value from a t-digest sketch group: tdigest diff --git a/content/commands/tdigest.quantile/index.md b/content/commands/tdigest.quantile/index.md index df2fd9bcc7..4eae93b76b 100644 --- a/content/commands/tdigest.quantile/index.md +++ b/content/commands/tdigest.quantile/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: quantile type: double +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns, for each input fraction, an estimation of the value (floating point) that is smaller than the given fraction of observations diff --git a/content/commands/tdigest.rank/index.md b/content/commands/tdigest.rank/index.md index 317e24b130..43d7a2cebc 100644 --- a/content/commands/tdigest.rank/index.md +++ b/content/commands/tdigest.rank/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: value type: double +categories: +- docs +- develop +- stack +- 
oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns, for each input value (floating-point), the estimated rank of the value (the number of observations in the sketch that are smaller than the value diff --git a/content/commands/tdigest.reset/index.md b/content/commands/tdigest.reset/index.md index 1f45359fd9..4b7a489fcd 100644 --- a/content/commands/tdigest.reset/index.md +++ b/content/commands/tdigest.reset/index.md @@ -2,6 +2,16 @@ arguments: - name: key type: key +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: 'Resets a t-digest sketch: empty the sketch and re-initializes it.' group: tdigest diff --git a/content/commands/tdigest.revrank/index.md b/content/commands/tdigest.revrank/index.md index 9b319062b4..0e8f39700b 100644 --- a/content/commands/tdigest.revrank/index.md +++ b/content/commands/tdigest.revrank/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: value type: double +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns, for each input value (floating-point), the estimated reverse rank of the value (the number of observations in the sketch that are larger than diff --git a/content/commands/tdigest.trimmed_mean/index.md b/content/commands/tdigest.trimmed_mean/index.md index dff37b56be..010d6e68b8 100644 --- a/content/commands/tdigest.trimmed_mean/index.md +++ b/content/commands/tdigest.trimmed_mean/index.md @@ -6,6 +6,16 @@ arguments: type: double - name: high_cut_quantile type: double +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) where N is the number of centroids description: Returns an estimation of the mean value from the sketch, excluding observation values outside the low and high cutoff quantiles diff --git a/content/commands/tfcall/index.md b/content/commands/tfcall/index.md index 
0ed665f17a..b5f3a8114d 100644 --- a/content/commands/tfcall/index.md +++ b/content/commands/tfcall/index.md @@ -24,6 +24,16 @@ bannerText: 'The triggers and functions feature of Redis Stack and its documenta GitHub using the "Create new issue" link in the top right-hand corner of this page. ' +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on the function that is executed. description: Invoke a JavaScript function group: triggers_and_functions diff --git a/content/commands/tfcallasync/index.md b/content/commands/tfcallasync/index.md index 6d072f90e8..492b2c0cb5 100644 --- a/content/commands/tfcallasync/index.md +++ b/content/commands/tfcallasync/index.md @@ -24,6 +24,16 @@ bannerText: 'The triggers and functions feature of Redis Stack and its documenta GitHub using the "Create new issue" link in the top right-hand corner of this page. ' +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on the function that is executed. description: Invoke an asynchronous JavaScript function group: triggers_and_functions diff --git a/content/commands/tfunction-delete/index.md b/content/commands/tfunction-delete/index.md index 662619db6c..679751e5ba 100644 --- a/content/commands/tfunction-delete/index.md +++ b/content/commands/tfunction-delete/index.md @@ -14,6 +14,16 @@ bannerText: 'The triggers and functions feature of Redis Stack and its documenta GitHub using the "Create new issue" link in the top right-hand corner of this page. 
' +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Delete a JavaScript library from Redis by name group: triggers_and_functions @@ -23,7 +33,8 @@ module: Triggers and functions since: 2.0.0 stack_path: docs/interact/programmability/triggers-and-functions summary: Delete a JavaScript library from Redis by name -syntax: "TFUNCTION DELETE \"\" \n" +syntax: "TFUNCTION DELETE \"\" +" syntax_fmt: TFUNCTION DELETE library name syntax_str: '' title: TFUNCTION DELETE diff --git a/content/commands/tfunction-list/index.md b/content/commands/tfunction-list/index.md index 2658a717e4..4f0ac06fb8 100644 --- a/content/commands/tfunction-list/index.md +++ b/content/commands/tfunction-list/index.md @@ -31,6 +31,16 @@ bannerText: 'The triggers and functions feature of Redis Stack and its documenta GitHub using the "Create new issue" link in the top right-hand corner of this page. ' +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the number of libraries loaded into Redis @@ -42,7 +52,8 @@ module: Triggers and functions since: 2.0.0 stack_path: docs/interact/programmability/triggers-and-functions summary: List all JavaScript libraries loaded into Redis -syntax: "TFUNCTION LIST [WITHCODE] [VERBOSE] [v] [LIBRARY ] \n" +syntax: "TFUNCTION LIST [WITHCODE] [VERBOSE] [v] [LIBRARY ] +" syntax_fmt: "TFUNCTION LIST [LIBRARYNAME\_library name] [WITHCODE] [VERBOSE] [V]" syntax_str: '[WITHCODE] [VERBOSE] [V]' title: TFUNCTION LIST diff --git a/content/commands/tfunction-load/index.md b/content/commands/tfunction-load/index.md index dfed30e02d..725c65b18e 100644 --- a/content/commands/tfunction-load/index.md +++ b/content/commands/tfunction-load/index.md @@ -24,6 +24,16 @@ bannerText: 'The triggers and functions feature of Redis Stack and its documenta GitHub using the "Create new issue" link in the top right-hand corner of this 
page. ' +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Load a new JavaScript library into Redis group: triggers_and_functions @@ -33,7 +43,8 @@ module: Triggers and functions since: 2.0.0 stack_path: docs/interact/programmability/triggers-and-functions summary: Load a new JavaScript library into Redis -syntax: "TFUNCTION LOAD [REPLACE] [CONFIG ] \"\" \n" +syntax: "TFUNCTION LOAD [REPLACE] [CONFIG ] \"\" +" syntax_fmt: "TFUNCTION LOAD [REPLACE] [CONFIG\_config] library code" syntax_str: "[CONFIG\_config] library code" title: TFUNCTION LOAD diff --git a/content/commands/time/index.md b/content/commands/time/index.md index 6b463d09fb..b5133b6f01 100644 --- a/content/commands/time/index.md +++ b/content/commands/time/index.md @@ -2,6 +2,16 @@ acl_categories: - '@fast' arity: 1 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/topk.add/index.md b/content/commands/topk.add/index.md index 0d700105b7..4d948c13d7 100644 --- a/content/commands/topk.add/index.md +++ b/content/commands/topk.add/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: items type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n * k) where n is the number of items and k is the depth description: Increases the count of one or more items by increment group: topk diff --git a/content/commands/topk.count/index.md b/content/commands/topk.count/index.md index a23a4f84d2..fdfbd45522 100644 --- a/content/commands/topk.count/index.md +++ b/content/commands/topk.count/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n) where n is the number of items deprecated_since: '2.4' description: Return the count for one or more items are 
in a sketch diff --git a/content/commands/topk.incrby/index.md b/content/commands/topk.incrby/index.md index 29f41a2db7..15336e6f6c 100644 --- a/content/commands/topk.incrby/index.md +++ b/content/commands/topk.incrby/index.md @@ -10,6 +10,16 @@ arguments: multiple: true name: items type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n * k * incr) where n is the number of items, k is the depth and incr is the increment description: Increases the count of one or more items by increment diff --git a/content/commands/topk.info/index.md b/content/commands/topk.info/index.md index 3864af13fe..640ba823ac 100644 --- a/content/commands/topk.info/index.md +++ b/content/commands/topk.info/index.md @@ -2,6 +2,16 @@ arguments: - name: key type: key +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns information about a sketch group: topk diff --git a/content/commands/topk.list/index.md b/content/commands/topk.list/index.md index 36a75e9c1b..77cb2ad45b 100644 --- a/content/commands/topk.list/index.md +++ b/content/commands/topk.list/index.md @@ -6,6 +6,16 @@ arguments: optional: true token: WITHCOUNT type: pure-token +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(k) where k is the value of top-k description: Return full list of items in Top K list group: topk diff --git a/content/commands/topk.query/index.md b/content/commands/topk.query/index.md index bbe7e6625d..532ed92d75 100644 --- a/content/commands/topk.query/index.md +++ b/content/commands/topk.query/index.md @@ -5,6 +5,16 @@ arguments: - multiple: true name: item type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n) where n is the number of items description: Checks whether one or more items are in a sketch group: topk diff --git 
a/content/commands/topk.reserve/index.md b/content/commands/topk.reserve/index.md index 47aa675795..73e0404d26 100644 --- a/content/commands/topk.reserve/index.md +++ b/content/commands/topk.reserve/index.md @@ -14,6 +14,16 @@ arguments: name: params optional: true type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Initializes a TopK with specified parameters group: topk diff --git a/content/commands/touch/index.md b/content/commands/touch/index.md index 3de977aa62..922a0eee98 100644 --- a/content/commands/touch/index.md +++ b/content/commands/touch/index.md @@ -10,6 +10,16 @@ arguments: name: key type: key arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/ts.add/index.md b/content/commands/ts.add/index.md index 9df410fafa..b68c106e52 100644 --- a/content/commands/ts.add/index.md +++ b/content/commands/ts.add/index.md @@ -58,6 +58,16 @@ arguments: optional: true token: LABELS type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(M) when M is the amount of compaction rules or O(1) with no compaction description: Append a sample to a time series group: timeseries @@ -67,12 +77,21 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Append a sample to a time series -syntax: "TS.ADD key timestamp value \n [RETENTION retentionPeriod] \n [ENCODING\ - \ [COMPRESSED|UNCOMPRESSED]] \n [CHUNK_SIZE size] \n [ON_DUPLICATE policy] \n\ - \ [LABELS {label value}...]\n" -syntax_fmt: "TS.ADD key timestamp value [RETENTION\_retentionPeriod]\n [ENCODING\_\ - ] [CHUNK_SIZE\_size]\n [ON_DUPLICATE\_]\n [LABELS\_label value [label value ...]]" +syntax: "TS.ADD key timestamp value + [RETENTION retentionPeriod] + [ENCODING\ + \ [COMPRESSED|UNCOMPRESSED]] + [CHUNK_SIZE size] + [ON_DUPLICATE policy] +\ + 
\ [LABELS {label value}...] +" +syntax_fmt: "TS.ADD key timestamp value [RETENTION\_retentionPeriod] + [ENCODING\_\ + ] [CHUNK_SIZE\_size] + [ON_DUPLICATE\_] + [LABELS\_label value [label value ...]]" syntax_str: "timestamp value [RETENTION\_retentionPeriod] [ENCODING\_] [CHUNK_SIZE\_size] [ON_DUPLICATE\_] [LABELS\_label value [label value ...]]" diff --git a/content/commands/ts.alter/index.md b/content/commands/ts.alter/index.md index 262f943799..f44c5859af 100644 --- a/content/commands/ts.alter/index.md +++ b/content/commands/ts.alter/index.md @@ -43,6 +43,16 @@ arguments: optional: true token: LABELS type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) where N is the number of labels requested to update description: Update the retention, chunk size, duplicate policy, and labels of an existing time series @@ -54,10 +64,17 @@ since: 1.0.0 stack_path: docs/data-types/timeseries summary: Update the retention, chunk size, duplicate policy, and labels of an existing time series -syntax: "TS.ALTER key \n [RETENTION retentionPeriod] \n [CHUNK_SIZE size] \n [DUPLICATE_POLICY\ - \ policy] \n [LABELS [{label value}...]]\n" -syntax_fmt: "TS.ALTER key [RETENTION\_retentionPeriod] [CHUNK_SIZE\_size]\n [DUPLICATE_POLICY\_\ - ]\n [LABELS\_label value [label value ...]]" +syntax: "TS.ALTER key + [RETENTION retentionPeriod] + [CHUNK_SIZE size] + [DUPLICATE_POLICY\ + \ policy] + [LABELS [{label value}...]] +" +syntax_fmt: "TS.ALTER key [RETENTION\_retentionPeriod] [CHUNK_SIZE\_size] + [DUPLICATE_POLICY\_\ + ] + [LABELS\_label value [label value ...]]" syntax_str: "[RETENTION\_retentionPeriod] [CHUNK_SIZE\_size] [DUPLICATE_POLICY\_] [LABELS\_label value [label value ...]]" title: TS.ALTER diff --git a/content/commands/ts.create/index.md b/content/commands/ts.create/index.md index 06d26b00fd..7035d3bb99 100644 --- a/content/commands/ts.create/index.md +++ b/content/commands/ts.create/index.md @@ -54,6 +54,16 @@ 
arguments: optional: true token: LABELS type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Create a new time series group: timeseries @@ -63,10 +73,18 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Create a new time series -syntax: "TS.CREATE key \n [RETENTION retentionPeriod] \n [ENCODING [UNCOMPRESSED|COMPRESSED]]\ - \ \n [CHUNK_SIZE size] \n [DUPLICATE_POLICY policy] \n [LABELS {label value}...]\n" -syntax_fmt: "TS.CREATE key [RETENTION\_retentionPeriod] [ENCODING\_] [CHUNK_SIZE\_size] [DUPLICATE_POLICY\_] [CHUNK_SIZE\_size] [DUPLICATE_POLICY\_] [LABELS\_label value [label value ...]]" syntax_str: "[RETENTION\_retentionPeriod] [ENCODING\_]\ \ [CHUNK_SIZE\_size] [DUPLICATE_POLICY\_]\ diff --git a/content/commands/ts.createrule/index.md b/content/commands/ts.createrule/index.md index f29750375f..78e4c225f3 100644 --- a/content/commands/ts.createrule/index.md +++ b/content/commands/ts.createrule/index.md @@ -54,6 +54,16 @@ arguments: optional: true since: 1.8.0 type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Create a compaction rule group: timeseries @@ -63,10 +73,15 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Create a compaction rule -syntax: "TS.CREATERULE sourceKey destKey \n AGGREGATION aggregator bucketDuration\ - \ \n [alignTimestamp]\n" -syntax_fmt: "TS.CREATERULE sourceKey destKey AGGREGATION\_ bucketDuration\ +syntax: "TS.CREATERULE sourceKey destKey + AGGREGATION aggregator bucketDuration\ + \ + [alignTimestamp] +" +syntax_fmt: "TS.CREATERULE sourceKey destKey AGGREGATION\_ bucketDuration\ \ [alignTimestamp]" syntax_str: "destKey AGGREGATION\_ bucketDuration [alignTimestamp]" diff --git a/content/commands/ts.decrby/index.md b/content/commands/ts.decrby/index.md index 3a23934a78..e3ce620ff0 100644 --- 
a/content/commands/ts.decrby/index.md +++ b/content/commands/ts.decrby/index.md @@ -30,6 +30,16 @@ arguments: optional: true token: LABELS type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(M) when M is the amount of compaction rules or O(1) with no compaction description: Decrease the value of the sample with the maximum existing timestamp, or create a new sample with a value equal to the value of the sample with the maximum @@ -43,10 +53,18 @@ stack_path: docs/data-types/timeseries summary: Decrease the value of the sample with the maximum existing timestamp, or create a new sample with a value equal to the value of the sample with the maximum existing timestamp with a given decrement -syntax: "TS.DECRBY key subtrahend \n [TIMESTAMP timestamp] \n [RETENTION retentionPeriod]\ - \ \n [UNCOMPRESSED] \n [CHUNK_SIZE size] \n [LABELS {label value}...]\n" -syntax_fmt: "TS.DECRBY key value [TIMESTAMP\_timestamp]\n [RETENTION\_retentionPeriod]\ - \ [UNCOMPRESSED] [CHUNK_SIZE\_size]\n [LABELS\_label value [label value ...]]" +syntax: "TS.DECRBY key subtrahend + [TIMESTAMP timestamp] + [RETENTION retentionPeriod]\ + \ + [UNCOMPRESSED] + [CHUNK_SIZE size] + [LABELS {label value}...] 
+" +syntax_fmt: "TS.DECRBY key value [TIMESTAMP\_timestamp] + [RETENTION\_retentionPeriod]\ + \ [UNCOMPRESSED] [CHUNK_SIZE\_size] + [LABELS\_label value [label value ...]]" syntax_str: "value [TIMESTAMP\_timestamp] [RETENTION\_retentionPeriod] [UNCOMPRESSED]\ \ [CHUNK_SIZE\_size] [LABELS\_label value [label value ...]]" title: TS.DECRBY diff --git a/content/commands/ts.del/index.md b/content/commands/ts.del/index.md index 3b40caa21c..bfb7b7295f 100644 --- a/content/commands/ts.del/index.md +++ b/content/commands/ts.del/index.md @@ -6,6 +6,16 @@ arguments: type: integer - name: to_timestamp type: integer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N) where N is the number of data points that will be removed description: Delete all samples between two timestamps for a given time series group: timeseries diff --git a/content/commands/ts.deleterule/index.md b/content/commands/ts.deleterule/index.md index 35e57e8b58..465a9cb47a 100644 --- a/content/commands/ts.deleterule/index.md +++ b/content/commands/ts.deleterule/index.md @@ -4,6 +4,16 @@ arguments: type: key - name: destKey type: key +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Delete a compaction rule group: timeseries diff --git a/content/commands/ts.get/index.md b/content/commands/ts.get/index.md index c239da6708..e79ac87a3b 100644 --- a/content/commands/ts.get/index.md +++ b/content/commands/ts.get/index.md @@ -6,6 +6,16 @@ arguments: optional: true since: 1.8.0 type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Get the sample with the highest timestamp from a given time series group: timeseries @@ -15,7 +25,9 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Get the sample with the highest timestamp from a given time series -syntax: "TS.GET key \n [LATEST]\n" 
+syntax: "TS.GET key + [LATEST] +" syntax_fmt: TS.GET key [LATEST] syntax_str: '[LATEST]' title: TS.GET @@ -36,9 +48,9 @@ is key name for the time series.
LATEST (since RedisTimeSeries v1.8) -is used when a time series is a compaction. With `LATEST`, TS.GET reports the compacted value of the latest, possibly partial, bucket. Without `LATEST`, TS.GET does not report the latest, possibly partial, bucket. When a time series is not a compaction, `LATEST` is ignored. +is used when a time series is a compaction. With `LATEST`, TS.GET reports the compacted value of the latest (possibly partial) bucket. Without `LATEST`, TS.GET does not report the latest (possibly partial) bucket. When a time series is not a compaction, `LATEST` is ignored. -The data in the latest bucket of a compaction is possibly partial. A bucket is _closed_ and compacted only upon arrival of a new sample that _opens_ a new _latest_ bucket. There are cases, however, when the compacted value of the latest, possibly partial, bucket is also required. In such a case, use `LATEST`. +The data in the latest bucket of a compaction is possibly partial. A bucket is _closed_ and compacted only upon arrival of a new sample that _opens_ a new _latest_ bucket. There are cases, however, when the compacted value of the latest (possibly partial) bucket is also required. In such a case, use `LATEST`.
## Return value diff --git a/content/commands/ts.incrby/index.md b/content/commands/ts.incrby/index.md index d76082f4f1..72d7ae7604 100644 --- a/content/commands/ts.incrby/index.md +++ b/content/commands/ts.incrby/index.md @@ -30,6 +30,16 @@ arguments: optional: true token: LABELS type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(M) when M is the amount of compaction rules or O(1) with no compaction description: Increase the value of the sample with the maximum existing timestamp, or create a new sample with a value equal to the value of the sample with the maximum @@ -43,10 +53,18 @@ stack_path: docs/data-types/timeseries summary: Increase the value of the sample with the maximum existing timestamp, or create a new sample with a value equal to the value of the sample with the maximum existing timestamp with a given increment -syntax: "TS.INCRBY key addend \n [TIMESTAMP timestamp] \n [RETENTION retentionPeriod]\ - \ \n [UNCOMPRESSED] \n [CHUNK_SIZE size] \n [LABELS {label value}...]\n" -syntax_fmt: "TS.INCRBY key value [TIMESTAMP\_timestamp]\n [RETENTION\_retentionPeriod]\ - \ [UNCOMPRESSED] [CHUNK_SIZE\_size]\n [LABELS\_label value [label value ...]]" +syntax: "TS.INCRBY key addend + [TIMESTAMP timestamp] + [RETENTION retentionPeriod]\ + \ + [UNCOMPRESSED] + [CHUNK_SIZE size] + [LABELS {label value}...] 
+" +syntax_fmt: "TS.INCRBY key value [TIMESTAMP\_timestamp] + [RETENTION\_retentionPeriod]\ + \ [UNCOMPRESSED] [CHUNK_SIZE\_size] + [LABELS\_label value [label value ...]]" syntax_str: "value [TIMESTAMP\_timestamp] [RETENTION\_retentionPeriod] [UNCOMPRESSED]\ \ [CHUNK_SIZE\_size] [LABELS\_label value [label value ...]]" title: TS.INCRBY diff --git a/content/commands/ts.info/index.md b/content/commands/ts.info/index.md index 5f9f23184a..6b0f028b42 100644 --- a/content/commands/ts.info/index.md +++ b/content/commands/ts.info/index.md @@ -5,6 +5,16 @@ arguments: - name: DEBUG optional: true type: string +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(1) description: Returns information and statistics for a time series group: timeseries @@ -14,7 +24,9 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Returns information and statistics for a time series -syntax: "TS.INFO key \n [DEBUG]\n" +syntax: "TS.INFO key + [DEBUG] +" syntax_fmt: TS.INFO key [DEBUG] syntax_str: '[DEBUG]' title: TS.INFO diff --git a/content/commands/ts.madd/index.md b/content/commands/ts.madd/index.md index bac7ffeccb..1c7556cd01 100644 --- a/content/commands/ts.madd/index.md +++ b/content/commands/ts.madd/index.md @@ -10,6 +10,16 @@ arguments: multiple: true name: ktv type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(N*M) when N is the amount of series updated and M is the amount of compaction rules or O(N) with no compaction description: Append new samples to one or more time series diff --git a/content/commands/ts.mget/index.md b/content/commands/ts.mget/index.md index 9fc241c684..b19da5525e 100644 --- a/content/commands/ts.mget/index.md +++ b/content/commands/ts.mget/index.md @@ -37,6 +37,16 @@ arguments: name: filterExpr token: FILTER type: oneof +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients 
complexity: O(n) where n is the number of time-series that match the filters description: Get the sample with the highest timestamp from each time series matching a specific filter @@ -51,8 +61,10 @@ summary: Get the sample with the highest timestamp from each time series matchin syntax: 'TS.MGET [LATEST] [WITHLABELS | SELECTED_LABELS label...] FILTER filterExpr... ' -syntax_fmt: "TS.MGET [LATEST] [WITHLABELS | SELECTED_LABELS label1 [label1 ...]]\n\ - \ FILTER\_" syntax_str: "[WITHLABELS | SELECTED_LABELS label1 [label1 ...]] FILTER\_ -FILTER filterExpr... +FILTER filterExpr... filters time series based on their labels and label values. Each filter expression has one of the following syntaxes: - - `label=value`, where `label` equals `value` - - `label!=value`, where `label` does not equal `value` - - `label=`, where `key` does not have label `label` - - `label!=`, where `key` has label `label` - - `label=(value1,value2,...)`, where `key` with label `label` equals one of the values in the list - - `label!=(value1,value2,...)` where key with label `label` does not equal any of the values in the list - - NOTES: - - At least one `label=value` filter is required. - - Filters are conjunctive. For example, the FILTER `type=temperature room=study` means the a time series is a temperature time series of a study room. - - Don't use whitespaces in the filter expression. 
+ - `label!=` - the time series has a label named `label` + - `label=value` - the time series has a label named `label` with a value equal to `value` + - `label=(value1,value2,...)` - the time series has a label named `label` with a value equal to one of the values in the list + - `label=` - the time series does not have a label named `label` + - `label!=value` - the time series does not have a label named `label` with a value equal to `value` + - `label!=(value1,value2,...)` - the time series does not have a label named `label` with a value equal to any of the values in the list + + Notes: + - At least one filter expression with a syntax `label=value` or `label=(value1,value2,...)` is required. + - Filter expressions are conjunctive. For example, the filter `type=temperature room=study` means that a time series is a temperature time series of a study room. + - Whitespaces are unallowed in a filter expression except between quotes or double quotes in values - e.g., `x="y y"` or `x='(y y,z z)'`. @@ -90,9 +102,9 @@ filters time series based on their labels and label values. Each filter expressi
LATEST (since RedisTimeSeries v1.8) -is used when a time series is a compaction. With `LATEST`, TS.MGET also reports the compacted value of the latest possibly partial bucket, given that this bucket's start time falls within `[fromTimestamp, toTimestamp]`. Without `LATEST`, TS.MGET does not report the latest possibly partial bucket. When a time series is not a compaction, `LATEST` is ignored. +is used when a time series is a compaction. With `LATEST`, TS.MGET also reports the compacted value of the latest (possibly partial) bucket, given that this bucket's start time falls within `[fromTimestamp, toTimestamp]`. Without `LATEST`, TS.MGET does not report the latest (possibly partial) bucket. When a time series is not a compaction, `LATEST` is ignored. -The data in the latest bucket of a compaction is possibly partial. A bucket is _closed_ and compacted only upon arrival of a new sample that _opens_ a new _latest_ bucket. There are cases, however, when the compacted value of the latest possibly partial bucket is also required. In such a case, use `LATEST`. +The data in the latest bucket of a compaction is possibly partial. A bucket is _closed_ and compacted only upon the arrival of a new sample that _opens_ a new _latest_ bucket. There are cases, however, when the compacted value of the latest (possibly partial) bucket is also required. In such a case, use `LATEST`.
@@ -112,7 +124,7 @@ If `WITHLABELS` or `SELECTED_LABELS` are not specified, by default, an empty lis
-Note: The [`MGET`](/commands/mget) command cannot be part of transaction when running on a Redis cluster. +Note: The [`MGET`](/commands/mget) command cannot be part of a transaction when running on a Redis cluster. ## Return value diff --git a/content/commands/ts.mrange/index.md b/content/commands/ts.mrange/index.md index b07cb69cbb..a1faea70a0 100644 --- a/content/commands/ts.mrange/index.md +++ b/content/commands/ts.mrange/index.md @@ -138,6 +138,16 @@ arguments: name: groupby optional: true type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n/m+k) where n = Number of data points, m = Chunk size (data points per chunk), k = Number of data points that are in the requested ranges description: Query a range across multiple time series by filters in forward direction @@ -148,17 +158,34 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Query a range across multiple time series by filters in forward direction -syntax: "TS.MRANGE fromTimestamp toTimestamp\n [LATEST]\n [FILTER_BY_TS ts...]\n\ - \ [FILTER_BY_VALUE min max]\n [WITHLABELS | SELECTED_LABELS label...]\n [COUNT\ - \ count]\n [[ALIGN align] AGGREGATION aggregator bucketDuration [BUCKETTIMESTAMP\ - \ bt] [EMPTY]]\n FILTER filterExpr...\n [GROUPBY label REDUCE reducer]\n" -syntax_fmt: "TS.MRANGE fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp\n\ - \ [Timestamp ...]] [FILTER_BY_VALUE min max] [WITHLABELS |\n SELECTED_LABELS label1\ - \ [label1 ...]] [COUNT\_count] [[ALIGN\_value]\n AGGREGATION\_ bucketDuration\n\ - \ [BUCKETTIMESTAMP] [EMPTY]] FILTER\_\ - \ [GROUPBY label REDUCE\n reducer]" +syntax: "TS.MRANGE fromTimestamp toTimestamp + [LATEST] + [FILTER_BY_TS ts...] +\ + \ [FILTER_BY_VALUE min max] + [WITHLABELS | SELECTED_LABELS label...] + [COUNT\ + \ count] + [[ALIGN align] AGGREGATION aggregator bucketDuration [BUCKETTIMESTAMP\ + \ bt] [EMPTY]] + FILTER filterExpr... 
+ [GROUPBY label REDUCE reducer] +" +syntax_fmt: "TS.MRANGE fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp +\ + \ [Timestamp ...]] [FILTER_BY_VALUE min max] [WITHLABELS | + SELECTED_LABELS label1\ + \ [label1 ...]] [COUNT\_count] [[ALIGN\_value] + AGGREGATION\_ bucketDuration +\ + \ [BUCKETTIMESTAMP] [EMPTY]] FILTER\_\ + \ [GROUPBY label REDUCE + reducer]" syntax_str: "toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp [Timestamp ...]] [FILTER_BY_VALUE\ \ min max] [WITHLABELS | SELECTED_LABELS label1 [label1 ...]] [COUNT\_count] [[ALIGN\_\ value] AGGREGATION\_ fromTimestamp -is start timestamp for the range query (integer Unix timestamp in milliseconds) or `-` to denote the timestamp of the earliest sample amongs all time series that passes `FILTER filterExpr...`. +is the start timestamp for the range query (integer Unix timestamp in milliseconds) or `-` to denote the timestamp of the earliest sample among all the time series that passes `FILTER filterExpr...`.
toTimestamp

-is end timestamp for the range query (integer Unix timestamp in milliseconds) or `+` to denote the timestamp of the latest sample amongs all time series that passes `FILTER filterExpr...`.
+is the end timestamp for the range query (integer Unix timestamp in milliseconds) or `+` to denote the timestamp of the latest sample among all the time series that pass `FILTER filterExpr...`.
@@ -191,17 +218,17 @@ is end timestamp for the range query (integer Unix timestamp in milliseconds) or filters time series based on their labels and label values. Each filter expression has one of the following syntaxes: - - `label=value`, where `label` equals `value` - - `label!=value`, where `label` does not equal `value` - - `label=`, where `key` does not have label `label` - - `label!=`, where `key` has label `label` - - `label=(value1,value2,...)`, where `key` with label `label` equals one of the values in the list - - `label!=(value1,value2,...)`, where key with label `label` does not equal any of the values in the list - -Notes: - - At least one `label=value` filter is required. - - Filters are conjunctive. For example, the FILTER `type=temperature room=study` means the a time series is a temperature time series of a study room. - - Don't use whitespaces in the filter expression. + - `label!=` - the time series has a label named `label` + - `label=value` - the time series has a label named `label` with a value equal to `value` + - `label=(value1,value2,...)` - the time series has a label named `label` with a value equal to one of the values in the list + - `label=` - the time series does not have a label named `label` + - `label!=value` - the time series does not have a label named `label` with a value equal to `value` + - `label!=(value1,value2,...)` - the time series does not have a label named `label` with a value equal to any of the values in the list + + Notes: + - At least one filter expression with a syntax `label=value` or `label=(value1,value2,...)` is required. + - Filter expressions are conjunctive. For example, the filter `type=temperature room=study` means that a time series is a temperature time series of a study room. + - Whitespaces are unallowed in a filter expression except between quotes or double quotes in values - e.g., `x="y y"` or `x='(y y,z z)'`.
@@ -210,9 +237,9 @@ filters time series based on their labels and label values. Each filter expressi
LATEST (since RedisTimeSeries v1.8) -is used when a time series is a compaction. With `LATEST`, TS.MRANGE also reports the compacted value of the latest possibly partial bucket, given that this bucket's start time falls within `[fromTimestamp, toTimestamp]`. Without `LATEST`, TS.MRANGE does not report the latest possibly partial bucket. When a time series is not a compaction, `LATEST` is ignored. +is used when a time series is a compaction. With `LATEST`, TS.MRANGE also reports the compacted value of the latest (possibly partial) bucket, given that this bucket's start time falls within `[fromTimestamp, toTimestamp]`. Without `LATEST`, TS.MRANGE does not report the latest (possibly partial) bucket. When a time series is not a compaction, `LATEST` is ignored. -The data in the latest bucket of a compaction is possibly partial. A bucket is _closed_ and compacted only upon arrival of a new sample that _opens_ a new _latest_ bucket. There are cases, however, when the compacted value of the latest possibly partial bucket is also required. In such a case, use `LATEST`. +The data in the latest bucket of a compaction is possibly partial. A bucket is _closed_ and compacted only upon the arrival of a new sample that _opens_ a new _latest_ bucket. There are cases, however, when the compacted value of the latest (possibly partial) bucket is also required. In such a case, use `LATEST`.
diff --git a/content/commands/ts.mrevrange/index.md b/content/commands/ts.mrevrange/index.md index 242f5e99a6..e95616b8d7 100644 --- a/content/commands/ts.mrevrange/index.md +++ b/content/commands/ts.mrevrange/index.md @@ -138,6 +138,16 @@ arguments: name: groupby optional: true type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n/m+k) where n = Number of data points, m = Chunk size (data points per chunk), k = Number of data points that are in the requested ranges description: Query a range across multiple time-series by filters in reverse direction @@ -148,17 +158,34 @@ module: TimeSeries since: 1.4.0 stack_path: docs/data-types/timeseries summary: Query a range across multiple time-series by filters in reverse direction -syntax: "TS.MREVRANGE fromTimestamp toTimestamp\n [LATEST]\n [FILTER_BY_TS TS...]\n\ - \ [FILTER_BY_VALUE min max]\n [WITHLABELS | SELECTED_LABELS label...]\n [COUNT\ - \ count]\n [[ALIGN align] AGGREGATION aggregator bucketDuration [BUCKETTIMESTAMP\ - \ bt] [EMPTY]]\n FILTER filterExpr...\n [GROUPBY label REDUCE reducer]\n" -syntax_fmt: "TS.MREVRANGE fromTimestamp toTimestamp [LATEST]\n [FILTER_BY_TS\_Timestamp\ - \ [Timestamp ...]] [FILTER_BY_VALUE min max]\n [WITHLABELS | SELECTED_LABELS label1\ - \ [label1 ...]] [COUNT\_count]\n [[ALIGN\_value] AGGREGATION\_\n\ - \ bucketDuration [BUCKETTIMESTAMP] [EMPTY]] FILTER\_ [GROUPBY label REDUCE\n reducer]" +syntax: "TS.MREVRANGE fromTimestamp toTimestamp + [LATEST] + [FILTER_BY_TS TS...] +\ + \ [FILTER_BY_VALUE min max] + [WITHLABELS | SELECTED_LABELS label...] + [COUNT\ + \ count] + [[ALIGN align] AGGREGATION aggregator bucketDuration [BUCKETTIMESTAMP\ + \ bt] [EMPTY]] + FILTER filterExpr... 
+ [GROUPBY label REDUCE reducer] +" +syntax_fmt: "TS.MREVRANGE fromTimestamp toTimestamp [LATEST] + [FILTER_BY_TS\_Timestamp\ + \ [Timestamp ...]] [FILTER_BY_VALUE min max] + [WITHLABELS | SELECTED_LABELS label1\ + \ [label1 ...]] [COUNT\_count] + [[ALIGN\_value] AGGREGATION\_ +\ + \ bucketDuration [BUCKETTIMESTAMP] [EMPTY]] FILTER\_ [GROUPBY label REDUCE + reducer]" syntax_str: "toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp [Timestamp ...]] [FILTER_BY_VALUE\ \ min max] [WITHLABELS | SELECTED_LABELS label1 [label1 ...]] [COUNT\_count] [[ALIGN\_\ value] AGGREGATION\_ fromTimestamp -is start timestamp for the range query (integer Unix timestamp in milliseconds) or `-` to denote the timestamp of the earliest sample amongs all time series that passes `FILTER filterExpr...`. +is the start timestamp for the range query (integer Unix timestamp in milliseconds) or `-` to denote the timestamp of the earliest sample among all the time series that passes `FILTER filterExpr...`.
toTimestamp

-is end timestamp for the range query (integer Unix timestamp in milliseconds) or `+` to denote the timestamp of the latest sample amongs all time series that passes `FILTER filterExpr...`.
+is the end timestamp for the range query (integer Unix timestamp in milliseconds) or `+` to denote the timestamp of the latest sample among all the time series that pass `FILTER filterExpr...`.
FILTER filterExpr... -filters time series based on their labels and label values. Each filter expression has one of the following syntaxes: +filters time series based on their labels and label values. Each filter expression has one of the following syntaxes: - - `label=value`, where `label` equals `value` - - `label!=value`, where `label` does not equal `value` - - `label=`, where `key` does not have label `label` - - `label!=`, where `key` has label `label` - - `label=(value1,value2,...)`, where `key` with label `label` equals one of the values in the list - - `label!=(value1,value2,...)`, where key with label `label` does not equal any of the values in the list + - `label!=` - the time series has a label named `label` + - `label=value` - the time series has a label named `label` with a value equal to `value` + - `label=(value1,value2,...)` - the time series has a label named `label` with a value equal to one of the values in the list + - `label=` - the time series does not have a label named `label` + - `label!=value` - the time series does not have a label named `label` with a value equal to `value` + - `label!=(value1,value2,...)` - the time series does not have a label named `label` with a value equal to any of the values in the list -Notes: - - At least one `label=value` filter is required. - - Filters are conjunctive. For example, the FILTER `type=temperature room=study` means the a time series is a temperature time series of a study room. - - Don't use whitespaces in the filter expression. + Notes: + - At least one filter expression with a syntax `label=value` or `label=(value1,value2,...)` is required. + - Filter expressions are conjunctive. For example, the filter `type=temperature room=study` means that a time series is a temperature time series of a study room. + - Whitespaces are unallowed in a filter expression except between quotes or double quotes in values - e.g., `x="y y"` or `x='(y y,z z)'`.
@@ -210,9 +237,9 @@ filters time series based on their labels and label values. Each filter expressi
LATEST (since RedisTimeSeries v1.8) -is used when a time series is a compaction. With `LATEST`, TS.MREVRANGE also reports the compacted value of the latest possibly partial bucket, given that this bucket's start time falls within `[fromTimestamp, toTimestamp]`. Without `LATEST`, TS.MREVRANGE does not report the latest possibly partial bucket. When a time series is not a compaction, `LATEST` is ignored. +is used when a time series is a compaction. With `LATEST`, TS.MREVRANGE also reports the compacted value of the latest (possibly partial) bucket, given that this bucket's start time falls within `[fromTimestamp, toTimestamp]`. Without `LATEST`, TS.MREVRANGE does not report the latest (possibly partial) bucket. When a time series is not a compaction, `LATEST` is ignored. -The data in the latest bucket of a compaction is possibly partial. A bucket is _closed_ and compacted only upon arrival of a new sample that _opens_ a new _latest_ bucket. There are cases, however, when the compacted value of the latest possibly partial bucket is also required. In such a case, use `LATEST`. +The data in the latest bucket of a compaction is possibly partial. A bucket is _closed_ and compacted only upon the arrival of a new sample that _opens_ a new _latest_ bucket. There are cases, however, when the compacted value of the latest (possibly partial) bucket is also required. In such a case, use `LATEST`.
diff --git a/content/commands/ts.queryindex/index.md b/content/commands/ts.queryindex/index.md index 4740bcf983..e3d28c3329 100644 --- a/content/commands/ts.queryindex/index.md +++ b/content/commands/ts.queryindex/index.md @@ -16,6 +16,16 @@ arguments: multiple: true name: filterExpr type: oneof +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n) where n is the number of time-series that match the filters description: Get all time series keys matching a filter list group: timeseries @@ -28,8 +38,10 @@ summary: Get all time series keys matching a filter list syntax: 'TS.QUERYINDEX filterExpr... ' -syntax_fmt: "TS.QUERYINDEX " +syntax_fmt: "TS.QUERYINDEX " syntax_str: '' title: TS.QUERYINDEX --- @@ -42,23 +54,24 @@ Get all time series keys matching a filter list
filterExpr... + filters time series based on their labels and label values. Each filter expression has one of the following syntaxes: - - `label=value`, where `label` equals `value` - - `label!=value`, where `label` does not equal `value` - - `label=`, where `key` does not have label `label` - - `label!=`, where `key` has label `label` - - `label=(value1,value2,...)`, where `key` with label `label` equals one of the values in the list - - `label!=(value1,value2,...)`, where key with label `label` does not equal any of the values in the list + - `label!=` - the time series has a label named `label` + - `label=value` - the time series has a label named `label` with a value equal to `value` + - `label=(value1,value2,...)` - the time series has a label named `label` with a value equal to one of the values in the list + - `label=` - the time series does not have a label named `label` + - `label!=value` - the time series does not have a label named `label` with a value equal to `value` + - `label!=(value1,value2,...)` - the time series does not have a label named `label` with a value equal to any of the values in the list Notes: - - At least one `label=value` filter is required. - - Filters are conjunctive. For example, the FILTER `type=temperature room=study` means the a time series is a temperature time series of a study room. - - Don't use whitespaces in the filter expression. + - At least one filter expression with a syntax `label=value` or `label=(value1,value2,...)` is required. + - Filter expressions are conjunctive. For example, the filter `type=temperature room=study` means that a time series is a temperature time series of a study room. + - Whitespaces are unallowed in a filter expression except between quotes or double quotes in values - e.g., `x="y y"` or `x='(y y,z z)'`.
-Note: The `QUERYINDEX` command cannot be part of transaction when running on a Redis cluster. +Note: The `QUERYINDEX` command cannot be part of a transaction when running on a Redis cluster. ## Return value diff --git a/content/commands/ts.range/index.md b/content/commands/ts.range/index.md index c3e8a9b03a..ae87acc468 100644 --- a/content/commands/ts.range/index.md +++ b/content/commands/ts.range/index.md @@ -94,6 +94,16 @@ arguments: name: aggregation optional: true type: block +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: O(n/m+k) where n = Number of data points, m = Chunk size (data points per chunk), k = Number of data points that are in the requested range description: Query a range in forward direction @@ -104,13 +114,23 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Query a range in forward direction -syntax: "TS.RANGE key fromTimestamp toTimestamp\n [LATEST]\n [FILTER_BY_TS ts...]\n\ - \ [FILTER_BY_VALUE min max]\n [COUNT count] \n [[ALIGN align] AGGREGATION aggregator\ - \ bucketDuration [BUCKETTIMESTAMP bt] [EMPTY]]\n" -syntax_fmt: "TS.RANGE key fromTimestamp toTimestamp [LATEST]\n [FILTER_BY_TS\_Timestamp\ - \ [Timestamp ...]] [FILTER_BY_VALUE min max]\n [COUNT\_count] [[ALIGN\_value] AGGREGATION\_\ - \n bucketDuration [BUCKETTIMESTAMP] [EMPTY]]" +syntax: "TS.RANGE key fromTimestamp toTimestamp + [LATEST] + [FILTER_BY_TS ts...] 
+\ + \ [FILTER_BY_VALUE min max] + [COUNT count] + [[ALIGN align] AGGREGATION aggregator\ + \ bucketDuration [BUCKETTIMESTAMP bt] [EMPTY]] +" +syntax_fmt: "TS.RANGE key fromTimestamp toTimestamp [LATEST] + [FILTER_BY_TS\_Timestamp\ + \ [Timestamp ...]] [FILTER_BY_VALUE min max] + [COUNT\_count] [[ALIGN\_value] AGGREGATION\_\ + + bucketDuration [BUCKETTIMESTAMP] [EMPTY]]" syntax_str: "fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp [Timestamp\ \ ...]] [FILTER_BY_VALUE min max] [COUNT\_count] [[ALIGN\_value] AGGREGATION\_\n bucketDuration [BUCKETTIMESTAMP] [EMPTY]]" +syntax: "TS.REVRANGE key fromTimestamp toTimestamp + [LATEST] + [FILTER_BY_TS TS...] +\ + \ [FILTER_BY_VALUE min max] + [COUNT count] + [[ALIGN align] AGGREGATION aggregator\ + \ bucketDuration [BUCKETTIMESTAMP bt] [EMPTY]] +" +syntax_fmt: "TS.REVRANGE key fromTimestamp toTimestamp [LATEST] + [FILTER_BY_TS\_\ + Timestamp [Timestamp ...]] [FILTER_BY_VALUE min max] + [COUNT\_count] [[ALIGN\_\ + value] AGGREGATION\_ + bucketDuration [BUCKETTIMESTAMP] [EMPTY]]" syntax_str: "fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp [Timestamp\ \ ...]] [FILTER_BY_VALUE min max] [COUNT\_count] [[ALIGN\_value] AGGREGATION\_ [= | ~] threshold\n [LIMIT\_\ +syntax_fmt: "XADD key [NOMKSTREAM] [ [= | ~] threshold + [LIMIT\_\ count]] <* | id> field value [field value ...]" syntax_str: "[NOMKSTREAM] [ [= | ~] threshold [LIMIT\_count]] <* |\ \ id> field value [field value ...]" diff --git a/content/commands/xautoclaim/index.md b/content/commands/xautoclaim/index.md index 1e82497ca1..0a1b04e514 100644 --- a/content/commands/xautoclaim/index.md +++ b/content/commands/xautoclaim/index.md @@ -31,6 +31,16 @@ arguments: token: JUSTID type: pure-token arity: -6 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast @@ -62,7 +72,8 @@ linkTitle: XAUTOCLAIM since: 6.2.0 summary: Changes, or acquires, ownership of messages in a consumer group, as if 
the messages were delivered to as consumer group member. -syntax_fmt: "XAUTOCLAIM key group consumer min-idle-time start [COUNT\_count]\n [JUSTID]" +syntax_fmt: "XAUTOCLAIM key group consumer min-idle-time start [COUNT\_count] + [JUSTID]" syntax_str: "group consumer min-idle-time start [COUNT\_count] [JUSTID]" title: XAUTOCLAIM --- diff --git a/content/commands/xclaim/index.md b/content/commands/xclaim/index.md index 620ed869f5..904e5f1440 100644 --- a/content/commands/xclaim/index.md +++ b/content/commands/xclaim/index.md @@ -52,6 +52,16 @@ arguments: token: LASTID type: string arity: -6 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast @@ -80,8 +90,10 @@ linkTitle: XCLAIM since: 5.0.0 summary: Changes, or acquires, ownership of a message in a consumer group, as if the message was delivered a consumer group member. -syntax_fmt: "XCLAIM key group consumer min-idle-time id [id ...] [IDLE\_ms]\n [TIME\_\ - unix-time-milliseconds] [RETRYCOUNT\_count] [FORCE] [JUSTID]\n [LASTID\_lastid]" +syntax_fmt: "XCLAIM key group consumer min-idle-time id [id ...] [IDLE\_ms] + [TIME\_\ + unix-time-milliseconds] [RETRYCOUNT\_count] [FORCE] [JUSTID] + [LASTID\_lastid]" syntax_str: "group consumer min-idle-time id [id ...] 
[IDLE\_ms] [TIME\_unix-time-milliseconds]\ \ [RETRYCOUNT\_count] [FORCE] [JUSTID] [LASTID\_lastid]" title: XCLAIM diff --git a/content/commands/xdel/index.md b/content/commands/xdel/index.md index fae412279e..c5d3877d18 100644 --- a/content/commands/xdel/index.md +++ b/content/commands/xdel/index.md @@ -13,6 +13,16 @@ arguments: name: id type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/xgroup-create/index.md b/content/commands/xgroup-create/index.md index a66823dc5e..fda2ac8f73 100644 --- a/content/commands/xgroup-create/index.md +++ b/content/commands/xgroup-create/index.md @@ -32,6 +32,16 @@ arguments: token: ENTRIESREAD type: integer arity: -5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -58,7 +68,8 @@ key_specs: linkTitle: XGROUP CREATE since: 5.0.0 summary: Creates a consumer group. 
-syntax_fmt: "XGROUP CREATE key group [MKSTREAM]\n [ENTRIESREAD\_entries-read]" +syntax_fmt: "XGROUP CREATE key group [MKSTREAM] + [ENTRIESREAD\_entries-read]" syntax_str: "group [MKSTREAM] [ENTRIESREAD\_entries-read]" title: XGROUP CREATE --- diff --git a/content/commands/xgroup-createconsumer/index.md b/content/commands/xgroup-createconsumer/index.md index 5b3537afcd..64f7857175 100644 --- a/content/commands/xgroup-createconsumer/index.md +++ b/content/commands/xgroup-createconsumer/index.md @@ -15,6 +15,16 @@ arguments: name: consumer type: string arity: 5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/xgroup-delconsumer/index.md b/content/commands/xgroup-delconsumer/index.md index 81570decd6..0d4077976b 100644 --- a/content/commands/xgroup-delconsumer/index.md +++ b/content/commands/xgroup-delconsumer/index.md @@ -15,6 +15,16 @@ arguments: name: consumer type: string arity: 5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(1) diff --git a/content/commands/xgroup-destroy/index.md b/content/commands/xgroup-destroy/index.md index de44cbfdd0..84f97292e6 100644 --- a/content/commands/xgroup-destroy/index.md +++ b/content/commands/xgroup-destroy/index.md @@ -12,6 +12,16 @@ arguments: name: group type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(N) where N is the number of entries in the group's pending entries list diff --git a/content/commands/xgroup-help/index.md b/content/commands/xgroup-help/index.md index 334433519f..dfa9272bc5 100644 --- a/content/commands/xgroup-help/index.md +++ b/content/commands/xgroup-help/index.md @@ -3,6 +3,16 @@ acl_categories: - '@stream' - '@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients 
command_flags: - loading - stale diff --git a/content/commands/xgroup-setid/index.md b/content/commands/xgroup-setid/index.md index 9ab9f01a9d..de10d70f7b 100644 --- a/content/commands/xgroup-setid/index.md +++ b/content/commands/xgroup-setid/index.md @@ -27,6 +27,16 @@ arguments: token: ENTRIESREAD type: integer arity: -5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(1) diff --git a/content/commands/xgroup/index.md b/content/commands/xgroup/index.md index 08ed8d7dfa..128c643e29 100644 --- a/content/commands/xgroup/index.md +++ b/content/commands/xgroup/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for consumer groups commands. group: stream diff --git a/content/commands/xinfo-consumers/index.md b/content/commands/xinfo-consumers/index.md index 18a638a6b8..b530528602 100644 --- a/content/commands/xinfo-consumers/index.md +++ b/content/commands/xinfo-consumers/index.md @@ -12,6 +12,16 @@ arguments: name: group type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) diff --git a/content/commands/xinfo-groups/index.md b/content/commands/xinfo-groups/index.md index e8ddc38019..4590f979e6 100644 --- a/content/commands/xinfo-groups/index.md +++ b/content/commands/xinfo-groups/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) diff --git a/content/commands/xinfo-help/index.md b/content/commands/xinfo-help/index.md index 5fb42bc7c2..a908d9bc5c 100644 --- a/content/commands/xinfo-help/index.md +++ b/content/commands/xinfo-help/index.md @@ -3,6 +3,16 @@ acl_categories: - '@stream' - 
'@slow' arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - loading - stale diff --git a/content/commands/xinfo-stream/index.md b/content/commands/xinfo-stream/index.md index 72c061d6b7..be326d684c 100644 --- a/content/commands/xinfo-stream/index.md +++ b/content/commands/xinfo-stream/index.md @@ -22,6 +22,16 @@ arguments: optional: true type: block arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) diff --git a/content/commands/xinfo/index.md b/content/commands/xinfo/index.md index c6bc6dbe8f..3cdff2d893 100644 --- a/content/commands/xinfo/index.md +++ b/content/commands/xinfo/index.md @@ -2,6 +2,16 @@ acl_categories: - '@slow' arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients complexity: Depends on subcommand. description: A container for stream introspection commands. group: stream diff --git a/content/commands/xlen/index.md b/content/commands/xlen/index.md index 101dfb23cc..a693c02429 100644 --- a/content/commands/xlen/index.md +++ b/content/commands/xlen/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/xpending/index.md b/content/commands/xpending/index.md index 58e1c348ec..11d46b57f0 100644 --- a/content/commands/xpending/index.md +++ b/content/commands/xpending/index.md @@ -35,6 +35,16 @@ arguments: optional: true type: block arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) with N being the number of elements returned, so asking for a small diff --git a/content/commands/xrange/index.md b/content/commands/xrange/index.md index 7089699700..07109e4129 100644 --- 
a/content/commands/xrange/index.md +++ b/content/commands/xrange/index.md @@ -20,6 +20,16 @@ arguments: token: COUNT type: integer arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) with N being the number of elements being returned. If N is constant diff --git a/content/commands/xread/index.md b/content/commands/xread/index.md index 35ab1a5d4b..042decfba2 100644 --- a/content/commands/xread/index.md +++ b/content/commands/xread/index.md @@ -29,6 +29,16 @@ arguments: token: STREAMS type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - blocking @@ -55,7 +65,8 @@ linkTitle: XREAD since: 5.0.0 summary: Returns messages from multiple streams with IDs greater than the ones requested. Blocks until a message is available otherwise. -syntax_fmt: "XREAD [COUNT\_count] [BLOCK\_milliseconds] STREAMS\_key [key ...] id\n\ +syntax_fmt: "XREAD [COUNT\_count] [BLOCK\_milliseconds] STREAMS\_key [key ...] id +\ \ [id ...]" syntax_str: "[BLOCK\_milliseconds] STREAMS\_key [key ...] id [id ...]" title: XREAD diff --git a/content/commands/xreadgroup/index.md b/content/commands/xreadgroup/index.md index 9d68020bba..2acc027737 100644 --- a/content/commands/xreadgroup/index.md +++ b/content/commands/xreadgroup/index.md @@ -44,6 +44,16 @@ arguments: token: STREAMS type: block arity: -7 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - blocking @@ -74,7 +84,8 @@ linkTitle: XREADGROUP since: 5.0.0 summary: Returns new or historical messages from a stream for a consumer in a group. Blocks until a message is available otherwise. -syntax_fmt: "XREADGROUP GROUP\_group consumer [COUNT\_count] [BLOCK\_milliseconds]\n\ +syntax_fmt: "XREADGROUP GROUP\_group consumer [COUNT\_count] [BLOCK\_milliseconds] +\ \ [NOACK] STREAMS\_key [key ...] 
id [id ...]" syntax_str: "[COUNT\_count] [BLOCK\_milliseconds] [NOACK] STREAMS\_key [key ...] id\ \ [id ...]" diff --git a/content/commands/xrevrange/index.md b/content/commands/xrevrange/index.md index e4e5d5563e..5bb7e5ac90 100644 --- a/content/commands/xrevrange/index.md +++ b/content/commands/xrevrange/index.md @@ -20,6 +20,16 @@ arguments: token: COUNT type: integer arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) with N being the number of elements returned. If N is constant (e.g. diff --git a/content/commands/xsetid/index.md b/content/commands/xsetid/index.md index acd59b79a4..b4b4fd4f8e 100644 --- a/content/commands/xsetid/index.md +++ b/content/commands/xsetid/index.md @@ -24,6 +24,16 @@ arguments: token: MAXDELETEDID type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -51,7 +61,8 @@ key_specs: linkTitle: XSETID since: 5.0.0 summary: An internal command for replicating stream values. -syntax_fmt: "XSETID key last-id [ENTRIESADDED\_entries-added]\n [MAXDELETEDID\_max-deleted-id]" +syntax_fmt: "XSETID key last-id [ENTRIESADDED\_entries-added] + [MAXDELETEDID\_max-deleted-id]" syntax_str: "last-id [ENTRIESADDED\_entries-added] [MAXDELETEDID\_max-deleted-id]" title: XSETID --- diff --git a/content/commands/xtrim/index.md b/content/commands/xtrim/index.md index 8a8c012024..38632ac238 100644 --- a/content/commands/xtrim/index.md +++ b/content/commands/xtrim/index.md @@ -45,6 +45,16 @@ arguments: name: trim type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(N), with N being the number of evicted entries. 
Constant times are very diff --git a/content/commands/zadd/index.md b/content/commands/zadd/index.md index e9cd597a29..9f4b431186 100644 --- a/content/commands/zadd/index.md +++ b/content/commands/zadd/index.md @@ -57,6 +57,16 @@ arguments: name: data type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -91,7 +101,8 @@ linkTitle: ZADD since: 1.2.0 summary: Adds one or more members to a sorted set, or updates their scores. Creates the key if it doesn't exist. -syntax_fmt: "ZADD key [NX | XX] [GT | LT] [CH] [INCR] score member [score member\n\ +syntax_fmt: "ZADD key [NX | XX] [GT | LT] [CH] [INCR] score member [score member +\ \ ...]" syntax_str: '[NX | XX] [GT | LT] [CH] [INCR] score member [score member ...]' title: ZADD diff --git a/content/commands/zcard/index.md b/content/commands/zcard/index.md index 82aa526370..2c4c9c524f 100644 --- a/content/commands/zcard/index.md +++ b/content/commands/zcard/index.md @@ -9,6 +9,16 @@ arguments: name: key type: key arity: 2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/zcount/index.md b/content/commands/zcount/index.md index 5430e7ee4f..263f73de57 100644 --- a/content/commands/zcount/index.md +++ b/content/commands/zcount/index.md @@ -15,6 +15,16 @@ arguments: name: max type: double arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/zdiff/index.md b/content/commands/zdiff/index.md index 9e0fff1d37..96ebe038f0 100644 --- a/content/commands/zdiff/index.md +++ b/content/commands/zdiff/index.md @@ -18,6 +18,16 @@ arguments: token: WITHSCORES type: pure-token arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - movablekeys diff --git 
a/content/commands/zdiffstore/index.md b/content/commands/zdiffstore/index.md index 71340da256..20befe9417 100644 --- a/content/commands/zdiffstore/index.md +++ b/content/commands/zdiffstore/index.md @@ -17,6 +17,16 @@ arguments: name: key type: key arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/zincrby/index.md b/content/commands/zincrby/index.md index 41810dc996..536562959c 100644 --- a/content/commands/zincrby/index.md +++ b/content/commands/zincrby/index.md @@ -15,6 +15,16 @@ arguments: name: member type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom diff --git a/content/commands/zinter/index.md b/content/commands/zinter/index.md index 88a4be87ab..fef7d43f51 100644 --- a/content/commands/zinter/index.md +++ b/content/commands/zinter/index.md @@ -41,6 +41,16 @@ arguments: token: WITHSCORES type: pure-token arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - movablekeys @@ -66,7 +76,8 @@ key_specs: linkTitle: ZINTER since: 6.2.0 summary: Returns the intersect of multiple sorted sets. -syntax_fmt: "ZINTER numkeys key [key ...] [WEIGHTS\_weight [weight ...]]\n [AGGREGATE\_\ +syntax_fmt: "ZINTER numkeys key [key ...] [WEIGHTS\_weight [weight ...]] + [AGGREGATE\_\ ] [WITHSCORES]" syntax_str: "key [key ...] 
[WEIGHTS\_weight [weight ...]] [AGGREGATE\_] [WITHSCORES]" diff --git a/content/commands/zintercard/index.md b/content/commands/zintercard/index.md index d7dbf8ea58..0ef60ded42 100644 --- a/content/commands/zintercard/index.md +++ b/content/commands/zintercard/index.md @@ -18,6 +18,16 @@ arguments: token: LIMIT type: integer arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - movablekeys diff --git a/content/commands/zinterstore/index.md b/content/commands/zinterstore/index.md index f92b759509..e6267cff96 100644 --- a/content/commands/zinterstore/index.md +++ b/content/commands/zinterstore/index.md @@ -40,6 +40,16 @@ arguments: token: AGGREGATE type: oneof arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -78,7 +88,8 @@ key_specs: linkTitle: ZINTERSTORE since: 2.0.0 summary: Stores the intersect of multiple sorted sets in a key. -syntax_fmt: "ZINTERSTORE destination numkeys key [key ...] [WEIGHTS\_weight\n [weight\ +syntax_fmt: "ZINTERSTORE destination numkeys key [key ...] [WEIGHTS\_weight + [weight\ \ ...]] [AGGREGATE\_]" syntax_str: "numkeys key [key ...] 
[WEIGHTS\_weight [weight ...]] [AGGREGATE\_]" diff --git a/content/commands/zlexcount/index.md b/content/commands/zlexcount/index.md index 1646e77372..82fc1b50f6 100644 --- a/content/commands/zlexcount/index.md +++ b/content/commands/zlexcount/index.md @@ -15,6 +15,16 @@ arguments: name: max type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/zmpop/index.md b/content/commands/zmpop/index.md index 95e4c34246..754d7a1ea2 100644 --- a/content/commands/zmpop/index.md +++ b/content/commands/zmpop/index.md @@ -29,6 +29,16 @@ arguments: token: COUNT type: integer arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - movablekeys diff --git a/content/commands/zmscore/index.md b/content/commands/zmscore/index.md index fbcf640c1f..53ef4d4262 100644 --- a/content/commands/zmscore/index.md +++ b/content/commands/zmscore/index.md @@ -13,6 +13,16 @@ arguments: name: member type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/zpopmax/index.md b/content/commands/zpopmax/index.md index 44e153a163..f316dd53d3 100644 --- a/content/commands/zpopmax/index.md +++ b/content/commands/zpopmax/index.md @@ -13,6 +13,16 @@ arguments: optional: true type: integer arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/zpopmin/index.md b/content/commands/zpopmin/index.md index d9f19acb0a..736ffb08f6 100644 --- a/content/commands/zpopmin/index.md +++ b/content/commands/zpopmin/index.md @@ -13,6 +13,16 @@ arguments: optional: true type: integer arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast 
diff --git a/content/commands/zrandmember/index.md b/content/commands/zrandmember/index.md index 93ea14c1c0..e436ce63b8 100644 --- a/content/commands/zrandmember/index.md +++ b/content/commands/zrandmember/index.md @@ -21,6 +21,16 @@ arguments: optional: true type: block arity: -2 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(N) where N is the number of members returned diff --git a/content/commands/zrange/index.md b/content/commands/zrange/index.md index f8d754161a..49e2e7d7cd 100644 --- a/content/commands/zrange/index.md +++ b/content/commands/zrange/index.md @@ -51,6 +51,16 @@ arguments: token: WITHSCORES type: pure-token arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(log(N)+M) with N being the number of elements in the sorted set and @@ -77,7 +87,8 @@ key_specs: linkTitle: ZRANGE since: 1.2.0 summary: Returns members in a sorted set within a range of indexes. 
-syntax_fmt: "ZRANGE key start stop [BYSCORE | BYLEX] [REV] [LIMIT\_offset count]\n\ +syntax_fmt: "ZRANGE key start stop [BYSCORE | BYLEX] [REV] [LIMIT\_offset count] +\ \ [WITHSCORES]" syntax_str: "start stop [BYSCORE | BYLEX] [REV] [LIMIT\_offset count] [WITHSCORES]" title: ZRANGE diff --git a/content/commands/zrangebylex/index.md b/content/commands/zrangebylex/index.md index 3fd23241fc..f01fd7a0a0 100644 --- a/content/commands/zrangebylex/index.md +++ b/content/commands/zrangebylex/index.md @@ -26,6 +26,16 @@ arguments: token: LIMIT type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(log(N)+M) with N being the number of elements in the sorted set and diff --git a/content/commands/zrangebyscore/index.md b/content/commands/zrangebyscore/index.md index fc3bc2ab99..7a613a618d 100644 --- a/content/commands/zrangebyscore/index.md +++ b/content/commands/zrangebyscore/index.md @@ -32,6 +32,16 @@ arguments: token: LIMIT type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(log(N)+M) with N being the number of elements in the sorted set and diff --git a/content/commands/zrangestore/index.md b/content/commands/zrangestore/index.md index 24eb501488..df82edfe63 100644 --- a/content/commands/zrangestore/index.md +++ b/content/commands/zrangestore/index.md @@ -47,6 +47,16 @@ arguments: token: LIMIT type: block arity: -5 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -83,7 +93,8 @@ key_specs: linkTitle: ZRANGESTORE since: 6.2.0 summary: Stores a range of members from sorted set in a key. 
-syntax_fmt: "ZRANGESTORE dst src min max [BYSCORE | BYLEX] [REV] [LIMIT\_offset\n\ +syntax_fmt: "ZRANGESTORE dst src min max [BYSCORE | BYLEX] [REV] [LIMIT\_offset +\ \ count]" syntax_str: "src min max [BYSCORE | BYLEX] [REV] [LIMIT\_offset count]" title: ZRANGESTORE diff --git a/content/commands/zrank/index.md b/content/commands/zrank/index.md index d71db478bd..d2abe39001 100644 --- a/content/commands/zrank/index.md +++ b/content/commands/zrank/index.md @@ -17,6 +17,16 @@ arguments: token: WITHSCORE type: pure-token arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/zrem/index.md b/content/commands/zrem/index.md index c0ef5eb90e..816c191491 100644 --- a/content/commands/zrem/index.md +++ b/content/commands/zrem/index.md @@ -13,6 +13,16 @@ arguments: name: member type: string arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - fast diff --git a/content/commands/zremrangebylex/index.md b/content/commands/zremrangebylex/index.md index f44e80eaad..e4fbbed799 100644 --- a/content/commands/zremrangebylex/index.md +++ b/content/commands/zremrangebylex/index.md @@ -15,6 +15,16 @@ arguments: name: max type: string arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(log(N)+M) with N being the number of elements in the sorted set and diff --git a/content/commands/zremrangebyrank/index.md b/content/commands/zremrangebyrank/index.md index 344d1d1825..e493c8b9c5 100644 --- a/content/commands/zremrangebyrank/index.md +++ b/content/commands/zremrangebyrank/index.md @@ -15,6 +15,16 @@ arguments: name: stop type: integer arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(log(N)+M) with N being the number of elements in the 
sorted set and diff --git a/content/commands/zremrangebyscore/index.md b/content/commands/zremrangebyscore/index.md index 63a3e5388b..8390887bb1 100644 --- a/content/commands/zremrangebyscore/index.md +++ b/content/commands/zremrangebyscore/index.md @@ -15,6 +15,16 @@ arguments: name: max type: double arity: 4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write complexity: O(log(N)+M) with N being the number of elements in the sorted set and diff --git a/content/commands/zrevrange/index.md b/content/commands/zrevrange/index.md index 2894c89152..827f151a41 100644 --- a/content/commands/zrevrange/index.md +++ b/content/commands/zrevrange/index.md @@ -20,6 +20,16 @@ arguments: token: WITHSCORES type: pure-token arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(log(N)+M) with N being the number of elements in the sorted set and diff --git a/content/commands/zrevrangebylex/index.md b/content/commands/zrevrangebylex/index.md index 0869944891..9aca773491 100644 --- a/content/commands/zrevrangebylex/index.md +++ b/content/commands/zrevrangebylex/index.md @@ -26,6 +26,16 @@ arguments: token: LIMIT type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(log(N)+M) with N being the number of elements in the sorted set and diff --git a/content/commands/zrevrangebyscore/index.md b/content/commands/zrevrangebyscore/index.md index 44d767bd07..9c26ea08a9 100644 --- a/content/commands/zrevrangebyscore/index.md +++ b/content/commands/zrevrangebyscore/index.md @@ -31,6 +31,16 @@ arguments: token: LIMIT type: block arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(log(N)+M) with N being the number of elements in the sorted set and diff --git 
a/content/commands/zrevrank/index.md b/content/commands/zrevrank/index.md index 142dc182d0..d6ec45c909 100644 --- a/content/commands/zrevrank/index.md +++ b/content/commands/zrevrank/index.md @@ -17,6 +17,16 @@ arguments: token: WITHSCORE type: pure-token arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/zscan/index.md b/content/commands/zscan/index.md index bd6c1cb116..ed674050c9 100644 --- a/content/commands/zscan/index.md +++ b/content/commands/zscan/index.md @@ -22,6 +22,16 @@ arguments: token: COUNT type: integer arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly complexity: O(1) for every call. O(N) for a complete iteration, including enough command diff --git a/content/commands/zscore/index.md b/content/commands/zscore/index.md index acc6c90558..bc1063e8b4 100644 --- a/content/commands/zscore/index.md +++ b/content/commands/zscore/index.md @@ -12,6 +12,16 @@ arguments: name: member type: string arity: 3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - fast diff --git a/content/commands/zunion/index.md b/content/commands/zunion/index.md index fac5062b99..8db2507c4f 100644 --- a/content/commands/zunion/index.md +++ b/content/commands/zunion/index.md @@ -41,6 +41,16 @@ arguments: token: WITHSCORES type: pure-token arity: -3 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - readonly - movablekeys @@ -65,7 +75,8 @@ key_specs: linkTitle: ZUNION since: 6.2.0 summary: Returns the union of multiple sorted sets. -syntax_fmt: "ZUNION numkeys key [key ...] [WEIGHTS\_weight [weight ...]]\n [AGGREGATE\_\ +syntax_fmt: "ZUNION numkeys key [key ...] [WEIGHTS\_weight [weight ...]] + [AGGREGATE\_\ ] [WITHSCORES]" syntax_str: "key [key ...] 
[WEIGHTS\_weight [weight ...]] [AGGREGATE\_] [WITHSCORES]" diff --git a/content/commands/zunionstore/index.md b/content/commands/zunionstore/index.md index bddda0c230..e8d56160aa 100644 --- a/content/commands/zunionstore/index.md +++ b/content/commands/zunionstore/index.md @@ -40,6 +40,16 @@ arguments: token: AGGREGATE type: oneof arity: -4 +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients command_flags: - write - denyoom @@ -77,7 +87,8 @@ key_specs: linkTitle: ZUNIONSTORE since: 2.0.0 summary: Stores the union of multiple sorted sets in a key. -syntax_fmt: "ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS\_weight\n [weight\ +syntax_fmt: "ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS\_weight + [weight\ \ ...]] [AGGREGATE\_]" syntax_str: "numkeys key [key ...] [WEIGHTS\_weight [weight ...]] [AGGREGATE\_]" diff --git a/content/develop/_index.md b/content/develop/_index.md index 7916f182c7..e939c970da 100644 --- a/content/develop/_index.md +++ b/content/develop/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Learn how to develop with Redis hideListLinks: true linkTitle: Develop diff --git a/content/develop/connect/_index.md b/content/develop/connect/_index.md index bc3dad725f..70a816969a 100644 --- a/content/develop/connect/_index.md +++ b/content/develop/connect/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/ui +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Learn how to use user interfaces and client libraries linkTitle: Connect title: Connect to Redis @@ -15,24 +23,24 @@ You can connect to Redis in the following ways: ## Redis command line interface -The [Redis command line interface](/docs/connect/cli) (also known as `redis-cli`) is a terminal program that sends commands to and reads replies from the Redis server. 
It has the following two main modes: +The [Redis command line interface]({{< relref "/develop/connect/cli" >}}) (also known as `redis-cli`) is a terminal program that sends commands to and reads replies from the Redis server. It has the following two main modes: 1. An interactive Read Eval Print Loop (REPL) mode where the user types Redis commands and receives replies. 2. A command mode where `redis-cli` is executed with additional arguments, and the reply is printed to the standard output. ## RedisInsight -[RedisInsight](/docs/connect/insight) combines a graphical user interface with Redis CLI to let you work with any Redis deployment. You can visually browse and interact with data, take advantage of diagnostic tools, learn by example, and much more. Best of all, RedisInsight is free. +[RedisInsight]({{< relref "/develop/connect/insight" >}}) combines a graphical user interface with Redis CLI to let you work with any Redis deployment. You can visually browse and interact with data, take advantage of diagnostic tools, learn by example, and much more. Best of all, RedisInsight is free. ## Client libraries It's easy to connect your application to a Redis database. The official client libraries cover the following languages: -* [C#/.NET](/docs/connect/clients/dotnet) -* [Go](/docs/connect/clients/go) -* [Java](/docs/connect/clients/java) -* [Node.js](/docs/connect/clients/nodejs) -* [Python](/docs/connect/clients/python) +* [C#/.NET]({{< relref "/develop/connect/clients/dotnet" >}}) +* [Go]({{< relref "/develop/connect/clients/go" >}}) +* [Java]({{< relref "/develop/connect/clients/java" >}}) +* [Node.js]({{< relref "/develop/connect/clients/nodejs" >}}) +* [Python]({{< relref "/develop/connect/clients/python" >}}) You can find a complete list of all client libraries, including the community-maintained ones, on the [clients page](/resources/clients/). 
diff --git a/content/develop/connect/cli.md b/content/develop/connect/cli.md index d5ef40da47..9f64661f99 100644 --- a/content/develop/connect/cli.md +++ b/content/develop/connect/cli.md @@ -1,8 +1,14 @@ --- -aliases: -- /docs/manual/cli -- /docs/management/cli -- /docs/ui/cli +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Overview of redis-cli, the Redis command line interface ' diff --git a/content/develop/connect/clients/_index.md b/content/develop/connect/clients/_index.md index e9e4c2d9c3..d281bb9f82 100644 --- a/content/develop/connect/clients/_index.md +++ b/content/develop/connect/clients/_index.md @@ -1,17 +1,23 @@ --- -aliases: -- /docs/redis-clients -- /docs/stack/get-started/clients/ -- /docs/clients/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Connect your application to a Redis database and try an example linkTitle: Clients title: Connect with Redis clients weight: 45 --- -Here, you will learn how to connect your application to a Redis database. If you're new to Redis, you might first want to [install Redis with Redis Stack and RedisInsight](/docs/getting-started/install-stack/). +Here, you will learn how to connect your application to a Redis database. If you're new to Redis, you might first want to [install Redis with Redis Stack and RedisInsight]({{< relref "/develop/getting-started/install-stack/" >}}). -For more Redis topics, see [Using](/docs/manual/) and [Managing](/docs/management/) Redis. +For more Redis topics, see [Using]({{< relref "/develop/manual/" >}}) and [Managing]({{< relref "/develop/management/" >}}) Redis. If you're ready to get started, see the following guides for the official client libraries you can use with Redis. For a complete list of community-driven clients, see [Clients](/resources/clients/). 
@@ -20,9 +26,9 @@ If you're ready to get started, see the following guides for the official client The Redis OM client libraries let you use the document modeling, indexing, and querying capabilities of Redis Stack much like the way you'd use an [ORM](https://en.wikipedia.org/wiki/Object%E2%80%93relational_mapping). The following Redis OM libraries support Redis Stack: -* [Redis OM .NET](/docs/clients/om-clients/stack-dotnet/) -* [Redis OM Node](/docs/clients/om-clients/stack-node/) -* [Redis OM Python](/docs/clients/om-clients/stack-python/) -* [Redis OM Spring](/docs/clients/om-clients/stack-spring/) +* [Redis OM .NET]({{< relref "/develop/clients/om-clients/stack-dotnet/" >}}) +* [Redis OM Node]({{< relref "/develop/clients/om-clients/stack-node/" >}}) +* [Redis OM Python]({{< relref "/develop/clients/om-clients/stack-python/" >}}) +* [Redis OM Spring]({{< relref "/develop/clients/om-clients/stack-spring/" >}})
\ No newline at end of file diff --git a/content/develop/connect/clients/dotnet.md b/content/develop/connect/clients/dotnet.md index dfd8c407c4..a1c0c3457c 100644 --- a/content/develop/connect/clients/dotnet.md +++ b/content/develop/connect/clients/dotnet.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/clients/dotnet/ -- /docs/redis-clients/dotnet/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Connect your .NET application to a Redis database linkTitle: C#/.NET title: C#/.NET guide @@ -13,7 +20,7 @@ Install Redis and the Redis client, then connect your .NET application to a Redi ## NRedisStack [NRedisStack](https://github.com/redis/NRedisStack) is a .NET client for Redis. -`NredisStack` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started](/docs/getting-started/) for Redis installation instructions. +`NredisStack` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started]({{< relref "/develop/getting-started/" >}}) for Redis installation instructions. ### Install @@ -98,7 +105,7 @@ Console.WriteLine(db.StringGet("foo")); // prints bar #### Connect to your production Redis with TLS -When you deploy your application, use TLS and follow the [Redis security](/docs/management/security/) guidelines. +When you deploy your application, use TLS and follow the [Redis security]({{< relref "/develop/management/security/" >}}) guidelines. Before connecting your application to the TLS-enabled Redis server, ensure that your certificates and private keys are in the correct format. @@ -213,7 +220,7 @@ var user3 = new { }; ``` -Create an index. In this example, all JSON documents with the key prefix `user:` are indexed. For more information, see [Query syntax](/docs/interact/search-and-query/query/). +Create an index. In this example, all JSON documents with the key prefix `user:` are indexed. 
For more information, see [Query syntax]({{< relref "/develop/interact/search-and-query/query/" >}}). ```csharp var schema = new Schema() diff --git a/content/develop/connect/clients/go.md b/content/develop/connect/clients/go.md index c48fa43e76..02f239c8e7 100644 --- a/content/develop/connect/clients/go.md +++ b/content/develop/connect/clients/go.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/clients/go/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Connect your Go application to a Redis database linkTitle: Go title: Go guide @@ -107,7 +115,7 @@ client := redis.NewClusterClient(&redis.ClusterOptions{ #### Connect to your production Redis with TLS -When you deploy your application, use TLS and follow the [Redis security](/docs/management/security/) guidelines. +When you deploy your application, use TLS and follow the [Redis security]({{< relref "/develop/management/security/" >}}) guidelines. Establish a secure connection with your Redis database using this snippet. diff --git a/content/develop/connect/clients/java.md b/content/develop/connect/clients/java.md index 28fcc5fc90..fc7e79ebce 100644 --- a/content/develop/connect/clients/java.md +++ b/content/develop/connect/clients/java.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/clients/java/ -- /docs/redis-clients/java/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Connect your Java application to a Redis database linkTitle: Java title: Java guide @@ -107,7 +114,7 @@ JedisCluster jedis = new JedisCluster(jedisClusterNodes); #### Connect to your production Redis with TLS -When you deploy your application, use TLS and follow the [Redis security](/docs/management/security/) guidelines. +When you deploy your application, use TLS and follow the [Redis security]({{< relref "/develop/management/security/" >}}) guidelines. 
Before connecting your application to the TLS-enabled Redis server, ensure that your certificates and private keys are in the correct format. @@ -230,7 +237,7 @@ User user2 = new User("Eden Zamir", "eden.zamir@example.com", 29, "Tel Aviv"); User user3 = new User("Paul Zamir", "paul.zamir@example.com", 35, "Tel Aviv"); ``` -Create an index. In this example, all JSON documents with the key prefix `user:` are indexed. For more information, see [Query syntax](/docs/interact/search-and-query/query/). +Create an index. In this example, all JSON documents with the key prefix `user:` are indexed. For more information, see [Query syntax]({{< relref "/develop/interact/search-and-query/query/" >}}). ```java jedis.ftCreate("idx:users", diff --git a/content/develop/connect/clients/nodejs.md b/content/develop/connect/clients/nodejs.md index 23433e613d..b4e1a9981f 100644 --- a/content/develop/connect/clients/nodejs.md +++ b/content/develop/connect/clients/nodejs.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/clients/nodejs/ -- /docs/redis-clients/nodejs/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Connect your Node.js application to a Redis database linkTitle: Node.js title: Node.js guide @@ -13,7 +20,7 @@ Install Redis and the Redis client, then connect your Node.js application to a R ## node-redis [node-redis](https://github.com/redis/node-redis) is a modern, high-performance Redis client for Node.js. -`node-redis` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started](/docs/getting-started/) for Redis installation instructions. +`node-redis` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started]({{< relref "/develop/getting-started/" >}}) for Redis installation instructions. 
### Install @@ -107,7 +114,7 @@ await cluster.quit(); #### Connect to your production Redis with TLS -When you deploy your application, use TLS and follow the [Redis security](/docs/management/security/) guidelines. +When you deploy your application, use TLS and follow the [Redis security]({{< relref "/develop/management/security/" >}}) guidelines. ```js const client = createClient({ diff --git a/content/develop/connect/clients/om-clients/_index.md b/content/develop/connect/clients/om-clients/_index.md index 0d2e494824..9ac424d7f3 100644 --- a/content/develop/connect/clients/om-clients/_index.md +++ b/content/develop/connect/clients/om-clients/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Object-Mapper libraries for Redis Stack linkTitle: Object mapping (Beta) stack: true @@ -10,9 +20,9 @@ Redis OM (pronounced *REDiss OHM*) is a library that provides object mapping for You can use Redis OM with the following four programming languages: -* [Node.js](/docs/connect/clients/om-clients/stack-node/) -* [Python](/docs/connect/clients/om-clients/stack-python/) -* [C# | .NET](/docs/connect/clients/om-clients/stack-dotnet/) -* [Java | Spring](/docs/connect/clients/om-clients/stack-spring/) +* [Node.js]({{< relref "/develop/connect/clients/om-clients/stack-node/" >}}) +* [Python]({{< relref "/develop/connect/clients/om-clients/stack-python/" >}}) +* [C# | .NET]({{< relref "/develop/connect/clients/om-clients/stack-dotnet/" >}}) +* [Java | Spring]({{< relref "/develop/connect/clients/om-clients/stack-spring/" >}})
diff --git a/content/develop/connect/clients/om-clients/stack-dotnet.md b/content/develop/connect/clients/om-clients/stack-dotnet.md index 743305ba6c..82247940ee 100644 --- a/content/develop/connect/clients/om-clients/stack-dotnet.md +++ b/content/develop/connect/clients/om-clients/stack-dotnet.md @@ -1,8 +1,14 @@ --- -aliases: -- /docs/stack/get-started/tutorials/stack-dotnet/ -- /docs/clients/stack-dotnet/ -- /docs/clients/om-clients/stack-dotnet/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Learn how to build with Redis Stack and .NET linkTitle: OM for .NET stack: true diff --git a/content/develop/connect/clients/om-clients/stack-node.md b/content/develop/connect/clients/om-clients/stack-node.md index 49f5995999..a1cc29f99f 100644 --- a/content/develop/connect/clients/om-clients/stack-node.md +++ b/content/develop/connect/clients/om-clients/stack-node.md @@ -1,8 +1,14 @@ --- -aliases: -- /docs/stack/get-started/tutorials/stack-node/ -- /docs/clients/stack-node/ -- /docs/clients/om-clients/stack-node/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Learn how to build with Redis Stack and Node.js linkTitle: OM for Node.js stack: true diff --git a/content/develop/connect/clients/om-clients/stack-python.md b/content/develop/connect/clients/om-clients/stack-python.md index e249219980..bdb2b4060b 100644 --- a/content/develop/connect/clients/om-clients/stack-python.md +++ b/content/develop/connect/clients/om-clients/stack-python.md @@ -1,8 +1,14 @@ --- -aliases: -- /docs/stack/get-started/tutorials/stack-python/ -- /docs/clients/stack-python/ -- /docs/clients/om-clients/stack-python/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Learn how to build with Redis Stack and Python linkTitle: OM for Python stack: true diff --git a/content/develop/connect/clients/om-clients/stack-spring.md 
b/content/develop/connect/clients/om-clients/stack-spring.md index f1a4672dee..13d461ad28 100644 --- a/content/develop/connect/clients/om-clients/stack-spring.md +++ b/content/develop/connect/clients/om-clients/stack-spring.md @@ -1,8 +1,14 @@ --- -aliases: -- /docs/stack/get-started/tutorials/stack-spring/ -- /docs/clients/stack-spring/ -- /docs/clients/om-clients/stack-spring/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Learn how to build with Redis Stack and Spring linkTitle: OM for Spring / Java stack: true diff --git a/content/develop/connect/clients/python.md b/content/develop/connect/clients/python.md index aed49364d4..a0cb4419fb 100644 --- a/content/develop/connect/clients/python.md +++ b/content/develop/connect/clients/python.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/clients/python/ -- /docs/redis-clients/python/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Connect your Python application to a Redis database linkTitle: Python title: Python guide @@ -14,7 +21,7 @@ Install Redis and the Redis client, then connect your Python application to a Re Get started with the [redis-py](https://github.com/redis/redis-py) client for Redis. -`redis-py` requires a running Redis or [Redis Stack](/docs/getting-started/install-stack/) server. See [Getting started](/docs/getting-started/) for Redis installation instructions. +`redis-py` requires a running Redis or [Redis Stack]({{< relref "/develop/getting-started/install-stack/" >}}) server. See [Getting started]({{< relref "/develop/getting-started/" >}}) for Redis installation instructions. ### Install @@ -88,7 +95,7 @@ For more information, see [redis-py Clustering](https://redis-py.readthedocs.io/ #### Connect to your production Redis with TLS -When you deploy your application, use TLS and follow the [Redis security](/docs/management/security/) guidelines. 
+When you deploy your application, use TLS and follow the [Redis security]({{< relref "/develop/management/security/" >}}) guidelines. ```python import redis @@ -163,7 +170,7 @@ schema = ( ) ``` -Create an index. In this example, all JSON documents with the key prefix `user:` will be indexed. For more information, see [Query syntax](/docs/interact/search-and-query/query/). +Create an index. In this example, all JSON documents with the key prefix `user:` will be indexed. For more information, see [Query syntax]({{< relref "/develop/interact/search-and-query/query/" >}}). ```python rs = r.ft("idx:users") diff --git a/content/develop/connect/insight/_index.md b/content/develop/connect/insight/_index.md index e488af2f13..e020ff6a98 100644 --- a/content/develop/connect/insight/_index.md +++ b/content/develop/connect/insight/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/insight +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Visualize and optimize Redis data linkTitle: RedisInsight stack: true @@ -32,7 +40,7 @@ RedisInsight is a powerful tool for visualizing and optimizing data in Redis or Browse, filter and visualize your key-value Redis data structures. 
* [CRUD](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete) support for lists, hashes, strings, sets, sorted sets, and streams -* CRUD support for [JSON](/docs/stack/json) +* CRUD support for [JSON]({{< relref "/develop/stack/json" >}}) * Group keys according to their namespaces diff --git a/content/develop/connect/insight/tutorials/insight-stream-consumer.md b/content/develop/connect/insight/tutorials/insight-stream-consumer.md index e136ca7f08..91c894799b 100644 --- a/content/develop/connect/insight/tutorials/insight-stream-consumer.md +++ b/content/develop/connect/insight/tutorials/insight-stream-consumer.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Learn how to manage streams and consumer groups in RedisInsight linkTitle: Streams title: Manage streams and consumer groups in RedisInsight @@ -9,7 +19,7 @@ A _stream_ is an append-only log file. When you add data to it, you cannot change it. That may seem like a disadvantage; however, a stream serves as a log or single source of truth. It can also be used as a buffer between processes that work at different speeds and do not need to know about each other. -For more conceptual information about streams, see [Redis Streams](/docs/manual/data-types/streams). +For more conceptual information about streams, see [Redis Streams]({{< relref "/develop/manual/data-types/streams" >}}). In this topic, you will learn how to add and work with streams as well as consumer groups in RedisInsight. 
@@ -199,5 +209,5 @@ Use streams for auditing and processing events in banking, gaming, supply chain, ## Related topics -- [Redis Streams](/docs/manual/data-types/streams) +- [Redis Streams]({{< relref "/develop/manual/data-types/streams" >}}) - [Introducing Redis Streams with RedisInsight, node.js, and Python](https://www.youtube.com/watch?v=q2UOkQmIo9Q) (video) \ No newline at end of file diff --git a/content/develop/data-types/_index.md b/content/develop/data-types/_index.md index 5f0b5f45ca..c858ab38e5 100644 --- a/content/develop/data-types/_index.md +++ b/content/develop/data-types/_index.md @@ -1,8 +1,14 @@ --- -aliases: -- /docs/manual/data-types -- /topics/data-types -- /docs/data-types/tutorial +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Overview of data types supported by Redis linkTitle: Understand data types title: Understand Redis data types @@ -10,7 +16,7 @@ weight: 35 --- Redis is a data structure server. -At its core, Redis provides a collection of native data types that help you solve a wide variety of problems, from [caching](/docs/manual/client-side-caching/) to [queuing](/docs/data-types/lists/) to [event processing](/docs/data-types/streams/). +At its core, Redis provides a collection of native data types that help you solve a wide variety of problems, from [caching]({{< relref "/develop/manual/client-side-caching/" >}}) to [queuing]({{< relref "/develop/data-types/lists/" >}}) to [event processing]({{< relref "/develop/data-types/streams/" >}}). Below is a short description of each data type, with links to broader overviews and command references. If you'd like to try a comprehensive tutorial for each data structure, see their overview pages below. @@ -20,93 +26,93 @@ If you'd like to try a comprehensive tutorial for each data structure, see their ### Strings -[Redis strings](/docs/data-types/strings) are the most basic Redis data type, representing a sequence of bytes. 
+[Redis strings]({{< relref "/develop/data-types/strings" >}}) are the most basic Redis data type, representing a sequence of bytes. For more information, see: -* [Overview of Redis strings](/docs/data-types/strings/) +* [Overview of Redis strings]({{< relref "/develop/data-types/strings/" >}}) * [Redis string command reference](/commands/?group=string) ### Lists -[Redis lists](/docs/data-types/lists) are lists of strings sorted by insertion order. +[Redis lists]({{< relref "/develop/data-types/lists" >}}) are lists of strings sorted by insertion order. For more information, see: -* [Overview of Redis lists](/docs/data-types/lists/) +* [Overview of Redis lists]({{< relref "/develop/data-types/lists/" >}}) * [Redis list command reference](/commands/?group=list) ### Sets -[Redis sets](/docs/data-types/sets) are unordered collections of unique strings that act like the sets from your favorite programming language (for example, [Java HashSets](https://docs.oracle.com/javase/7/docs/api/java/util/HashSet.html), [Python sets](https://docs.python.org/3.10/library/stdtypes.html#set-types-set-frozenset), and so on). +[Redis sets]({{< relref "/develop/data-types/sets" >}}) are unordered collections of unique strings that act like the sets from your favorite programming language (for example, [Java HashSets](https://docs.oracle.com/javase/7/docs/api/java/util/HashSet.html), [Python sets](https://docs.python.org/3.10/library/stdtypes.html#set-types-set-frozenset), and so on). With a Redis set, you can add, remove, and test for existence in O(1) time (in other words, regardless of the number of set elements). For more information, see: -* [Overview of Redis sets](/docs/data-types/sets/) +* [Overview of Redis sets]({{< relref "/develop/data-types/sets/" >}}) * [Redis set command reference](/commands/?group=set) ### Hashes -[Redis hashes](/docs/data-types/hashes) are record types modeled as collections of field-value pairs. 
+[Redis hashes]({{< relref "/develop/data-types/hashes" >}}) are record types modeled as collections of field-value pairs. As such, Redis hashes resemble [Python dictionaries](https://docs.python.org/3/tutorial/datastructures.html#dictionaries), [Java HashMaps](https://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html), and [Ruby hashes](https://ruby-doc.org/core-3.1.2/Hash.html). For more information, see: -* [Overview of Redis hashes](/docs/data-types/hashes/) +* [Overview of Redis hashes]({{< relref "/develop/data-types/hashes/" >}}) * [Redis hashes command reference](/commands/?group=hash) ### Sorted sets -[Redis sorted sets](/docs/data-types/sorted-sets) are collections of unique strings that maintain order by each string's associated score. +[Redis sorted sets]({{< relref "/develop/data-types/sorted-sets" >}}) are collections of unique strings that maintain order by each string's associated score. For more information, see: -* [Overview of Redis sorted sets](/docs/data-types/sorted-sets) +* [Overview of Redis sorted sets]({{< relref "/develop/data-types/sorted-sets" >}}) * [Redis sorted set command reference](/commands/?group=sorted-set) ### Streams -A [Redis stream](/docs/data-types/streams) is a data structure that acts like an append-only log. +A [Redis stream]({{< relref "/develop/data-types/streams" >}}) is a data structure that acts like an append-only log. Streams help record events in the order they occur and then syndicate them for processing. For more information, see: -* [Overview of Redis Streams](/docs/data-types/streams) +* [Overview of Redis Streams]({{< relref "/develop/data-types/streams" >}}) * [Redis Streams command reference](/commands/?group=stream) ### Geospatial indexes -[Redis geospatial indexes](/docs/data-types/geospatial) are useful for finding locations within a given geographic radius or bounding box. 
+[Redis geospatial indexes]({{< relref "/develop/data-types/geospatial" >}}) are useful for finding locations within a given geographic radius or bounding box. For more information, see: -* [Overview of Redis geospatial indexes](/docs/data-types/geospatial/) +* [Overview of Redis geospatial indexes]({{< relref "/develop/data-types/geospatial/" >}}) * [Redis geospatial indexes command reference](/commands/?group=geo) ### Bitmaps -[Redis bitmaps](/docs/data-types/bitmaps/) let you perform bitwise operations on strings. +[Redis bitmaps]({{< relref "/develop/data-types/bitmaps/" >}}) let you perform bitwise operations on strings. For more information, see: -* [Overview of Redis bitmaps](/docs/data-types/bitmaps/) +* [Overview of Redis bitmaps]({{< relref "/develop/data-types/bitmaps/" >}}) * [Redis bitmap command reference](/commands/?group=bitmap) ### Bitfields -[Redis bitfields](/docs/data-types/bitfields/) efficiently encode multiple counters in a string value. +[Redis bitfields]({{< relref "/develop/data-types/bitfields/" >}}) efficiently encode multiple counters in a string value. Bitfields provide atomic get, set, and increment operations and support different overflow policies. For more information, see: -* [Overview of Redis bitfields](/docs/data-types/bitfields/) +* [Overview of Redis bitfields]({{< relref "/develop/data-types/bitfields/" >}}) * The [`BITFIELD`](/commands/bitfield) command. ### HyperLogLog -The [Redis HyperLogLog](/docs/data-types/hyperloglogs) data structures provide probabilistic estimates of the cardinality (i.e., number of elements) of large sets. For more information, see: +The [Redis HyperLogLog]({{< relref "/develop/data-types/hyperloglogs" >}}) data structures provide probabilistic estimates of the cardinality (i.e., number of elements) of large sets. 
For more information, see: -* [Overview of Redis HyperLogLog](/docs/data-types/hyperloglogs) +* [Overview of Redis HyperLogLog]({{< relref "/develop/data-types/hyperloglogs" >}}) * [Redis HyperLogLog command reference](/commands/?group=hyperloglog) ## Extensions To extend the features provided by the included data types, use one of these options: -1. Write your own custom [server-side functions in Lua](/docs/manual/programmability/). -1. Write your own Redis module using the [modules API](/docs/reference/modules/) or check out the [community-supported modules](/docs/modules/). -1. Use [JSON](/docs/stack/json/), [querying](/docs/stack/search/), [time series](/docs/stack/timeseries/), and other capabilities provided by [Redis Stack](/docs/stack/). +1. Write your own custom [server-side functions in Lua]({{< relref "/develop/manual/programmability/" >}}). +1. Write your own Redis module using the [modules API]({{< relref "/develop/reference/modules/" >}}) or check out the [community-supported modules]({{< relref "/develop/modules/" >}}). +1. Use [JSON]({{< relref "/develop/stack/json/" >}}), [querying]({{< relref "/develop/stack/search/" >}}), [time series]({{< relref "/develop/stack/timeseries/" >}}), and other capabilities provided by [Redis Stack]({{< relref "/develop/stack/" >}}).
diff --git a/content/develop/data-types/bitfields.md b/content/develop/data-types/bitfields.md index 5270f07fd2..e36de9b457 100644 --- a/content/develop/data-types/bitfields.md +++ b/content/develop/data-types/bitfields.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Introduction to Redis bitfields ' @@ -22,37 +32,25 @@ Bitfields support atomic read, write and increment operations, making them a goo ## Examples -Suppose you're keeping track of activity in an online game. -You want to maintain two crucial metrics for each player: the total amount of gold and the number of monsters slain. -Because your game is highly addictive, these counters should be at least 32 bits wide. +## Example -You can represent these counters with one bitfield per player. +Suppose you want to maintain two metrics for various bicycles: the current price and the number of owners over time. You can represent these counters with a 32-bit wide bitfield per for each bike. -* New players start the tutorial with 1000 gold (counter in offset 0). -``` -> BITFIELD player:1:stats SET u32 #0 1000 -1) (integer) 0 -``` +* Bike 1 initially costs 1,000 (counter in offset 0) and has never had an owner. After being sold, it's now considered used and the price instantly drops to reflect its new condition, and it now has an owner (offset 1). After quite some time, the bike becomes a classic. The original owner sells it for a profit, so the price goes up and the number of owners does as well.Finally, you can look at the bike's current price and number of owners. -* After killing the goblin holding the prince captive, add the 50 gold earned and increment the "slain" counter (offset 1). -``` -> BITFIELD player:1:stats INCRBY u32 #0 50 INCRBY u32 #1 1 -1) (integer) 1050 -2) (integer) 1 -``` - -* Pay the blacksmith 999 gold to buy a legendary rusty dagger. 
-``` -> BITFIELD player:1:stats INCRBY u32 #0 -999 -1) (integer) 51 -``` - -* Read the player's stats: -``` -> BITFIELD player:1:stats GET u32 #0 GET u32 #1 -1) (integer) 51 +{{< clients-example bitfield_tutorial bf >}} +> BITFIELD bike:1:stats SET u32 #0 1000 +1) (integer) 0 +> BITFIELD bike:1:stats INCRBY u32 #0 -50 INCRBY u32 #1 1 +1) (integer) 950 2) (integer) 1 -``` +> BITFIELD bike:1:stats INCRBY u32 #0 500 INCRBY u32 #1 1 +1) (integer) 1450 +2) (integer) 2 +> BITFIELD bike:1:stats GET u32 #0 GET u32 #1 +1) (integer) 1450 +2) (integer) 2 +{{< /clients-example >}} ## Performance diff --git a/content/develop/data-types/bitmaps.md b/content/develop/data-types/bitmaps.md index 8942b2107e..69e0ef53ee 100644 --- a/content/develop/data-types/bitmaps.md +++ b/content/develop/data-types/bitmaps.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Introduction to Redis bitmaps ' @@ -22,37 +32,30 @@ Some examples of bitmap use cases include: * [`SETBIT`](/commands/setbit) sets a bit at the provided offset to 0 or 1. * [`GETBIT`](/commands/getbit) returns the value of a bit at a given offset. -* [`BITOP`](/commands/bitop) lets you perform bitwise operations against one or more strings. See the [complete list of bitmap commands](https://redis.io/commands/?group=bitmap). -## Examples +## Example -Suppose you have 1000 sensors deployed in the field, labeled 0-999. -You want to quickly determine whether a given sensor has pinged the server within the hour. +Suppose you have 1000 cyclists racing through the country-side, with sensors on their bikes labeled 0-999. +You want to quickly determine whether a given sensor has pinged a tracking server within the hour to check in on a rider. You can represent this scenario using a bitmap whose key references the current hour. -* Sensor 123 pings the server on January 1, 2024 within the 00:00 hour. 
-``` +* Rider 123 pings the server on January 1, 2024 within the 00:00 hour. You can then confirm that rider 123 pinged the server. You can also check to see if rider 456 has pinged the server for that same hour. + +{{< clients-example bitmap_tutorial ping >}} > SETBIT pings:2024-01-01-00:00 123 1 (integer) 0 -``` - -* Did sensor 123 ping the server on January 1, 2024 within the 00:00 hour? -``` > GETBIT pings:2024-01-01-00:00 123 1 -``` - -* What about sensor 456? -``` > GETBIT pings:2024-01-01-00:00 456 0 -``` +{{< /clients-example >}} +## Bit Operations Bit operations are divided into two groups: constant-time single bit operations, like setting a bit to 1 or 0, or getting its value, and @@ -65,15 +68,6 @@ where different users are represented by incremental user IDs, it is possible to remember a single bit information (for example, knowing whether a user wants to receive a newsletter) of 4 billion users using just 512 MB of memory. -Bits are set and retrieved using the [`SETBIT`](/commands/setbit) and [`GETBIT`](/commands/getbit) commands: - - > setbit key 10 1 - (integer) 0 - > getbit key 10 - (integer) 1 - > getbit key 11 - (integer) 0 - The [`SETBIT`](/commands/setbit) command takes as its first argument the bit number, and as its second argument the value to set the bit to, which is 1 or 0. The command automatically enlarges the string if the addressed bit is outside the @@ -90,15 +84,12 @@ There are three commands operating on group of bits: 3. [`BITPOS`](/commands/bitpos) finds the first bit having the specified value of 0 or 1. Both [`BITPOS`](/commands/bitpos) and [`BITCOUNT`](/commands/bitcount) are able to operate with byte ranges of the -string, instead of running for the whole length of the string. The following -is a trivial example of [`BITCOUNT`](/commands/bitcount) call: - - > setbit key 0 1 - (integer) 0 - > setbit key 100 1 - (integer) 0 - > bitcount key - (integer) 2 +string, instead of running for the whole length of the string. 
We can trivially see the number of bits that have been set in a bitmap. + +{{< clients-example bitmap_tutorial bitcount >}} +> BITCOUNT pings:2024-01-01-00:00 +(integer) 1 +{{< /clients-example >}} For example imagine you want to know the longest streak of daily visits of your web site users. You start counting days starting from zero, that is the diff --git a/content/develop/data-types/geospatial.md b/content/develop/data-types/geospatial.md index 33cfb20bed..9eff1179b1 100644 --- a/content/develop/data-types/geospatial.md +++ b/content/develop/data-types/geospatial.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Introduction to the Redis Geospatial data type ' diff --git a/content/develop/data-types/hashes.md b/content/develop/data-types/hashes.md index 9cceff27dc..87e23ed170 100644 --- a/content/develop/data-types/hashes.md +++ b/content/develop/data-types/hashes.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Introduction to Redis hashes ' diff --git a/content/develop/data-types/json/_index.md b/content/develop/data-types/json/_index.md index 79f5ce075a..d552f9e6df 100644 --- a/content/develop/data-types/json/_index.md +++ b/content/develop/data-types/json/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/json +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: JSON support for Redis linkTitle: JSON stack: true @@ -16,7 +24,7 @@ The JSON capability of Redis Stack provides JavaScript Object Notation (JSON) su ## Primary features * Full support for the JSON standard -* A [JSONPath](http://goessner.net/articles/JsonPath/) syntax for selecting/updating elements inside documents (see [JSONPath syntax](/docs/data-types/json/path#jsonpath-syntax)) +* A [JSONPath](http://goessner.net/articles/JsonPath/) syntax for selecting/updating elements inside 
documents (see [JSONPath syntax]({{< relref "/develop/data-types/json/path#jsonpath-syntax" >}})) * Documents stored as binary data in a tree structure, allowing fast access to sub-elements * Typed atomic operations for all JSON value types @@ -39,7 +47,7 @@ The first JSON command to try is [`JSON.SET`](/commands/json.set), which sets a 1) "string" ``` -Note how the commands include the dollar sign character `$`. This is the [path](/docs/data-types/json/path) to the value in the JSON document (in this case it just means the root). +Note how the commands include the dollar sign character `$`. This is the [path]({{< relref "/develop/data-types/json/path" >}}) to the value in the JSON document (in this case it just means the root). Here are a few more string operations. [`JSON.STRLEN`](/commands/json.strlen) tells you the length of the string, and you can append another string to it with [`JSON.STRAPPEND`](/commands/json.strappend). @@ -162,7 +170,7 @@ To run RedisJSON with Docker, use the `redis-stack-server` Docker image: $ docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest ``` -For more information about running Redis Stack in a Docker container, see [Run Redis Stack on Docker](/docs/getting-started/install-stack/docker). +For more information about running Redis Stack in a Docker container, see [Run Redis Stack on Docker]({{< relref "/develop/getting-started/install-stack/docker" >}}). 
### Download binaries diff --git a/content/develop/data-types/json/developer.md b/content/develop/data-types/json/developer.md index c28f64bf05..794f942c68 100644 --- a/content/develop/data-types/json/developer.md +++ b/content/develop/data-types/json/developer.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/json/developer +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Notes on debugging, testing and documentation ' diff --git a/content/develop/data-types/json/indexing_JSON.md b/content/develop/data-types/json/indexing_JSON.md index 533819f655..00f05688df 100644 --- a/content/develop/data-types/json/indexing_JSON.md +++ b/content/develop/data-types/json/indexing_JSON.md @@ -1,14 +1,22 @@ --- -aliases: -- /docs/stack/json/indexing_json +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Combine Redis JSON and Search and Query to index and search JSON documents linkTitle: Index/Search title: Index/Search JSON documents weight: 2 --- -In addition to storing JSON documents, you can also index them using the [Search and Query](/docs/stack/search) feature. This enables full-text search capabilities and document retrieval based on their content. +In addition to storing JSON documents, you can also index them using the [Search and Query]({{< relref "/develop/stack/search" >}}) feature. This enables full-text search capabilities and document retrieval based on their content. -To use these features, you must install two modules: RedisJSON and RediSearch. [Redis Stack](/docs/stack) automatically includes both modules. +To use these features, you must install two modules: RedisJSON and RediSearch. [Redis Stack]({{< relref "/develop/stack" >}}) automatically includes both modules. -See the [tutorial](/docs/stack/search/indexing_json) to learn how to search and query your JSON. 
\ No newline at end of file +See the [tutorial]({{< relref "/develop/stack/search/indexing_json" >}}) to learn how to search and query your JSON. \ No newline at end of file diff --git a/content/develop/data-types/json/path.md b/content/develop/data-types/json/path.md index ec0d01dcc5..c5dde8e104 100644 --- a/content/develop/data-types/json/path.md +++ b/content/develop/data-types/json/path.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/json/path +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Access specific elements within a JSON document linkTitle: Path title: Path diff --git a/content/develop/data-types/json/performance/_index.md b/content/develop/data-types/json/performance/_index.md index a49e4a67c3..accb6468b5 100644 --- a/content/develop/data-types/json/performance/_index.md +++ b/content/develop/data-types/json/performance/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/json/performance +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Performance benchmarks ' diff --git a/content/develop/data-types/json/ram.md b/content/develop/data-types/json/ram.md index 6bf7e480bf..192505e599 100644 --- a/content/develop/data-types/json/ram.md +++ b/content/develop/data-types/json/ram.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/json/ram +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Debugging memory consumption ' diff --git a/content/develop/data-types/json/resp3.md b/content/develop/data-types/json/resp3.md index 6a746ded07..d8f25ce314 100644 --- a/content/develop/data-types/json/resp3.md +++ b/content/develop/data-types/json/resp3.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: JSON RESP2 to RESP3 replies reference for client developers linkTitle: RESP3 migration guide title: Guide for migrating from 
RESP2 to RESP3 replies diff --git a/content/develop/data-types/json/use_cases.md b/content/develop/data-types/json/use_cases.md index d62937f83e..bbd4df9cdb 100644 --- a/content/develop/data-types/json/use_cases.md +++ b/content/develop/data-types/json/use_cases.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'JSON use cases ' diff --git a/content/develop/data-types/lists.md b/content/develop/data-types/lists.md index 1eb258c34e..3939e6609d 100644 --- a/content/develop/data-types/lists.md +++ b/content/develop/data-types/lists.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Introduction to Redis lists ' @@ -122,7 +132,7 @@ taken at constant length in constant time. When fast access to the middle of a large collection of elements is important, there is a different data structure that can be used, called sorted sets. -Sorted sets are covered in the [Sorted sets](/docs/data-types/sorted-sets) tutorial page. +Sorted sets are covered in the [Sorted sets]({{< relref "/develop/data-types/sorted-sets" >}}) tutorial page. ### First steps with Redis Lists @@ -408,7 +418,7 @@ Exercise caution when running these commands, mainly when operating on large lis ## Alternatives -Consider [Redis streams](/docs/data-types/streams) as an alternative to lists when you need to store and process an indeterminate series of events. +Consider [Redis streams]({{< relref "/develop/data-types/streams" >}}) as an alternative to lists when you need to store and process an indeterminate series of events. 
## Learn more diff --git a/content/develop/data-types/probabilistic/Configuration.md b/content/develop/data-types/probabilistic/Configuration.md index ecaafc71fb..a8ae293d9e 100644 --- a/content/develop/data-types/probabilistic/Configuration.md +++ b/content/develop/data-types/probabilistic/Configuration.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'RedisBloom supports multiple module configuration parameters. All of these parameters can only be set at load-time. @@ -12,13 +22,13 @@ weight: 100 Setting configuration parameters at load-time is done by appending arguments after the `--loadmodule` argument when starting a server from the command line or after the `loadmodule` directive in a Redis config file. For example: -In [redis.conf](/docs/manual/config/): +In [redis.conf]({{< relref "/develop/manual/config/" >}}): ```sh loadmodule ./redisbloom.so [OPT VAL]... ``` -From the [Redis CLI](/docs/manual/cli/), using the [MODULE LOAD](/commands/module-load/) command: +From the [Redis CLI]({{< relref "/develop/manual/cli/" >}}), using the [MODULE LOAD](/commands/module-load/) command: ``` 127.0.0.6379> MODULE LOAD redisbloom.so [OPT VAL]... 
diff --git a/content/develop/data-types/probabilistic/_index.md b/content/develop/data-types/probabilistic/_index.md index 829ddb963d..ded7f777bc 100644 --- a/content/develop/data-types/probabilistic/_index.md +++ b/content/develop/data-types/probabilistic/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Probabilistic data structures in Redis linkTitle: Probabilistic title: Probabilistic diff --git a/content/develop/data-types/probabilistic/bloom-filter.md b/content/develop/data-types/probabilistic/bloom-filter.md index 3624a4f591..e4553fc8d3 100644 --- a/content/develop/data-types/probabilistic/bloom-filter.md +++ b/content/develop/data-types/probabilistic/bloom-filter.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/bloom/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Bloom filters are a probabilistic data structure that checks for presence of an element in a set linkTitle: Bloom filter diff --git a/content/develop/data-types/probabilistic/count-min-sketch.md b/content/develop/data-types/probabilistic/count-min-sketch.md index cf4c1d6ac5..8c181ba0af 100644 --- a/content/develop/data-types/probabilistic/count-min-sketch.md +++ b/content/develop/data-types/probabilistic/count-min-sketch.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Count-min sketch is a probabilistic data structure that estimates the frequency of an element in a data stream. 
linkTitle: Count-min sketch diff --git a/content/develop/data-types/probabilistic/cuckoo-filter.md b/content/develop/data-types/probabilistic/cuckoo-filter.md index d30e065515..24c41989b6 100644 --- a/content/develop/data-types/probabilistic/cuckoo-filter.md +++ b/content/develop/data-types/probabilistic/cuckoo-filter.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Cuckoo filters are a probabilistic data structure that checks for presence of an element in a set linkTitle: Cuckoo filter diff --git a/content/develop/data-types/probabilistic/hyperloglogs.md b/content/develop/data-types/probabilistic/hyperloglogs.md index 06fd663862..a63fb0a07b 100644 --- a/content/develop/data-types/probabilistic/hyperloglogs.md +++ b/content/develop/data-types/probabilistic/hyperloglogs.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/data-types/hyperloglogs/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'HyperLogLog is a probabilistic data structure that estimates the cardinality of a set. diff --git a/content/develop/data-types/probabilistic/t-digest.md b/content/develop/data-types/probabilistic/t-digest.md index 7addd45020..e6602ace5a 100644 --- a/content/develop/data-types/probabilistic/t-digest.md +++ b/content/develop/data-types/probabilistic/t-digest.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: t-digest is a probabilistic data structure that allows you to estimate the percentile of a data stream. 
linkTitle: t-digest diff --git a/content/develop/data-types/probabilistic/top-k.md b/content/develop/data-types/probabilistic/top-k.md index bd20368613..f88fff5e86 100644 --- a/content/develop/data-types/probabilistic/top-k.md +++ b/content/develop/data-types/probabilistic/top-k.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Top-K is a probabilistic data structure that allows you to find the most frequent items in a data stream. linkTitle: Top-K diff --git a/content/develop/data-types/sets.md b/content/develop/data-types/sets.md index d517cf66f2..a8c043a243 100644 --- a/content/develop/data-types/sets.md +++ b/content/develop/data-types/sets.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Introduction to Redis sets ' @@ -170,10 +180,10 @@ As an alternative, consider the [`SSCAN`](/commands/sscan), which lets you retri ## Alternatives Sets membership checks on large datasets (or on streaming data) can use a lot of memory. -If you're concerned about memory usage and don't need perfect precision, consider a [Bloom filter or Cuckoo filter](/docs/stack/bloom) as an alternative to a set. +If you're concerned about memory usage and don't need perfect precision, consider a [Bloom filter or Cuckoo filter]({{< relref "/develop/stack/bloom" >}}) as an alternative to a set. Redis sets are frequently used as a kind of index. -If you need to index and query your data, consider the [JSON](/docs/stack/json) data type and the [Search and query](/docs/stack/search) features. +If you need to index and query your data, consider the [JSON]({{< relref "/develop/stack/json" >}}) data type and the [Search and query]({{< relref "/develop/stack/search" >}}) features. 
## Learn more diff --git a/content/develop/data-types/sorted-sets.md b/content/develop/data-types/sorted-sets.md index 0b6dd6d1e6..379533ff0c 100644 --- a/content/develop/data-types/sorted-sets.md +++ b/content/develop/data-types/sorted-sets.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Introduction to Redis sorted sets ' @@ -239,7 +249,7 @@ This command's time complexity is O(log(n) + m), where _m_ is the number of resu ## Alternatives Redis sorted sets are sometimes used for indexing other Redis data structures. -If you need to index and query your data, consider the [JSON](/docs/stack/json) data type and the [Search and query](/docs/stack/search) features. +If you need to index and query your data, consider the [JSON]({{< relref "/develop/stack/json" >}}) data type and the [Search and query]({{< relref "/develop/stack/search" >}}) features. ## Learn more diff --git a/content/develop/data-types/streams.md b/content/develop/data-types/streams.md index 53594e5515..46bb3d94b0 100644 --- a/content/develop/data-types/streams.md +++ b/content/develop/data-types/streams.md @@ -1,8 +1,14 @@ --- -aliases: -- /topics/streams-intro -- /docs/manual/data-types/streams -- /docs/data-types/streams-tutorial/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Introduction to Redis streams ' @@ -930,6 +936,6 @@ A few remarks: ## Learn more -* The [Redis Streams Tutorial](/docs/data-types/streams-tutorial) explains Redis streams with many examples. +* The [Redis Streams Tutorial]({{< relref "/develop/data-types/streams-tutorial" >}}) explains Redis streams with many examples. * [Redis Streams Explained](https://www.youtube.com/watch?v=Z8qcpXyMAiA) is an entertaining introduction to streams in Redis. * [Redis University's RU202](https://university.redis.com/courses/ru202/) is a free, online course dedicated to Redis Streams. 
diff --git a/content/develop/data-types/strings.md b/content/develop/data-types/strings.md index 202fc75bed..d1df0b8570 100644 --- a/content/develop/data-types/strings.md +++ b/content/develop/data-types/strings.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Introduction to Redis strings ' @@ -114,7 +124,7 @@ By default, a single Redis string can be a maximum of 512 MB. ### Bitwise operations -To perform bitwise operations on a string, see the [bitmaps data type](/docs/data-types/bitmaps) docs. +To perform bitwise operations on a string, see the [bitmaps data type]({{< relref "/develop/data-types/bitmaps" >}}) docs. See the [complete list of string commands](/commands/?group=string). @@ -126,7 +136,7 @@ These random-access string commands may cause performance issues when dealing wi ## Alternatives -If you're storing structured data as a serialized string, you may also want to consider Redis [hashes](/docs/data-types/hashes) or [JSON](/docs/stack/json). +If you're storing structured data as a serialized string, you may also want to consider Redis [hashes]({{< relref "/develop/data-types/hashes" >}}) or [JSON]({{< relref "/develop/stack/json" >}}). 
## Learn more diff --git a/content/develop/data-types/timeseries/_index.md b/content/develop/data-types/timeseries/_index.md index ed844c66e1..b06be5c4ad 100644 --- a/content/develop/data-types/timeseries/_index.md +++ b/content/develop/data-types/timeseries/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/timeseries/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Ingest and query time series data with Redis linkTitle: Time series stack: true diff --git a/content/develop/data-types/timeseries/clients.md b/content/develop/data-types/timeseries/clients.md index 94e008e128..64db82be6e 100644 --- a/content/develop/data-types/timeseries/clients.md +++ b/content/develop/data-types/timeseries/clients.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/timeseries/clients +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Time Series Client Libraries ' diff --git a/content/develop/data-types/timeseries/configuration.md b/content/develop/data-types/timeseries/configuration.md index 27cd74ac00..81d31acb8f 100644 --- a/content/develop/data-types/timeseries/configuration.md +++ b/content/develop/data-types/timeseries/configuration.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/timeseries/configuration +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'RedisTimeSeries supports multiple module configuration parameters. All of these parameters can only be set at load-time. @@ -14,13 +22,13 @@ weight: 3 Setting configuration parameters at load-time is done by appending arguments after the `--loadmodule` argument when starting a server from the command line or after the `loadmodule` directive in a Redis config file. For example: -In [redis.conf](/docs/manual/config/): +In [redis.conf]({{< relref "/develop/manual/config/" >}}): ```sh loadmodule ./redistimeseries.so [OPT VAL]... 
``` -From the [Redis CLI](/docs/manual/cli/), using the [MODULE LOAD](/commands/module-load/) command: +From the [Redis CLI]({{< relref "/develop/manual/cli/" >}}), using the [MODULE LOAD](/commands/module-load/) command: ``` 127.0.0.6379> MODULE LOAD redistimeseries.so [OPT VAL]... diff --git a/content/develop/data-types/timeseries/development.md b/content/develop/data-types/timeseries/development.md index 96cbd3ec5c..9686f20658 100644 --- a/content/develop/data-types/timeseries/development.md +++ b/content/develop/data-types/timeseries/development.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/timeseries/development +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Developing RedisTimeSeries ' diff --git a/content/develop/data-types/timeseries/quickstart.md b/content/develop/data-types/timeseries/quickstart.md index bd899c99fb..733f571421 100644 --- a/content/develop/data-types/timeseries/quickstart.md +++ b/content/develop/data-types/timeseries/quickstart.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/timeseries/quickstart +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Quick Start Guide to Time Series ' @@ -188,7 +196,7 @@ With this creation rule, datapoints added to the `sensor1` timeseries will be gr ## Filtering -You can filter yor time series by value, timestamp and labels: +You can filter your time series by value, timestamp and labels: ### Filtering by label You can retrieve datapoints from multiple timeseries in the same query, and the way to do this is by using label filters. 
For example: diff --git a/content/develop/data-types/timeseries/reference/_index.md b/content/develop/data-types/timeseries/reference/_index.md index e7cde828a1..0c1651673e 100644 --- a/content/develop/data-types/timeseries/reference/_index.md +++ b/content/develop/data-types/timeseries/reference/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Reference ' diff --git a/content/develop/data-types/timeseries/reference/out-of-order_performance_considerations/_index.md b/content/develop/data-types/timeseries/reference/out-of-order_performance_considerations/_index.md index d9a4f275dc..2e31b1e538 100644 --- a/content/develop/data-types/timeseries/reference/out-of-order_performance_considerations/_index.md +++ b/content/develop/data-types/timeseries/reference/out-of-order_performance_considerations/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/timeseries/reference/out-of-order_performance_considerations/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Out-of-order / backfilled ingestion performance considerations ' diff --git a/content/develop/data-types/timeseries/use_cases.md b/content/develop/data-types/timeseries/use_cases.md index 80fbdc07d3..b3837c1a62 100644 --- a/content/develop/data-types/timeseries/use_cases.md +++ b/content/develop/data-types/timeseries/use_cases.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/query_syntax/ -- /docs/stack/use-cases/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Time series use cases ' diff --git a/content/develop/get-started/_index.md b/content/develop/get-started/_index.md index b744036bbd..16612f579f 100644 --- a/content/develop/get-started/_index.md +++ b/content/develop/get-started/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/getting-started/ +categories: +- docs +- develop +- stack +- 
oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Redis quick start guides ' @@ -12,10 +20,10 @@ weight: 20 Redis can be used as a database, cache, streaming engine, message broker, and more. The following quick start guides will show you how to use Redis for the following specific purposes: -1. [Data structure store](/docs/get-started/data-store) -2. [Document database](/docs/get-started/document-database) -3. [Vector database](/docs/get-started/vector-database) +1. [Data structure store]({{< relref "/develop/get-started/data-store" >}}) +2. [Document database]({{< relref "/develop/get-started/document-database" >}}) +3. [Vector database]({{< relref "/develop/get-started/vector-database" >}}) Please select the guide that aligns best with your specific usage scenario. -You can find answers to frequently asked questions in the [FAQ](/docs/get-started/faq/). +You can find answers to frequently asked questions in the [FAQ]({{< relref "/develop/get-started/faq/" >}}). diff --git a/content/develop/get-started/data-store.md b/content/develop/get-started/data-store.md index 51fb8604f0..d3a76267b7 100644 --- a/content/develop/get-started/data-store.md +++ b/content/develop/get-started/data-store.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Understand how to use basic Redis data types linkTitle: Data structure store title: Redis as an in-memory data structure store quick start guide @@ -23,11 +33,11 @@ The easiest way to get started with Redis is to use Redis Cloud: -You can alternatively follow the [installation guides](/docs/install/install-stack/) to install Redis on your local machine. +You can alternatively follow the [installation guides]({{< relref "/develop/install/install-stack/" >}}) to install Redis on your local machine. ## Connect -The first step is to connect to Redis. 
You can find further details about the connection options in this documentation site's [connection section](/docs/connect). The following example shows how to connect to a Redis server that runs on localhost (`-h 127.0.0.1`) and listens on the default port (`-p 6379`): +The first step is to connect to Redis. You can find further details about the connection options in this documentation site's [connection section]({{< relref "/develop/connect" >}}). The following example shows how to connect to a Redis server that runs on localhost (`-h 127.0.0.1`) and listens on the default port (`-p 6379`): {{< clients-example search_quickstart connect >}} > redis-cli -h 127.0.0.1 -p 6379 @@ -68,11 +78,11 @@ Hashes are the equivalent of dictionaries (dicts or hash maps). Among other thin 8) "4972" {{< /clients-example >}} -You can get a complete overview of available data types in this documentation site's [data types section](/docs/data-types/). Each data type has commands allowing you to manipulate or retrieve data. The [commands reference](/commands/) provides a sophisticated explanation. +You can get a complete overview of available data types in this documentation site's [data types section]({{< relref "/develop/data-types/" >}}). Each data type has commands allowing you to manipulate or retrieve data. The [commands reference](/commands/) provides a sophisticated explanation. ## Scan the keyspace -Each item within Redis has a unique key. All items live within the Redis [keyspace](/docs/manual/keyspace/). You can scan the Redis keyspace via the [SCAN command](/commands/scan/). Here is an example that scans for the first 100 keys that have the prefix `bike:`: +Each item within Redis has a unique key. All items live within the Redis [keyspace]({{< relref "/develop/manual/keyspace/" >}}). You can scan the Redis keyspace via the [SCAN command](/commands/scan/). 
Here is an example that scans for the first 100 keys that have the prefix `bike:`: {{< clients-example scan_example >}} SCAN 0 MATCH "bike:*" COUNT 100 @@ -84,5 +94,5 @@ SCAN 0 MATCH "bike:*" COUNT 100 You can address more use cases with Redis by learning about Redis Stack. Here are two additional quick start guides: -* [Redis as a document database](/docs/get-started/document-database/) -* [Redis as a vector database](/docs/get-started/vector-database/) \ No newline at end of file +* [Redis as a document database]({{< relref "/develop/get-started/document-database/" >}}) +* [Redis as a vector database]({{< relref "/develop/get-started/vector-database/" >}}) \ No newline at end of file diff --git a/content/develop/get-started/document-database.md b/content/develop/get-started/document-database.md index 5a25cf82ae..9cc53a65d5 100644 --- a/content/develop/get-started/document-database.md +++ b/content/develop/get-started/document-database.md @@ -1,8 +1,14 @@ --- -aliases: -- /docs/interact/search-and-query/quick_start -- /docs/interact/search-and-query/quickstart/ -- /docs/stack/search/quick_start +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Understand how to use Redis as a document database linkTitle: Document database stack: true @@ -14,7 +20,7 @@ weight: 2 This quick start guide shows you how to: 1. Create a secondary index -2. Add [JSON](/docs/data-types/json/) documents +2. Add [JSON]({{< relref "/develop/data-types/json/" >}}) documents 3. 
Search and query your data The examples in this article refer to a simple bicycle inventory that contains JSON documents with the following structure: @@ -31,7 +37,7 @@ The examples in this article refer to a simple bicycle inventory that contains J ## Setup -The easiest way to get started with [Redis Stack](/docs/about/about-stack/) is to use Redis Cloud: +The easiest way to get started with [Redis Stack]({{< relref "/develop/about/about-stack/" >}}) is to use Redis Cloud: 1. Create a [free account](https://redis.com/try-free?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). 2. Follow the instructions to create a free database. @@ -40,12 +46,12 @@ The easiest way to get started with [Redis Stack](/docs/about/about-stack/) is t This free Redis Cloud database comes out of the box with all the Redis Stack features. -You can alternatively use the [installation guides](/docs/install/install-stack) to install Redis Stack on your local machine. +You can alternatively use the [installation guides]({{< relref "/develop/install/install-stack" >}}) to install Redis Stack on your local machine. ## Connect -The first step is to connect to your Redis Stack database. You can find further details about the connection options in this documentation site's [connection section](/docs/connect). The following example shows how to connect to a Redis Stack server that runs on localhost (`-h 127.0.0.1`) and listens on the default port (`-p 6379`): +The first step is to connect to your Redis Stack database. You can find further details about the connection options in this documentation site's [connection section]({{< relref "/develop/connect" >}}). 
The following example shows how to connect to a Redis Stack server that runs on localhost (`-h 127.0.0.1`) and listens on the default port (`-p 6379`):

{{< clients-example search_quickstart connect >}}
> redis-cli -h 127.0.0.1 -p 6379
@@ -59,9 +65,9 @@ You can copy and paste the connection details from the Redis Cloud database conf

## Create an index

-As explained in the [in-memory data store](/docs/get-started/data-store/) quick start guide, Redis allows you to access an item directly via its key. You also learned how to scan the keyspace. Whereby you can use other data structures (e.g., hashes and sorted sets) as secondary indexes, your application would need to maintain those indexes manually. Redis Stack turns Redis into a document database by allowing you to declare which fields are auto-indexed. Redis Stack currently supports secondary index creation on the [hashes](/docs/data-types/hashes) and [JSON](/docs/data-types/json) documents.
+As explained in the [in-memory data store]({{< relref "/develop/get-started/data-store/" >}}) quick start guide, Redis allows you to access an item directly via its key. You also learned how to scan the keyspace. While you can use other data structures (e.g., hashes and sorted sets) as secondary indexes, your application would need to maintain those indexes manually. Redis Stack turns Redis into a document database by allowing you to declare which fields are auto-indexed. Redis Stack currently supports secondary index creation on the [hashes]({{< relref "/develop/data-types/hashes" >}}) and [JSON]({{< relref "/develop/data-types/json" >}}) documents.

-The following example shows an [FT.CREATE](/commands/ft.create/) command that creates an index with some text fields, a numeric field (price), and a tag field (condition). The text fields have a weight of 1.0, meaning they have the same relevancy in the context of full-text searches. The field names follow the [JSONPath](/docs/data-types/json/path/) notion.
Each such index field maps to a property within the JSON document.
+The following example shows an [FT.CREATE](/commands/ft.create/) command that creates an index with some text fields, a numeric field (price), and a tag field (condition). The text fields have a weight of 1.0, meaning they have the same relevancy in the context of full-text searches. The field names follow the [JSONPath]({{< relref "/develop/data-types/json/path/" >}}) notation. Each such index field maps to a property within the JSON document.

{{< clients-example search_quickstart create_index >}}
@@ -73,7 +79,7 @@ Any pre-existing JSON documents with a key prefix `bicycle:` are automatically a

## Add JSON documents

-The example below shows you how to use the [JSON.SET](/commands/ft.set/) command to create new JSON documents:
+The example below shows you how to use the [JSON.SET](/commands/json.set/) command to create new JSON documents:

{{< clients-example search_quickstart add_documents "" 2 >}}
> JSON.SET "bicycle:0" "." "{\"brand\": \"Velorim\", \"model\": \"Jigger\", \"price\": 270, \"description\": \"Small and powerful, the Jigger is the best ride for the smallest of tikes! This is the tiniest kids\\u2019 pedal bike on the market available without a coaster brake, the Jigger is the vehicle of choice for the rare tenacious little rider raring to go.\", \"condition\": \"new\"}"
@@ -163,10 +169,10 @@ Below is a command to perform an exact match query that finds all bicycles with
2) "{\"brand\":\"Noka Bikes\",\"model\":\"Kahuna\",\"price\":3200,\"description\":\"Whether you want to try your hand at XC racing or are looking for a lively trail bike that's just as inspiring on the climbs as it is over rougher ground, the Wilder is one heck of a bike built specifically for short women. 
Both the frames and components have been tweaked to include a women\xe2\x80\x99s saddle, different bars and unique colourway.\",\"condition\":\"used\"}" {{< / clients-example >}} -Please see the [query documentation](/docs/interact/search-and-query/query/) to learn how to make more advanced queries. +Please see the [query documentation]({{< relref "/develop/interact/search-and-query/query/" >}}) to learn how to make more advanced queries. ## Next steps You can learn more about how to use Redis Stack as a vector database in the following quick start guide: -* [Redis as a vector database](/docs/get-started/vector-database/) \ No newline at end of file +* [Redis as a vector database]({{< relref "/develop/get-started/vector-database/" >}}) diff --git a/content/develop/get-started/faq.md b/content/develop/get-started/faq.md index 8b28bf0a5e..0ec280b273 100644 --- a/content/develop/get-started/faq.md +++ b/content/develop/get-started/faq.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/getting-started/faq +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Commonly asked questions when getting started with Redis ' @@ -77,7 +85,7 @@ with an error to write commands (but will continue to accept read-only commands). You can also configure Redis to evict keys when the max memory limit -is reached. See the [eviction policy docs](/docs/manual/eviction/) for more information on this. +is reached. See the [eviction policy docs]({{< relref "/develop/manual/eviction/" >}}) for more information on this. ## Background saving fails with a fork() error on Linux? 
diff --git a/content/develop/get-started/vector-database.md b/content/develop/get-started/vector-database.md index a9191ed2f2..38d81805e4 100644 --- a/content/develop/get-started/vector-database.md +++ b/content/develop/get-started/vector-database.md @@ -1,8 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/vectors/ -- /redisearch/reference/vectors -- /docs/interact/search-and-query/search/vectors/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Understand how to use Redis as a vector database linkTitle: Vector database stack: true @@ -24,13 +30,13 @@ Data is often unstructured, which means that it isn't described by a well-define You can use Redis Stack as a vector database. It allows you to: -* Store vectors and the associated metadata within hashes or [JSON](/docs/data-types/json) documents +* Store vectors and the associated metadata within hashes or [JSON]({{< relref "/develop/data-types/json" >}}) documents * Retrieve vectors * Perform vector searches ## Set a vector database up -The easiest way to get started with [Redis Stack](/docs/about/about-stack/) is to use Redis Cloud: +The easiest way to get started with [Redis Stack]({{< relref "/develop/about/about-stack/" >}}) is to use Redis Cloud: 1. Create a [free account](https://redis.com/try-free?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). 2. Follow the instructions to create a free database. @@ -39,13 +45,13 @@ The easiest way to get started with [Redis Stack](/docs/about/about-stack/) is t This free Redis Cloud database comes out of the box with all the Redis Stack features. -You can alternatively use the [installation guides](/docs/install/install-stack) to install Redis Stack on your local machine. +You can alternatively use the [installation guides]({{< relref "/develop/install/install-stack" >}}) to install Redis Stack on your local machine. 
## Install the required Python packages The code examples are currently provided for Redis CLI and Python. For Python, you will need to create a virtual environment and install the following Python packages: -* `redis`: You can find further details about the `redis-py` client library in the [clients](/docs/connect/clients/python/) section of this documentation site. +* `redis`: You can find further details about the `redis-py` client library in the [clients]({{< relref "/develop/connect/clients/python/" >}}) section of this documentation site. * `pandas`: Pandas is a data analysis library. * `sentence-transformers`: You will use the [SentenceTransformers](https://www.sbert.net/) framework to generate embeddings on full text. Sentence-BERT (SBERT) is a [BERT](https://en.wikipedia.org/wiki/BERT_(language_model)) model modification that produces consistent and contextually rich sentence embeddings. SBERT improves tasks like semantic search and text grouping by allowing for efficient and meaningful comparison of sentence-level semantic similarity. * `tabulate`: This package is optional. Pandas use it to render Markdown. @@ -164,19 +170,19 @@ Here is a breakdown of the `VECTOR` schema field definition: * `DIM 768`: The length or dimension of the embeddings, which you determined previously to be `768`. * `DISTANCE_METRIC COSINE`: The distance function is, in this example, [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity). -You can find further details about all these options in the [vector reference documentation](/docs/interact/search-and-query/advanced-concepts/vectors/). +You can find further details about all these options in the [vector reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). ### 2. Check the state of the index As soon as you execute the [FT.CREATE](https://redis.io/commands/ft.create/) command, the indexing process runs in the background. 
In a short time, all JSON documents should be indexed and ready to be queried. To validate that, you can use the [FT.INFO](https://redis.io/commands/ft.info/) command, which provides details and statistics about the index. Of particular interest are the number of documents successfully indexed and the number of failures: {{< clients-example search_vss validate_index >}} -FT_INFO idx:bikes_vss +FT.INFO idx:bikes_vss {{< /clients-example >}} ## Search and query -This quick start guide focuses on the vector search aspect. Still, you can learn more about how to query based on vector metadata in the [document database quick start guide](/docs/get-started/document-database/). +This quick start guide focuses on the vector search aspect. Still, you can learn more about how to query based on vector metadata in the [document database quick start guide]({{< relref "/develop/get-started/document-database/" >}}). ### 1. Embed your prompts @@ -239,6 +245,6 @@ From the description, this bike is an excellent match for younger children, and ## Next steps -1. You can learn more about the query options, such as pre-filters and radius queries, by reading the [vector reference documentation](/docs/interact/search-and-query/advanced-concepts/vectors/). +1. You can learn more about the query options, such as pre-filters and radius queries, by reading the [vector reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). 2. The complete [search and query documentation](https://redis.io/docs/interact/search-and-query/) might be interesting for you. 3. If you want to follow the code examples more interactively, then you can use the [Jupyter notebook](https://github.com/RedisVentures/redis-vss-getting-started/blob/main/vector_similarity_with_redis.ipynb) that inspired this quick start guide. 
diff --git a/content/develop/interact/_index.md b/content/develop/interact/_index.md index acbfe7a8c0..c22d06dda1 100644 --- a/content/develop/interact/_index.md +++ b/content/develop/interact/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'How to interact with data in Redis, including searching, querying, triggered functions, transactions, and pub/sub. diff --git a/content/develop/interact/programmability/_index.md b/content/develop/interact/programmability/_index.md index dec98bbb1f..61860b66d7 100644 --- a/content/develop/interact/programmability/_index.md +++ b/content/develop/interact/programmability/_index.md @@ -1,7 +1,14 @@ --- -aliases: -- /topics/programmability -- /docs/manual/programmability/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Extending Redis with Lua and Redis Functions ' @@ -10,7 +17,7 @@ title: Redis programmability weight: 20 --- -Redis provides a programming interface that lets you execute custom scripts on the server itself. In Redis 7 and beyond, you can use [Redis Functions](/docs/manual/programmability/functions-intro) to manage and run your scripts. In Redis 6.2 and below, you use [Lua scripting with the EVAL command](/docs/manual/programmability/eval-intro) to program the server. +Redis provides a programming interface that lets you execute custom scripts on the server itself. In Redis 7 and beyond, you can use [Redis Functions]({{< relref "/develop/manual/programmability/functions-intro" >}}) to manage and run your scripts. In Redis 6.2 and below, you use [Lua scripting with the EVAL command]({{< relref "/develop/manual/programmability/eval-intro" >}}) to program the server. 
## Background diff --git a/content/develop/interact/programmability/eval-intro.md b/content/develop/interact/programmability/eval-intro.md index af2db3fb38..e8e21d0f6a 100644 --- a/content/develop/interact/programmability/eval-intro.md +++ b/content/develop/interact/programmability/eval-intro.md @@ -1,7 +1,14 @@ --- -aliases: -- /topics/eval-intro -- /docs/manual/programmability/eval-intro/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Executing Lua in Redis ' diff --git a/content/develop/interact/programmability/functions-intro.md b/content/develop/interact/programmability/functions-intro.md index 93349d4e90..408d1b3b1f 100644 --- a/content/develop/interact/programmability/functions-intro.md +++ b/content/develop/interact/programmability/functions-intro.md @@ -1,7 +1,14 @@ --- -aliases: -- /topics/functions-intro -- /docs/manual/programmability/functions-intro/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Scripting with Redis 7 and beyond ' @@ -10,7 +17,7 @@ title: Redis functions weight: 1 --- -Redis Functions is an API for managing code to be executed on the server. This feature, which became available in Redis 7, supersedes the use of [EVAL](/docs/manual/programmability/eval-intro) in prior versions of Redis. +Redis Functions is an API for managing code to be executed on the server. This feature, which became available in Redis 7, supersedes the use of [EVAL]({{< relref "/develop/manual/programmability/eval-intro" >}}) in prior versions of Redis. ## Prologue (or, what's wrong with Eval Scripts?) 
@@ -273,14 +280,20 @@ redis> FUNCTION LIST 2) "my_hset" 3) "description" 4) (nil) + 5) "flags" + 6) (empty array) 2) 1) "name" 2) "my_hgetall" 3) "description" 4) (nil) + 5) "flags" + 6) (empty array) 3) 1) "name" 2) "my_hlastmodified" 3) "description" 4) (nil) + 5) "flags" + 6) (empty array) ``` You can see that it is easy to update our library with new capabilities. diff --git a/content/develop/interact/programmability/lua-api.md b/content/develop/interact/programmability/lua-api.md index ca16e95324..85142a4cb1 100644 --- a/content/develop/interact/programmability/lua-api.md +++ b/content/develop/interact/programmability/lua-api.md @@ -1,7 +1,14 @@ --- -aliases: -- /topics/lua-api -- /docs/manual/programmability/lua-api/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Executing Lua in Redis ' @@ -445,7 +452,7 @@ redis> FUNCTION LOAD "#!lua name=mylib\n redis.register_function{function_name=' **Important:** Use script flags with care, which may negatively impact if misused. -Note that the default for Eval scripts are different than the default for functions that are mentioned below, see [Eval Flags](/docs/manual/programmability/eval-intro/#eval-flags) +Note that the default for Eval scripts are different than the default for functions that are mentioned below, see [Eval Flags]({{< relref "/develop/manual/programmability/eval-intro/#eval-flags" >}}) When you register a function or load an Eval script, the server does not know how it accesses the database. By default, Redis assumes that all scripts read and write data. @@ -473,7 +480,7 @@ You can use the following flags and instruct the server to treat the scripts' ex However, note that the server will return an error if the script attempts to call a write command. 
Also note that currently [`PUBLISH`](/commands/publish), [`SPUBLISH`](/commands/spublish) and [`PFCOUNT`](/commands/pfcount) are also considered write commands in scripts, because they could attempt to propagate commands to replicas and AOF file.
- For more information please refer to [Read-only scripts](/docs/manual/programmability/#read-only_scripts)
+ For more information please refer to [Read-only scripts]({{< relref "/develop/interact/programmability/#read-only_scripts" >}})

* `allow-oom`: use this flag to allow a script to execute when the server is out of memory (OOM).

@@ -501,7 +508,7 @@ You can use the following flags and instruct the server to treat the scripts' ex

This flag has no effect when cluster mode is disabled.

-Please refer to [Function Flags](/docs/manual/programmability/functions-intro/#function-flags) and [Eval Flags](/docs/manual/programmability/eval-intro/#eval-flags) for a detailed example.
+Please refer to [Function Flags]({{< relref "/develop/interact/programmability/functions-intro/#function-flags" >}}) and [Eval Flags]({{< relref "/develop/interact/programmability/eval-intro/#eval-flags" >}}) for a detailed example.
### `redis.REDIS_VERSION` diff --git a/content/develop/interact/programmability/lua-debugging.md b/content/develop/interact/programmability/lua-debugging.md index dffe49f12a..56a7c8147a 100644 --- a/content/develop/interact/programmability/lua-debugging.md +++ b/content/develop/interact/programmability/lua-debugging.md @@ -1,7 +1,14 @@ --- -aliases: -- /topics/ldb -- /docs/manual/programmability/lua-debugging/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: How to use the built-in Lua debugger linkTitle: Debugging Lua title: Debugging Lua scripts in Redis diff --git a/content/develop/interact/programmability/triggers-and-functions/Configuration.md b/content/develop/interact/programmability/triggers-and-functions/Configuration.md index fb0050c8a6..0a6554565a 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Configuration.md +++ b/content/develop/interact/programmability/triggers-and-functions/Configuration.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Configure the operation parameters ' diff --git a/content/develop/interact/programmability/triggers-and-functions/Debugging.md b/content/develop/interact/programmability/triggers-and-functions/Debugging.md index 91686566a7..5e0bd03cf7 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Debugging.md +++ b/content/develop/interact/programmability/triggers-and-functions/Debugging.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Methods for debugging your Redis Stack functions ' @@ -11,7 +21,7 @@ weight: 5 There are two methods you can use to debug your Redis Stack functions: 1. Make judicious use of the `redis.log` function, which writes to the Redis log file. -1. Use Redis [pub/sub](/docs/interact/pubsub/). +1. 
Use Redis [pub/sub]({{< relref "/develop/interact/pubsub/" >}}). ### Use `redis.log` @@ -36,7 +46,7 @@ After loading the library and executing the function with [`TFCALL`](/commands/t ### Use Redis pub/sub -If you don't have access to your Redis database log files, you can use pub/sub. The following example demonstrates how to use the [client.call](/docs/interact/programmability/triggers-and-functions/concepts/javascript_api/#clientcall) API to publish to a pub/sub channel. +If you don't have access to your Redis database log files, you can use pub/sub. The following example demonstrates how to use the [client.call]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/javascript_api/#clientcall" >}}) API to publish to a pub/sub channel. ```javascript #!js api_version=1.0 name=lib diff --git a/content/develop/interact/programmability/triggers-and-functions/Development.md b/content/develop/interact/programmability/triggers-and-functions/Development.md index aea1a6f172..cbb0ff7d61 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Development.md +++ b/content/develop/interact/programmability/triggers-and-functions/Development.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'How to develop for triggers and functions ' @@ -7,7 +17,7 @@ title: Development weight: 4 --- -To aid in the development of new libraries of triggers and functions, you can use the type declaration files for the [triggers and functions API](/docs/interact/programmability/triggers-and-functions/concepts/javascript_api/), which allows your preferred development environment to provide autocompletion and type checking. 
You can install this information using the following command: +To aid in the development of new libraries of triggers and functions, you can use the type declaration files for the [triggers and functions API]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/javascript_api/" >}}), which allows your preferred development environment to provide autocompletion and type checking. You can install this information using the following command: ```bash npm install https://gitpkg.now.sh/RedisGears/RedisGears/js_api --save-dev diff --git a/content/develop/interact/programmability/triggers-and-functions/Examples.md b/content/develop/interact/programmability/triggers-and-functions/Examples.md index 4784d1e2f9..21746cc619 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Examples.md +++ b/content/develop/interact/programmability/triggers-and-functions/Examples.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'How triggers and functions can be used ' diff --git a/content/develop/interact/programmability/triggers-and-functions/Known_Limitations.md b/content/develop/interact/programmability/triggers-and-functions/Known_Limitations.md index 57f12906e6..cea39e8ac7 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Known_Limitations.md +++ b/content/develop/interact/programmability/triggers-and-functions/Known_Limitations.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Overview of the known limitations ' diff --git a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md index 7ec4ba312d..903c483352 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md +++ 
b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md
@@ -1,6 +1,14 @@
---
-aliases:
-- /docs/interact/programmability/triggers-and-functions/quick_start
+categories:
+- docs
+- develop
+- stack
+- oss
+- rs
+- rc
+- oss
+- kubernetes
+- clients
description: 'Get started with triggers and functions using redis-cli

  '
@@ -9,7 +17,7 @@ title: Quick start using redis-cli
weight: 2
---

-Make sure that you have [Redis Stack installed](/docs/getting-started/install-stack/) and running. Alternatively, you can create a [free Redis Cloud account](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). The triggers and functions preview is available in the fixed subscription plan for the Google Cloud Asia Pacific (Tokyo) and AWS Asia Pacific (Singapore) regions.
+Make sure that you have [Redis Stack installed]({{< relref "/develop/install/install-stack/" >}}) and running. Alternatively, you can create a [free Redis Cloud account](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). The triggers and functions preview is available in the fixed subscription plan for the Google Cloud Asia Pacific (Tokyo) and AWS Asia Pacific (Singapore) regions.
## Connect to Redis Stack
diff --git a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md
index dddbb17256..b9ac879ee0 100644
--- a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md
+++ b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md
@@ -1,4 +1,14 @@
---
+categories:
+- docs
+- develop
+- stack
+- oss
+- rs
+- rc
+- oss
+- kubernetes
+- clients
description: 'Get started with triggers and functions using RedisInsight

  '
@@ -7,7 +17,9 @@ title: Quick start using RedisInsight
weight: 1
---

-Make sure that you have [Redis Stack installed](/docs/getting-started/install-stack/) and running. Alternatively, you can create a [free Redis Cloud account](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). The triggers and functions preview is available in the fixed subscription plan for the Google Cloud Asia Pacific (Tokyo) and AWS Asia Pacific (Singapore) regions.
+Make sure that you have [Redis Stack installed]({{< relref "/develop/install/install-stack/" >}}) and running. Alternatively, you can create a [free Redis Cloud account](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users).
+
+If you haven't already installed RedisInsight, you can download the latest version [here](https://redis.com/redis-enterprise/redis-insight/?_ga=2.232184223.127667221.1704724457-86137583.1685485233&_gl=1*1gygred*_ga*ODYxMzc1ODMuMTY4NTQ4NTIzMw..*_ga_8BKGRQKRPV*MTcwNDkyMzExMC40MDEuMS4xNzA0OTI3MjQ2LjUyLjAuMA..*_gcl_au*MTQzODY1OTU4OS4xNzAxMTg0MzY0). If this is your first time using RedisInsight, you may wish to read through the [RedisInsight guide](https://redis.io/docs/connect/insight/) before continuing with this guide.
## Connect to Redis Stack diff --git a/content/develop/interact/programmability/triggers-and-functions/_index.md b/content/develop/interact/programmability/triggers-and-functions/_index.md index 52907840bd..c5d56bcb6d 100644 --- a/content/develop/interact/programmability/triggers-and-functions/_index.md +++ b/content/develop/interact/programmability/triggers-and-functions/_index.md @@ -11,6 +11,16 @@ bannerText: 'The triggers and functions feature of Redis Stack and its documenta GitHub using the "Create new issue" link in the top right-hand corner of this page. ' +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Trigger and execute JavaScript functions in the Redis process linktitle: Triggers and functions stack: true @@ -25,7 +35,7 @@ The triggers and functions feature of Redis Stack allows running JavaScript func ## Quick links -* [Quick start guide](/docs/interact/programmability/triggers-and-functions/quick_start) +* [Quick start guide]({{< relref "/develop/interact/programmability/triggers-and-functions/quick_start" >}}) * [Source code](https://github.com/RedisGears/RedisGears) * [Latest release](https://github.com/RedisGears/RedisGears/releases) * [Docker image](https://hub.docker.com/r/redis/redis-stack-server/) diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Binary_Data.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Binary_Data.md index 568e2ce10a..05ee5162c8 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Binary_Data.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Binary_Data.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Working with binary data ' @@ -11,12 +21,12 @@ By default, triggers and functions will decode all data as a string and will rai 1. Binary function arguments 2. 
Binary command results -3. Binary key names on [keyspace triggers](/docs/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/) -4. Binary data on [stream triggers](/docs/interact/programmability/triggers-and-functions/concepts/triggers/stream_triggers/) +3. Binary key names on [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/" >}}) +4. Binary data on [stream triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/stream_triggers/" >}}) ### Binary function arguments -It is possible to instruct triggers and functions not to decode function arguments as `JS` `Strings` using the [redis.functionFlags.RAW_ARGUMENTS](/docs/interact/programmability/triggers-and-functions/concepts/function_flags/) function flag. In this case, the function arguments will be given as `JS` `ArrayBuffer`. Example: +It is possible to instruct triggers and functions not to decode function arguments as `JS` `Strings` using the [redis.functionFlags.RAW_ARGUMENTS]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/function_flags/" >}}) function flag. In this case, the function arguments will be given as `JS` `ArrayBuffer`. Example: ```js #!js api_version=1.0 name=lib @@ -70,7 +80,7 @@ Notice that a `JS` `ArrayBuffer` can be returned by a function, it will be retur ### Binary keys names on database triggers -On [keyspace triggers](/docs/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/), if the key name that triggered the event is binary, the `data.key` field will be NULL. 
The `data.key_raw` field is always provided as a `JS` `ArrayBuffer` and can be used as in the following example:
+On [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/" >}}), if the key name that triggered the event is binary, the `data.key` field will be NULL. The `data.key_raw` field is always provided as a `JS` `ArrayBuffer` and can be used as in the following example:

```js
#!js api_version=1.0 name=lib
@@ -106,11 +116,11 @@ OK
3) "\xaa"
```

-For more information see [keyspace triggers](/docs/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/).
+For more information see [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/" >}}).

### Binary data on stream consumers

-On [stream triggers](/docs/interact/programmability/triggers-and-functions/concepts/triggers/stream_triggers/), if the key name is binary. The `data.stream_name` field will be NULL. The `data.stream_name_raw` field is always provided as a `JS` `ArrayBuffer` and can be used in this case. In addition, if the content of the steam is binary, it will also appear as `null` under `data.record`. In this case, it is possible to use `data.record` (which always exists) and contains the data as a `JS` `ArrayBuffer`. Example:
+On [stream triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/stream_triggers/" >}}), if the key name is binary, the `data.stream_name` field will be NULL. The `data.stream_name_raw` field is always provided as a `JS` `ArrayBuffer` and can be used in this case. In addition, if the content of the stream is binary, it will also appear as `null` under `data.record`. In this case, it is possible to use `data.record` (which always exists) and contains the data as a `JS` `ArrayBuffer`.
Example: ```js #!js api_version=1.0 name=lib diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md index 631922c5ca..18c259ab4b 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Cluster support for triggers and functions ' @@ -21,7 +31,7 @@ redis.registerClusterFunction("dbsize", async(async_client) => { }); ``` -`redis.registerClusterFunction` is passed the remote function name, which will be used later to call the remote function, and the remote function code. The remote function must be a Coroutine (async function) and it is executed in the background on the remote shard. For more information about async function, please refer to [sync and async](/docs/interact/programmability/triggers-and-functions/concepts/sync_async/) page. +`redis.registerClusterFunction` is passed the remote function name, which will be used later to call the remote function, and the remote function code. The remote function must be a Coroutine (async function) and it is executed in the background on the remote shard. For more information about async function, please refer to [sync and async]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/sync_async/" >}}) page. We have couple of options for calling a remote function. These options are exposed through the async client that is given to a Coroutine: @@ -99,11 +109,11 @@ The remote function arguments and results are serialized in the following way: ## Execution timeout -Remote functions will not be permitted to run forever and will timeout. 
The timeout period can be configured using [remote-task-default-timeout](/docs/interact/programmability/triggers-and-functions/configuration/#remote-task-default-timeout). When using `async_client.runOnShards` API, the timeout will be added as error to the error array. When using `async_client.runOnKey`, a timeout will cause an exception to be raised.
+Remote functions will not be permitted to run forever and will time out. The timeout period can be configured using [remote-task-default-timeout]({{< relref "/develop/interact/programmability/triggers-and-functions/configuration/#remote-task-default-timeout" >}}). When using `async_client.runOnShards` API, the timeout will be added as error to the error array. When using `async_client.runOnKey`, a timeout will cause an exception to be raised.

## Remote function limitations

-All the limitations listed on [coroutines](/docs/interact/programmability/triggers-and-functions/concepts/sync_async/) also apply to remote functions. Remote function also come with some extra limitations:
+All the limitations listed on [coroutines]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/sync_async/" >}}) also apply to remote functions. Remote functions also come with some extra limitations:

* Remote functions can only perform read operations. An attempt to perform a write operation will result in an error.
* Remote function are not guaranteed to succeed (if the shard crashed for example). In such cases a timeout error will be given.
diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Function_Flags.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Function_Flags.md index a33ae90da3..09c15f9537 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Function_Flags.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Function_Flags.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Function flags for JavaScript functions ' diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API.md b/content/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API.md index cf30ae3d14..8440a0bc0c 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Overview of the JavaScript API ' diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md index f4e1c5ff27..f1fa95e5d8 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'How to use configuration in JavaScript functions ' @@ -7,7 +17,7 @@ title: Library configuration weight: 6 --- -When writing a library, you may want to provide a loading configuration so that different users can use the same library 
with slightly different behaviour, without changing the base code. For example, assume you write a library that adds a `__last_updated__` field to a hash (you can see how it can also be done with [keyspace triggers](/docs/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/)), the code will look like this: +When writing a library, you may want to provide a loading configuration so that different users can use the same library with slightly different behaviour, without changing the base code. For example, assume you write a library that adds a `__last_updated__` field to a hash (you can see how it can also be done with [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/" >}})), the code will look like this: ```js #!js api_version=1.0 name=lib diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/RESP_JS_Conversion.md b/content/develop/interact/programmability/triggers-and-functions/concepts/RESP_JS_Conversion.md index 60d11c059b..2e0434af4d 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/RESP_JS_Conversion.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/RESP_JS_Conversion.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Converting RESP to and from JavaScript ' diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md index 3b5f7a4084..3241bae355 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients 
description: 'Sync and async functions ' @@ -218,7 +228,7 @@ RedisGears also provided `client.callAsyncRaw` API, which is the same as `client Blocking Redis might fail for a few reasons: -* Redis reached OOM state and the `redis.functionFlags.NO_WRITES` or `redis.functionFlags.ALLOW_OOM` flags are not set (see [functions flags](/docs/interact/programmability/triggers-and-functions/concepts/function_flags/) for more information) +* Redis reached OOM state and the `redis.functionFlags.NO_WRITES` or `redis.functionFlags.ALLOW_OOM` flags are not set (see [functions flags]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/function_flags/" >}}) for more information) * `redis.functionFlags.NO_WRITES` flag is not set and the Redis instance changed roles and is now a replica. * The ACL user that invoked the function was deleted. @@ -226,7 +236,7 @@ The failure will result in an exception that the function writer can choose to h # Block Redis timeout -Blocking Redis for a long time is discouraged and is considered an unsafe operation. The triggers and functions feature attempts to protect the function writer and will time out the blocking function if it continues for too long. The timeout can be set as a [module configuration](/docs/interact/programmability/triggers-and-functions/configuration/) along side the fatal failure policy that indicates how to handle the timeout. Policies can be one of the following: +Blocking Redis for a long time is discouraged and is considered an unsafe operation. The triggers and functions feature attempts to protect the function writer and will time out the blocking function if it continues for too long. The timeout can be set as a [module configuration]({{< relref "/develop/interact/programmability/triggers-and-functions/configuration/" >}}) along side the fatal failure policy that indicates how to handle the timeout. 
Policies can be one of the following: * Abort - Stop the function invocation even at the cost of losing the atomicity property. * Kill - Keep the atomicity property and do not stop the function invocation. In this case there is a risk of an external process killing the Redis server, thinking that the shard is not responding. diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/_index.md b/content/develop/interact/programmability/triggers-and-functions/concepts/_index.md index 78232c7b36..8867945945 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/_index.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Details about triggers, the JavaScript API, and advanced concepts ' diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md index 6c861fbbe7..1c8282225e 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Execute a JavaScript function based on a keyspace notification ' @@ -35,7 +45,7 @@ Argument Description: * `consumer`: The consumer name. * `prefix `: The key prefix on which the trigger should be fired. -* `callback`: The callback function to invoke, following the same rules of [Sync and Async invocation](/docs/interact/programmability/triggers-and-functions/concepts/sync_async/). The callback will only be invoked on the primary shard. 
+* `callback`: The callback function to invoke, following the same rules of [Sync and Async invocation]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/sync_async/" >}}). The callback will only be invoked on the primary shard. Run the example: @@ -111,7 +121,7 @@ We can display trigger information using [`TFUNCTION LIST`](/commands/tfunction- If the callback function passed to the trigger is a `JS` function (not a Coroutine), it is guaranteed that the callback will be invoked atomically along side the operation that caused the trigger; meaning all clients will see the data only after the callback has completed. In addition, it is guaranteed that the effect of the callback will be replicated to the replica and the AOF in a `multi/exec` block together with the command that fired the trigger. -If the callback is a Coroutine, it will be executed in the background and there is no guarantee on where or if it will be executed. The guarantees are the same as described on [sync and async invocation](/docs/interact/programmability/triggers-and-functions/concepts/sync_async/). +If the callback is a Coroutine, it will be executed in the background and there is no guarantee on where or if it will be executed. The guarantees are the same as described on [sync and async invocation]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/sync_async/" >}}). 
## Upgrades diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md index 072e3b6dcb..01e0e0fed1 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Execute a JavaScript function when an item is added to a stream ' @@ -34,9 +44,9 @@ Argument Description: * consumer - the consumer name. * stream - streams name prefix on which to trigger the callback. -* callback - the callback to invoke on each element in the stream. Following the same rules of [sync and async invocation](/docs/interact/programmability/triggers-and-functions/concepts/sync_async/). The callback will be invoke only on primary shard. +* callback - the callback to invoke on each element in the stream. Following the same rules of [sync and async invocation]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/sync_async/" >}}). The callback will be invoked only on primary shard. 
-If we register this library (see the [quick start](/docs/interact/programmability/triggers-and-functions/quick_start/) section to learn how to Register a RedisGears function) and run the following command on our Redis: +If we register this library (see the [quick start]({{< relref "/develop/interact/programmability/triggers-and-functions/quick_start/" >}}) section to learn how to Register a RedisGears function) and run the following command on our Redis: ``` XADD stream:1 * foo1 bar1 diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/User_Functions.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/User_Functions.md index c8f1b33952..4d5c89ec9e 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/User_Functions.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/User_Functions.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Execute JavaScript functions via `TFCALL` or `TFCALLASYNC` ' diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/_index.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/_index.md index e228ed597f..98641a46a3 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/_index.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Different ways of executing JavaScript functions ' diff --git a/content/develop/interact/pubsub.md b/content/develop/interact/pubsub.md index 83faabeff6..1f892b3bf7 100644 --- a/content/develop/interact/pubsub.md +++ b/content/develop/interact/pubsub.md @@ -1,8 +1,14 @@ --- -aliases: -- 
/topics/pubsub -- /docs/manual/pub-sub -- /docs/manual/pubsub +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: How to use pub/sub channels in Redis linkTitle: Pub/sub title: Redis Pub/Sub @@ -48,7 +54,7 @@ As the name suggests, it means that a message will be delivered once if at all. Once the message is sent by the Redis server, there's no chance of it being sent again. If the subscriber is unable to handle the message (for example, due to an error or a network disconnect) the message is forever lost. -If your application requires stronger delivery guarantees, you may want to learn about [Redis Streams](/docs/data-types/streams-tutorial). +If your application requires stronger delivery guarantees, you may want to learn about [Redis Streams]({{< relref "/develop/data-types/streams-tutorial" >}}). Messages in streams are persisted, and support both _at-most-once_ as well as _at-least-once_ delivery semantics. ## Format of pushed messages diff --git a/content/develop/interact/search-and-query/_index.md b/content/develop/interact/search-and-query/_index.md index c4172ae7c8..0557d1abf3 100644 --- a/content/develop/interact/search-and-query/_index.md +++ b/content/develop/interact/search-and-query/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Searching and querying Redis data highlighted: true linkTitle: Search and query @@ -18,7 +26,7 @@ Redis Stack offers an enhanced Redis experience via the following search and que - Geospatial queries - Aggregations -You can find a complete list of features in the [reference documentation](/docs/interact/search-and-query/advanced-concepts/). +You can find a complete list of features in the [reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/" >}}). 
The search and query features of Redis Stack allow you to use Redis as a: @@ -29,9 +37,9 @@ The search and query features of Redis Stack allow you to use Redis as a: Here are the next steps to get you started: -1. Follow our [quick start guide](/docs/get-started/document-database/) to get some initial hands-on experience. -2. Learn how to [create an index](/docs/interact/search-and-query/indexing/). -3. Learn how to [query your data](/docs/interact/search-and-query/query/). +1. Follow our [quick start guide]({{< relref "/develop/get-started/document-database/" >}}) to get some initial hands-on experience. +2. Learn how to [create an index]({{< relref "/develop/interact/search-and-query/indexing/" >}}). +3. Learn how to [query your data]({{< relref "/develop/interact/search-and-query/query/" >}}). ## License and source code diff --git a/content/develop/interact/search-and-query/administration/_index.md b/content/develop/interact/search-and-query/administration/_index.md index d4f1bc096d..f513209b50 100644 --- a/content/develop/interact/search-and-query/administration/_index.md +++ b/content/develop/interact/search-and-query/administration/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/administration/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Administration of search and query ' diff --git a/content/develop/interact/search-and-query/administration/design.md b/content/develop/interact/search-and-query/administration/design.md index 18e14b6fb0..2c7e9fcdd4 100644 --- a/content/develop/interact/search-and-query/administration/design.md +++ b/content/develop/interact/search-and-query/administration/design.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/design/design/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Details about design choices and implementations ' diff --git 
a/content/develop/interact/search-and-query/administration/extensions.md b/content/develop/interact/search-and-query/administration/extensions.md index db9aa2bbb8..b4c0f7f113 100644 --- a/content/develop/interact/search-and-query/administration/extensions.md +++ b/content/develop/interact/search-and-query/administration/extensions.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/extensions/ -- /redisearch/reference/extensions +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Details about extensions for query expanders and scoring functions linkTitle: Extensions title: Extensions diff --git a/content/develop/interact/search-and-query/administration/gc.md b/content/develop/interact/search-and-query/administration/gc.md index 9456776bd9..f1ad47a762 100644 --- a/content/develop/interact/search-and-query/administration/gc.md +++ b/content/develop/interact/search-and-query/administration/gc.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/design/gc/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Details about garbage collection linkTitle: Garbage collection title: Garbage collection diff --git a/content/develop/interact/search-and-query/administration/indexing.md b/content/develop/interact/search-and-query/administration/indexing.md index 1cc4f4adc8..2ed8c07db3 100644 --- a/content/develop/interact/search-and-query/administration/indexing.md +++ b/content/develop/interact/search-and-query/administration/indexing.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/design/indexing/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: This document describes how documents are added to the index. 
linkTitle: Indexing title: Document Indexing diff --git a/content/develop/interact/search-and-query/administration/overview.md b/content/develop/interact/search-and-query/administration/overview.md index 4143d567da..c39230ff1d 100644 --- a/content/develop/interact/search-and-query/administration/overview.md +++ b/content/develop/interact/search-and-query/administration/overview.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/overview -- /docs/stack/search/design/overview/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Technical details of the internal design of RediSearch ' @@ -128,7 +135,7 @@ Optionally, you can choose not to save any one of those attributes besides the I ### Numeric index -Numeric properties are indexed in a special data structure that enables filtering by numeric ranges in an efficient way. One could view a numeric value as a term operating just like an inverted index. For example, all the products with the price $100 are in a specific list, which is intersected with the rest of the query. See [query execution engine](/docs/interact/search-and-query/administration/design/#query-execution-engine) for more information. +Numeric properties are indexed in a special data structure that enables filtering by numeric ranges in an efficient way. One could view a numeric value as a term operating just like an inverted index. For example, all the products with the price $100 are in a specific list, which is intersected with the rest of the query. See [query execution engine]({{< relref "/develop/interact/search-and-query/administration/design/#query-execution-engine" >}}) for more information. However, in order to filter by a range of prices, you would have to intersect the query with all the distinct prices within that range, or perform a union query. If the range has many values in it, this becomes highly inefficient. 
@@ -166,7 +173,7 @@ Simple syntax is supported for complex queries that can be combined together to * Selection of specific fields using the syntax `@field:hello world`. * Numeric Range matches on numeric fields with the syntax `@field:[{min} {max}]`. * Geo radius matches on geo fields with the syntax `@field:[{lon} {lat} {radius} {m|km|mi|ft}]` -* Tag field filters with the syntax `@field:{tag | tag | ...}`. See the [full documentation on tag fields](/docs/interact/search-and-query/query/#tag-filters). +* Tag field filters with the syntax `@field:{tag | tag | ...}`. See the [full documentation on tag fields]({{< relref "/develop/interact/search-and-query/query/#tag-filters" >}}). * Optional terms or clauses: `foo ~bar` means bar is optional but documents with bar in them will rank higher. ### Complex query examples @@ -222,7 +229,7 @@ And negative clauses can also be added to filter out plasma and CRT TVs: Redis Stack comes with a few very basic scoring functions to evaluate document relevance. They are all based on document scores and term frequency. This is regardless of the ability to use sortable fields (see below). Scoring functions are specified by adding the `SCORER {scorer_name}` argument to a search request. -If you prefer a custom scoring function, it is possible to add more functions using the [extension API](/docs/interact/search-and-query/administration/extensions/). +If you prefer a custom scoring function, it is possible to add more functions using the [extension API]({{< relref "/develop/interact/search-and-query/administration/extensions/" >}}). 
These are the pre-bundled scoring functions available in Redis Stack: diff --git a/content/develop/interact/search-and-query/advanced-concepts/_index.md b/content/develop/interact/search-and-query/advanced-concepts/_index.md index d9005ba679..694e570176 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/_index.md +++ b/content/develop/interact/search-and-query/advanced-concepts/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Details about query syntax, aggregation, scoring, and other search and query options linkTitle: Advanced concepts @@ -14,7 +22,7 @@ Redis Stack supports the following search and query features. This article provi * Secondary indexing * Vector indexing -* Index on [JSON](/docs/data-types/json/) documents +* Index on [JSON]({{< relref "/develop/data-types/json/" >}}) documents * Full-text indexing of multiple fields in a document * Incremental indexing without performance loss * Document deletion and updating with index garbage collection @@ -23,24 +31,24 @@ Redis Stack supports the following search and query features. 
This article provi ## Query features * Multi-field queries -* Query on [JSON](/docs/data-types/json/) documents -* [Aggregation](/docs/interact/search-and-query/search/aggregations/) +* Query on [JSON]({{< relref "/develop/data-types/json/" >}}) documents +* [Aggregation]({{< relref "/develop/interact/search-and-query/search/aggregations/" >}}) * Boolean queries with AND, OR, and NOT operators between subqueries * Optional query clauses * Retrieval of full document contents or only their IDs * Exact phrase search and slop-based search -* [Numeric filters](/docs/interact/search-and-query/query/#numeric-filters-in-query) and ranges -* [Geo-filtering](/docs/interact/search-and-query/query/#geo-filters-in-query) using Redis [geo commands](/commands/?group=geo) -* [Vector similartiy search](/docs/interact/search-and-query/advanced-concepts/vectors/) +* [Numeric filters]({{< relref "/develop/interact/search-and-query/query/#numeric-filters-in-query" >}}) and ranges +* [Geo-filtering]({{< relref "/develop/interact/search-and-query/query/#geo-filters-in-query" >}}) using Redis [geo commands](/commands/?group=geo) +* [Vector similartiy search]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}) ## Full-text search features -* [Prefix-based searches](/docs/interact/search-and-query/query/#prefix-matching) +* [Prefix-based searches]({{< relref "/develop/interact/search-and-query/query/#prefix-matching" >}}) * Field weights -* [Auto-complete](/docs/interact/search-and-query/administration/overview/#auto-complete) and fuzzy prefix suggestions -* [Stemming](/docs/interact/search-and-query/advanced-concepts/stemming/)-based query expansion for [many languages](/docs/interact/search-and-query/advanced-concepts/stemming//#supported-languages) using [Snowball](http://snowballstem.org/) -* Support for custom functions for query expansion and scoring (see [Extensions](/docs/interact/search-and-query/administration/extensions/)) +* [Auto-complete]({{< relref 
"/develop/interact/search-and-query/administration/overview/#auto-complete" >}}) and fuzzy prefix suggestions +* [Stemming]({{< relref "/develop/interact/search-and-query/advanced-concepts/stemming/" >}})-based query expansion for [many languages]({{< relref "/develop/interact/search-and-query/advanced-concepts/stemming//#supported-languages" >}}) using [Snowball](http://snowballstem.org/) +* Support for custom functions for query expansion and scoring (see [Extensions]({{< relref "/develop/interact/search-and-query/administration/extensions/" >}})) * Unicode support (UTF-8 input required) * Document ranking diff --git a/content/develop/interact/search-and-query/advanced-concepts/aggregations.md b/content/develop/interact/search-and-query/advanced-concepts/aggregations.md index 068defcf93..971fda91c0 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/aggregations.md +++ b/content/develop/interact/search-and-query/advanced-concepts/aggregations.md @@ -1,8 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/aggregations/ -- /redisearch/reference/aggregations -- /docs/interact/search-and-query/search/aggregations/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Groupings, projections, and aggregation functions linkTitle: Aggregations title: Aggregations @@ -143,7 +149,7 @@ FT.AGGREGATE myIndex "*" SORTBY 2 @hour ASC ``` -And as a final step, format the hour as a human readable timestamp. This is done by calling the transformation function `timefmt` that formats Unix timestamps. You can specify a format to be passed to the system's `strftime` function ([see documentation](http://strftime.org/)), but not specifying one is equivalent to specifying `%FT%TZ` to `strftime`. +And as a final step, format the hour as a human readable timestamp. This is done by calling the transformation function `timefmt` that formats Unix timestamps. 
You can specify a format to be passed to the system's `strftime` function ([see documentation](https://pubs.opengroup.org/onlinepubs/9699919799/functions/strftime.html)), but not specifying one is equivalent to specifying `%FT%TZ` to `strftime`. ``` FT.AGGREGATE myIndex "*" @@ -401,7 +407,7 @@ Note that these operators apply only to numeric values and numeric sub-expressio | Function | Description | | ------------------- | ------------------------------------------------------------ | -| timefmt(x, [fmt]) | Return a formatted time string based on a numeric timestamp value x.
See [strftime](http://strftime.org/) for formatting options.
Not specifying `fmt` is equivalent to `%FT%TZ`. | +| timefmt(x, [fmt]) | Return a formatted time string based on a numeric timestamp value x.
See [strftime](https://pubs.opengroup.org/onlinepubs/9699919799/functions/strftime.html) for formatting options.
Not specifying `fmt` is equivalent to `%FT%TZ`. | | parsetime(timesharing, [fmt]) | The opposite of timefmt() - parse a time format using a given format string | | day(timestamp) | Round a Unix timestamp to midnight (00:00) start of the current day. | | hour(timestamp) | Round a Unix timestamp to the beginning of the current hour. | diff --git a/content/develop/interact/search-and-query/advanced-concepts/chinese.md b/content/develop/interact/search-and-query/advanced-concepts/chinese.md index 00cac8b0a5..36f7b856b1 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/chinese.md +++ b/content/develop/interact/search-and-query/advanced-concepts/chinese.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/chinese/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Chinese support linkTitle: Chinese title: Chinese diff --git a/content/develop/interact/search-and-query/advanced-concepts/dialects.md b/content/develop/interact/search-and-query/advanced-concepts/dialects.md new file mode 100644 index 0000000000..98b14c5b1c --- /dev/null +++ b/content/develop/interact/search-and-query/advanced-concepts/dialects.md @@ -0,0 +1,228 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: 'Learn how to use query dialects + + ' +linkTitle: Query dialects +title: Query dialects +weight: 5 +--- + +Redis Stack currently supports four query dialects for use with the [`FT.SEARCH`](/commands/ft.search), [`FT.AGGREGATE`](/commands/ft.aggregate), and other search and query commands. 
+Dialects provide for enhancing the query API incrementally, introducing innovative behaviors and new features that support new use cases in a way that does not break the API for existing applications. + +## `DIALECT 1` + +Dialect version 1 was the default query syntax dialect from the first release of search and query until dialect version 2 was introduced with version [2.4](https://github.com/RediSearch/RediSearch/releases/tag/v2.4.3). +This dialect is also the default dialect. See below for information about changing the default dialect. + +## `DIALECT 2` + +Dialect version 2 was introduced in the [2.4](https://github.com/RediSearch/RediSearch/releases/tag/v2.4.3) release to address query parser inconsistencies found in previous versions of Redis Stack. Dialect version 1 remains the default dialect. To use dialect version 2, append `DIALECT 2` to your query command. +Support for vector search also was introduced in the 2.4 release and requires `DIALECT 2`. See [here](https://redis.io/docs/interact/search-and-query/query/vector-search/) for more details. +`FT.SEARCH ... DIALECT 2` + +It was determined that under certain conditions some query parsing rules did not behave as originally intended. +Particularly, some queries containing the operators below could return unexpected results. + +1. AND, multi-word phrases that imply intersection +1. `"..."` (exact), `~` (optional), `-` (negation), and `%` (fuzzy) +1. OR, words separated by the `|` (pipe) character that imply union +1. wildcard characters + +Existing queries that used dialect 1 may behave differently using dialect 2 if they fall into any of the following categories: + +1. Your query has a field modifier followed by multiple words. Consider the sample query: + + `@name:James Brown` + + Here, the field modifier `@name` is followed by two words, `James` and `Brown`. + + In `DIALECT 1`, this query would be interpreted as find `James Brown` in the `@name` field. 
+ In `DIALECT 2`, this query would be interpreted as find `James` in the `@name` field, and `Brown` in any text field. In other words, it would be interpreted as `(@name:James) Brown`. + In `DIALECT 2`, you could achieve the dialect 1 behavior by updating your query to `@name:(James Brown)`. + +1. Your query uses `"..."`, `~`, `-`, and/or `%`. Consider a simple query with negation: + + `-hello world` + + In `DIALECT 1`, this query is interpreted as find values in any field that do not contain `hello` and do not contain `world`; the equivalent of `-(hello world)` or `-hello -world`. + In `DIALECT 2`, this query is interpreted as `-hello` and `world` (only `hello` is negated). + In `DIALECT 2`, you could achieve the dialect 1 behavior by updating your query to `-(hello world)`. + +1. Your query used `|`. Consider the simple query: + + `hello world | "goodbye" moon` + + In `DIALECT 1`, this query is interpreted as searching for `(hello world | "goodbye") moon`. + In `DIALECT 2`, this query is interpreted as searching for either `hello world` `"goodbye" moon`. + +1. Your query uses a wildcard pattern. Consider the simple query: + + `"w'foo*bar?'"` + + As shown above, you must use double quotes to contain the `w` pattern. + +With `DIALECT 2` you can use un-escaped spaces in tag queries, even with stopwords. + +{{% alert title=Note %}} +`DIALECT 2` is required with vector searches. +{{% /alert %}} + +## `DIALECT 3` + +Dialect version 3 was introduced in the [2.6](https://github.com/RediSearch/RediSearch/releases/tag/v2.6.3) release. 
This version introduced support for multi-value indexing and querying of attributes for any attribute type ( [TEXT](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-text), [TAG](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-tag), [NUMERIC](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-numeric), [GEO](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-geo) and [VECTOR](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-vector)) defined by a [JSONPath](https://redis.io/docs/stack/json/path/) leading to an array or multiple scalar values. Support for [GEOSHAPE](https://redis.io/docs/interact/search-and-query/query/geo-spatial/) queries was also introduced in this dialect. + +The primary difference between dialects version 2 and version 3 is that JSON is returned rather than scalars for multi-value attributes. Apart from specifying `DIALECT 3` at the end of a [`FT.SEARCH`](/commands/ft.search) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. To use dialect version 3, append `DIALECT 3` to your query command. + +`FT.SEARCH ... DIALECT 3` + +**Example** + +Sample JSON: + +``` +{ + "id": 123, + "underlyings": [ + { + "currency": "USD", + "spot": 99, + "underlier": "AAPL UW" + }, + { + "currency": "USD", + "spot": 100, + "underlier": "NFLX UW" + } + ] +} +``` + +Create an index: + +``` +FT.CREATE js_idx ON JSON PREFIX 1 js: SCHEMA $.underlyings[*].underlier AS und TAG +``` + +Now search, with and without `DIALECT 3`. + +- With dialect 1 (default): + + ``` + ft.search js_idx * return 1 und + 1) (integer) 1 + 2) "js:1" + 3) 1) "und" + 2) "AAPL UW" + ``` + + Only the first element of the expected two elements is returned. + +- With dialect 3: + + ``` + ft.search js_idx * return 1 und DIALECT 3 + 1) (integer) 1 + 2) "js:1" + 3) 1) "und" + 2) "[\"AAPL UW\",\"NFLX UW\"]" + ``` + + Both elements are returned. 
+ +{{% alert title=Note %}} +DIALECT 3 is required for shape-based (`POINT` or `POLYGON`) geospatial queries. +{{% /alert %}} + +## `DIALECT 4` + +Dialect version 4 was introduced in the [2.8](https://github.com/RediSearch/RediSearch/releases/tag/v2.8.4) release. It introduces performance optimizations for sorting operations on [`FT.SEARCH`](/commands/ft.search) and [`FT.AGGREGATE`](/commands/ft.aggregate). Apart from specifying `DIALECT 4` at the end of a [`FT.SEARCH`](/commands/ft.search) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. To use dialect version 4, append `DIALECT 4` to your query command. + +`FT.SEARCH ... DIALECT 4` + +Dialect version 4 will improve performance in four different scenarios: + +1. **Skip sorter** - applied when there is no sorting to be done. The query can return once it reaches the `LIMIT` of requested results. +1. **Partial range** - applied when there is a `SORTBY` on a numeric field, either with no filter or with a filter by the same numeric field. Such queries will iterate on a range large enough to satisfy the `LIMIT` of requested results. +1. **Hybrid** - applied when there is a `SORTBY` on a numeric field in addition to another non-numeric filter. It could be the case that some results will get filtered, leaving too small a range to satisfy any specified `LIMIT`. In such cases, the iterator then is re-wound and additional iterations occur to collect result up to the requested `LIMIT`. +1. **No optimization** - If there is a sort by score or by a non-numeric field, there is no other option but to retrieve all results and compare their values to the search parameters. +## Use [`FT.EXPLAINCLI`](/commands/ft.explaincli) to compare dialects + +The [[`FT.EXPLAINCLI`](/commands/ft.explaincli)](https://redis.io/commands/ft.explaincli/) is a powerful tool that provides a window into the inner workings of your queries. 
It's like a roadmap that details your query's journey from start to finish. + +When you run [`FT.EXPLAINCLI`](/commands/ft.explaincli), it returns an array representing the execution plan of a complex query. This plan is a step-by-step guide of how Redis interprets your query and how it plans to fetch results. It's a behind-the-scenes look at the process, giving you insights into how the search engine works. + +The [`FT.EXPLAINCLI`](/commands/ft.explaincli) accepts a `DIALECT` argument, allowing you to execute the query using different dialect versions, allowing you to compare the resulting query plans. + +To use [`FT.EXPLAINCLI`](/commands/ft.explaincli), you need to provide an index and a query predicate. The index is the name of the index you created using [`FT.CREATE`](/commands/ft.create), and the query predicate is the same as if you were sending it to [`FT.SEARCH`](/commands/ft.search) or [`FT.AGGREGATE`](/commands/ft.aggregate). + +Here's an example of how to use [`FT.EXPLAINCLI`](/commands/ft.explaincli) to understand differences in dialect versions 1 and 2. + +Negation of the intersection between tokens `hello` and `world`: + +```FT.EXPLAINCLI idx:dialects "-hello world" DIALECT 1 +1) NOT { +2) INTERSECT { +3) hello +4) world +5) } +6) } +7) +``` + +Intersection of the negation of the token `hello` together with token `world`: + +``` +FT.EXPLAINCLI idx:dialects "-hello world" DIALECT 2 + 1) INTERSECT { + 2) NOT { + 3) hello + 4) } + 5) UNION { + 6) world + 7) +world(expanded) + 8) } + 9) } +10) +``` + +Same result as `DIALECT 1`: + +``` +FT.EXPLAINCLI idx:dialects "-(hello world)" DIALECT 2 +1) NOT { +2) INTERSECT { +3) hello +4) world +5) } +6) } +7) +``` + +{{% alert title=Note %}} +[`FT.EXPLAIN`](/commands/ft.explain) doesn't execute the query. It only explains the plan. It's a way to understand how your query is interpreted by the query engine, which can be invaluable when you're trying to optimize your searches. 
+{{% /alert %}} + +## Change the default dialect + +The default dialect is `DIALECT 1`. If you wish to change that, you can do so by using the `DEFAULT_DIALECT` parameter when loading the RediSearch module: + +``` +$ redis-server --loadmodule ./redisearch.so DEFAULT_DIALECT 2 +``` + +You can also change the query dialect on an already running server using the `FT.CONFIG` command: + +``` +FT.CONFIG SET DEFAULT_DIALECT 2 +``` \ No newline at end of file diff --git a/content/develop/interact/search-and-query/advanced-concepts/escaping.md b/content/develop/interact/search-and-query/advanced-concepts/escaping.md index b34628847c..132bbacf8f 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/escaping.md +++ b/content/develop/interact/search-and-query/advanced-concepts/escaping.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/escaping/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Controlling text tokenization and escaping linkTitle: Tokenization title: Tokenization @@ -11,7 +19,7 @@ weight: 4 Redis Stack uses a very simple tokenizer for documents and a slightly more sophisticated tokenizer for queries. Both allow a degree of control over string escaping and tokenization. -Note: There is a different mechanism for tokenizing text and tag fields, this document refers only to text fields. For tag fields please refer to the [tag fields](/docs/interact/search-and-query/advanced-concepts/tags/) documentation. +Note: There is a different mechanism for tokenizing text and tag fields, this document refers only to text fields. For tag fields please refer to the [tag fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags/" >}}) documentation. 
## The rules of text field tokenization diff --git a/content/develop/interact/search-and-query/advanced-concepts/highlight.md b/content/develop/interact/search-and-query/advanced-concepts/highlight.md index a09dc9f6a0..6f0ad7dad4 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/highlight.md +++ b/content/develop/interact/search-and-query/advanced-concepts/highlight.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/highlight/ -- /redisearch/reference/highlight +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Highlighting full-text results linkTitle: Highlighting title: Highlighting diff --git a/content/develop/interact/search-and-query/advanced-concepts/phonetic_matching.md b/content/develop/interact/search-and-query/advanced-concepts/phonetic_matching.md index 12ed037ea4..b33a989838 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/phonetic_matching.md +++ b/content/develop/interact/search-and-query/advanced-concepts/phonetic_matching.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/phonetic_matching/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Phonetic matching linkTitle: Phonetic title: Phonetic @@ -15,7 +23,7 @@ Phonetic matching is based on the use of a phonetic algorithm. A phonetic algori As of v1.4, RediSearch provides phonetic matching of text fields specified with the `PHONETIC` attribute. This causes the terms in such fields to be indexed both by their textual value as well as their phonetic approximation. -Performing a search on `PHONETIC` fields will, by default, also return results for phonetically similar terms. This behavior can be controlled with the [`$phonetic` query attribute](/docs/interact/search-and-query/query/#query-attributes). +Performing a search on `PHONETIC` fields will, by default, also return results for phonetically similar terms. 
This behavior can be controlled with the [`$phonetic` query attribute]({{< relref "/develop/interact/search-and-query/query/#query-attributes" >}}). ## Phonetic algorithms support diff --git a/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md b/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md index bf5073ed80..7a4c7b5b00 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md +++ b/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/query_syntax/ -- /redisearch/reference/query_syntax +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Learn how to use query syntax ' @@ -42,7 +49,7 @@ You can use simple syntax for complex queries using these rules: * Georadius matches on geo fields with the syntax `@field:[{lon} {lat} {radius} {m|km|mi|ft}]`. * As of 2.6, range queries on vector fields with the syntax `@field:[VECTOR_RANGE {radius} $query_vec]`, where `query_vec` is given as a query parameter. * As of v2.4, k-nearest neighbors (KNN) queries on vector fields with or without pre-filtering with the syntax `{filter_query}=>[KNN {num} @field $query_vec]`. -* Tag field filters with the syntax `@field:{tag | tag | ...}`. See the full documentation on [tags](/docs/interact/search-and-query/advanced-concepts/tags/). +* Tag field filters with the syntax `@field:{tag | tag | ...}`. See the full documentation on [tags]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags/" >}}). * Optional terms or clauses: `foo ~bar` means bar is optional but documents containing `bar` will rank higher. * Fuzzy matching on terms: `%hello%` means all terms with Levenshtein distance of 1 from it. Use multiple pairs of '%' brackets to increase the Levenshtein distance. 
* An expression in a query can be wrapped in parentheses to disambiguate, for example, `(hello|hella) (world|werld)`. @@ -159,7 +166,7 @@ Finally, there's new [`FT.SEARCH`](/commands/ft.search) syntax that allows you t Here's an example using two stacked polygons that represent a box contained within a house. -![two stacked polygons](/docs/interact/search-and-query/img/polygons.png) +![two stacked polygons]({{< relref "/develop/interact/search-and-query/img/polygons.png" >}}) First, create an index using a `FLAT` `GEOSHAPE`, representing a 2D X Y coordinate system. @@ -258,7 +265,7 @@ The general syntax for hybrid query is `{some filter query}=>[ KNN {num|$num} @v `@vector_field:[VECTOR_RANGE 0.5 $query_vec]` -As of v2.4, the KNN vector search can be used at most once in a query, while, as of v2.6, the vector range filter can be used multiple times in a query. For more information on vector similarity syntax, see [Querying vector fields](/docs/interact/search-and-query/advanced-concepts/vectors/), and [Vector search examples](/docs/interact/search-and-query/advanced-concepts/vectors/#vector-search-examples) sections. +As of v2.4, the KNN vector search can be used at most once in a query, while, as of v2.6, the vector range filter can be used multiple times in a query. For more information on vector similarity syntax, see [Querying vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}), and [Vector search examples]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/#vector-search-examples" >}}) sections. ## Prefix matching @@ -363,7 +370,7 @@ The supported attributes are: As of v2.6.1, the query attributes syntax supports these additional attributes: * **$yield_distance_as**: specifies the distance field name, used for later sorting and/or returning, for clauses that yield some distance metric. It is currently supported for vector queries only (both KNN and range). 
-* **vector query params**: pass optional parameters for [vector queries](/docs/interact/search-and-query/advanced-concepts/vectors/#querying-vector-fields) in key-value format. +* **vector query params**: pass optional parameters for [vector queries]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/#querying-vector-fields" >}}) in key-value format. ## A few query examples @@ -453,4 +460,4 @@ As of v2.6.1, the query attributes syntax supports these additional attributes: The query parser is built using the Lemon Parser Generator and a Ragel based lexer. You can see the `DIALECT 2` grammar definition [at this git repo](https://github.com/RediSearch/RediSearch/blob/master/src/query_parser/v2/parser.y). -You can also see the [DEFAULT_DIALECT](/docs/interact/search-and-query/basic-constructs/configuration-parameters/#default_dialect) configuration parameter. +You can also see the [DEFAULT_DIALECT]({{< relref "/develop/interact/search-and-query/basic-constructs/configuration-parameters/#default_dialect" >}}) configuration parameter. diff --git a/content/develop/interact/search-and-query/advanced-concepts/scoring.md b/content/develop/interact/search-and-query/advanced-concepts/scoring.md index 650aba2719..43c48cf7d4 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/scoring.md +++ b/content/develop/interact/search-and-query/advanced-concepts/scoring.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/scoring/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Full-text scoring functions linkTitle: Scoring title: Scoring @@ -11,9 +19,9 @@ weight: 8 When searching, documents are scored based on their relevance to the query. The score is a floating point number between 0.0 and 1.0, where 1.0 is the highest score. The score is returned as part of the search results and can be used to sort the results. 
-Redis Stack comes with a few very basic scoring functions to evaluate document relevance. They are all based on document scores and term frequency. This is regardless of the ability to use [sortable fields](/docs/interact/search-and-query/advanced-concepts/sorting/). Scoring functions are specified by adding the `SCORER {scorer_name}` argument to a search query. +Redis Stack comes with a few very basic scoring functions to evaluate document relevance. They are all based on document scores and term frequency. This is regardless of the ability to use [sortable fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/sorting/" >}}). Scoring functions are specified by adding the `SCORER {scorer_name}` argument to a search query. -If you prefer a custom scoring function, it is possible to add more functions using the [extension API](/docs/interact/search-and-query/administration/extensions/). +If you prefer a custom scoring function, it is possible to add more functions using the [extension API]({{< relref "/develop/interact/search-and-query/administration/extensions/" >}}). The following is a list of the pre-bundled scoring functions available in Redis Stack and a short explanation about how they work. Each function is mentioned by registered name, which can be passed as a `SCORER` argument in [`FT.SEARCH`](/commands/ft.search). 
diff --git a/content/develop/interact/search-and-query/advanced-concepts/sorting.md b/content/develop/interact/search-and-query/advanced-concepts/sorting.md index fafc4e79dd..6e75120751 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/sorting.md +++ b/content/develop/interact/search-and-query/advanced-concepts/sorting.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/sorting/ -- /redisearch/reference/sorting +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Support for sorting query results linkTitle: Sorting title: Sorting diff --git a/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md b/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md index 0a9974bbdf..b62284ad5b 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md +++ b/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/spellcheck/ -- /redisearch/reference/spellcheck +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Query spelling correction support linkTitle: Spellchecking title: Spellchecking diff --git a/content/develop/interact/search-and-query/advanced-concepts/stemming.md b/content/develop/interact/search-and-query/advanced-concepts/stemming.md index fa92ac9c8d..fa955dcf3d 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/stemming.md +++ b/content/develop/interact/search-and-query/advanced-concepts/stemming.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/stemming/ -- /redisearch/reference/stemming +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Stemming support linkTitle: Stemming title: Stemming @@ -22,7 +29,7 @@ For further details see the [Snowball Stemmer website](https://snowballstem.org/ 
Stemming maps different forms of the same word to a common root - "stem" - for example, the English stemmer maps *studied* ,*studies* and *study* to *studi* . So a searching for *studied* would also find documents which only have the other forms. -In order to define which language the Stemmer should apply when building the index, you need to specify the `LANGUAGE` parameter for the entire index or for the specific field. For more details check the [FT.CREATE](/docs/commands/ft.create.md) syntax. +In order to define which language the Stemmer should apply when building the index, you need to specify the `LANGUAGE` parameter for the entire index or for the specific field. For more details check the [FT.CREATE]({{< relref "/develop/commands/ft.create.md" >}}) syntax. **Create a index with language definition** diff --git a/content/develop/interact/search-and-query/advanced-concepts/stopwords.md b/content/develop/interact/search-and-query/advanced-concepts/stopwords.md index c031a4f55a..6ed14a0a26 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/stopwords.md +++ b/content/develop/interact/search-and-query/advanced-concepts/stopwords.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/stopwords/ -- /redisearch/reference/stopwords +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Stop words support linkTitle: Stop words title: Stop words diff --git a/content/develop/interact/search-and-query/advanced-concepts/synonyms.md b/content/develop/interact/search-and-query/advanced-concepts/synonyms.md index 29f126af60..f04927324e 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/synonyms.md +++ b/content/develop/interact/search-and-query/advanced-concepts/synonyms.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/synonyms/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Synonym support 
linkTitle: Synonym title: Synonym diff --git a/content/develop/interact/search-and-query/advanced-concepts/tags.md b/content/develop/interact/search-and-query/advanced-concepts/tags.md index 4e776b9a0d..fab3f38391 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/tags.md +++ b/content/develop/interact/search-and-query/advanced-concepts/tags.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/tags/ -- /redisearch/reference/tags +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Details about tag fields linkTitle: Tags title: Tags @@ -148,6 +155,6 @@ You can see what that looks like in the following example: (error) Syntax error at offset 27 near be ``` -Note: stop words are words that are so common that a search engine ignores them. To learn more, see [stop words](/docs/interact/search-and-query/advanced-concepts/stopwords/). +Note: stop words are words that are so common that a search engine ignores them. To learn more, see [stop words]({{< relref "/develop/interact/search-and-query/advanced-concepts/stopwords/" >}}). Given the potential for syntax errors,it is recommended that you escape all spaces within tag queries. 
diff --git a/content/develop/interact/search-and-query/advanced-concepts/vectors.md b/content/develop/interact/search-and-query/advanced-concepts/vectors.md index 47f4d96c30..df641712e7 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/vectors.md +++ b/content/develop/interact/search-and-query/advanced-concepts/vectors.md @@ -1,7 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/vectors/ -- /redisearch/reference/vectors +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Learn how to use vector fields and vector similarity queries linkTitle: Vectors math: true @@ -10,7 +17,7 @@ weight: 14 --- *Vector fields* allow you to use vector similarity queries in the [`FT.SEARCH`](/commands/ft.search) command. -*Vector similarity* enables you to load, index, and query vectors stored as fields in Redis hashes or in JSON documents (via integration with the [JSON module](/docs/stack/json/)) +*Vector similarity* enables you to load, index, and query vectors stored as fields in Redis hashes or in JSON documents (via integration with the [JSON module]({{< relref "/develop/stack/json/" >}})) Vector similarity provides these functionalities: @@ -173,7 +180,7 @@ Unlike in hashes, vectors are stored in JSON documents as arrays (not as blobs). JSON.SET 1 $ '{"vec":[1,2,3,4]}' ``` -As of v2.6.1, JSON supports multi-value indexing. This capability accounts for vectors as well. Thus, it is possible to index multiple vectors under the same JSONPath. Additional information is available in the [Indexing JSON documents](/docs/stack/search/indexing_json/#index-json-arrays-as-vector) section. +As of v2.6.1, JSON supports multi-value indexing. This capability accounts for vectors as well. Thus, it is possible to index multiple vectors under the same JSONPath. 
Additional information is available in the [Indexing JSON documents]({{< relref "/develop/stack/search/indexing_json/#index-json-arrays-as-vector" >}}) section. **Example** ``` @@ -214,11 +221,11 @@ Every `*_attribute` parameter should refer to an attribute in the [`PARAMS`](/co * `$` - An attribute that holds the query vector as blob and must be passed through the `PARAMS` section. The blob's byte size should match the vector field dimension and type. -* `[ |$ [...]]` - An optional part for passing one or more vector similarity query parameters. Parameters should come in key-value pairs and should be valid parameters for the query. See which [runtime parameters](/docs/stack/search/reference/vectors/#runtime-attributes) are valid for each algorithm. +* `[ |$ [...]]` - An optional part for passing one or more vector similarity query parameters. Parameters should come in key-value pairs and should be valid parameters for the query. See which [runtime parameters]({{< relref "/develop/stack/search/reference/vectors/#runtime-attributes" >}}) are valid for each algorithm. * `[AS | $]` - An optional part for specifying a distance field name, for later sorting by the similarity metric and/or returning it. By default, the distance field name is "`___score`" and it can be used for sorting without using `AS ` in the query. -**Note:** As of v2.6, vector query params and distance field name can be specified in [query attributes](/docs/stack/search/reference/query_syntax/#query-attributes) like syntax as well. Thus, the following format is also supported: +**Note:** As of v2.6, vector query params and distance field name can be specified in [query attributes]({{< relref "/develop/stack/search/reference/query_syntax/#query-attributes" >}}) like syntax as well. Thus, the following format is also supported: ``` =>[]=>{$: ( | $); ... 
} diff --git a/content/develop/interact/search-and-query/basic-constructs/_index.md b/content/develop/interact/search-and-query/basic-constructs/_index.md index 39ab297f53..dfde56179d 100644 --- a/content/develop/interact/search-and-query/basic-constructs/_index.md +++ b/content/develop/interact/search-and-query/basic-constructs/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Basic constructs for searching and querying Redis data linkTitle: Basic constructs title: Basic constructs @@ -29,6 +39,6 @@ Fields that are not indexed will not contribute to search results. However, they The index structure in defined by a schema. The schema defines how fields are stored and indexed. It specifies the type of each field and other important information. -To create an index, you need to define the schema for your collection. Learn more about how to define the schema on the [schema definition](/docs/interact/search-and-query/basic-constructs/schema-definition/) page. +To create an index, you need to define the schema for your collection. Learn more about how to define the schema on the [schema definition]({{< relref "/develop/interact/search-and-query/basic-constructs/schema-definition/" >}}) page. 
## Learn more: \ No newline at end of file diff --git a/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md b/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md index 22c8a338b3..1d7b2ceda6 100644 --- a/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md +++ b/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/configuring/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Querying and searching in Redis Stack can be tuned through multiple configuration parameters. Some of these parameters can only be set at load-time, while other parameters can be set either at load-time or at run-time. @@ -15,13 +23,13 @@ weight: 4 Setting configuration parameters at load-time is done by appending arguments after the `--loadmodule` argument when starting a server from the command line, or after the `loadmodule` directive in a Redis config file. For example: -In [redis.conf](/docs/manual/config/): +In [redis.conf]({{< relref "/develop/manual/config/" >}}): ``` loadmodule ./redisearch.so [OPT VAL]... ``` -From the [Redis CLI](/docs/manual/cli/), using the [MODULE LOAD](/commands/module-load/) command: +From the [Redis CLI]({{< relref "/develop/manual/cli/" >}}), using the [MODULE LOAD](/commands/module-load/) command: ``` 127.0.0.6379> MODULE LOAD redisearch.so [OPT VAL]... @@ -174,7 +182,7 @@ $ redis-server --loadmodule ./redisearch.so CONCURRENT_WRITE_MODE ### EXTLOAD -If present, RediSearch will try to load an extension dynamic library from its specified file path. See [Extensions](/docs/interact/search-and-query/administration/extensions/) for details. +If present, RediSearch will try to load an extension dynamic library from its specified file path. 
See [Extensions]({{< relref "/develop/interact/search-and-query/administration/extensions/" >}}) for details. #### Default @@ -274,7 +282,7 @@ $ redis-server --loadmodule ./redisearch.so MAXAGGREGATERESULTS 3000000 ### FRISOINI -If present, load the custom Chinese dictionary from the specified path. See [Using custom dictionaries](/docs/interact/search-and-query/advanced-concepts/chinese/#using-custom-dictionaries) for more details. +If present, load the custom Chinese dictionary from the specified path. See [Using custom dictionaries]({{< relref "/develop/interact/search-and-query/advanced-concepts/chinese/#using-custom-dictionaries" >}}) for more details. #### Default @@ -290,7 +298,7 @@ $ redis-server --loadmodule ./redisearch.so FRISOINI /opt/dict/friso.ini ### CURSOR_MAX_IDLE -The maximum idle time (in ms) that can be set to the [cursor api](/docs/interact/search-and-query/search/aggregations/#cursor-api). +The maximum idle time (in ms) that can be set to the [cursor api]({{< relref "/develop/interact/search-and-query/search/aggregations/#cursor-api" >}}). #### Default @@ -540,7 +548,7 @@ $ redis-server --loadmodule ./redisearch.so DEFAULT_DIALECT 2 ### VSS_MAX_RESIZE -The maximum memory resize for vector similarity indexes in bytes. This value will override default memory limits if you need to allow for a large [`BLOCK_SIZE`](/docs/interact/search-and-query/search/vectors/#creation-attributes-per-algorithm). +The maximum memory resize for vector similarity indexes in bytes. This value will override default memory limits if you need to allow for a large [`BLOCK_SIZE`]({{< relref "/develop/interact/search-and-query/search/vectors/#creation-attributes-per-algorithm" >}}). 
#### Default diff --git a/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md b/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md index 8991eb587b..52504018ba 100644 --- a/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md +++ b/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Available field types and options. ' @@ -95,7 +105,7 @@ Notice that `{count}` represents the total number of attribute pairs passed in t * `{attribute_name} {attribute_value}` are algorithm attributes for the creation of the vector index. Every algorithm has its own mandatory and optional attributes. -For more information about vector fields, see [vector fields](/docs/interact/search-and-query/advanced-concepts/vectors/). +For more information about vector fields, see [vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). ## Tag fields @@ -119,7 +129,7 @@ You can search for documents with specific tags using the `@:{} FT.SEARCH idx "@tags:{blue}" ``` -For more information about tag fields, see [Tag Fields](/docs/interact/search-and-query/advanced-concepts/tags/). +For more information about tag fields, see [Tag Fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags/" >}}). ## Text fields @@ -142,7 +152,7 @@ where - `dm:pt` - double metaphone for Portuguese - `dm:es` - double metaphone for Spanish - For more information, see [Phonetic Matching](/docs/interact/search-and-query/advanced-concepts/phonetic_matching/). + For more information, see [Phonetic Matching]({{< relref "/develop/interact/search-and-query/advanced-concepts/phonetic_matching/" >}}). - `SORTABLE` indicates that the field can be sorted. 
This is useful for performing range queries and sorting search results based on text values. - `NOINDEX` indicates that the field is not indexed. This is useful for storing text that you don't want to search for, but that you still want to retrieve in search results. - `WITHSUFFIXTRIE` indicates that the field will be indexed with a suffix trie. The index will keep a suffix trie with all terms which match the suffix. It is used to optimize `contains (*foo*)` and `suffix (*foo)` queries. Otherwise, a brute-force search on the trie is performed. If a suffix trie exists for some fields, these queries will be disabled for other fields. diff --git a/content/develop/interact/search-and-query/basic-constructs/schema-definition.md b/content/develop/interact/search-and-query/basic-constructs/schema-definition.md index f4fae5ab56..d07865348e 100644 --- a/content/develop/interact/search-and-query/basic-constructs/schema-definition.md +++ b/content/develop/interact/search-and-query/basic-constructs/schema-definition.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'How to define the schema of an index. 
' diff --git a/content/develop/interact/search-and-query/deprecated/_index.md b/content/develop/interact/search-and-query/deprecated/_index.md index 020a2b5512..ca0014b3ca 100644 --- a/content/develop/interact/search-and-query/deprecated/_index.md +++ b/content/develop/interact/search-and-query/deprecated/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Deprecated features linkTitle: Deprecated title: Deprecated diff --git a/content/develop/interact/search-and-query/deprecated/development.md b/content/develop/interact/search-and-query/deprecated/development.md index c1ed829270..608f7c421a 100644 --- a/content/develop/interact/search-and-query/deprecated/development.md +++ b/content/develop/interact/search-and-query/deprecated/development.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/development/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Notes on debugging, testing and documentation linkTitle: Developer notes title: Developer notes diff --git a/content/develop/interact/search-and-query/deprecated/payloads.md b/content/develop/interact/search-and-query/deprecated/payloads.md index b04f953fea..b6da68a161 100644 --- a/content/develop/interact/search-and-query/deprecated/payloads.md +++ b/content/develop/interact/search-and-query/deprecated/payloads.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/search/reference/payloads/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Payload support(deprecated) linkTitle: Payload title: Payload diff --git a/content/develop/interact/search-and-query/indexing/_index.md b/content/develop/interact/search-and-query/indexing/_index.md index fcda8a0894..e10eb4639c 100644 --- a/content/develop/interact/search-and-query/indexing/_index.md +++ b/content/develop/interact/search-and-query/indexing/_index.md @@ -1,6 +1,14 @@ --- 
-aliases: -- /docs/stack/search/indexing_json/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: How to index and search JSON documents linkTitle: Indexing title: Indexing @@ -13,7 +21,7 @@ In addition to indexing Redis hashes, Redis Stack can also index JSON documents. Before you can index and search JSON documents, you need a database with either: -- [Redis Stack](/docs/getting-started/install-stack/), which automatically includes JSON and searching and querying features +- [Redis Stack]({{< relref "/develop/getting-started/install-stack/" >}}), which automatically includes JSON and searching and querying features - Redis v6.x or later with the following modules installed and enabled: - RediSearch v2.2 or later - RedisJSON v2.0 or later @@ -22,7 +30,7 @@ Before you can index and search JSON documents, you need a database with either: When you create an index with the [`FT.CREATE`](/commands/ft.create) command, include the `ON JSON` keyword to index any existing and future JSON documents stored in the database. -To define the `SCHEMA`, you can provide [JSONPath](/docs/stack/json/path) expressions. +To define the `SCHEMA`, you can provide [JSONPath]({{< relref "/develop/stack/json/path" >}}) expressions. The result of each JSONPath expression is indexed and associated with a logical name called an `attribute` (previously known as a `field`). You can use these attributes in queries. @@ -159,7 +167,7 @@ And lastly, search for the Bluetooth headphones that are most similar to an imag 4) "{\"name\":\"Wireless earbuds\",\"description\":\"Wireless Bluetooth in-ear headphones\",\"connection\":{\"wireless\":true,\"connection\":\"Bluetooth\"},\"price\":64.99,\"stock\":17,\"colors\":[\"black\",\"white\"],\"embedding\":[-0.7,-0.51,0.88,0.14]}" ``` -For more information about search queries, see [Search query syntax](/docs/stack/search/reference/query_syntax). 
+For more information about search queries, see [Search query syntax]({{< relref "/develop/stack/search/reference/query_syntax" >}}). {{% alert title="Note" color="info" %}} [`FT.SEARCH`](/commands/ft.search) queries require `attribute` modifiers. Don't use JSONPath expressions in queries because the query parser doesn't fully support them. @@ -167,7 +175,7 @@ For more information about search queries, see [Search query syntax](/docs/stack ## Index JSON arrays as TAG -If you want to index string or boolean values as TAG within a JSON array, use the [JSONPath](/docs/stack/json/path) wildcard operator. +If you want to index string or boolean values as TAG within a JSON array, use the [JSONPath]({{< relref "/develop/stack/json/path" >}}) wildcard operator. To index an item's list of available colors, specify the JSONPath `$.colors.*` in the `SCHEMA` definition during index creation: @@ -416,7 +424,7 @@ Now you can search for the two headphones that are most similar to the image emb 4) "{\"name\":\"Wireless earbuds\",\"description\":\"Wireless Bluetooth in-ear headphones\",\"price\":64.99,\"stock\":17,\"colors\":[\"black\",\"white\"],\"embedding\":[-0.7,-0.51,0.88,0.14]}" ``` -If you want to index multiple numeric arrays as VECTOR, use a [JSONPath](/docs/stack/json/path/) leading to multiple numeric arrays using JSONPath operators such as wildcard, filter, union, array slice, and/or recursive descent. +If you want to index multiple numeric arrays as VECTOR, use a [JSONPath]({{< relref "/develop/stack/json/path/" >}}) leading to multiple numeric arrays using JSONPath operators such as wildcard, filter, union, array slice, and/or recursive descent. For example, assume that your JSON items include an array of vector embeddings, where each vector represents a different image of the same product. 
To index these vectors, specify the JSONPath `$.embeddings[*]` in the schema definition during index creation: @@ -451,7 +459,7 @@ Now you can search for the two headphones that are most similar to an image embe ``` Note that `0.771500051022` is the L2 distance between the query vector and `[-0.8,-0.15,0.33,-0.01]`, which is the second element in the embedding array, and it is lower than the L2 distance between the query vector and `[-0.7,-0.51,0.88,0.14]`, which is the first element in the embedding array. -For more information on vector similarity syntax, see [Vector fields](/docs/interact/search-and-query/advanced-concepts/vectors/). +For more information on vector similarity syntax, see [Vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). ## Index JSON objects @@ -519,7 +527,7 @@ For example, this query only returns the `name` and `price` of each set of headp ### Project with JSONPath -You can use [JSONPath](/docs/stack/json/path) expressions in a `RETURN` statement to extract any part of the JSON document, even fields that were not defined in the index `SCHEMA`. +You can use [JSONPath]({{< relref "/develop/stack/json/path" >}}) expressions in a `RETURN` statement to extract any part of the JSON document, even fields that were not defined in the index `SCHEMA`. For example, the following query uses the JSONPath expression `$.stock` to return each item's stock in addition to the name and price attributes. @@ -569,7 +577,7 @@ This query returns the field as the alias `"stock"` instead of the JSONPath expr ### Highlight search terms -You can [highlight](/docs/stack/search/reference/highlight) relevant search terms in any indexed `TEXT` attribute. +You can [highlight]({{< relref "/develop/stack/search/reference/highlight" >}}) relevant search terms in any indexed `TEXT` attribute. 
For [`FT.SEARCH`](/commands/ft.search), you have to explicitly set which attributes you want highlighted after the `RETURN` and `HIGHLIGHT` parameters. @@ -598,9 +606,9 @@ For example, highlight the word "bluetooth" with bold HTML tags in item names an ## Aggregate with JSONPath -You can use [aggregation](/docs/interact/search-and-query/search/aggregations/) to generate statistics or build facet queries. +You can use [aggregation]({{< relref "/develop/interact/search-and-query/search/aggregations/" >}}) to generate statistics or build facet queries. -The `LOAD` option accepts [JSONPath](/docs/stack/json/path) expressions. You can use any value in the pipeline, even if the value is not indexed. +The `LOAD` option accepts [JSONPath]({{< relref "/develop/stack/json/path" >}}) expressions. You can use any value in the pipeline, even if the value is not indexed. This example uses aggregation to calculate a 10% price discount for each item and sorts the items from least expensive to most expensive: diff --git a/content/develop/interact/search-and-query/query-use-cases/_index.md b/content/develop/interact/search-and-query/query-use-cases/_index.md index 72acc47aba..940703c2ca 100644 --- a/content/develop/interact/search-and-query/query-use-cases/_index.md +++ b/content/develop/interact/search-and-query/query-use-cases/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/stack/use-cases/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Search and query use cases ' diff --git a/content/develop/interact/search-and-query/query/_index.md b/content/develop/interact/search-and-query/query/_index.md index 6c0ca2ce93..e07dc795ab 100644 --- a/content/develop/interact/search-and-query/query/_index.md +++ b/content/develop/interact/search-and-query/query/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Understand how to query, search, and 
aggregate Redis data hideListLinks: true linkTitle: Query @@ -23,13 +33,13 @@ Here is a short SQL comparison using the [bicycle dataset](./data/bicycles.txt): The following articles provide an overview of how to query data with the [FT.SEARCH](/commands/ft.search/) command: -* [Exact match queries](/docs/interact/search-and-query/query/exact-match) -* [Range queries](/docs/interact/search-and-query/query/range) -* [Full-text search ](/docs/interact/search-and-query/query/full-text) -* [Geospatial queries](/docs/interact/search-and-query/query/geo-spatial) -* [Vector search](/docs/interact/search-and-query/query/vector-search) -* [Combined queries](/docs/interact/search-and-query/query/combined) +* [Exact match queries]({{< relref "/develop/interact/search-and-query/query/exact-match" >}}) +* [Range queries]({{< relref "/develop/interact/search-and-query/query/range" >}}) +* [Full-text search ]({{< relref "/develop/interact/search-and-query/query/full-text" >}}) +* [Geospatial queries]({{< relref "/develop/interact/search-and-query/query/geo-spatial" >}}) +* [Vector search]({{< relref "/develop/interact/search-and-query/query/vector-search" >}}) +* [Combined queries]({{< relref "/develop/interact/search-and-query/query/combined" >}}) You can find further details about aggregation queries with [FT.AGGREGATE](/commands/ft.aggregate/) in the following article: -* [Aggregation queries](/docs/interact/search-and-query/query/aggregation) \ No newline at end of file +* [Aggregation queries]({{< relref "/develop/interact/search-and-query/query/aggregation" >}}) \ No newline at end of file diff --git a/content/develop/interact/search-and-query/query/aggregation.md b/content/develop/interact/search-and-query/query/aggregation.md index 1ece25f99a..d13c37a718 100644 --- a/content/develop/interact/search-and-query/query/aggregation.md +++ b/content/develop/interact/search-and-query/query/aggregation.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- 
rc +- oss +- kubernetes +- clients description: Group and aggregate query results linkTitle: Aggregation title: Aggregation queries @@ -11,7 +21,7 @@ An aggregation query allows you to perform the following actions: - Group data based on field values. - Apply aggregation functions on the grouped data. -This article explains the basic usage of the [FT.AGGREGATE](/commands/ft.aggregate/) command. For further details, see the [command specification](/commands/ft.aggregate/) and the [aggregations reference documentation](/docs/interact/search-and-query/advanced-concepts/aggregations). +This article explains the basic usage of the [FT.AGGREGATE](/commands/ft.aggregate/) command. For further details, see the [command specification](/commands/ft.aggregate/) and the [aggregations reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}}). The examples in this article use a schema with the following fields: @@ -30,7 +40,7 @@ FT.AGGREGATE index "query_expr" LOAD n "field_1" .. "field_n" APPLY "function_ex Here is a more detailed explanation of the query syntax: -1. **Query expression**: you can use the same query expressions as you would use with the [`FT.SEARCH`](/commands/ft.search) command. You can substitute `query_expr` with any of the expressions explained in the articles of this [query topic](/docs/interact/search-and-query/query/). Vector search queries are an exception. You can't combine a vector search with an aggregation query. +1. **Query expression**: you can use the same query expressions as you would use with the [`FT.SEARCH`](/commands/ft.search) command. You can substitute `query_expr` with any of the expressions explained in the articles of this [query topic]({{< relref "/develop/interact/search-and-query/query/" >}}). Vector search queries are an exception. You can't combine a vector search with an aggregation query. 2. 
**Loaded fields**: if field values weren't already loaded into the aggregation pipeline, you can force their presence via the `LOAD` clause. This clause takes the number of fields (`n`), followed by the field names (`"field_1" .. "field_n"`). 3. **Mapping function**: this mapping function operates on the field values. A specific field is referenced as `@field_name` within the function expression. The result is returned as `result_field`. @@ -78,7 +88,7 @@ FT.AGGREGATE index "query_expr" ... GROUPBY n "field_1" .. "field_n" REDUCE AGG Here is an explanation of the additional constructs: 1. **Grouping**: you can group by one or many fields. Each ordered sequence of field values then defines one group. It's also possible to group by values that resulted from a previous `APPLY ... AS`. -2. **Aggregation**: you must replace `AGG_FUNC` with one of the supported aggregation functions (e.g., `SUM` or `COUNT`). A complete list of functions is available in the [aggregations reference documentation](/docs/interact/search-and-query/advanced-concepts/aggregations). Replace `aggregated_result_field` with a value of your choice. +2. **Aggregation**: you must replace `AGG_FUNC` with one of the supported aggregation functions (e.g., `SUM` or `COUNT`). A complete list of functions is available in the [aggregations reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}}). Replace `aggregated_result_field` with a value of your choice. The following query shows you how to group by the field `condition` and apply a reduction based on the previously derived `price_category`. The expression `@price<1000` causes a bicycle to have the price category `1` if its price is lower than 1000 USD. Otherwise, it has the price category `0`. The output is the number of affordable bicycles grouped by price category. 
diff --git a/content/develop/interact/search-and-query/query/combined.md b/content/develop/interact/search-and-query/query/combined.md index 770c163d26..f948bc0c15 100644 --- a/content/develop/interact/search-and-query/query/combined.md +++ b/content/develop/interact/search-and-query/query/combined.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Combine query expressions linkTitle: Combined title: Combined queries @@ -7,16 +17,16 @@ weight: 9 A combined query is a combination of several query types, such as: -* [Exact match](/docs/interact/search-and-query/query/exact-match) -* [Range](/docs/interact/search-and-query/query/range) -* [Full-text](/docs/interact/search-and-query/query/full-text) -* [Geospatial](/docs/interact/search-and-query/query/geo-spatial) -* [Vector search](/docs/interact/search-and-query/query/vector-search) +* [Exact match]({{< relref "/develop/interact/search-and-query/query/exact-match" >}}) +* [Range]({{< relref "/develop/interact/search-and-query/query/range" >}}) +* [Full-text]({{< relref "/develop/interact/search-and-query/query/full-text" >}}) +* [Geospatial]({{< relref "/develop/interact/search-and-query/query/geo-spatial" >}}) +* [Vector search]({{< relref "/develop/interact/search-and-query/query/vector-search" >}}) You can use logical query operators to combine query expressions for numeric, tag, and text fields. For vector fields, you can combine a KNN query with a pre-filter. {{% alert title="Note" color="warning" %}} -The operators are interpreted slightly differently depending on the query dialect used. The default dialect is `DIALECT 1`; see [this article](/docs/interact/search-and-query/basic-constructs/configuration-parameters/#default_dialect) for information on how to change the dialect version. This article uses the second version of the query dialect, `DIALECT 2`, and uses additional brackets (`(...)`) to help clarify the examples. 
Further details can be found in the [query syntax documentation](/docs/interact/search-and-query/advanced-concepts/query_syntax/). +The operators are interpreted slightly differently depending on the query dialect used. The default dialect is `DIALECT 1`; see [this article]({{< relref "/develop/interact/search-and-query/basic-constructs/configuration-parameters/#default_dialect" >}}) for information on how to change the dialect version. This article uses the second version of the query dialect, `DIALECT 2`, and uses additional brackets (`(...)`) to help clarify the examples. Further details can be found in the [query syntax documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/query_syntax/" >}}). {{% /alert %}} The examples in this article use the following schema: @@ -117,7 +127,7 @@ The [FT.SEARCH](/commands/ft.search/) command allows you to combine any query ex FT.SEARCH index "expr" FILTER numeric_field start end ``` -Please see the [range query article](/docs/interact/search-and-query/query/range) to learn more about numeric range queries and such filters. +Please see the [range query article]({{< relref "/develop/interact/search-and-query/query/range" >}}) to learn more about numeric range queries and such filters. ## Pre-filter for a KNN vector query @@ -134,4 +144,4 @@ Here is an example: FT.SEARCH idx:bikes_vss "(@price:[500 1000] @condition:{new})=>[KNN 3 @vector $query_vector]" PARAMS 2 "query_vector" "Z\xf8\x15:\xf23\xa1\xbfZ\x1dI>\r\xca9..." DIALECT 2 ``` -The [vector search article](/docs/interact/search-and-query/query/vector-search) provides further details about vector queries in general. \ No newline at end of file +The [vector search article]({{< relref "/develop/interact/search-and-query/query/vector-search" >}}) provides further details about vector queries in general. 
\ No newline at end of file diff --git a/content/develop/interact/search-and-query/query/exact-match.md b/content/develop/interact/search-and-query/query/exact-match.md index 3d10d37cf4..e522b4da6a 100644 --- a/content/develop/interact/search-and-query/query/exact-match.md +++ b/content/develop/interact/search-and-query/query/exact-match.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Perform simple exact match queries linkTitle: Exact match title: Exact match queries @@ -17,7 +27,7 @@ The examples in this article use a schema with the following fields: | `condition` | `TAG` | | `price` | `NUMERIC` | -You can find more details about creating the index and loading the demo data in the [quick start guide](/docs/interact/search-and-query/quickstart/). +You can find more details about creating the index and loading the demo data in the [quick start guide]({{< relref "/develop/interact/search-and-query/quickstart/" >}}). ## Numeric field @@ -27,7 +37,7 @@ To perform an exact match query on a numeric field, you need to construct a rang FT.SEARCH index "@field:[start end]" ``` -As described in the [article about range queries](/docs/interact/search-and-query/query/range), you can also use the `FILTER` argument: +As described in the [article about range queries]({{< relref "/develop/interact/search-and-query/query/range" >}}), you can also use the `FILTER` argument: ``` FT.SEARCH index "*" FILTER field start end @@ -70,7 +80,7 @@ FT.SEARCH idx:bicycle "@condition:{new}" ## Full-text field -A detailed explanation of full-text queries is available in the [full-text queries documentation](/docs/interact/search-and-query/query/full-text). You can also query for an exact match of a phrase within a text field: +A detailed explanation of full-text queries is available in the [full-text queries documentation]({{< relref "/develop/interact/search-and-query/query/full-text" >}}). 
You can also query for an exact match of a phrase within a text field: ``` FT.SEARCH index "@field:\"phrase\"" @@ -79,7 +89,7 @@ FT.SEARCH index "@field:\"phrase\"" {{% alert title="Important" color="warning" %}} The phrase must be wrapped by escaped double quotes for an exact match query. -You can't use a phrase that starts with a [stop word](/docs/interact/search-and-query/advanced-concepts/stopwords). +You can't use a phrase that starts with a [stop word]({{< relref "/develop/interact/search-and-query/advanced-concepts/stopwords" >}}). {{% /alert %}} Here is an example for finding all bicycles that have a description containing the exact text 'rough terrain': diff --git a/content/develop/interact/search-and-query/query/full-text.md b/content/develop/interact/search-and-query/query/full-text.md index 82c78876c7..791ffbcad7 100644 --- a/content/develop/interact/search-and-query/query/full-text.md +++ b/content/develop/interact/search-and-query/query/full-text.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Perform a full-text search linkTitle: Full-text title: Full-text search @@ -7,7 +17,7 @@ weight: 3 A full-text search finds words or phrases within larger texts. You can search within a specific text field or across all text fields. -This article provides a good overview of the most relevant full-text search capabilities. Please find further details about all the full-text search features in the [reference documentation](/docs/interact/search-and-query/advanced-concepts/). +This article provides a good overview of the most relevant full-text search capabilities. Please find further details about all the full-text search features in the [reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/" >}}). 
The examples in this article use a schema with the following fields: @@ -32,7 +42,7 @@ Instead of searching across all text fields, you might want to limit the search FT.SEARCH index "@field: word" ``` -Words that occur very often in natural language, such as `the` or `a` for the English language, aren't indexed and will not return a search result. You can find further details in the [stop words article](/docs/interact/search-and-query/advanced-concepts/stopwords). +Words that occur very often in natural language, such as `the` or `a` for the English language, aren't indexed and will not return a search result. You can find further details in the [stop words article]({{< relref "/develop/interact/search-and-query/advanced-concepts/stopwords" >}}). The following example searches for all bicycles that have the word 'kids' in the description: @@ -42,7 +52,7 @@ FT.SEARCH idx:bicycle "@description: kids" ## Phrase -A phrase is a sentence, sentence fragment, or small group of words. You can find further details about how to find exact phrases in the [exact match article](/docs/interact/search-and-query/query/exact-match). +A phrase is a sentence, sentence fragment, or small group of words. You can find further details about how to find exact phrases in the [exact match article]({{< relref "/develop/interact/search-and-query/query/exact-match" >}}). 
## Word prefix diff --git a/content/develop/interact/search-and-query/query/geo-spatial.md b/content/develop/interact/search-and-query/query/geo-spatial.md index 41f4f23566..aba54ead78 100644 --- a/content/develop/interact/search-and-query/query/geo-spatial.md +++ b/content/develop/interact/search-and-query/query/geo-spatial.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Query based on geographic data linkTitle: Geospatial title: Geospatial queries diff --git a/content/develop/interact/search-and-query/query/range.md b/content/develop/interact/search-and-query/query/range.md index 9090f9c9f2..c510f88ae2 100644 --- a/content/develop/interact/search-and-query/query/range.md +++ b/content/develop/interact/search-and-query/query/range.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Perform numeric range queries linkTitle: Range title: Range queries @@ -69,4 +79,4 @@ FT.SEARCH idx:bicycle "@price:[-inf 2000] SORTBY price LIMIT 0 5" ## Non-numeric range queries -You can learn more about non-numeric range queries, such as [geospatial](/docs/interact/search-and-query/query/geo-spatial) or [vector search](/docs/interact/search-and-query/query/vector-search) queries, in their dedicated articles. \ No newline at end of file +You can learn more about non-numeric range queries, such as [geospatial]({{< relref "/develop/interact/search-and-query/query/geo-spatial" >}}) or [vector search]({{< relref "/develop/interact/search-and-query/query/vector-search" >}}) queries, in their dedicated articles. 
\ No newline at end of file diff --git a/content/develop/interact/search-and-query/query/vector-search.md b/content/develop/interact/search-and-query/query/vector-search.md index 5a94e82f48..c00779b42d 100644 --- a/content/develop/interact/search-and-query/query/vector-search.md +++ b/content/develop/interact/search-and-query/query/vector-search.md @@ -1,13 +1,21 @@ --- -aliases: -- /docs/interact/search-and-query/query/vector-similarity/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Query for data based on vector embeddings linkTitle: Vector title: Vector search weight: 5 --- -This article gives you a good overview of how to perform vector search queries with Redis Stack. See the [Redis as a vector database quick start guide](/docs/get-started/vector-database/) for more information about Redis as a vector database. You can also find more detailed information about all the parameters in the [vector reference documentation](/docs/interact/search-and-query/advanced-concepts/vectors/). +This article gives you a good overview of how to perform vector search queries with Redis Stack. See the [Redis as a vector database quick start guide]({{< relref "/develop/get-started/vector-database/" >}}) for more information about Redis as a vector database. You can also find more detailed information about all the parameters in the [vector reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). A vector search query on a vector field allows you to find all vectors in a vector space that are close to a given vector. You can query for the k-nearest neighbors or vectors within a given radius. @@ -72,15 +80,15 @@ FT.SEARCH index "@field:[VECTOR_RANGE radius $vector]=>{$YIELD_DISTANCE_AS: dist Here is a more detailed explanation of this query: -1. **Range query**: the syntax of a radius query is very similar to the regular range query, except for the keyword `VECTOR_RANGE`. 
You can also combine a vector radius query with other queries in the same way as regular range queries. See [combined queries article](/docs/interact/search-and-query/query/combined/) for more details. +1. **Range query**: the syntax of a radius query is very similar to the regular range query, except for the keyword `VECTOR_RANGE`. You can also combine a vector radius query with other queries in the same way as regular range queries. See [combined queries article]({{< relref "/develop/interact/search-and-query/query/combined/" >}}) for more details. 2. **Additional step**: the `=>` arrow means that the range query is followed by evaluating additional parameters. -3. **Range query parameters**: parameters such as `$YIELD_DISTANCE_AS` can be found in the [vectors reference documentation](/docs/interact/search-and-query/advanced-concepts/vectors/). +3. **Range query parameters**: parameters such as `$YIELD_DISTANCE_AS` can be found in the [vectors reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). 4. **Vector binary data**: you need to use `PARAMS` to pass the binary representation of the vector. 5. **Dialect**: vector search has been available since version two of the query dialect. {{% alert title="Note" color="warning" %}} -By default, [`FT.SEARCH`](/commands/ft.search) returns only the first ten results. The [range query article](/docs/interact/search-and-query/query/range) explains to you how to scroll through the result set. +By default, [`FT.SEARCH`](/commands/ft.search) returns only the first ten results. The [range query article]({{< relref "/develop/interact/search-and-query/query/range" >}}) explains to you how to scroll through the result set. {{% /alert %}} The example below shows a radius query that returns the description and the distance within a radius of `0.5`. The result is sorted by the distance. 
diff --git a/content/develop/interact/transactions.md b/content/develop/interact/transactions.md index 4ec6d6a496..1dbeb2c542 100644 --- a/content/develop/interact/transactions.md +++ b/content/develop/interact/transactions.md @@ -1,7 +1,14 @@ --- -aliases: -- /topics/transactions -- /docs/manual/transactions/ +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: How transactions work in Redis linkTitle: Transactions title: Transactions diff --git a/content/develop/manual/_index.md b/content/develop/manual/_index.md new file mode 100644 index 0000000000..33b9f80dbd --- /dev/null +++ b/content/develop/manual/_index.md @@ -0,0 +1,16 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: A developer's guide to Redis +linkTitle: Use Redis +title: Use Redis +weight: 50 +--- diff --git a/content/develop/manual/client-side-caching.md b/content/develop/manual/client-side-caching.md new file mode 100644 index 0000000000..1e0e472971 --- /dev/null +++ b/content/develop/manual/client-side-caching.md @@ -0,0 +1,344 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: 'Server-assisted, client-side caching in Redis + + ' +linkTitle: Client-side caching +title: Client-side caching in Redis +weight: 2 +--- + +Client-side caching is a technique used to create high performance services. +It exploits the memory available on application servers, servers that are +usually distinct computers compared to the database nodes, to store some subset +of the database information directly in the application side. 
+ +Normally when data is required, the application servers ask the database about +such information, like in the following diagram: + + + +-------------+ +----------+ + | | ------- GET user:1234 -------> | | + | Application | | Database | + | | <---- username = Alice ------- | | + +-------------+ +----------+ + +When client-side caching is used, the application will store the reply of +popular queries directly inside the application memory, so that it can +reuse such replies later, without contacting the database again: + + +-------------+ +----------+ + | | | | + | Application | ( No chat needed ) | Database | + | | | | + +-------------+ +----------+ + | Local cache | + | | + | user:1234 = | + | username | + | Alice | + +-------------+ + +While the application memory used for the local cache may not be very big, +the time needed in order to access the local computer memory is orders of +magnitude smaller compared to accessing a networked service like a database. +Since often the same small percentage of data are accessed frequently, +this pattern can greatly reduce the latency for the application to get data +and, at the same time, the load in the database side. + +Moreover there are many datasets where items change very infrequently. +For instance, most user posts in a social network are either immutable or +rarely edited by the user. Adding to this the fact that usually a small +percentage of the posts are very popular, either because a small set of users +have a lot of followers and/or because recent posts have a lot more +visibility, it is clear why such a pattern can be very useful. + +Usually the two key advantages of client-side caching are: + +1. Data is available with a very small latency. +2. The database system receives less queries, allowing it to serve the same dataset with a smaller number of nodes. + +## There are two hard problems in computer science... 
+
+A problem with the above pattern is how to invalidate the information that
+the application is holding, in order to avoid presenting stale data to the
+user. For example after the application above locally cached the information
+for user:1234, Alice may update her username to Flora. Yet the application
+may continue to serve the old username for user:1234.
+
+Sometimes, depending on the exact application we are modeling, this isn't a
+big deal, so the client will just use a fixed maximum "time to live" for the
+cached information. Once a given amount of time has elapsed, the information
+will no longer be considered valid. More complex patterns, when using Redis,
+leverage the Pub/Sub system in order to send invalidation messages to
+listening clients. This can be made to work but is tricky and costly from
+the point of view of the bandwidth used, because often such patterns involve
+sending the invalidation messages to every client in the application, even
+if certain clients may not have any copy of the invalidated data. Moreover
+every application query altering the data requires the use of the [`PUBLISH`](/commands/publish)
+command, costing the database more CPU time to process this command.
+
+Regardless of what schema is used, there is a simple fact: many very large
+applications implement some form of client-side caching, because it is the
+next logical step to having a fast store or a fast cache server. For this
+reason Redis 6 implements direct support for client-side caching, in order
+to make this pattern much simpler to implement, more accessible, reliable,
+and efficient.
+
+## The Redis implementation of client-side caching
+
+The Redis client-side caching support is called _Tracking_, and has two modes:
+
+* In the default mode, the server remembers what keys a given client accessed, and sends invalidation messages when the same keys are modified. 
This costs memory in the server side, but sends invalidation messages only for the set of keys that the client might have in memory. +* In the _broadcasting_ mode, the server does not attempt to remember what keys a given client accessed, so this mode costs no memory at all in the server side. Instead clients subscribe to key prefixes such as `object:` or `user:`, and receive a notification message every time a key matching a subscribed prefix is touched. + +To recap, for now let's forget for a moment about the broadcasting mode, to +focus on the first mode. We'll describe broadcasting in more detail later. + +1. Clients can enable tracking if they want. Connections start without tracking enabled. +2. When tracking is enabled, the server remembers what keys each client requested during the connection lifetime (by sending read commands about such keys). +3. When a key is modified by some client, or is evicted because it has an associated expire time, or evicted because of a _maxmemory_ policy, all the clients with tracking enabled that may have the key cached, are notified with an _invalidation message_. +4. When clients receive invalidation messages, they are required to remove the corresponding keys, in order to avoid serving stale data. + +This is an example of the protocol: + +* Client 1 `->` Server: CLIENT TRACKING ON +* Client 1 `->` Server: GET foo +* (The server remembers that Client 1 may have the key "foo" cached) +* (Client 1 may remember the value of "foo" inside its local memory) +* Client 2 `->` Server: SET foo SomeOtherValue +* Server `->` Client 1: INVALIDATE "foo" + +This looks great superficially, but if you imagine 10k connected clients all +asking for millions of keys over long living connection, the server ends up +storing too much information. 
For this reason Redis uses two key ideas in +order to limit the amount of memory used server-side and the CPU cost of +handling the data structures implementing the feature: + +* The server remembers the list of clients that may have cached a given key in a single global table. This table is called the **Invalidation Table**. The invalidation table can contain a maximum number of entries. If a new key is inserted, the server may evict an older entry by pretending that such key was modified (even if it was not), and sending an invalidation message to the clients. Doing so, it can reclaim the memory used for this key, even if this will force the clients having a local copy of the key to evict it. +* Inside the invalidation table we don't really need to store pointers to clients' structures, that would force a garbage collection procedure when the client disconnects: instead what we do is just store client IDs (each Redis client has a unique numerical ID). If a client disconnects, the information will be incrementally garbage collected as caching slots are invalidated. +* There is a single keys namespace, not divided by database numbers. So if a client is caching the key `foo` in database 2, and some other client changes the value of the key `foo` in database 3, an invalidation message will still be sent. This way we can ignore database numbers reducing both the memory usage and the implementation complexity. + +## Two connections mode + +Using the new version of the Redis protocol, RESP3, supported by Redis 6, it is possible to run the data queries and receive the invalidation messages in the same connection. However many client implementations may prefer to implement client-side caching using two separated connections: one for data, and one for invalidation messages. For this reason when a client enables tracking, it can specify to redirect the invalidation messages to another connection by specifying the "client ID" of a different connection. 
Many data connections can redirect invalidation messages to the same connection, this is useful for clients implementing connection pooling. The two connections model is the only one that is also supported for RESP2 (which lacks the ability to multiplex different kind of information in the same connection). + +Here's an example of a complete session using the Redis protocol in the old RESP2 mode involving the following steps: enabling tracking redirecting to another connection, asking for a key, and getting an invalidation message once the key gets modified. + +To start, the client opens a first connection that will be used for invalidations, requests the connection ID, and subscribes via Pub/Sub to the special channel that is used to get invalidation messages when in RESP2 modes (remember that RESP2 is the usual Redis protocol, and not the more advanced protocol that you can use, optionally, with Redis 6 using the [`HELLO`](/commands/hello) command): + +``` +(Connection 1 -- used for invalidations) +CLIENT ID +:4 +SUBSCRIBE __redis__:invalidate +*3 +$9 +subscribe +$20 +__redis__:invalidate +:1 +``` + +Now we can enable tracking from the data connection: + +``` +(Connection 2 -- data connection) +CLIENT TRACKING on REDIRECT 4 ++OK + +GET foo +$3 +bar +``` + +The client may decide to cache `"foo" => "bar"` in the local memory. + +A different client will now modify the value of the "foo" key: + +``` +(Some other unrelated connection) +SET foo bar ++OK +``` + +As a result, the invalidations connection will receive a message that invalidates the specified key. + +``` +(Connection 1 -- used for invalidations) +*3 +$7 +message +$20 +__redis__:invalidate +*1 +$3 +foo +``` +The client will check if there are cached keys in this caching slot, and will evict the information that is no longer valid. + +Note that the third element of the Pub/Sub message is not a single key but +is a Redis array with just a single element. 
Since we send an array, if there +are groups of keys to invalidate, we can do that in a single message. +In case of a flush ([`FLUSHALL`](/commands/flushall) or [`FLUSHDB`](/commands/flushdb)), a `null` message will be sent. + +A very important thing to understand about client-side caching used with +RESP2 and a Pub/Sub connection in order to read the invalidation messages, +is that using Pub/Sub is entirely a trick **in order to reuse old client +implementations**, but actually the message is not really sent to a channel +and received by all the clients subscribed to it. Only the connection we +specified in the `REDIRECT` argument of the [`CLIENT`](/commands/client) command will actually +receive the Pub/Sub message, making the feature a lot more scalable. + +When RESP3 is used instead, invalidation messages are sent (either in the +same connection, or in the secondary connection when redirection is used) +as `push` messages (read the RESP3 specification for more information). + +## What tracking tracks + +As you can see clients do not need, by default, to tell the server what keys +they are caching. Every key that is mentioned in the context of a read-only +command is tracked by the server, because it *could be cached*. + +This has the obvious advantage of not requiring the client to tell the server +what it is caching. Moreover in many clients implementations, this is what +you want, because a good solution could be to just cache everything that is not +already cached, using a first-in first-out approach: we may want to cache a +fixed number of objects, every new data we retrieve, we could cache it, +discarding the oldest cached object. More advanced implementations may instead +drop the least used object or alike. + +Note that anyway if there is write traffic on the server, caching slots +will get invalidated during the course of the time. In general when the +server assumes that what we get we also cache, we are making a tradeoff: + +1. 
It is more efficient when the client tends to cache many things with a policy that welcomes new objects. +2. The server will be forced to retain more data about the client keys. +3. The client will receive useless invalidation messages about objects it did not cache. + +So there is an alternative described in the next section. + +## Opt-in caching + +Clients implementations may want to cache only selected keys, and communicate +explicitly to the server what they'll cache and what they will not. This will +require more bandwidth when caching new objects, but at the same time reduces +the amount of data that the server has to remember and the amount of +invalidation messages received by the client. + +In order to do this, tracking must be enabled using the OPTIN option: + + CLIENT TRACKING on REDIRECT 1234 OPTIN + +In this mode, by default, keys mentioned in read queries *are not supposed to be cached*, instead when a client wants to cache something, it must send a special command immediately before the actual command to retrieve the data: + + CLIENT CACHING YES + +OK + GET foo + "bar" + +The `CACHING` command affects the command executed immediately after it, +however in case the next command is [`MULTI`](/commands/multi), all the commands in the +transaction will be tracked. Similarly in case of Lua scripts, all the +commands executed by the script will be tracked. + +## Broadcasting mode + +So far we described the first client-side caching model that Redis implements. +There is another one, called broadcasting, that sees the problem from the +point of view of a different tradeoff, does not consume any memory on the +server side, but instead sends more invalidation messages to clients. +In this mode we have the following main behaviors: + +* Clients enable client-side caching using the `BCAST` option, specifying one or more prefixes using the `PREFIX` option. For instance: `CLIENT TRACKING on REDIRECT 10 BCAST PREFIX object: PREFIX user:`. 
If no prefix is specified at all, the prefix is assumed to be the empty string, so the client will receive invalidation messages for every key that gets modified. Instead if one or more prefixes are used, only keys matching one of the specified prefixes will be sent in the invalidation messages. +* The server does not store anything in the invalidation table. Instead it uses a different **Prefixes Table**, where each prefix is associated to a list of clients. +* No two prefixes can track overlapping parts of the keyspace. For instance, having the prefix "foo" and "foob" would not be allowed, since they would both trigger an invalidation for the key "foobar". However, just using the prefix "foo" is sufficient. +* Every time a key matching any of the prefixes is modified, all the clients subscribed to that prefix, will receive the invalidation message. +* The server will consume CPU proportional to the number of registered prefixes. If you have just a few, it is hard to see any difference. With a big number of prefixes the CPU cost can become quite large. +* In this mode the server can perform the optimization of creating a single reply for all the clients subscribed to a given prefix, and send the same reply to all. This helps to lower the CPU usage. + +## The NOLOOP option + +By default client-side tracking will send invalidation messages to the +client that modified the key. Sometimes clients want this, since they +implement very basic logic that does not involve automatically caching +writes locally. However, more advanced clients may want to cache even the +writes they are doing in the local in-memory table. In such case receiving +an invalidation message immediately after the write is a problem, since it +will force the client to evict the value it just cached. + +In this case it is possible to use the `NOLOOP` option: it works both +in normal and broadcasting mode. 
Using this option, clients are able to +tell the server they don't want to receive invalidation messages for keys +that they modified. + +## Avoiding race conditions + +When implementing client-side caching redirecting the invalidation messages +to a different connection, you should be aware that there is a possible +race condition. See the following example interaction, where we'll call +the data connection "D" and the invalidation connection "I": + + [D] client -> server: GET foo + [I] server -> client: Invalidate foo (somebody else touched it) + [D] server -> client: "bar" (the reply of "GET foo") + +As you can see, because the reply to the GET was slower to reach the +client, we received the invalidation message before the actual data that +is already no longer valid. So we'll keep serving a stale version of the +foo key. To avoid this problem, it is a good idea to populate the cache +when we send the command with a placeholder: + + Client cache: set the local copy of "foo" to "caching-in-progress" + [D] client-> server: GET foo. + [I] server -> client: Invalidate foo (somebody else touched it) + Client cache: delete "foo" from the local cache. + [D] server -> client: "bar" (the reply of "GET foo") + Client cache: don't set "bar" since the entry for "foo" is missing. + +Such a race condition is not possible when using a single connection for both +data and invalidation messages, since the order of the messages is always known +in that case. + +## What to do when losing connection with the server + +Similarly, if we lost the connection with the socket we use in order to +get the invalidation messages, we may end with stale data. In order to avoid +this problem, we need to do the following things: + +1. Make sure that if the connection is lost, the local cache is flushed. +2. Both when using RESP2 with Pub/Sub, or RESP3, ping the invalidation channel periodically (you can send PING commands even when the connection is in Pub/Sub mode!). 
If the connection looks broken and we are not able to receive ping backs, after a maximum amount of time, close the connection and flush the cache. + +## What to cache + +Clients may want to run internal statistics about the number of times +a given cached key was actually served in a request, to understand in the +future what is good to cache. In general: + +* We don't want to cache many keys that change continuously. +* We don't want to cache many keys that are requested very rarely. +* We want to cache keys that are requested often and change at a reasonable rate. For an example of key not changing at a reasonable rate, think of a global counter that is continuously [`INCR`](/commands/incr)emented. + +However simpler clients may just evict data using some random sampling just +remembering the last time a given cached value was served, trying to evict +keys that were not served recently. + +## Other hints for implementing client libraries + +* Handling TTLs: make sure you also request the key TTL and set the TTL in the local cache if you want to support caching keys with a TTL. +* Putting a max TTL on every key is a good idea, even if it has no TTL. This protects against bugs or connection issues that would make the client have old data in the local copy. +* Limiting the amount of memory used by clients is absolutely needed. There must be a way to evict old keys when new ones are added. + +## Limiting the amount of memory used by Redis + +Be sure to configure a suitable value for the maximum number of keys remembered by Redis or alternatively use the BCAST mode that consumes no memory at all on the Redis side. Note that the memory consumed by Redis when BCAST is not used, is proportional both to the number of keys tracked and the number of clients requesting such keys. 
+ diff --git a/content/develop/manual/keyspace-notifications.md b/content/develop/manual/keyspace-notifications.md new file mode 100644 index 0000000000..2577c62dea --- /dev/null +++ b/content/develop/manual/keyspace-notifications.md @@ -0,0 +1,191 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: 'Monitor changes to Redis keys and values in real time + + ' +linkTitle: Keyspace notifications +title: Redis keyspace notifications +weight: 4 +--- + +Keyspace notifications allow clients to subscribe to Pub/Sub channels in order +to receive events affecting the Redis data set in some way. + +Examples of events that can be received are: + +* All the commands affecting a given key. +* All the keys receiving an LPUSH operation. +* All the keys expiring in the database 0. + +Note: Redis Pub/Sub is *fire and forget* that is, if your Pub/Sub client disconnects, +and reconnects later, all the events delivered during the time the client was +disconnected are lost. + +### Type of events + +Keyspace notifications are implemented by sending two distinct types of events +for every operation affecting the Redis data space. For instance a [`DEL`](/commands/del) +operation targeting the key named `mykey` in database `0` will trigger +the delivering of two messages, exactly equivalent to the following two +[`PUBLISH`](/commands/publish) commands: + + PUBLISH __keyspace@0__:mykey del + PUBLISH __keyevent@0__:del mykey + +The first channel listens to all the events targeting +the key `mykey` and the other channel listens only to `del` operation +events on the key `mykey` + +The first kind of event, with `keyspace` prefix in the channel is called +a **Key-space notification**, while the second, with the `keyevent` prefix, +is called a **Key-event notification**. 
+ +In the previous example a `del` event was generated for the key `mykey` resulting +in two messages: + +* The Key-space channel receives as message the name of the event. +* The Key-event channel receives as message the name of the key. + +It is possible to enable only one kind of notification in order to deliver +just the subset of events we are interested in. + +### Configuration + +By default keyspace event notifications are disabled because while not +very sensible the feature uses some CPU power. Notifications are enabled +using the `notify-keyspace-events` of redis.conf or via the **CONFIG SET**. + +Setting the parameter to the empty string disables notifications. +In order to enable the feature a non-empty string is used, composed of multiple +characters, where every character has a special meaning according to the +following table: + + K Keyspace events, published with __keyspace@__ prefix. + E Keyevent events, published with __keyevent@__ prefix. + g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... + $ String commands + l List commands + s Set commands + h Hash commands + z Sorted set commands + t Stream commands + d Module key type events + x Expired events (events generated every time a key expires) + e Evicted events (events generated when a key is evicted for maxmemory) + m Key miss events (events generated when a key that doesn't exist is accessed) + n New key events (Note: not included in the 'A' class) + A Alias for "g$lshztxed", so that the "AKE" string means all the events except "m" and "n". + +At least `K` or `E` should be present in the string, otherwise no event +will be delivered regardless of the rest of the string. + +For instance to enable just Key-space events for lists, the configuration +parameter must be set to `Kl`, and so forth. + +You can use the string `KEA` to enable most types of events. 
+
+### Events generated by different commands
+
+Different commands generate different kinds of events according to the following list.
+
+* [`DEL`](/commands/del) generates a `del` event for every deleted key.
+* [`RENAME`](/commands/rename) generates two events, a `rename_from` event for the source key, and a `rename_to` event for the destination key.
+* [`MOVE`](/commands/move) generates two events, a `move_from` event for the source key, and a `move_to` event for the destination key.
+* [`COPY`](/commands/copy) generates a `copy_to` event.
+* [`MIGRATE`](/commands/migrate) generates a `del` event if the source key is removed.
+* [`RESTORE`](/commands/restore) generates a `restore` event for the key.
+* [`EXPIRE`](/commands/expire) and all its variants ([`PEXPIRE`](/commands/pexpire), [`EXPIREAT`](/commands/expireat), [`PEXPIREAT`](/commands/pexpireat)) generate an `expire` event when called with a positive timeout (or a future timestamp). Note that when these commands are called with a negative timeout value or timestamp in the past, the key is deleted and only a `del` event is generated instead.
+* [`SORT`](/commands/sort) generates a `sortstore` event when `STORE` is used to set a new key. If the resulting list is empty, and the `STORE` option is used, and there was already an existing key with that name, the result is that the key is deleted, so a `del` event is generated in this condition.
+* [`SET`](/commands/set) and all its variants ([`SETEX`](/commands/setex), [`SETNX`](/commands/setnx), [`GETSET`](/commands/getset)) generate `set` events. However [`SETEX`](/commands/setex) will also generate an `expire` event.
+* [`MSET`](/commands/mset) generates a separate `set` event for every key.
+* [`SETRANGE`](/commands/setrange) generates a `setrange` event.
+* [`INCR`](/commands/incr), [`DECR`](/commands/decr), [`INCRBY`](/commands/incrby), [`DECRBY`](/commands/decrby) commands all generate `incrby` events.
+* [`INCRBYFLOAT`](/commands/incrbyfloat) generates an `incrbyfloat` event.
+* [`APPEND`](/commands/append) generates an `append` event.
+* [`LPUSH`](/commands/lpush) and [`LPUSHX`](/commands/lpushx) generate a single `lpush` event, even in the variadic case.
+* [`RPUSH`](/commands/rpush) and [`RPUSHX`](/commands/rpushx) generate a single `rpush` event, even in the variadic case.
+* [`RPOP`](/commands/rpop) generates an `rpop` event. Additionally a `del` event is generated if the key is removed because the last element from the list was popped.
+* [`LPOP`](/commands/lpop) generates an `lpop` event. Additionally a `del` event is generated if the key is removed because the last element from the list was popped.
+* [`LINSERT`](/commands/linsert) generates an `linsert` event.
+* [`LSET`](/commands/lset) generates an `lset` event.
+* [`LREM`](/commands/lrem) generates an `lrem` event, and additionally a `del` event if the resulting list is empty and the key is removed.
+* [`LTRIM`](/commands/ltrim) generates an `ltrim` event, and additionally a `del` event if the resulting list is empty and the key is removed.
+* [`RPOPLPUSH`](/commands/rpoplpush) and [`BRPOPLPUSH`](/commands/brpoplpush) generate an `rpop` event and an `lpush` event. In both cases the order is guaranteed (the `lpush` event will always be delivered after the `rpop` event). Additionally a `del` event will be generated if the resulting list is zero length and the key is removed.
+* [`LMOVE`](/commands/lmove) and [`BLMOVE`](/commands/blmove) generate an `lpop`/`rpop` event (depending on the wherefrom argument) and an `lpush`/`rpush` event (depending on the whereto argument). In both cases the order is guaranteed (the `lpush`/`rpush` event will always be delivered after the `lpop`/`rpop` event). Additionally a `del` event will be generated if the resulting list is zero length and the key is removed.
+* [`HSET`](/commands/hset), [`HSETNX`](/commands/hsetnx) and [`HMSET`](/commands/hmset) all generate a single `hset` event.
+* [`HINCRBY`](/commands/hincrby) generates an `hincrby` event.
+* [`HINCRBYFLOAT`](/commands/hincrbyfloat) generates an `hincrbyfloat` event.
+* [`HDEL`](/commands/hdel) generates a single `hdel` event, and an additional `del` event if the resulting hash is empty and the key is removed.
+* [`SADD`](/commands/sadd) generates a single `sadd` event, even in the variadic case.
+* [`SREM`](/commands/srem) generates a single `srem` event, and an additional `del` event if the resulting set is empty and the key is removed.
+* [`SMOVE`](/commands/smove) generates an `srem` event for the source key, and an `sadd` event for the destination key.
+* [`SPOP`](/commands/spop) generates an `spop` event, and an additional `del` event if the resulting set is empty and the key is removed.
+* [`SINTERSTORE`](/commands/sinterstore), [`SUNIONSTORE`](/commands/sunionstore), [`SDIFFSTORE`](/commands/sdiffstore) generate `sinterstore`, `sunionstore`, `sdiffstore` events respectively. In the special case the resulting set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed.
+* `ZINCR` generates a `zincr` event.
+* [`ZADD`](/commands/zadd) generates a single `zadd` event even when multiple elements are added.
+* [`ZREM`](/commands/zrem) generates a single `zrem` event even when multiple elements are deleted. When the resulting sorted set is empty and the key is removed, an additional `del` event is generated.
+* `ZREMBYSCORE` generates a single `zrembyscore` event. When the resulting sorted set is empty and the key is removed, an additional `del` event is generated.
+* `ZREMBYRANK` generates a single `zrembyrank` event. When the resulting sorted set is empty and the key is removed, an additional `del` event is generated.
+* [`ZDIFFSTORE`](/commands/zdiffstore), [`ZINTERSTORE`](/commands/zinterstore) and [`ZUNIONSTORE`](/commands/zunionstore) respectively generate `zdiffstore`, `zinterstore` and `zunionstore` events. In the special case the resulting sorted set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed.
+* [`XADD`](/commands/xadd) generates an `xadd` event, possibly followed by an `xtrim` event when used with the `MAXLEN` subcommand.
+* [`XDEL`](/commands/xdel) generates a single `xdel` event even when multiple entries are deleted.
+* [`XGROUP CREATE`](/commands/xgroup-create) generates an `xgroup-create` event.
+* [`XGROUP CREATECONSUMER`](/commands/xgroup-createconsumer) generates an `xgroup-createconsumer` event.
+* [`XGROUP DELCONSUMER`](/commands/xgroup-delconsumer) generates an `xgroup-delconsumer` event.
+* [`XGROUP DESTROY`](/commands/xgroup-destroy) generates an `xgroup-destroy` event.
+* [`XGROUP SETID`](/commands/xgroup-setid) generates an `xgroup-setid` event.
+* [`XSETID`](/commands/xsetid) generates an `xsetid` event.
+* [`XTRIM`](/commands/xtrim) generates an `xtrim` event.
+* [`PERSIST`](/commands/persist) generates a `persist` event if the expiry time associated with the key has been successfully deleted.
+* Every time a key with a time to live associated is removed from the data set because it expired, an `expired` event is generated.
+* Every time a key is evicted from the data set in order to free memory as a result of the `maxmemory` policy, an `evicted` event is generated.
+* Every time a new key is added to the data set, a `new` event is generated.
+
+**IMPORTANT** all the commands generate events only if the target key is really modified. For instance an [`SREM`](/commands/srem) deleting a non-existing element from a Set will not actually change the value of the key, so no event will be generated.
+ +If in doubt about how events are generated for a given command, the simplest +thing to do is to watch yourself: + + $ redis-cli config set notify-keyspace-events KEA + $ redis-cli --csv psubscribe '__key*__:*' + Reading messages... (press Ctrl-C to quit) + "psubscribe","__key*__:*",1 + +At this point use `redis-cli` in another terminal to send commands to the +Redis server and watch the events generated: + + "pmessage","__key*__:*","__keyspace@0__:foo","set" + "pmessage","__key*__:*","__keyevent@0__:set","foo" + ... + +### Timing of expired events + +Keys with a time to live associated are expired by Redis in two ways: + +* When the key is accessed by a command and is found to be expired. +* Via a background system that looks for expired keys in the background, incrementally, in order to be able to also collect keys that are never accessed. + +The `expired` events are generated when a key is accessed and is found to be expired by one of the above systems, as a result there are no guarantees that the Redis server will be able to generate the `expired` event at the time the key time to live reaches the value of zero. + +If no command targets the key constantly, and there are many keys with a TTL associated, there can be a significant delay between the time the key time to live drops to zero, and the time the `expired` event is generated. + +Basically `expired` events **are generated when the Redis server deletes the key** and not when the time to live theoretically reaches the value of zero. + +### Events in a cluster + +Every node of a Redis cluster generates events about its own subset of the keyspace as described above. However, unlike regular Pub/Sub communication in a cluster, events' notifications **are not** broadcasted to all nodes. Put differently, keyspace events are node-specific. This means that to receive all keyspace events of a cluster, clients need to subscribe to each of the nodes. + +@history + +* `>= 6.0`: Key miss events were added. 
+* `>= 7.0`: Event type `new` added + diff --git a/content/develop/manual/keyspace.md b/content/develop/manual/keyspace.md new file mode 100644 index 0000000000..56dfce68ac --- /dev/null +++ b/content/develop/manual/keyspace.md @@ -0,0 +1,154 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: 'Managing keys in Redis: Key expiration, scanning, altering and querying + the key space + + ' +linkTitle: Keyspace +title: Keyspace +weight: 1 +--- + +Redis keys are binary safe; this means that you can use any binary sequence as a +key, from a string like "foo" to the content of a JPEG file. +The empty string is also a valid key. + +A few other rules about keys: + +* Very long keys are not a good idea. For instance a key of 1024 bytes is a bad + idea not only memory-wise, but also because the lookup of the key in the + dataset may require several costly key-comparisons. Even when the task at hand + is to match the existence of a large value, hashing it (for example + with SHA1) is a better idea, especially from the perspective of memory + and bandwidth. +* Very short keys are often not a good idea. There is little point in writing + "u1000flw" as a key if you can instead write "user:1000:followers". The latter + is more readable and the added space is minor compared to the space used by + the key object itself and the value object. While short keys will obviously + consume a bit less memory, your job is to find the right balance. +* Try to stick with a schema. For instance "object-type:id" is a good + idea, as in "user:1000". Dots or dashes are often used for multi-word + fields, as in "comment:4321:reply.to" or "comment:4321:reply-to". +* The maximum allowed key size is 512 MB. + +## Altering and querying the key space + +There are commands that are not defined on particular types, but are useful +in order to interact with the space of keys, and thus, can be used with +keys of any type. 
+ +For example the [`EXISTS`](/commands/exists) command returns 1 or 0 to signal if a given key +exists or not in the database, while the [`DEL`](/commands/del) command deletes a key +and associated value, whatever the value is. + + > set mykey hello + OK + > exists mykey + (integer) 1 + > del mykey + (integer) 1 + > exists mykey + (integer) 0 + +From the examples you can also see how [`DEL`](/commands/del) itself returns 1 or 0 depending on whether +the key was removed (it existed) or not (there was no such key with that +name). + +There are many key space related commands, but the above two are the +essential ones together with the [`TYPE`](/commands/type) command, which returns the kind +of value stored at the specified key: + + > set mykey x + OK + > type mykey + string + > del mykey + (integer) 1 + > type mykey + none + +## Key expiration + +Before moving on, we should look at an important Redis feature that works regardless of the type of value you're storing: key expiration. Key expiration lets you set a timeout for a key, also known as a "time to live", or "TTL". When the time to live elapses, the key is automatically destroyed. + +A few important notes about key expiration: + +* They can be set both using seconds or milliseconds precision. +* However the expire time resolution is always 1 millisecond. +* Information about expires are replicated and persisted on disk, the time virtually passes when your Redis server remains stopped (this means that Redis saves the date at which a key will expire). + +Use the [`EXPIRE`](/commands/expire) command to set a key's expiration: + + > set key some-value + OK + > expire key 5 + (integer) 1 + > get key (immediately) + "some-value" + > get key (after some time) + (nil) + +The key vanished between the two [`GET`](/commands/get) calls, since the second call was +delayed more than 5 seconds. 
In the example above we used [`EXPIRE`](/commands/expire) in +order to set the expire (it can also be used in order to set a different +expire to a key already having one, like [`PERSIST`](/commands/persist) can be used in order +to remove the expire and make the key persistent forever). However we +can also create keys with expires using other Redis commands. For example +using [`SET`](/commands/set) options: + + > set key 100 ex 10 + OK + > ttl key + (integer) 9 + +The example above sets a key with the string value `100`, having an expire +of ten seconds. Later the [`TTL`](/commands/ttl) command is called in order to check the +remaining time to live for the key. + +In order to set and check expires in milliseconds, check the [`PEXPIRE`](/commands/pexpire) and +the [`PTTL`](/commands/pttl) commands, and the full list of [`SET`](/commands/set) options. + +## Navigating the keyspace + +### Scan +To incrementally iterate over the keys in a Redis database in an efficient manner, you can use the [`SCAN`](/commands/scan) command. + +Since [`SCAN`](/commands/scan) allows for incremental iteration, returning only a small number of elements per call, it can be used in production without the downside of commands like [`KEYS`](/commands/keys) or [`SMEMBERS`](/commands/smembers) that may block the server for a long time (even several seconds) when called against big collections of keys or elements. + +However while blocking commands like [`SMEMBERS`](/commands/smembers) are able to provide all the elements that are part of a Set in a given moment. +The [`SCAN`](/commands/scan) family of commands only offer limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process. + +### Keys + +Another way to iterate over the keyspace is to use the [`KEYS`](/commands/keys) command, but this approach should be used with care, since [`KEYS`](/commands/keys) will block the Redis server until all keys are returned. 
+ +**Warning**: consider [`KEYS`](/commands/keys) as a command that should only be used in production +environments with extreme care. + +[`KEYS`](/commands/keys) may ruin performance when it is executed against large databases. +This command is intended for debugging and special operations, such as changing +your keyspace layout. +Don't use [`KEYS`](/commands/keys) in your regular application code. +If you're looking for a way to find keys in a subset of your keyspace, consider +using [`SCAN`](/commands/scan) or [sets][tdts]. + +[tdts]: /topics/data-types#sets + +Supported glob-style patterns: + +* `h?llo` matches `hello`, `hallo` and `hxllo` +* `h*llo` matches `hllo` and `heeeello` +* `h[ae]llo` matches `hello` and `hallo,` but not `hillo` +* `h[^e]llo` matches `hallo`, `hbllo`, ... but not `hello` +* `h[a-b]llo` matches `hallo` and `hbllo` + +Use `\` to escape special characters if you want to match them verbatim. diff --git a/content/develop/manual/patterns/_index.md b/content/develop/manual/patterns/_index.md new file mode 100644 index 0000000000..714a33b1c3 --- /dev/null +++ b/content/develop/manual/patterns/_index.md @@ -0,0 +1,18 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: Novel patterns for working with Redis data structures +linkTitle: Patterns +title: Redis programming patterns +weight: 6 +--- + +The following documents describe some novel development patterns you can use with Redis. 
diff --git a/content/develop/manual/patterns/bulk-loading.md b/content/develop/manual/patterns/bulk-loading.md new file mode 100644 index 0000000000..3ce70b0d98 --- /dev/null +++ b/content/develop/manual/patterns/bulk-loading.md @@ -0,0 +1,156 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: 'Writing data in bulk using the Redis protocol + + ' +linkTitle: Bulk loading +title: Bulk loading +weight: 1 +--- + +Bulk loading is the process of loading Redis with a large amount of pre-existing data. Ideally, you want to perform this operation quickly and efficiently. This document describes some strategies for bulk loading data in Redis. + +## Bulk loading using the Redis protocol + +Using a normal Redis client to perform bulk loading is not a good idea +for a few reasons: the naive approach of sending one command after the other +is slow because you have to pay for the round trip time for every command. +It is possible to use pipelining, but for bulk loading of many records +you need to write new commands while you read replies at the same time to +make sure you are inserting as fast as possible. + +Only a small percentage of clients support non-blocking I/O, and not all the +clients are able to parse the replies in an efficient way in order to maximize +throughput. For all of these reasons the preferred way to mass import data into +Redis is to generate a text file containing the Redis protocol, in raw format, +in order to call the commands needed to insert the required data. + +For instance if I need to generate a large data set where there are billions +of keys in the form: `keyN -> ValueN' I will create a file containing the +following commands in the Redis protocol format: + + SET Key0 Value0 + SET Key1 Value1 + ... + SET KeyN ValueN + +Once this file is created, the remaining action is to feed it to Redis +as fast as possible. 
In the past the way to do this was to use the +`netcat` with the following command: + + (cat data.txt; sleep 10) | nc localhost 6379 > /dev/null + +However this is not a very reliable way to perform mass import because netcat +does not really know when all the data was transferred and can't check for +errors. In 2.6 or later versions of Redis the `redis-cli` utility +supports a new mode called **pipe mode** that was designed in order to perform +bulk loading. + +Using the pipe mode the command to run looks like the following: + + cat data.txt | redis-cli --pipe + +That will produce an output similar to this: + + All data transferred. Waiting for the last reply... + Last reply received from server. + errors: 0, replies: 1000000 + +The redis-cli utility will also make sure to only redirect errors received +from the Redis instance to the standard output. + +### Generating Redis Protocol + +The Redis protocol is extremely simple to generate and parse, and is +[Documented here](/topics/protocol). However in order to generate protocol for +the goal of bulk loading you don't need to understand every detail of the +protocol, but just that every command is represented in the following way: + + * + $ + + + ... + + +Where `` means "\r" (or ASCII character 13) and `` means "\n" (or ASCII character 10). + +For instance the command **SET key value** is represented by the following protocol: + + *3 + $3 + SET + $3 + key + $5 + value + +Or represented as a quoted string: + + "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n" + +The file you need to generate for bulk loading is just composed of commands +represented in the above way, one after the other. 
+ +The following Ruby function generates valid protocol: + + def gen_redis_proto(*cmd) + proto = "" + proto << "*"+cmd.length.to_s+"\r\n" + cmd.each{|arg| + proto << "$"+arg.to_s.bytesize.to_s+"\r\n" + proto << arg.to_s+"\r\n" + } + proto + end + + puts gen_redis_proto("SET","mykey","Hello World!").inspect + +Using the above function it is possible to easily generate the key value pairs +in the above example, with this program: + + (0...1000).each{|n| + STDOUT.write(gen_redis_proto("SET","Key#{n}","Value#{n}")) + } + +We can run the program directly in pipe to redis-cli in order to perform our +first mass import session. + + $ ruby proto.rb | redis-cli --pipe + All data transferred. Waiting for the last reply... + Last reply received from server. + errors: 0, replies: 1000 + +### How the pipe mode works under the hood + +The magic needed inside the pipe mode of redis-cli is to be as fast as netcat +and still be able to understand when the last reply was sent by the server +at the same time. + +This is obtained in the following way: + ++ redis-cli --pipe tries to send data as fast as possible to the server. ++ At the same time it reads data when available, trying to parse it. ++ Once there is no more data to read from stdin, it sends a special **ECHO** +command with a random 20 byte string: we are sure this is the latest command +sent, and we are sure we can match the reply checking if we receive the same +20 bytes as a bulk reply. ++ Once this special final command is sent, the code receiving replies starts +to match replies with these 20 bytes. When the matching reply is reached it +can exit with success. + +Using this trick we don't need to parse the protocol we send to the server +in order to understand how many commands we are sending, but just the replies. + +However while parsing the replies we take a counter of all the replies parsed +so that at the end we are able to tell the user the amount of commands +transferred to the server by the mass insert session. 
diff --git a/content/develop/manual/patterns/distributed-locks.md b/content/develop/manual/patterns/distributed-locks.md new file mode 100644 index 0000000000..55526d4fc2 --- /dev/null +++ b/content/develop/manual/patterns/distributed-locks.md @@ -0,0 +1,242 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: 'A distributed lock pattern with Redis + + ' +linkTitle: Distributed locks +title: Distributed Locks with Redis +weight: 1 +--- +Distributed locks are a very useful primitive in many environments where +different processes must operate with shared resources in a mutually +exclusive way. + +There are a number of libraries and blog posts describing how to implement +a DLM (Distributed Lock Manager) with Redis, but every library uses a different +approach, and many use a simple approach with lower guarantees compared to +what can be achieved with slightly more complex designs. + +This page describes a more canonical algorithm to implement +distributed locks with Redis. We propose an algorithm, called **Redlock**, +which implements a DLM which we believe to be safer than the vanilla single +instance approach. We hope that the community will analyze it, provide +feedback, and use it as a starting point for the implementations or more +complex or alternative designs. + +## Implementations + +Before describing the algorithm, here are a few links to implementations +already available that can be used for reference. + +* [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). There is also a [fork of Redlock-rb](https://github.com/leandromoreira/redlock-rb) that adds a gem for easy distribution. +* [Redlock-py](https://github.com/SPSCommerce/redlock-py) (Python implementation). +* [Pottery](https://github.com/brainix/pottery#redlock) (Python implementation). +* [Aioredlock](https://github.com/joanvila/aioredlock) (Asyncio Python implementation). 
+* [Redlock-php](https://github.com/ronnylt/redlock-php) (PHP implementation). +* [PHPRedisMutex](https://github.com/malkusch/lock#phpredismutex) (further PHP implementation). +* [cheprasov/php-redis-lock](https://github.com/cheprasov/php-redis-lock) (PHP library for locks). +* [rtckit/react-redlock](https://github.com/rtckit/reactphp-redlock) (Async PHP implementation). +* [Redsync](https://github.com/go-redsync/redsync) (Go implementation). +* [Redisson](https://github.com/mrniko/redisson) (Java implementation). +* [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). +* [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (C++ implementation). +* [Redis-plus-plus](https://github.com/sewenew/redis-plus-plus/#redlock) (C++ implementation). +* [Redlock-cs](https://github.com/kidfashion/redlock-cs) (C#/.NET implementation). +* [RedLock.net](https://github.com/samcook/RedLock.net) (C#/.NET implementation). Includes async and lock extension support. +* [ScarletLock](https://github.com/psibernetic/scarletlock) (C# .NET implementation with configurable datastore). +* [Redlock4Net](https://github.com/LiZhenNet/Redlock4Net) (C# .NET implementation). +* [node-redlock](https://github.com/mike-marcacci/node-redlock) (NodeJS implementation). Includes support for lock extension. +* [Deno DLM](https://github.com/oslabs-beta/Deno-Redlock) (Deno implementation) +* [Rslock](https://github.com/hexcowboy/rslock) (Rust implementation). Includes async and lock extension support. + +## Safety and Liveness Guarantees + +We are going to model our design with just three properties that, from our point of view, are the minimum guarantees needed to use distributed locks in an effective way. + +1. Safety property: Mutual exclusion. At any given moment, only one client can hold a lock. +2. Liveness property A: Deadlock free. Eventually it is always possible to acquire a lock, even if the client that locked a resource crashes or gets partitioned. +3. 
Liveness property B: Fault tolerance. As long as the majority of Redis nodes are up, clients are able to acquire and release locks. + +## Why Failover-based Implementations Are Not Enough + +To understand what we want to improve, let’s analyze the current state of affairs with most Redis-based distributed lock libraries. + +The simplest way to use Redis to lock a resource is to create a key in an instance. The key is usually created with a limited time to live, using the Redis expires feature, so that eventually it will get released (property 2 in our list). When the client needs to release the resource, it deletes the key. + +Superficially this works well, but there is a problem: this is a single point of failure in our architecture. What happens if the Redis master goes down? +Well, let’s add a replica! And use it if the master is unavailable. This is unfortunately not viable. By doing so we can’t implement our safety property of mutual exclusion, because Redis replication is asynchronous. + +There is a race condition with this model: + +1. Client A acquires the lock in the master. +2. The master crashes before the write to the key is transmitted to the replica. +3. The replica gets promoted to master. +4. Client B acquires the lock to the same resource A already holds a lock for. **SAFETY VIOLATION!** + +Sometimes it is perfectly fine that, under special circumstances, for example during a failure, multiple clients can hold the lock at the same time. +If this is the case, you can use your replication based solution. Otherwise we suggest to implement the solution described in this document. 
+ +## Correct Implementation with a Single Instance + +Before trying to overcome the limitation of the single instance setup described above, let’s check how to do it correctly in this simple case, since this is actually a viable solution in applications where a race condition from time to time is acceptable, and because locking into a single instance is the foundation we’ll use for the distributed algorithm described here. + +To acquire the lock, the way to go is the following: + + SET resource_name my_random_value NX PX 30000 + +The command will set the key only if it does not already exist (`NX` option), with an expire of 30000 milliseconds (`PX` option). +The key is set to a value “my\_random\_value”. This value must be unique across all clients and all lock requests. + +Basically the random value is used in order to release the lock in a safe way, with a script that tells Redis: remove the key only if it exists and the value stored at the key is exactly the one I expect to be. This is accomplished by the following Lua script: + + if redis.call("get",KEYS[1]) == ARGV[1] then + return redis.call("del",KEYS[1]) + else + return 0 + end + +This is important in order to avoid removing a lock that was created by another client. For example a client may acquire the lock, get blocked performing some operation for longer than the lock validity time (the time at which the key will expire), and later remove the lock, that was already acquired by some other client. +Using just [`DEL`](/commands/del) is not safe as a client may remove another client's lock. With the above script instead every lock is “signed” with a random string, so the lock will be removed only if it is still the one that was set by the client trying to remove it. + +What should this random string be? We assume it’s 20 bytes from `/dev/urandom`, but you can find cheaper ways to make it unique enough for your tasks. 
+For example a safe pick is to seed RC4 with `/dev/urandom`, and generate a pseudo random stream from that. +A simpler solution is to use a UNIX timestamp with microsecond precision, concatenating the timestamp with a client ID. It is not as safe, but probably sufficient for most environments. + +The "lock validity time" is the time we use as the key's time to live. It is both the auto release time, and the time the client has in order to perform the operation required before another client may be able to acquire the lock again, without technically violating the mutual exclusion guarantee, which is only limited to a given window of time from the moment the lock is acquired. + +So now we have a good way to acquire and release the lock. With this system, reasoning about a non-distributed system composed of a single, always available, instance, is safe. Let’s extend the concept to a distributed system where we don’t have such guarantees. + +## The Redlock Algorithm + +In the distributed version of the algorithm we assume we have N Redis masters. Those nodes are totally independent, so we don’t use replication or any other implicit coordination system. We already described how to acquire and release the lock safely in a single instance. We take for granted that the algorithm will use this method to acquire and release the lock in a single instance. In our examples we set N=5, which is a reasonable value, so we need to run 5 Redis masters on different computers or virtual machines in order to ensure that they’ll fail in a mostly independent way. + +In order to acquire the lock, the client performs the following operations: + +1. It gets the current time in milliseconds. +2. It tries to acquire the lock in all the N instances sequentially, using the same key name and random value in all the instances. During step 2, when setting the lock in each instance, the client uses a timeout which is small compared to the total lock auto-release time in order to acquire it. 
For example if the auto-release time is 10 seconds, the timeout could be in the ~ 5-50 milliseconds range. This prevents the client from remaining blocked for a long time trying to talk with a Redis node which is down: if an instance is not available, we should try to talk with the next instance ASAP. +3. The client computes how much time elapsed in order to acquire the lock, by subtracting from the current time the timestamp obtained in step 1. If and only if the client was able to acquire the lock in the majority of the instances (at least 3), and the total time elapsed to acquire the lock is less than lock validity time, the lock is considered to be acquired. +4. If the lock was acquired, its validity time is considered to be the initial validity time minus the time elapsed, as computed in step 3. +5. If the client failed to acquire the lock for some reason (either it was not able to lock N/2+1 instances or the validity time is negative), it will try to unlock all the instances (even the instances it believed it was not able to lock). + +### Is the Algorithm Asynchronous? + +The algorithm relies on the assumption that while there is no synchronized clock across the processes, the local time in every process updates at approximately at the same rate, with a small margin of error compared to the auto-release time of the lock. This assumption closely resembles a real-world computer: every computer has a local clock and we can usually rely on different computers to have a clock drift which is small. + +At this point we need to better specify our mutual exclusion rule: it is guaranteed only as long as the client holding the lock terminates its work within the lock validity time (as obtained in step 3), minus some time (just a few milliseconds in order to compensate for clock drift between processes). 
+ +This paper contains more information about similar systems requiring a bound *clock drift*: [Leases: an efficient fault-tolerant mechanism for distributed file cache consistency](http://dl.acm.org/citation.cfm?id=74870). + +### Retry on Failure + +When a client is unable to acquire the lock, it should try again after a random delay in order to try to desynchronize multiple clients trying to acquire the lock for the same resource at the same time (this may result in a split brain condition where nobody wins). Also the faster a client tries to acquire the lock in the majority of Redis instances, the smaller the window for a split brain condition (and the need for a retry), so ideally the client should try to send the [`SET`](/commands/set) commands to the N instances at the same time using multiplexing. + +It is worth stressing how important it is for clients that fail to acquire the majority of locks, to release the (partially) acquired locks ASAP, so that there is no need to wait for key expiry in order for the lock to be acquired again (however if a network partition happens and the client is no longer able to communicate with the Redis instances, there is an availability penalty to pay as it waits for key expiration). + +### Releasing the Lock + +Releasing the lock is simple, and can be performed whether or not the client believes it was able to successfully lock a given instance. + +### Safety Arguments + +Is the algorithm safe? Let's examine what happens in different scenarios. + +To start let’s assume that a client is able to acquire the lock in the majority of instances. All the instances will contain a key with the same time to live. However, the key was set at different times, so the keys will also expire at different times. 
But if the first key was set at worst at time T1 (the time we sample before contacting the first server) and the last key was set at worst at time T2 (the time we obtained the reply from the last server), we are sure that the first key to expire in the set will exist for at least `MIN_VALIDITY=TTL-(T2-T1)-CLOCK_DRIFT`. All the other keys will expire later, so we are sure that the keys will be simultaneously set for at least this time. + +During the time that the majority of keys are set, another client will not be able to acquire the lock, since N/2+1 SET NX operations can’t succeed if N/2+1 keys already exist. So if a lock was acquired, it is not possible to re-acquire it at the same time (violating the mutual exclusion property). + +However we want to also make sure that multiple clients trying to acquire the lock at the same time can’t simultaneously succeed. + +If a client locked the majority of instances using a time near, or greater, than the lock maximum validity time (the TTL we use for SET basically), it will consider the lock invalid and will unlock the instances, so we only need to consider the case where a client was able to lock the majority of instances in a time which is less than the validity time. In this case for the argument already expressed above, for `MIN_VALIDITY` no client should be able to re-acquire the lock. So multiple clients will be able to lock N/2+1 instances at the same time (with "time" being the end of Step 2) only when the time to lock the majority was greater than the TTL time, making the lock invalid. + +### Liveness Arguments + +The system liveness is based on three main features: + +1. The auto release of the lock (since keys expire): eventually keys are available again to be locked. +2. The fact that clients, usually, will cooperate removing the locks when the lock was not acquired, or when the lock was acquired and the work terminated, making it likely that we don’t have to wait for keys to expire to re-acquire the lock. 
+3. The fact that when a client needs to retry a lock, it waits a time which is comparably greater than the time needed to acquire the majority of locks, in order to probabilistically make split brain conditions during resource contention unlikely. + +However, we pay an availability penalty equal to [`TTL`](/commands/ttl) time on network partitions, so if there are continuous partitions, we can pay this penalty indefinitely. +This happens every time a client acquires a lock and gets partitioned away before being able to remove the lock. + +Basically if there are infinite continuous network partitions, the system may become not available for an infinite amount of time. + +### Performance, Crash Recovery and fsync + +Many users using Redis as a lock server need high performance in terms of both latency to acquire and release a lock, and number of acquire / release operations that it is possible to perform per second. In order to meet this requirement, the strategy to talk with the N Redis servers to reduce latency is definitely multiplexing (putting the socket in non-blocking mode, send all the commands, and read all the commands later, assuming that the RTT between the client and each instance is similar). + +However there is another consideration around persistence if we want to target a crash-recovery system model. + +Basically to see the problem here, let’s assume we configure Redis without persistence at all. A client acquires the lock in 3 of 5 instances. One of the instances where the client was able to acquire the lock is restarted, at this point there are again 3 instances that we can lock for the same resource, and another client can lock it again, violating the safety property of exclusivity of lock. + +If we enable AOF persistence, things will improve quite a bit. For example we can upgrade a server by sending it a [`SHUTDOWN`](/commands/shutdown) command and restarting it. 
Because Redis expires are semantically implemented so that time still elapses when the server is off, all our requirements are fine. +However everything is fine as long as it is a clean shutdown. What about a power outage? If Redis is configured, as by default, to fsync on disk every second, it is possible that after a restart our key is missing. In theory, if we want to guarantee the lock safety in the face of any kind of instance restart, we need to enable `fsync=always` in the persistence settings. This will affect performance due to the additional sync overhead. + +However things are better than they look like at a first glance. Basically, +the algorithm safety is retained as long as when an instance restarts after a +crash, it no longer participates to any **currently active** lock. This means that the +set of currently active locks when the instance restarts were all obtained +by locking instances other than the one which is rejoining the system. + +To guarantee this we just need to make an instance, after a crash, unavailable +for at least a bit more than the max [`TTL`](/commands/ttl) we use. This is the time needed +for all the keys about the locks that existed when the instance crashed to +become invalid and be automatically released. + +Using *delayed restarts* it is basically possible to achieve safety even +without any kind of Redis persistence available, however note that this may +translate into an availability penalty. For example if a majority of instances +crash, the system will become globally unavailable for [`TTL`](/commands/ttl) (here globally means +that no resource at all will be lockable during this time). + +### Making the algorithm more reliable: Extending the lock + +If the work performed by clients consists of small steps, it is possible to +use smaller lock validity times by default, and extend the algorithm implementing +a lock extension mechanism. 
Basically the client, if in the middle of the +computation while the lock validity is approaching a low value, may extend the +lock by sending a Lua script to all the instances that extends the TTL of the key +if the key exists and its value is still the random value the client assigned +when the lock was acquired. + +The client should only consider the lock re-acquired if it was able to extend +the lock into the majority of instances, and within the validity time +(basically the algorithm to use is very similar to the one used when acquiring +the lock). + +However this does not technically change the algorithm, so the maximum number +of lock reacquisition attempts should be limited, otherwise one of the liveness +properties is violated. + +### Disclaimer about consistency + +Please consider thoroughly reviewing the [Analysis of Redlock](#analysis-of-redlock) section at the end of this page. +Martin Kleppman's article and antirez's answer to it are very relevant. +If you are concerned about consistency and correctness, you should pay attention to the following topics: + +1. You should implement fencing tokens. + This is especially important for processes that can take significant time and applies to any distributed locking system. + Extending locks' lifetime is also an option, but don´t assume that a lock is retained as long as the process that had acquired it is alive. +2. Redis is not using monotonic clock for TTL expiration mechanism. + That means that a wall-clock shift may result in a lock being acquired by more than one process. + Even though the problem can be mitigated by preventing admins from manually setting the server's time and setting up NTP properly, there's still a chance of this issue occurring in real life and compromising consistency. + +## Want to help? + +If you are into distributed systems, it would be great to have your opinion / analysis. Also reference implementations in other languages could be great. + +Thanks in advance! 
+ +## Analysis of Redlock +--- + +1. Martin Kleppmann [analyzed Redlock here](http://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html). A counterpoint to this analysis can be [found here](http://antirez.com/news/101). diff --git a/content/develop/manual/patterns/indexes/2idx_0.png b/content/develop/manual/patterns/indexes/2idx_0.png new file mode 100644 index 0000000000000000000000000000000000000000..8cf870885fb73615e20407d248e51860fd1aedcc GIT binary patch literal 23007 zcmZU)1yCGMv^~r&3t0%Vc(AazySoOrKyZQ+Ah^3M4iN}W&_GCnyW8Tff#9wQ?hfCQ z{NDS&s*ftFW~QgRXL|bHd(OFcqtsR9FwsfSk&uuu735_!kdS~N#6L0^6>-J)2`U>B z5_+kPw6wZ{v@}%R#qo`eoh1^Id{ko6lUJHHggvbn5opC8OOKzeG^UKY59U zMwwNGy$JSmlmRMNC=1FcUX}@SceisKPmPH4dsYjOI{@ z#E3lX)BBjQRipRu^VW^4E$}V_=Q#jL?M-D^IZ~_`mAkAEJ>hTZZ4I=FA?H#zk4$$X=r?-QYos(Jz?JS^=GbR^v7U*6V(FOOQ7ovo6%DiMX(tghe4ee zEBgn6I(WN9pmSirs-mOwgLP6$uI6iGLV@tDZvExmI(RIK>K00=;qpW%QfU|O@M?ql zI(w#|>^5CM_t%N88G+p{-@GmqAC_SVQ)UF|-YQecNl51oijC@OX0S|a*A?DE*V)7= z(owZd@6#Iik)2v=msr5+9e@o^;6+V=3I=|Ilchjm%87&-f;XzjTNaW@u|%ZF-6mAu z={~ouy1q~1QvRSa2uKkPISD+9xUa8789RDcz5X1D#TMs>QBgTHz*^EnTHCz*n>!g& zpEy!o?@DCo?-SnBT_RM$g}5M8{2=sEiQzg*t}K)?l#jignQ$UdFh?1olLVPOP%0MS z0w;{cMsIT{!%l#8EHnH7e1{W{Vz316Ng$z1)H89#h~04ARO;U;+8^0jd7cC%yH;%ZoTj#H4i$u*ca;= z>u+61uW+uA!=%Q(323x4+9YHo==W>&yY|zg6@-2QDFri^E94mFD6E!r8Yl#RO`_3s zDYmGwh+5a{6OJ?205RG#S~1p4$mYA`pJ@1N;Elhs!Lu>`P4k<4oc^n$VodL{MU|zY zWgV9{mz-ruKgP?KuP_U^6QvgvMP-|1$-kM}2=NfIW@^g|zu5WqVW7X4Eg3UuD1kM( znL$`WZmjw@iw~7Q(H+Je<=wmV3NlSLeTPQF=K0t4j90YBACJe5QI0W>vDaG}IPeV# zaPiL>c$h^rWtjgkmuL*Ci+uG`akfLVkJ`f>jti!>N)piyPCQM#P5i}7uJuRbv%2<6 z4;?}V3iXx(k#ABu&aa|$7`5xaauq0QRqJfz6BZh2`wzuhau7$o*N}Z;Zm6O#B(3t* zL#EikUjMb6sJy67v*cYW>dOvAYS~=f+}GxYnp!{QxaBHUPs#};2NaoAZCka!|4OL& z=n;9&xA$fbhuoVyNwr2bV_5TbbV-Oqv_sy$zi3frqEYaoWb<=>-6cbD)mFoYj{rLp zQ#iMP^=Ko0<%ADopv}g(m~;i@PmYDMVW-hZhjse|+X=fsP|Fd6-pEe`+PP1H9y+a|MusOPEDJkt0b)S9KcMHV;8S#T1tPrZ{9N0v~Zgi zmKQdfvuHM<@sn%3qP=1y`pt$_b;qA@-tO5xpZGJBoe@4lJ{vwOyZb8MDwQ%(Q{f6J 
zs~4xy=FH){6MNGha=WOz#OI&R#pkdEeh8M4Gm#q!ycKXEFC;VOJWDwmj32C3?N*ig zF8iI)igXKe*q)7n$TJz3?4UzD1)buW+H&~gQ10OFu(rRszq2qW!tvbbxv$8zZa)iF zVG{4Y&F}iQ%8qxt1j|~RXYB?{2HOjNkZ-)Mch9^VG;C=DEi2g%EwA>^{5QX^C9huW zFzqt6YHbu6U8h}cU3#LbBmY842d7}Tq0bXs(#Z46<4-(iAtu8yBo!f8#nr&)f;6zW zNpJ6Qba5EzDhaTO2sAPInN{!n@h^;^9;7~|eqJ)B7hk`*sozx{Wy@X?Z`fnx$%l(a zjO$Hx%siL;x-T=*?1h%h2iJ7L8$rjqkMm~x)>W_FUY-^{YfQ;W`J~%8-CUmQjO9%0 zY%XGDM11hNN$z&#G9R=Q`X@BCGqy9F#!tgy+`9!s9JchSC@wv9o|yx({uEWiPUAf9 z{nO@9t$g)ApC^)dNX$GMWEyfPWI}$Q|K9kW#TCpw{6^90n7gs!w6eykx!+?ztmS!g zW@U@09IKVm@G|OVI5p{)4=+jQ_-*X^=XjcC{9T6#SCc|nTQ^9y2sZ-eNh+#&)dU?~ z7e}%8#V0nD)}Nl-)XX>t-%?-8EygU&4b@Og-!}&tI2Df8m3=+j!5aQ(Af9^r>iXy4 za7xBPBZ~+5?1zP+r8}pXj?B+gzjJnTzBG&ZrbQEXp>$DP6wi59T6p)Z+;!gN+#%;J z=CMBE!QK@+tY#_Mm8{qFmJZYCI*-l|nGf;A7JObHUgTGIc$l2!Eq+1aEI$4!|FxbA zzAw++_JDa?k5G&Od1A0rGJPmLExm>kLHhJ__P3&MN1Jtwp>g(Xyu;X_XM4qpyjQSgSTm^5AWzw?kcNKRNznr`8sg;BqH7}JH)L!O^syg0=?b-j^$PAlefA#ite8b74QsrEEeL6q6M zfx_SKOY2JWmQGu$11_%b-RNxI$ifLLwu+F@(FEvFgDxhK)Z3A=sak>= zI1fH;frf6b1%VGyTmGl%EJ#1xK_1GP`vbS?^ z74j0L`g?~E;`s40I~DZrEpE1=RJtnaP-#aOODHcJ4;u%S7&;UR6>+h6BcvfC`(Jm& znJAUDo12pmJG-Z+Cz~fXo1=>rJEx$aAUg*aI~Nx#;tp0VJd$&p0xc zuI4T_PHr}i4$#MOP0bwL-9)LV9w++WzkknZ>1Ff(W^!=-@3jyw$o|;E&dJ8X{=Z`* zx{5qr6;ikHvb58cv9Y&wa7D}^#>>ek^7sD#U(5fS@&EMH`@cPT|8LL#)AC+}>>h=$SM84v>wN-mFNiPwH~(p+Kv zyhI*&e|M2}7&9f#j#UQ5dz@iVZu#M79eyFh$34pzlFSsd`g6J)N`E4+EUO2YjK^a} zrxuiKlgmc?DbDl|Eg@{SX$DshcLzPE@3uS_Y6OLRy&My6i0|(XS_5eu zv2@v%8cxlVyardFc;`J=GmY&_K|eS!DhOUKIpUgQ$3h*r4Di4ww4(A0M*@l9M&h45 zA38I^LxDvwn1oOOw-iEw4B~vG(nB)}EmS_b(MaA}BJ=p5u218Xe;3A$UV_sR3xEP6 zCdT|m2dCk3_i}as0=Kl9&?n`1d) z5HwZ_ia)Di_r2<3dUN=D;~oix%k10h14_JTT=aOR^*MYHH=QL5nOva4;xoqcF0h?< zRaR=(dV7ocuxMWe1<(`ZaLnLm$#c8L%MTz*#yD4qDRA7k>ktMfP;smoQi^3{L73+> zr#Grp#*)Xv*CbGzq*V5pLHq<36Pd9I$h2=Jh=ztIZznk1+BJ}-Z=u- z$N1yJYEl9RBnkx1WU3*`hGB@UgZNTjbP!+tA6IdJu%IpVQd`onKMmzQuHqn{j~$r} zcQVis+(~rFq8x#O(Z5 znk#-^h(Df0j1+bVR9!nHKA5?W-jSA%c+LokH8$R^YxJb42z-el#~x$`C4*!ae#k>B 
zZF#SGNXtz^JSoevx9Tte|L*|?;;Nqpv$*jfz9_T|XPL(22T!ch(~8dm2}k+e4qb6Y zhMf(%jnE%2U;f(WCX_rc=A~qJJJfSoeeNt$+ux_{Bl}*GGHuUTnB}Wiw1ZTzVw~y< zL=|B!Z-6T_L3jY}8Y4_2{%*WdrCZxlh#{5fniB|>c%IR!$=IIh>N>AS5?5paX#*~! z3yDL4R@s3Z64GPU(pibI%`DQnkRv3dr1fgI7) z%VzCT7rZ1xJfi{MrhmHu6CkJ^Y4_V)c6Y4nP8-~5a4&@f>;i}Ot&4oY-%0-nMS}eS zNdeRwz@sF^4G~c7mt+Kexj5CwUy7?gXf1yDc(H*~-{Ge#{t=)SnU&@>xE#U4Y-H$T zj*$uUJNm@=;{nE`>u%yM^oC4Rfc|+;!0>?{r2yUvHwcw(^kldFy^3q`D{6CXWn^9e z_O0l2Hh?cW1IPFkGw9R#ZZHza2d7S#c-s=)hY|^PgjOcS5rA=o1T@D5qYP=j%>Cqj z91zT1OHWj|$KEDBC4kLm?f0x}IFcP7gd?!vIhon}_F{<~ikbqR=#MB~qT(0&RWSm} z&plMUFI~6oNf0Xh1{J`|ze=SkRY-2pp@ejQ+Td+VP3wAXK^dzUe0d5iLi0jdj; zYpI-=$NMh|yy?nykZ?FFY}P#A?IG``5v2ob3S_3nnX&n;T_Y^WW7K-1$x@b4HS?AU z=N+!Umsv0?L;x?vc{j^{-Ss&@OA>-`-+wk#&5Hf3ZSBny)y62rY;GaMg|D@*@Wr)% zQkKh`T1M6P>R;(^P~3r);41`43QMI1hXOHmrsC25HgcPLSP4O?11>g9e>f&XIL;hAQu*T=58-$kBgdI&4-V|DO-Y|kCf)K zRG@YX8oY5Twlz8slmi=frVX}eXVaA&j>q!iVgdARwFXAG13D;URV2D4J>*)+i`+Um znL(}2TjZc)grWRs4^6PbFQy!Y!97;1*{eO(WBPf3JH-$V)9;q(#Vz1nyyt>vUE!FJ zAh;LEjTvM}O3q%PiveH1_;}g&KFA{ah zU3>pr{P8BZ6+eEOWzNoQTDLv0=lF1>YS}RzP3L(Z6WwYw!De9RhBtl{EEQI zIyE>OiMins?!?LkQL)5O6c^(5vV&0-oRWqMH*qpzWF$(w2bMnB^=N~?;?@aQ4hx1O zV&+-ys-jkaWb7aOy>5CZet7o<1~ybZ0_lY=dC8WMFWHX~K8#Dfv+9l?#*}T|2p9f{ zcSP{OR=;#wQ9rw44A|_%BKj|nGAh57S#}8G)rcVhi{OH-qE~olI`7sJDj!74=@u3G z+Gs;)?0EF!kOpFv!pXl2!?lOs~B=YaBsul$gsrdtS9)p;S%n^+>iV*Vefy@{j*0sN`5=*{|UoPqOL|IW6h4^ zl$$-DobCd)=S2-Ep3lZeTOUe`^TQQK6g39t(iuFu?hDX^b)W2pNN@rpLR8DeeuIJW zc(Ik@4(X>j)_OCSYaQtn%nAHKu9{!IXPZ6+X`oHC0GNxweFf?TAF4u-K!$kP&|WmP zJq3ZDn#oz*cau~fTOENevJDUOhuap<1{SE~B}Xwuqbb#6;Y?PC&#?)g+cTx;yH#TW z(1X);OS#atI4Y`!R#JcdGNZ15ILR+pD?+;A80_6HwG1wrru?T2i1)8C6lFV7|aQ6dlROn#;VQl1iq zm66agYuv~#)={c+i-lc#yo59*4O^+MOSQSgPd@_!z;#q*#sO}YlWi%ut|n1pSae6{ zX+iITm*h*PhI1-_>^^Di|NG7G3(FRvL zI|mZR4n&b8*YgWJEpcKlm zcgL5a`<5Lu@!kQ=i^)3XN!;3v!P*P}76tYqc^6`m(F%P=14&6$Fga_Bc{nM_E`B~Y zm=D|Z0e0La!4`RGC?6vR!k`6kBSp262!g2@7p387FsD@J3k{tpvGip)23R5+06Pj8 
zkjT7hQb{S*(N*>J>f=r~PaG?MTKF#n%0=Akt#X&6w}k4b?u%7dZ{}!;O*^Tvh#BN*F})ar#1Fp{+1Gu!D;v^Ty~Xg{0|BM7KnN(4rIPN~pTUw*2P6KE zPho?)S-~QsjTMyX3*kg41K1G>52nddANUYK0s|!UTzZoo%vTp!L$mTlj5PFP)Gzn+ zl0Tk0+ur>bq)fg<5g9H$A0E6-d8&=Ztnji?+fRrQ%4b^Yrx*Ic3S7%=PN__Pp8e6r4;S# z*2c#niZT~gOmE;>$ATaS)9RCiIDQ;1TFx^-taWGf=JP~`kcP;yg@1>LBtmcZ=n0r;N$agYS1xaXOrm9uGYs6 z#Qd2T)jjT{+JWi6HFDTnzemG%7&MVy2sWweOsC9Y^@EnMGVHj2RsG?EDUaA!)nEZ8 zyQI1ZlTxY<7|R6#-O?e`*p!;uEjEk~^md(P?TgCpn@N|!tkyt#BZn&%{=*p(Du21` zee|{r?V1~E2FleN{prSWFw23RoLwXTBOdO+r>%$UcoYMZ45$`*=kIsHU>)Yb5910*Z4uk)>=T?LAeU94wVxRByzL)B0 z5dBOz#J4YEUA3RjA}pIl%_juT3**Q?<{5D0GnIO;5^Xw;@gUNg>33)+TysXd%+ksT ze*3*cKECTs;gsdxtZ7aln1^zC7iqN9Dgi?}NC~wg6kVEPrnp59$GRlE06~}m9%BMOr zTW4UYuiScPP*d~fO^e}G+aLX7+Z|giN!9U|^NKd9AOhf1)=!CTtXWDAiR&fB->Vi* zpCFugHa=ieQn*j~4T8hY6_Ye}wLX;GbhSL(-w1mgmIdenUG7pG%`r&ZbHd}XJ<>_u zlxGo|ge`|(ML+3IeEKDg$K)gj#Dlk!WBs|F9K8b)aTr7OGD0*tz5p7jg)hgw5zg~} z2+xOoIdta214f}DgcN1Wsl$?MxdU?ieZ)2B z+t3)xf8m1SWse|M`!vE)1c0@pulBYh${Mc=nZP9^6_|)cfmeYrx`wMTLyHi44 zU0#pBG+Ow$xn+GWD@P=ZAk&m+b|h!=(tZ@zE*E`~~9Z&4QUncoV?P4h7Gea#9TMvb|MARL8w{~#Gy zy@f9)5!=4lKIgdN)>l6`8lgKn`78~^#FHP7{fW>rIt^`=TZ%cjpgreb!zcg6r+sD8 z+At)Hc&f`oUN?O2eD|iCvi^uSh!$@*zs+mmSG9pfe#OVqJ!?DWKxCkf z4&w$KpD0^=7{TlEFG)l101VDa4$j4b`paEygza}!9;`yh5 zf3Oj!_>idx(9oA=Nxdy3_}uzcV60P;`sX|Vzk$Z2uTTRkHp`f1L+oq5+5}znjUhA0 z_;+tOJCM?wUU8ZIVH$xF2GyXRd9DFc3ePnlG^V*PIf3{LUFzdwFeGpHps|DEcUI)5 zu7LtWYlNUzksY*io>Y3gu(D#;V*-9JV^U+@vA|g-rF=^S zqW^wjIW-d;aO^r8L-y<@+@sU^=UXYVPby1xQa2VL#2pypeHvJve`$b(O7;q8ECqi< zB)YhR8^98#QgKZa!<2f8qjH375Tx&arbEV=lrW@sPPy-I6Fco|A8=A0O(aR zh8aQEm|MM#;$OvDq|f+wz+2bTmCO5r(zW|F-$E<|4JMe~eGy|u%=jYW^6S~_yfrsW z-AP#t?;CQlxAy22j6&`G5~D}-ZrNUW+u(&?=W8!}ggrY)Oz#A`ti8l7lpWV@Im4eU z6$eCo`@3xfo^3^t33jhlU9II7KrgLV&O$KgO3zJ+K-$z!OduUsia<&Z5?DhrMtdkU zzTMg)eO7oqzE!H6PpM5-RhXPO1qA?J$Nf<&wOj-#W2xSf$`gGQN>lQ{G$|0>P>Fl= zwx7hB`3frvBUFI2;i0irp`UMi-1yH>>CV3& zbtS^tnwH#gep^c3?PHZi0sLa}?Djf|B1IPn>jQI$D-({!3-_1aYe3TLj&S}4JVMH{ 
zL+Qv8s-gx9r^#6Rr;wEO;TcM5+Fkm3z8NZ17xMJPGQz4cPJEa>iags*=k7P?UuvgK z4-G?0PCHjlsX5f;{I!0TIkkd`()I*P%nxExcDve}!BTsEgPBEALUnR+P>NaDqFIqF z-0V4Ac>HFHNbk#~;!IU-|k9M*F9PiFB5 z8CvX;lD+u9h~4(&Z@!|XM~=nn(KG+?YI3Z?;C{tOPJ5?)g9BPB)d-Le)Cn9r$~e$T zZ?E}N)Sj20ZPl1BnB>d;r6bU6%Jk=yftKXqWWi>5ZRI@WYdkK0>1-%Wt97WXU_S0bPJ`d$X{1|Iw*fW^6H(4d6=Msk4I#)KrjMdYFvP#JbJ?&et zI(TzqNRy?G+-~XF2KcT9mrT+8y8Lw2@WUgdUg|Tx zCI}ug>Fpe!b71#SeiXlbwnAn{BbPD6WaiDMFaeu!q#S5zrru~O#t=h3E9ytnmjW@k zp~JqCg1_RV)Itxr>L+SK6!}?CY*1ih;E7Bn*|ksR>@4iR&sf1VAu~HS@OjKYVluvO zcuwv*P7S2@1{$%~F=H5naaJlhy((cU96GOXnO+wxLXR_wT2iUtUNVFBaui~?mcKHSShu;F%e=F!;js8MxjM6b@tAmB}+>ztO zx5-*YYZ22_ftqgeb4p7~$#m`h6vv8VrU>qiUGu0;FZ}jm&+Gr{O)QS~`Budy9{vl3 z*+nPk{@QlPv+(w1DiaYlCd=`oTHQuyo2KMlKjG}^eNiz4>$@t}601t)+iLlkrG?T~ z-uCLiF_=3sMj&2x*%ax!yCO5VNZN#5HOnx``|@CkzR%;P>zfD3YwOi-QbeY3FxUbY zjF;k$t7H>zLpMZOMgx=VCP8V7n~C}vgGfZdV*(gZjHZ+=wPa>wX$h~3#!3|hr64Jh z5+UU>k{mBvmeK#ILC{2mxXL6rD`|ocjOKnfb!N0f`{cvJyW%SASST`#28h{>iz2CE zVGa7HgrvNK8=}Bgm90w-S1>{NAd+-Fh9U_Na0--CTUzfnEu~oHnIeR ze{waCJq7)uhCd^r+H@rVrK4efa(jmMD8$plo0@aC@4B}_qSkl$Xwcb7Vbt7e*wdf^v9RzuX$g8%KCL)HC|h= zq}`%(9w~JHg%hC zuk&_LW9}@P-N*hv2T>4Dp*GqdQS*zh@wWBh{zrgrwBnUB`cZxq#axV)wQ=J?KLXC> zlIt&Ny6HXw_w#-G1n-Ig+iAt&X7;`&x7OOO9GmeYtnf#V@}s<_yx()|i|#C;c(A|k zy+~C&U!$KjnYZ4Cw9YtP)6_PO_XoRe?xshUuszzfPw>9`2$G=_xYdwgs)VgIMm-rq3+;pHJcoVwGC!C& z_43V*etFTPX8UxJqlD4d_D53>gq#S)aEz_P!J^B;@H07NW2cpl`+`izM}=mW*rsZ^66-wQ?Nhqty(Vp%yIvvet^OM!t}BRE74#X?2Lk}>rkJ{4b*yV;mu zTK^2gjIj>}L?mm8@l+$edZ9bnZRxjM{=$%$Cz@i5`u?^3g=@}b=;SCpSFpmS>f`Eu z{uu8MOE0`Tu8;3bmHqpkp7t86r0p|%T98SSwIYWlE%o zWT<7ADj5yi+ffMcNSOeD$VDdlxe}5lwBb#TKs+psB{)%;Ls0vKE&dyZln`u}(lnjn zyxr9}NyfNSWSBNck`u7sHtbJ(f{}Q8Y{gaF-xsf8EscUG401s%@7V8uy@`t z>+q(s9ZtAJf`q{I06?6ncbFKA$j?gSTf$X!%ZDMEt74F?Rr%MR+oFq$d#>b$#@&g` z;F_%F3PnpsR9yQik9cjN0b z>xS~7#Eoaj2nkd%$lGe7_k&IC+px#{^+J*__&?oG?1fyN#nsuGEcO&;w6P_r5a(fH1f}dZHmHG|?7!{N3jdZpkfN`CrwV9^mKuo*n@ zS;zKl?p-{8bE=;nkK zxpSqE1Tkusg{5a5PKM02UVRqgC3w16u&2OOD=Q+=^SM{HM$J2(=7>CqZvRQDS&peo 
zkIqkzc+)Tmw1RqFFbg{(POt$hGhTA5sU&9MgEYfVex^3Bz8MV&Ucm#sh3J~5Pcm8a zc^ogN=X}wlq(gz_XTH)^UxK_II#v#31Xn4YR$eYJ_Z2ny&O{j@vU<=S&AuxeB42b? zW>COtf?eMNbdnI*7i|LufLUGy30m$u6(X`;FPuB6mBju-1X(%Kek4ONmCcMrMJ?aA z%MkYcy|XFu1t3n^I7Z1k1Jlr9V6s^lhov6@_1cX+JjQA7T z+{}hjcv=M1AjbA0+TBwP+Ob%W<|qabz38cGS$zf~q#wPvjd+LLbEgVwhpAvWWGFL~ z@ZksJhjf*fjIY>UW!>u0&NGSYQL3AMmY2th&c~BSTIe5DDJ7hWYeq*8Z36T(j+57EX1b)46nTZQt#vNEDHOF(=hjG^4En9Ow z1WT|iv31TMOyrU#Hn0z>jJnb_GldY(BjBIl(Q6^~%PD}11D(MSz9hS2xYOOqeg;x= zGR!^bu%uVE_34#t!>a~zw)XyvS4r_pwmurIP6j!M;j#rOUd(FQao4*U;7TSNn_Y z)S?81E@995?xu}xGb5Y)SZ~ydPAeZ)n?11JCslu&62#;2q}>%;fU z72g}9J5YJoSbkbP2oCh-)sckD7(;h{w@+?hwgX^BAVLA8fb5OS zBW!+x01FTW0P4<=j5e|=_YQas$EZIy`F)gVm*ngJO3{ggSkdc`5 z^L+p)Yu~5{GXKdGQGbU7lLR-hc`5=#BZsI{uk`%|1L7@d)-Y={AkR#Sp$+pD=70DF zYCK-+=FK)r9j{V@WT}>&SZ)}>WbMR^wPgfA&<;OuwVS>U{3;%hM zJd|v`{hi_U3j6`0n*o38Dz!O3WfcZDuv{O?oP5H0zlG@+S#5Ok9CvNSIdcu%$-|{0O^30BWPi+Gb6Ofnh7|Fw0RE5 z0tHG(!*#GF(kWv#UmK_-BQUs4BlbZq+gz+dio>YEgcEd&2aeb8iDE_Xt+apH!S3N; z73pj0TArp7kH?TcPy|uVWPh>nbU<@u!#(GreE;l0OaaSer_FAWj|RGgd-YSDzQ6$- z+Wuyamb3nonX4u6wr&i?#8Ld%)zmCeMJ-@J(j!z-IIdVLSmC<@s5gzqZsx;sW;jQf z!EX6b5#(!^X`ePVJ`gwMlN~|Y)r}I=GUvvAsBsL8P-sNE>uWq5yhEyd5uo0_6E~dN!QuCv;kvBTC&5qUdu}?qZSJF z4$#CVY-RC!%tx450O$N#jM*4=pCn_TBj*Vt3_@OF_!>QH?^&_LyfEN6*C%w75j-5< z&(!C7e#2y`ZR)%sJhw@k-FN5R+*7HA(BA7eQQPoEPVhmDi$75jLApvLMQgO0T`!4& zPw8}w`QvsNLRmu$qMh+d`^+~-GLl&lxYgbrce7QanE0}s#9x-60uNC!BeMAFQV1`$ zv()x!v$P30(1pX6vnH5rOefOmgK+xO)L*4(kO9J@-7U(`^z^YUQqU-4ubk9${(S@{ z&j-0|eon9PJpZ+QqDnMJIT|E!>`2_p+exxiSv*r4{`)8Q9k2g;hRT8^>TdM3$5s3a zi4^7K_72OIrHoxQe4DoTLat!p%R+C>)}@NU-+cQIIf;MY*5+6XjL56_&TKa0d!G4i zDqZE`5FMD?o7HrmXH0gwrCWAb72cMAIK8kij>fWIqDn>p^=_s>i?dF!_-?(t$mvKv zB1LEMG3u^*GfL}TfWCY8QOCy$U!b3QkZmH8vMa?5l;NvR9ev;j9IvOL89+IpupUQZ zjP0Kz87*82n7@%^+2x??%z}-E)6UO;nc$*EDd@{O!MUWJ)4bLa+P4vxz8!DCb2?la z=ut*TfAT!@o~<#1NFXVdP?O-du|hu>6ob-PqvMAWVzV$zkezs1(64&AjoDcK)*;2O z{;pb|wh&3@CAdpQQIb=zmid*E9d$_q$5Z;NLR;SIiF9U=W(3C8U!{6ENkRZVz?`R! 
zWuF%_ah?-c)>;I?AmkWqhQ|7Y`;8^`s7z`X(rs;WCaKs_l?V;CE8PFlS7Ci2sZP~o zz+1l*s+%Kz{a68o1mgj^1Yrjjv)B7U{dMQC+0-OCypT949z#?@5ZQW;RLE|6A5lba zeMGj5dd*vnOfs#U#xFNuWWFYif|IPITkJ%X|H z&|gunEml=s^KzgyIpCgV86d%9117|EW6(L}Gi2mQk@$McK#U?&iP1EJEgf$Xk%7EvO?zKC!fsX_S2l_EetzQAF`4l1EIh7(m?r} z?)r8kHW3>Yi5XNWgDd{m?vQ}Qpko~{SAyD-8~ld3u0+URdld?TI=wJ~UtRZqNViwQ z2a&DgYJ|3#Mxb&FbL%4}Z~#v*g7@jV8}}TX@cAqd{myp^9_|gEp)N?mDPVK)VUwFo zfnRJK$AV7WZRFk2gU7YsoYzvUbKT^R&+l}uc{{<26q&{uLE2C!15nBGZAgr}0~5xR z_(J6Vh4UxWGYHGEK-6_6;J+kZ7@R(JuZZQ+X>;p#(_N$wnBS&Cc_g`7cP;X;e7-*7 zPelnT8;0~&CbMWFZ!6I{pI5hZ(uUXtyCK0~XzJhu?8Qjsm=}Xb%}J_?r1nd_SI}%k zRV^a3^;{3$js?W4Ng$C;oH8c(DXy`wRYi(F+kMd19qKoAm$=3!WA>N$^ z7aYRflp-G}aC)ERHSrFvY?sx&^Yh*(q3~Q;Qk~!+G$Td?B3sj}&1A34qZCy=VSL_5 zuG@a_RCmOL{f4Q@*;s`#89hF)Q4Tm8%uLN+Z*lkx1yLO;3B?$G5BU5g^_vbzzl&OV zkqc%#!?{+YZD(b%)%jhB8N>xi0cV5~b3>O?;NC6Mo0y)3gpPg{Z*jCalKeB)s#zkt zKk^G?R!EHseEWf@gd~K*XO4lAnIm0@&X`Q8zojH6MPJK1%PRz3Nud=K=^o!fIt%Tf zWM|#zuv`<9ek7383SC!Pif^^bd04C3O-#k&VvJoQf6_EJDlE)3GZ?NEUD3Hsi)%Ym zt`cyQx~iYae!x0Hpeu&bf>d|IikzChzL~-Pj?_-~EcL$ddI6d@UvS8=INy%o5z6w= z^H)9VzTgyjy1|4fLn?9iwp~519TpjMgkE&WWt{()0?*SM69_r@qHpM=U0^bM8t+`i z1!|P-soCH)3>~kvmEfm6Vr;qi>i>9hAdZLi{n+@^z@R=D-v2Gn~emnlND%_HyOcql8k}@hGN;G?3DW#5)my`Q> zV2k%gyDl7z$nAs&v8eI;hRYIH{(hltQ>%oL+u~OYQTMOuqZX)LNzV|Jt2e1IFs zEJn`5y$`Md-ZrGrELcZs`tx+yd=o|chSG_*bPQ!dN)w zwd2iOS016`mV62#kMy&^OVdMPH~;+|sf8Z8;@b!n3eow%vYM#=0ry6J?6K>yO<7rs zjwS4?wU52NkBQUh*W068K3L2O@4h?7<*wyBmP?}h6*mc%)|5|n#$0r?60W9s4x*ph$_y$S&1m95>w@eS??5g|BNt<-8P%Tx)Lz#Y| zBEijQYvJb`<^KQGaNhA$|6d&Ec5yFqubq7*ql{45WNVli$zCCQq?CEDkiGY=l&`B? 
zWZb&tlFaONjqFttQg-~_*Z2GT&-HLW_wl&i?=xQK`8wp7Uk;$;vP3x}N}4K7pUV;9 zg{tOiHDX4qL0-b=cP^cKZQlMJwx#zzTz;fQ@YGG8=9v^qr+d+2|Bg$D@<3(Dr-5ye z(?SdEhb(LBlb_pF(mp!Q9cx>!kwZ#xh=^#)%dL(9S=>FR^QT%zKWcte<+%U~t_RR1 zQP_Bu1~d6lis)Qn@dxg%l;{H04>zg(UVr$*5#S!8*cmxR3@ouLvij<9ae7f@Z5GVe zB<0YL!SS;L({oUqu|imeOmPGER48u)0Bl{KK!e{tFFQMVy%LPXK(y~VfibMkO<4ILBBTlRR7AU>m`WFXx}2Wpee^+pL&( zprA=Alz7GRJNFH$uaKxWRWA!o7-W{~#h;SXRg+XrCTC`!YvFTqvLdH=4?`1{3y>QD z2(>eI((NtkMgf)Hx+a0U;^ctFp!F)b~X&k|}U~}etb@g5avVuD#I9ins zGC?UlxihooHxzox)6G7A{AVnlB}4FlFz`tRrU~1%;CD6ppks%lgIs__D~0eZXy>*d z>af9D?fMk051ef*pEx`?U?W|)Xa+Njx&NjpziENVs{ZeT@bvd(%4*51ain^g+%d&l zMq~7}u@>N%iD(8_p5-7#K0SJHYQmO{5TSYn!t!_8wScW=Ix2HaUCl6R{_un0$`?LI zr6&~7U`AJ~7YW8lSoZPJ7Avm)%9~6+Yo;5rv~}NpxWXx~0K7j`zOZ7U<}73Dc^FF% ziiw{{@stRl4{)HOp`7eursUhc*O^nG@x>^uIq9%se}BEP5&JH2S{w2_$^ZHLry6#& zkg7I;8g(B=2ZmU7LlRjS2h@4*=Q5Xx!sb^eDc`8+X!zNVbo7#v<+yo(>taO6N}uMO za4L@T>G0>fDEC9Z)@Y?~w=}+h)UVQpSV}Fb;yajjS88$gq_^C)uRY&xxdw{6fTtn1 z!;aV81SU{8F|2EEpatK6x6UXi#c{dF56X3V(KzAzI^tfK-FYN3 zCLChT1UNq?Ixha?>)|y6(|MQdQ*v}KN2_uH{mxVFU|4#7VEX|@YUScCRnlF6F2qt$ zmC_o+`6W19MXH@jOP5ZLx)7M zex37S7Tbg4>m*~I_Z7(aTd=pfwD&>V?a%0{+@GZHJv@&QZZth{<_MKcwwIoeX%)Y; zz{SKhSNDAFBL9n4$Rnxej+UqQnd5#b?yujpkxn-CWSY}aO5Y5O{+zpRwwE4A&>LLL zaJ%t~yHhFzi@=j^0bPSP30Mr8ib{%UfamTHLHo0%L9ni~4uS~|G~+LO-pgpn2Lqdp@ym9$=v9nBGeoY^g*Gnth# z^NO^in+csS?8*4^LQa75;!;b6&6j2;vp{vMiI&`B)p(h2{u`lAgv+ldI5gl=2ziaY@X6_aQX6)x>P6>31s8)6+gSyn%| za)MfsVHwnR*`Koj?+RG{JH8eBgnbLIMX+2k7xQSKLpwo6HeE9eWa6W=-~jCd!Cll{ z>{qqbKjgr@P39J&M|~UkJLiI)=4jdYeI9rjnV7y?`nGDfp!ve*m+3OQCCB}xcg(M= zxGmpHL4?(MI@>RPO>*@3R}1SOb-{^FZK%h5|66!E2;|IQ;KN(G^rP?7D)jP-_?fG;7z~0VO8I)G@Dj^2{Z$A%dc!hEKi1ACDF z7z6249n%|sSbruS`?E9zHr9_7##NGufT9zyZ>8uqgD4etXAv)XwgT^(A

a2)(wR zl~D@_FblY{a&SfjXvt2-^mueM;brUD8Lkd z;B&Pq2-mg3`V)_*D|b?uoiy|y;BW~u^EO*!R2x)`y)&-r-Fh_lxiu)ha>bg&+A}fadIPk#clqDU*QLl`aWOS@}VrN zm+B(@)=d4HC+tQfk?^Cuud-YI1wZK6xB(lS++@PJ&t~mlDM2T*2t|K0k4Cj4J4M>8X6HMOG;Oe!W^sNXXSu>hU(Xr0?fF z96Px{0|P6{bkcGpQDl!Z2ztp@@pq~oi4rY~HWFfk zP@@^)>Abe32a13Fta>LU9V?)Mp++0^lal&W#5 zyBP?#6MV#uo^1ZA?G%+Kx;5T7avilNQ1kIEUR2eb$ltGgHt%UX9R!mg;^2@j=9!NP zi^dcbe_$+)Iq$av!ADYFp=zR}_C`P}WZTUb5#@p4OsJwVh^z6oJ^Tv>H~lQo!K;P`xKgKRiEvvTG$ZIhI$f$7NLk)tgk zI$Q^mS;LUD-RqiG|LATV_hxIoHUoPs2bEgFOrP+{EmpOY1Ub4bT-o#vXploTdBL|I z9z`4^R5x?Hi83z#!aO|`D(oQ(Y<1z;l7D>QCz_8EqoMLYxca7FwQB3Vm-#;TOl@j{ z%?uQ1CTO<*8jGWWGS8eP^%Nat&^1t<<6)9gy{ra=Jt3TWBG!y7h%n{_e=zv3Q0|x7 z^B?-O_+o>06plDqW)N`!v@?f>3SM(4{wlD>fG;*f6~E{H=j#Oowncsu5F}?GxK@71`0?V!7(6VX6HsMr}3ydbPU{c@@xx1Q3x{Nkom> zkGx2wC?X2#fW1)_644<0EN6`l0#SA-T=C%jGeZ~??+RAXNx={yP;H*xT8&GaQWYU; zhKlCE3*HacRho_G%{{-ip5a}E5vcR-MD6;Nho}|9?_;5!rENMD6XukI&+>PIxGvAX zEOS->vV{bA6WatkH0#M2n2-N}y6MWXxGCC9<->;B>(gTIspzXD28VW`&bh{&^DK4A z?JxF$zNEeOIAw#LVc1`;SyM1A<73b{aj4A4gV=;pp6W*E@dK;)QVrsjr38g3^N57$w4v_p7m4SNzz7tsaj_#n_yGa-=(l z@UmhyRDQSn?z|;`A3$KB(o(g9@h=(S$rlB*Pv==8pZ~$C-=~1Du*^KlUS>?H%^@s!>Nmm~5uvV)#Vcj`&);=h^}`oy&}da#XX1aXl&q z&utRnu`cN(Gnh8&oT)vXWsq5+azMRZT;e{|A~B;oZ&Xc7pW!B?*XfcsjI9Lpj8(CMW}nU_vGGQOX8SGodu7pHldn0f zPB#fFxpBnR;yL^3;Y9S1l+10?hVwZ{j{HqE$y>Op0DVYKM4-M1b>)mpc;t#WI$ddi zEvx><#Ro$xTk74zLcG?&T9KY7^#lc)2I5y!YIW-o^7h)B{$(TIAJ>~%pgOk*|6=a5 z@FA75M0yc3Bp92C*j=&Z__0mDjB8=CvF})(BX6qVGqISk>g=6s)1ptzlL)y=n?Gf_ zsNsJye_#va4gR^VlJ9r8+n*a}wwK|z6TA^IEH(IX`YIcAT8{)ww!z3ikp&f}J&-wu zgnj(|%xyPGoV?x_?)$s=#QS%4qpFS`ujPGCvN`Od1F+Q@?CWdKU-6^cFvTU}Wf)>M zR8B6QqkhUPt06CV)v>W+VuJ5C!XO-Dw~L%3kA5g&lT0E;fnOh}j55$-@N?g$^F~}Q z>rx;2tGmy7REbqpzx`wMOVoSZupuK)T!%B_CCZha`$RlZf%za0h(?DRG0oT~M#}Sq zZzEO~AY2MU^!Oe_X5+Otv6lM`eCIwn5#1ReEq;k|Bfl}+9nZRG)!06&ak`5d-({75 zIy&m?cE_sTSrppI45N`^a2mC8;jH}cX`P}lXWEefg-~zn{$+1k51?Wc!aV#VopZMT z;o81)oS21~pgNSg$|LU9(66*MIpeE9k6MOC5Q5s;zDwf9& z6efL)CIt7RiH1-!zKEK=t9e&7P7Cq1CR!qQXQEk=D+a|&nl?4-M+>3i8#AwHxI$w# 
z-Uv~?sxq^dF6$7)d;dJ6r>*lT3ffFWRB~(uXcvV(D?JF4Id<97Vb^-W05#{ABd+`! zxmQYxq`EN(JLx>Z^JZRW9agTr5~}9)=p)7Gs(>f*K8!?NJr^2uO&P1}xt!Iqn;D7j z#qy$W$=j9}JuV!$1hg=_$YO;#8Y+F!&otPv4@_tuT>2^JdyO5(1Df9ukZEY$rf9F@ z?$qAqW%iy96W7@NAF4pG45mAQfdj<9ssDx?C8SY0u!t`HqUc&oq)T zyog;`fcSGY-Yq`!P#V4g)1?yAEN4QoW9JecFdVomaY}IJtWJ$tu{z{Ne|@ux?(7Q7i4q@_#26_wd>!8Cj*h$%f-H&OCYOmon>%Bt+iS&6%*o zp2DfT`{tgHSJidIAVrk@IEAHr?a)!5_7vP2saJyekz#115L6uP6=Q^-^>v}@LOHF3Wi^Uncc`UY2kj)#J^9zG!}aP?C$^vR9gt8 zWtb!-FI|75ZU?xv^;3C0%UOco>77HRjZQbl@br0{frul_*9oc3Mvc5O^m_CG`o-RA zGu#YIpvzA~x<0Zig5F`4ebM0c%tKu;aw`{O*Ua4{2qVB)3dJ&CbW*x^;3~ggrfMor zY4e6vXc7m}1?rxqU$Cq0vxJwZGA@b7(W+M=k-~AIiNzdXR;d;)%vx6*&pvL4L zR3oi$NBq+bIOHoVURtG>rpSwEI2lWE)oUpBzJ_vXVcv9~lZmO2s=f)e$4a6389RQ9 zeAOTQ-+(Jz?&lHDFGqpDj^R|u1_9zOLEzK&6!e)W^h1IMxf(mUlb$digc?_mD+mGP z-!urRssTOc44Iz*wWF&>IHvft%Ukl#pb;c5oABY`jR$25`H$c7tE%-yU9$M^QE%Xf a$GqJg_EI?bU$EPb;<}c;<{Nc8%>Mwdd6zu^ literal 0 HcmV?d00001 diff --git a/content/develop/manual/patterns/indexes/2idx_1.png b/content/develop/manual/patterns/indexes/2idx_1.png new file mode 100644 index 0000000000000000000000000000000000000000..637b6a6a1db358d7c9ef6c54901f699b49ff8098 GIT binary patch literal 11649 zcmeHN2{=`IyWiNf$yA|(unmQbkqp^}%oQPH<|zr=u+5UOL>Y?ALs3#@BGER>JS$|Z zWM`gdo4fYfIj8RTob!C=ez$Yaz4!Ax9&7LaTI>IRr{DX&zgLL5ni2&W0~rJYp-@ql z*MvX_p};R8j0E^5-{9R{2!t%pMovy$MNST`?(A^Y#?A@?Q4V<+b3p5|4P8U!dN(5< zD-HK!QQ1%z?#IDw$_F$^NsmWU6Fv^1pg&e!0B0kdx^w;zvC`8=?y*|AD#Rs!P0ADX64v^O)^W(Ug&^bCBa)^Phy$VZt z$B%MB=4fV-_;bRH)KpQKmyvrfnt~w-ZR_OL-q`K=J?-wHuUeFlNR}FnAuYATHJHfK zxK*bs3r+gr$(R!T@C3{@%9aqDe2AFyoL|tO)PqkTjJC9@#?sfx_vWYb4o-X5V&c*-8xhfo-NwAVGB#6M z6-lgyAkWeB)r+)$DpMa9ND){3dgA8WC%yFpVl(yLPwR=jc-yH@rUWz2ema@E z99TC^Y@_xmB|yHie)Gt1{c7(DWU<($@nA{SqTQv+da0YAumt>7v>xm%Brw8^s$+3* z|BDallEt^xh%lD2F_A|ZW8a#xJv)(5{Rwq1M(ARgYAZn;B5>JvF?jcN5pmbz-Qsa( z2(Rs-OyleVc7kiNZgM&n1sSf#AWhPc(#DT>f+m+mR%VWphz~#TBcbsl>y&9PWfe*# z&L<9UtV^jn3Jnm9WiGl;a%@}&K9JF# zQ&6XOfxftP)4yF#_fev{zzU2ZNb&J{=h0t?qs$nx$!%*}OnKr+y{r8f7{ut*{EI$H 
zdT<(&>DF+5Jh4G`gYhPWs>)%Elt$*hx=S=eGqu$%XT{C58jVw2tk>CJK(qs1$iGX! zqLG~Gok`I9%aky~-HbH)YjQ97J`gblO+32X$=G$a)9NdO%aJl7A1cHhW+mSSb;0w$ z5Z$A=eT%fd&Rj@vGw$$3+KfXeie-rKz4$uw0+(#c_lKwbDQe$Z^jWx@QkgU6+xHvK zP`eQZ*K&`MJGzOP9Ak{V-O|8Rm0V}^cDSD6v(!fw5A8SHBNQy*n9r!i;~PgdE^L%< z=x*2q(8%63m0LLONU9%X6l4*kQ^QmvXqLdG+ROUk5K+it9bcWMIbxAvk$aISL|&6q zKT7Am$CI`vn%y?t6y5MiVg1|Lm)uS$MJkLZk6(9Y+&r{Nbn9GKhL~mzw@q|%^p$4q zW>hmL>5HKI&kL!^p2TomcFwUZu?!j4Z#w(HLKDhu&uz_J(yRE~ z`T4S@kLHnXEt?}YCX*bKOx>JM9L`5Hj#<96GPEic@)T0C%55gs(9oiIA^cEo_&h=p zt;m#NW+QQ$jxR+=`K-cpMp#R8BY!MKOj|TxESl@Aj8a$eB(E2n&rvKnmKA$9sqolk z{wv6GL-df*YwpeCOW{jhOTp!)RhN9?q3UEu zY9BItxcx!EaqAdKoq&fc4|g8E=V8*G(o9g-(QwnH<6=>-cp;f_PS;5*M3-CV^%J2N z=e3JT3%RZOGZC+3M z2#3Ze4xA{WD{^n4h*1)de)(oJ>H3^SMcbRs8!4J6m|pT45#Bg6H;3?`*%D(hSKA{o4IC z=|L*7O!0iClT5~9*TtNfvW}UYT8&$5jcR?R_EzoOGsS1z){Gd6c6)xVqwcYUu}EF| zK6sx{9|k#vEI_&EROCwfnu z1dAhgAseFI;7}G-rs-wor9XDakWrHM(_u{-A?h+-7rC#qg7t#NdKbj_CB@#HykTBE zJ>`=X%-+hr#Lk@Cr62WrV&Y1Dafq!zZj@nzvHO|BN9YfGvMuop#u_!H+%Z?smJdTE ziEoQLl!gzP&s}?IdmEH-{Jsi$yMpJD^57sph16zg!qUxV&95fIPdy zzNIu0F+A8-!qUHs_A_wI>MYHFvM^2A9&R8VzoWI))7l=FJY3G}#`GaOUS#Rn^buQ!O~VpB9eK)k;55~Y)Iu?D?u_i~%bs$# zH0#$wp9c;Fx>JcWzmU$pQM#}Yo9ZdO&f+B9t@YeU-@A(PJm$>ByvfYs>>R{n7js=EL(UhB2zsYNOmxfh7_Ztbc8ZnTH9cc5zphN1u=#p= zyq&(dt)Er2ih0?teOi<8V8Ww}?2N^U(h2{V{FssD6wIK3y#b@I$X4-Iz=vB)cMFfY z8?hNTm!C$7Z!gIwKC+)VKlAYMn(OSP+}p&-lu+gE(n!6#QL;6(d3?MjYw})RY2MS( zm5Sn<>sz}nCv2`NR?!t=vLR%oVkb!a*55(YYaoxH8O(@J;xG;`_WIK`e*;!y#vck2FX}Ggw@e7`@wjPcQ00d_2`1(Yi^3g&c>g zSIa!FP8iS%%~(J_oWP(rv!77!jn;~mz}Dtyvq#8x6?_kHhY>faqTd%mAOy)aT6!*e zsuv|J9PIhcEFH|P_&w|$fzlQNk@S!NF72&c%-|mOc1V3qs>9_RoUPy@{HOT^*`&zeaJZziqt&k{S4=IsT zXC%S<|6%2~9lv{0|NAFTfB)onE8m`!6u=4aoj`bPf!_k^k|L87_*L~%WT(bIcS9iX zaTR%4Ef0d(VFDz(?gOZLoB5?r;;|j(8@y7nJ`abv1)%Dhibb053!jH7nVPyT=^kCS z(qMXYdUbDAde8HX2Q^&A4?;lrj&M(V7(GD>e)O*-h7uknBp`&ssNoB2)TJ=sFL0ax zw*OdO<*t|buKQYnk?#(A(4u5I`SZ?>UV%iezcQC6kIwqYrH19fY0L86R`Ffe z`B<`uD6enZ^igkoi8XnvxYKBAslfs6Y(%){Q!*UQHNvvD6;d;>+OUX!>z8`T5tHXE 
zdoyJczRCIKtBXu41%+s}lIaLxWW+#johalhq!AP2Z2AlDf0*@CmVtPqig+F zj)MnOk%9lVMU^p$LhbvL;Cq$bb5}|IZ3Dr_sXYo}WlLd0^B>S3YM(A9}ZG#UjR25 z%Yxb=X*g1SP@&NaBd^jUmJ|Il7rj|T?>tTlkB`(-O#9{ z%J=$+ymzM(CqKHwPSA?ro#X_ZdS4|nrhu3Klm%0Pz(Rl9Se*Bj5zkn4B7BGOQNlvj z-vVDKZP?u0b}TZd9Z_^>2BThVU@dOe3nS*6Q{-iwWpSpbcAom;$?Kq@9$f^2l(Eze z^!_Tq$$$O=;N&U_VPNnWY5@i~%9LFK+P_K{)<49>^f;CFCJq4YpR&;3vX5c@NB~V- zI!wM{OE6#onB<((&0VRXzn7c*;EMf2*EBaT&5rKv9+yO6mr{LqU9l%zm4URa>HqR| z*Cd*pVVGgh%#QM0SYK%h;7$uC9VMur$S-akMenYkOuY4G4OKF4fh}2|-BL=n7~C#e zEATNIT#ZO*SaY@SoBr%6`dEA$+tAw45&3S~eIsIT^(>S{RSS=$$pDs$I#b}0A%c*w zTImQa9;7(o)V*PbwRrQV`^hYH$=?E_u@^9_D|??HXx0rbIQ5Ow8mwS0a|LqQaHGgY z(1E1LVBc?pCa-CXFxV&=DJw0QcckY6b{umioTQACVl5rY6p;s?Sp!T~Y7z6G*QI}r zc>qWQ;;(=-FuLYivfhzo0U#4zM^ns`6BB^Z2A~|@Z z%tO?$>xwxVpcs+F&;Yy7^mvij0g>)qNe4xOAA~^%-U{Q1ZvIJG5X2fn0Y08;1GUIu*X)Cb_H`t5`wfnq#PLlwm`WqjyMu!>bZ zv9Es2%&(>MW`)ZYQf*yn(Z?SRk`NC8iA zBg_v?J|~vJA;MIGR$6nzIx5(Ue8_VM} zi-@zFv5K^klmpEi3f*b^okHy=)q;S5WdCiTe;erE5rjv@Ki^IL|Baye$~`Hph{f!y z(VN@l6^I5m>}*rV+HO3`4rmgkw{VaLv{&;iKt6J!81nX@N~u)a4be`hos@~(C0uQQ zy}2E1;)`89#pyza(|+dR1@^#HG2~w9p8SnxAk2yaiI*|FVfSN%^sdJYX)?a-g#xW@ z)lTcu^!&k{*1oYJT(0Uo4yTs<_{PN7WQ^1#Ar?%VM`@{HzjY))%F6F5i~+&gP)i2B z5fZq%C$tf=p{9xhUkAYSqQ@ezfqSvl7HIrVg+SYB{|%e3;|e`R?47nSsb$jYz4Zus z<}P=*E_loXuqT7v<*tiF3q9N!zC)Llf#$*J7Tp10RSD4x{zvDx^NNWF*`kD zEAigJJN5l&Id_*!>$Gk1Zn=@~wtLckqVQe8Wj>%_cc`oRPr3^n{mc0Qv;Lg*6^Gfg zh9u2^qDzxx$`0mSg7J%9r@(i5Xk6ZldkW^gS1W&<_sYM^d&b5$&V8p{Xh4F7Z8T^d zOTfN^-;#ST3 zmupP?L{D*Qm#?Pm=J*7+$lzL**dxmBk5@^nzbNLgf(1hU(frtIrlRO48MP(r zjU&0z?XPi7N=aT3+IQj@u6JRvD#mr0TVlQuUOL&xO5dk>=mSyr(By^pzV%M`{aEOK z3K#mj@deOAjW3Z)Js;dl$)ka0&QaXo#lS0-+7Q{xzy@}x-UNw^!>2B&T$ivbw|V;q zARbKNjU0`dwC#JhIUS^37HWr#{%gAzfEP7;HPTc0J#6#8{1LYQYt1r1c*lHemAP=G z$e*Or|HTe1k&!sw0tll9farMlp{xRDCQrajTRqOF7=0~M_QeX8o?HjJ$Y4?hC{ZuK*9AdZ0TM#>ZGx(nv2Kd_QmS z{?5aPJ^MMKxfEdh$c)t+lYC^`;9L(;o1DfzPx`!rXwfh5+S`ra#g7<3Z~#XPSsnUJ z$evoxpF>Ihz%~6Y?TI6yG}yrqIZCVis&AdNTH!!0A07)10VPCdA#2m7s_-9E+(&*6 
z!}q%^;{OHe?^mq@+^fQYEWFAWXaT;6Ec6UQyz%A@x<~kcatAnF_BUA+w5*E4M#qIH7!;E$=8Z=V{@EP6Q{$z2S6u+FANdh3?6%p z`TP#w=mrC=D3zJpVSHE*0YHN4Xvo4R2P%MDXRevyz@=xIvDW~(j!B8oRRUkk%mz>s jK?5rIh6QzSM_{>f>dmM7WV*nApFmU;)Z}x|nFjm^*eSv~ literal 0 HcmV?d00001 diff --git a/content/develop/manual/patterns/indexes/2idx_2.png b/content/develop/manual/patterns/indexes/2idx_2.png new file mode 100644 index 0000000000000000000000000000000000000000..70dc6c381d0c70b01d5870af14e1d16671ed91ce GIT binary patch literal 13973 zcmeHt2T+sknl`;7(nX{QQj{tYL?D0yQk1F`1?doxE=V5=aQ_n-@LjJO9q^d^@|R%>2h0XUO~1yIpsCqD+i*S(y$qQBY8@>g#En zQBY7(1OKTQ=zuR~4?5QXWmm~XJnk#{P1`$_sau{)DAogER;lC-ZTF;HhcL1#bcpH)3M7&9F3UA zRiD`zdMPxIQl3)yMpI@JEpPac<>AVgD{(I|6<6X?F+@LiD$+}KK}rf^*Rp#h6pz(K zZk<<=;F;6PKI+kqS=(Z$5%%bca`I#E)CzJlGh36+IB#`&K7;Ui_bu#cl|-6=)XX+4 zg>&NG^Amc`FjB_oD)QM=$*WZlR5*Srzj~nYWUe*6|Jbq6s1%{{Z-I#5v5%X6M@!+sce`|e#&O3u1;jhaXKKBoEV z1WQ*iib77>1!HRUT6w^+e8~8-Ffgt`UC#O zA14aeBR^wl-Hd*uM`$CPwmHX}Hv2XxR?FR5nJZAM9>&Nf^{^i#N?8aFkmnK5Kn8!!+h#u9s$CNiwJdfF!8oY|xo+3U?O6E|$L-BR9$lj5f zRH?%h1U{kqZgq~y^XqPZ02 z5xLs^SyxS8=7i=@_8pps9DbDjn(w;S2bmFSfx9!Am%90TRJ&bf5BqagQ^VPy4+V9@ zTTJ8)4pGOj-oMA#^x08Cemj}tB6l{sAL}{=BqrsvV~KzMfdLLIg7s6g)1XtJJ)0wc zvFDKO!odKl2cM2lviJlj+a2M5e!s0n0QK_owdV0A)+zPx`a$Nk$0t~Y;xJQwt72Q6 zTNk!!wk)^YBDl^zve#M>^I^Pl_u5^jyB3WCjq(nul7@XpN7$*ORzJ&pHgklo9$r1Z zN*$$bCUGUvA|dEa#~ZV6Zr@nH3C}^U+|M@-5Z8UI^Yi7;8#np4*|(|ho$JX~HfucY z_Vne`t6$B(`hAsP%)OgHb0I>iME|wTYyBUEpRM&H-XtBnbhE&@!a3^al`pDKoXlvB zdme`!ujo7f{^t92Gq@S&x65vvZgz9W<^;Y;yzw@OZ=H1h;9}!ar4X#3>r(iY#nkjN zYcAxO*0=%meBF70YzH@$Q#>;17J8~W*zDN0udTArS(7@R$~>==RMph&DW8)L5rOlP zSV%`nk6x4>xg>kltH!2o?ApiU+hS{RYdveUYpiQ*KarC1TsGVsTsX;7Qfim9rRJpy z&Du@W-UJ!?dN6uMEpl`|i4cP&saZrk+jzF~Y(PrDeBLb8#KJVdl1Ea=q&`>NQ=c9@Mh# zEGdP~^pk<;`7EofYfd(o%zJeqx@AV|B|K-_45Ws&KJj5w2x-PN{1Pde? 
zRT#bOymT$PFw!g9>+KR8nxFp6He%vzogmx_ZKGj?wAqWJ^ssk8L6om|*Kn2fg&Yre zoBpP*ReH2ne!RHTr~9GTPtT|BeI6SDDKn)jE}==cWQkrLUN5fSZr_TEiivusU#Oq= zCG-;fQk>hzIXEObC2d%|jHfKHjWtPEPUAx@`o)bUr}~cC@tw4LZ|`-#o^b3l>s9zx z+F05Z?K%xB|1^JJv3cZ6Na7|fwo8de$xR97vHL;sgJCh$LACT8OlKq7N$UPW-{Me! z?gHHcA1(o>G0LW#bEa6}gn+H`4dt5xc}MI{ZYHm`C$?7_H5;9KcmCaR7(a%!(^FQG zH}E;tb1zH2LE%A#L5$bDSBcjxuc@V(CG7a9n!KQ`V5l0w>Z|mDyd=dXx4DlEWuG1` za8H`gY&Ke>t!KyQsfj^^h0WkT8R8l5s<<)7rRyKA`I~O!9jZxwot$7*GgMcS<$J(a%-2Z`X3MvHtyXvE z`__9J^xgTpDW4yIzJKhtne(^cdKL`?IwAkbiC8OLaBpLn>+YJrH|f7dj{D zX6}pfzlt^GA3g2n@pbf6?J(T0gXc%mT^Z!`VGPf7*x2FHaz*1a-hLC^Y)cw_(-(d+ zuM;bVy;OHZ3Az*U|7@Fws$7K9IufM5EzLa z??97$;yStQJv1$vuZgxv2wyN7Smqr3MP~a zg$rjtUJBN_XV!#^ejhm&8OU}dWtmL=G4%l z5^pU~RA82WBA;N_g=epDw-${H@7va)OLDV_se|SGgX4vEi_QVI z^$vkkjfQDy81F0NH|jloYu>lj=B=JyM<1?{sZLw6nsA#dS#n#v))P8c_-R}UZ?kj_ z*;O4*Z1_rF#}+Aw!|QCYZuk*(1;x}u@XDmsjl>S)lg7;H`cSFh!j0Osz&bQPoiv^7 zfohh3I)8SjGx{!BzSXs z-$JJodsn*(SG!lg2Q~&mmIH5_Y?+XJw_9+G$S6#req!$>+6*rB{@WFVv2C%RZFzGs zMO8&_(Hr&UVR*u>zqp(0c@$46CZB?dQCXbs4!)nlq>&<1q&`COaGw6B91}{!r7wn^stkw3i?rBj8)>3loYmyr$5h8 zt`vB?x>g6f&RBCREI3h&h-2!u^WPlYLw`D5!GK%h&Yxh})d{CR9!vYhux_A?f`am; z+hr?%E5nN_PTrof4$j_=F0w(MK0s(oL7^6;0{rRe;_o0FVRWZ{(|7$t$3o3Hm-`__?PA)Jo zP&N=E>wOa@ck;}cGjj3@ataDEzzi9`U@w1%AQ>+|(fv(+@1yPF=XBG}$KTD{OPIW` zgQNE?f2fEE`Jli2+rOtvklSBR^78vd7C@jJc|`7{ti0S`_6C-!k-w^#xCOa*SZTX? 
zx_J2kXQ(TlR8#}!|EH0^p7@8ASN^#2)E`&=VdU3JH90Z?e;{a|Ti{oKUFu9~a(`jH zIumY&HJF0p_?Eu**~>wci{q4~^Gc-@-D!5=Vi4@tt2fJ%t_-$W?8ZO&rq=SIc;t}$ zl9u^A;<5B*LKOd$J83OET)ke6;h?bQ9STG#)!rqgn+Q(w4@#~2lvkxmm^2fP6Q`!X=tva?g-*pEXLJm{L-32Th* zAHck?%NRE<-GmXgrYGz;KOBJmI?UwdUFavtf)v%`M3v$#z!vxbrqK5+N_i zVztf~RvIP{lF@@j*ZUo#1q=BeUVh3Bs?<5ECx~;2GM7QcIS6Q!2)7G(Lgr&YrMjJ& zpMeKx1Jm?$_(1VX0+NzEHGEDC6k@_1XJIqb^B`ZCfb~p<6(%5GG=XVc6Loztg>cMm(g-^-r@0ZQ&6V8RK}3Lpzi07EdjUV6^})CCb>+MCxg;4xer zk6E}f-@^EN=7e zYy2T50CzHdyVkaeH_9BjZ{PKbE+y($gkj}J#jWjx)`=>4>sQM^M*UMv^>1{~Do66S zdCN)yD8p=&`xsON6gvPxOk6Vd3dmMEDT@Z(+5d!|Dn;gLX|vx`|CiA5H~0B-2xadX z@f+20WPl$d>vAEWlFD-dV$hsQpI*oJyEdKJVi@Og0ezGM=)-34w=6K_H~cuEHjw|h zlKwfR_>;C!Y+{3-eyGd3 z0<`%f-aFer)!&HS2rnvK(T#^GEm>&r6D>&S@I6wI!wzoL8?O8Szlvt1H+FSGM~e<^ zT=>x%CuNOndIrMmx-9O;Iy<~{?@H1*x@;4!j)SZ3p1Gi5_tACo-96a8S9pW!OF=+d zH@v?hcL`X&`|BGgrMno^>L3DSU#RIu38Rac+uPkGVKw%)jaTAC_R$fFh(81AMjpqb zpB0|w77+oBf0=zN*?IDr8XAp!w&NaQc^LbM3>TfEldy>O_InBYF-#@Hk(;iQEWG&5 zlN%YFxa&C2+i(&N?M^2+Jdu9#x)z8wwlb6LsJx~?+#m-xuj10@UJ!Ven1H33;h|xe zCODiWFl#)h%zKJR#O9G0rxs#$l$5ZZ``5dH=*>6mMs5lQ3nA)8Ju0VTy>x2>Lt#VI zuFdMvM3$-3bmWOaHuf80_-PH!50PdX@=(En@=DbF$hwpYF!(~pe!d+gnP^4d6D~fx zNu&)XZG?u8k8GCSO50Q#EGpZyCVHgVg>4hC?V9mU(EGEuD&kWJ<5+VG=n@awV*%ou zUsnpm9oFH$fJ@h^Z6*6*VGiX}d_zQ5^fO{l-!IfHVjsFlhZCH%gWRrb zXHXV}WW6~6Fn*rNL!6o~Rdi2UzIT}qE9iSeedPYng_ttWi@*_E9*7R-NrQJQS&>+n zaYhdqg(6yLl@bvxHC5w{vzclqkmHRdVqLTNhnLoSl;R9J)7jTc${gLvsnvRaB(k>_EF07l?}SW}d>5izhQpvPp%s-A_Xo)*X-;xeU8)qdWqV`S zv->3++U<0`q+lxfTJbw?r}41GXThWx&x&1Gg(`obxZo`^k0#ZqvaCCnRUke;8ek)!2O-sKT;Xv7jYFcj@rU6I3oxOEtUj~nt-`K!)`pqzVB;1k_xDg49T>`D4-4Ph#q{IDB@Sn)SW1SMW4YTu1ICGvV!9|sofWxpMN^OuE$Z3D zf%FBg0u1QVWIsU^93Y%--)w?gFS??`Hz|okVr7vKkY~6D6)lAh{CpT<8MnwFG)hJU zZp0JbgB3sDO30gwUwr|}$Nrd4YYJkQ)er8A&bc(*!o2Y_yQ^sxIwV4-%f>wOX9^+= zwLssUC0;qcxjgGBsHil06od>>#Cz3)affxMVYRGYr?PYMTgvc)Bz(niC443v-wdBg zj)^79D~u^-Nj9{#i$9QiZHM$4Jq#_K`a0lE@+zOxFx|*FTQMK+=r&zevOFhBo*Jw1%U3f< zS$GN!g>12OE=$B)#m+dK;JY3>NT1q6r%_TXq^9A!(0ma@DM9XJBRauFJtBr#??$?J 
zK0TRSQ+pLYWg=oGvCHQ6+ECHftAm^X0lAD9C8%Nd=IFnrtZr6kea=SiVRC68X`vTh zhLuc7-mhdacn6wQ50y{`i$5t)=!no6J!I&@csG*#-Q?2bR3f4s;h*OfzDFb~s(wnR z5g`s9h9ul>2{A3R=+K;^WhO_*eE%OL$uJ?K4Gby&rEtXOOLvkHe8+J8JITgnFpFG zO&m{7+J1KPF^1|rbKZsbhXVulOvnb8 zaYDYj|MnNLm{COBjv-4)zc)DI)a{8HKI-+6wTo;kcTnau2Mp=dzn`niyZAzRAD&F99*9%xvfgl4B`vR4&Ig0w zz;JMgX9!N|P@|q{Ct5rnd${PJyhM@706iebLWyEkzAC|{k0LkiH3n}VVkLi*E+)dL zc)ydzWC(mu0_`9UMq!;%po5i{D^6ii3z^>CF;UNQ)mYIWqDCu`mB@*uamKH(_FPjS zgwCu$xhj~X!DI;mlS|>qYL6baRuZcI_VSgk(2TB;`bBb>joPFEfLY4+BbqHa^uV_) zr7o37g?VpxOOM6ZG%k5B@njM+(^22uId4-rX!yh%y;}aLyEmW|AMkMyU58(abuu2q zS74xIq)~p3nq4YQgVA+zH&0~^hh~n=2S2s)D7QxL<*ybHS5wI*_s0N)=r#wWKG(>6 z&xu!RF34b>;M48HE8u1^k+;;^2=F-9y;ws0$P#Et;ioV?TcIiHGs~V`(dK(&W5m;7 z8eUIM!;6Mmu6L5Hb;|NNK62a?-Dn5hP1$`NxpPtV*1k||5%C6eIJz@80?VnF`+x9t zq}>E>Z84ek1QAe-eHV;VI08rA?jMe}+$Y^ekq((EXeE!r9FvH2ZNo|1EXrO!E2d=92Fo|w{gxV#5s zcmkF21-er7M!dSD3sHmPma8=>j7ql!+FPkYHb_nT<31zqbR!M5Lfy})!#1|dDu}aH zANDID%N#)?DTu)EDQM|l`qLvHJ9L+yt;C$HbC>9oBeS;F5&(&y%?3;>_41P~zR^Xc zc)`i%QyVWp#@8CyhrD{d%O5y%C$Ola4sXl7lao@*hZOY0FHBqeX25rvL3_m0 zp07;JP=`F$r&?m04p|oT^n3R6M+iiQ9H`k``sL*kH<0BS1?Umjs55qXZtA&ReSO%@ z))e-d??#5efQFm1Q`f+<@9DDPp)&YF3L88*z5?w5-OpftqoPWlQ$cnY%c(6JZ1Bbeb254LP77HLoGGIsv~{a`_j-7EnmPHnYB5n4P zX4Ql2jLwO5ncakn(B^6{zqg?qFzF&Le27sU8Nv{&*O=dRu{a+%!Ft=6V@03y+2+=p zp%}2ta)S|Yx^2DR+A!|9VGSd7DiF`5m(A+A@tex82O+*2D#N}m*veIx(GP^saEn=A z9oe?hg=AcF&qzm{z7QB@6fXl#D8JB2kzfPtRj^t|y>Wf2=!BhJMsXM^#<#Y2if^^t zGc5e{d%KJVmoze2K8}bv=gF9p8<9WwT5!jm9Jh``T*#g}C>=Elu=!wWT}9@gHy2h3 z>pj;tST|nXv96baoOZdbyE#xXoFu=yIj9jg@@5eP%?Av%GJML77(V(Y(dJG4QTr|Z zGM9YvZgKm&>k^+xl=-X#R|Lk`l88}DRA+%vpq?scCc%w zW~^BhxhGdywh(@~KEDhD7-qgVQ5! 
zQiQvdez|}E)1#7AP;3NGc}aEY3ptIz4~KVUyhj_y;v>J88ivae7+(XiNpTMmd$N2m z#aCujuH}QYch}&u=Vn3ya;d0pV%}+797rC*pcB)fZJ1C(D^Tn8o@xUtzKbyXk9PSP zFnHN!$AfLFWxpPGqWPP(z~RdZ@?N{!%VQVRUeCI6KAuprMpmwam~$}b8A|JmCQh@O zC0FQ}b#}#WHX3a`pz_s2_tJt;!6z@rc{K_*&cv$&?c`0idx?+^(!5*{5i7_@l- z40$@G$>!8|dTTh62oZ*ybe>-lz)BH@RF%LOV=|SBO^g~QF-JcT*3&xN;(nXFqvoUl z6&sK&w4SZ(C6M}d6GqErKnv8gm-d;1P(w>*2U#`-Nihw{(}d;Xs@)@x$ffBrs&@u- z6Ty$eHYTffA3Ypq4AQL=#HtboBtXP52CkVYba^89AX0avrN4-8wrIKh{f1z?L79vN zuzd`zAdpsv^Dn*o-ZH*(8#eArBEUMFGF%A3I4A+)bjITQ?m)T#o-|Ph($>Xuh;(IF z3r2TdU(>%B>(X`LLnE$cx)m9LI7IfYlgj%UdH#bQ;}-l{&NWrV@i5!#^dE453wT`( zS8WRq6(UC}1KFF2ZoMNe^RVXrADw;PPHyoNGs~8vQ`k+2Z@X8`;3ycvgz;O0HHr#6 zvOd1pv3!j8d>5P!gHP1WKfi0d-P;&Y?7k->WIDVC@50bldm>$(>ffgB^V5P1?4D&U z284!%l7b^>0;XXzBdUEBS{LvLT3A+%w9xVfm6IpvfJ=QMJ;zQ(=BV1)$>W18Y7Q@y_ABcQYWk=YghWHwH&>z}t)1OqwZ)Ib z8I%>kj1?oq1#xN(Hm`8O35)Rl6(bM$e*OqyvQgku7Tvgh5$YQZ`}U1kKT^&i>IYW2 zSBw!CY$4rjUVAM4Qv323=GfYTP{X!d1sXb&^le<<_JJWNj8W}_Nl!xLNGOt2x40Br zHy;5tpQ^8v3yKHy!jj|Ac>3;C7t2ODYvHLYj{B)Z5m&MS-T9QyA(`>g>GGk~I|=Hp zQ?tH+6k47YpqrlpH4*fb*Is9H{kwQ_EUC2ntCB04Z&ePr$9{(>BQw+-6;mHvu+>y% zvN%I#34o?j!hBVVrsqKIHvkIa28NaP`|)JK9p}jb?fv9UQVb}fD~?C(7tt$$^13yr z7$~dEr$FJo&!0I7xH%2O7?U2Im*LZW1Tq7rnf1z8{W{1ewVcuG0j~Z@E`GM?FrJ7z z1qGNoSx%Kz)20Gh842#@2k)f(66H1^e#k}2y+n8Ei`uB}GUSqmY9T;UHkmNNKw_X0 z4hnY$oPFgg+2v!M!h{@}au=zs*)kfoNk7^1deoHv71|$&i~Y2rxy=F#RMBKs>mzy! 
zW?u4D{q$#!2D#PF4AkV~`XwWyQ=NI^fW!dq3Xmf=l^P2-79y8bib}r%6)(FZ>&%sa z1?}t&&VKBg)SJP!r65XT1If^HN0Sk#Tt~U_90PfG2vGiJrHOr&5d*5#V%b+7gL1N$ z1{zkQLHiAurw1RiJWX~zyIbbR6?aUf-8YeDK!Sfb$eoj>*Je|Y zWIlBufKmt8w(1Qz((YujxmiuC1FFyrP@?x~x$*R@j$tXjw)4LxX{i#qb1^P`v}gv; zX^~aeIzr>bx4f@6B2;1Gk_tVIgN|f>oO7Hx>x=M-wYWQ~>0Sfa9`)OSD}myDW@)q$nRVcdSe8e(5ioVJjuGRhu~x)PNX3M8BJ4nGXR=`oA10XN-s{42j>( z^`)=50|v}y_WpraV05x)#v}G?n(I^p;#E$HUo&5e4~N>qu$AiMB<8tq$8Q+1yy61j zi`)VHs9l)2Zr7saI*~E2$JW_1q9o>*0qQ{Xl#W~Ri|n0*0yLo=2rulK z75Rzj>g3|3oy#+F*-LqbH`Af%#`a!Ok6%VZLd?DBW`$oiq(ifF@ej>dz#bA}z;sR6 zL>sV_zqV=r*CVJAng(slI^FR8s>o}a>;M6>Kdpi5)_I&UHid1I_WHuE81&Cj_Gjb9r`L0mAZ%mkqsRePtLamgxH zAty&4mp%xT7`n&~%*Ez+O>WQJf}A2f`b5G#&-l{gN9+gQH{AHZdnL_Y`ZpeHqq=DR zF|Q8(=5+(PL7y!%bZK)H=;fGe@t!8@qa0A>+UXmic|LfBcJ@vc)#?*6h{?*xKVyke zh$@fK5tdFanUHur2)4sPYy{ik+<6sj9?;vrg1`02)W4e%{nsW5nAcy~=&$Plqg&wL zH;5`gN)ZB+2}Jey(!yJ(2|*q3s*aYBt{V8uabCd2;`bXIduk=Qp8Yc#A>wn*0{&!>l-;Mo)|8DF92=-^+1pR;8*#9fq z0B^c@UtB)AAB~980MA?`ucDm85?C$rBjURaO#?K5B8~t#ivf}{JJ>DTJf+h;pA`Qw zk8b|>o&kXS-w$<6tbrfp7`u)AJV+Ao+;^IxZJ6b}j(@e6Pk-|s4RFD~->_9>0?)N% zmEZrucwL4uD8xsbzGDAXAj1o5lp-}IT_|h+m69>em(DvoEuE wK!TWVlux~%lY0TV)kjRr?0&e?OS>yH{lS|f?!;;>`AM$6j*)iZIs1tJ0LXL&9smFU literal 0 HcmV?d00001 diff --git a/content/develop/manual/patterns/indexes/index.md b/content/develop/manual/patterns/indexes/index.md new file mode 100644 index 0000000000..1eea4a79fd --- /dev/null +++ b/content/develop/manual/patterns/indexes/index.md @@ -0,0 +1,755 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: 'Building secondary indexes in Redis + + ' +linkTitle: Secondary indexing +title: Secondary indexing +weight: 1 +--- + +Redis is not exactly a key-value store, since values can be complex data structures. However it has an external key-value shell: at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers *primary key access*. 
However since Redis is a data structures server, its capabilities can be used for indexing, in order to create secondary indexes of different kinds, including composite (multi-column) indexes. + +This document explains how it is possible to create indexes in Redis using the following data structures: + +* Sorted sets to create secondary indexes by ID or other numerical fields. +* Sorted sets with lexicographical ranges for creating more advanced secondary indexes, composite indexes and graph traversal indexes. +* Sets for creating random indexes. +* Lists for creating simple iterable indexes and last N items indexes. + +Implementing and maintaining indexes with Redis is an advanced topic, so most +users that need to perform complex queries on data should understand if they +are better served by a relational store. However often, especially in caching +scenarios, there is the explicit need to store indexed data into Redis in order to speed up common queries which require some form of indexing in order to be executed. + +Simple numerical indexes with sorted sets +=== + +The simplest secondary index you can create with Redis is by using the +sorted set data type, which is a data structure representing a set of +elements ordered by a floating point number which is the *score* of +each element. Elements are ordered from the smallest to the highest score. + +Since the score is a double precision float, indexes you can build with +vanilla sorted sets are limited to things where the indexing field is a number +within a given range. + +The two commands to build these kinds of indexes are [`ZADD`](/commands/zadd) and +[`ZRANGE`](/commands/zrange) with the `BYSCORE` argument to respectively add items and retrieve items within a +specified range. + +For instance, it is possible to index a set of person names by their +age by adding elements to a sorted set. The element will be the name of the +person and the score will be the age. 
+ + ZADD myindex 25 Manuel + ZADD myindex 18 Anna + ZADD myindex 35 Jon + ZADD myindex 67 Helen + +In order to retrieve all persons with an age between 20 and 40, the following +command can be used: + + ZRANGE myindex 20 40 BYSCORE + 1) "Manuel" + 2) "Jon" + +By using the **WITHSCORES** option of [`ZRANGE`](/commands/zrange) it is also possible +to obtain the scores associated with the returned elements. + +The [`ZCOUNT`](/commands/zcount) command can be used in order to retrieve the number of elements +within a given range, without actually fetching the elements, which is also +useful, especially given the fact the operation is executed in logarithmic +time regardless of the size of the range. + +Ranges can be inclusive or exclusive, please refer to the [`ZRANGE`](/commands/zrange) +command documentation for more information. + +**Note**: Using the [`ZRANGE`](/commands/zrange) with the `BYSCORE` and `REV` arguments, it is possible to query a range in +reversed order, which is often useful when data is indexed in a given +direction (ascending or descending) but we want to retrieve information +the other way around. + +Using objects IDs as associated values +--- + +In the above example we associated names to ages. However in general we +may want to index some field of an object which is stored elsewhere. +Instead of using the sorted set value directly to store the data associated +with the indexed field, it is possible to store just the ID of the object. + +For example I may have Redis hashes representing users. 
Each user is +represented by a single key, directly accessible by ID: + + HMSET user:1 id 1 username antirez ctime 1444809424 age 38 + HMSET user:2 id 2 username maria ctime 1444808132 age 42 + HMSET user:3 id 3 username jballard ctime 1443246218 age 33 + +If I want to create an index in order to query users by their age, I +could do: + + ZADD user.age.index 38 1 + ZADD user.age.index 42 2 + ZADD user.age.index 33 3 + +This time the value associated with the score in the sorted set is the +ID of the object. So once I query the index with [`ZRANGE`](/commands/zrange) with the `BYSCORE` argument, I'll +also have to retrieve the information I need with [`HGETALL`](/commands/hgetall) or similar +commands. The obvious advantage is that objects can change without touching +the index, as long as we don't change the indexed field. + +In the next examples we'll almost always use IDs as values associated with +the index, since this is usually the more sounding design, with a few +exceptions. + +Updating simple sorted set indexes +--- + +Often we index things which change over time. In the above +example, the age of the user changes every year. In such a case it would +make sense to use the birth date as index instead of the age itself, +but there are other cases where we simply want some field to change from +time to time, and the index to reflect this change. 
+
+The [`ZADD`](/commands/zadd) command makes updating simple indexes a very trivial operation
+since re-adding back an element with a different score and the same value
+will simply update the score and move the element at the right position,
+so if the user `antirez` turned 39 years old, in order to update the
+data in the hash representing the user, and in the index as well, we need
+to execute the following two commands:
+
+    HSET user:1 age 39
+    ZADD user.age.index 39 1
+
+The operation may be wrapped in a [`MULTI`](/commands/multi)/[`EXEC`](/commands/exec) transaction in order to
+make sure both fields are updated or none.
+
+Turning multi dimensional data into linear data
+---
+
+Indexes created with sorted sets are able to index only a single numerical
+value. Because of this you may think it is impossible to index something
+which has multiple dimensions using this kind of indexes, but actually this
+is not always true. If you can efficiently represent something
+multi-dimensional in a linear way, then it is often possible to use a simple
+sorted set for indexing.
+
+For example the [Redis geo indexing API](/commands/geoadd) uses a sorted
+set to index places by latitude and longitude using a technique called
+[Geo hash](https://en.wikipedia.org/wiki/Geohash). The sorted set score
+represents alternating bits of longitude and latitude, so that we map the
+linear score of a sorted set to many small *squares* in the earth surface.
+By doing an 8+1 style center plus neighborhoods search it is possible to
+retrieve elements by radius.
+
+Limits of the score
+---
+
+Sorted set elements scores are double precision floats. It means that
+they can represent different decimal or integer values with different
+errors, because they use an exponential representation internally.
+However what is interesting for indexing purposes is that the score is
+always able to represent without any error numbers between -9007199254740992
+and 9007199254740992, which is `-/+ 2^53`.
+ +When representing much larger numbers, you need a different form of indexing +that is able to index numbers at any precision, called a lexicographical +index. + +Lexicographical indexes +=== + +Redis sorted sets have an interesting property. When elements are added +with the same score, they are sorted lexicographically, comparing the +strings as binary data with the `memcmp()` function. + +For people that don't know the C language nor the `memcmp` function, what +it means is that elements with the same score are sorted comparing the +raw values of their bytes, byte after byte. If the first byte is the same, +the second is checked and so forth. If the common prefix of two strings is +the same then the longer string is considered the greater of the two, +so "foobar" is greater than "foo". + +There are commands such as [`ZRANGE`](/commands/zrange) and [`ZLEXCOUNT`](/commands/zlexcount) that +are able to query and count ranges in a lexicographically fashion, assuming +they are used with sorted sets where all the elements have the same score. + +This Redis feature is basically equivalent to a `b-tree` data structure which +is often used in order to implement indexes with traditional databases. +As you can guess, because of this, it is possible to use this Redis data +structure in order to implement pretty fancy indexes. + +Before we dive into using lexicographical indexes, let's check how +sorted sets behave in this special mode of operation. Since we need to +add elements with the same score, we'll always use the special score of +zero. + + ZADD myindex 0 baaa + ZADD myindex 0 abbb + ZADD myindex 0 aaaa + ZADD myindex 0 bbbb + +Fetching all the elements from the sorted set immediately reveals that they +are ordered lexicographically. + + ZRANGE myindex 0 -1 + 1) "aaaa" + 2) "abbb" + 3) "baaa" + 4) "bbbb" + +Now we can use [`ZRANGE`](/commands/zrange) with the `BYLEX` argument in order to perform range queries. 
+
+    ZRANGE myindex [a (b BYLEX
+    1) "aaaa"
+    2) "abbb"
+
+Note that in the range queries we prefixed the `min` and `max` elements
+identifying the range with the special characters `[` and `(`.
+These prefixes are mandatory, and they specify if the elements
+of the range are inclusive or exclusive. So the range `[a (b` means give me
+all the elements lexicographically between `a` inclusive and `b` exclusive,
+which are all the elements starting with `a`.
+
+There are also two more special characters indicating the infinitely negative
+string and the infinitely positive string, which are `-` and `+`.
+
+    ZRANGE myindex [b + BYLEX
+    1) "baaa"
+    2) "bbbb"
+
+That's it basically. Let's see how to use these features to build indexes.
+
+A first example: completion
+---
+
+An interesting application of indexing is completion. Completion is what
+happens when you start typing your query into a search engine: the user
+interface will anticipate what you are likely typing, providing common
+queries that start with the same characters.
+
+A naive approach to completion is to just add every single query we
+get from the user into the index. For example if the user searches `banana`
+we'll just do:
+
+    ZADD myindex 0 banana
+
+And so forth for each search query ever encountered. Then when we want to
+complete the user input, we execute a range query using [`ZRANGE`](/commands/zrange) with the `BYLEX` argument.
+Imagine the user is typing "bit" inside the search form, and we want to
+offer possible search keywords starting with "bit". We send Redis a command
+like that:
+
+    ZRANGE myindex "[bit" "[bit\xff" BYLEX
+
+Basically we create a range using the string the user is typing right now
+as start, and the same string plus a trailing byte set to 255, which is `\xff` in the example, as the end of the range. This way we get all the strings that start with the string the user is typing.
+
+Note that we don't want too many items returned, so we may use the **LIMIT** option in order to reduce the number of results.
+
+Adding frequency into the mix
+---
+
+The above approach is a bit naive, because all the user searches are the same
+in this way. In a real system we want to complete strings according to their
+frequency: very popular searches will be proposed with a higher probability
+compared to search strings typed very rarely.
+
+In order to implement something which depends on the frequency, and at the
+same time automatically adapts to future inputs, by purging searches that
+are no longer popular, we can use a very simple *streaming algorithm*.
+
+To start, we modify our index in order to store not just the search term,
+but also the frequency the term is associated with. So instead of just adding
+`banana` we add `banana:1`, where 1 is the frequency.
+
+    ZADD myindex 0 banana:1
+
+We also need logic in order to increment the index if the search term
+already exists in the index, so what we'll actually do is something like
+that:
+
+    ZRANGE myindex "[banana:" + BYLEX LIMIT 0 1
+    1) "banana:1"
+
+This will return the single entry of `banana` if it exists. Then we
+can increment the associated frequency and send the following two
+commands:
+
+    ZREM myindex 0 banana:1
+    ZADD myindex 0 banana:2
+
+Note that because it is possible that there are concurrent updates, the
+above three commands should be sent via a [Lua script](/commands/eval)
+instead, so that the Lua script will atomically get the old count and
+re-add the item with incremented score.
+
+So the result will be that, every time a user searches for `banana` we'll
+get our entry updated.
+
+There is more: our goal is to just have items searched very frequently.
+So we need some form of purging.
When we actually query the index
+in order to complete the user input, we may see something like that:
+
+    ZRANGE myindex "[banana:" + BYLEX LIMIT 0 10
+    1) "banana:123"
+    2) "banaooo:1"
+    3) "banned user:49"
+    4) "banning:89"
+
+Apparently nobody searches for "banaooo", for example, but the query was
+performed a single time, so we end up presenting it to the user.
+
+This is what we can do. Out of the returned items, we pick a random one,
+decrement its score by one, and re-add it with the new score.
+However if the score reaches 0, we simply remove the item from the list.
+You can use much more advanced systems, but the idea is that the index in
+the long run will contain top searches, and if top searches change over
+time it will adapt automatically.
+
+A refinement to this algorithm is to pick entries in the list according to
+their weight: the higher the score, the less likely entries are picked
+in order to decrement its score, or evict them.
+
+Normalizing strings for case and accents
+---
+
+In the completion examples we always used lowercase strings. However
+reality is much more complex than that: languages have capitalized names,
+accents, and so forth.
+
+One simple way to deal with these issues is to actually normalize the
+string the user searches. Whether the user searches for "Banana",
+"BANANA" or "Ba'nana" we may always turn it into "banana".
+
+However sometimes we may like to present the user with the original
+item typed, even if we normalize the string for indexing. In order to
+do this, what we do is to change the format of the index so that instead
+of just storing `term:frequency` we store `normalized:frequency:original`
+like in the following example:
+
+    ZADD myindex 0 banana:273:Banana
+
+Basically we add another field that we'll extract and use only for
+visualization. Ranges will always be computed using the normalized strings
+instead. This is a common trick which has multiple applications.
+ +Adding auxiliary information in the index +--- + +When using a sorted set in a direct way, we have two different attributes +for each object: the score, which we use as an index, and an associated +value. When using lexicographical indexes instead, the score is always +set to 0 and basically not used at all. We are left with a single string, +which is the element itself. + +Like we did in the previous completion examples, we are still able to +store associated data using separators. For example we used the colon in +order to add the frequency and the original word for completion. + +In general we can add any kind of associated value to our indexing key. +In order to use a lexicographical index to implement a simple key-value store +we just store the entry as `key:value`: + + ZADD myindex 0 mykey:myvalue + +And search for the key with: + + ZRANGE myindex [mykey: + BYLEX LIMIT 0 1 + 1) "mykey:myvalue" + +Then we extract the part after the colon to retrieve the value. +However a problem to solve in this case is collisions. The colon character +may be part of the key itself, so it must be chosen in order to never +collide with the key we add. + +Since lexicographical ranges in Redis are binary safe you can use any +byte or any sequence of bytes. However if you receive untrusted user +input, it is better to use some form of escaping in order to guarantee +that the separator will never happen to be part of the key. + +For example if you use two null bytes as separator `"\0\0"`, you may +want to always escape null bytes into two bytes sequences in your strings. + +Numerical padding +--- + +Lexicographical indexes may look like good only when the problem at hand +is to index strings. Actually it is very simple to use this kind of index +in order to perform indexing of arbitrary precision numbers. 
+ +In the ASCII character set, digits appear in the order from 0 to 9, so +if we left-pad numbers with leading zeroes, the result is that comparing +them as strings will order them by their numerical value. + + ZADD myindex 0 00324823481:foo + ZADD myindex 0 12838349234:bar + ZADD myindex 0 00000000111:zap + + ZRANGE myindex 0 -1 + 1) "00000000111:zap" + 2) "00324823481:foo" + 3) "12838349234:bar" + +We effectively created an index using a numerical field which can be as +big as we want. This also works with floating point numbers of any precision +by making sure we left pad the numerical part with leading zeroes and the +decimal part with trailing zeroes like in the following list of numbers: + + 01000000000000.11000000000000 + 01000000000000.02200000000000 + 00000002121241.34893482930000 + 00999999999999.00000000000000 + +Using numbers in binary form +--- + +Storing numbers in decimal may use too much memory. An alternative approach +is just to store numbers, for example 128 bit integers, directly in their +binary form. However for this to work, you need to store the numbers in +*big endian format*, so that the most significant bytes are stored before +the least significant bytes. This way when Redis compares the strings with +`memcmp()`, it will effectively sort the numbers by their value. + +Keep in mind that data stored in binary format is less observable for +debugging, harder to parse and export. So it is definitely a trade off. + +Composite indexes +=== + +So far we explored ways to index single fields. However we all know that +SQL stores are able to create indexes using multiple fields. For example +I may index products in a very large store by room number and price. + +I need to run queries in order to retrieve all the products in a given +room having a given price range. What I can do is to index each product +in the following way: + + ZADD myindex 0 0056:0028.44:90 + ZADD myindex 0 0034:0011.00:832 + +Here the fields are `room:price:product_id`. 
I used just four digits padding +in the example for simplicity. The auxiliary data (the product ID) does not +need any padding. + +With an index like that, to get all the products in room 56 having a price +between 10 and 30 dollars is very easy. We can just run the following +command: + + ZRANGE myindex [0056:0010.00 [0056:0030.00 BYLEX + +The above is called a composed index. Its effectiveness depends on the +order of the fields and the queries I want to run. For example the above +index cannot be used efficiently in order to get all the products having +a specific price range regardless of the room number. However I can use +the primary key in order to run queries regardless of the price, like +*give me all the products in room 44*. + +Composite indexes are very powerful, and are used in traditional stores +in order to optimize complex queries. In Redis they could be useful both +to implement a very fast in-memory Redis index of something stored into +a traditional data store, or in order to directly index Redis data. + +Updating lexicographical indexes +=== + +The value of the index in a lexicographical index can get pretty fancy +and hard or slow to rebuild from what we store about the object. So one +approach to simplify the handling of the index, at the cost of using more +memory, is to also take alongside to the sorted set representing the index +a hash mapping the object ID to the current index value. + +So for example, when we index we also add to a hash: + + MULTI + ZADD myindex 0 0056:0028.44:90 + HSET index.content 90 0056:0028.44:90 + EXEC + +This is not always needed, but simplifies the operations of updating +the index. In order to remove the old information we indexed for the object +ID 90, regardless of the *current* fields values of the object, we just +have to retrieve the hash value by object ID and [`ZREM`](/commands/zrem) it in the sorted +set view. 
+
+Representing and querying graphs using a hexastore
+===
+
+One cool thing about composite indexes is that they are handy in order
+to represent graphs, using a data structure which is called
+[Hexastore](http://www.vldb.org/pvldb/vol1/1453965.pdf).
+
+The hexastore provides a representation for relations between objects,
+formed by a *subject*, a *predicate* and an *object*.
+A simple relation between objects could be:
+
+    antirez is-friend-of matteocollina
+
+In order to represent this relation I can store the following element
+in my lexicographical index:
+
+    ZADD myindex 0 spo:antirez:is-friend-of:matteocollina
+
+Note that I prefixed my item with the string **spo**. It means that
+the item represents a subject,predicate,object relation.
+
+I can add 5 more entries for the same relation, but in a different order:
+
+    ZADD myindex 0 sop:antirez:matteocollina:is-friend-of
+    ZADD myindex 0 ops:matteocollina:is-friend-of:antirez
+    ZADD myindex 0 osp:matteocollina:antirez:is-friend-of
+    ZADD myindex 0 pso:is-friend-of:antirez:matteocollina
+    ZADD myindex 0 pos:is-friend-of:matteocollina:antirez
+
+Now things start to be interesting, and I can query the graph in many
+different ways. For example, who are all the people `antirez`
+*is friend of*?
+
+    ZRANGE myindex "[spo:antirez:is-friend-of:" "[spo:antirez:is-friend-of:\xff" BYLEX
+    1) "spo:antirez:is-friend-of:matteocollina"
+    2) "spo:antirez:is-friend-of:wonderwoman"
+    3) "spo:antirez:is-friend-of:spiderman"
+
+Or, what are all the relationships `antirez` and `matteocollina` have where
+the first is the subject and the second is the object?
+
+    ZRANGE myindex "[sop:antirez:matteocollina:" "[sop:antirez:matteocollina:\xff" BYLEX
+    1) "sop:antirez:matteocollina:is-friend-of"
+    2) "sop:antirez:matteocollina:was-at-conference-with"
+    3) "sop:antirez:matteocollina:talked-with"
+
+By combining different queries, I can ask fancy questions.
For example: +*Who are all my friends that, like beer, live in Barcelona, and matteocollina consider friends as well?* +To get this information I start with an `spo` query to find all the people +I'm friend with. Then for each result I get I perform an `spo` query +to check if they like beer, removing the ones for which I can't find +this relation. I do it again to filter by city. Finally I perform an `ops` +query to find, of the list I obtained, who is considered friend by +matteocollina. + +Make sure to check [Matteo Collina's slides about Levelgraph](http://nodejsconfit.levelgraph.io/) in order to better understand these ideas. + +Multi dimensional indexes +=== + +A more complex type of index is an index that allows you to perform queries +where two or more variables are queried at the same time for specific +ranges. For example I may have a data set representing persons age and +salary, and I want to retrieve all the people between 50 and 55 years old +having a salary between 70000 and 85000. + +This query may be performed with a multi column index, but this requires +us to select the first variable and then scan the second, which means we +may do a lot more work than needed. It is possible to perform these kinds of +queries involving multiple variables using different data structures. +For example, multi-dimensional trees such as *k-d trees* or *r-trees* are +sometimes used. Here we'll describe a different way to index data into +multiple dimensions, using a representation trick that allows us to perform +the query in a very efficient way using Redis lexicographical ranges. + +Let's say we have points in the space, which represent our data samples, where `x` and `y` are our coordinates. The max value of both variables is 400. + +In the next figure, the blue box represents our query. We want all the points where `x` is between 50 and 100, and where `y` is between 100 and 300. 
+ +![Points in the space](2idx_0.png) + +In order to represent data that makes these kinds of queries fast to perform, +we start by padding our numbers with 0. So for example imagine we want to +add the point 10,25 (x,y) to our index. Given that the maximum range in the +example is 400 we can just pad to three digits, so we obtain: + + x = 010 + y = 025 + +Now what we do is to interleave the digits, taking the leftmost digit +in x, and the leftmost digit in y, and so forth, in order to create a single +number: + + 001205 + +This is our index, however in order to more easily reconstruct the original +representation, if we want (at the cost of space), we may also add the +original values as additional columns: + + 001205:10:25 + +Now, let's reason about this representation and why it is useful in the +context of range queries. For example let's take the center of our blue +box, which is at `x=75` and `y=200`. We can encode this number as we did +earlier by interleaving the digits, obtaining: + + 027050 + +What happens if we substitute the last two digits respectively with 00 and 99? +We obtain a range which is lexicographically continuous: + + 027000 to 027099 + +What this maps to is to a square representing all values where the `x` +variable is between 70 and 79, and the `y` variable is between 200 and 209. +To identify this specific area, we can write random points in that interval. + +![Small area](2idx_1.png) + +So the above lexicographic query allows us to easily query for points in +a specific square in the picture. However the square may be too small for +the box we are searching, so that too many queries are needed. +So we can do the same but instead of replacing the last two digits with 00 +and 99, we can do it for the last four digits, obtaining the following +range: + + 020000 029999 + +This time the range represents all the points where `x` is between 0 and 99 +and `y` is between 200 and 299. 
Drawing random points in this interval
+shows us this larger area.
+
+![Large area](2idx_2.png)
+
+So now our area is too big for our query, and still our search box is
+not completely included. We need more granularity, but we can easily obtain
+it by representing our numbers in binary form. This time, when we replace
+digits instead of getting squares which are ten times bigger, we get squares
+which are just two times bigger.
+
+Our numbers in binary form, assuming we need just 9 bits for each variable
+(in order to represent numbers up to 400 in value) would be:
+
+    x = 75  -> 001001011
+    y = 200 -> 011001000
+
+So by interleaving digits, our representation in the index would be:
+
+    000111000011001010:75:200
+
+Let's see what are our ranges as we substitute the last 2, 4, 6, 8, ...
+bits with 0s and 1s in the interleaved representation:
+
+    2 bits: x between 74 and 75, y between 200 and 201 (range=2)
+    4 bits: x between 72 and 75, y between 200 and 203 (range=4)
+    6 bits: x between 72 and 79, y between 200 and 207 (range=8)
+    8 bits: x between 64 and 79, y between 192 and 207 (range=16)
+
+And so forth. Now we have definitely better granularity!
+As you can see substituting N bits from the index gives us
+search boxes of side `2^(N/2)`.
+
+So what we do is check the dimension where our search box is smaller,
+and check the nearest power of two to this number. Our search box
+was 50,100 to 100,300, so it has a width of 50 and a height of 200.
+We take the smaller of the two, 50, and check the nearest power of two
+which is 64. 64 is 2^6, so we would work with indexes obtained replacing
+the latest 12 bits from the interleaved representation (so that we end
+replacing just 6 bits of each variable).
+
+However single squares may not cover all our search, so we may need more.
+What we do is to start with the left bottom corner of our search box,
+which is 50,100, and find the first range by substituting the last 6 bits
+in each number with 0.
Then we do the same with the right top corner.
+
+With two trivial nested for loops where we increment only the significant
+bits, we can find all the squares between these two. For each square we
+convert the two numbers into our interleaved representation, and create
+the range using the converted representation as our start, and the same
+representation but with the latest 12 bits turned on as end range.
+
+For each square found we perform our query and get the elements inside,
+removing the elements which are outside our search box.
+
+Turning this into code is simple. Here is a Ruby example:
+
+    def spacequery(x0,y0,x1,y1,exp)
+        bits=exp*2
+        x_start = x0/(2**exp)
+        x_end = x1/(2**exp)
+        y_start = y0/(2**exp)
+        y_end = y1/(2**exp)
+        (x_start..x_end).each{|x|
+            (y_start..y_end).each{|y|
+                x_range_start = x*(2**exp)
+                x_range_end = x_range_start | ((2**exp)-1)
+                y_range_start = y*(2**exp)
+                y_range_end = y_range_start | ((2**exp)-1)
+                puts "#{x},#{y} x from #{x_range_start} to #{x_range_end}, y from #{y_range_start} to #{y_range_end}"
+
+                # Turn it into interleaved form for ZRANGE query.
+                # We assume we need 9 bits for each integer, so the final
+                # interleaved representation will be 18 bits.
+                xbin = x_range_start.to_s(2).rjust(9,'0')
+                ybin = y_range_start.to_s(2).rjust(9,'0')
+                s = xbin.split("").zip(ybin.split("")).flatten.compact.join("")
+                # Now that we have the start of the range, calculate the end
+                # by replacing the specified number of bits from 0 to 1.
+                e = s[0..-(bits+1)]+("1"*bits)
+                puts "ZRANGE myindex [#{s} [#{e} BYLEX"
+            }
+        }
+    end
+
+    spacequery(50,100,100,300,6)
+
+While not immediately trivial this is a very useful indexing strategy that
+in the future may be implemented in Redis in a native way.
+For now, the good thing is that the complexity may be easily encapsulated
+inside a library that can be used in order to perform indexing and queries.
+
+One example of such library is [Redimension](https://github.com/antirez/redimension), a proof of concept Ruby library which indexes N-dimensional data inside Redis using the technique described here.
+
+Multi dimensional indexes with negative or floating point numbers
+===
+
+The simplest way to represent negative values is just to work with unsigned
+integers and represent them using an offset, so that when you index, before
+translating numbers in the indexed representation, you add the absolute value
+of your smallest negative integer.
+
+For floating point numbers, the simplest approach is probably to convert them
+to integers by multiplying the number by a power of ten proportional to the
+number of digits after the dot you want to retain.
+
+Non range indexes
+===
+
+So far we checked indexes which are useful to query by range or by single
+item. However other Redis data structures such as Sets or Lists can be used
+in order to build other kinds of indexes. They are very commonly used but
+maybe we don't always realize they are actually a form of indexing.
+
+For instance I can index object IDs into a Set data type in order to use
+the *get random elements* operation via [`SRANDMEMBER`](/commands/srandmember) in order to retrieve
+a set of random objects. Sets can also be used to check for existence when
+all I need is to test if a given item exists or not or has a single boolean
+property or not.
+
+Similarly lists can be used in order to index items into a fixed order.
+I can add all my items into a Redis list and rotate the list with
+[`RPOPLPUSH`](/commands/rpoplpush) using the same key name as source and destination. This is useful
+when I want to process a given set of items again and again forever in the
+same order. Think of an RSS feed system that needs to refresh the local copy
+periodically.
+ +Another popular index often used with Redis is a **capped list**, where items +are added with [`LPUSH`](/commands/lpush) and trimmed with [`LTRIM`](/commands/ltrim), in order to create a view +with just the latest N items encountered, in the same order they were +seen. + +Index inconsistency +=== + +Keeping the index updated may be challenging, in the course of months +or years it is possible that inconsistencies are added because of software +bugs, network partitions or other events. + +Different strategies could be used. If the index data is outside Redis +*read repair* can be a solution, where data is fixed in a lazy way when +it is requested. When we index data which is stored in Redis itself +the [`SCAN`](/commands/scan) family of commands can be used in order to verify, update or +rebuild the index from scratch, incrementally. diff --git a/content/develop/manual/patterns/twitter-clone.md b/content/develop/manual/patterns/twitter-clone.md new file mode 100644 index 0000000000..b8d3fb823a --- /dev/null +++ b/content/develop/manual/patterns/twitter-clone.md @@ -0,0 +1,460 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: Learn several Redis patterns by building a Twitter clone +linkTitle: Patterns example +title: Redis patterns example +weight: 20 +--- + +This article describes the design and implementation of a [very simple Twitter clone](https://github.com/antirez/retwis) written using PHP with Redis as the only database. The programming community has traditionally considered key-value stores as a special purpose database that couldn't be used as a drop-in replacement for a relational database for the development of web applications. This article will try to show that Redis data structures on top of a key-value layer are an effective data model to implement many kinds of applications. + +Note: the original version of this article was written in 2009 when Redis was +released. 
It was not exactly clear at that time that the Redis data model was +suitable to write entire applications. Now after 5 years there are many cases of +applications using Redis as their main store, so the goal of the article today +is to be a tutorial for Redis newcomers. You'll learn how to design a simple +data layout using Redis, and how to apply different data structures. + +Our Twitter clone, called [Retwis](https://github.com/antirez/retwis), is structurally simple, has very good performance, and can be distributed among any number of web and Redis servers with little efforts. [View the Retwis source code](https://github.com/antirez/retwis). + +I used PHP for the example because of its universal readability. The same (or better) results can be obtained using Ruby, Python, Erlang, and so on. +A few clones exist (however not all the clones use the same data layout as the +current version of this tutorial, so please, stick with the official PHP +implementation for the sake of following the article better). + +* [Retwis-RB](https://github.com/danlucraft/retwis-rb) is a port of Retwis to Ruby and Sinatra written by Daniel Lucraft. +* [Retwis-J](https://docs.spring.io/spring-data/data-keyvalue/examples/retwisj/current/) is a port of Retwis to Java, using the Spring Data Framework, written by [Costin Leau](http://twitter.com/costinl). Its source code can be found on [GitHub](https://github.com/SpringSource/spring-data-keyvalue-examples), and there is comprehensive documentation available at [springsource.org](http://j.mp/eo6z6I). + +What is a key-value store? +--- +The essence of a key-value store is the ability to store some data, called a _value_, inside a key. The value can be retrieved later only if we know the specific key it was stored in. There is no direct way to search for a key by value. In some sense, it is like a very large hash/dictionary, but it is persistent, i.e. when your application ends, the data doesn't go away. 
So, for example, I can use the command [`SET`](/commands/set) to store the value *bar* in the key *foo*: + + SET foo bar + +Redis stores data permanently, so if I later ask "_What is the value stored in key foo?_" Redis will reply with *bar*: + + GET foo => bar + +Other common operations provided by key-value stores are [`DEL`](/commands/del), to delete a given key and its associated value, SET-if-not-exists (called [`SETNX`](/commands/setnx) on Redis), to assign a value to a key only if the key does not already exist, and [`INCR`](/commands/incr), to atomically increment a number stored in a given key: + + SET foo 10 + INCR foo => 11 + INCR foo => 12 + INCR foo => 13 + +Atomic operations +--- + +There is something special about [`INCR`](/commands/incr). You may wonder why Redis provides such an operation if we can do it ourselves with a bit of code? After all, it is as simple as: + + x = GET foo + x = x + 1 + SET foo x + +The problem is that incrementing this way will work as long as there is only one client working with the key _foo_ at one time. See what happens if two clients are accessing this key at the same time: + + x = GET foo (yields 10) + y = GET foo (yields 10) + x = x + 1 (x is now 11) + y = y + 1 (y is now 11) + SET foo x (foo is now 11) + SET foo y (foo is now 11) + +Something is wrong! We incremented the value two times, but instead of going from 10 to 12, our key holds 11. This is because the increment done with `GET / increment / SET` *is not an atomic operation*. Instead the INCR provided by Redis, Memcached, ..., are atomic implementations, and the server will take care of protecting the key during the time needed to complete the increment in order to prevent simultaneous accesses. + +What makes Redis different from other key-value stores is that it provides other operations similar to INCR that can be used to model complex problems. 
This is why you can use Redis to write whole web applications without using another database like an SQL database, and without going crazy. + +Beyond key-value stores: lists +--- + +In this section we will see which Redis features we need to build our Twitter clone. The first thing to know is that Redis values can be more than strings. Redis supports Lists, Sets, Hashes, Sorted Sets, Bitmaps, and HyperLogLog types as values, and there are atomic operations to operate on them so we are safe even with multiple accesses to the same key. Let's start with Lists: + + LPUSH mylist a (now mylist holds 'a') + LPUSH mylist b (now mylist holds 'b','a') + LPUSH mylist c (now mylist holds 'c','b','a') + +[`LPUSH`](/commands/lpush) means _Left Push_, that is, add an element to the left (or to the head) of the list stored in _mylist_. If the key _mylist_ does not exist it is automatically created as an empty list before the PUSH operation. As you can imagine, there is also an [`RPUSH`](/commands/rpush) operation that adds the element to the right of the list (on the tail). This is very useful for our Twitter clone. User updates can be added to a list stored in `username:updates`, for instance. + +There are operations to get data from Lists, of course. For instance, LRANGE returns a range from the list, or the whole list. + + LRANGE mylist 0 1 => c,b + +LRANGE uses zero-based indexes - that is the first element is 0, the second 1, and so on. The command arguments are `LRANGE key first-index last-index`. The _last-index_ argument can be negative, with a special meaning: -1 is the last element of the list, -2 the penultimate, and so on. So, to get the whole list use: + + LRANGE mylist 0 -1 => c,b,a + +Other important operations are LLEN that returns the number of elements in the list, and LTRIM that is like LRANGE but instead of returning the specified range *trims* the list, so it is like _Get range from mylist, Set this range as new value_ but does so atomically. 
+ +The Set data type +--- + +Currently we don't use the Set type in this tutorial, but since we use +Sorted Sets, which are kind of a more capable version of Sets, it is better +to start introducing Sets first (which are a very useful data structure +per se), and later Sorted Sets. + +There are more data types than just Lists. Redis also supports Sets, which are unsorted collections of elements. It is possible to add, remove, and test for existence of members, and perform the intersection between different Sets. Of course it is possible to get the elements of a Set. Some examples will make it more clear. Keep in mind that [`SADD`](/commands/sadd) is the _add to set_ operation, [`SREM`](/commands/srem) is the _remove from set_ operation, [`SISMEMBER`](/commands/sismember) is the _test if member_ operation, and [`SINTER`](/commands/sinter) is the _perform intersection_ operation. Other operations are [`SCARD`](/commands/scard) to get the cardinality (the number of elements) of a Set, and [`SMEMBERS`](/commands/smembers) to return all the members of a Set. + + SADD myset a + SADD myset b + SADD myset foo + SADD myset bar + SCARD myset => 4 + SMEMBERS myset => bar,a,foo,b + +Note that [`SMEMBERS`](/commands/smembers) does not return the elements in the same order we added them since Sets are *unsorted* collections of elements. When you want to store in order it is better to use Lists instead. Some more operations against Sets: + + SADD mynewset b + SADD mynewset foo + SADD mynewset hello + SINTER myset mynewset => foo,b + +[`SINTER`](/commands/sinter) can return the intersection between Sets but it is not limited to two Sets. You may ask for the intersection of 4,5, or 10000 Sets. Finally let's check how [`SISMEMBER`](/commands/sismember) works: + + SISMEMBER myset foo => 1 + SISMEMBER myset notamember => 0 + +The Sorted Set data type +--- + +Sorted Sets are similar to Sets: collection of elements. 
However in Sorted +Sets each element is associated with a floating point value, called the +*element score*. Because of the score, elements inside a Sorted Set are +ordered, since we can always compare two elements by score (and if the score +happens to be the same, we compare the two elements as strings). + +Like Sets in Sorted Sets it is not possible to add repeated elements, every +element is unique. However it is possible to update an element's score. + +Sorted Set commands are prefixed with `Z`. The following is an example +of Sorted Sets usage: + + ZADD zset 10 a + ZADD zset 5 b + ZADD zset 12.55 c + ZRANGE zset 0 -1 => b,a,c + +In the above example we added a few elements with [`ZADD`](/commands/zadd), and later retrieved +the elements with [`ZRANGE`](/commands/zrange). As you can see the elements are returned in order +according to their score. In order to check if a given element exists, and +also to retrieve its score if it exists, we use the [`ZSCORE`](/commands/zscore) command: + + ZSCORE zset a => 10 + ZSCORE zset non_existing_element => NULL + +Sorted Sets are a very powerful data structure, you can query elements by +score range, lexicographically, in reverse order, and so forth. +To know more [please check the Sorted Set sections in the official Redis commands documentation](https://redis.io/commands/#sorted_set). + +The Hash data type +--- + +This is the last data structure we use in our program, and is extremely easy +to grasp since there is an equivalent in almost every programming language out +there: Hashes. Redis Hashes are basically like Ruby or Python hashes, a +collection of fields associated with values: + + HMSET myuser name Salvatore surname Sanfilippo country Italy + HGET myuser surname => Sanfilippo + +[`HMSET`](/commands/hmset) can be used to set fields in the hash, that can be retrieved with +[`HGET`](/commands/hget) later. 
It is possible to check if a field exists with [`HEXISTS`](/commands/hexists), or
+to increment a hash field with [`HINCRBY`](/commands/hincrby) and so forth.
+
+Hashes are the ideal data structure to represent *objects*. For example we
+use Hashes in order to represent Users and Updates in our Twitter clone.
+
+Okay, we have just covered the basics of the main Redis data structures,
+so we are ready to start coding!
+
+Prerequisites
+---
+
+If you haven't downloaded the [Retwis source code](https://github.com/antirez/retwis) already please grab it now. It contains a few PHP files, and also a copy of [Predis](https://github.com/nrk/predis), the PHP client library we use in this example.
+
+Another thing you probably want is a working Redis server. Just get the source, build with `make`, run with `./redis-server`, and you're ready to go. No configuration is required at all in order to play with or run Retwis on your computer.
+
+Data layout
+---
+
+When working with a relational database, a database schema must be designed so that we'd know the tables, indexes, and so on that the database will contain. We don't have tables in Redis, so what do we need to design? We need to identify what keys are needed to represent our objects and what kind of values these keys need to hold.
+
+Let's start with Users. We need to represent users, of course, with their username, userid, password, the set of users following a given user, the set of users a given user follows, and so on. The first question is, how should we identify a user? Like in a relational DB, a good solution is to identify different users with different numbers, so we can associate a unique ID with every user. Every other reference to this user will be done by id. Creating unique IDs is very simple to do by using our atomic [`INCR`](/commands/incr) operation. 
When we create a new user we can do something like this, assuming the user is called "antirez": + + INCR next_user_id => 1000 + HMSET user:1000 username antirez password p1pp0 + +*Note: you should use a hashed password in a real application, for simplicity +we store the password in clear text.* + +We use the `next_user_id` key in order to always get a unique ID for every new user. Then we use this unique ID to name the key holding a Hash with user's data. *This is a common design pattern* with key-values stores! Keep it in mind. +Besides the fields already defined, we need some more stuff in order to fully define a User. For example, sometimes it can be useful to be able to get the user ID from the username, so every time we add a user, we also populate the `users` key, which is a Hash, with the username as field, and its ID as value. + + HSET users antirez 1000 + +This may appear strange at first, but remember that we are only able to access data in a direct way, without secondary indexes. It's not possible to tell Redis to return the key that holds a specific value. This is also *our strength*. This new paradigm is forcing us to organize data so that everything is accessible by _primary key_, speaking in relational DB terms. + +Followers, following, and updates +--- + +There is another central need in our system. A user might have users who follow them, which we'll call their followers. A user might follow other users, which we'll call a following. We have a perfect data structure for this. That is... Sets. +The uniqueness of Sets elements, and the fact we can test in constant time for +existence, are two interesting features. However what about also remembering +the time at which a given user started following another one? 
In an enhanced +version of our simple Twitter clone this may be useful, so instead of using +a simple Set, we use a Sorted Set, using the user ID of the following or follower +user as element, and the unix time at which the relation between the users +was created, as our score. + +So let's define our keys: + + followers:1000 => Sorted Set of uids of all the followers users + following:1000 => Sorted Set of uids of all the following users + +We can add new followers with: + + ZADD followers:1000 1401267618 1234 => Add user 1234 with time 1401267618 + +Another important thing we need is a place where we can add the updates to display in the user's home page. We'll need to access this data in chronological order later, from the most recent update to the oldest, so the perfect kind of data structure for this is a List. Basically every new update will be [`LPUSH`](/commands/lpush)ed in the user updates key, and thanks to [`LRANGE`](/commands/lrange), we can implement pagination and so on. Note that we use the words _updates_ and _posts_ interchangeably, since updates are actually "little posts" in some way. + + posts:1000 => a List of post ids - every new post is LPUSHed here. + +This list is basically the User timeline. We'll push the IDs of her/his own +posts, and, the IDs of all the posts of created by the following users. +Basically, we'll implement a write fanout. + +Authentication +--- + +OK, we have more or less everything about the user except for authentication. We'll handle authentication in a simple but robust way: we don't want to use PHP sessions, as our system must be ready to be distributed among different web servers easily, so we'll keep the whole state in our Redis database. All we need is a random **unguessable** string to set as the cookie of an authenticated user, and a key that will contain the user ID of the client holding the string. + +We need two things in order to make this thing work in a robust way. 
+First: the current authentication *secret* (the random unguessable string) +should be part of the User object, so when the user is created we also set +an `auth` field in its Hash: + + HSET user:1000 auth fea5e81ac8ca77622bed1c2132a021f9 + +Moreover, we need a way to map authentication secrets to user IDs, so +we also take an `auths` key, which has as value a Hash type mapping +authentication secrets to user IDs. + + HSET auths fea5e81ac8ca77622bed1c2132a021f9 1000 + +In order to authenticate a user we'll do these simple steps (see the `login.php` file in the Retwis source code): + + * Get the username and password via the login form. + * Check if the `username` field actually exists in the `users` Hash. + * If it exists we have the user id, (i.e. 1000). + * Check if user:1000 password matches, if not, return an error message. + * Ok authenticated! Set "fea5e81ac8ca77622bed1c2132a021f9" (the value of user:1000 `auth` field) as the "auth" cookie. + +This is the actual code: + + include("retwis.php"); + + # Form sanity checks + if (!gt("username") || !gt("password")) + goback("You need to enter both username and password to login."); + + # The form is ok, check if the username is available + $username = gt("username"); + $password = gt("password"); + $r = redisLink(); + $userid = $r->hget("users",$username); + if (!$userid) + goback("Wrong username or password"); + $realpassword = $r->hget("user:$userid","password"); + if ($realpassword != $password) + goback("Wrong username or password"); + + # Username / password OK, set the cookie and redirect to index.php + $authsecret = $r->hget("user:$userid","auth"); + setcookie("auth",$authsecret,time()+3600*24*365); + header("Location: index.php"); + +This happens every time a user logs in, but we also need a function `isLoggedIn` in order to check if a given user is already authenticated or not. These are the logical steps preformed by the `isLoggedIn` function: + + * Get the "auth" cookie from the user. 
If there is no cookie, the user is not logged in, of course. Let's call the value of the cookie ``. + * Check if `` field in the `auths` Hash exists, and what the value (the user ID) is (1000 in the example). + * In order for the system to be more robust, also verify that user:1000 auth field also matches. + * OK the user is authenticated, and we loaded a bit of information in the `$User` global variable. + +The code is simpler than the description, possibly: + + function isLoggedIn() { + global $User, $_COOKIE; + + if (isset($User)) return true; + + if (isset($_COOKIE['auth'])) { + $r = redisLink(); + $authcookie = $_COOKIE['auth']; + if ($userid = $r->hget("auths",$authcookie)) { + if ($r->hget("user:$userid","auth") != $authcookie) return false; + loadUserInfo($userid); + return true; + } + } + return false; + } + + function loadUserInfo($userid) { + global $User; + + $r = redisLink(); + $User['id'] = $userid; + $User['username'] = $r->hget("user:$userid","username"); + return true; + } + +Having `loadUserInfo` as a separate function is overkill for our application, but it's a good approach in a complex application. The only thing that's missing from all the authentication is the logout. What do we do on logout? That's simple, we'll just change the random string in user:1000 `auth` field, remove the old authentication secret from the `auths` Hash, and add the new one. + +*Important:* the logout procedure explains why we don't just authenticate the user after looking up the authentication secret in the `auths` Hash, but double check it against user:1000 `auth` field. The true authentication string is the latter, while the `auths` Hash is just an authentication field that may even be volatile, or, if there are bugs in the program or a script gets interrupted, we may even end with multiple entries in the `auths` key pointing to the same user ID. 
The logout code is the following (`logout.php`): + + include("retwis.php"); + + if (!isLoggedIn()) { + header("Location: index.php"); + exit; + } + + $r = redisLink(); + $newauthsecret = getrand(); + $userid = $User['id']; + $oldauthsecret = $r->hget("user:$userid","auth"); + + $r->hset("user:$userid","auth",$newauthsecret); + $r->hset("auths",$newauthsecret,$userid); + $r->hdel("auths",$oldauthsecret); + + header("Location: index.php"); + +That is just what we described and should be simple to understand. + +Updates +--- + +Updates, also known as posts, are even simpler. In order to create a new post in the database we do something like this: + + INCR next_post_id => 10343 + HMSET post:10343 user_id $owner_id time $time body "I'm having fun with Retwis" + +As you can see each post is just represented by a Hash with three fields. The ID of the user owning the post, the time at which the post was published, and finally, the body of the post, which is, the actual status message. + +After we create a post and we obtain the post ID, we need to LPUSH the ID in the timeline of every user that is following the author of the post, and of course in the list of posts of the author itself (everybody is virtually following herself/himself). This is the file `post.php` that shows how this is performed: + + include("retwis.php"); + + if (!isLoggedIn() || !gt("status")) { + header("Location:index.php"); + exit; + } + + $r = redisLink(); + $postid = $r->incr("next_post_id"); + $status = str_replace("\n"," ",gt("status")); + $r->hmset("post:$postid","user_id",$User['id'],"time",time(),"body",$status); + $followers = $r->zrange("followers:".$User['id'],0,-1); + $followers[] = $User['id']; /* Add the post to our own posts too */ + + foreach($followers as $fid) { + $r->lpush("posts:$fid",$postid); + } + # Push the post on the timeline, and trim the timeline to the + # newest 1000 elements. 
+ $r->lpush("timeline",$postid); + $r->ltrim("timeline",0,1000); + + header("Location: index.php"); + +The core of the function is the `foreach` loop. We use [`ZRANGE`](/commands/zrange) to get all the followers of the current user, then the loop will [`LPUSH`](/commands/lpush) the push the post in every follower timeline List. + +Note that we also maintain a global timeline for all the posts, so that in the Retwis home page we can show everybody's updates easily. This requires just doing an [`LPUSH`](/commands/lpush) to the `timeline` List. Let's face it, aren't you starting to think it was a bit strange to have to sort things added in chronological order using `ORDER BY` with SQL? I think so. + +There is an interesting thing to notice in the code above: we used a new +command called [`LTRIM`](/commands/ltrim) after we perform the [`LPUSH`](/commands/lpush) operation in the global +timeline. This is used in order to trim the list to just 1000 elements. The +global timeline is actually only used in order to show a few posts in the +home page, there is no need to have the full history of all the posts. + +Basically [`LTRIM`](/commands/ltrim) + [`LPUSH`](/commands/lpush) is a way to create a *capped collection* in Redis. + +Paginating updates +--- + +Now it should be pretty clear how we can use [`LRANGE`](/commands/lrange) in order to get ranges of posts, and render these posts on the screen. The code is simple: + + function showPost($id) { + $r = redisLink(); + $post = $r->hgetall("post:$id"); + if (empty($post)) return false; + + $userid = $post['user_id']; + $username = $r->hget("user:$userid","username"); + $elapsed = strElapsed($post['time']); + $userlink = "".utf8entities($username).""; + + echo('

'.$userlink.' '.utf8entities($post['body'])."
"); + echo('posted '.$elapsed.' ago via web
'); + return true; + } + + function showUserPosts($userid,$start,$count) { + $r = redisLink(); + $key = ($userid == -1) ? "timeline" : "posts:$userid"; + $posts = $r->lrange($key,$start,$start+$count); + $c = 0; + foreach($posts as $p) { + if (showPost($p)) $c++; + if ($c == $count) break; + } + return count($posts) == $count+1; + } + +`showPost` will simply convert and print a Post in HTML while `showUserPosts` gets a range of posts and then passes them to `showPosts`. + +*Note: [`LRANGE`](/commands/lrange) is not very efficient if the list of posts start to be very +big, and we want to access elements which are in the middle of the list, since Redis Lists are backed by linked lists. If a system is designed for +deep pagination of million of items, it is better to resort to Sorted Sets +instead.* + +Following users +--- + +It is not hard, but we did not yet check how we create following / follower relationships. If user ID 1000 (antirez) wants to follow user ID 5000 (pippo), we need to create both a following and a follower relationship. We just need to [`ZADD`](/commands/zadd) calls: + + ZADD following:1000 5000 + ZADD followers:5000 1000 + +Note the same pattern again and again. In theory with a relational database, the list of following and followers would be contained in a single table with fields like `following_id` and `follower_id`. You can extract the followers or following of every user using an SQL query. With a key-value DB things are a bit different since we need to set both the `1000 is following 5000` and `5000 is followed by 1000` relations. This is the price to pay, but on the other hand accessing the data is simpler and extremely fast. Having these things as separate sets allows us to do interesting stuff. 
For example, using [`ZINTERSTORE`](/commands/zinterstore) we can have the intersection of `following` of two different users, so we may add a feature to our Twitter clone so that it is able to tell you very quickly when you visit somebody else's profile, "you and Alice have 34 followers in common", and things like that.
+
+You can find the code that sets or removes a following / follower relation in the `follow.php` file.
+
+Making it horizontally scalable
+---
+
+Gentle reader, if you read till this point you are already a hero. Thank you. Before talking about scaling horizontally it is worth checking performance on a single server. Retwis is *extremely fast*, without any kind of cache. On a very slow and loaded server, an Apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey ass slow... Imagine the results with more recent hardware.
+
+However, you can't go with a single server forever, so how do you scale a key-value
+store?
+
+Retwis does not perform any multi-key operation, so making it scalable is
+simple: you may use client-side sharding, or something like a sharding proxy
+like Twemproxy, or the upcoming Redis Cluster.
+
+To know more about those topics please read
+[our documentation about sharding](/topics/partitioning). However, the point here
+to stress is that in a key-value store, if you design with care, the data set
+is split among **many independent small keys**. To distribute those keys
+to multiple nodes is more straightforward and predictable compared to using
+a semantically more complex database system. 
diff --git a/content/develop/manual/pipelining/index.md b/content/develop/manual/pipelining/index.md new file mode 100644 index 0000000000..88343d0020 --- /dev/null +++ b/content/develop/manual/pipelining/index.md @@ -0,0 +1,186 @@ +--- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients +description: How to optimize round-trip times by batching Redis commands +linkTitle: Pipelining +title: Redis pipelining +weight: 2 +--- + +Redis pipelining is a technique for improving performance by issuing multiple commands at once without waiting for the response to each individual command. Pipelining is supported by most Redis clients. This document describes the problem that pipelining is designed to solve and how pipelining works in Redis. + +## Request/Response protocols and round-trip time (RTT) + +Redis is a TCP server using the client-server model and what is called a *Request/Response* protocol. + +This means that usually a request is accomplished with the following steps: + +* The client sends a query to the server, and reads from the socket, usually in a blocking way, for the server response. +* The server processes the command and sends the response back to the client. + +So for instance a four commands sequence is something like this: + + * *Client:* INCR X + * *Server:* 1 + * *Client:* INCR X + * *Server:* 2 + * *Client:* INCR X + * *Server:* 3 + * *Client:* INCR X + * *Server:* 4 + +Clients and Servers are connected via a network link. +Such a link can be very fast (a loopback interface) or very slow (a connection established over the Internet with many hops between the two hosts). +Whatever the network latency is, it takes time for the packets to travel from the client to the server, and back from the server to the client to carry the reply. + +This time is called RTT (Round Trip Time). 
+It's easy to see how this can affect performance when a client needs to perform many requests in a row (for instance adding many elements to the same list, or populating a database with many keys). +For instance if the RTT time is 250 milliseconds (in the case of a very slow link over the Internet), even if the server is able to process 100k requests per second, we'll be able to process at max four requests per second. + +If the interface used is a loopback interface, the RTT is much shorter, typically sub-millisecond, but even this will add up to a lot if you need to perform many writes in a row. + +Fortunately there is a way to improve this use case. + +## Redis Pipelining + +A Request/Response server can be implemented so that it is able to process new requests even if the client hasn't already read the old responses. +This way it is possible to send *multiple commands* to the server without waiting for the replies at all, and finally read the replies in a single step. + +This is called pipelining, and is a technique widely in use for many decades. +For instance many POP3 protocol implementations already support this feature, dramatically speeding up the process of downloading new emails from the server. + +Redis has supported pipelining since its early days, so whatever version you are running, you can use pipelining with Redis. +This is an example using the raw netcat utility: + +```bash +$ (printf "PING\r\nPING\r\nPING\r\n"; sleep 1) | nc localhost 6379 ++PONG ++PONG ++PONG +``` + +This time we don't pay the cost of RTT for every call, but just once for the three commands. 
+ +To be explicit, with pipelining the order of operations of our very first example will be the following: + + * *Client:* INCR X + * *Client:* INCR X + * *Client:* INCR X + * *Client:* INCR X + * *Server:* 1 + * *Server:* 2 + * *Server:* 3 + * *Server:* 4 + +> **IMPORTANT NOTE**: While the client sends commands using pipelining, the server will be forced to queue the replies, using memory. So if you need to send a lot of commands with pipelining, it is better to send them as batches each containing a reasonable number, for instance 10k commands, read the replies, and then send another 10k commands again, and so forth. The speed will be nearly the same, but the additional memory used will be at most the amount needed to queue the replies for these 10k commands. + +## It's not just a matter of RTT + +Pipelining is not just a way to reduce the latency cost associated with the +round trip time, it actually greatly improves the number of operations +you can perform per second in a given Redis server. +This is because without using pipelining, serving each command is very cheap from +the point of view of accessing the data structures and producing the reply, +but it is very costly from the point of view of doing the socket I/O. This +involves calling the `read()` and `write()` syscall, that means going from user +land to kernel land. +The context switch is a huge speed penalty. + +When pipelining is used, many commands are usually read with a single `read()` +system call, and multiple replies are delivered with a single `write()` system +call. Consequently, the number of total queries performed per second +initially increases almost linearly with longer pipelines, and eventually +reaches 10 times the baseline obtained without pipelining, as shown in this figure. 
+ +![Pipeline size and IOPs](pipeline_iops.png) + +## A real world code example + + +In the following benchmark we'll use the Redis Ruby client, supporting pipelining, to test the speed improvement due to pipelining: + +```ruby +require 'rubygems' +require 'redis' + +def bench(descr) + start = Time.now + yield + puts "#{descr} #{Time.now - start} seconds" +end + +def without_pipelining + r = Redis.new + 10_000.times do + r.ping + end +end + +def with_pipelining + r = Redis.new + r.pipelined do + 10_000.times do + r.ping + end + end +end + +bench('without pipelining') do + without_pipelining +end +bench('with pipelining') do + with_pipelining +end +``` + +Running the above simple script yields the following figures on my Mac OS X system, running over the loopback interface, where pipelining will provide the smallest improvement as the RTT is already pretty low: + +``` +without pipelining 1.185238 seconds +with pipelining 0.250783 seconds +``` +As you can see, using pipelining, we improved the transfer by a factor of five. + +## Pipelining vs Scripting + +Using [Redis scripting](/commands/eval), available since Redis 2.6, a number of use cases for pipelining can be addressed more efficiently using scripts that perform a lot of the work needed at the server side. +A big advantage of scripting is that it is able to both read and write data with minimal latency, making operations like *read, compute, write* very fast (pipelining can't help in this scenario since the client needs the reply of the read command before it can call the write command). + +Sometimes the application may also want to send [`EVAL`](/commands/eval) or [`EVALSHA`](/commands/evalsha) commands in a pipeline. +This is entirely possible and Redis explicitly supports it with the [SCRIPT LOAD](https://redis.io/commands/script-load) command (it guarantees that [`EVALSHA`](/commands/evalsha) can be called without the risk of failing). + +## Appendix: Why are busy loops slow even on the loopback interface? 
+ +Even with all the background covered in this page, you may still wonder why +a Redis benchmark like the following (in pseudo code), is slow even when +executed in the loopback interface, when the server and the client are running +in the same physical machine: + +```sh +FOR-ONE-SECOND: + Redis.SET("foo","bar") +END +``` + +After all, if both the Redis process and the benchmark are running in the same +box, isn't it just copying messages in memory from one place to another without +any actual latency or networking involved? + +The reason is that processes in a system are not always running, actually it is +the kernel scheduler that lets the process run. +So, for instance, when the benchmark is allowed to run, it reads the reply from the Redis server (related to the last command executed), and writes a new command. +The command is now in the loopback interface buffer, but in order to be read by the server, the kernel should schedule the server process (currently blocked in a system call) +to run, and so forth. +So in practical terms the loopback interface still involves network-like latency, because of how the kernel scheduler works. + +Basically a busy loop benchmark is the silliest thing that can be done when +metering performances on a networked server. The wise thing is just avoiding +benchmarking in this way. 
diff --git a/content/develop/manual/pipelining/pipeline_iops.png b/content/develop/manual/pipelining/pipeline_iops.png new file mode 100644 index 0000000000000000000000000000000000000000..6ab11079f2a4bc71d196b0e59839215ab40139e6 GIT binary patch literal 14577 zcmb_?c|4Tg+y9uTL46F_QW#22$Ui{JBn|9D={>+zSl?{i=0I@k4nzt44@`@U^tpu2-ZhywzF>^OZ& z=R5=gB|;!9?_gWOm4eGXZ4d}Yj*GUo(P?dMgwa(mCl_}|2;|i5gv4zZFSzj6;RtQJ zS$MSW^Mu69S%vI45%D;jqk zWwwpe)ad&*89Qgf4j!iNT0QToI!fU*L-CjG7i|#IF(tDX*WGe#_beL0}@Em1d>vi)<15 zR?iJXeH~QEY$ST2%^|K7>yeR=fZFRaBvLsTIl@zvUBY<+gO#C(<*s%P|K2oR&j^X~*2K zio@5m1XNQ6-d@ID*a@u^=q`<#&^i|p^hfaD^5tFe=LMXH^95!S_yvBKscJt867ej2 zom|??#h)ke{AhG22MAgy18n zukAI~!^Y-|tKS2KO80Rdfdv=%xSWyv-Jzps78djK{L8F@+Y*p>&fa8sG{t+`{rL8^ z%d%zLAPwfM*#d5pryx)6_s5;HdE+9oEn;;WBuRD8eTDF5@GW(UMfQTUqLD3hb;9RBiH7&Ph?y+(&})!D3Z-; zd^=mWC`0XB;JGcYd^((!_|Ix9e%pG*qEJ#zu<3YahRQR}R>v_RzrF9bgbQdyrJ*o& zMhbTX!|(36d4v7qr^|;G${z8Y;rpZhN9d2Hlc3^_+RM94J>E!k?c5!6ug>!mzb)70 z-T59p)|2o6o?9Qk4lnuys3Q05PP*ApD_ouW$?Egc&znB!v->JI26$&;VyhSCzAy8v zv4=@laUrjz??1((#D0Vo2~En!8l6%-c}47p*!G<-Z=cukcq)7-O3?7fNf|I#9(Ov= zVEn0l$`6!XByQ{W>K&9w-QSPc@jx^F#n%uAfqB=rQ5DgaXFc~K#c`QNQs%t6daM%n z-bSC-FVhP)SlM?xUg4qbX>W<*QxEl(^cGK7{LAy|$wkS(=C1pTB0~~GZmG$gj_Y{f zci%zZGf^g~0`Xn`=z$aQhwsnF>peIarzYlq!S$k$WRAqt{-%WH`l>SO)GTekCRM}N4y?=Z(M zXF)DO8;1mkREKAdf%RPt_^;@P7UH8)01g!}17|glqhrJ3*A&?bucg)K z-PnFZo1>feb5d(kkK9k0uNGIGzS^HKJ!g0b<@4;RG z>W&1BTpqbJa)|HAok|YF7p5a`>!n|AZ@>Nci;QD{g5*U$V-4dI*}50YjWat7JFj)V z?G)-r%i7tQ(_!Bc-PzYEsdP~(O!=+SUnLPY9;FNB`x$Wp|?QcG@+iQ4gmiT-c5mSh)$SGO&NCS6W6 zmmiVOvF!Ci9q~SVO68TJppt{&s-V;EBfF{&wJF?D=(X=~S?urmq55t4(cI3RE<5Gp z-Q)S=17C)`+|z!wJ=BR;G4P$NQc#?2w3pqPvqB%4v9Xb zzoX$)14jc|`h5b<7~8qS?QYS#CH{%%u5=F>_cr(Do;4RY+hp6&ngG8t{QTHRT_V@_ ztnUR=F+Dqa6sPu1VTwh)jr?toT~w>^z8Lsh?b6X0)p51jW2t^2!|szZews6D-)cWQ z#_K0b*LQwYZYkPUD{v`BGXeu%CR$1N2*dvB_GfPHATF{9+(I2xN&jy)= zc+CHu-5oNA1evXyq0pE<9ZtF1Z(uQKJ8axueDvZHE+JZ$$#UY$Xz!)ILd`{ z;mw?0G1Vs`buOMblY7;o$g6YKb)mEDwdk8~*6+|wO+_4TJG-sTb*<{G1CQ|R+$9t& 
zJ}=!DYxOnl?&TB4I%mBtRf$LQ%3^+8o;EGC@;kS9>aNw9bB$NtSWOP}6sX=ReNgI? z#+i4QaAZJ{?7ddUii`Xk`S?@orXsxS6{9xBISvzXe53yL~KbiU;4`dl&r{>CV+Z@Zj0Q zcgoIV+hRrkUX{3dql|RSJ#Y`1o3wYPZaUTJ7}w-lT3u6lf>(;o14>cE1@8&T_;fwY zlka@*UYEB%oo)4;^dwJj3*Ce7;62Uf$iHK1slU&(QKng%obT0Nl`PSq_s@*vw7`_8 z)Y1VD%?uw~0yfXl$C1#G+OYc|uXMnrF!e7QJ%L~5j7WAbKCaxttu+!?esag|y_aB~ zAfa2J+a6n`&VTjILTTq*@4*|{YuR3%Yk#p%BA&#UH}v-Iyd%jmF{E~`zvpYGWT-(| znrp7gD`Z>sl`qI02GIt%Rf+keZO#=fh3Q3V78rvC%i7@Hm5P(Ee6)vkE?Bmse1m*v z*z!21WQ%1-G0JO2t55&d&g&me;;^`#b2LXWNm1Ugn7#LN-Y4r_t~TA|->yBKp~BL~ z-sc6|l>79PRyz$cn;TRnJuNF-d>3kDUt63lk@ZvwcpR8YX<92;=;!ah7br8ML@*^3 zF3Qb#O!558dh7eUrbBpmghKCFn(oSni z2j?zV%m?-l?%pc1Rc^=oe*!9DVRH^qii@kU9muMGMk}6I$m}||D@#~exKj0xX4isi z=j)%{*Sgz>%P{e)7PY%mRYu8vYwz5|-7hca%uY3yUn9)Vyhh3pTQ$d#D`5v+TvzP} zBv%ZT({nTf(d?wdhg0YH2t|v@(K)2%;b=M32_ z25z@!OaGXV#Sg03RF{jUex_&7sM&jY$k{n~U3QcU^6&g(hG+a2bNo1QddKB=Ury!pvZcW6(l%TpJyX+ay& zTE7ayrOBZ#&j`IH2Ul;IFa#oV>9o%Ai$N??{n1ySmGZN-Hs_EjHqH0;`@ZB8a%Y7t zn&#H`@N32TtEUS8+9#TBQ)1)E3WFn15fBzAD{Q;uEbeE3gZ8FNYkLusp<;{?aKVd1 zjy;s+WvSi(7fQ~D?$4(^<_i4N0>IPsL1dWwO;S-cX|xb0nt-Q-M$RNE8LS?<7FKaZ zf2zUWV)%+AZ9X_cp}g9)C)T@CAhQs2H+kD?M`G;4!H!J(&0baTFINVXDxJaoP2aX^ zLh9=J_ujg<>+@lUp|9;I%ihSA#=MTqy>sdmC_)5B)IzMj@EKUTSJqa!o4T@%u>)-Q z--AUOmv#_cYSf?CK`$4Ko2tjyTM&YAfulK1Wy-mm){(XY#8nkVnNJxu zTe%q;#_qN`RZv;Qww3^dB%wtX4 ze$S+l^jF+{eE9{*rc5q2A;zi~b2NQ)5L#1Z0^OKUApym4KMy)@cK5-YTkhVeXMSsK zWapd|8ynlDZGIDmZd+*?KnY`EZ+s}OiKQWX2tEd6b*`CjBK9XKK~)u)PGro8y^h7* zcm6V7u~iL3LFt*~-nNAtdzZUy(3ZTBg0YJT zu6X5Q@497^H|0v|NqBiHdscGxLRh%&v)g^$0>=UeBT418O}ZPDf~z8ol(&nT8k%IP z!{1uWapNtrBK>Iy=w!uP`wzCuFWt6aoi!QFS&Cigo+d0}lm~ksnUvIaF6a0R=Ex&O zVu#TljV5(nt8PRY{*O!^=`!V8xdT zh3OKNz3X`uG(&J@hT_f2DQU_bWAA=+myarUDYQTOs?s0hU}1uH4tdcdO@rJJ6uXuk zD&)CKE2%~PQ5$?4TO{eax53^GqW{?`wWFYjh&02tRce%VJs!n};+=D=Dt9u3M;0;3 zWo4aA8xMm&GDG#Gm#yLrXi-Ec6h`DD{enGW=tQt}VnJpc^Ie)s1y^1`zl-iXsIint z-bqPlvmr(Ujz3wz&dtL(C&3WVmu*99z3{>Qbj;&YF&MQI(dOkMH zsx6m8LMfuF#6};B3WJ+(a`KT5W*&DSSWh 
zd0(&pW7#wa_BNaOG9h$k^hv^DcA~V@6F}`*w;ALc*)Sd>xpk7|iyH@3|jcLVZauUOcjn zNDr8d;^bpvW3Z9fxkcY=O^vA?4ov<+Oh_SkS^|MzHbH2iL;ku}C2cq&sFe}Y)1tlF zf7!4(oM7FgJM<968ZJiReHOp2wbgvkhVA76b73>;Nox_)178z4tvvT>#A@aa%oumZ zRBAG*Jw+MdjyQVMn&GV1`4u2-Q`ls$` z69_gNH21fF?YQ1Li&ptc_f6tB2J>P(3UX54brt(pf$i=HYvkGv!KU?K*q)&V)nX_!7~SBBCB^}Var*`ag?nMcI$Ug2!LnnZLH?}@L7x8m?Cbfj$r=) zIRDXiz+T{htz1a#f4LFaPG}9e^Xcck-WO5T$b+Nm8wPB(Q9XkZ-_8v2v#?P3o#Hp@ z12bU3OxkO(g&nrs@;&+Sy4IZ8kC-&Pcg=@{*|q z>Pw!id)P*3hQvVb#Ddz7AmCzy#%o*e*s;^x-vOq+nSH2_FDfGalGN`K3rT6()NxOX%76()bcS^}m0`WW0 zST0&(W4D>+gX3qGj`SHrwaCT~K8tN!-O9ggGd`eMrFAJxrseFETS@Pfo3HEci-^k4 zZMv%;ERbQxuH5&Cra7mVAvKxVN?zg1|FxHE&PT>eKl)8SnlI8vNB};+TRv`NJ$Ecn zSq|PCq?{c(8HRQ30#NQ{w-T=$T_Ot1jH)fqkN2b3H>Q+*69zFpmJVvGPM*LB>Rzt(maRQb1HMGKoJcC_D78k}{OGbTP z)D?UlSxqFVd!ec=0)?cmNzf*j~IL>F(ljcVjG#Hl$pm z)|rxkOSBSYJJ@)+&VAmhA~vRC%BwHFfjXz1aJ=*DWntpksDpaRl&f zq1pCEk?;l_GIRmM>avTfoQ~s`ILt&hyim9ecI4CldVPOM7$2;0=b=N#n#B&2_Yj@O z#!BkejG>jktRiqYUKAzjsdpSx`k)t1GjS7`I4PXocVZ1S?JKRIxN#X3Q4Mb3Se^yn z0^1QXu6&EW0NpwXgq8HizDa5<*4VI|MxvWMaQv-qpe``;+5|RT&4o0}p863oP0Usp`NcuRUnM z`TX_jHL}aeEi`JensYB(y^UMT4p)=c@LJ&?w2M+Mt{N~M-AlMe-aEOUrg$(Jw!~cl z!4fBS&q1L>7xDQqO+Q}5~XOz$*x z>QOXVl@D$~n;Xf9<9cn%Nt*M=o%J^1*47jon}@Pq{7i%`opzuqgKt6a)QaV}&l{hz zDo0#ax^AnyG^NAgh#@VYL0Us+ie>*H8oKBEH7g=s`1)PVH9F*J%OTbc=vyxawWgaT!R<(;l=w2r)}lh8K!$ z1-y&>8|^N@!%F~GXQ@Eea9ptSP1nASrUn$dNWebioe!e0)$0O1wCabo9#vr_X_G0vT6VLJ9?mQP!gKY)$8AbQUp&Phr#Yx9JLI9B z$-Y|%)#~+1Fj0%{Hx>3(TSracCQB``4y+6?VX0Q`jAxUj5(>HK*_pT9J&0x( zg#(>TUXiacuDjk$g{A>RM?gG|{N>nLmTkk*8ArG31N`!Fb&;g~27}RH3r4D{1wR<5 z8O}ya*1kPMch?uVOY0Oij2aoEdh+Vsyy>fCfr65X$SA3(s6!b84-b0X_)uwsgJz4- znQhMjBieU$=F;QO3R~mBSHhS-w;x1I5-ys0{T$CS@7gGAfqYwn=&mfg#tn2t(O3R~4bY^{WXKZzTXVdpHz$DDn*v%V zIxZXK0Hu*gBxXePs5Qspc+cUUZqJtPp^bWYV-$wUwDfi~Y3JVyrG_Q2#c>}IP#a{f zyetjoY~gh*K1~l@C-fFLhUPpZLdyo#)ozY8&BK=HTJo2bz|{Dz$AiC)aR*y0#9DQF zRq61bq9LmEyzOJVmb+8+qrD?`;JzCymDLy#KWPUfgz*LBC8Wm2hBz#{` zKvNV*Hu5yIaZf=X2;yvU(P9XW6F+DYB&%U-4S71QB`ucfq0Ga&Sv^o0+{IAHjV6_9 
z$M^^Tj)l18{td)uzq6zROc<;99sgFE=wEzf{8jXzxKh?N54sS;Qh24vEdB8dM@b2& z$hEnu@AIzRF|Jri#dL1Or{T^elL7UsK|Cl6Kt zO7na}q*cSe$+R;cghB-{6n5Jqjt(!|OTbW9eIK1f5!JJR1G5sv$LWyA(whkefj$C- zF-+*;8r78WvZm)>2|T9sqWB!+vG0I)-w%4?7>~6B+OE48Lbe^pADK>q7# zK)2%u?D$EP*fH#w0Xt>~J<^$W00Y|-e%xf*0Y&>vZXsb}P}WnEZl@5~DST1l!?5!W zsLL_w$!v0J?;l!G13LqR$dJMVp>3w%&>%Y^A7kQZ)rr&ZRcFjd0bI7`VEkm=N-fN# z-n?E7luD+T11MV{=<+Cay2b%pcThgyg!#Rmu&91yM^(V<{;DIHN9g{rK;LnKlAE^| zFpxt0q8+xUp^wLN3jc?5Ojf-q=?j#;yl2$PTtbV3JkF2R)G+X%*5;WjKbSm*n+mA% z;k$2j&_#qQN}N!fz5ut$RC}^9jsu+?n1%mHO}}ezVyuyJnKj&AYn3-tyX;gHL0l2Lhivv1G`Ss zAaiQ-|+S zSHaP0Q3p7C{t9s)BQr+g-e3EI^!9hqYG|rHVM75ngqndB?aoJx^xOb@gV>{K#&6aT zfRE6uhyDzX!KHZNc+L-gOhsso26^{?QDH-IJvYo?@*XgZtL7-9-i<&_0NWFMZ|Nul zEc*Ws2tig_-vd*QH-IUL|YDEO^)%W z5GHQ^$75K=Z~5jKDQ%_8Z*?u;R&B;jhSVVFy)J_c5&!!!m|`CE&K9%dqYP==5LBT= zx@JXGw*v>-`4o_Jk04k*5KRfwkIWPYnFINI5B@C2@Tb#BbOfQRo-k87r%+&bVd@#K? z%Nd40sEkBVujzM{GUZ1EI7HyD<{F(&Xp5y|ID;EhjrX-o8Ki<>UxVphpSVYV-vU(j zKabIPI!@ZuT(YWxZIr-2x!~(%NMeRq32a9y=e9;ub#fy$-jyH?m08@(l>X;2l;#f6 z_7HH8NzW+i86elQ8wE1VKsW{X|u>O{XBvPc-@q4)Uts`n?&68Op|efI2oGbn;{6Ay2)g z@Q+64LvaBMSI=)@ND^3#>ZH7OxH;auPk4&(;4xD$P@+(d1ITXPsb$67I=s=(L>UwXMv^yDyH%^US+HsHVMitg5Ee|Rj3R_ zK%RHk%;e|@gBtNo`6)v$et{H#LuX2A+%S`zYQXj}*%U{n zxCcps#(d{hM#v&inP6d=H{Q83eVhS05U3Tyd}!<XD}1e{}6+1r^RN|Klp4)R2tizk7&c>CY(ZW`SiWPeT#T?VJ4+5rT*(?W;2!7 zkN$U@U_>ET&y5TVF*=us>MgWAt?LPJ2dXOy5|D0gc7O>kad0I{-QUDO8W4B{{_NTl7hWEvzPh(yMq}+HM}^PP`TDtD6qRL1aw< zcJ#ShnDcEH2kqYZQ+-T%&j%G|l6V@^13?|22G8S)=?6NXYXq-(30M3Px zr^Gxq)otfdJvN&-gtCRR*bUmW2>*WJzMGL^eHJ_9?d6HGhVC1qjEh@9P`2=X<7Z}* z@wS%k<#q({dehMuXp6y#YhKnBX8aW*fJ-QD58A68@MQH9AQ3M&l~>geKz7K9LN;bX z+`yG5cQ!RwTVRdCc+%1j7VDZ~#txV-p%B<(c#s@2-Q}ju;1g;T@OzZ4aMSK&0n8=d zrJq3oYNmpgH&vi@o|yo5cJQVmHc!;`UhGIlR`}GV@^B9W4@NLpodiG`d7u!3CZXKI zev4_wjCin@1P~|L3NcK#EdVqzZgJpPC1_O(0mV$=bCx!JRD>Gp&I4~%u2nEU{b7NI z!`3w4H?C!qz8!K+7XSyT!C?hFo4{kvN39|!WZu6O-MIu`w9iQL*BJ_%>o%I(*zc@~yYP((s*2^D=|nU+_mIC)R0NX$)iStxB%bpiD_P`fyV#s;A!zl+V_GE?64P#-Z0V; 
zf%@MqOu?{+a10+BVr9+1}$-BdQmnwRRNa49eQtT9JBM zTDN<8)isqZ&qL(g!WMXLrp9$R{{o zUsbZ(IJ`-aQDkK7#Omix4z6gIjaz8fgh}SfH874of-ZuS6jcgo!v9EOAztLo+F}mE zx;)&z>15r9cU3>K{sb z^8~(E(z0F7$KkbWFI+6H*&pv1CUa}Fg7+vstjQ;5s%OkMjW6%Ja$QmJ(S(?(CsT(? zOq@L`_v`oBG#wf)P)6nrx>>Wk6(5@KEhs7;n;~abKQVTVj!mz`klL%<^;>TJLuB7e zD$Y=TqrBat4{G;VJ?W*--G$=8Z+qQi>%-Fp(AFiRd%ByQ%3og1^_UoJ&9@dAbVZij znQOLw|3dg?DrR?cXJ`|?GwP>x$iVAEx*Ek!yyzEseRdzw5zhra98QT@tqHnplahDA zh8)|3SL+izK(dJQbl3j98d_X;#i0+0xB`FI;*s~A z*3NwPF{|~hqlLc87{^eD--EgaSDSx8$&zX1AQrl52o%Whw{M&*j`T*$?j-N}FU{$WY?m-C&TYrDF#_G7Ghzjtgz_taAJ znLL#yUG@2+O?^REkH^|@EQ>Bnv}9fXdL;b0#gBBM*00no&z`RyEu=znH6`HA%zXT< z9mJKAMwgPZybg9R$j3Mcin7{CXiVZrzbJx_$g#Sd*`%V%2Ml=qDOvKt!`!bQ7 zYNzH4p7IDyJQ`c^wkdXma?(AR(oXL58OukGHhjuu0(?7Jw}_WfBu7*?LO)hHWBxqt zWo+XREYST>VaB-#_`(;|KZW_l^q0G!CZ~KHPBMf@el4Va@O}D(fljWL?X~|04#hBK literal 0 HcmV?d00001 diff --git a/content/develop/reference/_index.md b/content/develop/reference/_index.md index 7162ea1904..2fe6b287c8 100644 --- a/content/develop/reference/_index.md +++ b/content/develop/reference/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Specifications and protocols linkTitle: Reference title: Redis reference diff --git a/content/develop/reference/arm.md b/content/develop/reference/arm.md deleted file mode 100644 index 03dfd2c131..0000000000 --- a/content/develop/reference/arm.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -aliases: -- /topics/ARM -description: 'Exploring Redis on the ARM CPU Architecture - - ' -linkTitle: ARM support -title: ARM support -weight: 11 ---- - -Redis versions 4.0 and above support the ARM processor in general, and -the Raspberry Pi specifically, as a main platform. Every new release of Redis is tested on the Pi -environment, and we update this documentation page with information about supported devices and other useful information. 
While Redis does run on Android, in the future we look forward to extend our testing efforts to Android -to also make it an officially supported platform. - -We believe that Redis is ideal for IoT and embedded devices for several -reasons: - -* Redis has a very small memory footprint and CPU requirements. It can run in small devices like the Raspberry Pi Zero without impacting the overall performance, using a small amount of memory while delivering good performance for many use cases. -* The data structures of Redis are often an ideal way to model IoT/embedded use cases. Some examples include accumulating time series data, receiving or queuing commands to execute or respond to send back to the remote servers, and so forth. -* Modeling data inside Redis can be very useful in order to make in-device decisions for appliances that must respond very quickly or when the remote servers are offline. -* Redis can be used as a communication system between the processes running in the device. -* The append-only file storage of Redis is well suited for SSD cards. -* The stream data structure included in Redis versions 5.0 and higher was specifically designed for time series applications and has a very low memory overhead. - -## Redis /proc/cpu/alignment requirements - -Linux on ARM allows to trap unaligned accesses and fix them inside the kernel -in order to continue the execution of the offending program instead of -generating a `SIGBUS`. Redis 4.0 and greater are fixed in order to avoid any kind -of unaligned access, so there is no need to have a specific value for this -kernel configuration. Even when kernel alignment fixing set as disabled Redis should -run as expected. - -## Building Redis in the Pi - -* Download Redis version 4.0 or higher. -* Use `make` as usual to create the executable. - -There is nothing special in the process. 
The only difference is that by -default, Redis uses the `libc` allocator instead of defaulting to `jemalloc` -as it does in other Linux based environments. This is because we believe -that for the small use cases inside embedded devices, memory fragmentation -is unlikely to be a problem. Moreover `jemalloc` on ARM may not be as tested -as the `libc` allocator. - -## Performance - -Performance testing of Redis was performed on the Raspberry Pi 3 and Pi 1 model B. The difference between the two Pis in terms of delivered performance is quite big. The benchmarks were performed via the -loopback interface, since most use cases will probably use Redis from within -the device and not via the network. The following numbers were obtained using -Redis 4.0. - -Raspberry Pi 3: - -* Test 1 : 5 millions writes with 1 million keys (even distribution among keys). No persistence, no pipelining. 28,000 ops/sec. -* Test 2: Like test 1 but with pipelining using groups of 8 operations: 80,000 ops/sec. -* Test 3: Like test 1 but with AOF enabled, fsync 1 sec: 23,000 ops/sec -* Test 4: Like test 3, but with an AOF rewrite in progress: 21,000 ops/sec - -Raspberry Pi 1 model B: - -* Test 1 : 5 millions writes with 1 million keys (even distribution among keys). No persistence, no pipelining. 2,200 ops/sec. -* Test 2: Like test 1 but with pipelining using groups of 8 operations: 8,500 ops/sec. -* Test 3: Like test 1 but with AOF enabled, fsync 1 sec: 1,820 ops/sec -* Test 4: Like test 3, but with an AOF rewrite in progress: 1,000 ops/sec - -The benchmarks above are referring to simple [`SET`](/commands/set)/[`GET`](/commands/get) operations. The performance is similar for all the Redis fast operations (not running in linear time). However sorted sets may show slightly slower numbers. 
diff --git a/content/develop/reference/clients.md b/content/develop/reference/clients.md index 9e8e120981..ecc1c4c51b 100644 --- a/content/develop/reference/clients.md +++ b/content/develop/reference/clients.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/clients +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'How the Redis server manages client connections ' @@ -48,8 +56,7 @@ However, Redis does the following two things when serving clients: In Redis 2.4 there was a hard-coded limit for the maximum number of clients that could be handled simultaneously. -In Redis 2.6 and newer, this limit is dynamic: by default it is set to 10000 clients, unless -otherwise stated by the `maxclients` directive in `redis.conf`. +In Redis 2.6 and newer, this limit is configurable using the `maxclients` directive in `redis.conf`. The default is 10,000 clients. However, Redis checks with the kernel what the maximum number of file descriptors that we are able to open is (the *soft limit* is checked). If the diff --git a/content/develop/reference/cluster-spec.md b/content/develop/reference/cluster-spec.md deleted file mode 100644 index ad57d023fa..0000000000 --- a/content/develop/reference/cluster-spec.md +++ /dev/null @@ -1,1280 +0,0 @@ ---- -aliases: -- /topics/cluster-spec -description: 'Detailed specification for Redis cluster - - ' -linkTitle: Cluster spec -title: Redis cluster specification -weight: 9 ---- - -Welcome to the **Redis Cluster Specification**. Here you'll find information -about the algorithms and design rationales of Redis Cluster. This document is a work -in progress as it is continuously synchronized with the actual implementation -of Redis. - -## Main properties and rationales of the design - -### Redis Cluster goals - -Redis Cluster is a distributed implementation of Redis with the following goals in order of importance in the design: - -* High performance and linear scalability up to 1000 nodes. 
There are no proxies, asynchronous replication is used, and no merge operations are performed on values. -* Acceptable degree of write safety: the system tries (in a best-effort way) to retain all the writes originating from clients connected with the majority of the master nodes. Usually there are small windows where acknowledged writes can be lost. Windows to lose acknowledged writes are larger when clients are in a minority partition. -* Availability: Redis Cluster is able to survive partitions where the majority of the master nodes are reachable and there is at least one reachable replica for every master node that is no longer reachable. Moreover using *replicas migration*, masters no longer replicated by any replica will receive one from a master which is covered by multiple replicas. - -What is described in this document is implemented in Redis 3.0 or greater. - -### Implemented subset - -Redis Cluster implements all the single key commands available in the -non-distributed version of Redis. Commands performing complex multi-key -operations like set unions and intersections are implemented for cases where -all of the keys involved in the operation hash to the same slot. - -Redis Cluster implements a concept called **hash tags** that can be used -to force certain keys to be stored in the same hash slot. However, during -manual resharding, multi-key operations may become unavailable for some time -while single-key operations are always available. - -Redis Cluster does not support multiple databases like the standalone version -of Redis. We only support database `0`; the [`SELECT`](/commands/select) command is not allowed. - -## Client and Server roles in the Redis cluster protocol - -In Redis Cluster, nodes are responsible for holding the data, -and taking the state of the cluster, including mapping keys to the right nodes. 
-Cluster nodes are also able to auto-discover other nodes, detect non-working -nodes, and promote replica nodes to master when needed in order -to continue to operate when a failure occurs. - -To perform their tasks all the cluster nodes are connected using a -TCP bus and a binary protocol, called the **Redis Cluster Bus**. -Every node is connected to every other node in the cluster using the cluster -bus. Nodes use a gossip protocol to propagate information about the cluster -in order to discover new nodes, to send ping packets to make sure all the -other nodes are working properly, and to send cluster messages needed to -signal specific conditions. The cluster bus is also used in order to -propagate Pub/Sub messages across the cluster and to orchestrate manual -failovers when requested by users (manual failovers are failovers which -are not initiated by the Redis Cluster failure detector, but by the -system administrator directly). - -Since cluster nodes are not able to proxy requests, clients may be redirected -to other nodes using redirection errors `-MOVED` and `-ASK`. -The client is in theory free to send requests to all the nodes in the cluster, -getting redirected if needed, so the client is not required to hold the -state of the cluster. However clients that are able to cache the map between -keys and nodes can improve the performance in a sensible way. - -### Write safety - -Redis Cluster uses asynchronous replication between nodes, and **last failover wins** implicit merge function. This means that the last elected master dataset eventually replaces all the other replicas. There is always a window of time when it is possible to lose writes during partitions. However these windows are very different in the case of a client that is connected to the majority of masters, and a client that is connected to the minority of masters. 
- -Redis Cluster tries harder to retain writes that are performed by clients connected to the majority of masters, compared to writes performed in the minority side. -The following are examples of scenarios that lead to loss of acknowledged -writes received in the majority partitions during failures: - -1. A write may reach a master, but while the master may be able to reply to the client, the write may not be propagated to replicas via the asynchronous replication used between master and replica nodes. If the master dies without the write reaching the replicas, the write is lost forever if the master is unreachable for a long enough period that one of its replicas is promoted. This is usually hard to observe in the case of a total, sudden failure of a master node since masters try to reply to clients (with the acknowledge of the write) and replicas (propagating the write) at about the same time. However it is a real world failure mode. - -2. Another theoretically possible failure mode where writes are lost is the following: - -* A master is unreachable because of a partition. -* It gets failed over by one of its replicas. -* After some time it may be reachable again. -* A client with an out-of-date routing table may write to the old master before it is converted into a replica (of the new master) by the cluster. - -The second failure mode is unlikely to happen because master nodes unable to communicate with the majority of the other masters for enough time to be failed over will no longer accept writes, and when the partition is fixed writes are still refused for a small amount of time to allow other nodes to inform about configuration changes. This failure mode also requires that the client's routing table has not yet been updated. - -Writes targeting the minority side of a partition have a larger window in which to get lost. 
For example, Redis Cluster loses a non-trivial number of writes on partitions where there is a minority of masters and at least one or more clients, since all the writes sent to the masters may potentially get lost if the masters are failed over in the majority side. - -Specifically, for a master to be failed over it must be unreachable by the majority of masters for at least `NODE_TIMEOUT`, so if the partition is fixed before that time, no writes are lost. When the partition lasts for more than `NODE_TIMEOUT`, all the writes performed in the minority side up to that point may be lost. However the minority side of a Redis Cluster will start refusing writes as soon as `NODE_TIMEOUT` time has elapsed without contact with the majority, so there is a maximum window after which the minority becomes no longer available. Hence, no writes are accepted or lost after that time. - -### Availability - -Redis Cluster is not available in the minority side of the partition. In the majority side of the partition assuming that there are at least the majority of masters and a replica for every unreachable master, the cluster becomes available again after `NODE_TIMEOUT` time plus a few more seconds required for a replica to get elected and failover its master (failovers are usually executed in a matter of 1 or 2 seconds). - -This means that Redis Cluster is designed to survive failures of a few nodes in the cluster, but it is not a suitable solution for applications that require availability in the event of large net splits. - -In the example of a cluster composed of N master nodes where every node has a single replica, the majority side of the cluster will remain available as long as a single node is partitioned away, and will remain available with a probability of `1-(1/(N*2-1))` when two nodes are partitioned away (after the first node fails we are left with `N*2-1` nodes in total, and the probability of the only master without a replica to fail is `1/(N*2-1))`. 
- -For example, in a cluster with 5 nodes and a single replica per node, there is a `1/(5*2-1) = 11.11%` probability that after two nodes are partitioned away from the majority, the cluster will no longer be available. - -Thanks to a Redis Cluster feature called **replicas migration** the Cluster -availability is improved in many real world scenarios by the fact that -replicas migrate to orphaned masters (masters no longer having replicas). -So at every successful failure event, the cluster may reconfigure the replicas -layout in order to better resist the next failure. - -### Performance - -In Redis Cluster nodes don't proxy commands to the right node in charge for a given key, but instead they redirect clients to the right nodes serving a given portion of the key space. - -Eventually clients obtain an up-to-date representation of the cluster and which node serves which subset of keys, so during normal operations clients directly contact the right nodes in order to send a given command. - -Because of the use of asynchronous replication, nodes do not wait for other nodes' acknowledgment of writes (if not explicitly requested using the [`WAIT`](/commands/wait) command). - -Also, because multi-key commands are only limited to *near* keys, data is never moved between nodes except when resharding. - -Normal operations are handled exactly as in the case of a single Redis instance. This means that in a Redis Cluster with N master nodes you can expect the same performance as a single Redis instance multiplied by N as the design scales linearly. At the same time the query is usually performed in a single round trip, since clients usually retain persistent connections with the nodes, so latency figures are also the same as the single standalone Redis node case. - -Very high performance and scalability while preserving weak but -reasonable forms of data safety and availability is the main goal of -Redis Cluster. 
- -### Why merge operations are avoided - -The Redis Cluster design avoids conflicting versions of the same key-value pair in multiple nodes as in the case of the Redis data model this is not always desirable. Values in Redis are often very large; it is common to see lists or sorted sets with millions of elements. Also data types are semantically complex. Transferring and merging these kind of values can be a major bottleneck and/or may require the non-trivial involvement of application-side logic, additional memory to store meta-data, and so forth. - -There are no strict technological limits here. CRDTs or synchronously replicated -state machines can model complex data types similar to Redis. However, the -actual run time behavior of such systems would not be similar to Redis Cluster. -Redis Cluster was designed in order to cover the exact use cases of the -non-clustered Redis version. - -## Overview of Redis Cluster main components - -### Key distribution model - -The cluster's key space is split into 16384 slots, effectively setting an upper limit -for the cluster size of 16384 master nodes (however, the suggested max size of -nodes is on the order of ~ 1000 nodes). - -Each master node in a cluster handles a subset of the 16384 hash slots. -The cluster is **stable** when there is no cluster reconfiguration in -progress (i.e. where hash slots are being moved from one node to another). -When the cluster is stable, a single hash slot will be served by a single node -(however the serving node can have one or more replicas that will replace it in the case of net splits or failures, -and that can be used in order to scale read operations where reading stale data is acceptable). 
- -The base algorithm used to map keys to hash slots is the following -(read the next paragraph for the hash tag exception to this rule): - - HASH_SLOT = CRC16(key) mod 16384 - -The CRC16 is specified as follows: - -* Name: XMODEM (also known as ZMODEM or CRC-16/ACORN) -* Width: 16 bit -* Poly: 1021 (That is actually x^16 + x^12 + x^5 + 1) -* Initialization: 0000 -* Reflect Input byte: False -* Reflect Output CRC: False -* Xor constant to output CRC: 0000 -* Output for "123456789": 31C3 - -14 out of 16 CRC16 output bits are used (this is why there is -a modulo 16384 operation in the formula above). - -In our tests CRC16 behaved remarkably well in distributing different kinds of -keys evenly across the 16384 slots. - -**Note**: A reference implementation of the CRC16 algorithm used is available in the Appendix A of this document. - -### Hash tags - -There is an exception for the computation of the hash slot that is used in order -to implement **hash tags**. Hash tags are a way to ensure that multiple keys -are allocated in the same hash slot. This is used in order to implement -multi-key operations in Redis Cluster. - -To implement hash tags, the hash slot for a key is computed in a -slightly different way in certain conditions. -If the key contains a "{...}" pattern only the substring between -`{` and `}` is hashed in order to obtain the hash slot. However since it is -possible that there are multiple occurrences of `{` or `}` the algorithm is -well specified by the following rules: - -* IF the key contains a `{` character. -* AND IF there is a `}` character to the right of `{`. -* AND IF there are one or more characters between the first occurrence of `{` and the first occurrence of `}`. - -Then instead of hashing the key, only what is between the first occurrence of `{` and the following first occurrence of `}` is hashed. 
- -Examples: - -* The two keys `{user1000}.following` and `{user1000}.followers` will hash to the same hash slot since only the substring `user1000` will be hashed in order to compute the hash slot. -* For the key `foo{}{bar}` the whole key will be hashed as usually since the first occurrence of `{` is followed by `}` on the right without characters in the middle. -* For the key `foo{{bar}}zap` the substring `{bar` will be hashed, because it is the substring between the first occurrence of `{` and the first occurrence of `}` on its right. -* For the key `foo{bar}{zap}` the substring `bar` will be hashed, since the algorithm stops at the first valid or invalid (without bytes inside) match of `{` and `}`. -* What follows from the algorithm is that if the key starts with `{}`, it is guaranteed to be hashed as a whole. This is useful when using binary data as key names. - -Adding the hash tags exception, the following is an implementation of the `HASH_SLOT` function in Ruby and C language. - -Ruby example code: - - def HASH_SLOT(key) - s = key.index "{" - if s - e = key.index "}",s+1 - if e && e != s+1 - key = key[s+1..e-1] - end - end - crc16(key) % 16384 - end - -C example code: - - unsigned int HASH_SLOT(char *key, int keylen) { - int s, e; /* start-end indexes of { and } */ - - /* Search the first occurrence of '{'. */ - for (s = 0; s < keylen; s++) - if (key[s] == '{') break; - - /* No '{' ? Hash the whole key. This is the base case. */ - if (s == keylen) return crc16(key,keylen) & 16383; - - /* '{' found? Check if we have the corresponding '}'. */ - for (e = s+1; e < keylen; e++) - if (key[e] == '}') break; - - /* No '}' or nothing between {} ? Hash the whole key. */ - if (e == keylen || e == s+1) return crc16(key,keylen) & 16383; - - /* If we are here there is both a { and a } on its right. Hash - * what is in the middle between { and }. */ - return crc16(key+s+1,e-s-1) & 16383; - } - -### Cluster node attributes - -Every node has a unique name in the cluster. 
The node name is the -hex representation of a 160 bit random number, obtained the first time a -node is started (usually using /dev/urandom). -The node will save its ID in the node configuration file, and will use the -same ID forever, or at least as long as the node configuration file is not -deleted by the system administrator, or a *hard reset* is requested -via the [`CLUSTER RESET`](/commands/cluster-reset) command. - -The node ID is used to identify every node across the whole cluster. -It is possible for a given node to change its IP address without any need -to also change the node ID. The cluster is also able to detect the change -in IP/port and reconfigure using the gossip protocol running over the cluster -bus. - -The node ID is not the only information associated with each node, but is -the only one that is always globally consistent. Every node has also the -following set of information associated. Some information is about the -cluster configuration detail of this specific node, and is eventually -consistent across the cluster. Some other information, like the last time -a node was pinged, is instead local to each node. - -Every node maintains the following information about other nodes that it is -aware of in the cluster: The node ID, IP and port of the node, a set of -flags, what is the master of the node if it is flagged as `replica`, last time -the node was pinged and the last time the pong was received, the current -*configuration epoch* of the node (explained later in this specification), -the link state and finally the set of hash slots served. - -A detailed [explanation of all the node fields](https://redis.io/commands/cluster-nodes) is described in the [`CLUSTER NODES`](/commands/cluster-nodes) documentation. - -The [`CLUSTER NODES`](/commands/cluster-nodes) command can be sent to any node in the cluster and provides the state of the cluster and the information for each node according to the local view the queried node has of the cluster. 
- -The following is sample output of the [`CLUSTER NODES`](/commands/cluster-nodes) command sent to a master -node in a small cluster of three nodes. - - $ redis-cli cluster nodes - d1861060fe6a534d42d8a19aeb36600e18785e04 127.0.0.1:6379 myself - 0 1318428930 1 connected 0-1364 - 3886e65cc906bfd9b1f7e7bde468726a052d1dae 127.0.0.1:6380 master - 1318428930 1318428931 2 connected 1365-2729 - d289c575dcbc4bdd2931585fd4339089e461a27d 127.0.0.1:6381 master - 1318428931 1318428931 3 connected 2730-4095 - -In the above listing the different fields are in order: node id, address:port, flags, last ping sent, last pong received, configuration epoch, link state, slots. Details about the above fields will be covered as soon as we talk of specific parts of Redis Cluster. - -### The cluster bus - -Every Redis Cluster node has an additional TCP port for receiving -incoming connections from other Redis Cluster nodes. This port will be derived by adding 10000 to the data port or it can be specified with the cluster-port config. - -Example 1: - -If a Redis node is listening for client connections on port 6379, -and you do not add cluster-port parameter in redis.conf, -the Cluster bus port 16379 will be opened. - -Example 2: - -If a Redis node is listening for client connections on port 6379, -and you set cluster-port 20000 in redis.conf, -the Cluster bus port 20000 will be opened. - -Node-to-node communication happens exclusively using the Cluster bus and -the Cluster bus protocol: a binary protocol composed of frames -of different types and sizes. The Cluster bus binary protocol is not -publicly documented since it is not intended for external software devices -to talk with Redis Cluster nodes using this protocol. However you can -obtain more details about the Cluster bus protocol by reading the -`cluster.h` and `cluster.c` files in the Redis Cluster source code. 
- -### Cluster topology - -Redis Cluster is a full mesh where every node is connected with every other node using a TCP connection. - -In a cluster of N nodes, every node has N-1 outgoing TCP connections, and N-1 incoming connections. - -These TCP connections are kept alive all the time and are not created on demand. -When a node expects a pong reply in response to a ping in the cluster bus, before waiting long enough to mark the node as unreachable, it will try to -refresh the connection with the node by reconnecting from scratch. - -While Redis Cluster nodes form a full mesh, **nodes use a gossip protocol and -a configuration update mechanism in order to avoid exchanging too many -messages between nodes during normal conditions**, so the number of messages -exchanged is not exponential. - -### Node handshake - -Nodes always accept connections on the cluster bus port, and even reply to -pings when received, even if the pinging node is not trusted. -However, all other packets will be discarded by the receiving node if the -sending node is not considered part of the cluster. - -A node will accept another node as part of the cluster only in two ways: - -* If a node presents itself with a `MEET` message ([`CLUSTER MEET`](/commands/cluster-meet) command). A meet message is exactly -like a [`PING`](/commands/ping) message, but forces the receiver to accept the node as part of -the cluster. Nodes will send `MEET` messages to other nodes **only if** the system administrator requests this via the following command: - - CLUSTER MEET ip port - -* A node will also register another node as part of the cluster if a node that is already trusted will gossip about this other node. So if A knows B, and B knows C, eventually B will send gossip messages to A about C. When this happens, A will register C as part of the network, and will try to connect with C. 
-
-This means that as long as we join nodes in any connected graph, they'll eventually form a fully connected graph automatically. This means that the cluster is able to auto-discover other nodes, but only if there is a trusted relationship that was forced by the system administrator.
-
-This mechanism makes the cluster more robust but prevents different Redis clusters from accidentally mixing after change of IP addresses or other network related events.
-
-## Redirection and resharding
-
-### MOVED Redirection
-
-A Redis client is free to send queries to every node in the cluster, including
-replica nodes. The node will analyze the query, and if it is acceptable
-(that is, only a single key is mentioned in the query, or the multiple keys
-mentioned are all to the same hash slot) it will look up what
-node is responsible for the hash slot where the key or keys belong.
-
-If the hash slot is served by the node, the query is simply processed, otherwise
-the node will check its internal hash slot to node map, and will reply
-to the client with a MOVED error, like in the following example:
-
-    GET x
-    -MOVED 3999 127.0.0.1:6381
-
-The error includes the hash slot of the key (3999) and the endpoint:port of the instance that can serve the query.
-The client needs to reissue the query to the specified node's endpoint address and port.
-The endpoint can be either an IP address, a hostname, or it can be empty (e.g. `-MOVED 3999 :6380`).
-An empty endpoint indicates that the server node has an unknown endpoint, and the client should send the next request to the same endpoint as the current request but with the provided port.
-
-Note that even if the client waits a long time before reissuing the query,
-and in the meantime the cluster configuration changed, the destination node
-will reply again with a MOVED error if the hash slot 3999 is now served by
-another node. The same happens if the contacted node had no updated information.
-
-So while from the point of view of the cluster nodes are identified by
-IDs we try to simplify our interface with the client just exposing a map
-between hash slots and Redis nodes identified by endpoint:port pairs.
-
-The client is not required to, but should try to memorize that hash slot
-3999 is served by 127.0.0.1:6381. This way once a new command needs to
-be issued it can compute the hash slot of the target key and have a
-greater chance of choosing the right node.
-
-An alternative is to just refresh the whole client-side cluster layout
-using the [`CLUSTER SHARDS`](/commands/cluster-shards), or the deprecated [`CLUSTER SLOTS`](/commands/cluster-slots), command
-when a MOVED redirection is received. When a redirection is encountered, it
-is likely multiple slots were reconfigured rather than just one, so updating
-the client configuration as soon as possible is often the best strategy.
-
-Note that when the Cluster is stable (no ongoing changes in the configuration),
-eventually all the clients will obtain a map of hash slots -> nodes, making
-the cluster efficient, with clients directly addressing the right nodes
-without redirections, proxies or other single point of failure entities.
-
-A client **must also be able to handle -ASK redirections** that are described
-later in this document, otherwise it is not a complete Redis Cluster client.
-
-### Live reconfiguration
-
-Redis Cluster supports the ability to add and remove nodes while the cluster
-is running. Adding or removing a node is abstracted into the same
-operation: moving a hash slot from one node to another. This means
-that the same basic mechanism can be used in order to rebalance the cluster, add
-or remove nodes, and so forth.
-
-* To add a new node to the cluster an empty node is added to the cluster and some set of hash slots are moved from existing nodes to the new node.
-* To remove a node from the cluster the hash slots assigned to that node are moved to other existing nodes.
-* To rebalance the cluster a given set of hash slots are moved between nodes. - -The core of the implementation is the ability to move hash slots around. -From a practical point of view a hash slot is just a set of keys, so -what Redis Cluster really does during *resharding* is to move keys from -an instance to another instance. Moving a hash slot means moving all the keys -that happen to hash into this hash slot. - -To understand how this works we need to show the [`CLUSTER`](/commands/cluster) subcommands -that are used to manipulate the slots translation table in a Redis Cluster node. - -The following subcommands are available (among others not useful in this case): - -* [`CLUSTER ADDSLOTS`](/commands/cluster-addslots) slot1 [slot2] ... [slotN] -* [`CLUSTER DELSLOTS`](/commands/cluster-delslots) slot1 [slot2] ... [slotN] -* [`CLUSTER ADDSLOTSRANGE`](/commands/cluster-addslotsrange) start-slot1 end-slot1 [start-slot2 end-slot2] ... [start-slotN end-slotN] -* [`CLUSTER DELSLOTSRANGE`](/commands/cluster-delslotsrange) start-slot1 end-slot1 [start-slot2 end-slot2] ... [start-slotN end-slotN] -* [`CLUSTER SETSLOT`](/commands/cluster-setslot) slot NODE node -* [`CLUSTER SETSLOT`](/commands/cluster-setslot) slot MIGRATING node -* [`CLUSTER SETSLOT`](/commands/cluster-setslot) slot IMPORTING node - -The first four commands, `ADDSLOTS`, `DELSLOTS`, `ADDSLOTSRANGE` and `DELSLOTSRANGE`, are simply used to assign -(or remove) slots to a Redis node. Assigning a slot means to tell a given -master node that it will be in charge of storing and serving content for -the specified hash slot. - -After the hash slots are assigned they will propagate across the cluster -using the gossip protocol, as specified later in the -*configuration propagation* section. - -The `ADDSLOTS` and `ADDSLOTSRANGE` commands are usually used when a new cluster is created -from scratch to assign each master node a subset of all the 16384 hash -slots available. 
-
-The `DELSLOTS` and `DELSLOTSRANGE` are mainly used for manual modification of a cluster configuration
-or for debugging tasks: in practice they are rarely used.
-
-The `SETSLOT` subcommand is used to assign a slot to a specific node ID if
-the `SETSLOT NODE` form is used. Otherwise the slot can be set in the
-two special states `MIGRATING` and `IMPORTING`. Those two special states
-are used in order to migrate a hash slot from one node to another.
-
-* When a slot is set as MIGRATING, the node will accept all queries that
-are about this hash slot, but only if the key in question
-exists, otherwise the query is forwarded using a `-ASK` redirection to the
-node that is the target of the migration.
-* When a slot is set as IMPORTING, the node will accept all queries that
-are about this hash slot, but only if the request is
-preceded by an [`ASKING`](/commands/asking) command. If the [`ASKING`](/commands/asking) command was not given
-by the client, the query is redirected to the real hash slot owner via
-a `-MOVED` redirection error, as would happen normally.
-
-Let's make this clearer with an example of hash slot migration.
-Assume that we have two Redis master nodes, called A and B.
-We want to move hash slot 8 from A to B, so we issue commands like this:
-
-* We send B: CLUSTER SETSLOT 8 IMPORTING A
-* We send A: CLUSTER SETSLOT 8 MIGRATING B
-
-All the other nodes will continue to point clients to node "A" every time
-they are queried with a key that belongs to hash slot 8, so what happens
-is that:
-
-* All queries about existing keys are processed by "A".
-* All queries about non-existing keys in A are processed by "B", because "A" will redirect clients to "B".
-
-This way we no longer create new keys in "A".
-In the meantime, `redis-cli` used during reshardings
-and Redis Cluster configuration will migrate existing keys in
-hash slot 8 from A to B.
-This is performed using the following command:
-
-    CLUSTER GETKEYSINSLOT slot count
-
-The above command will return `count` keys in the specified hash slot.
-For keys returned, `redis-cli` sends node "A" a [`MIGRATE`](/commands/migrate) command, that
-will migrate the specified keys from A to B in an atomic way (both instances
-are locked for the time (usually very small time) needed to migrate keys so
-there are no race conditions). This is how [`MIGRATE`](/commands/migrate) works:
-
-    MIGRATE target_host target_port "" target_database id timeout KEYS key1 key2 ...
-
-[`MIGRATE`](/commands/migrate) will connect to the target instance, send a serialized version of
-the key, and once an OK code is received, the old key from its own dataset
-will be deleted. From the point of view of an external client a key exists
-either in A or B at any given time.
-
-In Redis Cluster there is no need to specify a database other than 0, but
-[`MIGRATE`](/commands/migrate) is a general command that can be used for other tasks not
-involving Redis Cluster.
-[`MIGRATE`](/commands/migrate) is optimized to be as fast as possible even when moving complex
-keys such as long lists, but in Redis Cluster reconfiguring the
-cluster where big keys are present is not considered a wise procedure if
-there are latency constraints in the application using the database.
-
-When the migration process is finally finished, the `SETSLOT <slot> NODE <node-id>` command is sent to the two nodes involved in the migration in order to
-set the slots to their normal state again. The same command is usually
-sent to all other nodes to avoid waiting for the natural
-propagation of the new configuration across the cluster.
-
-### ASK redirection
-
-In the previous section, we briefly talked about ASK redirection. Why can't
-we simply use MOVED redirection? Because while MOVED means that
-we think the hash slot is permanently served by a different node and the
-next queries should be tried against the specified node.
ASK means to -send only the next query to the specified node. - -This is needed because the next query about hash slot 8 can be about a -key that is still in A, so we always want the client to try A and -then B if needed. Since this happens only for one hash slot out of 16384 -available, the performance hit on the cluster is acceptable. - -We need to force that client behavior, so to make sure -that clients will only try node B after A was tried, node B will only -accept queries of a slot that is set as IMPORTING if the client sends the -ASKING command before sending the query. - -Basically the ASKING command sets a one-time flag on the client that forces -a node to serve a query about an IMPORTING slot. - -The full semantics of ASK redirection from the point of view of the client is as follows: - -* If ASK redirection is received, send only the query that was redirected to the specified node but continue sending subsequent queries to the old node. -* Start the redirected query with the ASKING command. -* Don't yet update local client tables to map hash slot 8 to B. - -Once hash slot 8 migration is completed, A will send a MOVED message and -the client may permanently map hash slot 8 to the new endpoint and port pair. -Note that if a buggy client performs the map earlier this is not -a problem since it will not send the ASKING command before issuing the query, -so B will redirect the client to A using a MOVED redirection error. - -Slots migration is explained in similar terms but with different wording -(for the sake of redundancy in the documentation) in the [`CLUSTER SETSLOT`](/commands/cluster-setslot) -command documentation. - -### Client connections and redirection handling - -To be efficient, Redis Cluster clients maintain a map of the current slot -configuration. However, this configuration is not *required* to be up to date. -When contacting the wrong node results in a redirection, the client -can update its internal slot map accordingly. 
-
-Clients usually need to fetch a complete list of slots and mapped node
-addresses in two different situations:
-
-* At startup, to populate the initial slots configuration
-* When the client receives a `MOVED` redirection
-
-Note that a client may handle the `MOVED` redirection by updating just the
-moved slot in its table; however this is usually not efficient because often
-the configuration of multiple slots will be modified at once. For example, if a
-replica is promoted to master, all of the slots served by the old master will
-be remapped. It is much simpler to react to a `MOVED` redirection by
-fetching the full map of slots to nodes from scratch.
-
-Clients can issue a [`CLUSTER SLOTS`](/commands/cluster-slots) command to retrieve an array of slot
-ranges and the associated master and replica nodes serving the specified ranges.
-
-The following is an example of output of [`CLUSTER SLOTS`](/commands/cluster-slots):
-
-```
-127.0.0.1:7000> cluster slots
-1) 1) (integer) 5461
-   2) (integer) 10922
-   3) 1) "127.0.0.1"
-      2) (integer) 7001
-   4) 1) "127.0.0.1"
-      2) (integer) 7004
-2) 1) (integer) 0
-   2) (integer) 5460
-   3) 1) "127.0.0.1"
-      2) (integer) 7000
-   4) 1) "127.0.0.1"
-      2) (integer) 7003
-3) 1) (integer) 10923
-   2) (integer) 16383
-   3) 1) "127.0.0.1"
-      2) (integer) 7002
-   4) 1) "127.0.0.1"
-      2) (integer) 7005
-```
-
-The first two sub-elements of every element of the returned array are the
-start and end slots of the range. The additional elements represent address-port
-pairs. The first address-port pair is the master serving the slot, and the
-additional address-port pairs are the replicas serving the same slot. Replicas
-will be listed only when not in an error condition (i.e., when their FAIL flag is not set).
-
-The first element in the output above says that slots from 5461 to 10922
-(start and end included) are served by 127.0.0.1:7001, and it is possible
-to scale read-only load contacting the replica at 127.0.0.1:7004.
- -[`CLUSTER SLOTS`](/commands/cluster-slots) is not guaranteed to return ranges that cover the full -16384 slots if the cluster is misconfigured, so clients should initialize the -slots configuration map filling the target nodes with NULL objects, and -report an error if the user tries to execute commands about keys -that belong to unassigned slots. - -Before returning an error to the caller when a slot is found to -be unassigned, the client should try to fetch the slots configuration -again to check if the cluster is now configured properly. - -### Multi-keys operations - -Using hash tags, clients are free to use multi-key operations. -For example the following operation is valid: - - MSET {user:1000}.name Angela {user:1000}.surname White - -Multi-key operations may become unavailable when a resharding of the -hash slot the keys belong to is in progress. - -More specifically, even during a resharding the multi-key operations targeting -keys that all exist and all still hash to the same slot (either the source or -destination node) are still available. - -Operations on keys that don't exist or are - during the resharding - split -between the source and destination nodes, will generate a `-TRYAGAIN` error. -The client can try the operation after some time, or report back the error. - -As soon as migration of the specified hash slot has terminated, all -multi-key operations are available again for that hash slot. - -### Scaling reads using replica nodes - -Normally replica nodes will redirect clients to the authoritative master for -the hash slot involved in a given command, however clients can use replicas -in order to scale reads using the [`READONLY`](/commands/readonly) command. - -[`READONLY`](/commands/readonly) tells a Redis Cluster replica node that the client is ok reading -possibly stale data and is not interested in running write queries. 
- -When the connection is in readonly mode, the cluster will send a redirection -to the client only if the operation involves keys not served -by the replica's master node. This may happen because: - -1. The client sent a command about hash slots never served by the master of this replica. -2. The cluster was reconfigured (for example resharded) and the replica is no longer able to serve commands for a given hash slot. - -When this happens the client should update its hash slot map as explained in -the previous sections. - -The readonly state of the connection can be cleared using the [`READWRITE`](/commands/readwrite) command. - -## Fault Tolerance - -### Heartbeat and gossip messages - -Redis Cluster nodes continuously exchange ping and pong packets. Those two kinds of packets have the same structure, and both carry important configuration information. The only actual difference is the message type field. We'll refer to the sum of ping and pong packets as *heartbeat packets*. - -Usually nodes send ping packets that will trigger the receivers to reply with pong packets. However this is not necessarily true. It is possible for nodes to just send pong packets to send information to other nodes about their configuration, without triggering a reply. This is useful, for example, in order to broadcast a new configuration as soon as possible. - -Usually a node will ping a few random nodes every second so that the total number of ping packets sent (and pong packets received) by each node is a constant amount regardless of the number of nodes in the cluster. - -However every node makes sure to ping every other node that hasn't sent a ping or received a pong for longer than half the `NODE_TIMEOUT` time. Before `NODE_TIMEOUT` has elapsed, nodes also try to reconnect the TCP link with another node to make sure nodes are not believed to be unreachable only because there is a problem in the current TCP connection. 
- -The number of messages globally exchanged can be sizable if `NODE_TIMEOUT` is set to a small figure and the number of nodes (N) is very large, since every node will try to ping every other node for which they don't have fresh information every half the `NODE_TIMEOUT` time. - -For example in a 100 node cluster with a node timeout set to 60 seconds, every node will try to send 99 pings every 30 seconds, with a total amount of pings of 3.3 per second. Multiplied by 100 nodes, this is 330 pings per second in the total cluster. - -There are ways to lower the number of messages, however there have been no -reported issues with the bandwidth currently used by Redis Cluster failure -detection, so for now the obvious and direct design is used. Note that even -in the above example, the 330 packets per second exchanged are evenly -divided among 100 different nodes, so the traffic each node receives -is acceptable. - -### Heartbeat packet content - -Ping and pong packets contain a header that is common to all types of packets (for instance packets to request a failover vote), and a special gossip section that is specific to Ping and Pong packets. - -The common header has the following information: - -* Node ID, a 160 bit pseudorandom string that is assigned the first time a node is created and remains the same for all the life of a Redis Cluster node. -* The `currentEpoch` and `configEpoch` fields of the sending node that are used to mount the distributed algorithms used by Redis Cluster (this is explained in detail in the next sections). If the node is a replica the `configEpoch` is the last known `configEpoch` of its master. -* The node flags, indicating if the node is a replica, a master, and other single-bit node information. -* A bitmap of the hash slots served by the sending node, or if the node is a replica, a bitmap of the slots served by its master. -* The sender TCP base port that is the port used by Redis to accept client commands. 
-* The cluster port that is the port used by Redis for node-to-node communication. -* The state of the cluster from the point of view of the sender (down or ok). -* The master node ID of the sending node, if it is a replica. - -Ping and pong packets also contain a gossip section. This section offers to the receiver a view of what the sender node thinks about other nodes in the cluster. The gossip section only contains information about a few random nodes among the set of nodes known to the sender. The number of nodes mentioned in a gossip section is proportional to the cluster size. - -For every node added in the gossip section the following fields are reported: - -* Node ID. -* IP and port of the node. -* Node flags. - -Gossip sections allow receiving nodes to get information about the state of other nodes from the point of view of the sender. This is useful both for failure detection and to discover other nodes in the cluster. - -### Failure detection - -Redis Cluster failure detection is used to recognize when a master or replica node is no longer reachable by the majority of nodes and then respond by promoting a replica to the role of master. When replica promotion is not possible the cluster is put in an error state to stop receiving queries from clients. - -As already mentioned, every node takes a list of flags associated with other known nodes. There are two flags that are used for failure detection that are called `PFAIL` and `FAIL`. `PFAIL` means *Possible failure*, and is a non-acknowledged failure type. `FAIL` means that a node is failing and that this condition was confirmed by a majority of masters within a fixed amount of time. - -**PFAIL flag:** - -A node flags another node with the `PFAIL` flag when the node is not reachable for more than `NODE_TIMEOUT` time. Both master and replica nodes can flag another node as `PFAIL`, regardless of its type. 
- -The concept of non-reachability for a Redis Cluster node is that we have an **active ping** (a ping that we sent for which we have yet to get a reply) pending for longer than `NODE_TIMEOUT`. For this mechanism to work the `NODE_TIMEOUT` must be large compared to the network round trip time. In order to add reliability during normal operations, nodes will try to reconnect with other nodes in the cluster as soon as half of the `NODE_TIMEOUT` has elapsed without a reply to a ping. This mechanism ensures that connections are kept alive so broken connections usually won't result in false failure reports between nodes. - -**FAIL flag:** - -The `PFAIL` flag alone is just local information every node has about other nodes, but it is not sufficient to trigger a replica promotion. For a node to be considered down the `PFAIL` condition needs to be escalated to a `FAIL` condition. - -As outlined in the node heartbeats section of this document, every node sends gossip messages to every other node including the state of a few random known nodes. Every node eventually receives a set of node flags for every other node. This way every node has a mechanism to signal other nodes about failure conditions they have detected. - -A `PFAIL` condition is escalated to a `FAIL` condition when the following set of conditions are met: - -* Some node, that we'll call A, has another node B flagged as `PFAIL`. -* Node A collected, via gossip sections, information about the state of B from the point of view of the majority of masters in the cluster. -* The majority of masters signaled the `PFAIL` or `FAIL` condition within `NODE_TIMEOUT * FAIL_REPORT_VALIDITY_MULT` time. (The validity factor is set to 2 in the current implementation, so this is just two times the `NODE_TIMEOUT` time). - -If all the above conditions are true, Node A will: - -* Mark the node as `FAIL`. -* Send a `FAIL` message (as opposed to a `FAIL` condition within a heartbeat message) to all the reachable nodes. 
-
-The `FAIL` message will force every receiving node to mark the node in `FAIL` state, whether or not it already flagged the node in `PFAIL` state.
-
-Note that *the FAIL flag is mostly one way*. That is, a node can go from `PFAIL` to `FAIL`, but a `FAIL` flag can only be cleared in the following situations:
-
-* The node is already reachable and is a replica. In this case the `FAIL` flag can be cleared as replicas are not failed over.
-* The node is already reachable and is a master not serving any slot. In this case the `FAIL` flag can be cleared as masters without slots do not really participate in the cluster and are waiting to be configured in order to join the cluster.
-* The node is already reachable and is a master, but a long time (N times the `NODE_TIMEOUT`) has elapsed without any detectable replica promotion. It's better for it to rejoin the cluster and continue in this case.
-
-It is useful to note that while the `PFAIL` -> `FAIL` transition uses a form of agreement, the agreement used is weak:
-
-1. Nodes collect views of other nodes over some time period, so even if the majority of master nodes need to "agree", actually this is just state that we collected from different nodes at different times and we are not sure, nor do we require, that at a given moment the majority of masters agreed. However we discard failure reports which are old, so the failure was signaled by the majority of masters within a window of time.
-2. While every node detecting the `FAIL` condition will force that condition on other nodes in the cluster using the `FAIL` message, there is no way to ensure the message will reach all the nodes. For instance a node may detect the `FAIL` condition and because of a partition will not be able to reach any other node.
-
-However the Redis Cluster failure detection has a liveness requirement: eventually all the nodes should agree about the state of a given node. There are two cases that can originate from split brain conditions.
Either some minority of nodes believe the node is in `FAIL` state, or a minority of nodes believe the node is not in `FAIL` state. In both the cases eventually the cluster will have a single view of the state of a given node: - -**Case 1**: If a majority of masters have flagged a node as `FAIL`, because of failure detection and the *chain effect* it generates, every other node will eventually flag the master as `FAIL`, since in the specified window of time enough failures will be reported. - -**Case 2**: When only a minority of masters have flagged a node as `FAIL`, the replica promotion will not happen (as it uses a more formal algorithm that makes sure everybody knows about the promotion eventually) and every node will clear the `FAIL` state as per the `FAIL` state clearing rules above (i.e. no promotion after N times the `NODE_TIMEOUT` has elapsed). - -**The `FAIL` flag is only used as a trigger to run the safe part of the algorithm** for the replica promotion. In theory a replica may act independently and start a replica promotion when its master is not reachable, and wait for the masters to refuse to provide the acknowledgment if the master is actually reachable by the majority. However the added complexity of the `PFAIL -> FAIL` state, the weak agreement, and the `FAIL` message forcing the propagation of the state in the shortest amount of time in the reachable part of the cluster, have practical advantages. Because of these mechanisms, usually all the nodes will stop accepting writes at about the same time if the cluster is in an error state. This is a desirable feature from the point of view of applications using Redis Cluster. Also erroneous election attempts initiated by replicas that can't reach its master due to local problems (the master is otherwise reachable by the majority of other master nodes) are avoided. 
-
-## Configuration handling, propagation, and failovers
-
-### Cluster current epoch
-
-Redis Cluster uses a concept similar to the Raft algorithm "term". In Redis Cluster the term is called epoch instead, and it is used in order to give incremental versioning to events. When multiple nodes provide conflicting information, it becomes possible for another node to understand which state is the most up to date.
-
-The `currentEpoch` is a 64 bit unsigned number.
-
-At node creation every Redis Cluster node, both replicas and master nodes, set the `currentEpoch` to 0.
-
-Every time a packet is received from another node, if the epoch of the sender (part of the cluster bus messages header) is greater than the local node epoch, the `currentEpoch` is updated to the sender epoch.
-
-Because of these semantics, eventually all the nodes will agree to the greatest `currentEpoch` in the cluster.
-
-This information is used when the state of the cluster is changed and a node seeks agreement in order to perform some action.
-
-Currently this happens only during replica promotion, as described in the next section. Basically the epoch is a logical clock for the cluster and dictates that given information wins over one with a smaller epoch.
-
-### Configuration epoch
-
-Every master always advertises its `configEpoch` in ping and pong packets along with a bitmap advertising the set of slots it serves.
-
-The `configEpoch` is set to zero in masters when a new node is created.
-
-A new `configEpoch` is created during replica election. Replicas trying to replace
-failing masters increment their epoch and try to get authorization from
-a majority of masters. When a replica is authorized, a new unique `configEpoch`
-is created and the replica turns into a master using the new `configEpoch`.
- -As explained in the next sections the `configEpoch` helps to resolve conflicts when different nodes claim divergent configurations (a condition that may happen because of network partitions and node failures). - -replica nodes also advertise the `configEpoch` field in ping and pong packets, but in the case of replicas the field represents the `configEpoch` of its master as of the last time they exchanged packets. This allows other instances to detect when a replica has an old configuration that needs to be updated (master nodes will not grant votes to replicas with an old configuration). - -Every time the `configEpoch` changes for some known node, it is permanently stored in the nodes.conf file by all the nodes that receive this information. The same also happens for the `currentEpoch` value. These two variables are guaranteed to be saved and `fsync-ed` to disk when updated before a node continues its operations. - -The `configEpoch` values generated using a simple algorithm during failovers -are guaranteed to be new, incremental, and unique. - -### Replica election and promotion - -Replica election and promotion is handled by replica nodes, with the help of master nodes that vote for the replica to promote. -A replica election happens when a master is in `FAIL` state from the point of view of at least one of its replicas that has the prerequisites in order to become a master. - -In order for a replica to promote itself to master, it needs to start an election and win it. All the replicas for a given master can start an election if the master is in `FAIL` state, however only one replica will win the election and promote itself to master. - -A replica starts an election when the following conditions are met: - -* The replica's master is in `FAIL` state. -* The master was serving a non-zero number of slots. 
-* The replica replication link was disconnected from the master for no longer than a given amount of time, in order to ensure the promoted replica's data is reasonably fresh. This time is user configurable. - -In order to be elected, the first step for a replica is to increment its `currentEpoch` counter, and request votes from master instances. - -Votes are requested by the replica by broadcasting a `FAILOVER_AUTH_REQUEST` packet to every master node of the cluster. Then it waits for a maximum time of two times the `NODE_TIMEOUT` for replies to arrive (but always for at least 2 seconds). - -Once a master has voted for a given replica, replying positively with a `FAILOVER_AUTH_ACK`, it can no longer vote for another replica of the same master for a period of `NODE_TIMEOUT * 2`. In this period it will not be able to reply to other authorization requests for the same master. This is not needed to guarantee safety, but useful for preventing multiple replicas from getting elected (even if with a different `configEpoch`) at around the same time, which is usually not wanted. - -A replica discards any `AUTH_ACK` replies with an epoch that is less than the `currentEpoch` at the time the vote request was sent. This ensures it doesn't count votes intended for a previous election. - -Once the replica receives ACKs from the majority of masters, it wins the election. -Otherwise if the majority is not reached within the period of two times `NODE_TIMEOUT` (but always at least 2 seconds), the election is aborted and a new one will be tried again after `NODE_TIMEOUT * 4` (and always at least 4 seconds). - -### Replica rank - -As soon as a master is in `FAIL` state, a replica waits a short period of time before trying to get elected. That delay is computed as follows: - - DELAY = 500 milliseconds + random delay between 0 and 500 milliseconds + - REPLICA_RANK * 1000 milliseconds. 
- -The fixed delay ensures that we wait for the `FAIL` state to propagate across the cluster, otherwise the replica may try to get elected while the masters are still unaware of the `FAIL` state, refusing to grant their vote. - -The random delay is used to desynchronize replicas so they're unlikely to start an election at the same time. - -The `REPLICA_RANK` is the rank of this replica regarding the amount of replication data it has processed from the master. -Replicas exchange messages when the master is failing in order to establish a (best effort) rank: -the replica with the most updated replication offset is at rank 0, the second most updated at rank 1, and so forth. -In this way the most updated replicas try to get elected before others. - -Rank order is not strictly enforced; if a replica of higher rank fails to be -elected, the others will try shortly. - -Once a replica wins the election, it obtains a new unique and incremental `configEpoch` which is higher than that of any other existing master. It starts advertising itself as master in ping and pong packets, providing the set of served slots with a `configEpoch` that will win over the past ones. - -In order to speedup the reconfiguration of other nodes, a pong packet is broadcast to all the nodes of the cluster. Currently unreachable nodes will eventually be reconfigured when they receive a ping or pong packet from another node or will receive an `UPDATE` packet from another node if the information it publishes via heartbeat packets are detected to be out of date. - -The other nodes will detect that there is a new master serving the same slots served by the old master but with a greater `configEpoch`, and will upgrade their configuration. Replicas of the old master (or the failed over master if it rejoins the cluster) will not just upgrade the configuration but will also reconfigure to replicate from the new master. How nodes rejoining the cluster are configured is explained in the next sections. 
- -### Masters reply to replica vote request - -In the previous section, we discussed how replicas try to get elected. This section explains what happens from the point of view of a master that is requested to vote for a given replica. - -Masters receive requests for votes in form of `FAILOVER_AUTH_REQUEST` requests from replicas. - -For a vote to be granted the following conditions need to be met: - -1. A master only votes a single time for a given epoch, and refuses to vote for older epochs: every master has a lastVoteEpoch field and will refuse to vote again as long as the `currentEpoch` in the auth request packet is not greater than the lastVoteEpoch. When a master replies positively to a vote request, the lastVoteEpoch is updated accordingly, and safely stored on disk. -2. A master votes for a replica only if the replica's master is flagged as `FAIL`. -3. Auth requests with a `currentEpoch` that is less than the master `currentEpoch` are ignored. Because of this the master reply will always have the same `currentEpoch` as the auth request. If the same replica asks again to be voted, incrementing the `currentEpoch`, it is guaranteed that an old delayed reply from the master can not be accepted for the new vote. - -Example of the issue caused by not using rule number 3: - -Master `currentEpoch` is 5, lastVoteEpoch is 1 (this may happen after a few failed elections) - -* Replica `currentEpoch` is 3. -* Replica tries to be elected with epoch 4 (3+1), master replies with an ok with `currentEpoch` 5, however the reply is delayed. -* Replica will try to be elected again, at a later time, with epoch 5 (4+1), the delayed reply reaches the replica with `currentEpoch` 5, and is accepted as valid. - -4. Masters don't vote for a replica of the same master before `NODE_TIMEOUT * 2` has elapsed if a replica of that master was already voted for. This is not strictly required as it is not possible for two replicas to win the election in the same epoch. 
However, in practical terms it ensures that when a replica is elected it has plenty of time to inform the other replicas and avoid the possibility that another replica will win a new election, performing an unnecessary second failover. -5. Masters make no effort to select the best replica in any way. If the replica's master is in `FAIL` state and the master did not vote in the current term, a positive vote is granted. The best replica is the most likely to start an election and win it before the other replicas, since it will usually be able to start the voting process earlier because of its *higher rank* as explained in the previous section. -6. When a master refuses to vote for a given replica there is no negative response, the request is simply ignored. -7. Masters don't vote for replicas sending a `configEpoch` that is less than any `configEpoch` in the master table for the slots claimed by the replica. Remember that the replica sends the `configEpoch` of its master, and the bitmap of the slots served by its master. This means that the replica requesting the vote must have a configuration for the slots it wants to failover that is newer or equal the one of the master granting the vote. - -### Practical example of configuration epoch usefulness during partitions - -This section illustrates how the epoch concept is used to make the replica promotion process more resistant to partitions. - -* A master is no longer reachable indefinitely. The master has three replicas A, B, C. -* Replica A wins the election and is promoted to master. -* A network partition makes A not available for the majority of the cluster. -* Replica B wins the election and is promoted as master. -* A partition makes B not available for the majority of the cluster. -* The previous partition is fixed, and A is available again. 
- -At this point B is down and A is available again with a role of master (actually `UPDATE` messages would reconfigure it promptly, but here we assume all `UPDATE` messages were lost). At the same time, replica C will try to get elected in order to fail over B. This is what happens: - -1. C will try to get elected and will succeed, since for the majority of masters its master is actually down. It will obtain a new incremental `configEpoch`. -2. A will not be able to claim to be the master for its hash slots, because the other nodes already have the same hash slots associated with a higher configuration epoch (the one of B) compared to the one published by A. -3. So, all the nodes will upgrade their table to assign the hash slots to C, and the cluster will continue its operations. - -As you'll see in the next sections, a stale node rejoining a cluster -will usually get notified as soon as possible about the configuration change -because as soon as it pings any other node, the receiver will detect it -has stale information and will send an `UPDATE` message. - -### Hash slots configuration propagation - -An important part of Redis Cluster is the mechanism used to propagate the information about which cluster node is serving a given set of hash slots. This is vital to both the startup of a fresh cluster and the ability to upgrade the configuration after a replica was promoted to serve the slots of its failing master. - -The same mechanism allows nodes partitioned away for an indefinite amount of -time to rejoin the cluster in a sensible way. - -There are two ways hash slot configurations are propagated: - -1. Heartbeat messages. The sender of a ping or pong packet always adds information about the set of hash slots it (or its master, if it is a replica) serves. -2. `UPDATE` messages. 
Since in every heartbeat packet there is information about the sender `configEpoch` and set of hash slots served, if a receiver of a heartbeat packet finds the sender information is stale, it will send a packet with new information, forcing the stale node to update its info. - -The receiver of a heartbeat or `UPDATE` message uses certain simple rules in -order to update its table mapping hash slots to nodes. When a new Redis Cluster node is created, its local hash slot table is simply initialized to `NULL` entries so that each hash slot is not bound or linked to any node. This looks similar to the following: - -``` -0 -> NULL -1 -> NULL -2 -> NULL -... -16383 -> NULL -``` - -The first rule followed by a node in order to update its hash slot table is the following: - -**Rule 1**: If a hash slot is unassigned (set to `NULL`), and a known node claims it, I'll modify my hash slot table and associate the claimed hash slots to it. - -So if we receive a heartbeat from node A claiming to serve hash slots 1 and 2 with a configuration epoch value of 3, the table will be modified to: - -``` -0 -> NULL -1 -> A [3] -2 -> A [3] -... -16383 -> NULL -``` - -When a new cluster is created, a system administrator needs to manually assign (using the [`CLUSTER ADDSLOTS`](/commands/cluster-addslots) command, via the redis-cli command line tool, or by any other means) the slots served by each master node only to the node itself, and the information will rapidly propagate across the cluster. - -However this rule is not enough. We know that hash slot mapping can change -during two events: - -1. A replica replaces its master during a failover. -2. A slot is resharded from a node to a different one. - -For now let's focus on failovers. When a replica fails over its master, it obtains -a configuration epoch which is guaranteed to be greater than the one of its -master (and more generally greater than any other configuration epoch -generated previously). 
For example node B, which is a replica of A, may failover -A with configuration epoch of 4. It will start to send heartbeat packets -(the first time mass-broadcasting cluster-wide) and because of the following -second rule, receivers will update their hash slot tables: - -**Rule 2**: If a hash slot is already assigned, and a known node is advertising it using a `configEpoch` that is greater than the `configEpoch` of the master currently associated with the slot, I'll rebind the hash slot to the new node. - -So after receiving messages from B that claim to serve hash slots 1 and 2 with configuration epoch of 4, the receivers will update their table in the following way: - -``` -0 -> NULL -1 -> B [4] -2 -> B [4] -... -16383 -> NULL -``` - -Liveness property: because of the second rule, eventually all nodes in the cluster will agree that the owner of a slot is the one with the greatest `configEpoch` among the nodes advertising it. - -This mechanism in Redis Cluster is called **last failover wins**. - -The same happens during resharding. When a node importing a hash slot completes -the import operation, its configuration epoch is incremented to make sure the -change will be propagated throughout the cluster. - -### UPDATE messages, a closer look - -With the previous section in mind, it is easier to see how update messages -work. Node A may rejoin the cluster after some time. It will send heartbeat -packets where it claims it serves hash slots 1 and 2 with configuration epoch -of 3. All the receivers with updated information will instead see that -the same hash slots are associated with node B having a higher configuration -epoch. Because of this they'll send an `UPDATE` message to A with the new -configuration for the slots. A will update its configuration because of the -**rule 2** above. - -### How nodes rejoin the cluster - -The same basic mechanism is used when a node rejoins a cluster. 
-Continuing with the example above, node A will be notified -that hash slots 1 and 2 are now served by B. Assuming that these two were -the only hash slots served by A, the count of hash slots served by A will -drop to 0! So A will **reconfigure to be a replica of the new master**. - -The actual rule followed is a bit more complex than this. In general it may -happen that A rejoins after a lot of time, in the meantime it may happen that -hash slots originally served by A are served by multiple nodes, for example -hash slot 1 may be served by B, and hash slot 2 by C. - -So the actual *Redis Cluster node role switch rule* is: **A master node will change its configuration to replicate (be a replica of) the node that stole its last hash slot**. - -During reconfiguration, eventually the number of served hash slots will drop to zero, and the node will reconfigure accordingly. Note that in the base case this just means that the old master will be a replica of the replica that replaced it after a failover. However in the general form the rule covers all possible cases. - -Replicas do exactly the same: they reconfigure to replicate the node that -stole the last hash slot of its former master. - -### Replica migration - -Redis Cluster implements a concept called *replica migration* in order to -improve the availability of the system. The idea is that in a cluster with -a master-replica setup, if the map between replicas and masters is fixed -availability is limited over time if multiple independent failures of single -nodes happen. - -For example in a cluster where every master has a single replica, the cluster -can continue operations as long as either the master or the replica fail, but not -if both fail the same time. However there is a class of failures that are -the independent failures of single nodes caused by hardware or software issues -that can accumulate over time. For example: - -* Master A has a single replica A1. -* Master A fails. A1 is promoted as new master. 
-* Three hours later A1 fails in an independent manner (unrelated to the failure of A). No other replica is available for promotion since node A is still down. The cluster cannot continue normal operations. - -If the map between masters and replicas is fixed, the only way to make the cluster -more resistant to the above scenario is to add replicas to every master, however -this is costly as it requires more instances of Redis to be executed, more -memory, and so forth. - -An alternative is to create an asymmetry in the cluster, and let the cluster -layout automatically change over time. For example the cluster may have three -masters A, B, C. A and B have a single replica each, A1 and B1. However the master -C is different and has two replicas: C1 and C2. - -Replica migration is the process of automatic reconfiguration of a replica -in order to *migrate* to a master that has no longer coverage (no working -replicas). With replica migration the scenario mentioned above turns into the -following: - -* Master A fails. A1 is promoted. -* C2 migrates as replica of A1, that is otherwise not backed by any replica. -* Three hours later A1 fails as well. -* C2 is promoted as new master to replace A1. -* The cluster can continue the operations. - -### Replica migration algorithm - -The migration algorithm does not use any form of agreement since the replica -layout in a Redis Cluster is not part of the cluster configuration that needs -to be consistent and/or versioned with config epochs. Instead it uses an -algorithm to avoid mass-migration of replicas when a master is not backed. -The algorithm guarantees that eventually (once the cluster configuration is -stable) every master will be backed by at least one replica. - -This is how the algorithm works. To start we need to define what is a -*good replica* in this context: a good replica is a replica not in `FAIL` state -from the point of view of a given node. 
- -The execution of the algorithm is triggered in every replica that detects that -there is at least a single master without good replicas. However among all the -replicas detecting this condition, only a subset should act. This subset is -actually often a single replica unless different replicas have in a given moment -a slightly different view of the failure state of other nodes. - -The *acting replica* is the replica among the masters with the maximum number -of attached replicas, that is not in FAIL state and has the smallest node ID. - -So for example if there are 10 masters with 1 replica each, and 2 masters with -5 replicas each, the replica that will try to migrate is - among the 2 masters -having 5 replicas - the one with the lowest node ID. Given that no agreement -is used, it is possible that when the cluster configuration is not stable, -a race condition occurs where multiple replicas believe themselves to be -the non-failing replica with the lower node ID (it is unlikely for this to happen -in practice). If this happens, the result is multiple replicas migrating to the -same master, which is harmless. If the race happens in a way that will leave -the ceding master without replicas, as soon as the cluster is stable again -the algorithm will be re-executed again and will migrate a replica back to -the original master. - -Eventually every master will be backed by at least one replica. However, -the normal behavior is that a single replica migrates from a master with -multiple replicas to an orphaned master. - -The algorithm is controlled by a user-configurable parameter called -`cluster-migration-barrier`: the number of good replicas a master -must be left with before a replica can migrate away. For example, if this -parameter is set to 2, a replica can try to migrate only if its master remains -with two working replicas. 
- -### configEpoch conflicts resolution algorithm - -When new `configEpoch` values are created via replica promotion during -failovers, they are guaranteed to be unique. - -However there are two distinct events where new configEpoch values are -created in an unsafe way, just incrementing the local `currentEpoch` of -the local node and hoping there are no conflicts at the same time. -Both the events are system-administrator triggered: - -1. [`CLUSTER FAILOVER`](/commands/cluster-failover) command with `TAKEOVER` option is able to manually promote a replica node into a master *without the majority of masters being available*. This is useful, for example, in multi data center setups. -2. Migration of slots for cluster rebalancing also generates new configuration epochs inside the local node without agreement for performance reasons. - -Specifically, during manual resharding, when a hash slot is migrated from -a node A to a node B, the resharding program will force B to upgrade -its configuration to an epoch which is the greatest found in the cluster, -plus 1 (unless the node is already the one with the greatest configuration -epoch), without requiring agreement from other nodes. -Usually a real world resharding involves moving several hundred hash slots -(especially in small clusters). Requiring an agreement to generate new -configuration epochs during resharding, for each hash slot moved, is -inefficient. Moreover it requires a fsync in each of the cluster nodes -every time in order to store the new configuration. Because of the way it is -performed instead, we only need a new config epoch when the first hash slot is moved, -making it much more efficient in production environments. - -However because of the two cases above, it is possible (though unlikely) to end -with multiple nodes having the same configuration epoch. 
A resharding operation -performed by the system administrator, and a failover happening at the same -time (plus a lot of bad luck) could cause `currentEpoch` collisions if -they are not propagated fast enough. - -Moreover, software bugs and filesystem corruptions can also contribute -to multiple nodes having the same configuration epoch. - -When masters serving different hash slots have the same `configEpoch`, there -are no issues. It is more important that replicas failing over a master have -unique configuration epochs. - -That said, manual interventions or resharding may change the cluster -configuration in different ways. The Redis Cluster main liveness property -requires that slot configurations always converge, so under every circumstance -we really want all the master nodes to have a different `configEpoch`. - -In order to enforce this, **a conflict resolution algorithm** is used in the -event that two nodes end up with the same `configEpoch`. - -* IF a master node detects another master node is advertising itself with -the same `configEpoch`. -* AND IF the node has a lexicographically smaller Node ID compared to the other node claiming the same `configEpoch`. -* THEN it increments its `currentEpoch` by 1, and uses it as the new `configEpoch`. - -If there are any set of nodes with the same `configEpoch`, all the nodes but the one with the greatest Node ID will move forward, guaranteeing that, eventually, every node will pick a unique configEpoch regardless of what happened. - -This mechanism also guarantees that after a fresh cluster is created, all -nodes start with a different `configEpoch` (even if this is not actually -used) since `redis-cli` makes sure to use [`CLUSTER SET-CONFIG-EPOCH`](/commands/cluster-set-config-epoch) at startup. -However if for some reason a node is left misconfigured, it will update -its configuration to a different configuration epoch automatically. 
- -### Node resets - -Nodes can be software reset (without restarting them) in order to be reused -in a different role or in a different cluster. This is useful in normal -operations, in testing, and in cloud environments where a given node can -be reprovisioned to join a different set of nodes to enlarge or create a new -cluster. - -In Redis Cluster nodes are reset using the [`CLUSTER RESET`](/commands/cluster-reset) command. The -command is provided in two variants: - -* `CLUSTER RESET SOFT` -* `CLUSTER RESET HARD` - -The command must be sent directly to the node to reset. If no reset type is -provided, a soft reset is performed. - -The following is a list of operations performed by a reset: - -1. Soft and hard reset: If the node is a replica, it is turned into a master, and its dataset is discarded. If the node is a master and contains keys the reset operation is aborted. -2. Soft and hard reset: All the slots are released, and the manual failover state is reset. -3. Soft and hard reset: All the other nodes in the nodes table are removed, so the node no longer knows any other node. -4. Hard reset only: `currentEpoch`, `configEpoch`, and `lastVoteEpoch` are set to 0. -5. Hard reset only: the Node ID is changed to a new random ID. - -Master nodes with non-empty data sets can't be reset (since normally you want to reshard data to the other nodes). However, under special conditions when this is appropriate (e.g. when a cluster is totally destroyed with the intent of creating a new one), [`FLUSHALL`](/commands/flushall) must be executed before proceeding with the reset. - -### Removing nodes from a cluster - -It is possible to practically remove a node from an existing cluster by -resharding all its data to other nodes (if it is a master node) and -shutting it down. However, the other nodes will still remember its node -ID and address, and will attempt to connect with it. 
- -For this reason, when a node is removed we want to also remove its entry -from all the other nodes tables. This is accomplished by using the -`CLUSTER FORGET ` command. - -The command does two things: - -1. It removes the node with the specified node ID from the nodes table. -2. It sets a 60 second ban which prevents a node with the same node ID from being re-added. - -The second operation is needed because Redis Cluster uses gossip in order to auto-discover nodes, so removing the node X from node A, could result in node B gossiping about node X to A again. Because of the 60 second ban, the Redis Cluster administration tools have 60 seconds in order to remove the node from all the nodes, preventing the re-addition of the node due to auto discovery. - -Further information is available in the [`CLUSTER FORGET`](/commands/cluster-forget) documentation. - -## Publish/Subscribe - -In a Redis Cluster, clients can subscribe to every node, and can also -publish to every other node. The cluster will make sure that published -messages are forwarded as needed. - -The clients can send SUBSCRIBE to any node and can also send PUBLISH to any node. -It will simply broadcast each published message to all other nodes. - -Redis 7.0 and later features sharded pub/sub, in which shard channels are assigned to slots by the same algorithm used to assign keys to slots. -A shard message must be sent to a node that owns the slot the shard channel is hashed to. -The cluster makes sure the published shard messages are forwarded to all nodes in the shard, so clients can subscribe to a shard channel by connecting to either the master responsible for the slot, or to any of its replicas. - -## Appendix - -### Appendix A: CRC16 reference implementation in ANSI C - - /* - * Copyright 2001-2010 Georges Menie (www.menie.org) - * Copyright 2010 Salvatore Sanfilippo (adapted to Redis coding style) - * All rights reserved. 
- * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the University of California, Berkeley nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - /* CRC16 implementation according to CCITT standards. 
- * - * Note by @antirez: this is actually the XMODEM CRC 16 algorithm, using the - * following parameters: - * - * Name : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN" - * Width : 16 bit - * Poly : 1021 (That is actually x^16 + x^12 + x^5 + 1) - * Initialization : 0000 - * Reflect Input byte : False - * Reflect Output CRC : False - * Xor constant to output CRC : 0000 - * Output for "123456789" : 31C3 - */ - - static const uint16_t crc16tab[256]= { - 0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7, - 0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef, - 0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6, - 0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de, - 0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485, - 0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d, - 0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4, - 0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc, - 0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823, - 0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b, - 0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12, - 0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a, - 0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41, - 0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49, - 0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70, - 0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78, - 0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f, - 0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067, - 0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e, - 0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256, - 0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d, - 0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405, - 0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c, - 0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634, - 0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab, - 0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3, - 
0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a, - 0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92, - 0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9, - 0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1, - 0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8, - 0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0 - }; - - uint16_t crc16(const char *buf, int len) { - int counter; - uint16_t crc = 0; - for (counter = 0; counter < len; counter++) - crc = (crc<<8) ^ crc16tab[((crc>>8) ^ *buf++)&0x00FF]; - return crc; - } diff --git a/content/develop/reference/command-arguments.md b/content/develop/reference/command-arguments.md index 44ad46a0b3..fdb8aba0d7 100644 --- a/content/develop/reference/command-arguments.md +++ b/content/develop/reference/command-arguments.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/command-arguments +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: How Redis commands expose their documentation programmatically linkTitle: Command arguments title: Redis command arguments diff --git a/content/develop/reference/command-tips.md b/content/develop/reference/command-tips.md index d571fa8a8d..1e3ec72673 100644 --- a/content/develop/reference/command-tips.md +++ b/content/develop/reference/command-tips.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/command-tips +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Get additional information about a command linkTitle: Command tips title: Redis command tips @@ -50,7 +58,9 @@ In cases where the client should adopt a behavior different than the default, th This tip is in-use by commands that don't accept key name arguments. The command operates atomically per shard. - **multi_shard:** the client should execute the command on several shards. - The shards that execute the command are determined by the hash slots of its input key name arguments. 
+ The client should split the inputs according to the hash slots of its input key name arguments. + For example, the command `DEL {foo} {foo}1 bar` should be split to `DEL {foo} {foo}1` and `DEL bar`. + If the keys are hashed to more than a single slot, the command must be split even if all the slots are managed by the same shard. Examples for such commands include [`MSET`](/commands/mset), [`MGET`](/commands/mget) and [`DEL`](/commands/del). However, note that [`SUNIONSTORE`](/commands/sunionstore) isn't considered as _multi_shard_ because all of its keys must belong to the same hash slot. - **special:** indicates a non-trivial form of the client's request policy, such as the [`SCAN`](/commands/scan) command. diff --git a/content/develop/reference/eviction/index.md b/content/develop/reference/eviction/index.md index 05c6a0fa89..22de39c968 100644 --- a/content/develop/reference/eviction/index.md +++ b/content/develop/reference/eviction/index.md @@ -1,8 +1,14 @@ --- -aliases: -- /topics/lru_cache -- /topics/lru_cache.md -- /docs/manual/eviction +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Overview of Redis key eviction policies (LRU, LFU, etc.) linkTitle: Eviction title: Key eviction @@ -157,7 +163,7 @@ By default Redis is configured to: * Saturate the counter at, around, one million requests. * Decay the counter every one minute. -Those should be reasonable values and were tested experimental, but the user may want to play with these configuration settings to pick optimal values. +Those should be reasonable values and were tested experimentally, but the user may want to play with these configuration settings to pick optimal values. Instructions about how to tune these parameters can be found inside the example `redis.conf` file in the source distribution. 
Briefly, they are: diff --git a/content/develop/reference/gopher.md b/content/develop/reference/gopher.md index 1541f9ad37..62ff3886d8 100644 --- a/content/develop/reference/gopher.md +++ b/content/develop/reference/gopher.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/gopher +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: The Redis Gopher protocol implementation linkTitle: Gopher protocol title: Redis and the Gopher protocol diff --git a/content/develop/reference/key-specs.md b/content/develop/reference/key-specs.md index c0efdc878d..55115c4dae 100644 --- a/content/develop/reference/key-specs.md +++ b/content/develop/reference/key-specs.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/key-specs +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: What are command key specification and how to use them in your client linkTitle: Command key specifications title: Command key specifications diff --git a/content/develop/reference/modules/_index.md b/content/develop/reference/modules/_index.md index f62928aa90..dc830dc641 100644 --- a/content/develop/reference/modules/_index.md +++ b/content/develop/reference/modules/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/modules-intro +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Introduction to writing Redis modules ' diff --git a/content/develop/reference/modules/modules-api-ref.md b/content/develop/reference/modules/modules-api-ref.md index 4d07ad8345..8abd6454b6 100644 --- a/content/develop/reference/modules/modules-api-ref.md +++ b/content/develop/reference/modules/modules-api-ref.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/modules-api-ref +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Reference for the Redis Modules API ' diff --git a/content/develop/reference/modules/modules-blocking-ops.md 
b/content/develop/reference/modules/modules-blocking-ops.md index 52c5137841..ec03880f6f 100644 --- a/content/develop/reference/modules/modules-blocking-ops.md +++ b/content/develop/reference/modules/modules-blocking-ops.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/modules-blocking-ops +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'How to implement blocking commands in a Redis module ' diff --git a/content/develop/reference/modules/modules-native-types.md b/content/develop/reference/modules/modules-native-types.md index 48c6b24671..0ccf2a6a43 100644 --- a/content/develop/reference/modules/modules-native-types.md +++ b/content/develop/reference/modules/modules-native-types.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/modules-native-types +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'How to use native types in a Redis module ' diff --git a/content/develop/reference/protocol-spec.md b/content/develop/reference/protocol-spec.md index e741c566b6..4d381b2543 100644 --- a/content/develop/reference/protocol-spec.md +++ b/content/develop/reference/protocol-spec.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/protocol +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Redis serialization protocol (RESP) is the wire protocol that clients implement linkTitle: Protocol spec @@ -29,7 +37,7 @@ RESP is the protocol you should implement in your Redis client. {{% alert title="Note" color="info" %}} The protocol outlined here is used only for client-server communication. -[Redis Cluster](/docs/reference/cluster-spec) uses a different binary protocol for exchanging messages between nodes. +[Redis Cluster]({{< relref "/develop/reference/cluster-spec" >}}) uses a different binary protocol for exchanging messages between nodes. 
{{% /alert %}} ## RESP versions @@ -61,12 +69,12 @@ This is the simplest model possible; however, there are some exceptions: * Redis requests can be [pipelined](#multiple-commands-and-pipelining). Pipelining enables clients to send multiple commands at once and wait for replies later. -* When a RESP2 connection subscribes to a [Pub/Sub](/docs/manual/pubsub) channel, the protocol changes semantics and becomes a *push* protocol. +* When a RESP2 connection subscribes to a [Pub/Sub]({{< relref "/develop/manual/pubsub" >}}) channel, the protocol changes semantics and becomes a *push* protocol. The client no longer requires sending commands because the server will automatically send new messages to the client (for the channels the client is subscribed to) as soon as they are received. * The [`MONITOR`](/commands/monitor) command. Invoking the [`MONITOR`](/commands/monitor) command switches the connection to an ad-hoc push mode. The protocol of this mode is not specified but is obvious to parse. -* [Protected mode](/docs/management/security/#protected-mode). +* [Protected mode]({{< relref "/develop/management/security/#protected-mode" >}}). Connections opened from a non-loopback address to a Redis while in protected mode are denied and terminated by the server. Before terminating the connection, Redis unconditionally sends a `-DENIED` reply, regardless of whether the client writes to the socket. * The [RESP3 Push type](#resp3-pushes). @@ -473,7 +481,7 @@ Example: (The raw RESP encoding is split into multiple lines for readability). Some client libraries may ignore the difference between this type and the string type and return a native string in both cases. -However, interactive clients, such as command line interfaces (e.g., [`redis-cli`](/docs/manual/cli)), can use this type and know that their output should be presented to the human user as is and without quoting the string. 
+However, interactive clients, such as command line interfaces (e.g., [`redis-cli`]({{< relref "/develop/manual/cli" >}})), can use this type and know that their output should be presented to the human user as is and without quoting the string. For example, the Redis command [`INFO`](/commands/info) outputs a report that includes newlines. When using RESP3, `redis-cli` displays it correctly because it is sent as a Verbatim String reply (with its three bytes being "txt"). diff --git a/content/develop/reference/sentinel-clients.md b/content/develop/reference/sentinel-clients.md index 14ca02de31..ec006ca001 100644 --- a/content/develop/reference/sentinel-clients.md +++ b/content/develop/reference/sentinel-clients.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/sentinel-clients +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: How to build clients for Redis Sentinel linkTitle: Sentinel clients title: Sentinel client spec diff --git a/content/develop/reference/signals.md b/content/develop/reference/signals.md deleted file mode 100644 index 82705c39db..0000000000 --- a/content/develop/reference/signals.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -aliases: -- /topics/signals -description: How Redis handles common Unix signals -linkTitle: Signal handling -title: Redis signal handling -weight: 8 ---- - -This document provides information about how Redis reacts to different POSIX signals such as `SIGTERM` and `SIGSEGV`. - -The information in this document **only applies to Redis version 2.6 or greater**. - -## SIGTERM and SIGINT - -The `SIGTERM` and `SIGINT` signals tell Redis to shut down gracefully. When the server receives this signal, -it does not immediately exit. Instead, it schedules -a shutdown similar to the one performed by the [`SHUTDOWN`](/commands/shutdown) command. 
The scheduled shutdown starts as soon as possible, specifically as long as the -current command in execution terminates (if any), with a possible additional -delay of 0.1 seconds or less. - -If the server is blocked by a long-running Lua script, -kill the script with [`SCRIPT KILL`](/commands/script-kill) if possible. The scheduled shutdown will -run just after the script is killed or terminates spontaneously. - -This shutdown process includes the following actions: - -* If there are any replicas lagging behind in replication: - * Pause clients attempting to write with [`CLIENT PAUSE`](/commands/client-pause) and the `WRITE` option. - * Wait up to the configured `shutdown-timeout` (default 10 seconds) for replicas to catch up with the master's replication offset. -* If a background child is saving the RDB file or performing an AOF rewrite, the child process is killed. -* If the AOF is active, Redis calls the `fsync` system call on the AOF file descriptor to flush the buffers on disk. -* If Redis is configured to persist on disk using RDB files, a synchronous (blocking) save is performed. Since the save is synchronous, it doesn't use any additional memory. -* If the server is daemonized, the PID file is removed. -* If the Unix domain socket is enabled, it gets removed. -* The server exits with an exit code of zero. - -IF the RDB file can't be saved, the shutdown fails, and the server continues to run in order to ensure no data loss. -Likewise, if the user just turned on AOF, and the server triggered the first AOF rewrite in order to create the initial AOF file but this file can't be saved, the shutdown fails and the server continues to run. -Since Redis 2.6.11, no further attempt to shut down will be made unless a new `SIGTERM` is received or the [`SHUTDOWN`](/commands/shutdown) command is issued. - -Since Redis 7.0, the server waits for lagging replicas up to a configurable `shutdown-timeout`, 10 seconds by default, before shutting down. 
-This provides a best effort to minimize the risk of data loss in a situation where no save points are configured and AOF is deactivated. -Before version 7.0, shutting down a heavily loaded master node in a diskless setup was more likely to result in data loss. -To minimize the risk of data loss in such setups, trigger a manual [`FAILOVER`](/commands/failover) (or [`CLUSTER FAILOVER`](/commands/cluster-failover)) to demote the master to a replica and promote one of the replicas to a new master before shutting down a master node. - -## SIGSEGV, SIGBUS, SIGFPE and SIGILL - -The following signals are handled as a Redis crash: - -* SIGSEGV -* SIGBUS -* SIGFPE -* SIGILL - -Once one of these signals is trapped, Redis stops any current operation and performs the following actions: - -* Adds a bug report to the log file. This includes a stack trace, dump of registers, and information about the state of clients. -* Since Redis 2.8, a fast memory test is performed as a first check of the reliability of the crashing system. -* If the server was daemonized, the PID file is removed. -* Finally the server unregisters its own signal handler for the received signal and resends the same signal to itself to make sure that the default action is performed, such as dumping the core on the file system. - -## What happens when a child process gets killed - -When the child performing the Append Only File rewrite gets killed by a signal, -Redis handles this as an error and discards the (probably partial or corrupted) -AOF file. It will attempt the rewrite again later. - -When the child performing an RDB save is killed, Redis handles the -condition as a more severe error. While the failure of an -AOF file rewrite can cause AOF file enlargement, failed RDB file -creation reduces durability. 
- -As a result of the child producing the RDB file being killed by a signal, -or when the child exits with an error (non zero exit code), Redis enters -a special error condition where no further write command is accepted. - -* Redis will continue to reply to read commands. -* Redis will reply to all write commands with a `MISCONFIG` error. - -This error condition will persist until it becomes possible to create an RDB file successfully. - -## Kill the RDB file without errors - -Sometimes the user may want to kill the RDB-saving child process without -generating an error. Since Redis version 2.6.10, this can be done using the signal `SIGUSR1`. This signal is handled in a special way: -it kills the child process like any other signal, but the parent process will -not detect this as a critical error and will continue to serve write -requests. diff --git a/content/develop/use/_index.md b/content/develop/use/_index.md index 0f81818ae9..33b9f80dbd 100644 --- a/content/develop/use/_index.md +++ b/content/develop/use/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: A developer's guide to Redis linkTitle: Use Redis title: Use Redis diff --git a/content/develop/use/client-side-caching.md b/content/develop/use/client-side-caching.md index 1ec9cfdfc9..1e0e472971 100644 --- a/content/develop/use/client-side-caching.md +++ b/content/develop/use/client-side-caching.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/client-side-caching +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Server-assisted, client-side caching in Redis ' diff --git a/content/develop/use/keyspace-notifications.md b/content/develop/use/keyspace-notifications.md index 2d4d3a5ec9..2577c62dea 100644 --- a/content/develop/use/keyspace-notifications.md +++ b/content/develop/use/keyspace-notifications.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/notifications +categories: +- 
docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Monitor changes to Redis keys and values in real time ' diff --git a/content/develop/use/keyspace.md b/content/develop/use/keyspace.md index 9461247f94..56dfce68ac 100644 --- a/content/develop/use/keyspace.md +++ b/content/develop/use/keyspace.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/manual/the-redis-keyspace +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Managing keys in Redis: Key expiration, scanning, altering and querying the key space diff --git a/content/develop/use/manual/_index.md b/content/develop/use/manual/_index.md index 0f81818ae9..33b9f80dbd 100644 --- a/content/develop/use/manual/_index.md +++ b/content/develop/use/manual/_index.md @@ -1,4 +1,14 @@ --- +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: A developer's guide to Redis linkTitle: Use Redis title: Use Redis diff --git a/content/develop/use/manual/client-side-caching.md b/content/develop/use/manual/client-side-caching.md index 1ec9cfdfc9..1e0e472971 100644 --- a/content/develop/use/manual/client-side-caching.md +++ b/content/develop/use/manual/client-side-caching.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/client-side-caching +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Server-assisted, client-side caching in Redis ' diff --git a/content/develop/use/manual/keyspace-notifications.md b/content/develop/use/manual/keyspace-notifications.md index 2d4d3a5ec9..2577c62dea 100644 --- a/content/develop/use/manual/keyspace-notifications.md +++ b/content/develop/use/manual/keyspace-notifications.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/notifications +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Monitor changes to Redis keys and values in real time ' diff --git 
a/content/develop/use/manual/keyspace.md b/content/develop/use/manual/keyspace.md index 9461247f94..56dfce68ac 100644 --- a/content/develop/use/manual/keyspace.md +++ b/content/develop/use/manual/keyspace.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/manual/the-redis-keyspace +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Managing keys in Redis: Key expiration, scanning, altering and querying the key space diff --git a/content/develop/use/manual/patterns/_index.md b/content/develop/use/manual/patterns/_index.md index 4b26e11692..714a33b1c3 100644 --- a/content/develop/use/manual/patterns/_index.md +++ b/content/develop/use/manual/patterns/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/reference/patterns +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Novel patterns for working with Redis data structures linkTitle: Patterns title: Redis programming patterns diff --git a/content/develop/use/manual/patterns/bulk-loading.md b/content/develop/use/manual/patterns/bulk-loading.md index fbd517dd77..3ce70b0d98 100644 --- a/content/develop/use/manual/patterns/bulk-loading.md +++ b/content/develop/use/manual/patterns/bulk-loading.md @@ -1,7 +1,14 @@ --- -aliases: -- /topics/mass-insertion -- /docs/reference/patterns/bulk-loading +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Writing data in bulk using the Redis protocol ' diff --git a/content/develop/use/manual/patterns/distributed-locks.md b/content/develop/use/manual/patterns/distributed-locks.md index f149be5428..55526d4fc2 100644 --- a/content/develop/use/manual/patterns/distributed-locks.md +++ b/content/develop/use/manual/patterns/distributed-locks.md @@ -1,8 +1,14 @@ --- -aliases: -- /topics/distlock -- /docs/reference/patterns/distributed-locks -- /docs/reference/patterns/distributed-locks.md +categories: +- docs +- develop +- stack +- oss +- rs 
+- rc +- oss +- kubernetes +- clients description: 'A distributed lock pattern with Redis ' diff --git a/content/develop/use/manual/patterns/indexes/index.md b/content/develop/use/manual/patterns/indexes/index.md index dd7d3685f5..1eea4a79fd 100644 --- a/content/develop/use/manual/patterns/indexes/index.md +++ b/content/develop/use/manual/patterns/indexes/index.md @@ -1,7 +1,14 @@ --- -aliases: -- /topics/indexing -- /docs/reference/patterns/indexes +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Building secondary indexes in Redis ' diff --git a/content/develop/use/manual/patterns/twitter-clone.md b/content/develop/use/manual/patterns/twitter-clone.md index 36f3eb2ed7..b8d3fb823a 100644 --- a/content/develop/use/manual/patterns/twitter-clone.md +++ b/content/develop/use/manual/patterns/twitter-clone.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/reference/patterns/twitter-clone +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Learn several Redis patterns by building a Twitter clone linkTitle: Patterns example title: Redis patterns example diff --git a/content/develop/use/manual/pipelining/index.md b/content/develop/use/manual/pipelining/index.md index d472ffc018..88343d0020 100644 --- a/content/develop/use/manual/pipelining/index.md +++ b/content/develop/use/manual/pipelining/index.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/pipelining +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: How to optimize round-trip times by batching Redis commands linkTitle: Pipelining title: Redis pipelining diff --git a/content/develop/use/patterns/_index.md b/content/develop/use/patterns/_index.md index 4b26e11692..714a33b1c3 100644 --- a/content/develop/use/patterns/_index.md +++ b/content/develop/use/patterns/_index.md @@ -1,6 +1,14 @@ --- -aliases: -- /docs/reference/patterns +categories: +- docs +- develop +- stack 
+- oss +- rs +- rc +- oss +- kubernetes +- clients description: Novel patterns for working with Redis data structures linkTitle: Patterns title: Redis programming patterns diff --git a/content/develop/use/patterns/bulk-loading.md b/content/develop/use/patterns/bulk-loading.md index fbd517dd77..3ce70b0d98 100644 --- a/content/develop/use/patterns/bulk-loading.md +++ b/content/develop/use/patterns/bulk-loading.md @@ -1,7 +1,14 @@ --- -aliases: -- /topics/mass-insertion -- /docs/reference/patterns/bulk-loading +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Writing data in bulk using the Redis protocol ' diff --git a/content/develop/use/patterns/distributed-locks.md b/content/develop/use/patterns/distributed-locks.md index f149be5428..55526d4fc2 100644 --- a/content/develop/use/patterns/distributed-locks.md +++ b/content/develop/use/patterns/distributed-locks.md @@ -1,8 +1,14 @@ --- -aliases: -- /topics/distlock -- /docs/reference/patterns/distributed-locks -- /docs/reference/patterns/distributed-locks.md +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'A distributed lock pattern with Redis ' diff --git a/content/develop/use/patterns/indexes/index.md b/content/develop/use/patterns/indexes/index.md index dd7d3685f5..1eea4a79fd 100644 --- a/content/develop/use/patterns/indexes/index.md +++ b/content/develop/use/patterns/indexes/index.md @@ -1,7 +1,14 @@ --- -aliases: -- /topics/indexing -- /docs/reference/patterns/indexes +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: 'Building secondary indexes in Redis ' diff --git a/content/develop/use/patterns/twitter-clone.md b/content/develop/use/patterns/twitter-clone.md index 36f3eb2ed7..b8d3fb823a 100644 --- a/content/develop/use/patterns/twitter-clone.md +++ b/content/develop/use/patterns/twitter-clone.md @@ -1,6 +1,14 @@ --- -aliases: -- 
/docs/reference/patterns/twitter-clone +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: Learn several Redis patterns by building a Twitter clone linkTitle: Patterns example title: Redis patterns example diff --git a/content/develop/use/pipelining/index.md b/content/develop/use/pipelining/index.md index d472ffc018..88343d0020 100644 --- a/content/develop/use/pipelining/index.md +++ b/content/develop/use/pipelining/index.md @@ -1,6 +1,14 @@ --- -aliases: -- /topics/pipelining +categories: +- docs +- develop +- stack +- oss +- rs +- rc +- oss +- kubernetes +- clients description: How to optimize round-trip times by batching Redis commands linkTitle: Pipelining title: Redis pipelining From 5b2884acb17ec51d55cec48242961f0329511567 Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Fri, 19 Jan 2024 18:06:05 +0100 Subject: [PATCH 05/15] Broken relrefs don't break a build but cause a warning --- config.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/config.toml b/config.toml index e5a018d4da..5f7a2a0e39 100644 --- a/config.toml +++ b/config.toml @@ -2,6 +2,7 @@ baseURL = "https://redis.io" title = "Redis Documentation" +refLinksErrorLevel = "WARNING" enableRobotsTXT = true enableGitInfo = true enableEmoji = true From 9013767609000a854ae51bad36c38474ee50a146 Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Mon, 22 Jan 2024 12:24:04 +0100 Subject: [PATCH 06/15] Testing broken relrefs --- content/develop/connect/clients/om-clients/_index.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/content/develop/connect/clients/om-clients/_index.md b/content/develop/connect/clients/om-clients/_index.md index 9ac424d7f3..c50158f282 100644 --- a/content/develop/connect/clients/om-clients/_index.md +++ b/content/develop/connect/clients/om-clients/_index.md @@ -20,9 +20,9 @@ Redis OM (pronounced *REDiss OHM*) is a library that provides object mapping for You can use Redis OM with the following 
four programming languages: -* [Node.js]({{< relref "/develop/connect/clients/om-clients/stack-node/" >}}) -* [Python]({{< relref "/develop/connect/clients/om-clients/stack-python/" >}}) -* [C# | .NET]({{< relref "/develop/connect/clients/om-clients/stack-dotnet/" >}}) -* [Java | Spring]({{< relref "/develop/connect/clients/om-clients/stack-spring/" >}}) +* [Node.js]({{< relref "/develop/connect/clients/om-clients/stack-node" >}}) +* [Python]({{< relref "/develop/connect/clients/om-clients/stack-python" >}}) +* [C# | .NET]({{< relref "/develop/connect/clients/om-clients/stack-dotnet" >}}) +* [Java | Spring]({{< relref "/develop/connect/clients/om-clients/stack-spring" >}})
From 9e50dd79ebffd010cb481920f70112d82d2d78df Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Mon, 22 Jan 2024 14:12:47 +0100 Subject: [PATCH 07/15] Added some corrected links as CSV --- build/migrate.sh | 28 --------- build/migrate/corrected_dev_refs.csv | 92 ++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 28 deletions(-) delete mode 100755 build/migrate.sh create mode 100644 build/migrate/corrected_dev_refs.csv diff --git a/build/migrate.sh b/build/migrate.sh deleted file mode 100755 index 5bf125b173..0000000000 --- a/build/migrate.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -export WD=$PWD/.. -export DOCS_SRC_FOLDER=$WD/content/tmp/docs -export DOCS_SRC_CMD_FOLDER=$WD/content/tmp/commands -export DOCS_DEV=$WD/content/develop/ -export DOCS_ROOT=$WD/content/ - -# Use the original index.json file -cd $WD -cp ./data/components/index.json ./data/components/index.json.bkp -cp ./data/components/index_migrate.json ./data/components/index.json -python3 ./build/make.py -mv ./data/components/index.json.bkp ./data/components/index.json - -# Copy commands -cp -R $DOCS_SRC_CMD_FOLDER $DOCS_ROOT/ - -# Only copy the developer documentation -cp -R $DOCS_SRC_FOLDER/get-started $DOCS_DEV/ -cp -R $DOCS_SRC_FOLDER/connect $DOCS_DEV/ -cp -R $DOCS_SRC_FOLDER/data-types $DOCS_DEV/ -cp -R $DOCS_SRC_FOLDER/interact $DOCS_DEV/ -cp -R $DOCS_SRC_FOLDER/manual $DOCS_DEV/use -cp -R $DOCS_SRC_FOLDER/reference $DOCS_DEV/ -rm -Rf $DOCS_DEV/reference/signals -rm -Rf $DOCS_DEV/reference/cluster-spec -rm -Rf $DOCS_DEV/reference/arm -rm -Rf $DOCS_DEV/reference/internals \ No newline at end of file diff --git a/build/migrate/corrected_dev_refs.csv b/build/migrate/corrected_dev_refs.csv new file mode 100644 index 0000000000..bd11b22995 --- /dev/null +++ b/build/migrate/corrected_dev_refs.csv @@ -0,0 +1,92 @@ +Broken Ref;Fixed Ref;Fixed Ref +/develop/about/about-stack/;/operate/oss_and_stack/;Misses an introduction of Redis and Redis Stack 
+/develop/clients/om-clients/stack-dotnet/;/develop/connect/clients/om-clients/stack-dotnet;Referenced files should not have a trailing slash +/develop/clients/om-clients/stack-node/;/develop/connect/clients/om-clients/stack-node; +/develop/clients/om-clients/stack-python/;/develop/connect/clients/om-clients/stack-python; +/develop/clients/om-clients/stack-spring/;/develop/connect/clients/om-clients/stack-spring; +/develop/commands/ft.create.md;/commands/ft.create/; +/develop/connect/clients/om-clients/stack-dotnet/;/develop/connect/clients/om-clients/stack-dotnet; +/develop/connect/clients/om-clients/stack-node/;/develop/connect/clients/om-clients/stack-node; +/develop/connect/clients/om-clients/stack-python/;/develop/connect/clients/om-clients/stack-python; +/develop/connect/clients/om-clients/stack-spring/;/develop/connect/clients/om-clients/stack-spring; +/develop/connect/clients/python/;/develop/connect/clients/python; +/develop/data-types/bitfields/;/develop/data-types/bitfields; +/develop/data-types/bitmaps/;/develop/data-types/bitmaps; +/develop/data-types/geospatial/;/develop/data-types/geospatial; +/develop/data-types/hashes/;/develop/data-types/hashes; +/develop/data-types/hyperloglogs;/develop/data-types/probabilistic/hyperloglogs/; +/develop/data-types/hyperloglogs;/develop/data-types/probabilistic/hyperloglogs/; +/develop/data-types/json/path/;/develop/data-types/json/path; +/develop/data-types/lists/;/develop/data-types/lists; +/develop/data-types/sets/;/develop/data-types/sets; +/develop/data-types/streams/;/develop/data-types/streams; +/develop/data-types/strings/;/develop/data-types/strings; +/develop/get-started/data-store/;/develop/get-started/data-store; +/develop/get-started/document-database;/develop/get-started/document-database; +/develop/get-started/faq/;/develop/get-started/faq; +/develop/get-started/vector-database/;/develop/get-started/vector-database; 
+/develop/getting-started/install-stack/;/operate/oss_and_stack/install/install-stack/; +/develop/getting-started/install-stack/docker;/operate/oss_and_stack/install/install-stack/docker; +/develop/install/install-stack;/operate/oss_and_stack/install/install-stack/; +/develop/install/install-stack/;/operate/oss_and_stack/install/install-stack/; +/develop/interact/programmability/triggers-and-functions/concepts/function_flags/;/develop/interact/programmability/triggers-and-functions/concepts/Function_Flags; +/develop/interact/programmability/triggers-and-functions/concepts/javascript_api/;/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API; +/develop/interact/programmability/triggers-and-functions/concepts/sync_async/;/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async; +/develop/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/;/develop/interact/programmability/triggers-and-functions/concepts/triggers/Keyspace_Triggers; +/develop/interact/programmability/triggers-and-functions/concepts/triggers/stream_triggers/;/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers; +/develop/interact/programmability/triggers-and-functions/configuration/;/develop/interact/programmability/triggers-and-functions/Configuration; +/develop/interact/programmability/triggers-and-functions/quick_start;/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI; +/develop/interact/programmability/triggers-and-functions/quick_start/;/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI; +/develop/interact/pubsub/;/develop/interact/pubsub; +/develop/interact/search-and-query/administration/design/;/develop/interact/search-and-query/administration/design; +/develop/interact/search-and-query/administration/extensions/;/develop/interact/search-and-query/administration/extensions; 
+/develop/interact/search-and-query/administration/overview/;/develop/interact/search-and-query/administration/overview; +/develop/interact/search-and-query/advanced-concepts/chinese/;/develop/interact/search-and-query/advanced-concepts/chinese; +/develop/interact/search-and-query/advanced-concepts/phonetic_matching/;/develop/interact/search-and-query/advanced-concepts/phonetic_matching; +/develop/interact/search-and-query/advanced-concepts/query_syntax/;/develop/interact/search-and-query/advanced-concepts/query_syntax; +/develop/interact/search-and-query/advanced-concepts/sorting/;/develop/interact/search-and-query/advanced-concepts/sorting; +/develop/interact/search-and-query/advanced-concepts/stemming/;/develop/interact/search-and-query/advanced-concepts/stemming; +/develop/interact/search-and-query/advanced-concepts/stemming//;/develop/interact/search-and-query/advanced-concepts/stemming; +/develop/interact/search-and-query/advanced-concepts/stopwords/;/develop/interact/search-and-query/advanced-concepts/stopwords; +/develop/interact/search-and-query/advanced-concepts/tags/;/develop/interact/search-and-query/advanced-concepts/tags; +/develop/interact/search-and-query/advanced-concepts/vectors/;/develop/interact/search-and-query/advanced-concepts/vectors; +/develop/interact/search-and-query/basic-constructs/configuration-parameters/;/develop/interact/search-and-query/basic-constructs/configuration-parameters; +/develop/interact/search-and-query/basic-constructs/schema-definition/;/develop/interact/search-and-query/basic-constructs/schema-definition; +/develop/interact/search-and-query/img/polygons.png;/develop/interact/search-and-query/img/polygons.png;Markdown image reference, RelRefs don’t work with images. 
Markdown syntax ![Name](Ref) +/develop/interact/search-and-query/query/combined/;/develop/interact/search-and-query/query/combined; +/develop/interact/search-and-query/quickstart/;/develop/get-started/document-database; +/develop/interact/search-and-query/search/aggregations/;/develop/interact/search-and-query/advanced-concepts/aggregations; +/develop/interact/search-and-query/search/vectors/;/develop/get-started/vector-database; +/develop/management/;/operate/oss_and_stack/management/; +/develop/management/security/;/operate/oss_and_stack/management/security/; +/develop/manual/cli;/develop/connect/cli; +/develop/manual/cli/;/develop/connect/cli; +/develop/manual/client-side-caching/;/develop/use/client-side-caching; +/develop/manual/config/;/operate/oss_and_stack/management/config; +/develop/manual/data-types/streams;/develop/data-types/streams; +/develop/manual/eviction/;/develop/reference/eviction/; +/develop/manual/keyspace/;/develop/use/keyspace; +/develop/manual/programmability/;/develop/interact/programmability/; +/develop/manual/programmability/eval-intro;/develop/interact/programmability/eval-intro; +/develop/manual/programmability/eval-intro/;/develop/interact/programmability/eval-intro; +/develop/manual/programmability/functions-intro;/develop/interact/programmability/functions-intro; +/develop/manual/programmability/functions-intro/;/develop/interact/programmability/functions-intro; +/develop/manual/pubsub;/develop/interact/pubsub; +/develop/modules/;/operate/oss_and_stack/stack-with-enterprise/; +/develop/reference/cluster-spec;/operate/oss_and_stack/reference/cluster-spec; +/develop/stack;/operate/oss_and_stack/; +/develop/stack/;/develop/about/about-stack/; +/develop/stack/bloom;/develop/data-types/probabilistic/bloom-filter; +/develop/stack/json;/develop/data-types/json/; +/develop/stack/json/;/develop/data-types/json/; +/develop/stack/json/path;/develop/data-types/json/path; +/develop/stack/json/path/;/develop/data-types/json/path; 
+/develop/stack/search;/develop/interact/search-and-query/; +/develop/stack/search/;/develop/interact/search-and-query/; +/develop/stack/search/indexing_json;/develop/interact/search-and-query/indexing/; +/develop/stack/search/indexing_json/;/develop/interact/search-and-query/indexing/; +/develop/stack/search/reference/highlight;/develop/interact/search-and-query/advanced-concepts/highlight; +/develop/stack/search/reference/query_syntax;/develop/interact/search-and-query/advanced-concepts/query_syntax; +/develop/stack/search/reference/query_syntax/;/develop/interact/search-and-query/advanced-concepts/query_syntax; +/develop/stack/search/reference/vectors/;/develop/interact/search-and-query/advanced-concepts/vectors; +/develop/stack/timeseries/;/develop/data-types/timeseries/; \ No newline at end of file From d57d6d4d650d016fc1a54f4c8ef0c99443d2b21d Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Mon, 22 Jan 2024 20:06:30 +0100 Subject: [PATCH 08/15] Fixed broken relrefs --- build/migrate.py | 24 +++ build/migrate/corrected_dev_refs.csv | 33 +++- content/develop/connect/clients/_index.md | 12 +- content/develop/connect/clients/dotnet.md | 4 +- content/develop/connect/clients/go.md | 2 +- content/develop/connect/clients/java.md | 166 ++++++++++-------- content/develop/connect/clients/nodejs.md | 4 +- content/develop/connect/clients/python.md | 4 +- content/develop/connect/insight/_index.md | 2 +- .../tutorials/insight-stream-consumer.md | 4 +- content/develop/data-types/_index.md | 30 ++-- content/develop/data-types/json/_index.md | 2 +- .../develop/data-types/json/indexing_JSON.md | 6 +- .../data-types/probabilistic/Configuration.md | 4 +- content/develop/data-types/sets.md | 4 +- content/develop/data-types/sorted-sets.md | 2 +- content/develop/data-types/streams.md | 2 +- content/develop/data-types/strings.md | 2 +- .../data-types/timeseries/configuration.md | 4 +- content/develop/get-started/_index.md | 2 +- content/develop/get-started/data-store.md | 8 +- 
.../develop/get-started/document-database.md | 10 +- content/develop/get-started/faq.md | 2 +- .../develop/get-started/vector-database.md | 12 +- .../interact/programmability/_index.md | 2 +- .../programmability/functions-intro.md | 2 +- .../interact/programmability/lua-api.md | 6 +- .../triggers-and-functions/Debugging.md | 4 +- .../triggers-and-functions/Development.md | 2 +- .../triggers-and-functions/Quick_Start_CLI.md | 2 +- .../triggers-and-functions/Quick_Start_RI.md | 2 +- .../triggers-and-functions/_index.md | 2 +- .../concepts/Binary_Data.md | 12 +- .../concepts/Cluster_Support.md | 6 +- .../concepts/Library_Configuration.md | 2 +- .../concepts/Sync_Async.md | 4 +- .../concepts/triggers/KeySpace_Triggers.md | 4 +- .../concepts/triggers/Stream_Triggers.md | 4 +- content/develop/interact/pubsub.md | 2 +- .../interact/search-and-query/_index.md | 2 +- .../administration/overview.md | 4 +- .../advanced-concepts/_index.md | 10 +- .../advanced-concepts/escaping.md | 2 +- .../advanced-concepts/query_syntax.md | 8 +- .../advanced-concepts/scoring.md | 4 +- .../advanced-concepts/stemming.md | 2 +- .../advanced-concepts/tags.md | 2 +- .../advanced-concepts/vectors.md | 8 +- .../basic-constructs/_index.md | 6 +- .../configuration-parameters.md | 12 +- .../field-and-type-options.md | 6 +- .../search-and-query/indexing/_index.md | 20 +-- .../search-and-query/query/combined.md | 2 +- .../search-and-query/query/exact-match.md | 2 +- .../search-and-query/query/vector-search.md | 6 +- content/develop/reference/protocol-spec.md | 8 +- 56 files changed, 284 insertions(+), 221 deletions(-) diff --git a/build/migrate.py b/build/migrate.py index 3874c93fcc..caa51e64cc 100755 --- a/build/migrate.py +++ b/build/migrate.py @@ -9,6 +9,7 @@ from git import Repo from components.component import All from components.util import mkdir_p +import csv ''' @@ -100,6 +101,23 @@ def replace_links(markdown_content, old_prefix, new_prefix): updated_content = re.sub(link_pattern, r'\1' + 
new_prefix + r'\3', markdown_content) return updated_content +def _load_csv_file(file_path): + + result = {} + + script_path = os.getcwd() + '/' + __file__ + csv_file = slash(os.path.dirname(script_path), file_path) + + with open(csv_file) as cf: + reader = csv.DictReader(cf, delimiter=';') + for row in reader: + key = row['broken_ref'] + value = row['fixed_ref'] + result[key] = value + + return result + + ''' Replace the link within the file ''' @@ -110,6 +128,12 @@ def replace_links_in_file(file_path, old_prefix, new_prefix): link_pattern = re.compile(r'(\[.*?\]\()(' + re.escape(old_prefix) + r')(.*?)' + r'(\))') #updated_content = re.sub(link_pattern, r'\1' + '{{ relURL "" }}' + new_prefix + r'\3', file_content) updated_content = re.sub(link_pattern, r'\1' + '{{< relref "' + new_prefix + r'\3' + '" >}}' + r'\4', file_content) + + corrected_links = _load_csv_file('./migrate/corrected_dev_refs.csv') + + for k in corrected_links: + updated_content = updated_content.replace('{{< relref "' + k + '" >}}', '{{< relref "' + corrected_links[k] + '" >}}') + with open(file_path, 'w', encoding='utf-8') as file: file.write(updated_content) diff --git a/build/migrate/corrected_dev_refs.csv b/build/migrate/corrected_dev_refs.csv index bd11b22995..980c6b61dc 100644 --- a/build/migrate/corrected_dev_refs.csv +++ b/build/migrate/corrected_dev_refs.csv @@ -1,10 +1,10 @@ -Broken Ref;Fixed Ref;Fixed Ref +broken_ref;fixed_ref;comment /develop/about/about-stack/;/operate/oss_and_stack/;Misses an introduction of Redis and Redis Stack /develop/clients/om-clients/stack-dotnet/;/develop/connect/clients/om-clients/stack-dotnet;Referenced files should not have a trailing slash /develop/clients/om-clients/stack-node/;/develop/connect/clients/om-clients/stack-node; /develop/clients/om-clients/stack-python/;/develop/connect/clients/om-clients/stack-python; /develop/clients/om-clients/stack-spring/;/develop/connect/clients/om-clients/stack-spring; 
-/develop/commands/ft.create.md;/commands/ft.create/; +/develop/commands/ft.create.md;/commands/FT.CREATE; /develop/connect/clients/om-clients/stack-dotnet/;/develop/connect/clients/om-clients/stack-dotnet; /develop/connect/clients/om-clients/stack-node/;/develop/connect/clients/om-clients/stack-node; /develop/connect/clients/om-clients/stack-python/;/develop/connect/clients/om-clients/stack-python; @@ -14,68 +14,84 @@ Broken Ref;Fixed Ref;Fixed Ref /develop/data-types/bitmaps/;/develop/data-types/bitmaps; /develop/data-types/geospatial/;/develop/data-types/geospatial; /develop/data-types/hashes/;/develop/data-types/hashes; -/develop/data-types/hyperloglogs;/develop/data-types/probabilistic/hyperloglogs/; -/develop/data-types/hyperloglogs;/develop/data-types/probabilistic/hyperloglogs/; +/develop/data-types/hyperloglogs;/develop/data-types/probabilistic/hyperloglogs; /develop/data-types/json/path/;/develop/data-types/json/path; /develop/data-types/lists/;/develop/data-types/lists; /develop/data-types/sets/;/develop/data-types/sets; /develop/data-types/streams/;/develop/data-types/streams; +/develop/data-types/streams-tutorial;/develop/data-types/streams; /develop/data-types/strings/;/develop/data-types/strings; /develop/get-started/data-store/;/develop/get-started/data-store; -/develop/get-started/document-database;/develop/get-started/document-database; +/develop/get-started/document-database/;/develop/get-started/document-database; /develop/get-started/faq/;/develop/get-started/faq; /develop/get-started/vector-database/;/develop/get-started/vector-database; +/develop/getting-started/;/operate/oss_and_stack/install/; /develop/getting-started/install-stack/;/operate/oss_and_stack/install/install-stack/; /develop/getting-started/install-stack/docker;/operate/oss_and_stack/install/install-stack/docker; /develop/install/install-stack;/operate/oss_and_stack/install/install-stack/; /develop/install/install-stack/;/operate/oss_and_stack/install/install-stack/; 
/develop/interact/programmability/triggers-and-functions/concepts/function_flags/;/develop/interact/programmability/triggers-and-functions/concepts/Function_Flags; /develop/interact/programmability/triggers-and-functions/concepts/javascript_api/;/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API; +/develop/interact/programmability/triggers-and-functions/concepts/javascript_api/#clientcall;/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API;Hugo didn't like the links that contained '#' /develop/interact/programmability/triggers-and-functions/concepts/sync_async/;/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async; /develop/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/;/develop/interact/programmability/triggers-and-functions/concepts/triggers/Keyspace_Triggers; /develop/interact/programmability/triggers-and-functions/concepts/triggers/stream_triggers/;/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers; /develop/interact/programmability/triggers-and-functions/configuration/;/develop/interact/programmability/triggers-and-functions/Configuration; +/develop/interact/programmability/triggers-and-functions/configuration/#remote-task-default-timeout;/develop/interact/programmability/triggers-and-functions/Configuration; /develop/interact/programmability/triggers-and-functions/quick_start;/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI; /develop/interact/programmability/triggers-and-functions/quick_start/;/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI; /develop/interact/pubsub/;/develop/interact/pubsub; /develop/interact/search-and-query/administration/design/;/develop/interact/search-and-query/administration/design; +/develop/interact/search-and-query/administration/design/#query-execution-engine;/develop/interact/search-and-query/administration/design; 
/develop/interact/search-and-query/administration/extensions/;/develop/interact/search-and-query/administration/extensions; /develop/interact/search-and-query/administration/overview/;/develop/interact/search-and-query/administration/overview; +/develop/interact/search-and-query/administration/overview/#auto-complete;/develop/interact/search-and-query/administration/overview; /develop/interact/search-and-query/advanced-concepts/chinese/;/develop/interact/search-and-query/advanced-concepts/chinese; +/develop/interact/search-and-query/advanced-concepts/chinese/#using-custom-dictionaries;/develop/interact/search-and-query/advanced-concepts/chinese; /develop/interact/search-and-query/advanced-concepts/phonetic_matching/;/develop/interact/search-and-query/advanced-concepts/phonetic_matching; /develop/interact/search-and-query/advanced-concepts/query_syntax/;/develop/interact/search-and-query/advanced-concepts/query_syntax; /develop/interact/search-and-query/advanced-concepts/sorting/;/develop/interact/search-and-query/advanced-concepts/sorting; /develop/interact/search-and-query/advanced-concepts/stemming/;/develop/interact/search-and-query/advanced-concepts/stemming; /develop/interact/search-and-query/advanced-concepts/stemming//;/develop/interact/search-and-query/advanced-concepts/stemming; +/develop/interact/search-and-query/advanced-concepts/stemming//#supported-languages;/develop/interact/search-and-query/advanced-concepts/stemming; /develop/interact/search-and-query/advanced-concepts/stopwords/;/develop/interact/search-and-query/advanced-concepts/stopwords; /develop/interact/search-and-query/advanced-concepts/tags/;/develop/interact/search-and-query/advanced-concepts/tags; /develop/interact/search-and-query/advanced-concepts/vectors/;/develop/interact/search-and-query/advanced-concepts/vectors; +/develop/interact/search-and-query/advanced-concepts/vectors/#querying-vector-fields;/develop/interact/search-and-query/advanced-concepts/vectors; 
+/develop/interact/search-and-query/advanced-concepts/vectors/#vector-search-examples;/develop/interact/search-and-query/advanced-concepts/vectors; /develop/interact/search-and-query/basic-constructs/configuration-parameters/;/develop/interact/search-and-query/basic-constructs/configuration-parameters; +/develop/interact/search-and-query/basic-constructs/configuration-parameters/#default_dialect;/develop/interact/search-and-query/basic-constructs/configuration-parameters; /develop/interact/search-and-query/basic-constructs/schema-definition/;/develop/interact/search-and-query/basic-constructs/schema-definition; /develop/interact/search-and-query/img/polygons.png;/develop/interact/search-and-query/img/polygons.png;Markdown image reference, RelRefs don’t work with images. Markdown syntax ![Name](Ref) /develop/interact/search-and-query/query/combined/;/develop/interact/search-and-query/query/combined; /develop/interact/search-and-query/quickstart/;/develop/get-started/document-database; /develop/interact/search-and-query/search/aggregations/;/develop/interact/search-and-query/advanced-concepts/aggregations; +/develop/interact/search-and-query/search/aggregations/#cursor-api;/develop/interact/search-and-query/advanced-concepts/aggregations; /develop/interact/search-and-query/search/vectors/;/develop/get-started/vector-database; +/develop/interact/search-and-query/search/vectors/#creation-attributes-per-algorithm;/develop/get-started/vector-database; /develop/management/;/operate/oss_and_stack/management/; /develop/management/security/;/operate/oss_and_stack/management/security/; +/develop/management/security/#protected-mode;/operate/oss_and_stack/management/security/; /develop/manual/cli;/develop/connect/cli; /develop/manual/cli/;/develop/connect/cli; /develop/manual/client-side-caching/;/develop/use/client-side-caching; /develop/manual/config/;/operate/oss_and_stack/management/config; /develop/manual/data-types/streams;/develop/data-types/streams; 
-/develop/manual/eviction/;/develop/reference/eviction/; +/develop/manual/eviction/;/develop/reference/eviction; /develop/manual/keyspace/;/develop/use/keyspace; /develop/manual/programmability/;/develop/interact/programmability/; +/develop/manual/programmability/#read-only_scripts;/develop/interact/programmability/; /develop/manual/programmability/eval-intro;/develop/interact/programmability/eval-intro; +/develop/manual/programmability/eval-intro/#eval-flags;/develop/interact/programmability/eval-intro; /develop/manual/programmability/eval-intro/;/develop/interact/programmability/eval-intro; /develop/manual/programmability/functions-intro;/develop/interact/programmability/functions-intro; /develop/manual/programmability/functions-intro/;/develop/interact/programmability/functions-intro; +/develop/manual/programmability/functions-intro/#function-flags;/develop/interact/programmability/functions-intro; /develop/manual/pubsub;/develop/interact/pubsub; /develop/modules/;/operate/oss_and_stack/stack-with-enterprise/; /develop/reference/cluster-spec;/operate/oss_and_stack/reference/cluster-spec; /develop/stack;/operate/oss_and_stack/; -/develop/stack/;/develop/about/about-stack/; +/develop/stack/;/operate/oss_and_stack/; /develop/stack/bloom;/develop/data-types/probabilistic/bloom-filter; /develop/stack/json;/develop/data-types/json/; /develop/stack/json/;/develop/data-types/json/; @@ -85,8 +101,11 @@ Broken Ref;Fixed Ref;Fixed Ref /develop/stack/search/;/develop/interact/search-and-query/; /develop/stack/search/indexing_json;/develop/interact/search-and-query/indexing/; /develop/stack/search/indexing_json/;/develop/interact/search-and-query/indexing/; +/develop/stack/search/indexing_json/#index-json-arrays-as-vector;/develop/interact/search-and-query/indexing/; /develop/stack/search/reference/highlight;/develop/interact/search-and-query/advanced-concepts/highlight; 
/develop/stack/search/reference/query_syntax;/develop/interact/search-and-query/advanced-concepts/query_syntax; /develop/stack/search/reference/query_syntax/;/develop/interact/search-and-query/advanced-concepts/query_syntax; +/develop/stack/search/reference/query_syntax/#query-attributes;/develop/interact/search-and-query/advanced-concepts/query_syntax; /develop/stack/search/reference/vectors/;/develop/interact/search-and-query/advanced-concepts/vectors; +/develop/stack/search/reference/vectors/#runtime-attributes;/develop/interact/search-and-query/advanced-concepts/vectors; /develop/stack/timeseries/;/develop/data-types/timeseries/; \ No newline at end of file diff --git a/content/develop/connect/clients/_index.md b/content/develop/connect/clients/_index.md index d281bb9f82..80f7309059 100644 --- a/content/develop/connect/clients/_index.md +++ b/content/develop/connect/clients/_index.md @@ -15,9 +15,9 @@ title: Connect with Redis clients weight: 45 --- -Here, you will learn how to connect your application to a Redis database. If you're new to Redis, you might first want to [install Redis with Redis Stack and RedisInsight]({{< relref "/develop/getting-started/install-stack/" >}}). +Here, you will learn how to connect your application to a Redis database. If you're new to Redis, you might first want to [install Redis with Redis Stack and RedisInsight]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}). -For more Redis topics, see [Using]({{< relref "/develop/manual/" >}}) and [Managing]({{< relref "/develop/management/" >}}) Redis. +For more Redis topics, see [Using]({{< relref "/develop/manual/" >}}) and [Managing]({{< relref "/operate/oss_and_stack/management/" >}}) Redis. If you're ready to get started, see the following guides for the official client libraries you can use with Redis. For a complete list of community-driven clients, see [Clients](/resources/clients/). 
@@ -26,9 +26,9 @@ If you're ready to get started, see the following guides for the official client The Redis OM client libraries let you use the document modeling, indexing, and querying capabilities of Redis Stack much like the way you'd use an [ORM](https://en.wikipedia.org/wiki/Object%E2%80%93relational_mapping). The following Redis OM libraries support Redis Stack: -* [Redis OM .NET]({{< relref "/develop/clients/om-clients/stack-dotnet/" >}}) -* [Redis OM Node]({{< relref "/develop/clients/om-clients/stack-node/" >}}) -* [Redis OM Python]({{< relref "/develop/clients/om-clients/stack-python/" >}}) -* [Redis OM Spring]({{< relref "/develop/clients/om-clients/stack-spring/" >}}) +* [Redis OM .NET]({{< relref "/develop/connect/clients/om-clients/stack-dotnet" >}}) +* [Redis OM Node]({{< relref "/develop/connect/clients/om-clients/stack-node" >}}) +* [Redis OM Python]({{< relref "/develop/connect/clients/om-clients/stack-python" >}}) +* [Redis OM Spring]({{< relref "/develop/connect/clients/om-clients/stack-spring" >}})
\ No newline at end of file diff --git a/content/develop/connect/clients/dotnet.md b/content/develop/connect/clients/dotnet.md index a1c0c3457c..e9e469c636 100644 --- a/content/develop/connect/clients/dotnet.md +++ b/content/develop/connect/clients/dotnet.md @@ -20,7 +20,7 @@ Install Redis and the Redis client, then connect your .NET application to a Redi ## NRedisStack [NRedisStack](https://github.com/redis/NRedisStack) is a .NET client for Redis. -`NredisStack` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started]({{< relref "/develop/getting-started/" >}}) for Redis installation instructions. +`NredisStack` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started]({{< relref "/operate/oss_and_stack/install/" >}}) for Redis installation instructions. ### Install @@ -105,7 +105,7 @@ Console.WriteLine(db.StringGet("foo")); // prints bar #### Connect to your production Redis with TLS -When you deploy your application, use TLS and follow the [Redis security]({{< relref "/develop/management/security/" >}}) guidelines. +When you deploy your application, use TLS and follow the [Redis security]({{< relref "/operate/oss_and_stack/management/security/" >}}) guidelines. Before connecting your application to the TLS-enabled Redis server, ensure that your certificates and private keys are in the correct format. diff --git a/content/develop/connect/clients/go.md b/content/develop/connect/clients/go.md index 02f239c8e7..a496e8d2b7 100644 --- a/content/develop/connect/clients/go.md +++ b/content/develop/connect/clients/go.md @@ -115,7 +115,7 @@ client := redis.NewClusterClient(&redis.ClusterOptions{ #### Connect to your production Redis with TLS -When you deploy your application, use TLS and follow the [Redis security]({{< relref "/develop/management/security/" >}}) guidelines. 
+When you deploy your application, use TLS and follow the [Redis security]({{< relref "/operate/oss_and_stack/management/security/" >}}) guidelines. Establish a secure connection with your Redis database using this snippet. diff --git a/content/develop/connect/clients/java.md b/content/develop/connect/clients/java.md index fc7e79ebce..97dcfbc9f3 100644 --- a/content/develop/connect/clients/java.md +++ b/content/develop/connect/clients/java.md @@ -114,7 +114,7 @@ JedisCluster jedis = new JedisCluster(jedisClusterNodes); #### Connect to your production Redis with TLS -When you deploy your application, use TLS and follow the [Redis security]({{< relref "/develop/management/security/" >}}) guidelines. +When you deploy your application, use TLS and follow the [Redis security]({{< relref "/operate/oss_and_stack/management/security/" >}}) guidelines. Before connecting your application to the TLS-enabled Redis server, ensure that your certificates and private keys are in the correct format. @@ -194,104 +194,124 @@ public class Main { } ``` -### Example: Indexing and querying JSON documents +### Production usage -Make sure that you have Redis Stack and `Jedis` installed. +### Configuring Connection pool +As mentioned in the previous section, use `JedisPool` or `JedisPooled` to create a connection pool. +`JedisPooled`, added in Jedis version 4.0.0, provides capabilities similar to `JedisPool` but with a more straightforward API. +A connection pool holds a specified number of connections, creates more connections when necessary, and terminates them when they are no longer needed. 
-Import dependencies and add a sample `User` class: +Here is a simplified connection lifecycle in a pool: -```java -import redis.clients.jedis.JedisPooled; -import redis.clients.jedis.search.*; -import redis.clients.jedis.search.aggr.*; -import redis.clients.jedis.search.schemafields.*; - -class User { - private String name; - private String email; - private int age; - private String city; - - public User(String name, String email, int age, String city) { - this.name = name; - this.email = email; - this.age = age; - this.city = city; - } - - //... -} -``` +1. A connection is requested from the pool. +2. A connection is served: + - An idle connection is served when non-active connections are available, or + - A new connection is created when the number of connections is under `maxTotal`. +3. The connection becomes active. +4. The connection is released back to the pool. +5. The connection is marked as stale. +6. The connection is kept idle for `minEvictableIdleTime`. +7. The connection becomes evictable if the number of connections is greater than `minIdle`. +8. The connection is ready to be closed. -Connect to your Redis database with `JedisPooled`. +It's important to configure the connection pool correctly. +Use `GenericObjectPoolConfig` from [Apache Commons Pool2](https://commons.apache.org/proper/commons-pool/apidocs/org/apache/commons/pool2/impl/GenericObjectPoolConfig.html). ```java -JedisPooled jedis = new JedisPooled("localhost", 6379); +ConnectionPoolConfig poolConfig = new ConnectionPoolConfig(); +// maximum active connections in the pool, +// tune this according to your needs and application type +// default is 8 +poolConfig.setMaxTotal(8); + +// maximum idle connections in the pool, default is 8 +poolConfig.setMaxIdle(8); +// minimum idle connections in the pool, default 0 +poolConfig.setMinIdle(0); + +// Enables waiting for a connection to become available. 
+poolConfig.setBlockWhenExhausted(true); +// The maximum number of seconds to wait for a connection to become available +poolConfig.setMaxWait(Duration.ofSeconds(1)); + +// Enables sending a PING command periodically while the connection is idle. +poolConfig.setTestWhileIdle(true); +// controls the period between checks for idle connections in the pool +poolConfig.setTimeBetweenEvictionRuns(Duration.ofSeconds(1)); + +// JedisPooled does all hard work on fetching and releasing connection to the pool +// to prevent connection starvation +JedisPooled jedis = new JedisPooled(poolConfig, "localhost", 6379); ``` -Let's create some test data to add to your database. - -```java -User user1 = new User("Paul John", "paul.john@example.com", 42, "London"); -User user2 = new User("Eden Zamir", "eden.zamir@example.com", 29, "Tel Aviv"); -User user3 = new User("Paul Zamir", "paul.zamir@example.com", 35, "Tel Aviv"); -``` +### Timeout -Create an index. In this example, all JSON documents with the key prefix `user:` are indexed. For more information, see [Query syntax]({{< relref "/develop/interact/search-and-query/query/" >}}). +To set a timeout for a connection, use the `JedisPooled` or `JedisPool` constructor with the `timeout` parameter, or use `JedisClientConfig` with the `socketTimeout` and `connectionTimeout` parameters: ```java -jedis.ftCreate("idx:users", - FTCreateParams.createParams() - .on(IndexDataType.JSON) - .addPrefix("user:"), - TextField.of("$.name").as("name"), - TagField.of("$.city").as("city"), - NumericField.of("$.age").as("age") +HostAndPort hostAndPort = new HostAndPort("localhost", 6379); + +JedisPooled jedisWithTimeout = new JedisPooled(hostAndPort, + DefaultJedisClientConfig.builder() + .socketTimeoutMillis(5000) // set timeout to 5 seconds + .connectionTimeoutMillis(5000) // set connection timeout to 5 seconds + .build(), + poolConfig ); ``` -Use [`JSON.SET`](/commands/json.set) to set each user value at the specified path. 
+### Exception handling +The Jedis Exception Hierarchy is rooted on `JedisException`, which implements `RuntimeException`, and are therefore all unchecked exceptions. -```java -jedis.jsonSetWithEscape("user:1", user1); -jedis.jsonSetWithEscape("user:2", user2); -jedis.jsonSetWithEscape("user:3", user3); +``` +JedisException +├── JedisDataException +│ ├── JedisRedirectionException +│ │ ├── JedisMovedDataException +│ │ └── JedisAskDataException +│ ├── AbortedTransactionException +│ ├── JedisAccessControlException +│ └── JedisNoScriptException +├── JedisClusterException +│ ├── JedisClusterOperationException +│ ├── JedisConnectionException +│ └── JedisValidationException +└── InvalidURIException ``` -Let's find user `Paul` and filter the results by age. +#### General Exceptions +In general, Jedis can throw the following exceptions while executing commands: -```java -var query = new Query("Paul @age:[30 40]"); -var result = jedis.ftSearch("idx:users", query).getDocuments(); -System.out.println(result); -// Prints: [id:user:3, score: 1.0, payload:null, properties:[$={"name":"Paul Zamir","email":"paul.zamir@example.com","age":35,"city":"Tel Aviv"}]] -``` +- `JedisConnectionException` - when the connection to Redis is lost or closed unexpectedly. Configure failover to handle this exception automatically with Resilience4J and the built-in Jedis failover mechanism. +- `JedisAccessControlException` - when the user does not have the permission to execute the command or the user ID and/or password are incorrect. +- `JedisDataException` - when there is a problem with the data being sent to or received from the Redis server. Usually, the error message will contain more information about the failed command. +- `JedisException` - this exception is a catch-all exception that can be thrown for any other unexpected errors. -Return only the `city` field. 
+Conditions when `JedisException` can be thrown: +- Bad return from a health check with the [`PING`](/commands/ping) command +- Failure during SHUTDOWN +- Pub/Sub failure when issuing commands (disconnect) +- Any unknown server messages +- Sentinel: can connect to sentinel but master is not monitored or all Sentinels are down. +- MULTI or DISCARD command failed +- Shard commands key hash check failed or no Reachable Shards +- Retry deadline exceeded/number of attempts (Retry Command Executor) +- POOL - pool exhausted, error adding idle objects, returning broken resources to the pool -```java -var city_query = new Query("Paul @age:[30 40]"); -var city_result = jedis.ftSearch("idx:users", city_query.returnFields("city")).getDocuments(); -System.out.println(city_result); -// Prints: [id:user:3, score: 1.0, payload:null, properties:[city=Tel Aviv]] -``` +All the Jedis exceptions are runtime exceptions and in most cases irrecoverable, so in general bubble up to the API capturing the error message. -Count all users in the same city. +## DNS cache and Redis -```java -AggregationBuilder ab = new AggregationBuilder("*") - .groupBy("@city", Reducers.count().as("count")); -AggregationResult ar = jedis.ftAggregate("idx:users", ab); +When you connect to a Redis with multiple endpoints, such as [Redis Enterprise Active-Active](https://redis.com/redis-enterprise/technology/active-active-geo-distribution/), it's recommended to disable the JVM's DNS cache to load-balance requests across multiple endpoints. 
-for (int idx=0; idx < ar.getTotalResults(); idx++) { - System.out.println(ar.getRow(idx).getString("city") + " - " + ar.getRow(idx).getString("count")); -} -// Prints: -// London - 1 -// Tel Aviv - 2 +You can do this in your application's code with the following snippet: +```java +java.security.Security.setProperty("networkaddress.cache.ttl","0"); +java.security.Security.setProperty("networkaddress.cache.negative.ttl", "0"); ``` ### Learn more * [Jedis API reference](https://www.javadoc.io/doc/redis.clients/jedis/latest/index.html) +* [Failover with Jedis](https://github.com/redis/jedis/blob/master/docs/failover.md) * [GitHub](https://github.com/redis/jedis) diff --git a/content/develop/connect/clients/nodejs.md b/content/develop/connect/clients/nodejs.md index b4e1a9981f..6937d43c1d 100644 --- a/content/develop/connect/clients/nodejs.md +++ b/content/develop/connect/clients/nodejs.md @@ -20,7 +20,7 @@ Install Redis and the Redis client, then connect your Node.js application to a R ## node-redis [node-redis](https://github.com/redis/node-redis) is a modern, high-performance Redis client for Node.js. -`node-redis` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started]({{< relref "/develop/getting-started/" >}}) for Redis installation instructions. +`node-redis` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started]({{< relref "/operate/oss_and_stack/install/" >}}) for Redis installation instructions. ### Install @@ -114,7 +114,7 @@ await cluster.quit(); #### Connect to your production Redis with TLS -When you deploy your application, use TLS and follow the [Redis security]({{< relref "/develop/management/security/" >}}) guidelines. +When you deploy your application, use TLS and follow the [Redis security]({{< relref "/operate/oss_and_stack/management/security/" >}}) guidelines. 
```js const client = createClient({ diff --git a/content/develop/connect/clients/python.md b/content/develop/connect/clients/python.md index a0cb4419fb..66535e802e 100644 --- a/content/develop/connect/clients/python.md +++ b/content/develop/connect/clients/python.md @@ -21,7 +21,7 @@ Install Redis and the Redis client, then connect your Python application to a Re Get started with the [redis-py](https://github.com/redis/redis-py) client for Redis. -`redis-py` requires a running Redis or [Redis Stack]({{< relref "/develop/getting-started/install-stack/" >}}) server. See [Getting started]({{< relref "/develop/getting-started/" >}}) for Redis installation instructions. +`redis-py` requires a running Redis or [Redis Stack]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}) server. See [Getting started]({{< relref "/operate/oss_and_stack/install/" >}}) for Redis installation instructions. ### Install @@ -95,7 +95,7 @@ For more information, see [redis-py Clustering](https://redis-py.readthedocs.io/ #### Connect to your production Redis with TLS -When you deploy your application, use TLS and follow the [Redis security]({{< relref "/develop/management/security/" >}}) guidelines. +When you deploy your application, use TLS and follow the [Redis security]({{< relref "/operate/oss_and_stack/management/security/" >}}) guidelines. ```python import redis diff --git a/content/develop/connect/insight/_index.md b/content/develop/connect/insight/_index.md index e020ff6a98..e311a1b42f 100644 --- a/content/develop/connect/insight/_index.md +++ b/content/develop/connect/insight/_index.md @@ -40,7 +40,7 @@ RedisInsight is a powerful tool for visualizing and optimizing data in Redis or Browse, filter and visualize your key-value Redis data structures. 
* [CRUD](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete) support for lists, hashes, strings, sets, sorted sets, and streams -* CRUD support for [JSON]({{< relref "/develop/stack/json" >}}) +* CRUD support for [JSON]({{< relref "/develop/data-types/json/" >}}) * Group keys according to their namespaces diff --git a/content/develop/connect/insight/tutorials/insight-stream-consumer.md b/content/develop/connect/insight/tutorials/insight-stream-consumer.md index 91c894799b..4d43e0e84a 100644 --- a/content/develop/connect/insight/tutorials/insight-stream-consumer.md +++ b/content/develop/connect/insight/tutorials/insight-stream-consumer.md @@ -19,7 +19,7 @@ A _stream_ is an append-only log file. When you add data to it, you cannot change it. That may seem like a disadvantage; however, a stream serves as a log or single source of truth. It can also be used as a buffer between processes that work at different speeds and do not need to know about each other. -For more conceptual information about streams, see [Redis Streams]({{< relref "/develop/manual/data-types/streams" >}}). +For more conceptual information about streams, see [Redis Streams]({{< relref "/develop/data-types/streams" >}}). In this topic, you will learn how to add and work with streams as well as consumer groups in RedisInsight. 
@@ -209,5 +209,5 @@ Use streams for auditing and processing events in banking, gaming, supply chain, ## Related topics -- [Redis Streams]({{< relref "/develop/manual/data-types/streams" >}}) +- [Redis Streams]({{< relref "/develop/data-types/streams" >}}) - [Introducing Redis Streams with RedisInsight, node.js, and Python](https://www.youtube.com/watch?v=q2UOkQmIo9Q) (video) \ No newline at end of file diff --git a/content/develop/data-types/_index.md b/content/develop/data-types/_index.md index c858ab38e5..f00d11f595 100644 --- a/content/develop/data-types/_index.md +++ b/content/develop/data-types/_index.md @@ -16,7 +16,7 @@ weight: 35 --- Redis is a data structure server. -At its core, Redis provides a collection of native data types that help you solve a wide variety of problems, from [caching]({{< relref "/develop/manual/client-side-caching/" >}}) to [queuing]({{< relref "/develop/data-types/lists/" >}}) to [event processing]({{< relref "/develop/data-types/streams/" >}}). +At its core, Redis provides a collection of native data types that help you solve a wide variety of problems, from [caching]({{< relref "/develop/use/client-side-caching" >}}) to [queuing]({{< relref "/develop/data-types/lists" >}}) to [event processing]({{< relref "/develop/data-types/streams" >}}). Below is a short description of each data type, with links to broader overviews and command references. If you'd like to try a comprehensive tutorial for each data structure, see their overview pages below. @@ -29,7 +29,7 @@ If you'd like to try a comprehensive tutorial for each data structure, see their [Redis strings]({{< relref "/develop/data-types/strings" >}}) are the most basic Redis data type, representing a sequence of bytes. 
For more information, see: -* [Overview of Redis strings]({{< relref "/develop/data-types/strings/" >}}) +* [Overview of Redis strings]({{< relref "/develop/data-types/strings" >}}) * [Redis string command reference](/commands/?group=string) ### Lists @@ -37,7 +37,7 @@ For more information, see: [Redis lists]({{< relref "/develop/data-types/lists" >}}) are lists of strings sorted by insertion order. For more information, see: -* [Overview of Redis lists]({{< relref "/develop/data-types/lists/" >}}) +* [Overview of Redis lists]({{< relref "/develop/data-types/lists" >}}) * [Redis list command reference](/commands/?group=list) ### Sets @@ -46,7 +46,7 @@ For more information, see: With a Redis set, you can add, remove, and test for existence in O(1) time (in other words, regardless of the number of set elements). For more information, see: -* [Overview of Redis sets]({{< relref "/develop/data-types/sets/" >}}) +* [Overview of Redis sets]({{< relref "/develop/data-types/sets" >}}) * [Redis set command reference](/commands/?group=set) ### Hashes @@ -55,7 +55,7 @@ For more information, see: As such, Redis hashes resemble [Python dictionaries](https://docs.python.org/3/tutorial/datastructures.html#dictionaries), [Java HashMaps](https://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html), and [Ruby hashes](https://ruby-doc.org/core-3.1.2/Hash.html). For more information, see: -* [Overview of Redis hashes]({{< relref "/develop/data-types/hashes/" >}}) +* [Overview of Redis hashes]({{< relref "/develop/data-types/hashes" >}}) * [Redis hashes command reference](/commands/?group=hash) ### Sorted sets @@ -80,39 +80,39 @@ For more information, see: [Redis geospatial indexes]({{< relref "/develop/data-types/geospatial" >}}) are useful for finding locations within a given geographic radius or bounding box. 
For more information, see: -* [Overview of Redis geospatial indexes]({{< relref "/develop/data-types/geospatial/" >}}) +* [Overview of Redis geospatial indexes]({{< relref "/develop/data-types/geospatial" >}}) * [Redis geospatial indexes command reference](/commands/?group=geo) ### Bitmaps -[Redis bitmaps]({{< relref "/develop/data-types/bitmaps/" >}}) let you perform bitwise operations on strings. +[Redis bitmaps]({{< relref "/develop/data-types/bitmaps" >}}) let you perform bitwise operations on strings. For more information, see: -* [Overview of Redis bitmaps]({{< relref "/develop/data-types/bitmaps/" >}}) +* [Overview of Redis bitmaps]({{< relref "/develop/data-types/bitmaps" >}}) * [Redis bitmap command reference](/commands/?group=bitmap) ### Bitfields -[Redis bitfields]({{< relref "/develop/data-types/bitfields/" >}}) efficiently encode multiple counters in a string value. +[Redis bitfields]({{< relref "/develop/data-types/bitfields" >}}) efficiently encode multiple counters in a string value. Bitfields provide atomic get, set, and increment operations and support different overflow policies. For more information, see: -* [Overview of Redis bitfields]({{< relref "/develop/data-types/bitfields/" >}}) +* [Overview of Redis bitfields]({{< relref "/develop/data-types/bitfields" >}}) * The [`BITFIELD`](/commands/bitfield) command. ### HyperLogLog -The [Redis HyperLogLog]({{< relref "/develop/data-types/hyperloglogs" >}}) data structures provide probabilistic estimates of the cardinality (i.e., number of elements) of large sets. For more information, see: +The [Redis HyperLogLog]({{< relref "/develop/data-types/probabilistic/hyperloglogs" >}}) data structures provide probabilistic estimates of the cardinality (i.e., number of elements) of large sets. 
For more information, see: -* [Overview of Redis HyperLogLog]({{< relref "/develop/data-types/hyperloglogs" >}}) +* [Overview of Redis HyperLogLog]({{< relref "/develop/data-types/probabilistic/hyperloglogs" >}}) * [Redis HyperLogLog command reference](/commands/?group=hyperloglog) ## Extensions To extend the features provided by the included data types, use one of these options: -1. Write your own custom [server-side functions in Lua]({{< relref "/develop/manual/programmability/" >}}). -1. Write your own Redis module using the [modules API]({{< relref "/develop/reference/modules/" >}}) or check out the [community-supported modules]({{< relref "/develop/modules/" >}}). -1. Use [JSON]({{< relref "/develop/stack/json/" >}}), [querying]({{< relref "/develop/stack/search/" >}}), [time series]({{< relref "/develop/stack/timeseries/" >}}), and other capabilities provided by [Redis Stack]({{< relref "/develop/stack/" >}}). +1. Write your own custom [server-side functions in Lua]({{< relref "/develop/interact/programmability/" >}}). +1. Write your own Redis module using the [modules API]({{< relref "/develop/reference/modules/" >}}) or check out the [community-supported modules]({{< relref "/operate/oss_and_stack/stack-with-enterprise/" >}}). +1. Use [JSON]({{< relref "/develop/data-types/json/" >}}), [querying]({{< relref "/develop/interact/search-and-query/" >}}), [time series]({{< relref "/develop/data-types/timeseries/" >}}), and other capabilities provided by [Redis Stack]({{< relref "/operate/oss_and_stack/" >}}).
diff --git a/content/develop/data-types/json/_index.md b/content/develop/data-types/json/_index.md index d552f9e6df..415f8b570b 100644 --- a/content/develop/data-types/json/_index.md +++ b/content/develop/data-types/json/_index.md @@ -170,7 +170,7 @@ To run RedisJSON with Docker, use the `redis-stack-server` Docker image: $ docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest ``` -For more information about running Redis Stack in a Docker container, see [Run Redis Stack on Docker]({{< relref "/develop/getting-started/install-stack/docker" >}}). +For more information about running Redis Stack in a Docker container, see [Run Redis Stack on Docker]({{< relref "/operate/oss_and_stack/install/install-stack/docker" >}}). ### Download binaries diff --git a/content/develop/data-types/json/indexing_JSON.md b/content/develop/data-types/json/indexing_JSON.md index 00f05688df..cf7b2811b4 100644 --- a/content/develop/data-types/json/indexing_JSON.md +++ b/content/develop/data-types/json/indexing_JSON.md @@ -15,8 +15,8 @@ title: Index/Search JSON documents weight: 2 --- -In addition to storing JSON documents, you can also index them using the [Search and Query]({{< relref "/develop/stack/search" >}}) feature. This enables full-text search capabilities and document retrieval based on their content. +In addition to storing JSON documents, you can also index them using the [Search and Query]({{< relref "/develop/interact/search-and-query/" >}}) feature. This enables full-text search capabilities and document retrieval based on their content. -To use these features, you must install two modules: RedisJSON and RediSearch. [Redis Stack]({{< relref "/develop/stack" >}}) automatically includes both modules. +To use these features, you must install two modules: RedisJSON and RediSearch. [Redis Stack]({{< relref "/operate/oss_and_stack/" >}}) automatically includes both modules. 
-See the [tutorial]({{< relref "/develop/stack/search/indexing_json" >}}) to learn how to search and query your JSON. \ No newline at end of file +See the [tutorial]({{< relref "/develop/interact/search-and-query/indexing/" >}}) to learn how to search and query your JSON. \ No newline at end of file diff --git a/content/develop/data-types/probabilistic/Configuration.md b/content/develop/data-types/probabilistic/Configuration.md index a8ae293d9e..e99c2e1d02 100644 --- a/content/develop/data-types/probabilistic/Configuration.md +++ b/content/develop/data-types/probabilistic/Configuration.md @@ -22,13 +22,13 @@ weight: 100 Setting configuration parameters at load-time is done by appending arguments after the `--loadmodule` argument when starting a server from the command line or after the `loadmodule` directive in a Redis config file. For example: -In [redis.conf]({{< relref "/develop/manual/config/" >}}): +In [redis.conf]({{< relref "/operate/oss_and_stack/management/config" >}}): ```sh loadmodule ./redisbloom.so [OPT VAL]... ``` -From the [Redis CLI]({{< relref "/develop/manual/cli/" >}}), using the [MODULE LOAD](/commands/module-load/) command: +From the [Redis CLI]({{< relref "/develop/connect/cli" >}}), using the [MODULE LOAD](/commands/module-load/) command: ``` 127.0.0.6379> MODULE LOAD redisbloom.so [OPT VAL]... diff --git a/content/develop/data-types/sets.md b/content/develop/data-types/sets.md index a8c043a243..e3623ad944 100644 --- a/content/develop/data-types/sets.md +++ b/content/develop/data-types/sets.md @@ -180,10 +180,10 @@ As an alternative, consider the [`SSCAN`](/commands/sscan), which lets you retri ## Alternatives Sets membership checks on large datasets (or on streaming data) can use a lot of memory. -If you're concerned about memory usage and don't need perfect precision, consider a [Bloom filter or Cuckoo filter]({{< relref "/develop/stack/bloom" >}}) as an alternative to a set. 
+If you're concerned about memory usage and don't need perfect precision, consider a [Bloom filter or Cuckoo filter]({{< relref "/develop/data-types/probabilistic/bloom-filter" >}}) as an alternative to a set. Redis sets are frequently used as a kind of index. -If you need to index and query your data, consider the [JSON]({{< relref "/develop/stack/json" >}}) data type and the [Search and query]({{< relref "/develop/stack/search" >}}) features. +If you need to index and query your data, consider the [JSON]({{< relref "/develop/data-types/json/" >}}) data type and the [Search and query]({{< relref "/develop/interact/search-and-query/" >}}) features. ## Learn more diff --git a/content/develop/data-types/sorted-sets.md b/content/develop/data-types/sorted-sets.md index 379533ff0c..e1d68e068f 100644 --- a/content/develop/data-types/sorted-sets.md +++ b/content/develop/data-types/sorted-sets.md @@ -249,7 +249,7 @@ This command's time complexity is O(log(n) + m), where _m_ is the number of resu ## Alternatives Redis sorted sets are sometimes used for indexing other Redis data structures. -If you need to index and query your data, consider the [JSON]({{< relref "/develop/stack/json" >}}) data type and the [Search and query]({{< relref "/develop/stack/search" >}}) features. +If you need to index and query your data, consider the [JSON]({{< relref "/develop/data-types/json/" >}}) data type and the [Search and query]({{< relref "/develop/interact/search-and-query/" >}}) features. ## Learn more diff --git a/content/develop/data-types/streams.md b/content/develop/data-types/streams.md index 46bb3d94b0..a47bfc921d 100644 --- a/content/develop/data-types/streams.md +++ b/content/develop/data-types/streams.md @@ -936,6 +936,6 @@ A few remarks: ## Learn more -* The [Redis Streams Tutorial]({{< relref "/develop/data-types/streams-tutorial" >}}) explains Redis streams with many examples. 
+* The [Redis Streams Tutorial]({{< relref "/develop/data-types/streams" >}}) explains Redis streams with many examples. * [Redis Streams Explained](https://www.youtube.com/watch?v=Z8qcpXyMAiA) is an entertaining introduction to streams in Redis. * [Redis University's RU202](https://university.redis.com/courses/ru202/) is a free, online course dedicated to Redis Streams. diff --git a/content/develop/data-types/strings.md b/content/develop/data-types/strings.md index d1df0b8570..7b4c4d863c 100644 --- a/content/develop/data-types/strings.md +++ b/content/develop/data-types/strings.md @@ -136,7 +136,7 @@ These random-access string commands may cause performance issues when dealing wi ## Alternatives -If you're storing structured data as a serialized string, you may also want to consider Redis [hashes]({{< relref "/develop/data-types/hashes" >}}) or [JSON]({{< relref "/develop/stack/json" >}}). +If you're storing structured data as a serialized string, you may also want to consider Redis [hashes]({{< relref "/develop/data-types/hashes" >}}) or [JSON]({{< relref "/develop/data-types/json/" >}}). ## Learn more diff --git a/content/develop/data-types/timeseries/configuration.md b/content/develop/data-types/timeseries/configuration.md index 81d31acb8f..18ad929852 100644 --- a/content/develop/data-types/timeseries/configuration.md +++ b/content/develop/data-types/timeseries/configuration.md @@ -22,13 +22,13 @@ weight: 3 Setting configuration parameters at load-time is done by appending arguments after the `--loadmodule` argument when starting a server from the command line or after the `loadmodule` directive in a Redis config file. For example: -In [redis.conf]({{< relref "/develop/manual/config/" >}}): +In [redis.conf]({{< relref "/operate/oss_and_stack/management/config" >}}): ```sh loadmodule ./redistimeseries.so [OPT VAL]... 
``` -From the [Redis CLI]({{< relref "/develop/manual/cli/" >}}), using the [MODULE LOAD](/commands/module-load/) command: +From the [Redis CLI]({{< relref "/develop/connect/cli" >}}), using the [MODULE LOAD](/commands/module-load/) command: ``` 127.0.0.6379> MODULE LOAD redistimeseries.so [OPT VAL]... diff --git a/content/develop/get-started/_index.md b/content/develop/get-started/_index.md index 16612f579f..c2d1189a2e 100644 --- a/content/develop/get-started/_index.md +++ b/content/develop/get-started/_index.md @@ -26,4 +26,4 @@ Redis can be used as a database, cache, streaming engine, message broker, and mo Please select the guide that aligns best with your specific usage scenario. -You can find answers to frequently asked questions in the [FAQ]({{< relref "/develop/get-started/faq/" >}}). +You can find answers to frequently asked questions in the [FAQ]({{< relref "/develop/get-started/faq" >}}). diff --git a/content/develop/get-started/data-store.md b/content/develop/get-started/data-store.md index d3a76267b7..7b593f63cf 100644 --- a/content/develop/get-started/data-store.md +++ b/content/develop/get-started/data-store.md @@ -33,7 +33,7 @@ The easiest way to get started with Redis is to use Redis Cloud: -You can alternatively follow the [installation guides]({{< relref "/develop/install/install-stack/" >}}) to install Redis on your local machine. +You can alternatively follow the [installation guides]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}) to install Redis on your local machine. ## Connect @@ -82,7 +82,7 @@ You can get a complete overview of available data types in this documentation si ## Scan the keyspace -Each item within Redis has a unique key. All items live within the Redis [keyspace]({{< relref "/develop/manual/keyspace/" >}}). You can scan the Redis keyspace via the [SCAN command](/commands/scan/). Here is an example that scans for the first 100 keys that have the prefix `bike:`: +Each item within Redis has a unique key. 
All items live within the Redis [keyspace]({{< relref "/develop/use/keyspace" >}}). You can scan the Redis keyspace via the [SCAN command](/commands/scan/). Here is an example that scans for the first 100 keys that have the prefix `bike:`: {{< clients-example scan_example >}} SCAN 0 MATCH "bike:*" COUNT 100 @@ -94,5 +94,5 @@ SCAN 0 MATCH "bike:*" COUNT 100 You can address more use cases with Redis by learning about Redis Stack. Here are two additional quick start guides: -* [Redis as a document database]({{< relref "/develop/get-started/document-database/" >}}) -* [Redis as a vector database]({{< relref "/develop/get-started/vector-database/" >}}) \ No newline at end of file +* [Redis as a document database]({{< relref "/develop/get-started/document-database" >}}) +* [Redis as a vector database]({{< relref "/develop/get-started/vector-database" >}}) \ No newline at end of file diff --git a/content/develop/get-started/document-database.md b/content/develop/get-started/document-database.md index 9cc53a65d5..792400a091 100644 --- a/content/develop/get-started/document-database.md +++ b/content/develop/get-started/document-database.md @@ -37,7 +37,7 @@ The examples in this article refer to a simple bicycle inventory that contains J ## Setup -The easiest way to get started with [Redis Stack]({{< relref "/develop/about/about-stack/" >}}) is to use Redis Cloud: +The easiest way to get started with [Redis Stack]({{< relref "/operate/oss_and_stack/" >}}) is to use Redis Cloud: 1. Create a [free account](https://redis.com/try-free?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). 2. Follow the instructions to create a free database. @@ -46,7 +46,7 @@ The easiest way to get started with [Redis Stack]({{< relref "/develop/about/abo This free Redis Cloud database comes out of the box with all the Redis Stack features. 
-You can alternatively use the [installation guides]({{< relref "/develop/install/install-stack" >}}) to install Redis Stack on your local machine. +You can alternatively use the [installation guides]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}) to install Redis Stack on your local machine. ## Connect @@ -65,9 +65,9 @@ You can copy and paste the connection details from the Redis Cloud database conf ## Create an index -As explained in the [in-memory data store]({{< relref "/develop/get-started/data-store/" >}}) quick start guide, Redis allows you to access an item directly via its key. You also learned how to scan the keyspace. Whereby you can use other data structures (e.g., hashes and sorted sets) as secondary indexes, your application would need to maintain those indexes manually. Redis Stack turns Redis into a document database by allowing you to declare which fields are auto-indexed. Redis Stack currently supports secondary index creation on the [hashes]({{< relref "/develop/data-types/hashes" >}}) and [JSON]({{< relref "/develop/data-types/json" >}}) documents. +As explained in the [in-memory data store]({{< relref "/develop/get-started/data-store" >}}) quick start guide, Redis allows you to access an item directly via its key. You also learned how to scan the keyspace. Whereby you can use other data structures (e.g., hashes and sorted sets) as secondary indexes, your application would need to maintain those indexes manually. Redis Stack turns Redis into a document database by allowing you to declare which fields are auto-indexed. Redis Stack currently supports secondary index creation on the [hashes]({{< relref "/develop/data-types/hashes" >}}) and [JSON]({{< relref "/develop/data-types/json" >}}) documents. -The following example shows an [FT.CREATE](/commands/ft.create/) command that creates an index with some text fields, a numeric field (price), and a tag field (condition). 
The text fields have a weight of 1.0, meaning they have the same relevancy in the context of full-text searches. The field names follow the [JSONPath]({{< relref "/develop/data-types/json/path/" >}}) notion. Each such index field maps to a property within the JSON document. +The following example shows an [FT.CREATE](/commands/ft.create/) command that creates an index with some text fields, a numeric field (price), and a tag field (condition). The text fields have a weight of 1.0, meaning they have the same relevancy in the context of full-text searches. The field names follow the [JSONPath]({{< relref "/develop/data-types/json/path" >}}) notion. Each such index field maps to a property within the JSON document. {{< clients-example search_quickstart create_index >}} @@ -175,4 +175,4 @@ Please see the [query documentation]({{< relref "/develop/interact/search-and-qu You can learn more about how to use Redis Stack as a vector database in the following quick start guide: -* [Redis as a vector database]({{< relref "/develop/get-started/vector-database/" >}}) +* [Redis as a vector database]({{< relref "/develop/get-started/vector-database" >}}) diff --git a/content/develop/get-started/faq.md b/content/develop/get-started/faq.md index 0ec280b273..4f79e7dae9 100644 --- a/content/develop/get-started/faq.md +++ b/content/develop/get-started/faq.md @@ -85,7 +85,7 @@ with an error to write commands (but will continue to accept read-only commands). You can also configure Redis to evict keys when the max memory limit -is reached. See the [eviction policy docs]({{< relref "/develop/manual/eviction/" >}}) for more information on this. +is reached. See the [eviction policy docs]({{< relref "/develop/reference/eviction" >}}) for more information on this. ## Background saving fails with a fork() error on Linux? 
diff --git a/content/develop/get-started/vector-database.md b/content/develop/get-started/vector-database.md index 38d81805e4..e3d2480799 100644 --- a/content/develop/get-started/vector-database.md +++ b/content/develop/get-started/vector-database.md @@ -36,7 +36,7 @@ You can use Redis Stack as a vector database. It allows you to: ## Set a vector database up -The easiest way to get started with [Redis Stack]({{< relref "/develop/about/about-stack/" >}}) is to use Redis Cloud: +The easiest way to get started with [Redis Stack]({{< relref "/operate/oss_and_stack/" >}}) is to use Redis Cloud: 1. Create a [free account](https://redis.com/try-free?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). 2. Follow the instructions to create a free database. @@ -45,13 +45,13 @@ The easiest way to get started with [Redis Stack]({{< relref "/develop/about/abo This free Redis Cloud database comes out of the box with all the Redis Stack features. -You can alternatively use the [installation guides]({{< relref "/develop/install/install-stack" >}}) to install Redis Stack on your local machine. +You can alternatively use the [installation guides]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}) to install Redis Stack on your local machine. ## Install the required Python packages The code examples are currently provided for Redis CLI and Python. For Python, you will need to create a virtual environment and install the following Python packages: -* `redis`: You can find further details about the `redis-py` client library in the [clients]({{< relref "/develop/connect/clients/python/" >}}) section of this documentation site. +* `redis`: You can find further details about the `redis-py` client library in the [clients]({{< relref "/develop/connect/clients/python" >}}) section of this documentation site. * `pandas`: Pandas is a data analysis library. 
* `sentence-transformers`: You will use the [SentenceTransformers](https://www.sbert.net/) framework to generate embeddings on full text. Sentence-BERT (SBERT) is a [BERT](https://en.wikipedia.org/wiki/BERT_(language_model)) model modification that produces consistent and contextually rich sentence embeddings. SBERT improves tasks like semantic search and text grouping by allowing for efficient and meaningful comparison of sentence-level semantic similarity. * `tabulate`: This package is optional. Pandas use it to render Markdown. @@ -170,7 +170,7 @@ Here is a breakdown of the `VECTOR` schema field definition: * `DIM 768`: The length or dimension of the embeddings, which you determined previously to be `768`. * `DISTANCE_METRIC COSINE`: The distance function is, in this example, [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity). -You can find further details about all these options in the [vector reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). +You can find further details about all these options in the [vector reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}). ### 2. Check the state of the index @@ -182,7 +182,7 @@ FT.INFO idx:bikes_vss ## Search and query -This quick start guide focuses on the vector search aspect. Still, you can learn more about how to query based on vector metadata in the [document database quick start guide]({{< relref "/develop/get-started/document-database/" >}}). +This quick start guide focuses on the vector search aspect. Still, you can learn more about how to query based on vector metadata in the [document database quick start guide]({{< relref "/develop/get-started/document-database" >}}). ### 1. Embed your prompts @@ -245,6 +245,6 @@ From the description, this bike is an excellent match for younger children, and ## Next steps -1. 
You can learn more about the query options, such as pre-filters and radius queries, by reading the [vector reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). +1. You can learn more about the query options, such as pre-filters and radius queries, by reading the [vector reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}). 2. The complete [search and query documentation](https://redis.io/docs/interact/search-and-query/) might be interesting for you. 3. If you want to follow the code examples more interactively, then you can use the [Jupyter notebook](https://github.com/RedisVentures/redis-vss-getting-started/blob/main/vector_similarity_with_redis.ipynb) that inspired this quick start guide. diff --git a/content/develop/interact/programmability/_index.md b/content/develop/interact/programmability/_index.md index 61860b66d7..e327b2ba4f 100644 --- a/content/develop/interact/programmability/_index.md +++ b/content/develop/interact/programmability/_index.md @@ -17,7 +17,7 @@ title: Redis programmability weight: 20 --- -Redis provides a programming interface that lets you execute custom scripts on the server itself. In Redis 7 and beyond, you can use [Redis Functions]({{< relref "/develop/manual/programmability/functions-intro" >}}) to manage and run your scripts. In Redis 6.2 and below, you use [Lua scripting with the EVAL command]({{< relref "/develop/manual/programmability/eval-intro" >}}) to program the server. +Redis provides a programming interface that lets you execute custom scripts on the server itself. In Redis 7 and beyond, you can use [Redis Functions]({{< relref "/develop/interact/programmability/functions-intro" >}}) to manage and run your scripts. In Redis 6.2 and below, you use [Lua scripting with the EVAL command]({{< relref "/develop/interact/programmability/eval-intro" >}}) to program the server. 
## Background diff --git a/content/develop/interact/programmability/functions-intro.md b/content/develop/interact/programmability/functions-intro.md index 408d1b3b1f..ca4f2a9ea4 100644 --- a/content/develop/interact/programmability/functions-intro.md +++ b/content/develop/interact/programmability/functions-intro.md @@ -17,7 +17,7 @@ title: Redis functions weight: 1 --- -Redis Functions is an API for managing code to be executed on the server. This feature, which became available in Redis 7, supersedes the use of [EVAL]({{< relref "/develop/manual/programmability/eval-intro" >}}) in prior versions of Redis. +Redis Functions is an API for managing code to be executed on the server. This feature, which became available in Redis 7, supersedes the use of [EVAL]({{< relref "/develop/interact/programmability/eval-intro" >}}) in prior versions of Redis. ## Prologue (or, what's wrong with Eval Scripts?) diff --git a/content/develop/interact/programmability/lua-api.md b/content/develop/interact/programmability/lua-api.md index 85142a4cb1..3889746963 100644 --- a/content/develop/interact/programmability/lua-api.md +++ b/content/develop/interact/programmability/lua-api.md @@ -452,7 +452,7 @@ redis> FUNCTION LOAD "#!lua name=mylib\n redis.register_function{function_name=' **Important:** Use script flags with care, which may negatively impact if misused. -Note that the default for Eval scripts are different than the default for functions that are mentioned below, see [Eval Flags]({{< relref "/develop/manual/programmability/eval-intro/#eval-flags" >}}) +Note that the default for Eval scripts are different than the default for functions that are mentioned below, see [Eval Flags]({{< relref "/develop/interact/programmability/eval-intro" >}}) When you register a function or load an Eval script, the server does not know how it accesses the database. By default, Redis assumes that all scripts read and write data. 
@@ -480,7 +480,7 @@ You can use the following flags and instruct the server to treat the scripts' ex However, note that the server will return an error if the script attempts to call a write command. Also note that currently [`PUBLISH`](/commands/publish), [`SPUBLISH`](/commands/spublish) and [`PFCOUNT`](/commands/pfcount) are also considered write commands in scripts, because they could attempt to propagate commands to replicas and AOF file. - For more information please refer to [Read-only scripts]({{< relref "/develop/manual/programmability/#read-only_scripts" >}}) + For more information please refer to [Read-only scripts]({{< relref "/develop/interact/programmability/" >}}) * `allow-oom`: use this flag to allow a script to execute when the server is out of memory (OOM). @@ -508,7 +508,7 @@ You can use the following flags and instruct the server to treat the scripts' ex This flag has no effect when cluster mode is disabled. -Please refer to [Function Flags]({{< relref "/develop/manual/programmability/functions-intro/#function-flags" >}}) and [Eval Flags]({{< relref "/develop/manual/programmability/eval-intro/#eval-flags" >}}) for a detailed example. +Please refer to [Function Flags]({{< relref "/develop/interact/programmability/functions-intro" >}}) and [Eval Flags]({{< relref "/develop/interact/programmability/eval-intro" >}}) for a detailed example. ### `redis.REDIS_VERSION` diff --git a/content/develop/interact/programmability/triggers-and-functions/Debugging.md b/content/develop/interact/programmability/triggers-and-functions/Debugging.md index 5e0bd03cf7..7be528b2ba 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Debugging.md +++ b/content/develop/interact/programmability/triggers-and-functions/Debugging.md @@ -21,7 +21,7 @@ weight: 5 There are two methods you can use to debug your Redis Stack functions: 1. Make judicious use of the `redis.log` function, which writes to the Redis log file. -1. 
Use Redis [pub/sub]({{< relref "/develop/interact/pubsub/" >}}). +1. Use Redis [pub/sub]({{< relref "/develop/interact/pubsub" >}}). ### Use `redis.log` @@ -46,7 +46,7 @@ After loading the library and executing the function with [`TFCALL`](/commands/t ### Use Redis pub/sub -If you don't have access to your Redis database log files, you can use pub/sub. The following example demonstrates how to use the [client.call]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/javascript_api/#clientcall" >}}) API to publish to a pub/sub channel. +If you don't have access to your Redis database log files, you can use pub/sub. The following example demonstrates how to use the [client.call]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API" >}}) API to publish to a pub/sub channel. ```javascript #!js api_version=1.0 name=lib diff --git a/content/develop/interact/programmability/triggers-and-functions/Development.md b/content/develop/interact/programmability/triggers-and-functions/Development.md index cbb0ff7d61..f0a8c45f37 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Development.md +++ b/content/develop/interact/programmability/triggers-and-functions/Development.md @@ -17,7 +17,7 @@ title: Development weight: 4 --- -To aid in the development of new libraries of triggers and functions, you can use the type declaration files for the [triggers and functions API]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/javascript_api/" >}}), which allows your preferred development environment to provide autocompletion and type checking. 
You can install this information using the following command: +To aid in the development of new libraries of triggers and functions, you can use the type declaration files for the [triggers and functions API]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API" >}}), which allows your preferred development environment to provide autocompletion and type checking. You can install this information using the following command: ```bash npm install https://gitpkg.now.sh/RedisGears/RedisGears/js_api --save-dev diff --git a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md index 903c483352..358b84906b 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md +++ b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md @@ -17,7 +17,7 @@ title: Quick start using redis-cli weight: 2 --- -Make sure that you have [Redis Stack installed]({{< relref "/develop/getting-started/install-stack/" >}}) and running. Alternatively, you can create a [free Redis Cloud account](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). The triggers and functions preview is available in the fixed subscription plan for the Google Cloud Asia Pacific (Tokyo) and AWS Asia Pacific (Singapore) regions. +Make sure that you have [Redis Stack installed]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}) and running. Alternatively, you can create a [free Redis Cloud account](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). The triggers and functions preview is available in the fixed subscription plan for the Google Cloud Asia Pacific (Tokyo) and AWS Asia Pacific (Singapore) regions. 
## Connect to Redis Stack diff --git a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md index b9ac879ee0..c76ded42af 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md +++ b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md @@ -17,7 +17,7 @@ title: Quick start using RedisInsight weight: 1 --- -Make sure that you have [Redis Stack installed]({{< relref "/develop/getting-started/install-stack/" >}}) and running. Alternatively, you can create a [free Redis Cloud account](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). +Make sure that you have [Redis Stack installed]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}) and running. Alternatively, you can create a [free Redis Cloud account](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). If you haven't already installed RedisInsight, you can download the latest version [here](https://redis.com/redis-enterprise/redis-insight/?_ga=2.232184223.127667221.1704724457-86137583.1685485233&_gl=1*1gygred*_ga*ODYxMzc1ODMuMTY4NTQ4NTIzMw..*_ga_8BKGRQKRPV*MTcwNDkyMzExMC40MDEuMS4xNzA0OTI3MjQ2LjUyLjAuMA..*_gcl_au*MTQzODY1OTU4OS4xNzAxMTg0MzY0). If this is your first time using RedisInsight, you may wish to read through the [RedisInsight guide](https://redis.io/docs/connect/insight/) before continuing with this guide. 
diff --git a/content/develop/interact/programmability/triggers-and-functions/_index.md b/content/develop/interact/programmability/triggers-and-functions/_index.md index c5d56bcb6d..01894e43e1 100644 --- a/content/develop/interact/programmability/triggers-and-functions/_index.md +++ b/content/develop/interact/programmability/triggers-and-functions/_index.md @@ -35,7 +35,7 @@ The triggers and functions feature of Redis Stack allows running JavaScript func ## Quick links -* [Quick start guide]({{< relref "/develop/interact/programmability/triggers-and-functions/quick_start" >}}) +* [Quick start guide]({{< relref "/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI" >}}) * [Source code](https://github.com/RedisGears/RedisGears) * [Latest release](https://github.com/RedisGears/RedisGears/releases) * [Docker image](https://hub.docker.com/r/redis/redis-stack-server/) diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Binary_Data.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Binary_Data.md index 05ee5162c8..219c5c485e 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Binary_Data.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Binary_Data.md @@ -21,12 +21,12 @@ By default, triggers and functions will decode all data as a string and will rai 1. Binary function arguments 2. Binary command results -3. Binary key names on [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/" >}}) -4. Binary data on [stream triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/stream_triggers/" >}}) +3. Binary key names on [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/Keyspace_Triggers" >}}) +4. 
Binary data on [stream triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers" >}}) ### Binary function arguments -It is possible to instruct triggers and functions not to decode function arguments as `JS` `Strings` using the [redis.functionFlags.RAW_ARGUMENTS]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/function_flags/" >}}) function flag. In this case, the function arguments will be given as `JS` `ArrayBuffer`. Example: +It is possible to instruct triggers and functions not to decode function arguments as `JS` `Strings` using the [redis.functionFlags.RAW_ARGUMENTS]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/Function_Flags" >}}) function flag. In this case, the function arguments will be given as `JS` `ArrayBuffer`. Example: ```js #!js api_version=1.0 name=lib @@ -80,7 +80,7 @@ Notice that a `JS` `ArrayBuffer` can be returned by a function, it will be retur ### Binary keys names on database triggers -On [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/" >}}), if the key name that triggered the event is binary, the `data.key` field will be NULL. The `data.key_raw` field is always provided as a `JS` `ArrayBuffer` and can be used as in the following example: +On [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/Keyspace_Triggers" >}}), if the key name that triggered the event is binary, the `data.key` field will be NULL. The `data.key_raw` field is always provided as a `JS` `ArrayBuffer` and can be used as in the following example: ```js #!js api_version=1.0 name=lib @@ -116,11 +116,11 @@ OK 3) "\xaa" ``` -For more information see [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/" >}}). 
+For more information see [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/Keyspace_Triggers" >}}).

### Binary data on stream consumers

-On [stream triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/stream_triggers/" >}}), if the key name is binary. The `data.stream_name` field will be NULL. The `data.stream_name_raw` field is always provided as a `JS` `ArrayBuffer` and can be used in this case. In addition, if the content of the steam is binary, it will also appear as `null` under `data.record`. In this case, it is possible to use `data.record` (which always exists) and contains the data as a `JS` `ArrayBuffer`. Example:
+On [stream triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers" >}}), if the key name is binary. The `data.stream_name` field will be NULL. The `data.stream_name_raw` field is always provided as a `JS` `ArrayBuffer` and can be used in this case. In addition, if the content of the stream is binary, it will also appear as `null` under `data.record`. In this case, it is possible to use `data.record` (which always exists) and contains the data as a `JS` `ArrayBuffer`. Example:

```js
#!js api_version=1.0 name=lib
diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md
index 18c259ab4b..9d96b668b6 100644
--- a/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md
+++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md
@@ -31,7 +31,7 @@ redis.registerClusterFunction("dbsize", async(async_client) => {
});
```

-`redis.registerClusterFunction` is passed the remote function name, which will be used later to call the remote function, and the remote function code.
The remote function must be a Coroutine (async function) and it is executed in the background on the remote shard. For more information about async function, please refer to [sync and async]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/sync_async/" >}}) page. +`redis.registerClusterFunction` is passed the remote function name, which will be used later to call the remote function, and the remote function code. The remote function must be a Coroutine (async function) and it is executed in the background on the remote shard. For more information about async function, please refer to [sync and async]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async" >}}) page. We have couple of options for calling a remote function. These options are exposed through the async client that is given to a Coroutine: @@ -109,11 +109,11 @@ The remote function arguments and results are serialized in the following way: ## Execution timeout -Remote functions will not be permitted to run forever and will timeout. The timeout period can be configured using [remote-task-default-timeout]({{< relref "/develop/interact/programmability/triggers-and-functions/configuration/#remote-task-default-timeout" >}}). When using `async_client.runOnShards` API, the timeout will be added as error to the error array. When using `async_client.runOnKey`, a timeout will cause an exception to be raised. +Remote functions will not be permitted to run forever and will timeout. The timeout period can be configured using [remote-task-default-timeout]({{< relref "/develop/interact/programmability/triggers-and-functions/Configuration" >}}). When using `async_client.runOnShards` API, the timeout will be added as error to the error array. When using `async_client.runOnKey`, a timeout will cause an exception to be raised. 
## Remote function limitations -All the limitations listed on [coroutines]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/sync_async/" >}}) also apply to remote functions. Remote function also come with some extra limitations: +All the limitations listed on [coroutines]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async" >}}) also apply to remote functions. Remote function also come with some extra limitations: * Remote functions can only perform read operations. An attempt to perform a write operation will result in an error. * Remote function are not guaranteed to succeed (if the shard crashed for example). In such cases a timeout error will be given. diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md index f1fa95e5d8..d5579ddb2e 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md @@ -17,7 +17,7 @@ title: Library configuration weight: 6 --- -When writing a library, you may want to provide a loading configuration so that different users can use the same library with slightly different behaviour, without changing the base code. For example, assume you write a library that adds a `__last_updated__` field to a hash (you can see how it can also be done with [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/keyspace_triggers/" >}})), the code will look like this: +When writing a library, you may want to provide a loading configuration so that different users can use the same library with slightly different behaviour, without changing the base code. 
For example, assume you write a library that adds a `__last_updated__` field to a hash (you can see how it can also be done with [keyspace triggers]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/triggers/Keyspace_Triggers" >}})), the code will look like this: ```js #!js api_version=1.0 name=lib diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md index 3241bae355..ae4117b3f2 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md @@ -228,7 +228,7 @@ RedisGears also provided `client.callAsyncRaw` API, which is the same as `client Blocking Redis might fail for a few reasons: -* Redis reached OOM state and the `redis.functionFlags.NO_WRITES` or `redis.functionFlags.ALLOW_OOM` flags are not set (see [functions flags]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/function_flags/" >}}) for more information) +* Redis reached OOM state and the `redis.functionFlags.NO_WRITES` or `redis.functionFlags.ALLOW_OOM` flags are not set (see [functions flags]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/Function_Flags" >}}) for more information) * `redis.functionFlags.NO_WRITES` flag is not set and the Redis instance changed roles and is now a replica. * The ACL user that invoked the function was deleted. @@ -236,7 +236,7 @@ The failure will result in an exception that the function writer can choose to h # Block Redis timeout -Blocking Redis for a long time is discouraged and is considered an unsafe operation. The triggers and functions feature attempts to protect the function writer and will time out the blocking function if it continues for too long. 
The timeout can be set as a [module configuration]({{< relref "/develop/interact/programmability/triggers-and-functions/configuration/" >}}) along side the fatal failure policy that indicates how to handle the timeout. Policies can be one of the following: +Blocking Redis for a long time is discouraged and is considered an unsafe operation. The triggers and functions feature attempts to protect the function writer and will time out the blocking function if it continues for too long. The timeout can be set as a [module configuration]({{< relref "/develop/interact/programmability/triggers-and-functions/Configuration" >}}) along side the fatal failure policy that indicates how to handle the timeout. Policies can be one of the following: * Abort - Stop the function invocation even at the cost of losing the atomicity property. * Kill - Keep the atomicity property and do not stop the function invocation. In this case there is a risk of an external process killing the Redis server, thinking that the shard is not responding. diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md index 1c8282225e..f91dc83f96 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md @@ -45,7 +45,7 @@ Argument Description: * `consumer`: The consumer name. * `prefix `: The key prefix on which the trigger should be fired. -* `callback`: The callback function to invoke, following the same rules of [Sync and Async invocation]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/sync_async/" >}}). The callback will only be invoked on the primary shard. 
+* `callback`: The callback function to invoke, following the same rules of [Sync and Async invocation]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async" >}}). The callback will only be invoked on the primary shard. Run the example: @@ -121,7 +121,7 @@ We can display trigger information using [`TFUNCTION LIST`](/commands/tfunction- If the callback function passed to the trigger is a `JS` function (not a Coroutine), it is guaranteed that the callback will be invoked atomically along side the operation that caused the trigger; meaning all clients will see the data only after the callback has completed. In addition, it is guaranteed that the effect of the callback will be replicated to the replica and the AOF in a `multi/exec` block together with the command that fired the trigger. -If the callback is a Coroutine, it will be executed in the background and there is no guarantee on where or if it will be executed. The guarantees are the same as described on [sync and async invocation]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/sync_async/" >}}). +If the callback is a Coroutine, it will be executed in the background and there is no guarantee on where or if it will be executed. The guarantees are the same as described on [sync and async invocation]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async" >}}). ## Upgrades diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md index 01e0e0fed1..bd68331e07 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md @@ -44,9 +44,9 @@ Argument Description: * consumer - the consumer name. 
* stream - streams name prefix on which to trigger the callback.
-* callback - the callback to invoke on each element in the stream. Following the same rules of [sync and async invocation]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/sync_async/" >}}). The callback will be invoke only on primary shard.
+* callback - the callback to invoke on each element in the stream. Following the same rules of [sync and async invocation]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async" >}}). The callback will be invoked only on the primary shard.

-If we register this library (see the [quick start]({{< relref "/develop/interact/programmability/triggers-and-functions/quick_start/" >}}) section to learn how to Register a RedisGears function) and run the following command on our Redis:
+If we register this library (see the [quick start]({{< relref "/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI" >}}) section to learn how to Register a RedisGears function) and run the following command on our Redis:

```
XADD stream:1 * foo1 bar1
diff --git a/content/develop/interact/pubsub.md b/content/develop/interact/pubsub.md
index 1f892b3bf7..7aaa6662ae 100644
--- a/content/develop/interact/pubsub.md
+++ b/content/develop/interact/pubsub.md
@@ -54,7 +54,7 @@ As the name suggests, it means that a message will be delivered once if at all.
Once the message is sent by the Redis server, there's no chance of it being sent again. If the subscriber is unable to handle the message (for example, due to an error or a network disconnect) the message is forever lost.

-If your application requires stronger delivery guarantees, you may want to learn about [Redis Streams]({{< relref "/develop/data-types/streams-tutorial" >}}).
+If your application requires stronger delivery guarantees, you may want to learn about [Redis Streams]({{< relref "/develop/data-types/streams" >}}).
Messages in streams are persisted, and support both _at-most-once_ as well as _at-least-once_ delivery semantics. ## Format of pushed messages diff --git a/content/develop/interact/search-and-query/_index.md b/content/develop/interact/search-and-query/_index.md index 0557d1abf3..71e0995cf9 100644 --- a/content/develop/interact/search-and-query/_index.md +++ b/content/develop/interact/search-and-query/_index.md @@ -37,7 +37,7 @@ The search and query features of Redis Stack allow you to use Redis as a: Here are the next steps to get you started: -1. Follow our [quick start guide]({{< relref "/develop/get-started/document-database/" >}}) to get some initial hands-on experience. +1. Follow our [quick start guide]({{< relref "/develop/get-started/document-database" >}}) to get some initial hands-on experience. 2. Learn how to [create an index]({{< relref "/develop/interact/search-and-query/indexing/" >}}). 3. Learn how to [query your data]({{< relref "/develop/interact/search-and-query/query/" >}}). diff --git a/content/develop/interact/search-and-query/administration/overview.md b/content/develop/interact/search-and-query/administration/overview.md index c39230ff1d..ed439f5207 100644 --- a/content/develop/interact/search-and-query/administration/overview.md +++ b/content/develop/interact/search-and-query/administration/overview.md @@ -135,7 +135,7 @@ Optionally, you can choose not to save any one of those attributes besides the I ### Numeric index -Numeric properties are indexed in a special data structure that enables filtering by numeric ranges in an efficient way. One could view a numeric value as a term operating just like an inverted index. For example, all the products with the price $100 are in a specific list, which is intersected with the rest of the query. See [query execution engine]({{< relref "/develop/interact/search-and-query/administration/design/#query-execution-engine" >}}) for more information. 
+Numeric properties are indexed in a special data structure that enables filtering by numeric ranges in an efficient way. One could view a numeric value as a term operating just like an inverted index. For example, all the products with the price $100 are in a specific list, which is intersected with the rest of the query. See [query execution engine]({{< relref "/develop/interact/search-and-query/administration/design" >}}) for more information. However, in order to filter by a range of prices, you would have to intersect the query with all the distinct prices within that range, or perform a union query. If the range has many values in it, this becomes highly inefficient. @@ -229,7 +229,7 @@ And negative clauses can also be added to filter out plasma and CRT TVs: Redis Stack comes with a few very basic scoring functions to evaluate document relevance. They are all based on document scores and term frequency. This is regardless of the ability to use sortable fields (see below). Scoring functions are specified by adding the `SCORER {scorer_name}` argument to a search request. -If you prefer a custom scoring function, it is possible to add more functions using the [extension API]({{< relref "/develop/interact/search-and-query/administration/extensions/" >}}). +If you prefer a custom scoring function, it is possible to add more functions using the [extension API]({{< relref "/develop/interact/search-and-query/administration/extensions" >}}). These are the pre-bundled scoring functions available in Redis Stack: diff --git a/content/develop/interact/search-and-query/advanced-concepts/_index.md b/content/develop/interact/search-and-query/advanced-concepts/_index.md index 694e570176..17b0d05e41 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/_index.md +++ b/content/develop/interact/search-and-query/advanced-concepts/_index.md @@ -32,23 +32,23 @@ Redis Stack supports the following search and query features. 
This article provi
* Multi-field queries
* Query on [JSON]({{< relref "/develop/data-types/json/" >}}) documents
-* [Aggregation]({{< relref "/develop/interact/search-and-query/search/aggregations/" >}})
+* [Aggregation]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}})
* Boolean queries with AND, OR, and NOT operators between subqueries
* Optional query clauses
* Retrieval of full document contents or only their IDs
* Exact phrase search and slop-based search
* [Numeric filters]({{< relref "/develop/interact/search-and-query/query/#numeric-filters-in-query" >}}) and ranges
* [Geo-filtering]({{< relref "/develop/interact/search-and-query/query/#geo-filters-in-query" >}}) using Redis [geo commands](/commands/?group=geo)
-* [Vector similartiy search]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}})
+* [Vector similarity search]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}})

## Full-text search features

* [Prefix-based searches]({{< relref "/develop/interact/search-and-query/query/#prefix-matching" >}})
* Field weights
-* [Auto-complete]({{< relref "/develop/interact/search-and-query/administration/overview/#auto-complete" >}}) and fuzzy prefix suggestions
-* [Stemming]({{< relref "/develop/interact/search-and-query/advanced-concepts/stemming/" >}})-based query expansion for [many languages]({{< relref "/develop/interact/search-and-query/advanced-concepts/stemming//#supported-languages" >}}) using [Snowball](http://snowballstem.org/)
-* Support for custom functions for query expansion and scoring (see [Extensions]({{< relref "/develop/interact/search-and-query/administration/extensions/" >}}))
+* [Auto-complete]({{< relref "/develop/interact/search-and-query/administration/overview" >}}) and fuzzy prefix suggestions
+* [Stemming]({{< relref "/develop/interact/search-and-query/advanced-concepts/stemming" >}})-based query expansion for [many languages]({{< relref 
"/develop/interact/search-and-query/advanced-concepts/stemming" >}}) using [Snowball](http://snowballstem.org/) +* Support for custom functions for query expansion and scoring (see [Extensions]({{< relref "/develop/interact/search-and-query/administration/extensions" >}})) * Unicode support (UTF-8 input required) * Document ranking diff --git a/content/develop/interact/search-and-query/advanced-concepts/escaping.md b/content/develop/interact/search-and-query/advanced-concepts/escaping.md index 132bbacf8f..134a7bff4b 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/escaping.md +++ b/content/develop/interact/search-and-query/advanced-concepts/escaping.md @@ -19,7 +19,7 @@ weight: 4 Redis Stack uses a very simple tokenizer for documents and a slightly more sophisticated tokenizer for queries. Both allow a degree of control over string escaping and tokenization. -Note: There is a different mechanism for tokenizing text and tag fields, this document refers only to text fields. For tag fields please refer to the [tag fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags/" >}}) documentation. +Note: There is a different mechanism for tokenizing text and tag fields, this document refers only to text fields. For tag fields please refer to the [tag fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags" >}}) documentation. ## The rules of text field tokenization diff --git a/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md b/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md index 7a4c7b5b00..c85c658f7e 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md +++ b/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md @@ -49,7 +49,7 @@ You can use simple syntax for complex queries using these rules: * Georadius matches on geo fields with the syntax `@field:[{lon} {lat} {radius} {m|km|mi|ft}]`. 
* As of 2.6, range queries on vector fields with the syntax `@field:[VECTOR_RANGE {radius} $query_vec]`, where `query_vec` is given as a query parameter. * As of v2.4, k-nearest neighbors (KNN) queries on vector fields with or without pre-filtering with the syntax `{filter_query}=>[KNN {num} @field $query_vec]`. -* Tag field filters with the syntax `@field:{tag | tag | ...}`. See the full documentation on [tags]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags/" >}}). +* Tag field filters with the syntax `@field:{tag | tag | ...}`. See the full documentation on [tags]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags" >}}). * Optional terms or clauses: `foo ~bar` means bar is optional but documents containing `bar` will rank higher. * Fuzzy matching on terms: `%hello%` means all terms with Levenshtein distance of 1 from it. Use multiple pairs of '%' brackets to increase the Levenshtein distance. * An expression in a query can be wrapped in parentheses to disambiguate, for example, `(hello|hella) (world|werld)`. @@ -265,7 +265,7 @@ The general syntax for hybrid query is `{some filter query}=>[ KNN {num|$num} @v `@vector_field:[VECTOR_RANGE 0.5 $query_vec]` -As of v2.4, the KNN vector search can be used at most once in a query, while, as of v2.6, the vector range filter can be used multiple times in a query. For more information on vector similarity syntax, see [Querying vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}), and [Vector search examples]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/#vector-search-examples" >}}) sections. +As of v2.4, the KNN vector search can be used at most once in a query, while, as of v2.6, the vector range filter can be used multiple times in a query. 
For more information on vector similarity syntax, see [Querying vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}), and [Vector search examples]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}) sections. ## Prefix matching @@ -370,7 +370,7 @@ The supported attributes are: As of v2.6.1, the query attributes syntax supports these additional attributes: * **$yield_distance_as**: specifies the distance field name, used for later sorting and/or returning, for clauses that yield some distance metric. It is currently supported for vector queries only (both KNN and range). -* **vector query params**: pass optional parameters for [vector queries]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/#querying-vector-fields" >}}) in key-value format. +* **vector query params**: pass optional parameters for [vector queries]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}) in key-value format. ## A few query examples @@ -460,4 +460,4 @@ As of v2.6.1, the query attributes syntax supports these additional attributes: The query parser is built using the Lemon Parser Generator and a Ragel based lexer. You can see the `DIALECT 2` grammar definition [at this git repo](https://github.com/RediSearch/RediSearch/blob/master/src/query_parser/v2/parser.y). -You can also see the [DEFAULT_DIALECT]({{< relref "/develop/interact/search-and-query/basic-constructs/configuration-parameters/#default_dialect" >}}) configuration parameter. +You can also see the [DEFAULT_DIALECT]({{< relref "/develop/interact/search-and-query/basic-constructs/configuration-parameters" >}}) configuration parameter. 
diff --git a/content/develop/interact/search-and-query/advanced-concepts/scoring.md b/content/develop/interact/search-and-query/advanced-concepts/scoring.md index 43c48cf7d4..2fe5e892b9 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/scoring.md +++ b/content/develop/interact/search-and-query/advanced-concepts/scoring.md @@ -19,9 +19,9 @@ weight: 8 When searching, documents are scored based on their relevance to the query. The score is a floating point number between 0.0 and 1.0, where 1.0 is the highest score. The score is returned as part of the search results and can be used to sort the results. -Redis Stack comes with a few very basic scoring functions to evaluate document relevance. They are all based on document scores and term frequency. This is regardless of the ability to use [sortable fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/sorting/" >}}). Scoring functions are specified by adding the `SCORER {scorer_name}` argument to a search query. +Redis Stack comes with a few very basic scoring functions to evaluate document relevance. They are all based on document scores and term frequency. This is regardless of the ability to use [sortable fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/sorting" >}}). Scoring functions are specified by adding the `SCORER {scorer_name}` argument to a search query. -If you prefer a custom scoring function, it is possible to add more functions using the [extension API]({{< relref "/develop/interact/search-and-query/administration/extensions/" >}}). +If you prefer a custom scoring function, it is possible to add more functions using the [extension API]({{< relref "/develop/interact/search-and-query/administration/extensions" >}}). The following is a list of the pre-bundled scoring functions available in Redis Stack and a short explanation about how they work. 
Each function is mentioned by registered name, which can be passed as a `SCORER` argument in [`FT.SEARCH`](/commands/ft.search). diff --git a/content/develop/interact/search-and-query/advanced-concepts/stemming.md b/content/develop/interact/search-and-query/advanced-concepts/stemming.md index fa955dcf3d..c16a596d53 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/stemming.md +++ b/content/develop/interact/search-and-query/advanced-concepts/stemming.md @@ -29,7 +29,7 @@ For further details see the [Snowball Stemmer website](https://snowballstem.org/ Stemming maps different forms of the same word to a common root - "stem" - for example, the English stemmer maps *studied* ,*studies* and *study* to *studi* . So a searching for *studied* would also find documents which only have the other forms. -In order to define which language the Stemmer should apply when building the index, you need to specify the `LANGUAGE` parameter for the entire index or for the specific field. For more details check the [FT.CREATE]({{< relref "/develop/commands/ft.create.md" >}}) syntax. +In order to define which language the Stemmer should apply when building the index, you need to specify the `LANGUAGE` parameter for the entire index or for the specific field. For more details check the [FT.CREATE]({{< relref "/commands/FT.CREATE" >}}) syntax. **Create a index with language definition** diff --git a/content/develop/interact/search-and-query/advanced-concepts/tags.md b/content/develop/interact/search-and-query/advanced-concepts/tags.md index fab3f38391..de490bec4b 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/tags.md +++ b/content/develop/interact/search-and-query/advanced-concepts/tags.md @@ -155,6 +155,6 @@ You can see what that looks like in the following example: (error) Syntax error at offset 27 near be ``` -Note: stop words are words that are so common that a search engine ignores them. 
To learn more, see [stop words]({{< relref "/develop/interact/search-and-query/advanced-concepts/stopwords/" >}}). +Note: stop words are words that are so common that a search engine ignores them. To learn more, see [stop words]({{< relref "/develop/interact/search-and-query/advanced-concepts/stopwords" >}}). Given the potential for syntax errors,it is recommended that you escape all spaces within tag queries. diff --git a/content/develop/interact/search-and-query/advanced-concepts/vectors.md b/content/develop/interact/search-and-query/advanced-concepts/vectors.md index df641712e7..d154020d4f 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/vectors.md +++ b/content/develop/interact/search-and-query/advanced-concepts/vectors.md @@ -17,7 +17,7 @@ weight: 14 --- *Vector fields* allow you to use vector similarity queries in the [`FT.SEARCH`](/commands/ft.search) command. -*Vector similarity* enables you to load, index, and query vectors stored as fields in Redis hashes or in JSON documents (via integration with the [JSON module]({{< relref "/develop/stack/json/" >}})) +*Vector similarity* enables you to load, index, and query vectors stored as fields in Redis hashes or in JSON documents (via integration with the [JSON module]({{< relref "/develop/data-types/json/" >}})) Vector similarity provides these functionalities: @@ -180,7 +180,7 @@ Unlike in hashes, vectors are stored in JSON documents as arrays (not as blobs). JSON.SET 1 $ '{"vec":[1,2,3,4]}' ``` -As of v2.6.1, JSON supports multi-value indexing. This capability accounts for vectors as well. Thus, it is possible to index multiple vectors under the same JSONPath. Additional information is available in the [Indexing JSON documents]({{< relref "/develop/stack/search/indexing_json/#index-json-arrays-as-vector" >}}) section. +As of v2.6.1, JSON supports multi-value indexing. This capability accounts for vectors as well. Thus, it is possible to index multiple vectors under the same JSONPath. 
Additional information is available in the [Indexing JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) section. **Example** ``` @@ -221,11 +221,11 @@ Every `*_attribute` parameter should refer to an attribute in the [`PARAMS`](/co * `$` - An attribute that holds the query vector as blob and must be passed through the `PARAMS` section. The blob's byte size should match the vector field dimension and type. -* `[ |$ [...]]` - An optional part for passing one or more vector similarity query parameters. Parameters should come in key-value pairs and should be valid parameters for the query. See which [runtime parameters]({{< relref "/develop/stack/search/reference/vectors/#runtime-attributes" >}}) are valid for each algorithm. +* `[ |$ [...]]` - An optional part for passing one or more vector similarity query parameters. Parameters should come in key-value pairs and should be valid parameters for the query. See which [runtime parameters]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}) are valid for each algorithm. * `[AS | $]` - An optional part for specifying a distance field name, for later sorting by the similarity metric and/or returning it. By default, the distance field name is "`___score`" and it can be used for sorting without using `AS ` in the query. -**Note:** As of v2.6, vector query params and distance field name can be specified in [query attributes]({{< relref "/develop/stack/search/reference/query_syntax/#query-attributes" >}}) like syntax as well. Thus, the following format is also supported: +**Note:** As of v2.6, vector query params and distance field name can be specified in [query attributes]({{< relref "/develop/interact/search-and-query/advanced-concepts/query_syntax" >}}) like syntax as well. Thus, the following format is also supported: ``` =>[]=>{$: ( | $); ... 
} diff --git a/content/develop/interact/search-and-query/basic-constructs/_index.md b/content/develop/interact/search-and-query/basic-constructs/_index.md index dfde56179d..7df41149c5 100644 --- a/content/develop/interact/search-and-query/basic-constructs/_index.md +++ b/content/develop/interact/search-and-query/basic-constructs/_index.md @@ -37,8 +37,8 @@ Fields that are not indexed will not contribute to search results. However, they ## Schema -The index structure in defined by a schema. The schema defines how fields are stored and indexed. It specifies the type of each field and other important information. +The index structure is defined by a schema. The schema defines how fields are stored and indexed. It specifies the type of each field and other important information. -To create an index, you need to define the schema for your collection. Learn more about how to define the schema on the [schema definition]({{< relref "/develop/interact/search-and-query/basic-constructs/schema-definition/" >}}) page. +To create an index, you need to define the schema for your collection. Learn more about how to define the schema on the [schema definition]({{< relref "/develop/interact/search-and-query/basic-constructs/schema-definition" >}}) page. -## Learn more: \ No newline at end of file +## Learn more: diff --git a/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md b/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md index 1d7b2ceda6..a86eec4d73 100644 --- a/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md +++ b/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md @@ -23,13 +23,13 @@ weight: 4 Setting configuration parameters at load-time is done by appending arguments after the `--loadmodule` argument when starting a server from the command line, or after the `loadmodule` directive in a Redis config file. 
For example: -In [redis.conf]({{< relref "/develop/manual/config/" >}}): +In [redis.conf]({{< relref "/operate/oss_and_stack/management/config" >}}): ``` loadmodule ./redisearch.so [OPT VAL]... ``` -From the [Redis CLI]({{< relref "/develop/manual/cli/" >}}), using the [MODULE LOAD](/commands/module-load/) command: +From the [Redis CLI]({{< relref "/develop/connect/cli" >}}), using the [MODULE LOAD](/commands/module-load/) command: ``` 127.0.0.6379> MODULE LOAD redisearch.so [OPT VAL]... @@ -182,7 +182,7 @@ $ redis-server --loadmodule ./redisearch.so CONCURRENT_WRITE_MODE ### EXTLOAD -If present, RediSearch will try to load an extension dynamic library from its specified file path. See [Extensions]({{< relref "/develop/interact/search-and-query/administration/extensions/" >}}) for details. +If present, RediSearch will try to load an extension dynamic library from its specified file path. See [Extensions]({{< relref "/develop/interact/search-and-query/administration/extensions" >}}) for details. #### Default @@ -282,7 +282,7 @@ $ redis-server --loadmodule ./redisearch.so MAXAGGREGATERESULTS 3000000 ### FRISOINI -If present, load the custom Chinese dictionary from the specified path. See [Using custom dictionaries]({{< relref "/develop/interact/search-and-query/advanced-concepts/chinese/#using-custom-dictionaries" >}}) for more details. +If present, load the custom Chinese dictionary from the specified path. See [Using custom dictionaries]({{< relref "/develop/interact/search-and-query/advanced-concepts/chinese" >}}) for more details. #### Default @@ -298,7 +298,7 @@ $ redis-server --loadmodule ./redisearch.so FRISOINI /opt/dict/friso.ini ### CURSOR_MAX_IDLE -The maximum idle time (in ms) that can be set to the [cursor api]({{< relref "/develop/interact/search-and-query/search/aggregations/#cursor-api" >}}). +The maximum idle time (in ms) that can be set to the [cursor api]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}}). 
#### Default @@ -548,7 +548,7 @@ $ redis-server --loadmodule ./redisearch.so DEFAULT_DIALECT 2 ### VSS_MAX_RESIZE -The maximum memory resize for vector similarity indexes in bytes. This value will override default memory limits if you need to allow for a large [`BLOCK_SIZE`]({{< relref "/develop/interact/search-and-query/search/vectors/#creation-attributes-per-algorithm" >}}). +The maximum memory resize for vector similarity indexes in bytes. This value will override default memory limits if you need to allow for a large [`BLOCK_SIZE`]({{< relref "/develop/get-started/vector-database" >}}). #### Default diff --git a/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md b/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md index 52504018ba..9e7a705581 100644 --- a/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md +++ b/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md @@ -105,7 +105,7 @@ Notice that `{count}` represents the total number of attribute pairs passed in t * `{attribute_name} {attribute_value}` are algorithm attributes for the creation of the vector index. Every algorithm has its own mandatory and optional attributes. -For more information about vector fields, see [vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). +For more information about vector fields, see [vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}). ## Tag fields @@ -129,7 +129,7 @@ You can search for documents with specific tags using the `@:{} FT.SEARCH idx "@tags:{blue}" ``` -For more information about tag fields, see [Tag Fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags/" >}}). +For more information about tag fields, see [Tag Fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags" >}}). 
## Text fields @@ -152,7 +152,7 @@ where - `dm:pt` - double metaphone for Portuguese - `dm:es` - double metaphone for Spanish - For more information, see [Phonetic Matching]({{< relref "/develop/interact/search-and-query/advanced-concepts/phonetic_matching/" >}}). + For more information, see [Phonetic Matching]({{< relref "/develop/interact/search-and-query/advanced-concepts/phonetic_matching" >}}). - `SORTABLE` indicates that the field can be sorted. This is useful for performing range queries and sorting search results based on text values. - `NOINDEX` indicates that the field is not indexed. This is useful for storing text that you don't want to search for, but that you still want to retrieve in search results. - `WITHSUFFIXTRIE` indicates that the field will be indexed with a suffix trie. The index will keep a suffix trie with all terms which match the suffix. It is used to optimize `contains (*foo*)` and `suffix (*foo)` queries. Otherwise, a brute-force search on the trie is performed. If a suffix trie exists for some fields, these queries will be disabled for other fields. diff --git a/content/develop/interact/search-and-query/indexing/_index.md b/content/develop/interact/search-and-query/indexing/_index.md index e10eb4639c..b3a77ca2cf 100644 --- a/content/develop/interact/search-and-query/indexing/_index.md +++ b/content/develop/interact/search-and-query/indexing/_index.md @@ -21,7 +21,7 @@ In addition to indexing Redis hashes, Redis Stack can also index JSON documents. 
Before you can index and search JSON documents, you need a database with either: -- [Redis Stack]({{< relref "/develop/getting-started/install-stack/" >}}), which automatically includes JSON and searching and querying features +- [Redis Stack]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}), which automatically includes JSON and searching and querying features - Redis v6.x or later with the following modules installed and enabled: - RediSearch v2.2 or later - RedisJSON v2.0 or later @@ -30,7 +30,7 @@ Before you can index and search JSON documents, you need a database with either: When you create an index with the [`FT.CREATE`](/commands/ft.create) command, include the `ON JSON` keyword to index any existing and future JSON documents stored in the database. -To define the `SCHEMA`, you can provide [JSONPath]({{< relref "/develop/stack/json/path" >}}) expressions. +To define the `SCHEMA`, you can provide [JSONPath]({{< relref "/develop/data-types/json/path" >}}) expressions. The result of each JSONPath expression is indexed and associated with a logical name called an `attribute` (previously known as a `field`). You can use these attributes in queries. @@ -167,7 +167,7 @@ And lastly, search for the Bluetooth headphones that are most similar to an imag 4) "{\"name\":\"Wireless earbuds\",\"description\":\"Wireless Bluetooth in-ear headphones\",\"connection\":{\"wireless\":true,\"connection\":\"Bluetooth\"},\"price\":64.99,\"stock\":17,\"colors\":[\"black\",\"white\"],\"embedding\":[-0.7,-0.51,0.88,0.14]}" ``` -For more information about search queries, see [Search query syntax]({{< relref "/develop/stack/search/reference/query_syntax" >}}). +For more information about search queries, see [Search query syntax]({{< relref "/develop/interact/search-and-query/advanced-concepts/query_syntax" >}}). {{% alert title="Note" color="info" %}} [`FT.SEARCH`](/commands/ft.search) queries require `attribute` modifiers. 
Don't use JSONPath expressions in queries because the query parser doesn't fully support them. @@ -175,7 +175,7 @@ For more information about search queries, see [Search query syntax]({{< relref ## Index JSON arrays as TAG -If you want to index string or boolean values as TAG within a JSON array, use the [JSONPath]({{< relref "/develop/stack/json/path" >}}) wildcard operator. +If you want to index string or boolean values as TAG within a JSON array, use the [JSONPath]({{< relref "/develop/data-types/json/path" >}}) wildcard operator. To index an item's list of available colors, specify the JSONPath `$.colors.*` in the `SCHEMA` definition during index creation: @@ -424,7 +424,7 @@ Now you can search for the two headphones that are most similar to the image emb 4) "{\"name\":\"Wireless earbuds\",\"description\":\"Wireless Bluetooth in-ear headphones\",\"price\":64.99,\"stock\":17,\"colors\":[\"black\",\"white\"],\"embedding\":[-0.7,-0.51,0.88,0.14]}" ``` -If you want to index multiple numeric arrays as VECTOR, use a [JSONPath]({{< relref "/develop/stack/json/path/" >}}) leading to multiple numeric arrays using JSONPath operators such as wildcard, filter, union, array slice, and/or recursive descent. +If you want to index multiple numeric arrays as VECTOR, use a [JSONPath]({{< relref "/develop/data-types/json/path" >}}) leading to multiple numeric arrays using JSONPath operators such as wildcard, filter, union, array slice, and/or recursive descent. For example, assume that your JSON items include an array of vector embeddings, where each vector represents a different image of the same product. 
To index these vectors, specify the JSONPath `$.embeddings[*]` in the schema definition during index creation: @@ -459,7 +459,7 @@ Now you can search for the two headphones that are most similar to an image embe ``` Note that `0.771500051022` is the L2 distance between the query vector and `[-0.8,-0.15,0.33,-0.01]`, which is the second element in the embedding array, and it is lower than the L2 distance between the query vector and `[-0.7,-0.51,0.88,0.14]`, which is the first element in the embedding array. -For more information on vector similarity syntax, see [Vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). +For more information on vector similarity syntax, see [Vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}). ## Index JSON objects @@ -527,7 +527,7 @@ For example, this query only returns the `name` and `price` of each set of headp ### Project with JSONPath -You can use [JSONPath]({{< relref "/develop/stack/json/path" >}}) expressions in a `RETURN` statement to extract any part of the JSON document, even fields that were not defined in the index `SCHEMA`. +You can use [JSONPath]({{< relref "/develop/data-types/json/path" >}}) expressions in a `RETURN` statement to extract any part of the JSON document, even fields that were not defined in the index `SCHEMA`. For example, the following query uses the JSONPath expression `$.stock` to return each item's stock in addition to the name and price attributes. @@ -577,7 +577,7 @@ This query returns the field as the alias `"stock"` instead of the JSONPath expr ### Highlight search terms -You can [highlight]({{< relref "/develop/stack/search/reference/highlight" >}}) relevant search terms in any indexed `TEXT` attribute. +You can [highlight]({{< relref "/develop/interact/search-and-query/advanced-concepts/highlight" >}}) relevant search terms in any indexed `TEXT` attribute. 
For [`FT.SEARCH`](/commands/ft.search), you have to explicitly set which attributes you want highlighted after the `RETURN` and `HIGHLIGHT` parameters. @@ -606,9 +606,9 @@ For example, highlight the word "bluetooth" with bold HTML tags in item names an ## Aggregate with JSONPath -You can use [aggregation]({{< relref "/develop/interact/search-and-query/search/aggregations/" >}}) to generate statistics or build facet queries. +You can use [aggregation]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}}) to generate statistics or build facet queries. -The `LOAD` option accepts [JSONPath]({{< relref "/develop/stack/json/path" >}}) expressions. You can use any value in the pipeline, even if the value is not indexed. +The `LOAD` option accepts [JSONPath]({{< relref "/develop/data-types/json/path" >}}) expressions. You can use any value in the pipeline, even if the value is not indexed. This example uses aggregation to calculate a 10% price discount for each item and sorts the items from least expensive to most expensive: diff --git a/content/develop/interact/search-and-query/query/combined.md b/content/develop/interact/search-and-query/query/combined.md index f948bc0c15..4075b896f4 100644 --- a/content/develop/interact/search-and-query/query/combined.md +++ b/content/develop/interact/search-and-query/query/combined.md @@ -26,7 +26,7 @@ A combined query is a combination of several query types, such as: You can use logical query operators to combine query expressions for numeric, tag, and text fields. For vector fields, you can combine a KNN query with a pre-filter. {{% alert title="Note" color="warning" %}} -The operators are interpreted slightly differently depending on the query dialect used. The default dialect is `DIALECT 1`; see [this article]({{< relref "/develop/interact/search-and-query/basic-constructs/configuration-parameters/#default_dialect" >}}) for information on how to change the dialect version. 
This article uses the second version of the query dialect, `DIALECT 2`, and uses additional brackets (`(...)`) to help clarify the examples. Further details can be found in the [query syntax documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/query_syntax/" >}}). +The operators are interpreted slightly differently depending on the query dialect used. The default dialect is `DIALECT 1`; see [this article]({{< relref "/develop/interact/search-and-query/basic-constructs/configuration-parameters" >}}) for information on how to change the dialect version. This article uses the second version of the query dialect, `DIALECT 2`, and uses additional brackets (`(...)`) to help clarify the examples. Further details can be found in the [query syntax documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/query_syntax" >}}). {{% /alert %}} The examples in this article use the following schema: diff --git a/content/develop/interact/search-and-query/query/exact-match.md b/content/develop/interact/search-and-query/query/exact-match.md index e522b4da6a..de783e15d1 100644 --- a/content/develop/interact/search-and-query/query/exact-match.md +++ b/content/develop/interact/search-and-query/query/exact-match.md @@ -27,7 +27,7 @@ The examples in this article use a schema with the following fields: | `condition` | `TAG` | | `price` | `NUMERIC` | -You can find more details about creating the index and loading the demo data in the [quick start guide]({{< relref "/develop/interact/search-and-query/quickstart/" >}}). +You can find more details about creating the index and loading the demo data in the [quick start guide]({{< relref "/develop/get-started/document-database" >}}). 
## Numeric field diff --git a/content/develop/interact/search-and-query/query/vector-search.md b/content/develop/interact/search-and-query/query/vector-search.md index c00779b42d..f5cf678e81 100644 --- a/content/develop/interact/search-and-query/query/vector-search.md +++ b/content/develop/interact/search-and-query/query/vector-search.md @@ -15,7 +15,7 @@ title: Vector search weight: 5 --- -This article gives you a good overview of how to perform vector search queries with Redis Stack. See the [Redis as a vector database quick start guide]({{< relref "/develop/get-started/vector-database/" >}}) for more information about Redis as a vector database. You can also find more detailed information about all the parameters in the [vector reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). +This article gives you a good overview of how to perform vector search queries with Redis Stack. See the [Redis as a vector database quick start guide]({{< relref "/develop/get-started/vector-database" >}}) for more information about Redis as a vector database. You can also find more detailed information about all the parameters in the [vector reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}). A vector search query on a vector field allows you to find all vectors in a vector space that are close to a given vector. You can query for the k-nearest neighbors or vectors within a given radius. @@ -80,9 +80,9 @@ FT.SEARCH index "@field:[VECTOR_RANGE radius $vector]=>{$YIELD_DISTANCE_AS: dist Here is a more detailed explanation of this query: -1. **Range query**: the syntax of a radius query is very similar to the regular range query, except for the keyword `VECTOR_RANGE`. You can also combine a vector radius query with other queries in the same way as regular range queries. 
See [combined queries article]({{< relref "/develop/interact/search-and-query/query/combined/" >}}) for more details. +1. **Range query**: the syntax of a radius query is very similar to the regular range query, except for the keyword `VECTOR_RANGE`. You can also combine a vector radius query with other queries in the same way as regular range queries. See [combined queries article]({{< relref "/develop/interact/search-and-query/query/combined" >}}) for more details. 2. **Additional step**: the `=>` arrow means that the range query is followed by evaluating additional parameters. -3. **Range query parameters**: parameters such as `$YIELD_DISTANCE_AS` can be found in the [vectors reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors/" >}}). +3. **Range query parameters**: parameters such as `$YIELD_DISTANCE_AS` can be found in the [vectors reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}). 4. **Vector binary data**: you need to use `PARAMS` to pass the binary representation of the vector. 5. **Dialect**: vector search has been available since version two of the query dialect. diff --git a/content/develop/reference/protocol-spec.md b/content/develop/reference/protocol-spec.md index 4d381b2543..8b46ad27bf 100644 --- a/content/develop/reference/protocol-spec.md +++ b/content/develop/reference/protocol-spec.md @@ -37,7 +37,7 @@ RESP is the protocol you should implement in your Redis client. {{% alert title="Note" color="info" %}} The protocol outlined here is used only for client-server communication. -[Redis Cluster]({{< relref "/develop/reference/cluster-spec" >}}) uses a different binary protocol for exchanging messages between nodes. +[Redis Cluster]({{< relref "/operate/oss_and_stack/reference/cluster-spec" >}}) uses a different binary protocol for exchanging messages between nodes. 
{{% /alert %}} ## RESP versions @@ -69,12 +69,12 @@ This is the simplest model possible; however, there are some exceptions: * Redis requests can be [pipelined](#multiple-commands-and-pipelining). Pipelining enables clients to send multiple commands at once and wait for replies later. -* When a RESP2 connection subscribes to a [Pub/Sub]({{< relref "/develop/manual/pubsub" >}}) channel, the protocol changes semantics and becomes a *push* protocol. +* When a RESP2 connection subscribes to a [Pub/Sub]({{< relref "/develop/interact/pubsub" >}}) channel, the protocol changes semantics and becomes a *push* protocol. The client no longer requires sending commands because the server will automatically send new messages to the client (for the channels the client is subscribed to) as soon as they are received. * The [`MONITOR`](/commands/monitor) command. Invoking the [`MONITOR`](/commands/monitor) command switches the connection to an ad-hoc push mode. The protocol of this mode is not specified but is obvious to parse. -* [Protected mode]({{< relref "/develop/management/security/#protected-mode" >}}). +* [Protected mode]({{< relref "/operate/oss_and_stack/management/security/" >}}). Connections opened from a non-loopback address to a Redis while in protected mode are denied and terminated by the server. Before terminating the connection, Redis unconditionally sends a `-DENIED` reply, regardless of whether the client writes to the socket. * The [RESP3 Push type](#resp3-pushes). @@ -481,7 +481,7 @@ Example: (The raw RESP encoding is split into multiple lines for readability). Some client libraries may ignore the difference between this type and the string type and return a native string in both cases. -However, interactive clients, such as command line interfaces (e.g., [`redis-cli`]({{< relref "/develop/manual/cli" >}})), can use this type and know that their output should be presented to the human user as is and without quoting the string. 
+However, interactive clients, such as command line interfaces (e.g., [`redis-cli`]({{< relref "/develop/connect/cli" >}})), can use this type and know that their output should be presented to the human user as is and without quoting the string. For example, the Redis command [`INFO`](/commands/info) outputs a report that includes newlines. When using RESP3, `redis-cli` displays it correctly because it is sent as a Verbatim String reply (with its three bytes being "txt"). From bc40e3de1f437fa80839c0eb3345d3b05e33d820 Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Mon, 22 Jan 2024 20:07:21 +0100 Subject: [PATCH 09/15] Added a baseurl shortcode --- layouts/shortcodes/baseurl.html | 1 + 1 file changed, 1 insertion(+) create mode 100644 layouts/shortcodes/baseurl.html diff --git a/layouts/shortcodes/baseurl.html b/layouts/shortcodes/baseurl.html new file mode 100644 index 0000000000..b9d49ea11c --- /dev/null +++ b/layouts/shortcodes/baseurl.html @@ -0,0 +1 @@ +{{ .Site.BaseURL }} \ No newline at end of file From 4ae86189ba19baf04c288c57c89766b2d934505d Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Mon, 22 Jan 2024 20:07:47 +0100 Subject: [PATCH 10/15] Instructions about shortcodes in the README --- README.md | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/README.md b/README.md index 75dee7dfac..187979226b 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,16 @@ * **/tailwind.config.js**: This is the Tailwind CSS framwork's configuration file. * **/postcss.config.js**: Needed to make Tailwind statically accessible to the site. 
+## Requirements + +You will need the following tools to build the site locally: + +- `python3`: Python >=3.8 +- `node` and `npm`: Node.js >=19.7.0, and the Node.js package manager >= 9.5.0 +- `hugo`: Hugo site generator >= v0.111.2 as extended edition +- `make`: To run the make script +- `git`: To manage the source code of the documentation + ## Build script and data files The site can be built via `make all`. Here is what's executed: @@ -44,3 +54,26 @@ The build pipeline that is defined within `.github/workflows/main.yml` builds th 5. Authenticate to the GCS bucket 6. Validate the branch name 7. Sync the branch with a GCS folder + + +## Relative links + +We are using the following syntax for Hugo relrefs: + +``` +[Link title]({{< relref "link relative to the site's base url" >}}) +``` + +Here is an example: + +``` +[Data structure store]({{< relref "/develop/get-started/data-store" >}}) +``` + +It's strongly advised to use `relref` because it provides the following advantages: + +1. Links are checked at build time. Any broken reference within the site is reported and prevents a successful build. +2. References are prefixed with the site's base URL, which means that they work as in builds with a different base URL + + +The following needs to be taken into account when using `relref`: The reference `/develop/get-started/data-store` and `/develop/get-started/data-store/` don't mean the same. You must use the trailing slash if the referenced article is an `_index.md` file within a folder (e.g., `.../data-store/` for `.../data-store/_index.md`). Othersise, you should not use the trailing slash (e.g., `.../get-started/data-store.md`).
\ No newline at end of file From 415081b91cc75bf0bc59381957f44c4155653830 Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Tue, 23 Jan 2024 16:29:25 +0100 Subject: [PATCH 11/15] Cleaned some more broken link cases up --- README.md | 12 +- build/migrate.py | 43 +- ...rected_dev_refs.csv => corrected_refs.csv} | 42 +- content/commands/acl-getuser/index.md | 4 +- content/commands/acl-list/index.md | 2 +- content/commands/acl-log/index.md | 4 +- content/commands/acl-setuser/index.md | 12 +- content/commands/acl-whoami/index.md | 2 +- content/commands/acl/index.md | 4 +- content/commands/append/index.md | 10 +- content/commands/auth/index.md | 4 +- content/commands/bf.add/index.md | 6 +- content/commands/bf.card/index.md | 2 +- content/commands/bf.exists/index.md | 4 +- content/commands/bf.info/index.md | 4 +- content/commands/bf.insert/index.md | 16 +- content/commands/bf.loadchunk/index.md | 12 +- content/commands/bf.madd/index.md | 10 +- content/commands/bf.mexists/index.md | 4 +- content/commands/bf.reserve/index.md | 5 +- content/commands/bf.scandump/index.md | 6 +- content/commands/bgrewriteaof/index.md | 2 +- content/commands/bgsave/index.md | 2 +- content/commands/bitcount/index.md | 8 +- content/commands/bitfield/index.md | 12 +- content/commands/bitfield_ro/index.md | 12 +- content/commands/bitop/index.md | 2 +- content/commands/bitpos/index.md | 4 +- content/commands/blmove/index.md | 14 +- content/commands/blmpop/index.md | 8 +- content/commands/blpop/index.md | 18 +- content/commands/brpop/index.md | 4 +- content/commands/brpoplpush/index.md | 14 +- content/commands/bzmpop/index.md | 8 +- content/commands/bzpopmax/index.md | 4 +- content/commands/bzpopmin/index.md | 4 +- content/commands/cf.add/index.md | 4 +- content/commands/cf.addnx/index.md | 8 +- content/commands/cf.count/index.md | 4 +- content/commands/cf.del/index.md | 2 +- content/commands/cf.exists/index.md | 4 +- content/commands/cf.info/index.md | 2 +- content/commands/cf.insert/index.md | 
6 +- content/commands/cf.insertnx/index.md | 10 +- content/commands/cf.loadchunk/index.md | 12 +- content/commands/cf.mexists/index.md | 4 +- content/commands/cf.reserve/index.md | 7 +- content/commands/cf.scandump/index.md | 6 +- content/commands/client-caching/index.md | 2 +- content/commands/client-getname/index.md | 2 +- content/commands/client-getredir/index.md | 2 +- content/commands/client-id/index.md | 4 +- content/commands/client-info/index.md | 2 +- content/commands/client-kill/index.md | 21 +- content/commands/client-list/index.md | 7 +- content/commands/client-no-touch/index.md | 2 +- content/commands/client-pause/index.md | 10 +- content/commands/client-setinfo/index.md | 4 +- content/commands/client-setname/index.md | 6 +- content/commands/client-tracking/index.md | 5 +- content/commands/client-unblock/index.md | 6 +- content/commands/client-unpause/index.md | 2 +- content/commands/client/index.md | 2 +- .../commands/cluster-addslotsrange/index.md | 6 +- content/commands/cluster-delslots/index.md | 2 +- .../commands/cluster-delslotsrange/index.md | 6 +- content/commands/cluster-failover/index.md | 4 +- .../commands/cluster-getkeysinslot/index.md | 2 +- content/commands/cluster-info/index.md | 6 +- content/commands/cluster-meet/index.md | 2 +- content/commands/cluster-nodes/index.md | 2 +- content/commands/cluster-replicas/index.md | 2 +- content/commands/cluster-reset/index.md | 2 +- content/commands/cluster-saveconfig/index.md | 2 +- content/commands/cluster-setslot/index.md | 11 +- content/commands/cluster-shards/index.md | 2 +- content/commands/cluster-slaves/index.md | 6 +- content/commands/cluster-slots/index.md | 2 +- content/commands/cluster/index.md | 2 +- content/commands/cms.incrby/index.md | 2 +- content/commands/cms.info/index.md | 2 +- content/commands/cms.initbydim/index.md | 2 +- content/commands/cms.initbyprob/index.md | 2 +- content/commands/cms.merge/index.md | 7 +- content/commands/cms.query/index.md | 2 +- 
content/commands/command-count/index.md | 2 +- content/commands/command-getkeys/index.md | 6 +- .../commands/command-getkeysandflags/index.md | 6 +- content/commands/command-info/index.md | 4 +- content/commands/command-list/index.md | 7 +- content/commands/command/index.md | 36 +- content/commands/config-resetstat/index.md | 2 +- content/commands/config-rewrite/index.md | 2 +- content/commands/config/index.md | 2 +- content/commands/decr/index.md | 2 +- content/commands/decrby/index.md | 2 +- content/commands/discard/index.md | 2 +- content/commands/dump/index.md | 6 +- content/commands/eval_ro/index.md | 6 +- content/commands/evalsha/index.md | 4 +- content/commands/evalsha_ro/index.md | 6 +- content/commands/exec/index.md | 2 +- content/commands/expire/index.md | 28 +- content/commands/expireat/index.md | 4 +- content/commands/expiretime/index.md | 2 +- content/commands/failover/index.md | 6 +- content/commands/fcall/index.md | 2 +- content/commands/fcall_ro/index.md | 4 +- content/commands/ft._list/index.md | 4 +- content/commands/ft.aggregate/index.md | 77 +- content/commands/ft.aliasadd/index.md | 4 +- content/commands/ft.aliasdel/index.md | 4 +- content/commands/ft.aliasupdate/index.md | 4 +- content/commands/ft.alter/index.md | 6 +- content/commands/ft.config-get/index.md | 4 +- content/commands/ft.config-help/index.md | 4 +- content/commands/ft.config-set/index.md | 6 +- content/commands/ft.create/index.md | 87 +- content/commands/ft.cursor-del/index.md | 4 +- content/commands/ft.cursor-read/index.md | 8 +- content/commands/ft.dictadd/index.md | 4 +- content/commands/ft.dictdel/index.md | 4 +- content/commands/ft.dictdump/index.md | 4 +- content/commands/ft.dropindex/index.md | 14 +- content/commands/ft.explain/index.md | 12 +- content/commands/ft.explaincli/index.md | 12 +- content/commands/ft.info/index.md | 8 +- content/commands/ft.profile/index.md | 14 +- content/commands/ft.search/index.md | 121 +-- content/commands/ft.spellcheck/index.md | 25 +- 
content/commands/ft.sugadd/index.md | 11 +- content/commands/ft.sugdel/index.md | 4 +- content/commands/ft.sugget/index.md | 12 +- content/commands/ft.suglen/index.md | 4 +- content/commands/ft.syndump/index.md | 4 +- content/commands/ft.synupdate/index.md | 11 +- content/commands/ft.tagvals/index.md | 10 +- content/commands/function-dump/index.md | 6 +- content/commands/function-list/index.md | 2 +- content/commands/function-load/index.md | 2 +- content/commands/function-stats/index.md | 2 +- content/commands/function/index.md | 2 +- content/commands/geoadd/index.md | 9 +- content/commands/geodist/index.md | 2 +- content/commands/geohash/index.md | 2 +- content/commands/geopos/index.md | 2 +- content/commands/georadius/index.md | 16 +- content/commands/georadius_ro/index.md | 9 +- content/commands/georadiusbymember/index.md | 14 +- .../commands/georadiusbymember_ro/index.md | 9 +- content/commands/geosearch/index.md | 15 +- content/commands/geosearchstore/index.md | 13 +- content/commands/getdel/index.md | 2 +- content/commands/getex/index.md | 9 +- content/commands/getset/index.md | 8 +- content/commands/hello/index.md | 8 +- content/commands/hincrbyfloat/index.md | 6 +- content/commands/hmset/index.md | 2 +- content/commands/hrandfield/index.md | 2 +- content/commands/hscan/index.md | 2 +- content/commands/incr/index.md | 18 +- content/commands/incrby/index.md | 2 +- content/commands/incrbyfloat/index.md | 2 +- content/commands/info/index.md | 20 +- content/commands/json.arrappend/index.md | 10 +- content/commands/json.arrindex/index.md | 10 +- content/commands/json.arrinsert/index.md | 10 +- content/commands/json.arrlen/index.md | 10 +- content/commands/json.arrpop/index.md | 10 +- content/commands/json.arrtrim/index.md | 8 +- content/commands/json.clear/index.md | 8 +- content/commands/json.debug-help/index.md | 8 +- content/commands/json.debug-memory/index.md | 8 +- content/commands/json.del/index.md | 8 +- content/commands/json.forget/index.md | 2 +- 
content/commands/json.get/index.md | 13 +- content/commands/json.merge/index.md | 8 +- content/commands/json.mget/index.md | 8 +- content/commands/json.mset/index.md | 8 +- content/commands/json.numincrby/index.md | 8 +- content/commands/json.nummultby/index.md | 8 +- content/commands/json.objkeys/index.md | 8 +- content/commands/json.objlen/index.md | 8 +- content/commands/json.resp/index.md | 12 +- content/commands/json.set/index.md | 8 +- content/commands/json.strappend/index.md | 8 +- content/commands/json.strlen/index.md | 8 +- content/commands/json.toggle/index.md | 8 +- content/commands/json.type/index.md | 8 +- content/commands/keys/index.md | 2 +- content/commands/lastsave/index.md | 4 +- content/commands/latency-graph/index.md | 2 +- content/commands/latency-histogram/index.md | 2 +- content/commands/latency-latest/index.md | 2 +- content/commands/latency/index.md | 2 +- content/commands/lmove/index.md | 12 +- content/commands/lmpop/index.md | 8 +- content/commands/lpushx/index.md | 2 +- content/commands/lset/index.md | 2 +- content/commands/ltrim/index.md | 2 +- content/commands/memory-stats/index.md | 18 +- content/commands/memory/index.md | 2 +- content/commands/migrate/index.md | 13 +- content/commands/module-loadex/index.md | 5 +- content/commands/module-unload/index.md | 2 +- content/commands/module/index.md | 2 +- content/commands/monitor/index.md | 12 +- content/commands/move/index.md | 2 +- content/commands/mset/index.md | 4 +- content/commands/multi/index.md | 2 +- content/commands/object/index.md | 2 +- content/commands/pexpire/index.md | 2 +- content/commands/pexpireat/index.md | 2 +- content/commands/pexpiretime/index.md | 2 +- content/commands/pfadd/index.md | 2 +- content/commands/pfcount/index.md | 6 +- content/commands/psetex/index.md | 4 +- content/commands/psubscribe/index.md | 8 +- content/commands/pttl/index.md | 4 +- content/commands/pubsub-channels/index.md | 2 +- content/commands/pubsub-numpat/index.md | 4 +- 
content/commands/pubsub-numsub/index.md | 2 +- content/commands/pubsub-shardnumsub/index.md | 2 +- content/commands/pubsub/index.md | 2 +- content/commands/readwrite/index.md | 2 +- content/commands/rename/index.md | 2 +- content/commands/reset/index.md | 20 +- content/commands/restore-asking/index.md | 3 +- content/commands/restore/index.md | 7 +- content/commands/rpoplpush/index.md | 12 +- content/commands/rpushx/index.md | 2 +- content/commands/save/index.md | 2 +- content/commands/scan/index.md | 32 +- content/commands/script-debug/index.md | 4 +- content/commands/script-exists/index.md | 6 +- content/commands/script-flush/index.md | 2 +- content/commands/script-kill/index.md | 4 +- content/commands/script-load/index.md | 6 +- content/commands/script/index.md | 2 +- content/commands/sdiffstore/index.md | 2 +- content/commands/select/index.md | 4 +- content/commands/set/index.md | 9 +- content/commands/setbit/index.md | 16 +- content/commands/setex/index.md | 4 +- content/commands/setnx/index.md | 16 +- content/commands/setrange/index.md | 2 +- content/commands/shutdown/index.md | 4 +- content/commands/sintercard/index.md | 2 +- content/commands/sinterstore/index.md | 2 +- content/commands/slaveof/index.md | 4 +- content/commands/slowlog-get/index.md | 2 +- content/commands/slowlog-len/index.md | 2 +- content/commands/slowlog/index.md | 2 +- content/commands/smembers/index.md | 2 +- content/commands/sort/index.md | 17 +- content/commands/sort_ro/index.md | 13 +- content/commands/spop/index.md | 2 +- content/commands/srandmember/index.md | 2 +- content/commands/sscan/index.md | 2 +- content/commands/subscribe/index.md | 10 +- content/commands/substr/index.md | 2 +- content/commands/sunionstore/index.md | 2 +- content/commands/sync/index.md | 2 +- content/commands/tdigest.add/index.md | 2 +- content/commands/tdigest.byrank/index.md | 2 +- content/commands/tdigest.byrevrank/index.md | 2 +- content/commands/tdigest.cdf/index.md | 2 +- 
content/commands/tdigest.create/index.md | 2 +- content/commands/tdigest.info/index.md | 22 +- content/commands/tdigest.max/index.md | 2 +- content/commands/tdigest.merge/index.md | 5 +- content/commands/tdigest.min/index.md | 2 +- content/commands/tdigest.quantile/index.md | 2 +- content/commands/tdigest.rank/index.md | 2 +- content/commands/tdigest.reset/index.md | 2 +- content/commands/tdigest.revrank/index.md | 2 +- .../commands/tdigest.trimmed_mean/index.md | 2 +- content/commands/tfcall/index.md | 2 +- content/commands/tfcallasync/index.md | 2 +- content/commands/tfunction-delete/index.md | 7 +- content/commands/tfunction-list/index.md | 3 +- content/commands/tfunction-load/index.md | 7 +- content/commands/topk.add/index.md | 2 +- content/commands/topk.count/index.md | 2 +- content/commands/topk.incrby/index.md | 2 +- content/commands/topk.info/index.md | 2 +- content/commands/topk.list/index.md | 6 +- content/commands/topk.query/index.md | 2 +- content/commands/topk.reserve/index.md | 2 +- content/commands/ts.add/index.md | 44 +- content/commands/ts.alter/index.md | 29 +- content/commands/ts.create/index.md | 42 +- content/commands/ts.createrule/index.md | 20 +- content/commands/ts.decrby/index.md | 32 +- content/commands/ts.del/index.md | 6 +- content/commands/ts.deleterule/index.md | 6 +- content/commands/ts.get/index.md | 12 +- content/commands/ts.incrby/index.md | 32 +- content/commands/ts.info/index.md | 48 +- content/commands/ts.madd/index.md | 8 +- content/commands/ts.mget/index.md | 20 +- content/commands/ts.mrange/index.md | 61 +- content/commands/ts.mrevrange/index.md | 59 +- content/commands/ts.queryindex/index.md | 12 +- content/commands/ts.range/index.md | 30 +- content/commands/ts.revrange/index.md | 30 +- content/commands/ttl/index.md | 2 +- content/commands/unlink/index.md | 6 +- content/commands/unwatch/index.md | 2 +- content/commands/wait/index.md | 2 +- content/commands/waitaof/index.md | 4 +- content/commands/xack/index.md | 8 +- 
content/commands/xadd/index.md | 11 +- content/commands/xautoclaim/index.md | 11 +- content/commands/xclaim/index.md | 14 +- content/commands/xgroup-create/index.md | 3 +- .../commands/xgroup-createconsumer/index.md | 4 +- content/commands/xgroup-setid/index.md | 2 +- content/commands/xgroup/index.md | 2 +- content/commands/xinfo-consumers/index.md | 4 +- content/commands/xinfo-groups/index.md | 6 +- content/commands/xinfo-stream/index.md | 4 +- content/commands/xinfo/index.md | 2 +- content/commands/xlen/index.md | 4 +- content/commands/xpending/index.md | 22 +- content/commands/xrange/index.md | 10 +- content/commands/xread/index.md | 21 +- content/commands/xreadgroup/index.md | 31 +- content/commands/xrevrange/index.md | 2 +- content/commands/xsetid/index.md | 3 +- content/commands/zadd/index.md | 12 +- content/commands/zcount/index.md | 4 +- content/commands/zdiff/index.md | 2 +- content/commands/zinter/index.md | 7 +- content/commands/zintercard/index.md | 2 +- content/commands/zinterstore/index.md | 5 +- content/commands/zlexcount/index.md | 4 +- content/commands/zmpop/index.md | 8 +- content/commands/zrandmember/index.md | 2 +- content/commands/zrange/index.md | 9 +- content/commands/zrangebylex/index.md | 2 +- content/commands/zrangebyscore/index.md | 2 +- content/commands/zrangestore/index.md | 7 +- content/commands/zrank/index.md | 2 +- content/commands/zremrangebylex/index.md | 2 +- content/commands/zrevrange/index.md | 4 +- content/commands/zrevrangebylex/index.md | 4 +- content/commands/zrevrangebyscore/index.md | 4 +- content/commands/zrevrank/index.md | 2 +- content/commands/zscan/index.md | 2 +- content/commands/zunion/index.md | 7 +- content/commands/zunionstore/index.md | 3 +- content/develop/_index.md | 30 +- content/develop/connect/cli.md | 32 +- content/develop/connect/clients/_index.md | 2 +- content/develop/connect/clients/dotnet.md | 2 +- content/develop/connect/clients/java.md | 2 +- .../clients/om-clients/stack-python.md | 2 +- 
.../clients/om-clients/stack-spring.md | 6 +- content/develop/connect/clients/python.md | 4 +- .../tutorials/insight-stream-consumer.md | 10 +- content/develop/data-types/_index.md | 20 +- content/develop/data-types/bitfields.md | 6 +- content/develop/data-types/bitmaps.md | 26 +- content/develop/data-types/geospatial.md | 4 +- content/develop/data-types/hashes.md | 16 +- content/develop/data-types/json/_index.md | 16 +- content/develop/data-types/json/developer.md | 2 +- content/develop/data-types/json/path.md | 8 +- .../data-types/json/performance/_index.md | 2 +- content/develop/data-types/json/ram.md | 2 +- content/develop/data-types/lists.md | 74 +- .../data-types/probabilistic/Configuration.md | 2 +- .../probabilistic/count-min-sketch.md | 2 +- .../data-types/probabilistic/cuckoo-filter.md | 2 +- .../data-types/probabilistic/hyperloglogs.md | 20 +- .../data-types/probabilistic/t-digest.md | 6 +- .../develop/data-types/probabilistic/top-k.md | 10 +- content/develop/data-types/sets.md | 28 +- content/develop/data-types/sorted-sets.md | 38 +- content/develop/data-types/streams.md | 150 ++-- content/develop/data-types/strings.md | 42 +- .../data-types/timeseries/configuration.md | 14 +- .../data-types/timeseries/quickstart.md | 12 +- content/develop/get-started/data-store.md | 8 +- .../develop/get-started/document-database.md | 8 +- content/develop/get-started/faq.md | 2 +- .../develop/get-started/vector-database.md | 2 +- .../interact/programmability/_index.md | 18 +- .../interact/programmability/eval-intro.md | 66 +- .../programmability/functions-intro.md | 44 +- .../interact/programmability/lua-api.md | 34 +- .../triggers-and-functions/Configuration.md | 6 +- .../triggers-and-functions/Debugging.md | 4 +- .../triggers-and-functions/Quick_Start_CLI.md | 6 +- .../triggers-and-functions/Quick_Start_RI.md | 4 +- .../concepts/Cluster_Support.md | 2 +- .../concepts/JavaScript_API.md | 4 +- .../concepts/Library_Configuration.md | 4 +- .../concepts/Sync_Async.md | 6 
+- .../concepts/triggers/KeySpace_Triggers.md | 4 +- .../concepts/triggers/Stream_Triggers.md | 6 +- .../concepts/triggers/User_Functions.md | 2 +- content/develop/interact/pubsub.md | 40 +- .../search-and-query/administration/design.md | 4 +- .../administration/overview.md | 8 +- .../advanced-concepts/_index.md | 6 +- .../advanced-concepts/aggregations.md | 14 +- .../advanced-concepts/dialects.md | 20 +- .../advanced-concepts/query_syntax.md | 16 +- .../advanced-concepts/scoring.md | 2 +- .../advanced-concepts/sorting.md | 2 +- .../advanced-concepts/spellcheck.md | 4 +- .../advanced-concepts/stemming.md | 2 +- .../advanced-concepts/stopwords.md | 4 +- .../advanced-concepts/tags.md | 2 +- .../advanced-concepts/vectors.md | 20 +- .../configuration-parameters.md | 22 +- .../field-and-type-options.md | 8 +- .../basic-constructs/schema-definition.md | 4 +- .../search-and-query/deprecated/payloads.md | 2 +- .../search-and-query/indexing/_index.md | 24 +- .../interact/search-and-query/query/_index.md | 6 +- .../search-and-query/query/aggregation.md | 6 +- .../search-and-query/query/combined.md | 4 +- .../search-and-query/query/geo-spatial.md | 2 +- .../interact/search-and-query/query/range.md | 4 +- .../search-and-query/query/vector-search.md | 6 +- content/develop/interact/transactions.md | 90 +-- content/develop/manual/_index.md | 16 - content/develop/manual/client-side-caching.md | 344 -------- .../develop/manual/keyspace-notifications.md | 191 ----- content/develop/manual/keyspace.md | 154 ---- content/develop/manual/patterns/_index.md | 18 - .../develop/manual/patterns/bulk-loading.md | 156 ---- .../manual/patterns/distributed-locks.md | 242 ------ .../manual/patterns/indexes/2idx_0.png | Bin 23007 -> 0 bytes .../manual/patterns/indexes/2idx_1.png | Bin 11649 -> 0 bytes .../manual/patterns/indexes/2idx_2.png | Bin 13973 -> 0 bytes .../develop/manual/patterns/indexes/index.md | 755 ------------------ .../develop/manual/patterns/twitter-clone.md | 460 ----------- 
content/develop/manual/pipelining/index.md | 186 ----- .../manual/pipelining/pipeline_iops.png | Bin 14577 -> 0 bytes content/develop/reference/clients.md | 24 +- .../develop/reference/command-arguments.md | 16 +- content/develop/reference/command-tips.md | 34 +- content/develop/reference/eviction/index.md | 4 +- content/develop/reference/key-specs.md | 46 +- content/develop/reference/modules/_index.md | 20 +- .../reference/modules/modules-api-ref.md | 46 +- .../reference/modules/modules-blocking-ops.md | 4 +- .../reference/modules/modules-native-types.md | 8 +- content/develop/reference/protocol-spec.md | 40 +- content/develop/reference/sentinel-clients.md | 8 +- content/develop/use/client-side-caching.md | 12 +- content/develop/use/keyspace-notifications.md | 98 +-- content/develop/use/keyspace.md | 42 +- content/develop/use/manual/_index.md | 16 - .../develop/use/manual/client-side-caching.md | 344 -------- .../use/manual/keyspace-notifications.md | 191 ----- content/develop/use/manual/keyspace.md | 154 ---- content/develop/use/manual/patterns/_index.md | 18 - .../use/manual/patterns/bulk-loading.md | 156 ---- .../use/manual/patterns/distributed-locks.md | 242 ------ .../use/manual/patterns/indexes/2idx_0.png | Bin 23007 -> 0 bytes .../use/manual/patterns/indexes/2idx_1.png | Bin 11649 -> 0 bytes .../use/manual/patterns/indexes/2idx_2.png | Bin 13973 -> 0 bytes .../use/manual/patterns/indexes/index.md | 755 ------------------ .../use/manual/patterns/twitter-clone.md | 460 ----------- .../develop/use/manual/pipelining/index.md | 186 ----- .../use/manual/pipelining/pipeline_iops.png | Bin 14577 -> 0 bytes .../develop/use/patterns/distributed-locks.md | 12 +- content/develop/use/patterns/indexes/index.md | 40 +- content/develop/use/patterns/twitter-clone.md | 46 +- content/develop/use/pipelining/index.md | 6 +- 475 files changed, 2113 insertions(+), 7389 deletions(-) rename build/migrate/{corrected_dev_refs.csv => corrected_refs.csv} (76%) delete mode 100644 
content/develop/manual/_index.md delete mode 100644 content/develop/manual/client-side-caching.md delete mode 100644 content/develop/manual/keyspace-notifications.md delete mode 100644 content/develop/manual/keyspace.md delete mode 100644 content/develop/manual/patterns/_index.md delete mode 100644 content/develop/manual/patterns/bulk-loading.md delete mode 100644 content/develop/manual/patterns/distributed-locks.md delete mode 100644 content/develop/manual/patterns/indexes/2idx_0.png delete mode 100644 content/develop/manual/patterns/indexes/2idx_1.png delete mode 100644 content/develop/manual/patterns/indexes/2idx_2.png delete mode 100644 content/develop/manual/patterns/indexes/index.md delete mode 100644 content/develop/manual/patterns/twitter-clone.md delete mode 100644 content/develop/manual/pipelining/index.md delete mode 100644 content/develop/manual/pipelining/pipeline_iops.png delete mode 100644 content/develop/use/manual/_index.md delete mode 100644 content/develop/use/manual/client-side-caching.md delete mode 100644 content/develop/use/manual/keyspace-notifications.md delete mode 100644 content/develop/use/manual/keyspace.md delete mode 100644 content/develop/use/manual/patterns/_index.md delete mode 100644 content/develop/use/manual/patterns/bulk-loading.md delete mode 100644 content/develop/use/manual/patterns/distributed-locks.md delete mode 100644 content/develop/use/manual/patterns/indexes/2idx_0.png delete mode 100644 content/develop/use/manual/patterns/indexes/2idx_1.png delete mode 100644 content/develop/use/manual/patterns/indexes/2idx_2.png delete mode 100644 content/develop/use/manual/patterns/indexes/index.md delete mode 100644 content/develop/use/manual/patterns/twitter-clone.md delete mode 100644 content/develop/use/manual/pipelining/index.md delete mode 100644 content/develop/use/manual/pipelining/pipeline_iops.png diff --git a/README.md b/README.md index 187979226b..1700cbb1b1 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,9 @@ The 
build pipeline that is defined within `.github/workflows/main.yml` builds th 7. Sync the branch with a GCS folder -## Relative links +## Hugo specifics + +### Relative links  We are using the following syntax for Hugo relrefs: @@ -76,4 +78,10 @@ It's strongly advised to use `relref` because it provides the following advantag 2. References are prefixed with the site's base URL, which means that they work as in builds with a different base URL -The following needs to be taken into account when using `relref`: The reference `/develop/get-started/data-store` and `/develop/get-started/data-store/` don't mean the same. You must use the trailing slash if the referenced article is an `_index.md` file within a folder (e.g., `.../data-store/` for `.../data-store/_index.md`). Othersise, you should not use the trailing slash (e.g., `.../get-started/data-store.md`). \ No newline at end of file +The following needs to be taken into account when using `relref`: The reference `/develop/get-started/data-store` and `/develop/get-started/data-store/` don't mean the same. You must use the trailing slash if the referenced article is an `_index.md` file within a folder (e.g., `.../data-store/` for `.../data-store/_index.md`). Otherwise, you should not use the trailing slash (e.g., `.../get-started/data-store.md`). + +RelRefs with dots (`.`) and hashtags (`#`) in the reference name, such as `/commands/ft.create` or `/develop/data-types/timeseries/configuration#compaction_policy`, don't seem to work. Please use the `{{< baseurl >}}` as a workaround in that case.
Here is an example: + +``` +[compaction]({{< baseurl >}}/develop/data-types/timeseries/configuration#compaction_policy) +``` \ No newline at end of file diff --git a/build/migrate.py b/build/migrate.py index caa51e64cc..2435239316 100755 --- a/build/migrate.py +++ b/build/migrate.py @@ -118,6 +118,17 @@ def _load_csv_file(file_path): return result +''' +The replace link function that is passed over to re.sub +''' +def _replace_link(match, new_prefix): + # Relrefs don't like dots in the link + if '.' in match.group(3): + return match.group(1) + '{{< baseurl >}}' + new_prefix + match.group(3) + match.group(4) + else: + return match.group(1) + '{{< relref "' + new_prefix + match.group(3) + '" >}}' + match.group(4) + + ''' Replace the link within the file ''' @@ -126,13 +137,20 @@ def replace_links_in_file(file_path, old_prefix, new_prefix): file_content = file.read() link_pattern = re.compile(r'(\[.*?\]\()(' + re.escape(old_prefix) + r')(.*?)' + r'(\))') - #updated_content = re.sub(link_pattern, r'\1' + '{{ relURL "" }}' + new_prefix + r'\3', file_content) - updated_content = re.sub(link_pattern, r'\1' + '{{< relref "' + new_prefix + r'\3' + '" >}}' + r'\4', file_content) + #updated_content = re.sub(link_pattern, r'\1' + '{{< relref "' + new_prefix + r'\3' + '" >}}' + r'\4', file_content) + updated_content = re.sub(link_pattern, lambda match: _replace_link(match, new_prefix), file_content) - corrected_links = _load_csv_file('./migrate/corrected_dev_refs.csv') + # Correct links based on a list + corrected_links = _load_csv_file('./migrate/corrected_refs.csv') for k in corrected_links: - updated_content = updated_content.replace('{{< relref "' + k + '" >}}', '{{< relref "' + corrected_links[k] + '" >}}') + # Relrefs don't like dots and hashtags in the link + if '.' 
in corrected_links[k]: + updated_content = updated_content.replace('{{< relref "' + k + '" >}}', '{{< baseurl >}}' + corrected_links[k]) + elif '#' in k: + updated_content = updated_content.replace('{{< relref "' + k + '" >}}', '{{< baseurl >}}' + corrected_links[k] + '#' + k.split('#')[1]) + else: + updated_content = updated_content.replace('{{< relref "' + k + '" >}}', '{{< relref "' + corrected_links[k] + '" >}}') with open(file_path, 'w', encoding='utf-8') as file: file.write(updated_content) @@ -292,19 +310,30 @@ def fetch_io(): def migrate_commands(): copy_files(DOCS_SRC_CMD, DOCS_CMD) markdown_files = find_markdown_files(DOCS_CMD) + for f in markdown_files: add_categories(f, 'categories', ['docs', 'develop', 'stack', 'oss', 'rs', 'rc', 'oss', 'kubernetes', 'clients']) - + remove_prop_from_file(f, "aliases") + replace_links_in_file(f, '/docs', '/develop') + replace_links_in_file(f, '/commands', '/commands') ''' Migrate the developer documentation ''' def migrate_developer_docs(): + + create_index_file(DOCS_DEV, 'Develop', 'Learn how to develop with Redis') + dev_content = ['get-started', 'connect', 'data-types', 'interact', 'manual', 'reference'] for topic in dev_content: source = slash(DOCS_SRC_DOCS, topic) - target = slash(DOCS_DEV, topic) + target = slash(DOCS_DEV, topic) + + # Rename manual to use + if (topic == 'manual'): + target = slash(DOCS_DEV, 'use') + copy_files(source, target) excluded_content = ['reference/signals.md', 'reference/cluster-spec.md', 'reference/arm.md', 'reference/internals'] @@ -320,6 +349,8 @@ def migrate_developer_docs(): for f in markdown_files: print("Replacing links in {}".format(f)) replace_links_in_file(f, '/docs', '/develop') + # Ensures that the URL-s are rewritten in relrefs + replace_links_in_file(f, '/commands', '/commands') remove_prop_from_file(f, "aliases") add_categories(f, 'categories', ['docs', 'develop', 'stack', 'oss', 'rs', 'rc', 'oss', 'kubernetes', 'clients']) diff --git a/build/migrate/corrected_dev_refs.csv 
b/build/migrate/corrected_refs.csv similarity index 76% rename from build/migrate/corrected_dev_refs.csv rename to build/migrate/corrected_refs.csv index 980c6b61dc..04533f7d07 100644 --- a/build/migrate/corrected_dev_refs.csv +++ b/build/migrate/corrected_refs.csv @@ -1,10 +1,11 @@ broken_ref;fixed_ref;comment +#-- Developer docs;; /develop/about/about-stack/;/operate/oss_and_stack/;Misses an introduction of Redis and Redis Stack /develop/clients/om-clients/stack-dotnet/;/develop/connect/clients/om-clients/stack-dotnet;Referenced files should not have a trailing slash /develop/clients/om-clients/stack-node/;/develop/connect/clients/om-clients/stack-node; /develop/clients/om-clients/stack-python/;/develop/connect/clients/om-clients/stack-python; /develop/clients/om-clients/stack-spring/;/develop/connect/clients/om-clients/stack-spring; -/develop/commands/ft.create.md;/commands/FT.CREATE; +/develop/commands/ft.create.md;/commands/ft.create; /develop/connect/clients/om-clients/stack-dotnet/;/develop/connect/clients/om-clients/stack-dotnet; /develop/connect/clients/om-clients/stack-node/;/develop/connect/clients/om-clients/stack-node; /develop/connect/clients/om-clients/stack-python/;/develop/connect/clients/om-clients/stack-python; @@ -108,4 +109,41 @@ broken_ref;fixed_ref;comment /develop/stack/search/reference/query_syntax/#query-attributes;/develop/interact/search-and-query/advanced-concepts/query_syntax; /develop/stack/search/reference/vectors/;/develop/interact/search-and-query/advanced-concepts/vectors; /develop/stack/search/reference/vectors/#runtime-attributes;/develop/interact/search-and-query/advanced-concepts/vectors; -/develop/stack/timeseries/;/develop/data-types/timeseries/; \ No newline at end of file +/develop/stack/timeseries/;/develop/data-types/timeseries/; +#-- Command docs;; +/commands/module-load/;/commands/module-load; +/commands/auth/;/commands/auth; +/commands/scan/;/commands/scan; 
+/develop/management/security/acl;/operate/oss_and_stack/management/security/acl; +/develop/management/security/acl#selectors;/operate/oss_and_stack/management/security/acl; +/develop/management/security/acl/#command-categories;/operate/oss_and_stack/management/security/acl; +/develop/management/security/acl/;/operate/oss_and_stack/management/security/acl; +/develop/manual/programmability/;/develop/interact/programmability/; +/develop/manual/programmability/#read-only-scripts;/develop/interact/programmability; +/develop/interact/search-and-query/search/aggregations/;/develop/interact/search-and-query/advanced-concepts/aggregations; +/develop/interact/search-and-query/search/aggregations;/develop/interact/search-and-query/advanced-concepts/aggregations; +/develop/interact/search-and-query/search/aggregations/#supported-groupby-reducers;/develop/interact/search-and-query/advanced-concepts/aggregations; +/develop/stack/search/reference/aggregations/;/develop/interact/search-and-query/advanced-concepts/aggregations; +/develop/stack/search/reference/aggregations;/develop/interact/search-and-query/advanced-concepts/aggregations; +/develop/stack/search/reference/aggregations/#cursor-api;/develop/interact/search-and-query/advanced-concepts/aggregations; +/develop/reference/protocol-spec/;/develop/reference/protocol-spec; +/develop/reference/protocol-spec/#resp-integers;/develop/reference/protocol-spec; +/develop/reference/protocol-spec/#resp-arrays;/develop/reference/protocol-spec; +/develop/reference/protocol-spec/#resp-simple-strings;/develop/reference/protocol-spec; +/develop/reference/protocol-spec/#resp-errors;/develop/reference/protocol-spec; +/develop/stack/search/configuring;/develop/interact/search-and-query/administration; +/develop/interact/search-and-query/advanced-concepts/spellcheck/;/develop/interact/search-and-query/advanced-concepts/spellcheck; 
+/develop/interact/search-and-query/advanced-concepts/highlight/;/develop/interact/search-and-query/advanced-concepts/highlight; +/develop/interact/search-and-query/advanced-concepts/scoring/;/develop/interact/search-and-query/advanced-concepts/scoring; +/develop/stack/search/reference/tags;/develop/interact/search-and-query/advanced-concepts/tags; +/develop/manual/keyspace;/develop/use/keyspace; +/develop/stack/timeseries/configuration/;/develop/data-types/timeseries/configuration; +/develop/stack/timeseries/configuration/#compaction_policy;/develop/data-types/timeseries/configuration; +/develop/stack/timeseries/configuration/#duplicate_policy;/develop/data-types/timeseries/configuration; +/develop/stack/timeseries/configuration/#retention_policy;/develop/data-types/timeseries/configuration; +/develop/stack/timeseries/configuration/#chunk_size_bytes;/develop/data-types/timeseries/configuration; +/develop/stack/timeseries;/develop/data-types/timeseries/; +/develop/manual/;/develop/use/; + + + diff --git a/content/commands/acl-getuser/index.md b/content/commands/acl-getuser/index.md index 54c3e561c0..77083b28f1 100644 --- a/content/commands/acl-getuser/index.md +++ b/content/commands/acl-getuser/index.md @@ -46,8 +46,8 @@ The command returns all the rules defined for an existing ACL user. Specifically, it lists the user's ACL flags, password hashes, commands, key patterns, channel patterns (Added in version 6.2) and selectors (Added in version 7.0). Additional information may be returned in the future if more metadata is added to the user. -Command rules are always returned in the same format as the one used in the [`ACL SETUSER`](/commands/acl-setuser) command. -Before version 7.0, keys and channels were returned as an array of patterns, however in version 7.0 later they are now also returned in same format as the one used in the [`ACL SETUSER`](/commands/acl-setuser) command. 
+Command rules are always returned in the same format as the one used in the [`ACL SETUSER`]({{< relref "/commands/acl-setuser" >}}) command. +Before version 7.0, keys and channels were returned as an array of patterns, however in version 7.0 later they are now also returned in same format as the one used in the [`ACL SETUSER`]({{< relref "/commands/acl-setuser" >}}) command. Note: This description of command rules reflects the user's effective permissions, so while it may not be identical to the set of rules used to configure the user, it is still functionally identical. Selectors are listed in the order they were applied to the user, and include information about commands, key patterns, and channel patterns. diff --git a/content/commands/acl-list/index.md b/content/commands/acl-list/index.md index a00a2be016..aa440df68c 100644 --- a/content/commands/acl-list/index.md +++ b/content/commands/acl-list/index.md @@ -34,7 +34,7 @@ The command shows the currently active ACL rules in the Redis server. Each line in the returned array defines a different user, and the format is the same used in the redis.conf file or the external ACL file, so you can cut and paste what is returned by the ACL LIST command directly inside a -configuration file if you wish (but make sure to check [`ACL SAVE`](/commands/acl-save)). +configuration file if you wish (but make sure to check [`ACL SAVE`]({{< relref "/commands/acl-save" >}})). ## Examples diff --git a/content/commands/acl-log/index.md b/content/commands/acl-log/index.md index c30a619f4a..2854c3ac9c 100644 --- a/content/commands/acl-log/index.md +++ b/content/commands/acl-log/index.md @@ -47,12 +47,12 @@ title: ACL LOG --- The command shows a list of recent ACL security events: -1. Failures to authenticate their connections with [`AUTH`](/commands/auth) or [`HELLO`](/commands/hello). +1. Failures to authenticate their connections with [`AUTH`]({{< relref "/commands/auth" >}}) or [`HELLO`]({{< relref "/commands/hello" >}}). 2. 
Commands denied because against the current ACL rules. 3. Commands denied because accessing keys not allowed in the current ACL rules. The optional argument specifies how many entries to show. By default -up to ten failures are returned. The special [`RESET`](/commands/reset) argument clears the log. +up to ten failures are returned. The special [`RESET`]({{< relref "/commands/reset" >}}) argument clears the log. Entries are displayed starting from the most recent. ## Examples diff --git a/content/commands/acl-setuser/index.md b/content/commands/acl-setuser/index.md index a683026731..cba9e1b681 100644 --- a/content/commands/acl-setuser/index.md +++ b/content/commands/acl-setuser/index.md @@ -63,7 +63,7 @@ Then, you can use another `ACL SETUSER` call to modify the user rules: ACL SETUSER virginia +get -The above rule applies the new rule to the user `virginia`, so other than [`SET`](/commands/set), the user `virginia` can now also use the [`GET`](/commands/get) command. +The above rule applies the new rule to the user `virginia`, so other than [`SET`]({{< relref "/commands/set" >}}), the user `virginia` can now also use the [`GET`]({{< relref "/commands/get" >}}) command. Starting from Redis 7.0, ACL rules can also be grouped into multiple distinct sets of rules, called _selectors_. Selectors are added by wrapping the rules in parentheses and providing them just like any other rule. @@ -74,7 +74,7 @@ For example: This sets a user with two sets of permissions, one defined on the user and one defined with a selector. The root user permissions only allow executing the get command, but can be executed on any keys. -The selector then grants a secondary set of permissions: access to the [`SET`](/commands/set) command to be executed on any key that starts with `app1`. +The selector then grants a secondary set of permissions: access to the [`SET`]({{< relref "/commands/set" >}}) command to be executed on any key that starts with `app1`. 
Using multiple selectors allows you to grant permissions that are different depending on what keys are being accessed. When we want to be sure to define a user from scratch, without caring if @@ -103,7 +103,7 @@ This is a list of all the supported Redis ACL rules: ### Command rules -* `~`: Adds the specified key pattern (glob style pattern, like in the [`KEYS`](/commands/keys) command), to the list of key patterns accessible by the user. This grants both read and write permissions to keys that match the pattern. You can add multiple key patterns to the same user. Example: `~objects:*` +* `~`: Adds the specified key pattern (glob style pattern, like in the [`KEYS`]({{< relref "/commands/keys" >}}) command), to the list of key patterns accessible by the user. This grants both read and write permissions to keys that match the pattern. You can add multiple key patterns to the same user. Example: `~objects:*` * `%R~`: (Available in Redis 7.0 and later) Adds the specified read key pattern. This behaves similar to the regular key pattern but only grants permission to read from keys that match the given pattern. See [key permissions](/topics/acl#key-permissions) for more information. * `%W~`: (Available in Redis 7.0 and later) Adds the specified write key pattern. This behaves similar to the regular key pattern but only grants permission to write to keys that match the given pattern. See [key permissions](/topics/acl#key-permissions) for more information. * `%RW~`: (Available in Redis 7.0 and later) Alias for `~`. @@ -113,7 +113,7 @@ This is a list of all the supported Redis ACL rules: * `allchannels`: Alias for `&*`, it allows the user to access all Pub/Sub channels. * `resetchannels`: Removes all channel patterns from the list of Pub/Sub channel patterns the user can access. * `+`: Adds the command to the list of commands the user can call. Can be used with `|` for allowing subcommands (e.g "+config|get"). 
-* `+@`: Adds all the commands in the specified category to the list of commands the user is able to execute. Example: `+@string` (adds all the string commands). For a list of categories, check the [`ACL CAT`](/commands/acl-cat) command. +* `+@`: Adds all the commands in the specified category to the list of commands the user is able to execute. Example: `+@string` (adds all the string commands). For a list of categories, check the [`ACL CAT`]({{< relref "/commands/acl-cat" >}}) command. * `+|first-arg`: Allows a specific first argument of an otherwise disabled command. It is only supported on commands with no sub-commands, and is not allowed as negative form like -SELECT|1, only additive starting with "+". This feature is deprecated and may be removed in the future. * `allcommands`: Alias of `+@all`. Adds all the commands there are in the server, including *future commands* loaded via module, to be executed by this user. * `-`: Remove the command to the list of commands the user can call. Starting Redis 7.0, it can be used with `|` for blocking subcommands (e.g., "-config|set"). @@ -123,13 +123,13 @@ This is a list of all the supported Redis ACL rules: ### User management rules * `on`: Set the user as active, it will be possible to authenticate as this user using `AUTH `. -* `off`: Set user as not active, it will be impossible to log as this user. Please note that if a user gets disabled (set to off) after there are connections already authenticated with such a user, the connections will continue to work as expected. To also kill the old connections you can use [`CLIENT KILL`](/commands/client-kill) with the user option. An alternative is to delete the user with [`ACL DELUSER`](/commands/acl-deluser), that will result in all the connections authenticated as the deleted user to be disconnected. +* `off`: Set user as not active, it will be impossible to log as this user. 
Please note that if a user gets disabled (set to off) after there are connections already authenticated with such a user, the connections will continue to work as expected. To also kill the old connections you can use [`CLIENT KILL`]({{< relref "/commands/client-kill" >}}) with the user option. An alternative is to delete the user with [`ACL DELUSER`]({{< relref "/commands/acl-deluser" >}}), that will result in all the connections authenticated as the deleted user to be disconnected. * `nopass`: The user is set as a _no password_ user. It means that it will be possible to authenticate as such user with any password. By default, the `default` special user is set as "nopass". The `nopass` rule will also reset all the configured passwords for the user. * `>password`: Adds the specified clear text password as a hashed password in the list of the users passwords. Every user can have many active passwords, so that password rotation will be simpler. The specified password is not stored as clear text inside the server. Example: `>mypassword`. * `#`: Adds the specified hashed password to the list of user passwords. A Redis hashed password is hashed with SHA256 and translated into a hexadecimal string. Example: `#c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2`. * `password` but removes the password instead of adding it. * `!`: Like `#` but removes the password instead of adding it. -* `()`: (Available in Redis 7.0 and later) Creates a new selector to match rules against. Selectors are evaluated after the user permissions, and are evaluated according to the order they are defined. If a command matches either the user permissions or any selector, it is allowed. See [selectors](/docs/management/security/acl#selectors) for more information. +* `()`: (Available in Redis 7.0 and later) Creates a new selector to match rules against. Selectors are evaluated after the user permissions, and are evaluated according to the order they are defined. 
If a command matches either the user permissions or any selector, it is allowed. See [selectors]({{< baseurl >}}/operate/oss_and_stack/management/security/acl#selectors) for more information. * `clearselectors`: (Available in Redis 7.0 and later) Deletes all of the selectors attached to the user. * `reset`: Removes any capability from the user. They are set to off, without passwords, unable to execute any command, unable to access any key. diff --git a/content/commands/acl-whoami/index.md b/content/commands/acl-whoami/index.md index 2624323798..b276d0d93f 100644 --- a/content/commands/acl-whoami/index.md +++ b/content/commands/acl-whoami/index.md @@ -29,7 +29,7 @@ title: ACL WHOAMI --- Return the username the current connection is authenticated with. New connections are authenticated with the "default" user. They -can change user using [`AUTH`](/commands/auth). +can change user using [`AUTH`]({{< relref "/commands/auth" >}}). ## Examples diff --git a/content/commands/acl/index.md b/content/commands/acl/index.md index bd06cfd25d..9e6d822c6d 100644 --- a/content/commands/acl/index.md +++ b/content/commands/acl/index.md @@ -23,6 +23,6 @@ syntax_fmt: ACL syntax_str: '' title: ACL --- -This is a container command for [Access Control List](/docs/management/security/acl/) commands. +This is a container command for [Access Control List]({{< relref "/operate/oss_and_stack/management/security/acl" >}}) commands. -To see the list of available commands you can call [`ACL HELP`](/commands/acl-help). +To see the list of available commands you can call [`ACL HELP`]({{< relref "/commands/acl-help" >}}). diff --git a/content/commands/append/index.md b/content/commands/append/index.md index 86b2b53a57..845d58bf97 100644 --- a/content/commands/append/index.md +++ b/content/commands/append/index.md @@ -56,7 +56,7 @@ title: APPEND If `key` already exists and is a string, this command appends the `value` at the end of the string. 
If `key` does not exist it is created and set as an empty string, so `APPEND` -will be similar to [`SET`](/commands/set) in this special case. +will be similar to [`SET`]({{< relref "/commands/set" >}}) in this special case. ## Examples @@ -80,12 +80,12 @@ APPEND timeseries "fixed-size sample" Accessing individual elements in the time series is not hard: -* [`STRLEN`](/commands/strlen) can be used in order to obtain the number of samples. -* [`GETRANGE`](/commands/getrange) allows for random access of elements. +* [`STRLEN`]({{< relref "/commands/strlen" >}}) can be used in order to obtain the number of samples. +* [`GETRANGE`]({{< relref "/commands/getrange" >}}) allows for random access of elements. If our time series have associated time information we can easily implement - a binary search to get range combining [`GETRANGE`](/commands/getrange) with the Lua scripting + a binary search to get range combining [`GETRANGE`]({{< relref "/commands/getrange" >}}) with the Lua scripting engine available in Redis 2.6. -* [`SETRANGE`](/commands/setrange) can be used to overwrite an existing time series. +* [`SETRANGE`]({{< relref "/commands/setrange" >}}) can be used to overwrite an existing time series. The limitation of this pattern is that we are forced into an append-only mode of operation, there is no way to cut the time series to a given size easily diff --git a/content/commands/auth/index.md b/content/commands/auth/index.md index fb08542a99..3c054401f3 100644 --- a/content/commands/auth/index.md +++ b/content/commands/auth/index.md @@ -65,7 +65,7 @@ When Redis ACLs are used, the command should be given in an extended way: AUTH In order to authenticate the current connection with one of the connections -defined in the ACL list (see [`ACL SETUSER`](/commands/acl-setuser)) and the official [ACL guide](/topics/acl) for more information. 
+defined in the ACL list (see [`ACL SETUSER`]({{< relref "/commands/acl-setuser" >}})) and the official [ACL guide](/topics/acl) for more information. When ACLs are used, the single argument form of the command, where only the password is specified, assumes that the implicit username is "default". @@ -74,4 +74,4 @@ When ACLs are used, the single argument form of the command, where only the pass Because of the high performance nature of Redis, it is possible to try a lot of passwords in parallel in very short time, so make sure to generate a strong and very long password so that this attack is infeasible. -A good way to generate strong passwords is via the [`ACL GENPASS`](/commands/acl-genpass) command. +A good way to generate strong passwords is via the [`ACL GENPASS`]({{< relref "/commands/acl-genpass" >}}) command. diff --git a/content/commands/bf.add/index.md b/content/commands/bf.add/index.md index 58d41ee347..20afeda7c8 100644 --- a/content/commands/bf.add/index.md +++ b/content/commands/bf.add/index.md @@ -29,7 +29,7 @@ title: BF.ADD --- Adds an item to a Bloom filter. -This command is similar to [`BF.MADD`](/commands/bf.madd), except that only one item can be added. +This command is similar to [`BF.MADD`]({{< baseurl >}}/commands/bf.madd), except that only one item can be added. ## Required arguments @@ -37,7 +37,7 @@ This command is similar to [`BF.MADD`](/commands/bf.madd), except that only one is key name for a Bloom filter to add the item to. -If `key` does not exist - a new Bloom filter is created with default error rate, capacity, and expansion (see [`BF.RESERVE`](/commands/bf.reserve)). +If `key` does not exist - a new Bloom filter is created with default error rate, capacity, and expansion (see [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve)).
item @@ -49,7 +49,7 @@ is an item to add. Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers) - where "1" means that the item has been added successfully, and "0" means that such item was already added to the filter (which could be wrong) +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - where "1" means that the item has been added successfully, and "0" means that such item was already added to the filter (which could be wrong) - [] on error (invalid arguments, wrong key type, etc.) and also when the filter is full ## Examples diff --git a/content/commands/bf.card/index.md b/content/commands/bf.card/index.md index dd5a6d7af2..b1f6c77814 100644 --- a/content/commands/bf.card/index.md +++ b/content/commands/bf.card/index.md @@ -41,7 +41,7 @@ is key name for a Bloom filter. Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers) - the number of items that were added to this Bloom filter and detected as unique (items that caused at least one bit to be set in at least one sub-filter), or 0 when `key` does not exist. +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - the number of items that were added to this Bloom filter and detected as unique (items that caused at least one bit to be set in at least one sub-filter), or 0 when `key` does not exist. - [] on error (invalid arguments, wrong key type, etc.) Note: when `key` exists - return the same value as `BF.INFO key ITEMS`. diff --git a/content/commands/bf.exists/index.md b/content/commands/bf.exists/index.md index e104dfecb4..f7aba4a607 100644 --- a/content/commands/bf.exists/index.md +++ b/content/commands/bf.exists/index.md @@ -29,7 +29,7 @@ title: BF.EXISTS --- Determines whether a given item was added to a Bloom filter. -This command is similar to [`BF.MEXISTS`](/commands/bf.mexists), except that only one item can be checked. 
+This command is similar to [`BF.MEXISTS`]({{< baseurl >}}/commands/bf.mexists), except that only one item can be checked. ## Required arguments @@ -48,7 +48,7 @@ is an item to check. Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers), where `1` means that, with high probability, `item` was already added to the filter, and `0` means that `key` does not exist or that `item` had not been added to the filter. +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), where `1` means that, with high probability, `item` was already added to the filter, and `0` means that `key` does not exist or that `item` had not been added to the filter. - [] on error (invalid arguments, wrong key type, etc.) ## Examples diff --git a/content/commands/bf.info/index.md b/content/commands/bf.info/index.md index 2262b25ef1..feccbb3717 100644 --- a/content/commands/bf.info/index.md +++ b/content/commands/bf.info/index.md @@ -86,12 +86,12 @@ When no optional argument is specified: return all information fields. 
When no optional argument is specified, returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) with argument name ([Simple string reply](/docs/reference/protocol-spec#simple-strings)) and value ([Integer reply](/docs/reference/protocol-spec#integers)) pairs +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with argument name ([Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}})) and value ([Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})) pairs - [] on error (invalid arguments, key does not exist, wrong key type, and so on) When an optional argument is specified, returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers) - argument value +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - argument value - [] on error (invalid arguments, key does not exist, wrong key type, and so on) ## Examples diff --git a/content/commands/bf.insert/index.md b/content/commands/bf.insert/index.md index a6a55e893a..18edbce2fe 100644 --- a/content/commands/bf.insert/index.md +++ b/content/commands/bf.insert/index.md @@ -50,17 +50,15 @@ since: 1.0.0 stack_path: docs/data-types/probabilistic summary: Adds one or more items to a Bloom Filter. A filter will be created if it does not exist -syntax_fmt: "BF.INSERT key [CAPACITY\_capacity] [ERROR\_error] - [EXPANSION\_expansion]\ - \ [NOCREATE] [NONSCALING] ITEMS item [item - ...]" +syntax_fmt: "BF.INSERT key [CAPACITY\_capacity] [ERROR\_error] [EXPANSION\_expansion]\ + \ [NOCREATE] [NONSCALING] ITEMS item [item ...]" syntax_str: "[CAPACITY\_capacity] [ERROR\_error] [EXPANSION\_expansion] [NOCREATE]\ \ [NONSCALING] ITEMS item [item ...]" title: BF.INSERT --- Creates a new Bloom filter if the `key` does not exist using the specified error rate, capacity, and expansion, then adds all specified items to the Bloom Filter. 
-This command is similar to [`BF.MADD`](/commands/bf.madd), except that the error rate, capacity, and expansion can be specified. It is a sugarcoated combination of [`BF.RESERVE`](/commands/bf.reserve) and [`BF.MADD`](/commands/bf.madd). +This command is similar to [`BF.MADD`]({{< baseurl >}}/commands/bf.madd), except that the error rate, capacity, and expansion can be specified. It is a sugarcoated combination of [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve) and [`BF.MADD`]({{< baseurl >}}/commands/bf.madd). ## Required arguments @@ -91,14 +89,14 @@ It is an error to specify `NOCREATE` together with either `CAPACITY` or `ERROR`. Specifies the desired `capacity` for the filter to be created. This parameter is ignored if the filter already exists. If the filter is automatically created and this parameter is absent, then the module-level `capacity` is used. -See [`BF.RESERVE`](/commands/bf.reserve) for more information about the impact of this value. +See [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve) for more information about the impact of this value.
ERROR error Specifies the `error` ratio of the newly created filter if it does not yet exist. If the filter is automatically created and `error` is not specified then the module-level error rate is used. -See [`BF.RESERVE`](/commands/bf.reserve) for more information about the format of this value. +See [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve) for more information about the format of this value.
NONSCALING @@ -120,8 +118,8 @@ Otherwise, use an `expansion` of `1` to reduce memory consumption. The default v Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) where each element is one of these options: - - [Integer reply](/docs/reference/protocol-spec#integers), where `1` denotes that the item has been added successfully, and `0` means that such item had already added to the filter (which could be wrong) +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) where each element is one of these options: + - [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), where `1` denotes that the item has been added successfully, and `0` means that such item had already added to the filter (which could be wrong) - [] when the item cannot be added because the filter is full - [], for example, when the number of arguments or key type is wrong, and also when `NOCREATE` is specified and `key` does not exist. diff --git a/content/commands/bf.loadchunk/index.md b/content/commands/bf.loadchunk/index.md index c864174e94..31140cc191 100644 --- a/content/commands/bf.loadchunk/index.md +++ b/content/commands/bf.loadchunk/index.md @@ -29,9 +29,9 @@ syntax_fmt: BF.LOADCHUNK key iterator data syntax_str: iterator data title: BF.LOADCHUNK --- -Restores a Bloom filter previously saved using [`BF.SCANDUMP`](/commands/bf.scandump). +Restores a Bloom filter previously saved using [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump). -See the [`BF.SCANDUMP`](/commands/bf.scandump) command for example usage. +See the [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump) command for example usage. Notes @@ -49,21 +49,21 @@ is key name for a Bloom filter to restore.
iterator -Iterator value associated with `data` (returned by [`BF.SCANDUMP`](/commands/bf.scandump)) +Iterator value associated with `data` (returned by [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump))
data -Current data chunk (returned by [`BF.SCANDUMP`](/commands/bf.scandump)) +Current data chunk (returned by [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump))
## Return value Returns one of these replies: -- [Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly +- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly - [] on error (invalid arguments, wrong key type, wrong data, etc.) ## Examples -See [`BF.SCANDUMP`](/commands/bf.scandump) for an example. +See [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump) for an example. diff --git a/content/commands/bf.madd/index.md b/content/commands/bf.madd/index.md index 6d38bcdd66..603731454d 100644 --- a/content/commands/bf.madd/index.md +++ b/content/commands/bf.madd/index.md @@ -33,9 +33,9 @@ title: BF.MADD --- Adds one or more items to a Bloom filter. -This command is similar to [`BF.ADD`](/commands/bf.add), except that you can add more than one item. +This command is similar to [`BF.ADD`]({{< baseurl >}}/commands/bf.add), except that you can add more than one item. -This command is similar to [`BF.INSERT`](/commands/bf.insert), except that the error rate, capacity, and expansion cannot be specified. +This command is similar to [`BF.INSERT`]({{< baseurl >}}/commands/bf.insert), except that the error rate, capacity, and expansion cannot be specified. ## Required arguments @@ -43,7 +43,7 @@ This command is similar to [`BF.INSERT`](/commands/bf.insert), except that the e is key name for a Bloom filter to add the items to. -If `key` does not exist - a new Bloom filter is created with default error rate, capacity, and expansion (see [`BF.RESERVE`](/commands/bf.reserve)). +If `key` does not exist - a new Bloom filter is created with default error rate, capacity, and expansion (see [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve)).
item... @@ -55,8 +55,8 @@ One or more items to add. Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) where each element is either - - [Integer reply](/docs/reference/protocol-spec#integers) - where "1" means that the item has been added successfully, and "0" means that such item was already added to the filter (which could be wrong) +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) where each element is either + - [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - where "1" means that the item has been added successfully, and "0" means that such item was already added to the filter (which could be wrong) - [] when the item cannot be added because the filter is full - [] on error (invalid arguments, wrong key type, etc.) diff --git a/content/commands/bf.mexists/index.md b/content/commands/bf.mexists/index.md index 1399dff5da..6f2dd54eb9 100644 --- a/content/commands/bf.mexists/index.md +++ b/content/commands/bf.mexists/index.md @@ -31,7 +31,7 @@ title: BF.MEXISTS --- Determines whether one or more items were added to a Bloom filter. -This command is similar to [`BF.EXISTS`](/commands/bf.exists), except that more than one item can be checked. +This command is similar to [`BF.EXISTS`]({{< baseurl >}}/commands/bf.exists), except that more than one item can be checked. ## Required arguments @@ -50,7 +50,7 @@ One or more items to check. Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) of [Integer reply](/docs/reference/protocol-spec#integers) - where "1" means that, with high probability, `item` was already added to the filter, and "0" means that `key` does not exist or that `item` was definitely not added to the filter. 
+- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - where "1" means that, with high probability, `item` was already added to the filter, and "0" means that `key` does not exist or that `item` was definitely not added to the filter. - [] on error (invalid arguments, wrong key type, etc.) ## Examples diff --git a/content/commands/bf.reserve/index.md b/content/commands/bf.reserve/index.md index a85b640a4e..c86ba22cb3 100644 --- a/content/commands/bf.reserve/index.md +++ b/content/commands/bf.reserve/index.md @@ -33,8 +33,7 @@ module: Bloom since: 1.0.0 stack_path: docs/data-types/probabilistic summary: Creates a new Bloom Filter -syntax_fmt: "BF.RESERVE key error_rate capacity [EXPANSION\_expansion] - [NONSCALING]" +syntax_fmt: "BF.RESERVE key error_rate capacity [EXPANSION\_expansion] [NONSCALING]" syntax_str: "error_rate capacity [EXPANSION\_expansion] [NONSCALING]" title: BF.RESERVE --- @@ -95,7 +94,7 @@ Otherwise, you use an `expansion` of `1` to reduce memory consumption. The defau Returns one of these replies: -- [Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if filter created successfully +- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if filter created successfully - [] on error (invalid arguments, key already exists, etc.) ## Examples diff --git a/content/commands/bf.scandump/index.md b/content/commands/bf.scandump/index.md index e5dd9d95bd..57b7047419 100644 --- a/content/commands/bf.scandump/index.md +++ b/content/commands/bf.scandump/index.md @@ -29,7 +29,7 @@ title: BF.SCANDUMP --- Begins an incremental save of the Bloom filter. -This command is useful for large Bloom filters that cannot fit into the [`DUMP`](/commands/dump) and [`RESTORE`](/commands/restore) model. 
+This command is useful for large Bloom filters that cannot fit into the [`DUMP`]({{< relref "/commands/dump" >}}) and [`RESTORE`]({{< relref "/commands/restore" >}}) model. The first time this command is called, the value of `iter` should be 0. @@ -51,11 +51,11 @@ Iterator value; either 0 or the iterator from a previous invocation of this comm Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) of [Integer reply](/docs/reference/protocol-spec#integers) (_Iterator_) and [] (_Data_). +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) (_Iterator_) and [] (_Data_). The Iterator is passed as input to the next invocation of `BF.SCANDUMP`. If _Iterator_ is 0, then it means iteration has completed. - The iterator-data pair should also be passed to [`BF.LOADCHUNK`](/commands/bf.loadchunk) when restoring the filter. + The iterator-data pair should also be passed to [`BF.LOADCHUNK`]({{< baseurl >}}/commands/bf.loadchunk) when restoring the filter. - [] on error (invalid arguments, key not found, wrong key type, etc.) diff --git a/content/commands/bgrewriteaof/index.md b/content/commands/bgrewriteaof/index.md index e35b5aab64..a069b9c4ab 100644 --- a/content/commands/bgrewriteaof/index.md +++ b/content/commands/bgrewriteaof/index.md @@ -42,7 +42,7 @@ process doing persistence. Specifically: -* If a Redis child is creating a snapshot on disk, the AOF rewrite is _scheduled_ but not started until the saving child producing the RDB file terminates. In this case the `BGREWRITEAOF` will still return a positive status reply, but with an appropriate message. You can check if an AOF rewrite is scheduled looking at the [`INFO`](/commands/info) command as of Redis 2.6 or successive versions. +* If a Redis child is creating a snapshot on disk, the AOF rewrite is _scheduled_ but not started until the saving child producing the RDB file terminates. 
In this case the `BGREWRITEAOF` will still return a positive status reply, but with an appropriate message. You can check if an AOF rewrite is scheduled looking at the [`INFO`]({{< relref "/commands/info" >}}) command as of Redis 2.6 or successive versions. * If an AOF rewrite is already in progress the command returns an error and no AOF rewrite will be scheduled for a later time. * If the AOF rewrite could start, but the attempt at starting it fails (for instance because of an error in creating the child process), an error is returned to the caller. diff --git a/content/commands/bgsave/index.md b/content/commands/bgsave/index.md index bce771b490..4095f7fe89 100644 --- a/content/commands/bgsave/index.md +++ b/content/commands/bgsave/index.md @@ -53,7 +53,7 @@ If `BGSAVE SCHEDULE` is used, the command will immediately return `OK` when an AOF rewrite is in progress and schedule the background save to run at the next opportunity. -A client may be able to check if the operation succeeded using the [`LASTSAVE`](/commands/lastsave) +A client may be able to check if the operation succeeded using the [`LASTSAVE`]({{< relref "/commands/lastsave" >}}) command. Please refer to the [persistence documentation][tp] for detailed information. diff --git a/content/commands/bitcount/index.md b/content/commands/bitcount/index.md index 58ce118bcc..510fe44db7 100644 --- a/content/commands/bitcount/index.md +++ b/content/commands/bitcount/index.md @@ -77,7 +77,7 @@ By default all the bytes contained in the string are examined. It is possible to specify the counting operation only in an interval passing the additional arguments _start_ and _end_. -Like for the [`GETRANGE`](/commands/getrange) command start and end can contain negative values in +Like for the [`GETRANGE`]({{< relref "/commands/getrange" >}}) command start and end can contain negative values in order to index bytes starting from the end of the string, where -1 is the last byte, -2 is the penultimate, and so forth. 
@@ -108,13 +108,13 @@ One example is a Web application that needs the history of user visits, so that for instance it is possible to determine what users are good targets of beta features. -Using the [`SETBIT`](/commands/setbit) command this is trivial to accomplish, identifying every day +Using the [`SETBIT`]({{< relref "/commands/setbit" >}}) command this is trivial to accomplish, identifying every day with a small progressive integer. For instance day 0 is the first day the application was put online, day 1 the next day, and so forth. Every time a user performs a page view, the application can register that in -the current day the user visited the web site using the [`SETBIT`](/commands/setbit) command setting +the current day the user visited the web site using the [`SETBIT`]({{< relref "/commands/setbit" >}}) command setting the bit corresponding to the current day. Later it will be trivial to know the number of single days the user visited the @@ -132,7 +132,7 @@ In the above example of counting days, even after 10 years the application is online we still have just `365*10` bits of data per user, that is just 456 bytes per user. With this amount of data `BITCOUNT` is still as fast as any other O(1) Redis -command like [`GET`](/commands/get) or [`INCR`](/commands/incr). +command like [`GET`]({{< relref "/commands/get" >}}) or [`INCR`]({{< relref "/commands/incr" >}}). When the bitmap is big, there are two alternatives: diff --git a/content/commands/bitfield/index.md b/content/commands/bitfield/index.md index 9bd5260766..553661c4ab 100644 --- a/content/commands/bitfield/index.md +++ b/content/commands/bitfield/index.md @@ -109,14 +109,10 @@ key_specs: linkTitle: BITFIELD since: 3.2.0 summary: Performs arbitrary bitfield integer operations on strings. 
-syntax_fmt: "BITFIELD key [GET\_encoding offset | [OVERFLOW\_] -\ - \ - [GET\_encoding\ - \ offset | [OVERFLOW\_] - - ...]]" +syntax_fmt: "BITFIELD key [GET\_encoding offset | [OVERFLOW\_]\ + \ [GET\_encoding\ + \ offset | [OVERFLOW\_] ...]]" syntax_str: "[GET\_encoding offset | [OVERFLOW\_] [GET\_encoding offset | [OVERFLOW\_\ ] \ diff --git a/content/commands/bitfield_ro/index.md b/content/commands/bitfield_ro/index.md index 6ee9b4a051..b29e7a28cc 100644 --- a/content/commands/bitfield_ro/index.md +++ b/content/commands/bitfield_ro/index.md @@ -59,15 +59,15 @@ syntax_fmt: "BITFIELD_RO key [GET\_encoding offset [GET encoding offset ...]]" syntax_str: "[GET\_encoding offset [GET encoding offset ...]]" title: BITFIELD_RO --- -Read-only variant of the [`BITFIELD`](/commands/bitfield) command. -It is like the original [`BITFIELD`](/commands/bitfield) but only accepts `GET` subcommand and can safely be used in read-only replicas. +Read-only variant of the [`BITFIELD`]({{< relref "/commands/bitfield" >}}) command. +It is like the original [`BITFIELD`]({{< relref "/commands/bitfield" >}}) but only accepts `GET` subcommand and can safely be used in read-only replicas. -Since the original [`BITFIELD`](/commands/bitfield) has `SET` and `INCRBY` options it is technically flagged as a writing command in the Redis command table. -For this reason read-only replicas in a Redis Cluster will redirect it to the master instance even if the connection is in read-only mode (see the [`READONLY`](/commands/readonly) command of Redis Cluster). +Since the original [`BITFIELD`]({{< relref "/commands/bitfield" >}}) has `SET` and `INCRBY` options it is technically flagged as a writing command in the Redis command table. +For this reason read-only replicas in a Redis Cluster will redirect it to the master instance even if the connection is in read-only mode (see the [`READONLY`]({{< relref "/commands/readonly" >}}) command of Redis Cluster). 
-Since Redis 6.2, the `BITFIELD_RO` variant was introduced in order to allow [`BITFIELD`](/commands/bitfield) behavior in read-only replicas without breaking compatibility on command flags. +Since Redis 6.2, the `BITFIELD_RO` variant was introduced in order to allow [`BITFIELD`]({{< relref "/commands/bitfield" >}}) behavior in read-only replicas without breaking compatibility on command flags. -See original [`BITFIELD`](/commands/bitfield) for more details. +See original [`BITFIELD`]({{< relref "/commands/bitfield" >}}) for more details. ## Examples diff --git a/content/commands/bitop/index.md b/content/commands/bitop/index.md index b3ced21958..9da2646012 100644 --- a/content/commands/bitop/index.md +++ b/content/commands/bitop/index.md @@ -120,7 +120,7 @@ GET dest ## Pattern: real time metrics using bitmaps -`BITOP` is a good complement to the pattern documented in the [`BITCOUNT`](/commands/bitcount) command +`BITOP` is a good complement to the pattern documented in the [`BITCOUNT`]({{< relref "/commands/bitcount" >}}) command documentation. Different bitmaps can be combined in order to obtain a target bitmap where the population counting operation is performed. diff --git a/content/commands/bitpos/index.md b/content/commands/bitpos/index.md index 202d315b74..9b2fc0f241 100644 --- a/content/commands/bitpos/index.md +++ b/content/commands/bitpos/index.md @@ -84,7 +84,7 @@ The position is returned, thinking of the string as an array of bits from left t right, where the first byte's most significant bit is at position 0, the second byte's most significant bit is at position 8, and so forth. -The same bit position convention is followed by [`GETBIT`](/commands/getbit) and [`SETBIT`](/commands/setbit). +The same bit position convention is followed by [`GETBIT`]({{< relref "/commands/getbit" >}}) and [`SETBIT`]({{< relref "/commands/setbit" >}}). By default, all the bytes contained in the string are examined. 
It is possible to look for bits only in a specified interval passing the additional arguments _start_ and _end_ (it is possible to just pass _start_, the operation will assume that the end is the last byte of the string. However there are semantic differences as explained later). @@ -95,7 +95,7 @@ So `start=0` and `end=2` means to look at the first three bits. Note that bit positions are returned always as absolute values starting from bit zero even when _start_ and _end_ are used to specify a range. -Like for the [`GETRANGE`](/commands/getrange) command start and end can contain negative values in +Like for the [`GETRANGE`]({{< relref "/commands/getrange" >}}) command start and end can contain negative values in order to index bytes starting from the end of the string, where -1 is the last byte, -2 is the penultimate, and so forth. When `BIT` is specified, -1 is the last bit, -2 is the penultimate, and so forth. diff --git a/content/commands/blmove/index.md b/content/commands/blmove/index.md index 098865c454..7ba11888e4 100644 --- a/content/commands/blmove/index.md +++ b/content/commands/blmove/index.md @@ -94,22 +94,22 @@ syntax_fmt: BLMOVE source destination timeout syntax_str: destination timeout title: BLMOVE --- -`BLMOVE` is the blocking variant of [`LMOVE`](/commands/lmove). -When `source` contains elements, this command behaves exactly like [`LMOVE`](/commands/lmove). -When used inside a [`MULTI`](/commands/multi)/[`EXEC`](/commands/exec) block, this command behaves exactly like [`LMOVE`](/commands/lmove). +`BLMOVE` is the blocking variant of [`LMOVE`]({{< relref "/commands/lmove" >}}). +When `source` contains elements, this command behaves exactly like [`LMOVE`]({{< relref "/commands/lmove" >}}). +When used inside a [`MULTI`]({{< relref "/commands/multi" >}})/[`EXEC`]({{< relref "/commands/exec" >}}) block, this command behaves exactly like [`LMOVE`]({{< relref "/commands/lmove" >}}). 
When `source` is empty, Redis will block the connection until another client pushes to it or until `timeout` (a double value specifying the maximum number of seconds to block) is reached. A `timeout` of zero can be used to block indefinitely. -This command comes in place of the now deprecated [`BRPOPLPUSH`](/commands/brpoplpush). Doing +This command comes in place of the now deprecated [`BRPOPLPUSH`]({{< relref "/commands/brpoplpush" >}}). Doing `BLMOVE RIGHT LEFT` is equivalent. -See [`LMOVE`](/commands/lmove) for more information. +See [`LMOVE`]({{< relref "/commands/lmove" >}}) for more information. ## Pattern: Reliable queue -Please see the pattern description in the [`LMOVE`](/commands/lmove) documentation. +Please see the pattern description in the [`LMOVE`]({{< relref "/commands/lmove" >}}) documentation. ## Pattern: Circular list -Please see the pattern description in the [`LMOVE`](/commands/lmove) documentation. +Please see the pattern description in the [`LMOVE`]({{< relref "/commands/lmove" >}}) documentation. diff --git a/content/commands/blmpop/index.md b/content/commands/blmpop/index.md index 178a81b4fa..ccc81bf7f6 100644 --- a/content/commands/blmpop/index.md +++ b/content/commands/blmpop/index.md @@ -75,11 +75,11 @@ syntax_fmt: "BLMPOP timeout numkeys key [key ...] [COUNT\_count]" syntax_str: "numkeys key [key ...] [COUNT\_count]" title: BLMPOP --- -`BLMPOP` is the blocking variant of [`LMPOP`](/commands/lmpop). +`BLMPOP` is the blocking variant of [`LMPOP`]({{< relref "/commands/lmpop" >}}). -When any of the lists contains elements, this command behaves exactly like [`LMPOP`](/commands/lmpop). -When used inside a [`MULTI`](/commands/multi)/[`EXEC`](/commands/exec) block, this command behaves exactly like [`LMPOP`](/commands/lmpop). +When any of the lists contains elements, this command behaves exactly like [`LMPOP`]({{< relref "/commands/lmpop" >}}). 
+When used inside a [`MULTI`]({{< relref "/commands/multi" >}})/[`EXEC`]({{< relref "/commands/exec" >}}) block, this command behaves exactly like [`LMPOP`]({{< relref "/commands/lmpop" >}}). When all lists are empty, Redis will block the connection until another client pushes to it or until the `timeout` (a double value specifying the maximum number of seconds to block) elapses. A `timeout` of zero can be used to block indefinitely. -See [`LMPOP`](/commands/lmpop) for more information. +See [`LMPOP`]({{< relref "/commands/lmpop" >}}) for more information. diff --git a/content/commands/blpop/index.md b/content/commands/blpop/index.md index ca4bacaba8..e756008db3 100644 --- a/content/commands/blpop/index.md +++ b/content/commands/blpop/index.md @@ -58,7 +58,7 @@ syntax_str: timeout title: BLPOP --- `BLPOP` is a blocking list pop primitive. -It is the blocking version of [`LPOP`](/commands/lpop) because it blocks the connection when there +It is the blocking version of [`LPOP`]({{< relref "/commands/lpop" >}}) because it blocks the connection when there are no elements to pop from any of the given lists. An element is popped from the head of the first list that is non-empty, with the given keys being checked in the order that they are given. @@ -85,7 +85,7 @@ that order). ## Blocking behavior If none of the specified keys exist, `BLPOP` blocks the connection until another -client performs an [`LPUSH`](/commands/lpush) or [`RPUSH`](/commands/rpush) operation against one of the keys. +client performs an [`LPUSH`]({{< relref "/commands/lpush" >}}) or [`RPUSH`]({{< relref "/commands/rpush" >}}) operation against one of the keys. Once new data is present on one of the lists, the client returns with the name of the key unblocking it and the popped value. @@ -108,7 +108,7 @@ specified keys. There are times when a list can receive multiple elements in the context of the same conceptual command: * Variadic push operations such as `LPUSH mylist a b c`. 
-* After an [`EXEC`](/commands/exec) of a [`MULTI`](/commands/multi) block with multiple push operations against the same list. +* After an [`EXEC`]({{< relref "/commands/exec" >}}) of a [`MULTI`]({{< relref "/commands/multi" >}}) block with multiple push operations against the same list. * Executing a Lua Script with Redis 2.6 or newer. When multiple elements are pushed inside a list where there are clients blocking, the behavior is different for Redis 2.4 and Redis 2.6 or newer. @@ -118,7 +118,7 @@ For Redis 2.6 what happens is that the command performing multiple pushes is exe Client A: BLPOP foo 0 Client B: LPUSH foo a b c -If the above condition happens using a Redis 2.6 server or greater, Client **A** will be served with the `c` element, because after the [`LPUSH`](/commands/lpush) command the list contains `c,b,a`, so taking an element from the left means to return `c`. +If the above condition happens using a Redis 2.6 server or greater, Client **A** will be served with the `c` element, because after the [`LPUSH`]({{< relref "/commands/lpush" >}}) command the list contains `c,b,a`, so taking an element from the left means to return `c`. Instead Redis 2.4 works in a different way: clients are served *in the context* of the push operation, so as long as `LPUSH foo a b c` starts pushing the first element to the list, it will be delivered to the Client **A**, that will receive `a` (the first element pushed). @@ -132,14 +132,14 @@ Note that for the same reason a Lua script or a `MULTI/EXEC` block may push elem reading the replies in batch), however this setup makes sense almost solely when it is the last command of the pipeline. 
-Using `BLPOP` inside a [`MULTI`](/commands/multi) / [`EXEC`](/commands/exec) block does not make a lot of sense +Using `BLPOP` inside a [`MULTI`]({{< relref "/commands/multi" >}}) / [`EXEC`]({{< relref "/commands/exec" >}}) block does not make a lot of sense as it would require blocking the entire server in order to execute the block atomically, which in turn does not allow other clients to perform a push -operation. For this reason the behavior of `BLPOP` inside [`MULTI`](/commands/multi) / [`EXEC`](/commands/exec) when the list is empty is to return a `nil` multi-bulk reply, which is the same +operation. For this reason the behavior of `BLPOP` inside [`MULTI`]({{< relref "/commands/multi" >}}) / [`EXEC`]({{< relref "/commands/exec" >}}) when the list is empty is to return a `nil` multi-bulk reply, which is the same thing that happens when the timeout is reached. If you like science fiction, think of time flowing at infinite speed inside a -[`MULTI`](/commands/multi) / [`EXEC`](/commands/exec) block... +[`MULTI`]({{< relref "/commands/multi" >}}) / [`EXEC`]({{< relref "/commands/exec" >}}) block... ## Examples @@ -157,7 +157,7 @@ redis> BLPOP list1 list2 0 When `BLPOP` returns an element to the client, it also removes the element from the list. This means that the element only exists in the context of the client: if the client crashes while processing the returned element, it is lost forever. -This can be a problem with some application where we want a more reliable messaging system. When this is the case, please check the [`BRPOPLPUSH`](/commands/brpoplpush) command, that is a variant of `BLPOP` that adds the returned element to a target list before returning it to the client. +This can be a problem with some application where we want a more reliable messaging system. 
When this is the case, please check the [`BRPOPLPUSH`]({{< relref "/commands/brpoplpush" >}}) command, that is a variant of `BLPOP` that adds the returned element to a target list before returning it to the client. ## Pattern: Event notification @@ -166,7 +166,7 @@ primitives. For instance for some application you may need to block waiting for elements into a Redis Set, so that as far as a new element is added to the Set, it is possible to retrieve it without resort to polling. -This would require a blocking version of [`SPOP`](/commands/spop) that is not available, but using +This would require a blocking version of [`SPOP`]({{< relref "/commands/spop" >}}) that is not available, but using blocking list operations we can easily accomplish this task. The consumer will do: diff --git a/content/commands/brpop/index.md b/content/commands/brpop/index.md index ae7fa249b7..fc9becd671 100644 --- a/content/commands/brpop/index.md +++ b/content/commands/brpop/index.md @@ -58,13 +58,13 @@ syntax_str: timeout title: BRPOP --- `BRPOP` is a blocking list pop primitive. -It is the blocking version of [`RPOP`](/commands/rpop) because it blocks the connection when there +It is the blocking version of [`RPOP`]({{< relref "/commands/rpop" >}}) because it blocks the connection when there are no elements to pop from any of the given lists. An element is popped from the tail of the first list that is non-empty, with the given keys being checked in the order that they are given. See the [BLPOP documentation][cb] for the exact semantics, since `BRPOP` is -identical to [`BLPOP`](/commands/blpop) with the only difference being that it pops elements from +identical to [`BLPOP`]({{< relref "/commands/blpop" >}}) with the only difference being that it pops elements from the tail of a list instead of popping from the head. 
[cb]: /commands/blpop diff --git a/content/commands/brpoplpush/index.md b/content/commands/brpoplpush/index.md index 8064848bd9..a1c05be80d 100644 --- a/content/commands/brpoplpush/index.md +++ b/content/commands/brpoplpush/index.md @@ -70,7 +70,7 @@ key_specs: type: range insert: true linkTitle: BRPOPLPUSH -replaced_by: '[`BLMOVE`](/commands/blmove) with the `RIGHT` and `LEFT` arguments' +replaced_by: '[`BLMOVE`]({{< relref "/commands/blmove" >}}) with the `RIGHT` and `LEFT` arguments' since: 2.2.0 summary: Pops an element from a list, pushes it to another list and returns it. Block until an element is available otherwise. Deletes the list if the last element was @@ -79,19 +79,19 @@ syntax_fmt: BRPOPLPUSH source destination timeout syntax_str: destination timeout title: BRPOPLPUSH --- -`BRPOPLPUSH` is the blocking variant of [`RPOPLPUSH`](/commands/rpoplpush). -When `source` contains elements, this command behaves exactly like [`RPOPLPUSH`](/commands/rpoplpush). -When used inside a [`MULTI`](/commands/multi)/[`EXEC`](/commands/exec) block, this command behaves exactly like [`RPOPLPUSH`](/commands/rpoplpush). +`BRPOPLPUSH` is the blocking variant of [`RPOPLPUSH`]({{< relref "/commands/rpoplpush" >}}). +When `source` contains elements, this command behaves exactly like [`RPOPLPUSH`]({{< relref "/commands/rpoplpush" >}}). +When used inside a [`MULTI`]({{< relref "/commands/multi" >}})/[`EXEC`]({{< relref "/commands/exec" >}}) block, this command behaves exactly like [`RPOPLPUSH`]({{< relref "/commands/rpoplpush" >}}). When `source` is empty, Redis will block the connection until another client pushes to it or until `timeout` is reached. A `timeout` of zero can be used to block indefinitely. -See [`RPOPLPUSH`](/commands/rpoplpush) for more information. +See [`RPOPLPUSH`]({{< relref "/commands/rpoplpush" >}}) for more information. ## Pattern: Reliable queue -Please see the pattern description in the [`RPOPLPUSH`](/commands/rpoplpush) documentation. 
+Please see the pattern description in the [`RPOPLPUSH`]({{< relref "/commands/rpoplpush" >}}) documentation. ## Pattern: Circular list -Please see the pattern description in the [`RPOPLPUSH`](/commands/rpoplpush) documentation. +Please see the pattern description in the [`RPOPLPUSH`]({{< relref "/commands/rpoplpush" >}}) documentation. diff --git a/content/commands/bzmpop/index.md b/content/commands/bzmpop/index.md index b7959381f0..1f57b027be 100644 --- a/content/commands/bzmpop/index.md +++ b/content/commands/bzmpop/index.md @@ -77,11 +77,11 @@ syntax_fmt: "BZMPOP timeout numkeys key [key ...] [COUNT\_count]" syntax_str: "numkeys key [key ...] [COUNT\_count]" title: BZMPOP --- -`BZMPOP` is the blocking variant of [`ZMPOP`](/commands/zmpop). +`BZMPOP` is the blocking variant of [`ZMPOP`]({{< relref "/commands/zmpop" >}}). -When any of the sorted sets contains elements, this command behaves exactly like [`ZMPOP`](/commands/zmpop). -When used inside a [`MULTI`](/commands/multi)/[`EXEC`](/commands/exec) block, this command behaves exactly like [`ZMPOP`](/commands/zmpop). +When any of the sorted sets contains elements, this command behaves exactly like [`ZMPOP`]({{< relref "/commands/zmpop" >}}). +When used inside a [`MULTI`]({{< relref "/commands/multi" >}})/[`EXEC`]({{< relref "/commands/exec" >}}) block, this command behaves exactly like [`ZMPOP`]({{< relref "/commands/zmpop" >}}). When all sorted sets are empty, Redis will block the connection until another client adds members to one of the keys or until the `timeout` (a double value specifying the maximum number of seconds to block) elapses. A `timeout` of zero can be used to block indefinitely. -See [`ZMPOP`](/commands/zmpop) for more information. +See [`ZMPOP`]({{< relref "/commands/zmpop" >}}) for more information. 
diff --git a/content/commands/bzpopmax/index.md b/content/commands/bzpopmax/index.md index e33b828439..54a999df33 100644 --- a/content/commands/bzpopmax/index.md +++ b/content/commands/bzpopmax/index.md @@ -60,7 +60,7 @@ syntax_fmt: BZPOPMAX key [key ...] timeout syntax_str: timeout title: BZPOPMAX --- -`BZPOPMAX` is the blocking variant of the sorted set [`ZPOPMAX`](/commands/zpopmax) primitive. +`BZPOPMAX` is the blocking variant of the sorted set [`ZPOPMAX`]({{< relref "/commands/zpopmax" >}}) primitive. It is the blocking version because it blocks the connection when there are no members to pop from any of the given sorted sets. @@ -71,7 +71,7 @@ The `timeout` argument is interpreted as a double value specifying the maximum number of seconds to block. A timeout of zero can be used to block indefinitely. See the [BZPOPMIN documentation][cb] for the exact semantics, since `BZPOPMAX` -is identical to [`BZPOPMIN`](/commands/bzpopmin) with the only difference being that it pops members +is identical to [`BZPOPMIN`]({{< relref "/commands/bzpopmin" >}}) with the only difference being that it pops members with the highest scores instead of popping the ones with the lowest scores. [cb]: /commands/bzpopmin diff --git a/content/commands/bzpopmin/index.md b/content/commands/bzpopmin/index.md index fb4dbb8916..cf6b4ba67a 100644 --- a/content/commands/bzpopmin/index.md +++ b/content/commands/bzpopmin/index.md @@ -60,7 +60,7 @@ syntax_fmt: BZPOPMIN key [key ...] timeout syntax_str: timeout title: BZPOPMIN --- -`BZPOPMIN` is the blocking variant of the sorted set [`ZPOPMIN`](/commands/zpopmin) primitive. +`BZPOPMIN` is the blocking variant of the sorted set [`ZPOPMIN`]({{< relref "/commands/zpopmin" >}}) primitive. It is the blocking version because it blocks the connection when there are no members to pop from any of the given sorted sets. @@ -71,7 +71,7 @@ The `timeout` argument is interpreted as a double value specifying the maximum number of seconds to block. 
A timeout of zero can be used to block indefinitely. See the [BLPOP documentation][cl] for the exact semantics, since `BZPOPMIN` is -identical to [`BLPOP`](/commands/blpop) with the only difference being the data structure being +identical to [`BLPOP`]({{< relref "/commands/blpop" >}}) with the only difference being the data structure being popped from. [cl]: /commands/blpop diff --git a/content/commands/cf.add/index.md b/content/commands/cf.add/index.md index 1f2fe80edf..754aa39cdb 100644 --- a/content/commands/cf.add/index.md +++ b/content/commands/cf.add/index.md @@ -30,7 +30,7 @@ title: CF.ADD Adds an item to the cuckoo filter. Cuckoo filters can contain the same item multiple times, and consider each addition as separate. -Use [`CF.ADDNX`](/commands/cf.addnx) to add an item only if it does not exist. +Use [`CF.ADDNX`]({{< baseurl >}}/commands/cf.addnx) to add an item only if it does not exist. ## Required arguments @@ -50,7 +50,7 @@ is an item to add. Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers) - where "1" means that the item has been added successfully +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - where "1" means that the item has been added successfully - [] on error (invalid arguments, wrong key type, etc.) and also when the filter is full ## Complexity diff --git a/content/commands/cf.addnx/index.md b/content/commands/cf.addnx/index.md index 95d27f071f..ca0ff3531d 100644 --- a/content/commands/cf.addnx/index.md +++ b/content/commands/cf.addnx/index.md @@ -29,12 +29,12 @@ title: CF.ADDNX --- Adds an item to a cuckoo filter if the item does not exist. -This command is similar to the combination of [`CF.EXISTS`](/commands/cf.exists) and [`CF.ADD`](/commands/cf.add). It does not add an item into the filter if its fingerprint already exists. 
+This command is similar to the combination of [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists) and [`CF.ADD`]({{< baseurl >}}/commands/cf.add). It does not add an item into the filter if its fingerprint already exists.
 
 Notes:
 
-- This command is slower than [`CF.ADD`](/commands/cf.add) because it first checks whether the item exists.
-- Since [`CF.EXISTS`](/commands/cf.exists) can result in false positive, `CF.ADDNX` may not add an item because it is supposedly already exist, which may be wrong.
+- This command is slower than [`CF.ADD`]({{< baseurl >}}/commands/cf.add) because it first checks whether the item exists.
+- Since [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists) can result in false positives, `CF.ADDNX` may not add an item because it supposedly already exists, which may be wrong.
 
 
@@ -56,7 +56,7 @@ is an item to add.
 
 Returns one of these replies:
 
-- [Integer reply](/docs/reference/protocol-spec#integers), where `0` means that the item's fingerprint already exist in the filter, and `1` means that the item has been successfully added to the filter.
+- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), where `0` means that the item's fingerprint already exists in the filter, and `1` means that the item has been successfully added to the filter.
 - [] on error (invalid arguments, wrong key type, etc.) and also when the filter is full.
 
 ## Examples
diff --git a/content/commands/cf.count/index.md b/content/commands/cf.count/index.md
index 2d35e4a17b..0b318902ab 100644
--- a/content/commands/cf.count/index.md
+++ b/content/commands/cf.count/index.md
@@ -29,7 +29,7 @@ title: CF.COUNT
 ---
 Returns an estimation of the number of times a given item was added to a cuckoo filter.
 
-If you just want to check that a given item was added to a cuckoo filter, use [`CF.EXISTS`](/commands/cf.exists).
+If you just want to check that a given item was added to a cuckoo filter, use [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists).
## Required arguments @@ -48,7 +48,7 @@ is an item to check. Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers), where a positive value is an estimation of the number of times `item` was added to the filter. An overestimation is possible, but not an underestimation. `0` means that `key` does not exist or that `item` had not been added to the filter. See note in [`CF.DEL`](/commands/cf.del). +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), where a positive value is an estimation of the number of times `item` was added to the filter. An overestimation is possible, but not an underestimation. `0` means that `key` does not exist or that `item` had not been added to the filter. See note in [`CF.DEL`]({{< baseurl >}}/commands/cf.del). - [] on error (invalid arguments, wrong key type, etc.) ## Examples diff --git a/content/commands/cf.del/index.md b/content/commands/cf.del/index.md index 643b5d3bcb..e39b6eb25c 100644 --- a/content/commands/cf.del/index.md +++ b/content/commands/cf.del/index.md @@ -57,7 +57,7 @@ checked on all `sub-filters`. Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers) - where "1" means that the item has been deleted, and "0" means that such item was not found in the filter +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - where "1" means that the item has been deleted, and "0" means that such item was not found in the filter - [] on error (invalid arguments, wrong key type, etc.) ## Examples diff --git a/content/commands/cf.exists/index.md b/content/commands/cf.exists/index.md index 60dcace812..0c567a8a0f 100644 --- a/content/commands/cf.exists/index.md +++ b/content/commands/cf.exists/index.md @@ -29,7 +29,7 @@ title: CF.EXISTS --- Determines whether a given item was added to a cuckoo filter. -This command is similar to [`CF.MEXISTS`](/commands/cf.mexists), except that only one item can be checked. 
+This command is similar to [`CF.MEXISTS`]({{< baseurl >}}/commands/cf.mexists), except that only one item can be checked. ## Required arguments @@ -48,7 +48,7 @@ is an item to check. Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers), where `1` means that, with high probability, `item` had already been added to the filter, and `0` means that `key` does not exist or that `item` had not been added to the filter. See note in [`CF.DEL`](/commands/cf.del). +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), where `1` means that, with high probability, `item` had already been added to the filter, and `0` means that `key` does not exist or that `item` had not been added to the filter. See note in [`CF.DEL`]({{< baseurl >}}/commands/cf.del). - [] on error (invalid arguments, wrong key type, and so on) ## Examples diff --git a/content/commands/cf.info/index.md b/content/commands/cf.info/index.md index e1caecea1d..804060b990 100644 --- a/content/commands/cf.info/index.md +++ b/content/commands/cf.info/index.md @@ -38,7 +38,7 @@ is key name for a cuckoo filter. 
Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) with argument name ([Simple string reply](/docs/reference/protocol-spec#simple-strings)) and value ([Integer reply](/docs/reference/protocol-spec#integers)) pairs +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with argument name ([Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}})) and value ([Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})) pairs - [] on error (invalid arguments, key does not exist, wrong key type, and so on) ## Examples diff --git a/content/commands/cf.insert/index.md b/content/commands/cf.insert/index.md index ead8b2b160..9afa071d50 100644 --- a/content/commands/cf.insert/index.md +++ b/content/commands/cf.insert/index.md @@ -44,7 +44,7 @@ title: CF.INSERT --- Adds one or more items to a cuckoo filter, allowing the filter to be created with a custom capacity if it does not exist yet. -This command is similar to [`CF.ADD`](/commands/cf.add), except that more than one item can be added and capacity can be specified. +This command is similar to [`CF.ADD`]({{< baseurl >}}/commands/cf.add), except that more than one item can be added and capacity can be specified. ## Required arguments @@ -70,7 +70,7 @@ If the filter already exists, then this parameter is ignored. If the filter does not exist yet and this parameter is *not* specified, then the filter is created with the module-level default capacity which is 1024. -See [`CF.RESERVE`](/commands/cf.reserve) for more information on cuckoo filter capacities. +See [`CF.RESERVE`]({{< baseurl >}}/commands/cf.reserve) for more information on cuckoo filter capacities.
NOCREATE @@ -84,7 +84,7 @@ This option is mutually exclusive with `CAPACITY`. Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) of [Integer reply](/docs/reference/protocol-spec#integers) - where "1" means that the item has been successfully added to the filter, and "-1" means that the item was not added because the filter is full. +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - where "1" means that the item has been successfully added to the filter, and "-1" means that the item was not added because the filter is full. - [] on error (invalid arguments, wrong key type, and so on) and also when `NOCREATE` is specified and `key` does not exist. ## Examples diff --git a/content/commands/cf.insertnx/index.md b/content/commands/cf.insertnx/index.md index 3e30fbe4a4..8a3de4b547 100644 --- a/content/commands/cf.insertnx/index.md +++ b/content/commands/cf.insertnx/index.md @@ -44,12 +44,12 @@ title: CF.INSERTNX --- Adds one or more items to a cuckoo filter if they did not exist previously, allowing the filter to be created with a custom capacity if it does not exist yet. -This command is similar to [`CF.ADDNX`](/commands/cf.addnx), except that more than one item can be added and capacity can be specified. +This command is similar to [`CF.ADDNX`]({{< baseurl >}}/commands/cf.addnx), except that more than one item can be added and capacity can be specified. Notes: -- This command is slower than [`CF.INSERT`](/commands/cf.insert) because it first checks whether each item exists. -- Since [`CF.EXISTS`](/commands/cf.exists) can result in false positive, `CF.INSERTNX` may not add an item because it is supposedly already exist, which may be wrong. +- This command is slower than [`CF.INSERT`]({{< baseurl >}}/commands/cf.insert) because it first checks whether each item exists. 
+- Since [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists) can result in false positive, `CF.INSERTNX` may not add an item because it is supposedly already exist, which may be wrong. @@ -77,7 +77,7 @@ If the filter already exists, then this parameter is ignored. If the filter does not exist yet and this parameter is *not* specified, then the filter is created with the module-level default capacity which is 1024. -See [`CF.RESERVE`](/commands/cf.reserve) for more information on cuckoo filter capacities. +See [`CF.RESERVE`]({{< baseurl >}}/commands/cf.reserve) for more information on cuckoo filter capacities.
NOCREATE @@ -91,7 +91,7 @@ This option is mutually exclusive with `CAPACITY`. Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) of [Integer reply](/docs/reference/protocol-spec#integers), where `0` means that the item's fingerprint already exists in the filter, `1` means that the item has been successfully added to the filter, and `-1` means that the item was not added because the filter is full. +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), where `0` means that the item's fingerprint already exists in the filter, `1` means that the item has been successfully added to the filter, and `-1` means that the item was not added because the filter is full. - [] on error (invalid arguments, wrong key type, etc.) and also when `NOCREATE` is specified and `key` does not exist. ### Complexity diff --git a/content/commands/cf.loadchunk/index.md b/content/commands/cf.loadchunk/index.md index 3212d97404..95659ff2c6 100644 --- a/content/commands/cf.loadchunk/index.md +++ b/content/commands/cf.loadchunk/index.md @@ -29,9 +29,9 @@ syntax_fmt: CF.LOADCHUNK key iterator data syntax_str: iterator data title: CF.LOADCHUNK --- -Restores a cuckoo filter previously saved using [`CF.SCANDUMP`](/commands/cf.scandump). +Restores a cuckoo filter previously saved using [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump). -See the [`CF.SCANDUMP`](/commands/cf.scandump) command for example usage. +See the [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump) command for example usage. Notes @@ -49,21 +49,21 @@ is key name for a cuckoo filter to restore.
iterator -Iterator value associated with `data` (returned by [`CF.SCANDUMP`](/commands/cf.scandump)) +Iterator value associated with `data` (returned by [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump))
data -Current data chunk (returned by [`CF.SCANDUMP`](/commands/cf.scandump)) +Current data chunk (returned by [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump))
## Return value Returns one of these replies: -- [Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly +- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly - [] on error (invalid arguments, wrong key type, wrong data, etc.) ## Examples -See [`CF.SCANDUMP`](/commands/cf.scandump) for an example. +See [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump) for an example. diff --git a/content/commands/cf.mexists/index.md b/content/commands/cf.mexists/index.md index fd8ca664bc..073ef2d762 100644 --- a/content/commands/cf.mexists/index.md +++ b/content/commands/cf.mexists/index.md @@ -31,7 +31,7 @@ title: CF.MEXISTS --- Determines whether one or more items were added to a cuckoo filter. -This command is similar to [`CF.EXISTS`](/commands/cf.exists), except that more than one item can be checked. +This command is similar to [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists), except that more than one item can be checked. ## Required arguments @@ -50,7 +50,7 @@ One or more items to check. Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) of [Integer reply](/docs/reference/protocol-spec#integers) - where "1" means that, with high probability, `item` was already added to the filter, and "0" means that `key` does not exist or that `item` had not added to the filter. See note in [`CF.DEL`](/commands/cf.del). +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - where "1" means that, with high probability, `item` was already added to the filter, and "0" means that `key` does not exist or that `item` had not added to the filter. See note in [`CF.DEL`]({{< baseurl >}}/commands/cf.del). - [] on error (invalid arguments, wrong key type, etc.) 
## Examples diff --git a/content/commands/cf.reserve/index.md b/content/commands/cf.reserve/index.md index 4bfdfd1e7a..0e2f233a79 100644 --- a/content/commands/cf.reserve/index.md +++ b/content/commands/cf.reserve/index.md @@ -35,9 +35,8 @@ module: Bloom since: 1.0.0 stack_path: docs/data-types/probabilistic summary: Creates a new Cuckoo Filter -syntax_fmt: "CF.RESERVE key capacity [BUCKETSIZE\_bucketsize] - [MAXITERATIONS\_\ - maxiterations] [EXPANSION\_expansion]" +syntax_fmt: "CF.RESERVE key capacity [BUCKETSIZE\_bucketsize] [MAXITERATIONS\_maxiterations]\ + \ [EXPANSION\_expansion]" syntax_str: "capacity [BUCKETSIZE\_bucketsize] [MAXITERATIONS\_maxiterations] [EXPANSION\_\ expansion]" title: CF.RESERVE @@ -108,7 +107,7 @@ Expansion is rounded to the next `2^n` number. Returns one of these replies: -- [Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if filter created successfully +- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if filter created successfully - [] on error (invalid arguments, key already exists, etc.) ## Examples diff --git a/content/commands/cf.scandump/index.md b/content/commands/cf.scandump/index.md index 3eba4216ec..6eb97e498c 100644 --- a/content/commands/cf.scandump/index.md +++ b/content/commands/cf.scandump/index.md @@ -29,7 +29,7 @@ title: CF.SCANDUMP --- Begins an incremental save of the cuckoo filter. -This command is useful for large cuckoo filters that cannot fit into the [`DUMP`](/commands/dump) and [`RESTORE`](/commands/restore) model. +This command is useful for large cuckoo filters that cannot fit into the [`DUMP`]({{< relref "/commands/dump" >}}) and [`RESTORE`]({{< relref "/commands/restore" >}}) model. The first time this command is called, the value of `iter` should be 0. 
@@ -51,11 +51,11 @@ Iterator value; either 0 or the iterator from a previous invocation of this comm Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) of [Integer reply](/docs/reference/protocol-spec#integers) (_Iterator_) and [] (_Data_). +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) (_Iterator_) and [] (_Data_). The Iterator is passed as input to the next invocation of `CF.SCANDUMP`. If _Iterator_ is 0, then it means iteration has completed. - The iterator-data pair should also be passed to [`CF.LOADCHUNK`](/commands/cf.loadchunk) when restoring the filter. + The iterator-data pair should also be passed to [`CF.LOADCHUNK`]({{< baseurl >}}/commands/cf.loadchunk) when restoring the filter. - [] on error (invalid arguments, key not found, wrong key type, etc.) diff --git a/content/commands/client-caching/index.md b/content/commands/client-caching/index.md index 5ec0ab2dab..f64816b93a 100644 --- a/content/commands/client-caching/index.md +++ b/content/commands/client-caching/index.md @@ -46,7 +46,7 @@ Please check the [client side caching documentation](/topics/client-side-caching) for background information. -When tracking is enabled Redis, using the [`CLIENT TRACKING`](/commands/client-tracking) command, it is +When tracking is enabled Redis, using the [`CLIENT TRACKING`]({{< relref "/commands/client-tracking" >}}) command, it is possible to specify the `OPTIN` or `OPTOUT` options, so that keys in read only commands are not automatically remembered by the server to be invalidated later. 
When we are in `OPTIN` mode, we can enable the diff --git a/content/commands/client-getname/index.md b/content/commands/client-getname/index.md index 0e0eeacaff..7ecee2a3f3 100644 --- a/content/commands/client-getname/index.md +++ b/content/commands/client-getname/index.md @@ -28,4 +28,4 @@ syntax_fmt: CLIENT GETNAME syntax_str: '' title: CLIENT GETNAME --- -The `CLIENT GETNAME` returns the name of the current connection as set by [`CLIENT SETNAME`](/commands/client-setname). Since every new connection starts without an associated name, if no name was assigned a null bulk reply is returned. +The `CLIENT GETNAME` returns the name of the current connection as set by [`CLIENT SETNAME`]({{< relref "/commands/client-setname" >}}). Since every new connection starts without an associated name, if no name was assigned a null bulk reply is returned. diff --git a/content/commands/client-getredir/index.md b/content/commands/client-getredir/index.md index 31af6b59a5..ecfafd74c7 100644 --- a/content/commands/client-getredir/index.md +++ b/content/commands/client-getredir/index.md @@ -32,7 +32,7 @@ title: CLIENT GETREDIR --- This command returns the client ID we are redirecting our [tracking](/topics/client-side-caching) notifications to. We set a client -to redirect to when using [`CLIENT TRACKING`](/commands/client-tracking) to enable tracking. However in +to redirect to when using [`CLIENT TRACKING`]({{< relref "/commands/client-tracking" >}}) to enable tracking. However in order to avoid forcing client libraries implementations to remember the ID notifications are redirected to, this command exists in order to improve introspection and allow clients to check later if redirection is active diff --git a/content/commands/client-id/index.md b/content/commands/client-id/index.md index 88f6704b3d..999a6c667d 100644 --- a/content/commands/client-id/index.md +++ b/content/commands/client-id/index.md @@ -34,8 +34,8 @@ ID has certain guarantees: 1. 
It is never repeated, so if `CLIENT ID` returns the same number, the caller can be sure that the underlying client did not disconnect and reconnect the connection, but it is still the same connection. 2. The ID is monotonically incremental. If the ID of a connection is greater than the ID of another connection, it is guaranteed that the second connection was established with the server at a later time. -This command is especially useful together with [`CLIENT UNBLOCK`](/commands/client-unblock) which was -introduced also in Redis 5 together with `CLIENT ID`. Check the [`CLIENT UNBLOCK`](/commands/client-unblock) command page for a pattern involving the two commands. +This command is especially useful together with [`CLIENT UNBLOCK`]({{< relref "/commands/client-unblock" >}}) which was +introduced also in Redis 5 together with `CLIENT ID`. Check the [`CLIENT UNBLOCK`]({{< relref "/commands/client-unblock" >}}) command page for a pattern involving the two commands. ## Examples diff --git a/content/commands/client-info/index.md b/content/commands/client-info/index.md index cc7416d858..789d182c27 100644 --- a/content/commands/client-info/index.md +++ b/content/commands/client-info/index.md @@ -32,7 +32,7 @@ title: CLIENT INFO --- The command returns information and statistics about the current client connection in a mostly human readable format. -The reply format is identical to that of [`CLIENT LIST`](/commands/client-list), and the content consists only of information about the current client. +The reply format is identical to that of [`CLIENT LIST`]({{< relref "/commands/client-list" >}}), and the content consists only of information about the current client. 
## Examples diff --git a/content/commands/client-kill/index.md b/content/commands/client-kill/index.md index 8dc236f7ce..250d6de432 100644 --- a/content/commands/client-kill/index.md +++ b/content/commands/client-kill/index.md @@ -113,16 +113,11 @@ history: linkTitle: CLIENT KILL since: 2.4.0 summary: Terminates open connections. -syntax_fmt: "CLIENT KILL ] | [USER\_username] | [ADDR\_ip:port] | - [LADDR\_\ - ip:port] | [SKIPME\_] [[ID\_client-id] | - [TYPE\_] | - [USER\_username] | [ADDR\_ip:port] | [LADDR\_\ - ip:port] | [SKIPME\_] ...]>>" +syntax_fmt: "CLIENT KILL ] | [USER\_username] | [ADDR\_ip:port] | [LADDR\_ip:port]\ + \ | [SKIPME\_] [[ID\_client-id] | [TYPE\_] | [USER\_username] | [ADDR\_ip:port] | [LADDR\_ip:port] | [SKIPME\_\ + ] ...]>>" syntax_str: '' title: CLIENT KILL --- @@ -130,7 +125,7 @@ The `CLIENT KILL` command closes a given client connection. This command support CLIENT KILL addr:port -The `ip:port` should match a line returned by the [`CLIENT LIST`](/commands/client-list) command (`addr` field). +The `ip:port` should match a line returned by the [`CLIENT LIST`]({{< relref "/commands/client-list" >}}) command (`addr` field). The new format: @@ -141,8 +136,8 @@ instead of killing just by address. The following filters are available: * `CLIENT KILL ADDR ip:port`. This is exactly the same as the old three-arguments behavior. * `CLIENT KILL LADDR ip:port`. Kill all clients connected to specified local (bind) address. -* `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field. Client `ID`'s are retrieved using the [`CLIENT LIST`](/commands/client-list) command. -* `CLIENT KILL TYPE type`, where *type* is one of `normal`, `master`, `replica` and `pubsub`. This closes the connections of **all the clients** in the specified class. Note that clients blocked into the [`MONITOR`](/commands/monitor) command are considered to belong to the `normal` class. +* `CLIENT KILL ID client-id`. Allows to kill a client by its unique `ID` field. 
Client `ID`'s are retrieved using the [`CLIENT LIST`]({{< relref "/commands/client-list" >}}) command. +* `CLIENT KILL TYPE type`, where *type* is one of `normal`, `master`, `replica` and `pubsub`. This closes the connections of **all the clients** in the specified class. Note that clients blocked into the [`MONITOR`]({{< relref "/commands/monitor" >}}) command are considered to belong to the `normal` class. * `CLIENT KILL USER username`. Closes all the connections that are authenticated with the specified [ACL](/topics/acl) username, however it returns an error if the username does not map to an existing ACL user. * `CLIENT KILL SKIPME yes/no`. By default this option is set to `yes`, that is, the client calling the command will not get killed, however setting this option to `no` will have the effect of also killing the client calling the command. diff --git a/content/commands/client-list/index.md b/content/commands/client-list/index.md index cedc759a05..500020d978 100644 --- a/content/commands/client-list/index.md +++ b/content/commands/client-list/index.md @@ -73,8 +73,7 @@ history: linkTitle: CLIENT LIST since: 2.4.0 summary: Lists open connections. -syntax_fmt: "CLIENT LIST [TYPE\_] - [ID\_client-id\ +syntax_fmt: "CLIENT LIST [TYPE\_] [ID\_client-id\ \ [client-id ...]]" syntax_str: "[ID\_client-id [client-id ...]]" title: CLIENT LIST @@ -82,7 +81,7 @@ title: CLIENT LIST The `CLIENT LIST` command returns information and statistics about the client connections server in a mostly human readable format. -You can use one of the optional subcommands to filter the list. The `TYPE type` subcommand filters the list by clients' type, where *type* is one of `normal`, `master`, `replica`, and `pubsub`. Note that clients blocked by the [`MONITOR`](/commands/monitor) command belong to the `normal` class. +You can use one of the optional subcommands to filter the list. 
The `TYPE type` subcommand filters the list by clients' type, where *type* is one of `normal`, `master`, `replica`, and `pubsub`. Note that clients blocked by the [`MONITOR`]({{< relref "/commands/monitor" >}}) command belong to the `normal` class. The `ID` filter only returns entries for clients with IDs matching the `client-id` arguments. @@ -92,7 +91,7 @@ Here is the meaning of the fields: * `addr`: address/port of the client * `laddr`: address/port of local address client connected to (bind address) * `fd`: file descriptor corresponding to the socket -* `name`: the name set by the client with [`CLIENT SETNAME`](/commands/client-setname) +* `name`: the name set by the client with [`CLIENT SETNAME`]({{< relref "/commands/client-setname" >}}) * `age`: total duration of the connection in seconds * `idle`: idle time of the connection in seconds * `flags`: client flags (see below) diff --git a/content/commands/client-no-touch/index.md b/content/commands/client-no-touch/index.md index 576fb28fe6..39afb05bd8 100644 --- a/content/commands/client-no-touch/index.md +++ b/content/commands/client-no-touch/index.md @@ -44,6 +44,6 @@ title: CLIENT NO-TOUCH --- The `CLIENT NO-TOUCH` command controls whether commands sent by the client will alter the LRU/LFU of the keys they access. -When turned on, the current client will not change LFU/LRU stats, unless it sends the [`TOUCH`](/commands/touch) command. +When turned on, the current client will not change LFU/LRU stats, unless it sends the [`TOUCH`]({{< relref "/commands/touch" >}}) command. When turned off, the client touches LFU/LRU stats just as a normal client. 
diff --git a/content/commands/client-pause/index.md b/content/commands/client-pause/index.md index 5c57e5ccc4..a9ceeb981d 100644 --- a/content/commands/client-pause/index.md +++ b/content/commands/client-pause/index.md @@ -66,10 +66,10 @@ Client pause currently supports two modes: For the `WRITE` mode, some commands have special behavior: -* [`EVAL`](/commands/eval)/[`EVALSHA`](/commands/evalsha): Will block client for all scripts. -* [`PUBLISH`](/commands/publish): Will block client. -* [`PFCOUNT`](/commands/pfcount): Will block client. -* [`WAIT`](/commands/wait): Acknowledgments will be delayed, so this command will appear blocked. +* [`EVAL`]({{< relref "/commands/eval" >}})/[`EVALSHA`]({{< relref "/commands/evalsha" >}}): Will block client for all scripts. +* [`PUBLISH`]({{< relref "/commands/publish" >}}): Will block client. +* [`PFCOUNT`]({{< relref "/commands/pfcount" >}}): Will block client. +* [`WAIT`]({{< relref "/commands/wait" >}}): Acknowledgments will be delayed, so this command will appear blocked. This command is useful as it makes able to switch clients from a Redis instance to another one in a controlled way. For example during an instance upgrade the system administrator could do the following: @@ -79,7 +79,7 @@ This command is useful as it makes able to switch clients from a Redis instance * Reconfigure clients to connect with the new master. Since Redis 6.2, the recommended mode for client pause is `WRITE`. This mode will stop all replication traffic, can be -aborted with the [`CLIENT UNPAUSE`](/commands/client-unpause) command, and allows reconfiguring the old master without risking accepting writes after the +aborted with the [`CLIENT UNPAUSE`]({{< relref "/commands/client-unpause" >}}) command, and allows reconfiguring the old master without risking accepting writes after the failover. This is also the mode used during cluster failover. 
For versions before 6.2, it is possible to send `CLIENT PAUSE` in a MULTI/EXEC block together with the `INFO replication` command in order to get the current master offset at the time the clients are blocked. This way it is possible to wait for a specific offset in the replica side in order to make sure all the replication stream was processed. diff --git a/content/commands/client-setinfo/index.md b/content/commands/client-setinfo/index.md index 06174bc904..d07730d391 100644 --- a/content/commands/client-setinfo/index.md +++ b/content/commands/client-setinfo/index.md @@ -43,7 +43,7 @@ syntax_fmt: "CLIENT SETINFO " syntax_str: '' title: CLIENT SETINFO --- -The `CLIENT SETINFO` command assigns various info attributes to the current connection which are displayed in the output of [`CLIENT LIST`](/commands/client-list) and [`CLIENT INFO`](/commands/client-info). +The `CLIENT SETINFO` command assigns various info attributes to the current connection which are displayed in the output of [`CLIENT LIST`]({{< relref "/commands/client-list" >}}) and [`CLIENT INFO`]({{< relref "/commands/client-info" >}}). Client libraries are expected to pipeline this command after authentication on all connections and ignore failures since they could be connected to an older version that doesn't support them. @@ -52,6 +52,6 @@ Currently the supported attributes are: * `lib-name` - meant to hold the name of the client library that's in use. * `lib-ver` - meant to hold the client library's version. -There is no limit to the length of these attributes. However it is not possible to use spaces, newlines, or other non-printable characters that would violate the format of the [`CLIENT LIST`](/commands/client-list) reply. +There is no limit to the length of these attributes. However it is not possible to use spaces, newlines, or other non-printable characters that would violate the format of the [`CLIENT LIST`]({{< relref "/commands/client-list" >}}) reply. 
Note that these attributes are **not** cleared by the RESET command. diff --git a/content/commands/client-setname/index.md b/content/commands/client-setname/index.md index a681401c1e..92d52303fb 100644 --- a/content/commands/client-setname/index.md +++ b/content/commands/client-setname/index.md @@ -37,15 +37,15 @@ title: CLIENT SETNAME --- The `CLIENT SETNAME` command assigns a name to the current connection. -The assigned name is displayed in the output of [`CLIENT LIST`](/commands/client-list) so that it is possible to identify the client that performed a given connection. +The assigned name is displayed in the output of [`CLIENT LIST`]({{< relref "/commands/client-list" >}}) so that it is possible to identify the client that performed a given connection. For instance when Redis is used in order to implement a queue, producers and consumers of messages may want to set the name of the connection according to their role. -There is no limit to the length of the name that can be assigned if not the usual limits of the Redis string type (512 MB). However it is not possible to use spaces in the connection name as this would violate the format of the [`CLIENT LIST`](/commands/client-list) reply. +There is no limit to the length of the name that can be assigned if not the usual limits of the Redis string type (512 MB). However it is not possible to use spaces in the connection name as this would violate the format of the [`CLIENT LIST`]({{< relref "/commands/client-list" >}}) reply. It is possible to entirely remove the connection name setting it to the empty string, that is not a valid connection name since it serves to this specific purpose. -The connection name can be inspected using [`CLIENT GETNAME`](/commands/client-getname). +The connection name can be inspected using [`CLIENT GETNAME`]({{< relref "/commands/client-getname" >}}). Every new connection starts without an assigned name. 
diff --git a/content/commands/client-tracking/index.md b/content/commands/client-tracking/index.md index 0d192e9983..51ed94fd9e 100644 --- a/content/commands/client-tracking/index.md +++ b/content/commands/client-tracking/index.md @@ -68,8 +68,7 @@ hidden: false linkTitle: CLIENT TRACKING since: 6.0.0 summary: Controls server-assisted client-side caching for the connection. -syntax_fmt: "CLIENT TRACKING [REDIRECT\_client-id] [PREFIX\_prefix - [PREFIX\ +syntax_fmt: "CLIENT TRACKING [REDIRECT\_client-id] [PREFIX\_prefix [PREFIX\ \ prefix ...]] [BCAST] [OPTIN] [OPTOUT] [NOLOOP]" syntax_str: "[REDIRECT\_client-id] [PREFIX\_prefix [PREFIX prefix ...]] [BCAST] [OPTIN]\ \ [OPTOUT] [NOLOOP]" @@ -98,7 +97,7 @@ unless tracking is turned off with `CLIENT TRACKING off` at some point. The following are the list of options that modify the behavior of the command when enabling tracking: -* `REDIRECT `: send invalidation messages to the connection with the specified ID. The connection must exist. You can get the ID of a connection using [`CLIENT ID`](/commands/client-id). If the connection we are redirecting to is terminated, when in RESP3 mode the connection with tracking enabled will receive `tracking-redir-broken` push messages in order to signal the condition. +* `REDIRECT `: send invalidation messages to the connection with the specified ID. The connection must exist. You can get the ID of a connection using [`CLIENT ID`]({{< relref "/commands/client-id" >}}). If the connection we are redirecting to is terminated, when in RESP3 mode the connection with tracking enabled will receive `tracking-redir-broken` push messages in order to signal the condition. * `BCAST`: enable tracking in broadcasting mode. In this mode invalidation messages are reported for all the prefixes specified, regardless of the keys requested by the connection. 
Instead when the broadcasting mode is not enabled, Redis will track which keys are fetched using read-only commands, and will report invalidation messages only for such keys. * `PREFIX `: for broadcasting, register a given key prefix, so that notifications will be provided only for keys starting with this string. This option can be given multiple times to register multiple prefixes. If broadcasting is enabled without this option, Redis will send notifications for every key. You can't delete a single prefix, but you can delete all prefixes by disabling and re-enabling tracking. Using this option adds the additional time complexity of O(N^2), where N is the total number of prefixes tracked. * `OPTIN`: when broadcasting is NOT active, normally don't track keys in read only commands, unless they are called immediately after a `CLIENT CACHING yes` command. diff --git a/content/commands/client-unblock/index.md b/content/commands/client-unblock/index.md index 3a96846c09..3505499573 100644 --- a/content/commands/client-unblock/index.md +++ b/content/commands/client-unblock/index.md @@ -47,7 +47,7 @@ syntax_fmt: CLIENT UNBLOCK client-id [TIMEOUT | ERROR] syntax_str: '[TIMEOUT | ERROR]' title: CLIENT UNBLOCK --- -This command can unblock, from a different connection, a client blocked in a blocking operation, such as for instance [`BRPOP`](/commands/brpop) or [`XREAD`](/commands/xread) or [`WAIT`](/commands/wait). +This command can unblock, from a different connection, a client blocked in a blocking operation, such as for instance [`BRPOP`]({{< relref "/commands/brpop" >}}) or [`XREAD`]({{< relref "/commands/xread" >}}) or [`WAIT`]({{< relref "/commands/wait" >}}). By default the client is unblocked as if the timeout of the command was reached, however if an additional (and optional) argument is passed, it is possible to specify the unblocking behavior, that can be **TIMEOUT** (the default) or **ERROR**. 
If **ERROR** is specified, the behavior is to unblock the client returning as error the fact that the client was force-unblocked. Specifically the client will receive the following error: @@ -59,7 +59,7 @@ the same, however the error code will remain `-UNBLOCKED`. This command is useful especially when we are monitoring many keys with a limited number of connections. For instance we may want to monitor multiple -streams with [`XREAD`](/commands/xread) without using more than N connections. However at some +streams with [`XREAD`]({{< relref "/commands/xread" >}}) without using more than N connections. However at some point the consumer process is informed that there is one more stream key to monitor. In order to avoid using more connections, the best behavior would be to stop the blocking command from one of the connections in the pool, add @@ -68,7 +68,7 @@ the new key, and issue the blocking command again. To obtain this behavior the following pattern is used. The process uses an additional *control connection* in order to send the `CLIENT UNBLOCK` command if needed. In the meantime, before running the blocking operation on the other -connections, the process runs [`CLIENT ID`](/commands/client-id) in order to get the ID associated +connections, the process runs [`CLIENT ID`]({{< relref "/commands/client-id" >}}) in order to get the ID associated with that connection. When a new key should be added, or when a key should no longer be monitored, the relevant connection blocking command is aborted by sending `CLIENT UNBLOCK` in the control connection. 
The blocking command diff --git a/content/commands/client-unpause/index.md b/content/commands/client-unpause/index.md index 9e8a782627..5828fb944a 100644 --- a/content/commands/client-unpause/index.md +++ b/content/commands/client-unpause/index.md @@ -31,4 +31,4 @@ syntax_fmt: CLIENT UNPAUSE syntax_str: '' title: CLIENT UNPAUSE --- -`CLIENT UNPAUSE` is used to resume command processing for all clients that were paused by [`CLIENT PAUSE`](/commands/client-pause). +`CLIENT UNPAUSE` is used to resume command processing for all clients that were paused by [`CLIENT PAUSE`]({{< relref "/commands/client-pause" >}}). diff --git a/content/commands/client/index.md b/content/commands/client/index.md index d76f17802b..1c13b503bf 100644 --- a/content/commands/client/index.md +++ b/content/commands/client/index.md @@ -25,4 +25,4 @@ title: CLIENT --- This is a container command for client connection commands. -To see the list of available commands you can call [`CLIENT HELP`](/commands/client-help). \ No newline at end of file +To see the list of available commands you can call [`CLIENT HELP`]({{< relref "/commands/client-help" >}}). \ No newline at end of file diff --git a/content/commands/cluster-addslotsrange/index.md b/content/commands/cluster-addslotsrange/index.md index 496d83e80a..f6a5f8d9af 100644 --- a/content/commands/cluster-addslotsrange/index.md +++ b/content/commands/cluster-addslotsrange/index.md @@ -41,13 +41,13 @@ syntax_fmt: CLUSTER ADDSLOTSRANGE start-slot end-slot [start-slot end-slot ...] syntax_str: '' title: CLUSTER ADDSLOTSRANGE --- -The `CLUSTER ADDSLOTSRANGE` is similar to the [`CLUSTER ADDSLOTS`](/commands/cluster-addslots) command in that they both assign hash slots to nodes. +The `CLUSTER ADDSLOTSRANGE` is similar to the [`CLUSTER ADDSLOTS`]({{< relref "/commands/cluster-addslots" >}}) command in that they both assign hash slots to nodes. 
-The difference between the two commands is that [`CLUSTER ADDSLOTS`](/commands/cluster-addslots) takes a list of slots to assign to the node, while `CLUSTER ADDSLOTSRANGE` takes a list of slot ranges (specified by start and end slots) to assign to the node. +The difference between the two commands is that [`CLUSTER ADDSLOTS`]({{< relref "/commands/cluster-addslots" >}}) takes a list of slots to assign to the node, while `CLUSTER ADDSLOTSRANGE` takes a list of slot ranges (specified by start and end slots) to assign to the node. ## Example -To assign slots 1 2 3 4 5 to the node, the [`CLUSTER ADDSLOTS`](/commands/cluster-addslots) command is: +To assign slots 1 2 3 4 5 to the node, the [`CLUSTER ADDSLOTS`]({{< relref "/commands/cluster-addslots" >}}) command is: > CLUSTER ADDSLOTS 1 2 3 4 5 OK diff --git a/content/commands/cluster-delslots/index.md b/content/commands/cluster-delslots/index.md index 1bfcae8f80..45774d5b00 100644 --- a/content/commands/cluster-delslots/index.md +++ b/content/commands/cluster-delslots/index.md @@ -45,7 +45,7 @@ has consequently removed the associations for the passed hash slots, we say those hash slots are *unbound*. Note that the existence of unbound hash slots occurs naturally when a node has not been configured to handle them (something that can be done with the -[`CLUSTER ADDSLOTS`](/commands/cluster-addslots) command) and if it has not received any information about +[`CLUSTER ADDSLOTS`]({{< relref "/commands/cluster-addslots" >}}) command) and if it has not received any information about who owns those hash slots (something that it can learn from heartbeat or update messages). diff --git a/content/commands/cluster-delslotsrange/index.md b/content/commands/cluster-delslotsrange/index.md index 3d67f6c95f..26045c6309 100644 --- a/content/commands/cluster-delslotsrange/index.md +++ b/content/commands/cluster-delslotsrange/index.md @@ -41,12 +41,12 @@ syntax_fmt: CLUSTER DELSLOTSRANGE start-slot end-slot [start-slot end-slot ...] 
syntax_str: '' title: CLUSTER DELSLOTSRANGE --- -The `CLUSTER DELSLOTSRANGE` command is similar to the [`CLUSTER DELSLOTS`](/commands/cluster-delslots) command in that they both remove hash slots from the node. -The difference is that [`CLUSTER DELSLOTS`](/commands/cluster-delslots) takes a list of hash slots to remove from the node, while `CLUSTER DELSLOTSRANGE` takes a list of slot ranges (specified by start and end slots) to remove from the node. +The `CLUSTER DELSLOTSRANGE` command is similar to the [`CLUSTER DELSLOTS`]({{< relref "/commands/cluster-delslots" >}}) command in that they both remove hash slots from the node. +The difference is that [`CLUSTER DELSLOTS`]({{< relref "/commands/cluster-delslots" >}}) takes a list of hash slots to remove from the node, while `CLUSTER DELSLOTSRANGE` takes a list of slot ranges (specified by start and end slots) to remove from the node. ## Example -To remove slots 1 2 3 4 5 from the node, the [`CLUSTER DELSLOTS`](/commands/cluster-delslots) command is: +To remove slots 1 2 3 4 5 from the node, the [`CLUSTER DELSLOTS`]({{< relref "/commands/cluster-delslots" >}}) command is: > CLUSTER DELSLOTS 1 2 3 4 5 OK diff --git a/content/commands/cluster-failover/index.md b/content/commands/cluster-failover/index.md index 6d824dd853..77927c64dd 100644 --- a/content/commands/cluster-failover/index.md +++ b/content/commands/cluster-failover/index.md @@ -102,6 +102,6 @@ Because of this the **TAKEOVER** option should be used with care. * An `OK` reply is no guarantee that the failover will succeed. * A replica can only be promoted to a master if it is known as a replica by a majority of the masters in the cluster. If the replica is a new node that has just been added to the cluster (for example after upgrading it), it may not yet be known to all the masters in the cluster. 
- To check that the masters are aware of a new replica, you can send [`CLUSTER NODES`](/commands/cluster-nodes) or [`CLUSTER REPLICAS`](/commands/cluster-replicas) to each of the master nodes and check that it appears as a replica, before sending `CLUSTER FAILOVER` to the replica. -* To check that the failover has actually happened you can use [`ROLE`](/commands/role), `INFO REPLICATION` (which indicates "role:master" after successful failover), or [`CLUSTER NODES`](/commands/cluster-nodes) to verify that the state of the cluster has changed sometime after the command was sent. + To check that the masters are aware of a new replica, you can send [`CLUSTER NODES`]({{< relref "/commands/cluster-nodes" >}}) or [`CLUSTER REPLICAS`]({{< relref "/commands/cluster-replicas" >}}) to each of the master nodes and check that it appears as a replica, before sending `CLUSTER FAILOVER` to the replica. +* To check that the failover has actually happened you can use [`ROLE`]({{< relref "/commands/role" >}}), `INFO REPLICATION` (which indicates "role:master" after successful failover), or [`CLUSTER NODES`]({{< relref "/commands/cluster-nodes" >}}) to verify that the state of the cluster has changed sometime after the command was sent. * To check if the failover has failed, check the replica's log for "Manual failover timed out", which is logged if the replica has given up after a few seconds. diff --git a/content/commands/cluster-getkeysinslot/index.md b/content/commands/cluster-getkeysinslot/index.md index c8df07d0a2..0b685055eb 100644 --- a/content/commands/cluster-getkeysinslot/index.md +++ b/content/commands/cluster-getkeysinslot/index.md @@ -42,7 +42,7 @@ of this API to batch-processing keys. The main usage of this command is during rehashing of cluster slots from one node to another. 
The way the rehashing is performed is exposed in the Redis Cluster specification, or in a more simple to digest form, as an appendix -of the [`CLUSTER SETSLOT`](/commands/cluster-setslot) command documentation. +of the [`CLUSTER SETSLOT`]({{< relref "/commands/cluster-setslot" >}}) command documentation. ``` > CLUSTER GETKEYSINSLOT 7000 3 diff --git a/content/commands/cluster-info/index.md b/content/commands/cluster-info/index.md index cd6fdda1af..3536231ad3 100644 --- a/content/commands/cluster-info/index.md +++ b/content/commands/cluster-info/index.md @@ -27,7 +27,7 @@ syntax_fmt: CLUSTER INFO syntax_str: '' title: CLUSTER INFO --- -`CLUSTER INFO` provides [`INFO`](/commands/info) style information about Redis Cluster vital parameters. +`CLUSTER INFO` provides [`INFO`]({{< relref "/commands/info" >}}) style information about Redis Cluster vital parameters. The following fields are always present in the reply: ``` @@ -62,9 +62,9 @@ The following message-related fields may be included in the reply if the value i Each message type includes statistics on the number of messages sent and received. Here are the explanation of these fields: -* `cluster_stats_messages_ping_sent` and `cluster_stats_messages_ping_received`: Cluster bus PING (not to be confused with the client command [`PING`](/commands/ping)). +* `cluster_stats_messages_ping_sent` and `cluster_stats_messages_ping_received`: Cluster bus PING (not to be confused with the client command [`PING`]({{< relref "/commands/ping" >}})). * `cluster_stats_messages_pong_sent` and `cluster_stats_messages_pong_received`: PONG (reply to PING). -* `cluster_stats_messages_meet_sent` and `cluster_stats_messages_meet_received`: Handshake message sent to a new node, either through gossip or [`CLUSTER MEET`](/commands/cluster-meet). 
+* `cluster_stats_messages_meet_sent` and `cluster_stats_messages_meet_received`: Handshake message sent to a new node, either through gossip or [`CLUSTER MEET`]({{< relref "/commands/cluster-meet" >}}). * `cluster_stats_messages_fail_sent` and `cluster_stats_messages_fail_received`: Mark node xxx as failing. * `cluster_stats_messages_publish_sent` and `cluster_stats_messages_publish_received`: Pub/Sub Publish propagation, see [Pubsub](/topics/pubsub#pubsub). * `cluster_stats_messages_auth-req_sent` and `cluster_stats_messages_auth-req_received`: Replica initiated leader election to replace its master. diff --git a/content/commands/cluster-meet/index.md b/content/commands/cluster-meet/index.md index e99006d438..fd50141800 100644 --- a/content/commands/cluster-meet/index.md +++ b/content/commands/cluster-meet/index.md @@ -79,6 +79,6 @@ If the optional `cluster_bus_port` argument is not provided, the default of port When a given node receives a `CLUSTER MEET` message, the node specified in the command still does not know the node we sent the command to. So in order for the node to force the receiver to accept it as a trusted node, it sends a -`MEET` packet instead of a [`PING`](/commands/ping) packet. The two packets have exactly the +`MEET` packet instead of a [`PING`]({{< relref "/commands/ping" >}}) packet. The two packets have exactly the same format, but the former forces the receiver to acknowledge the node as trusted. diff --git a/content/commands/cluster-nodes/index.md b/content/commands/cluster-nodes/index.md index ff05e9e161..a8877ae8d8 100644 --- a/content/commands/cluster-nodes/index.md +++ b/content/commands/cluster-nodes/index.md @@ -38,7 +38,7 @@ order to store on disk the cluster state (however the on disk cluster state has a few additional info appended at the end). Note that normally clients willing to fetch the map between Cluster -hash slots and node addresses should use [`CLUSTER SLOTS`](/commands/cluster-slots) instead. 
+hash slots and node addresses should use [`CLUSTER SLOTS`]({{< relref "/commands/cluster-slots" >}}) instead. `CLUSTER NODES`, that provides more information, should be used for administrative tasks, debugging, and configuration inspections. It is also used by `redis-cli` in order to manage a cluster. diff --git a/content/commands/cluster-replicas/index.md b/content/commands/cluster-replicas/index.md index 14d7d748b4..3e376236e6 100644 --- a/content/commands/cluster-replicas/index.md +++ b/content/commands/cluster-replicas/index.md @@ -35,7 +35,7 @@ syntax_str: '' title: CLUSTER REPLICAS --- The command provides a list of replica nodes replicating from the specified -master node. The list is provided in the same format used by [`CLUSTER NODES`](/commands/cluster-nodes) (please refer to its documentation for the specification of the format). +master node. The list is provided in the same format used by [`CLUSTER NODES`]({{< relref "/commands/cluster-nodes" >}}) (please refer to its documentation for the specification of the format). The command will fail if the specified node is not known or if it is not a master according to the node table of the node receiving the command. diff --git a/content/commands/cluster-reset/index.md b/content/commands/cluster-reset/index.md index 3b769ce036..fcccb19f68 100644 --- a/content/commands/cluster-reset/index.md +++ b/content/commands/cluster-reset/index.md @@ -46,7 +46,7 @@ title: CLUSTER RESET Reset a Redis Cluster node, in a more or less drastic way depending on the reset type, that can be **hard** or **soft**. Note that this command **does not work for masters if they hold one or more keys**, in that case -to completely reset a master node keys must be removed first, e.g. by using [`FLUSHALL`](/commands/flushall) first, +to completely reset a master node keys must be removed first, e.g. by using [`FLUSHALL`]({{< relref "/commands/flushall" >}}) first, and then `CLUSTER RESET`. 
Effects on the node: diff --git a/content/commands/cluster-saveconfig/index.md b/content/commands/cluster-saveconfig/index.md index affdc5ba50..c5fde5df6f 100644 --- a/content/commands/cluster-saveconfig/index.md +++ b/content/commands/cluster-saveconfig/index.md @@ -36,7 +36,7 @@ flushed on the computer disk. This command is mainly used in the event a `nodes.conf` node state file gets lost / deleted for some reason, and we want to generate it again from scratch. It can also be useful in case of mundane alterations of a node cluster -configuration via the [`CLUSTER`](/commands/cluster) command in order to ensure the new configuration +configuration via the [`CLUSTER`]({{< relref "/commands/cluster" >}}) command in order to ensure the new configuration is persisted on disk, however all the commands should normally be able to auto schedule to persist the configuration on disk when it is important to do so for the correctness of the system in the event of a restart. diff --git a/content/commands/cluster-setslot/index.md b/content/commands/cluster-setslot/index.md index 5b259b90c6..f048894b19 100644 --- a/content/commands/cluster-setslot/index.md +++ b/content/commands/cluster-setslot/index.md @@ -48,8 +48,7 @@ hidden: false linkTitle: CLUSTER SETSLOT since: 3.0.0 summary: Binds a hash slot to a node. -syntax_fmt: "CLUSTER SETSLOT slot " syntax_str: "" title: CLUSTER SETSLOT @@ -87,13 +86,13 @@ the node is not already owner of the specified hash slot. When a slot is set in importing state, the node changes behavior in the following way: -1. Commands about this hash slot are refused and a `MOVED` redirection is generated as usually, but in the case the command follows an [`ASKING`](/commands/asking) command, in this case the command is executed. +1. Commands about this hash slot are refused and a `MOVED` redirection is generated as usually, but in the case the command follows an [`ASKING`]({{< relref "/commands/asking" >}}) command, in this case the command is executed. 
-In this way when a node in migrating state generates an `ASK` redirection, the client contacts the target node, sends [`ASKING`](/commands/asking), and immediately after sends the command. This way commands about non-existing keys in the old node or keys already migrated to the target node are executed in the target node, so that: +In this way when a node in migrating state generates an `ASK` redirection, the client contacts the target node, sends [`ASKING`]({{< relref "/commands/asking" >}}), and immediately after sends the command. This way commands about non-existing keys in the old node or keys already migrated to the target node are executed in the target node, so that: 1. New keys are always created in the target node. During a hash slot migration we'll have to move only old keys, not new ones. 2. Commands about keys already migrated are correctly processed in the context of the node which is the target of the migration, the new hash slot owner, in order to guarantee consistency. -3. Without [`ASKING`](/commands/asking) the behavior is the same as usually. This guarantees that clients with a broken hash slots mapping will not write for error in the target node, creating a new version of a key that has yet to be migrated. +3. Without [`ASKING`]({{< relref "/commands/asking" >}}) the behavior is the same as usually. This guarantees that clients with a broken hash slots mapping will not write for error in the target node, creating a new version of a key that has yet to be migrated. ## CLUSTER SETSLOT `` STABLE @@ -122,7 +121,7 @@ The `CLUSTER SETSLOT` command is an important piece used by Redis Cluster in ord 1. Set the destination node slot to *importing* state using `CLUSTER SETSLOT IMPORTING `. 2. Set the source node slot to *migrating* state using `CLUSTER SETSLOT MIGRATING `. -3. 
Get keys from the source node with [`CLUSTER GETKEYSINSLOT`](/commands/cluster-getkeysinslot) command and move them into the destination node using the [`MIGRATE`](/commands/migrate) command. +3. Get keys from the source node with [`CLUSTER GETKEYSINSLOT`]({{< relref "/commands/cluster-getkeysinslot" >}}) command and move them into the destination node using the [`MIGRATE`]({{< relref "/commands/migrate" >}}) command. 4. Send `CLUSTER SETSLOT NODE ` to the destination node. 5. Send `CLUSTER SETSLOT NODE ` to the source node. 6. Send `CLUSTER SETSLOT NODE ` to the other master nodes (optional). diff --git a/content/commands/cluster-shards/index.md b/content/commands/cluster-shards/index.md index 1d410e9aa3..41dace5580 100644 --- a/content/commands/cluster-shards/index.md +++ b/content/commands/cluster-shards/index.md @@ -33,7 +33,7 @@ A shard is defined as a collection of nodes that serve the same set of slots and A shard may only have a single master at a given time, but may have multiple or no replicas. It is possible for a shard to not be serving any slots while still having replicas. -This command replaces the [`CLUSTER SLOTS`](/commands/cluster-slots) command, by providing a more efficient and extensible representation of the cluster. +This command replaces the [`CLUSTER SLOTS`]({{< relref "/commands/cluster-slots" >}}) command, by providing a more efficient and extensible representation of the cluster. The command is suitable to be used by Redis Cluster client libraries in order to understand the topology of the cluster. A client should issue this command on startup in order to retrieve the map associating cluster *hash slots* with actual node information. 
diff --git a/content/commands/cluster-slaves/index.md b/content/commands/cluster-slaves/index.md index b36adde078..1462718835 100644 --- a/content/commands/cluster-slaves/index.md +++ b/content/commands/cluster-slaves/index.md @@ -31,17 +31,17 @@ hidden: false hints: - nondeterministic_output linkTitle: CLUSTER SLAVES -replaced_by: '[`CLUSTER REPLICAS`](/commands/cluster-replicas)' +replaced_by: '[`CLUSTER REPLICAS`]({{< relref "/commands/cluster-replicas" >}})' since: 3.0.0 summary: Lists the replica nodes of a master node. syntax_fmt: CLUSTER SLAVES node-id syntax_str: '' title: CLUSTER SLAVES --- -**A note about the word slave used in this man page and command name**: starting with Redis version 5, if not for backward compatibility, the Redis project no longer uses the word slave. Please use the new command [`CLUSTER REPLICAS`](/commands/cluster-replicas). The command `CLUSTER SLAVES` will continue to work for backward compatibility. +**A note about the word slave used in this man page and command name**: starting with Redis version 5, if not for backward compatibility, the Redis project no longer uses the word slave. Please use the new command [`CLUSTER REPLICAS`]({{< relref "/commands/cluster-replicas" >}}). The command `CLUSTER SLAVES` will continue to work for backward compatibility. The command provides a list of replica nodes replicating from the specified -master node. The list is provided in the same format used by [`CLUSTER NODES`](/commands/cluster-nodes) (please refer to its documentation for the specification of the format). +master node. The list is provided in the same format used by [`CLUSTER NODES`]({{< relref "/commands/cluster-nodes" >}}) (please refer to its documentation for the specification of the format). The command will fail if the specified node is not known or if it is not a master according to the node table of the node receiving the command. 
diff --git a/content/commands/cluster-slots/index.md b/content/commands/cluster-slots/index.md index 1a80e97b69..904549cdc9 100644 --- a/content/commands/cluster-slots/index.md +++ b/content/commands/cluster-slots/index.md @@ -30,7 +30,7 @@ history: - - 7.0.0 - Added additional networking metadata field. linkTitle: CLUSTER SLOTS -replaced_by: '[`CLUSTER SHARDS`](/commands/cluster-shards)' +replaced_by: '[`CLUSTER SHARDS`]({{< relref "/commands/cluster-shards" >}})' since: 3.0.0 summary: Returns the mapping of cluster slots to nodes. syntax_fmt: CLUSTER SLOTS diff --git a/content/commands/cluster/index.md b/content/commands/cluster/index.md index f994e52b15..2b2b49e75a 100644 --- a/content/commands/cluster/index.md +++ b/content/commands/cluster/index.md @@ -25,4 +25,4 @@ title: CLUSTER --- This is a container command for Redis Cluster commands. -To see the list of available commands you can call [`CLUSTER HELP`](/commands/cluster-help). +To see the list of available commands you can call [`CLUSTER HELP`]({{< relref "/commands/cluster-help" >}}). diff --git a/content/commands/cms.incrby/index.md b/content/commands/cms.incrby/index.md index 5cb5b8be5d..07cf7ac8c7 100644 --- a/content/commands/cms.incrby/index.md +++ b/content/commands/cms.incrby/index.md @@ -45,7 +45,7 @@ Increases the count of item by increment. Multiple items can be increased with o ## Return -[Array reply](/docs/reference/protocol-spec#arrays) of [Integer reply](/docs/reference/protocol-spec#integers) with an updated min-count of each of the items in the sketch. +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) with an updated min-count of each of the items in the sketch. Count of each item after increment. 
diff --git a/content/commands/cms.info/index.md b/content/commands/cms.info/index.md index 6986308952..baee931a51 100644 --- a/content/commands/cms.info/index.md +++ b/content/commands/cms.info/index.md @@ -33,7 +33,7 @@ Returns width, depth and total count of the sketch. ## Return -[Array reply](/docs/reference/protocol-spec#arrays) with information of the filter. +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with information of the filter. ## Examples diff --git a/content/commands/cms.initbydim/index.md b/content/commands/cms.initbydim/index.md index 8bb59510f9..1a15d963e0 100644 --- a/content/commands/cms.initbydim/index.md +++ b/content/commands/cms.initbydim/index.md @@ -40,7 +40,7 @@ Initializes a Count-Min Sketch to dimensions specified by user. ## Return -[Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly, or [] otherwise. +[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly, or [] otherwise. ## Examples diff --git a/content/commands/cms.initbyprob/index.md b/content/commands/cms.initbyprob/index.md index 0fb598239f..3e5c74fe5b 100644 --- a/content/commands/cms.initbyprob/index.md +++ b/content/commands/cms.initbyprob/index.md @@ -44,7 +44,7 @@ Initializes a Count-Min Sketch to accommodate requested tolerances. ## Return -[Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly, or [] otherwise. +[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly, or [] otherwise. 
## Examples diff --git a/content/commands/cms.merge/index.md b/content/commands/cms.merge/index.md index 9152c13510..0de7b2f423 100644 --- a/content/commands/cms.merge/index.md +++ b/content/commands/cms.merge/index.md @@ -36,9 +36,8 @@ module: Bloom since: 2.0.0 stack_path: docs/data-types/probabilistic summary: Merges several sketches into one sketch -syntax_fmt: "CMS.MERGE destination numKeys source [source ...] [WEIGHTS weight - \ - \ [weight ...]]" +syntax_fmt: CMS.MERGE destination numKeys source [source ...] [WEIGHTS weight [weight + ...]] syntax_str: numKeys source [source ...] [WEIGHTS weight [weight ...]] title: CMS.MERGE --- @@ -53,7 +52,7 @@ Merges several sketches into one sketch. All sketches must have identical width ## Return -[Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly, or [] otherwise. +[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly, or [] otherwise. ## Examples diff --git a/content/commands/cms.query/index.md b/content/commands/cms.query/index.md index a1464be63b..499a67e06b 100644 --- a/content/commands/cms.query/index.md +++ b/content/commands/cms.query/index.md @@ -39,7 +39,7 @@ Returns the count for one or more items in a sketch. Count of one or more items -[Array reply](/docs/reference/protocol-spec#arrays) of [Integer reply](/docs/reference/protocol-spec#integers) with a min-count of each of the items in the sketch. +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) with a min-count of each of the items in the sketch. 
## Examples diff --git a/content/commands/command-count/index.md b/content/commands/command-count/index.md index cd1baf1392..54a820d4ab 100644 --- a/content/commands/command-count/index.md +++ b/content/commands/command-count/index.md @@ -27,7 +27,7 @@ syntax_fmt: COMMAND COUNT syntax_str: '' title: COMMAND COUNT --- -Returns [Integer reply](/docs/reference/protocol-spec#integers) of number of total commands in this Redis server. +Returns [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) of number of total commands in this Redis server. ## Examples diff --git a/content/commands/command-getkeys/index.md b/content/commands/command-getkeys/index.md index 3c0584733a..2bae223b1e 100644 --- a/content/commands/command-getkeys/index.md +++ b/content/commands/command-getkeys/index.md @@ -36,14 +36,14 @@ syntax_fmt: COMMAND GETKEYS command [arg [arg ...]] syntax_str: '[arg [arg ...]]' title: COMMAND GETKEYS --- -Returns [Array reply](/docs/reference/protocol-spec#arrays) of keys from a full Redis command. +Returns [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of keys from a full Redis command. `COMMAND GETKEYS` is a helper command to let you find the keys from a full Redis command. -[`COMMAND`](/commands/command) provides information on how to find the key names of each command (see `firstkey`, [key specifications](/topics/key-specs#logical-operation-flags), and `movablekeys`), +[`COMMAND`]({{< relref "/commands/command" >}}) provides information on how to find the key names of each command (see `firstkey`, [key specifications](/topics/key-specs#logical-operation-flags), and `movablekeys`), but in some cases it's not possible to find keys of certain commands and then the entire command must be parsed to discover some / all key names. -You can use `COMMAND GETKEYS` or [`COMMAND GETKEYSANDFLAGS`](/commands/command-getkeysandflags) to discover key names directly from how Redis parses the commands. 
+You can use `COMMAND GETKEYS` or [`COMMAND GETKEYSANDFLAGS`]({{< relref "/commands/command-getkeysandflags" >}}) to discover key names directly from how Redis parses the commands. ## Examples diff --git a/content/commands/command-getkeysandflags/index.md b/content/commands/command-getkeysandflags/index.md index eb550228fb..72b0e41f82 100644 --- a/content/commands/command-getkeysandflags/index.md +++ b/content/commands/command-getkeysandflags/index.md @@ -36,13 +36,13 @@ syntax_fmt: COMMAND GETKEYSANDFLAGS command [arg [arg ...]] syntax_str: '[arg [arg ...]]' title: COMMAND GETKEYSANDFLAGS --- -Returns [Array reply](/docs/reference/protocol-spec#arrays) of keys from a full Redis command and their usage flags. +Returns [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of keys from a full Redis command and their usage flags. `COMMAND GETKEYSANDFLAGS` is a helper command to let you find the keys from a full Redis command together with flags indicating what each key is used for. -[`COMMAND`](/commands/command) provides information on how to find the key names of each command (see `firstkey`, [key specifications](/topics/key-specs#logical-operation-flags), and `movablekeys`), +[`COMMAND`]({{< relref "/commands/command" >}}) provides information on how to find the key names of each command (see `firstkey`, [key specifications](/topics/key-specs#logical-operation-flags), and `movablekeys`), but in some cases it's not possible to find keys of certain commands and then the entire command must be parsed to discover some / all key names. -You can use [`COMMAND GETKEYS`](/commands/command-getkeys) or `COMMAND GETKEYSANDFLAGS` to discover key names directly from how Redis parses the commands. +You can use [`COMMAND GETKEYS`]({{< relref "/commands/command-getkeys" >}}) or `COMMAND GETKEYSANDFLAGS` to discover key names directly from how Redis parses the commands. 
Refer to [key specifications](/topics/key-specs#logical-operation-flags) for information about the meaning of the key flags. diff --git a/content/commands/command-info/index.md b/content/commands/command-info/index.md index e1d7978f4d..03e76e1d0e 100644 --- a/content/commands/command-info/index.md +++ b/content/commands/command-info/index.md @@ -38,9 +38,9 @@ syntax_fmt: COMMAND INFO [command-name [command-name ...]] syntax_str: '' title: COMMAND INFO --- -Returns [Array reply](/docs/reference/protocol-spec#arrays) of details about multiple Redis commands. +Returns [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of details about multiple Redis commands. -Same result format as [`COMMAND`](/commands/command) except you can specify which commands +Same result format as [`COMMAND`]({{< relref "/commands/command" >}}) except you can specify which commands get returned. If you request details about non-existing commands, their return diff --git a/content/commands/command-list/index.md b/content/commands/command-list/index.md index 2a7f3b2960..f9ff01517c 100644 --- a/content/commands/command-list/index.md +++ b/content/commands/command-list/index.md @@ -43,9 +43,8 @@ hints: linkTitle: COMMAND LIST since: 7.0.0 summary: Returns a list of command names. -syntax_fmt: "COMMAND LIST [FILTERBY\_]" +syntax_fmt: "COMMAND LIST [FILTERBY\_]" syntax_str: '' title: COMMAND LIST --- @@ -54,5 +53,5 @@ Return an array of the server's command names. You can use the optional _FILTERBY_ modifier to apply one of the following filters: - **MODULE module-name**: get the commands that belong to the module specified by _module-name_. - - **ACLCAT category**: get the commands in the [ACL category](/docs/management/security/acl/#command-categories) specified by _category_. + - **ACLCAT category**: get the commands in the [ACL category]({{< baseurl >}}/operate/oss_and_stack/management/security/acl#command-categories) specified by _category_. 
- **PATTERN pattern**: get the commands that match the given glob-like _pattern_. diff --git a/content/commands/command/index.md b/content/commands/command/index.md index 55c37c68b8..e07dff2e22 100644 --- a/content/commands/command/index.md +++ b/content/commands/command/index.md @@ -80,8 +80,8 @@ Command arity _always includes_ the command's name itself (and the subcommand wh Examples: -* [`GET`](/commands/get)'s arity is _2_ since the command only accepts one argument and always has the format `GET _key_`. -* [`MGET`](/commands/mget)'s arity is _-2_ since the command accepts at least one argument, but possibly multiple ones: `MGET _key1_ [key2] [key3] ...`. +* [`GET`]({{< relref "/commands/get" >}})'s arity is _2_ since the command only accepts one argument and always has the format `GET _key_`. +* [`MGET`]({{< relref "/commands/mget" >}})'s arity is _-2_ since the command accepts at least one argument, but possibly multiple ones: `MGET _key1_ [key2] [key3] ...`. ## Flags @@ -93,10 +93,10 @@ Command flags are an array. It can contain the following simple strings (status * **blocking:** the command may block the requesting client. * **denyoom**: the command is rejected if the server's memory usage is too high (see the _maxmemory_ configuration directive). * **fast:** the command operates in constant or log(N) time. - This flag is used for monitoring latency with the [`LATENCY`](/commands/latency) command. + This flag is used for monitoring latency with the [`LATENCY`]({{< relref "/commands/latency" >}}) command. * **loading:** the command is allowed while the database is loading. * **movablekeys:** the _first key_, _last key_, and _step_ values don't determine all key positions. - Clients need to use [`COMMAND GETKEYS`](/commands/command-getkeys) or [key specifications][td] in this case. + Clients need to use [`COMMAND GETKEYS`]({{< relref "/commands/command-getkeys" >}}) or [key specifications][td] in this case. See below for more details. 
* **no_auth:** executing the command doesn't require authentication.
* **no_async_loading:** the command is denied during asynchronous loading (that is when a replica uses disk-less `SWAPDB SYNC`, and allows access to the old dataset).
@@ -108,15 +108,15 @@ Command flags are an array. It can contain the following simple strings (status
 As of Redis 7.0, this flag is a [command tip][tb].
* **readonly:** the command doesn't modify data.
* **sort_for_script:** the command's output is sorted when called from a script.
-* **skip_monitor:** the command is not shown in [`MONITOR`](/commands/monitor)'s output.
-* **skip_slowlog:** the command is not shown in [`SLOWLOG`](/commands/slowlog)'s output.
+* **skip_monitor:** the command is not shown in [`MONITOR`]({{< relref "/commands/monitor" >}})'s output.
+* **skip_slowlog:** the command is not shown in [`SLOWLOG`]({{< relref "/commands/slowlog" >}})'s output.
 As of Redis 7.0, this flag is a [command tip][tb].
* **stale:** the command is allowed while a replica has stale data.
* **write:** the command may modify data.

### Movablekeys

-Consider [`SORT`](/commands/sort):
+Consider [`SORT`]({{< relref "/commands/sort" >}}):

```
1) 1) "sort"
@@ -135,16 +135,16 @@ For those commands, the _movablekeys_ flag indicates that the _first key_, _last

Here are several examples of commands that have the _movablekeys_ flag:

-* [`SORT`](/commands/sort): the optional _STORE_, _BY_, and _GET_ modifiers are followed by names of keys.
-* [`ZUNION`](/commands/zunion): the _numkeys_ argument specifies the number key name arguments.
-* [`MIGRATE`](/commands/migrate): the keys appear _KEYS_ keyword and only when the second argument is the empty string.
+* [`SORT`]({{< relref "/commands/sort" >}}): the optional _STORE_, _BY_, and _GET_ modifiers are followed by names of keys.
+* [`ZUNION`]({{< relref "/commands/zunion" >}}): the _numkeys_ argument specifies the number of key name arguments.
+* [`MIGRATE`]({{< relref "/commands/migrate" >}}): the keys appear after the _KEYS_ keyword and only when the second argument is the empty string.

Redis Cluster clients need to use other measures, as follows, to locate the keys for such commands.

-You can use the [`COMMAND GETKEYS`](/commands/command-getkeys) command and have your Redis server report all keys of a given command's invocation.
+You can use the [`COMMAND GETKEYS`]({{< relref "/commands/command-getkeys" >}}) command and have your Redis server report all keys of a given command's invocation.

As of Redis 7.0, clients can use the [key specifications](#key-specifications) to identify the positions of key names.
-The only commands that require using [`COMMAND GETKEYS`](/commands/command-getkeys) are [`SORT`](/commands/sort) and [`MIGRATE`](/commands/migrate) for clients that parse keys' specifications.
+The only commands that require using [`COMMAND GETKEYS`]({{< relref "/commands/command-getkeys" >}}) are [`SORT`]({{< relref "/commands/sort" >}}) and [`MIGRATE`]({{< relref "/commands/migrate" >}}) for clients that parse keys' specifications.

For more information, please refer to the [key specifications page][tr].

@@ -161,9 +161,9 @@ Redis commands usually accept one, two or multiple number of keys.

Commands that accept a single key have both _first key_ and _last key_ set to 1.

-Commands that accept two key name arguments, e.g. [`BRPOPLPUSH`](/commands/brpoplpush), [`SMOVE`](/commands/smove) and [`RENAME`](/commands/rename), have this value set to the position of their second key.
+Commands that accept two key name arguments, e.g. [`BRPOPLPUSH`]({{< relref "/commands/brpoplpush" >}}), [`SMOVE`]({{< relref "/commands/smove" >}}) and [`RENAME`]({{< relref "/commands/rename" >}}), have this value set to the position of their second key.

-Multi-key commands that accept an arbitrary number of keys, such as [`MSET`](/commands/mset), use the value -1.
+Multi-key commands that accept an arbitrary number of keys, such as [`MSET`]({{< relref "/commands/mset" >}}), use the value -1. ## Step @@ -194,8 +194,8 @@ Consider the following two examples: ``` The step count allows us to find keys' positions. -For example [`MSET`](/commands/mset): Its syntax is `MSET _key1_ _val1_ [key2] [val2] [key3] [val3]...`, so the keys are at every other position (step value of _2_). -Unlike [`MGET`](/commands/mget), which uses a step value of _1_. +For example [`MSET`]({{< relref "/commands/mset" >}}): Its syntax is `MSET _key1_ _val1_ [key2] [val2] [key3] [val3]...`, so the keys are at every other position (step value of _2_). +Unlike [`MGET`]({{< relref "/commands/mget" >}}), which uses a step value of _1_. ## ACL categories @@ -219,7 +219,7 @@ For more information please check the [key specifications page][td]. ## Subcommands This is an array containing all of the command's subcommands, if any. -Some Redis commands have subcommands (e.g., the `REWRITE` subcommand of [`CONFIG`](/commands/config)). +Some Redis commands have subcommands (e.g., the `REWRITE` subcommand of [`CONFIG`]({{< relref "/commands/config" >}})). Each element in the array represents one subcommand and follows the same specifications as those of `COMMAND`'s reply. 
[ta]: /topics/acl @@ -229,7 +229,7 @@ Each element in the array represents one subcommand and follows the same specifi ## Examples -The following is `COMMAND`'s output for the [`GET`](/commands/get) command: +The following is `COMMAND`'s output for the [`GET`]({{< relref "/commands/get" >}}) command: ``` 1) 1) "get" diff --git a/content/commands/config-resetstat/index.md b/content/commands/config-resetstat/index.md index b82b8f5ac8..8fb4c3dc3e 100644 --- a/content/commands/config-resetstat/index.md +++ b/content/commands/config-resetstat/index.md @@ -33,7 +33,7 @@ syntax_fmt: CONFIG RESETSTAT syntax_str: '' title: CONFIG RESETSTAT --- -Resets the statistics reported by Redis using the [`INFO`](/commands/info) and [`LATENCY HISTOGRAM`](/commands/latency-histogram) commands. +Resets the statistics reported by Redis using the [`INFO`]({{< relref "/commands/info" >}}) and [`LATENCY HISTOGRAM`]({{< relref "/commands/latency-histogram" >}}) commands. The following is a non-exhaustive list of values that are reset: diff --git a/content/commands/config-rewrite/index.md b/content/commands/config-rewrite/index.md index e227c62be6..b1e1a13b13 100644 --- a/content/commands/config-rewrite/index.md +++ b/content/commands/config-rewrite/index.md @@ -33,7 +33,7 @@ syntax_fmt: CONFIG REWRITE syntax_str: '' title: CONFIG REWRITE --- -The `CONFIG REWRITE` command rewrites the `redis.conf` file the server was started with, applying the minimal changes needed to make it reflect the configuration currently used by the server, which may be different compared to the original one because of the use of the [`CONFIG SET`](/commands/config-set) command. +The `CONFIG REWRITE` command rewrites the `redis.conf` file the server was started with, applying the minimal changes needed to make it reflect the configuration currently used by the server, which may be different compared to the original one because of the use of the [`CONFIG SET`]({{< relref "/commands/config-set" >}}) command. 
The rewrite is performed in a very conservative way: diff --git a/content/commands/config/index.md b/content/commands/config/index.md index 48f7aa0164..740ff6b0a4 100644 --- a/content/commands/config/index.md +++ b/content/commands/config/index.md @@ -25,4 +25,4 @@ title: CONFIG --- This is a container command for runtime configuration commands. -To see the list of available commands you can call [`CONFIG HELP`](/commands/config-help). +To see the list of available commands you can call [`CONFIG HELP`]({{< relref "/commands/config-help" >}}). diff --git a/content/commands/decr/index.md b/content/commands/decr/index.md index 6c1b313088..57155c5832 100644 --- a/content/commands/decr/index.md +++ b/content/commands/decr/index.md @@ -56,7 +56,7 @@ An error is returned if the key contains a value of the wrong type or contains a string that can not be represented as integer. This operation is limited to **64 bit signed integers**. -See [`INCR`](/commands/incr) for extra information on increment/decrement operations. +See [`INCR`]({{< relref "/commands/incr" >}}) for extra information on increment/decrement operations. ## Examples diff --git a/content/commands/decrby/index.md b/content/commands/decrby/index.md index 28ee08987a..d0bd094dc0 100644 --- a/content/commands/decrby/index.md +++ b/content/commands/decrby/index.md @@ -59,7 +59,7 @@ An error is returned if the key contains a value of the wrong type or contains a string that can not be represented as integer. This operation is limited to 64 bit signed integers. -See [`INCR`](/commands/incr) for extra information on increment/decrement operations. +See [`INCR`]({{< relref "/commands/incr" >}}) for extra information on increment/decrement operations. ## Examples diff --git a/content/commands/discard/index.md b/content/commands/discard/index.md index 0dbbfcef1b..721c2edcb1 100644 --- a/content/commands/discard/index.md +++ b/content/commands/discard/index.md @@ -35,4 +35,4 @@ connection state to normal. 
[tt]: /topics/transactions -If [`WATCH`](/commands/watch) was used, `DISCARD` unwatches all keys watched by the connection. +If [`WATCH`]({{< relref "/commands/watch" >}}) was used, `DISCARD` unwatches all keys watched by the connection. diff --git a/content/commands/dump/index.md b/content/commands/dump/index.md index ce0b6bcc5b..25bbe59fc8 100644 --- a/content/commands/dump/index.md +++ b/content/commands/dump/index.md @@ -52,7 +52,7 @@ title: DUMP --- Serialize the value stored at key in a Redis-specific format and return it to the user. -The returned value can be synthesized back into a Redis key using the [`RESTORE`](/commands/restore) +The returned value can be synthesized back into a Redis key using the [`RESTORE`]({{< relref "/commands/restore" >}}) command. The serialization format is opaque and non-standard, however it has a few @@ -60,7 +60,7 @@ semantic characteristics: * It contains a 64-bit checksum that is used to make sure errors will be detected. - The [`RESTORE`](/commands/restore) command makes sure to check the checksum before synthesizing a + The [`RESTORE`]({{< relref "/commands/restore" >}}) command makes sure to check the checksum before synthesizing a key using the serialized value. * Values are encoded in the same format used by RDB. * An RDB version is encoded inside the serialized value, so that different Redis @@ -68,7 +68,7 @@ semantic characteristics: value. The serialized value does NOT contain expire information. -In order to capture the time to live of the current value the [`PTTL`](/commands/pttl) command +In order to capture the time to live of the current value the [`PTTL`]({{< relref "/commands/pttl" >}}) command should be used. If `key` does not exist a nil bulk reply is returned. 
diff --git a/content/commands/eval_ro/index.md b/content/commands/eval_ro/index.md index e566843a26..d3d81ec4e3 100644 --- a/content/commands/eval_ro/index.md +++ b/content/commands/eval_ro/index.md @@ -63,11 +63,11 @@ syntax_fmt: EVAL_RO script numkeys [key [key ...]] [arg [arg ...]] syntax_str: numkeys [key [key ...]] [arg [arg ...]] title: EVAL_RO --- -This is a read-only variant of the [`EVAL`](/commands/eval) command that cannot execute commands that modify data. +This is a read-only variant of the [`EVAL`]({{< relref "/commands/eval" >}}) command that cannot execute commands that modify data. -For more information about when to use this command vs [`EVAL`](/commands/eval), please refer to [Read-only scripts](/docs/manual/programmability/#read-only-scripts). +For more information about when to use this command vs [`EVAL`]({{< relref "/commands/eval" >}}), please refer to [Read-only scripts]({{< baseurl >}}/develop/interact/programmability#read-only-scripts). -For more information about [`EVAL`](/commands/eval) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). +For more information about [`EVAL`]({{< relref "/commands/eval" >}}) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). ## Examples diff --git a/content/commands/evalsha/index.md b/content/commands/evalsha/index.md index 7137b16435..d0ea8a5c49 100644 --- a/content/commands/evalsha/index.md +++ b/content/commands/evalsha/index.md @@ -64,7 +64,7 @@ title: EVALSHA --- Evaluate a script from the server's cache by its SHA1 digest. -The server caches scripts by using the [`SCRIPT LOAD`](/commands/script-load) command. -The command is otherwise identical to [`EVAL`](/commands/eval). +The server caches scripts by using the [`SCRIPT LOAD`]({{< relref "/commands/script-load" >}}) command. +The command is otherwise identical to [`EVAL`]({{< relref "/commands/eval" >}}). 
Please refer to the [Redis Programmability](/topics/programmability) and [Introduction to Eval Scripts](/topics/eval-intro) for more information about Lua scripts. diff --git a/content/commands/evalsha_ro/index.md b/content/commands/evalsha_ro/index.md index bebce6259c..5a018a0e88 100644 --- a/content/commands/evalsha_ro/index.md +++ b/content/commands/evalsha_ro/index.md @@ -62,8 +62,8 @@ syntax_fmt: EVALSHA_RO sha1 numkeys [key [key ...]] [arg [arg ...]] syntax_str: numkeys [key [key ...]] [arg [arg ...]] title: EVALSHA_RO --- -This is a read-only variant of the [`EVALSHA`](/commands/evalsha) command that cannot execute commands that modify data. +This is a read-only variant of the [`EVALSHA`]({{< relref "/commands/evalsha" >}}) command that cannot execute commands that modify data. -For more information about when to use this command vs [`EVALSHA`](/commands/evalsha), please refer to [Read-only scripts](/docs/manual/programmability/#read-only-scripts). +For more information about when to use this command vs [`EVALSHA`]({{< relref "/commands/evalsha" >}}), please refer to [Read-only scripts]({{< baseurl >}}/develop/interact/programmability#read-only-scripts). -For more information about [`EVALSHA`](/commands/evalsha) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). +For more information about [`EVALSHA`]({{< relref "/commands/evalsha" >}}) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). diff --git a/content/commands/exec/index.md b/content/commands/exec/index.md index 1343021a3b..eb05b018b6 100644 --- a/content/commands/exec/index.md +++ b/content/commands/exec/index.md @@ -34,7 +34,7 @@ connection state to normal. 
[tt]: /topics/transactions -When using [`WATCH`](/commands/watch), `EXEC` will execute commands only if the watched keys were +When using [`WATCH`]({{< relref "/commands/watch" >}}), `EXEC` will execute commands only if the watched keys were not modified, allowing for a [check-and-set mechanism][ttc]. [ttc]: /topics/transactions#cas diff --git a/content/commands/expire/index.md b/content/commands/expire/index.md index e2b260f5c3..725cf9cd60 100644 --- a/content/commands/expire/index.md +++ b/content/commands/expire/index.md @@ -79,27 +79,27 @@ A key with an associated timeout is often said to be _volatile_ in Redis terminology. The timeout will only be cleared by commands that delete or overwrite the -contents of the key, including [`DEL`](/commands/del), [`SET`](/commands/set), [`GETSET`](/commands/getset) and all the `*STORE` +contents of the key, including [`DEL`]({{< relref "/commands/del" >}}), [`SET`]({{< relref "/commands/set" >}}), [`GETSET`]({{< relref "/commands/getset" >}}) and all the `*STORE` commands. This means that all the operations that conceptually _alter_ the value stored at the key without replacing it with a new one will leave the timeout untouched. -For instance, incrementing the value of a key with [`INCR`](/commands/incr), pushing a new value -into a list with [`LPUSH`](/commands/lpush), or altering the field value of a hash with [`HSET`](/commands/hset) are +For instance, incrementing the value of a key with [`INCR`]({{< relref "/commands/incr" >}}), pushing a new value +into a list with [`LPUSH`]({{< relref "/commands/lpush" >}}), or altering the field value of a hash with [`HSET`]({{< relref "/commands/hset" >}}) are all operations that will leave the timeout untouched. The timeout can also be cleared, turning the key back into a persistent key, -using the [`PERSIST`](/commands/persist) command. +using the [`PERSIST`]({{< relref "/commands/persist" >}}) command. 
-If a key is renamed with [`RENAME`](/commands/rename), the associated time to live is transferred to +If a key is renamed with [`RENAME`]({{< relref "/commands/rename" >}}), the associated time to live is transferred to the new key name. -If a key is overwritten by [`RENAME`](/commands/rename), like in the case of an existing key `Key_A` +If a key is overwritten by [`RENAME`]({{< relref "/commands/rename" >}}), like in the case of an existing key `Key_A` that is overwritten by a call like `RENAME Key_B Key_A`, it does not matter if the original `Key_A` had a timeout associated or not, the new key `Key_A` will inherit all the characteristics of `Key_B`. -Note that calling `EXPIRE`/[`PEXPIRE`](/commands/pexpire) with a non-positive timeout or -[`EXPIREAT`](/commands/expireat)/[`PEXPIREAT`](/commands/pexpireat) with a time in the past will result in the key being +Note that calling `EXPIRE`/[`PEXPIRE`]({{< relref "/commands/pexpire" >}}) with a non-positive timeout or +[`EXPIREAT`]({{< relref "/commands/expireat" >}})/[`PEXPIREAT`]({{< relref "/commands/pexpireat" >}}) with a time in the past will result in the key being [deleted][del] rather than expired (accordingly, the emitted [key event][ntf] will be `del`, not `expired`). @@ -174,8 +174,8 @@ If the user will be idle more than 60 seconds, the key will be deleted and only subsequent page views that have less than 60 seconds of difference will be recorded. -This pattern is easily modified to use counters using [`INCR`](/commands/incr) instead of lists -using [`RPUSH`](/commands/rpush). +This pattern is easily modified to use counters using [`INCR`]({{< relref "/commands/incr" >}}) instead of lists +using [`RPUSH`]({{< relref "/commands/rpush" >}}). # Appendix: Redis expires @@ -183,7 +183,7 @@ using [`RPUSH`](/commands/rpush). Normally Redis keys are created without an associated time to live. 
The key will simply live forever, unless it is removed by the user in an -explicit way, for instance using the [`DEL`](/commands/del) command. +explicit way, for instance using the [`DEL`]({{< relref "/commands/del" >}}) command. The `EXPIRE` family of commands is able to associate an expire to a given key, at the cost of some additional memory used by the key. @@ -191,7 +191,7 @@ When a key has an expire set, Redis will make sure to remove the key when the specified amount of time elapsed. The key time to live can be updated or entirely removed using the `EXPIRE` and -[`PERSIST`](/commands/persist) command (or other strictly related commands). +[`PERSIST`]({{< relref "/commands/persist" >}}) command (or other strictly related commands). ## Expire accuracy @@ -246,13 +246,13 @@ second divided by 4. ## How expires are handled in the replication link and AOF file In order to obtain a correct behavior without sacrificing consistency, when a -key expires, a [`DEL`](/commands/del) operation is synthesized in both the AOF file and gains all +key expires, a [`DEL`]({{< relref "/commands/del" >}}) operation is synthesized in both the AOF file and gains all the attached replicas nodes. This way the expiration process is centralized in the master instance, and there is no chance of consistency errors. However while the replicas connected to a master will not expire keys -independently (but will wait for the [`DEL`](/commands/del) coming from the master), they'll +independently (but will wait for the [`DEL`]({{< relref "/commands/del" >}}) coming from the master), they'll still take the full state of the expires existing in the dataset, so when a replica is elected to master it will be able to expire the keys independently, fully acting as a master. 
diff --git a/content/commands/expireat/index.md b/content/commands/expireat/index.md index 92bdb7e5c8..326b4a15c9 100644 --- a/content/commands/expireat/index.md +++ b/content/commands/expireat/index.md @@ -73,7 +73,7 @@ syntax_fmt: EXPIREAT key unix-time-seconds [NX | XX | GT | LT] syntax_str: unix-time-seconds [NX | XX | GT | LT] title: EXPIREAT --- -`EXPIREAT` has the same effect and semantic as [`EXPIRE`](/commands/expire), but instead of +`EXPIREAT` has the same effect and semantic as [`EXPIRE`]({{< relref "/commands/expire" >}}), but instead of specifying the number of seconds representing the TTL (time to live), it takes an absolute [Unix timestamp][hewowu] (seconds since January 1, 1970). A timestamp in the past will delete the key immediately. @@ -81,7 +81,7 @@ timestamp in the past will delete the key immediately. [hewowu]: http://en.wikipedia.org/wiki/Unix_time Please for the specific semantics of the command refer to the documentation of -[`EXPIRE`](/commands/expire). +[`EXPIRE`]({{< relref "/commands/expire" >}}). ## Background diff --git a/content/commands/expiretime/index.md b/content/commands/expiretime/index.md index 971e400c5d..ea17e698bf 100644 --- a/content/commands/expiretime/index.md +++ b/content/commands/expiretime/index.md @@ -48,7 +48,7 @@ title: EXPIRETIME --- Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which the given key will expire. -See also the [`PEXPIRETIME`](/commands/pexpiretime) command which returns the same information with milliseconds resolution. +See also the [`PEXPIRETIME`]({{< relref "/commands/pexpiretime" >}}) command which returns the same information with milliseconds resolution. 
## Examples diff --git a/content/commands/failover/index.md b/content/commands/failover/index.md index 8419678415..ab84a046cb 100644 --- a/content/commands/failover/index.md +++ b/content/commands/failover/index.md @@ -59,7 +59,7 @@ title: FAILOVER This command will start a coordinated failover between the currently-connected-to master and one of its replicas. The failover is not synchronous, instead a background task will handle coordinating the failover. It is designed to limit data loss and unavailability of the cluster during the failover. -This command is analogous to the [`CLUSTER FAILOVER`](/commands/cluster-failover) command for non-clustered Redis and is similar to the failover support provided by sentinel. +This command is analogous to the [`CLUSTER FAILOVER`]({{< relref "/commands/cluster-failover" >}}) command for non-clustered Redis and is similar to the failover support provided by sentinel. The specific details of the default failover flow are as follows: @@ -75,7 +75,7 @@ The field `master_failover_state` in `INFO replication` can be used to track the * `waiting-for-sync`: The master is waiting for the replica to catch up to its replication offset. * `failover-in-progress`: The master has demoted itself, and is attempting to hand off ownership to a target replica. -If the previous master had additional replicas attached to it, they will continue replicating from it as chained replicas. You will need to manually execute a [`REPLICAOF`](/commands/replicaof) on these replicas to start replicating directly from the new master. +If the previous master had additional replicas attached to it, they will continue replicating from it as chained replicas. You will need to manually execute a [`REPLICAOF`]({{< relref "/commands/replicaof" >}}) on these replicas to start replicating directly from the new master. 
## Optional arguments The following optional arguments exist to modify the behavior of the failover flow: @@ -99,4 +99,4 @@ For this purpose, the `FAILOVER ABORT` command exists, which will abort an ongoi The command has no side effects if issued in the `waiting-for-sync` state but can introduce multi-master scenarios in the `failover-in-progress` state. If a multi-master scenario is encountered, you will need to manually identify which master has the latest data and designate it as the master and have the other replicas. -NOTE: [`REPLICAOF`](/commands/replicaof) is disabled while a failover is in progress, this is to prevent unintended interactions with the failover that might cause data loss. +NOTE: [`REPLICAOF`]({{< relref "/commands/replicaof" >}}) is disabled while a failover is in progress, this is to prevent unintended interactions with the failover that might cause data loss. diff --git a/content/commands/fcall/index.md b/content/commands/fcall/index.md index 960551afce..877a170f86 100644 --- a/content/commands/fcall/index.md +++ b/content/commands/fcall/index.md @@ -65,7 +65,7 @@ title: FCALL --- Invoke a function. -Functions are loaded to the server with the [`FUNCTION LOAD`](/commands/function-load) command. +Functions are loaded to the server with the [`FUNCTION LOAD`]({{< relref "/commands/function-load" >}}) command. The first argument is the name of a loaded function. The second argument is the number of input key name arguments, followed by all the keys accessed by the function. 
diff --git a/content/commands/fcall_ro/index.md b/content/commands/fcall_ro/index.md
index 3a71050284..71ff7c5254 100644
--- a/content/commands/fcall_ro/index.md
+++ b/content/commands/fcall_ro/index.md
@@ -63,8 +63,8 @@ syntax_fmt: FCALL_RO function numkeys [key [key ...]] [arg [arg ...]]
 syntax_str: numkeys [key [key ...]] [arg [arg ...]]
 title: FCALL_RO
---
-This is a read-only variant of the [`FCALL`](/commands/fcall) command that cannot execute commands that modify data.
+This is a read-only variant of the [`FCALL`]({{< relref "/commands/fcall" >}}) command that cannot execute commands that modify data.

-For more information about when to use this command vs [`FCALL`](/commands/fcall), please refer to [Read-only scripts](/docs/manual/programmability/#read-only_scripts).
+For more information about when to use this command vs [`FCALL`]({{< relref "/commands/fcall" >}}), please refer to [Read-only scripts]({{< baseurl >}}/develop/interact/programmability#read-only-scripts).

For more information please refer to [Introduction to Redis Functions](/topics/functions-intro).
diff --git a/content/commands/ft._list/index.md b/content/commands/ft._list/index.md
index 0460d5ce61..a41e414bf8 100644
--- a/content/commands/ft._list/index.md
+++ b/content/commands/ft._list/index.md
@@ -28,13 +28,13 @@ Returns a list of all existing indexes.
{{% alert title="Temporary command" color="info" %}}
The prefix `_` in the command indicates, this is a temporary command.
-In the future, a [`SCAN`](/commands/scan) type of command will be added, for use when a database
+In the future, a [`SCAN`]({{< relref "/commands/scan" >}}) type of command will be added, for use when a database
contains a large number of indices.
{{% /alert %}}

## Return

-[Array reply](/docs/reference/protocol-spec#arrays) with index names.
+[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with index names.
## Examples diff --git a/content/commands/ft.aggregate/index.md b/content/commands/ft.aggregate/index.md index 41fc62f3b6..f58205d69c 100644 --- a/content/commands/ft.aggregate/index.md +++ b/content/commands/ft.aggregate/index.md @@ -165,45 +165,22 @@ since: 1.1.0 stack_path: docs/interact/search-and-query summary: Run a search query on an index and perform aggregate transformations on the results -syntax: "FT.AGGREGATE index query - [VERBATIM] - [LOAD count field [field ...]]\ - \ - [TIMEOUT timeout] - [ GROUPBY nargs property [property ...] [ REDUCE function\ - \ nargs arg [arg ...] [AS name] [ REDUCE function nargs arg [arg ...] [AS name]\ - \ ...]] ...]] - [ SORTBY nargs [ property ASC | DESC [ property ASC | DESC ...]]\ - \ [MAX num] [WITHCOUNT] - [ APPLY expression AS name [ APPLY expression AS name\ - \ ...]] - [ LIMIT offset num] - [FILTER filter] - [ WITHCURSOR [COUNT read_size]\ - \ [MAXIDLE idle_time]] - [ PARAMS nargs name value [ name value ...]] - [DIALECT\ - \ dialect] -" -syntax_fmt: "FT.AGGREGATE index query [VERBATIM] [LOAD\_count field [field ...]] -\ - \ [TIMEOUT\_timeout] [LOAD *] [GROUPBY\_nargs property [property ...] - [REDUCE\_\ - function nargs arg [arg ...] [AS\_name] [REDUCE\_function - nargs arg [arg ...]\ - \ [AS\_name] ...]] [GROUPBY\_nargs property - [property ...] [REDUCE\_function\ - \ nargs arg [arg ...] [AS\_name] - [REDUCE\_function nargs arg [arg ...] [AS\_\ - name] ...]] ...]] - [SORTBY\_nargs [property [property \ - \ ...]] - [MAX\_num]] [APPLY\_expression AS\_name [APPLY\_expression AS\_name -\ - \ ...]] [LIMIT offset num] [FILTER\_filter] [WITHCURSOR - [COUNT\_read_size] [MAXIDLE\_\ - idle_time]] [PARAMS nargs name value - [name value ...]] [DIALECT\_dialect]" +syntax: 'FT.AGGREGATE index query [VERBATIM] [LOAD count field [field ...]] [TIMEOUT + timeout] [ GROUPBY nargs property [property ...] [ REDUCE function nargs arg [arg + ...] [AS name] [ REDUCE function nargs arg [arg ...] 
[AS name] ...]] ...]] [ SORTBY + nargs [ property ASC | DESC [ property ASC | DESC ...]] [MAX num] [WITHCOUNT] [ + APPLY expression AS name [ APPLY expression AS name ...]] [ LIMIT offset num] [FILTER + filter] [ WITHCURSOR [COUNT read_size] [MAXIDLE idle_time]] [ PARAMS nargs name + value [ name value ...]] [DIALECT dialect] ' +syntax_fmt: "FT.AGGREGATE index query [VERBATIM] [LOAD\_count field [field ...]] \ + \ [TIMEOUT\_timeout] [LOAD *] [GROUPBY\_nargs property [property ...] [REDUCE\_\ + function nargs arg [arg ...] [AS\_name] [REDUCE\_function nargs arg [arg ...] [AS\_\ + name] ...]] [GROUPBY\_nargs property [property ...] [REDUCE\_function nargs arg\ + \ [arg ...] [AS\_name] [REDUCE\_function nargs arg [arg ...] [AS\_name] ...]] ...]]\ + \ [SORTBY\_nargs [property [property ...]] [MAX\_num]]\ + \ [APPLY\_expression AS\_name [APPLY\_expression AS\_name ...]] [LIMIT offset\ + \ num] [FILTER\_filter] [WITHCURSOR [COUNT\_read_size] [MAXIDLE\_idle_time]] [PARAMS\ + \ nargs name value [name value ...]] [DIALECT\_dialect]" syntax_str: "query [VERBATIM] [LOAD\_count field [field ...]] [TIMEOUT\_timeout] [LOAD\ \ *] [GROUPBY\_nargs property [property ...] [REDUCE\_function nargs arg [arg ...]\ \ [AS\_name] [REDUCE\_function nargs arg [arg ...] [AS\_name] ...]] [GROUPBY\_nargs\ @@ -225,7 +202,7 @@ Run a search query on an index, and perform aggregate transformations on the res
index

-is index name against which the query is executed. You must first create the index using [`FT.CREATE`](/commands/ft.create).
+is the index name against which the query is executed. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create).
@@ -250,7 +227,7 @@ loads document attributes from the source document. - `property` is the optional name used in the result. If it is not provided, the `identifier` is used. This should be avoided. - If `*` is used as `nargs`, all attributes in a document are loaded. -Attributes needed for aggregations should be stored as `SORTABLE`, where they are available to the aggregation pipeline with very low latency. `LOAD` hurts the performance of aggregate queries considerably because every processed record needs to execute the equivalent of [`HMGET`](/commands/hmget) against a Redis key, which when executed over millions of keys, amounts to high processing times. +Attributes needed for aggregations should be stored as `SORTABLE`, where they are available to the aggregation pipeline with very low latency. `LOAD` hurts the performance of aggregate queries considerably because every processed record needs to execute the equivalent of [`HMGET`]({{< relref "/commands/hmget" >}}) against a Redis key, which when executed over millions of keys, amounts to high processing times.
GROUPBY {nargs} {property} @@ -263,7 +240,7 @@ groups the results in the pipeline based on one or more properties. Each group s reduces the matching results in each group into a single record, using a reduction function. For example, `COUNT` counts the number of records in the group. The reducers can have their own property names using the `AS {name}` optional argument. If a name is not given, the resulting name will be the name of the reduce function and the group properties. For example, if a name is not given to `COUNT_DISTINCT` by property `@foo`, the resulting name will be `count_distinct(@foo)`. -See [Supported GROUPBY reducers](/docs/interact/search-and-query/search/aggregations/#supported-groupby-reducers) for more details. +See [Supported GROUPBY reducers]({{< baseurl >}}/develop/interact/search-and-query/advanced-concepts/aggregations#supported-groupby-reducers) for more details.
@@ -317,7 +294,7 @@ filters the results using predicate expressions relating to values in each resul WITHCURSOR {COUNT} {read_size} [MAXIDLE {idle_time}] Scan part of the results with a quicker alternative than `LIMIT`. -See [Cursor API](/docs/interact/search-and-query/search/aggregations/#cursor-api) for more details. +See [Cursor API]({{< baseurl >}}/develop/interact/search-and-query/advanced-concepts/aggregations#cursor-api) for more details.
@@ -337,18 +314,18 @@ You can reference parameters in the `query` by a `$`, followed by the parameter
DIALECT {dialect_version} -selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`](/commands/ft.config-set) command. +selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) command.
## Return FT.AGGREGATE returns an array reply where each row is an array reply and represents a single aggregate result. -The [integer reply](/docs/reference/protocol-spec/#resp-integers) at position `1` does not represent a valid value. +The [integer reply]({{< baseurl >}}/develop/reference/protocol-spec#resp-integers) at position `1` does not represent a valid value. ### Return multiple values -See [Return multiple values](/commands/ft.search#return-multiple-values) in [`FT.SEARCH`](/commands/ft.search) -The `DIALECT` can be specified as a parameter in the FT.AGGREGATE command. If it is not specified, the `DEFAULT_DIALECT` is used, which can be set using [`FT.CONFIG SET`](/commands/ft.config-set) or by passing it as an argument to the `redisearch` module when it is loaded. +See [Return multiple values]({{< baseurl >}}/commands/ft.search#return-multiple-values) in [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) +The `DIALECT` can be specified as a parameter in the FT.AGGREGATE command. If it is not specified, the `DEFAULT_DIALECT` is used, which can be set using [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) or by passing it as an argument to the `redisearch` module when it is loaded. For example, with the following document and index: @@ -477,10 +454,10 @@ Next, count GitHub events by user (actor), to produce the most active users. 
## See also -[`FT.CONFIG SET`](/commands/ft.config-set) | [`FT.SEARCH`](/commands/ft.search) +[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) ## Related topics -- [Aggregations](/docs/interact/search-and-query/search/aggregations) -- [RediSearch](/docs/interact/search-and-query) +- [Aggregations]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}}) +- [RediSearch]({{< relref "/develop/interact/search-and-query" >}}) diff --git a/content/commands/ft.aliasadd/index.md b/content/commands/ft.aliasadd/index.md index 120ba6ddf6..bcaf121c52 100644 --- a/content/commands/ft.aliasadd/index.md +++ b/content/commands/ft.aliasadd/index.md @@ -74,8 +74,8 @@ Attempting to add the same alias returns a message that the alias already exists ## See also -[`FT.ALIASDEL`](/commands/ft.aliasdel) | [`FT.ALIASUPDATE`](/commands/ft.aliasupdate) +[`FT.ALIASDEL`]({{< baseurl >}}/commands/ft.aliasdel) | [`FT.ALIASUPDATE`]({{< baseurl >}}/commands/ft.aliasupdate) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.aliasdel/index.md b/content/commands/ft.aliasdel/index.md index c4c00086a4..105146394b 100644 --- a/content/commands/ft.aliasdel/index.md +++ b/content/commands/ft.aliasdel/index.md @@ -60,8 +60,8 @@ OK ## See also -[`FT.ALIASADD`](/commands/ft.aliasadd) | [`FT.ALIASUPDATE`](/commands/ft.aliasupdate) +[`FT.ALIASADD`]({{< baseurl >}}/commands/ft.aliasadd) | [`FT.ALIASUPDATE`]({{< baseurl >}}/commands/ft.aliasupdate) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.aliasupdate/index.md b/content/commands/ft.aliasupdate/index.md index 4ad7a37599..41807e6128 100644 --- 
a/content/commands/ft.aliasupdate/index.md +++ b/content/commands/ft.aliasupdate/index.md @@ -62,8 +62,8 @@ OK ## See also -[`FT.ALIASADD`](/commands/ft.aliasadd) | [`FT.ALIASDEL`](/commands/ft.aliasdel) +[`FT.ALIASADD`]({{< baseurl >}}/commands/ft.aliasadd) | [`FT.ALIASDEL`]({{< baseurl >}}/commands/ft.aliasdel) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.alter/index.md b/content/commands/ft.alter/index.md index 4519e2b452..dfdb63736a 100644 --- a/content/commands/ft.alter/index.md +++ b/content/commands/ft.alter/index.md @@ -67,7 +67,7 @@ if set, does not scan and index. after the SCHEMA keyword, declares which fields to add: - `attribute` is attribute to add. -- `options` are attribute options. Refer to [`FT.CREATE`](/commands/ft.create) for more information. +- `options` are attribute options. Refer to [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) for more information. Note: @@ -96,11 +96,11 @@ OK ## See also -[`FT.CREATE`](/commands/ft.create) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) ## Related topics -- [RediSearch](/docs/stack/search) +- [RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.config-get/index.md b/content/commands/ft.config-get/index.md index 74359dafdf..25a72d1d86 100644 --- a/content/commands/ft.config-get/index.md +++ b/content/commands/ft.config-get/index.md @@ -133,8 +133,8 @@ FT.CONFIG GET returns an array reply of the configuration name and value. 
## See also -[`FT.CONFIG SET`](/commands/ft.config-set) | [`FT.CONFIG HELP`](/commands/ft.config-help) +[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) | [`FT.CONFIG HELP`]({{< baseurl >}}/commands/ft.config-help) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.config-help/index.md b/content/commands/ft.config-help/index.md index daa5a2179a..849740e1d2 100644 --- a/content/commands/ft.config-help/index.md +++ b/content/commands/ft.config-help/index.md @@ -62,8 +62,8 @@ FT.CONFIG HELP returns an array reply of the configuration name and value. ## See also -[`FT.CONFIG SET`](/commands/ft.config-set) | [`FT.CONFIG GET`](/commands/ft.config-get) +[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) | [`FT.CONFIG GET`]({{< baseurl >}}/commands/ft.config-get) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.config-set/index.md b/content/commands/ft.config-set/index.md index 4fa327765a..b2d6770bcc 100644 --- a/content/commands/ft.config-set/index.md +++ b/content/commands/ft.config-set/index.md @@ -35,7 +35,7 @@ Set the value of a RediSearch configuration parameter. Values set using `FT.CONFIG SET` are not persisted after server restart. -RediSearch configuration parameters are detailed in [Configuration parameters](/docs/stack/search/configuring). +RediSearch configuration parameters are detailed in [Configuration parameters]({{< relref "/develop/interact/search-and-query/administration" >}}). {{% alert title="Note" color="warning" %}} As detailed in the link above, not all RediSearch configuration parameters can be set at runtime. 
@@ -74,8 +74,8 @@ OK ## See also -[`FT.CONFIG GET`](/commands/ft.config-get) | [`FT.CONFIG HELP`](/commands/ft.config-help) +[`FT.CONFIG GET`]({{< baseurl >}}/commands/ft.config-get) | [`FT.CONFIG HELP`]({{< baseurl >}}/commands/ft.config-help) ## Related topics -[RediSearch](/docs/stack/search) +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.create/index.md b/content/commands/ft.create/index.md index bb5fc61f16..ea106f4173 100644 --- a/content/commands/ft.create/index.md +++ b/content/commands/ft.create/index.md @@ -162,50 +162,21 @@ module: Search since: 1.0.0 stack_path: docs/interact/search-and-query summary: Creates an index with the given spec -syntax: "FT.CREATE index - [ON HASH | JSON] - [PREFIX count prefix [prefix ...]]\ - \ - [FILTER {filter}] - [LANGUAGE default_lang] - [LANGUAGE_FIELD lang_attribute]\ - \ - [SCORE default_score] - [SCORE_FIELD score_attribute] - [PAYLOAD_FIELD\ - \ payload_attribute] - [MAXTEXTFIELDS] - [TEMPORARY seconds] - [NOOFFSETS]\ - \ - [NOHL] - [NOFIELDS] - [NOFREQS] - [STOPWORDS count [stopword ...]] -\ - \ [SKIPINITIALSCAN] - SCHEMA field_name [AS alias] TEXT | TAG | NUMERIC | GEO\ - \ | VECTOR | GEOSHAPE [ SORTABLE [UNF]] - [NOINDEX] [ field_name [AS alias] TEXT\ - \ | TAG | NUMERIC | GEO | VECTOR | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] ...] 
-" -syntax_fmt: "FT.CREATE index [ON\_] [PREFIX\_count prefix [prefix - \ - \ ...]] [FILTER\_filter] [LANGUAGE\_default_lang] - [LANGUAGE_FIELD\_lang_attribute]\ - \ [SCORE\_default_score] - [SCORE_FIELD\_score_attribute] [PAYLOAD_FIELD\_payload_attribute] -\ - \ [MAXTEXTFIELDS] [TEMPORARY\_seconds] [NOOFFSETS] [NOHL] [NOFIELDS] - [NOFREQS]\ - \ [STOPWORDS\_count [stopword [stopword ...]]] - [SKIPINITIALSCAN] SCHEMA field_name\ - \ [AS\_alias] [WITHSUFFIXTRIE] [SORTABLE\ - \ [UNF]] - [NOINDEX] [field_name [AS\_alias] \ - \ [WITHSUFFIXTRIE] [SORTABLE [UNF]] [NOINDEX] ...]" +syntax: 'FT.CREATE index [ON HASH | JSON] [PREFIX count prefix [prefix ...]] [FILTER + {filter}] [LANGUAGE default_lang] [LANGUAGE_FIELD lang_attribute] [SCORE default_score] + [SCORE_FIELD score_attribute] [PAYLOAD_FIELD payload_attribute] [MAXTEXTFIELDS] + [TEMPORARY seconds] [NOOFFSETS] [NOHL] [NOFIELDS] [NOFREQS] [STOPWORDS count [stopword + ...]] [SKIPINITIALSCAN] SCHEMA field_name [AS alias] TEXT | TAG | NUMERIC | GEO + | VECTOR | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] [ field_name [AS alias] TEXT | TAG + | NUMERIC | GEO | VECTOR | GEOSHAPE [ SORTABLE [UNF]] [NOINDEX] ...] 
' +syntax_fmt: "FT.CREATE index [ON\_] [PREFIX\_count prefix [prefix ...]]\ + \ [FILTER\_filter] [LANGUAGE\_default_lang] [LANGUAGE_FIELD\_lang_attribute] [SCORE\_\ + default_score] [SCORE_FIELD\_score_attribute] [PAYLOAD_FIELD\_payload_attribute]\ + \ [MAXTEXTFIELDS] [TEMPORARY\_seconds] [NOOFFSETS] [NOHL] [NOFIELDS] [NOFREQS]\ + \ [STOPWORDS\_count [stopword [stopword ...]]] [SKIPINITIALSCAN] SCHEMA field_name\ + \ [AS\_alias] [WITHSUFFIXTRIE] [SORTABLE [UNF]]\ + \ [NOINDEX] [field_name [AS\_alias] [WITHSUFFIXTRIE]\ + \ [SORTABLE [UNF]] [NOINDEX] ...]" syntax_str: "[ON\_] [PREFIX\_count prefix [prefix ...]] [FILTER\_filter]\ \ [LANGUAGE\_default_lang] [LANGUAGE_FIELD\_lang_attribute] [SCORE\_default_score]\ \ [SCORE_FIELD\_score_attribute] [PAYLOAD_FIELD\_payload_attribute] [MAXTEXTFIELDS]\ @@ -244,13 +215,13 @@ after the SCHEMA keyword, declares which fields to index: - `TEXT` - Allows full-text search queries against the value in this attribute. - - `TAG` - Allows exact-match queries, such as categories or primary keys, against the value in this attribute. For more information, see [Tag Fields](/docs/interact/search-and-query/advanced-concepts/tags/). + - `TAG` - Allows exact-match queries, such as categories or primary keys, against the value in this attribute. For more information, see [Tag Fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags" >}}). - - `NUMERIC` - Allows numeric range queries against the value in this attribute. See [query syntax docs](/docs/interact/search-and-query/query/) for details on how to use numeric ranges. + - `NUMERIC` - Allows numeric range queries against the value in this attribute. See [query syntax docs]({{< relref "/develop/interact/search-and-query/query/" >}}) for details on how to use numeric ranges. - `GEO` - Allows radius range queries against the value (point) in this attribute. The value of the attribute must be a string containing a longitude (first) and latitude separated by a comma. 
- - `VECTOR` - Allows vector queries against the value in this attribute. For more information, see [Vector Fields](/docs/interact/search-and-query/search/vectors/). +  - `VECTOR` - Allows vector queries against the value in this attribute. For more information, see [Vector Fields]({{< relref "/develop/get-started/vector-database" >}}). - `GEOSHAPE`- Allows polygon queries against the value in this attribute. The value of the attribute must follow a [WKT notation](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry) list of 2D points representing the polygon edges `POLYGON((x1 y1, x2 y2, ...)` separated by a comma. A `GEOSHAPE` field type can be followed by one of the following coordinate systems: - `SPHERICAL` for Geographic longitude and latitude coordinates @@ -262,7 +233,7 @@ after the SCHEMA keyword, declares which fields to index: Field options are: - - `SORTABLE` - `NUMERIC`, `TAG`, `TEXT`, or `GEO` attributes can have an optional **SORTABLE** argument. As the user [sorts the results by the value of this attribute](/docs/interact/search-and-query/advanced-concepts/sorting/), the results are available with very low latency. Note that his adds memory overhead, so consider not declaring it on large text attributes. You can sort an attribute without the `SORTABLE` option, but the latency is not as good as with `SORTABLE`. + - `SORTABLE` - `NUMERIC`, `TAG`, `TEXT`, or `GEO` attributes can have an optional **SORTABLE** argument. As the user [sorts the results by the value of this attribute]({{< relref "/develop/interact/search-and-query/advanced-concepts/sorting" >}}), the results are available with very low latency. Note that this adds memory overhead, so consider not declaring it on large text attributes. You can sort an attribute without the `SORTABLE` option, but the latency is not as good as with `SORTABLE`.
- `UNF` - By default, for hashes (not with JSON) `SORTABLE` applies a normalization to the indexed value (characters set to lowercase, removal of diacritics). When using the unnormalized form (UNF), you can disable the normalization and keep the original form of the value. With JSON, `UNF` is implicit with `SORTABLE` (normalization is disabled). @@ -277,7 +248,7 @@ after the SCHEMA keyword, declares which fields to index: - `dm:pt` - Double metaphone for Portuguese - `dm:es` - Double metaphone for Spanish - For more information, see [Phonetic Matching](/docs/interact/search-and-query/advanced-concepts/phonetic_matching). + For more information, see [Phonetic Matching]({{< relref "/develop/interact/search-and-query/advanced-concepts/phonetic_matching" >}}). - `WEIGHT {weight}` for `TEXT` attributes, declares the importance of this attribute when calculating result accuracy. This is a multiplication factor, and defaults to 1 if not specified. @@ -293,7 +264,7 @@ after the SCHEMA keyword, declares which fields to index:
ON {data_type} -currently supports HASH (default) and JSON. To index JSON, you must have the [RedisJSON](/docs/stack/json) module installed. +currently supports HASH (default) and JSON. To index JSON, you must have the [RedisJSON]({{< relref "/develop/data-types/json/" >}}) module installed.
@@ -323,7 +294,7 @@ A stemmer is used for the supplied language during indexing. If an unsupported l Indonesian, Irish, Italian, Lithuanian, Nepali, Norwegian, Portuguese, Romanian, Russian, Spanish, Swedish, Tamil, Turkish, and Chinese. -When adding Chinese language documents, set `LANGUAGE chinese` for the indexer to properly tokenize the terms. If you use the default language, then search terms are extracted based on punctuation characters and whitespace. The Chinese language tokenizer makes use of a segmentation algorithm (via [Friso](https://github.com/lionsoul2014/friso)), which segments text and checks it against a predefined dictionary. See [Stemming](/docs/interact/search-and-query/advanced-concepts/stemming) for more information. +When adding Chinese language documents, set `LANGUAGE chinese` for the indexer to properly tokenize the terms. If you use the default language, then search terms are extracted based on punctuation characters and whitespace. The Chinese language tokenizer makes use of a segmentation algorithm (via [Friso](https://github.com/lionsoul2014/friso)), which segments text and checks it against a predefined dictionary. See [Stemming]({{< relref "/develop/interact/search-and-query/advanced-concepts/stemming" >}}) for more information.
@@ -347,7 +318,7 @@ is document attribute that you use as a binary safe payload string to the docume
MAXTEXTFIELDS -forces RediSearch to encode indexes as if there were more than 32 text attributes, which allows you to add additional attributes (beyond 32) using [`FT.ALTER`](/commands/ft.alter). For efficiency, RediSearch encodes indexes differently if they are created with less than 32 text attributes. +forces RediSearch to encode indexes as if there were more than 32 text attributes, which allows you to add additional attributes (beyond 32) using [`FT.ALTER`]({{< baseurl >}}/commands/ft.alter). For efficiency, RediSearch encodes indexes differently if they are created with less than 32 text attributes.
@@ -364,7 +335,7 @@ creates a lightweight temporary index that expires after a specified period of i {{% alert title="Warning" color="warning" %}} When temporary indexes expire, they drop all the records associated with them. -[`FT.DROPINDEX`](/commands/ft.dropindex) was introduced with a default of not deleting docs and a `DD` flag that enforced deletion. +[`FT.DROPINDEX`]({{< baseurl >}}/commands/ft.dropindex) was introduced with a default of not deleting docs and a `DD` flag that enforced deletion. However, for temporary indexes, documents are deleted along with the index. Historically, RediSearch used an FT.ADD command, which made a connection between the document and the index. Then, FT.DROP, also a historic command, deleted documents by default. In version 2.x, RediSearch indexes hashes and JSONs, and the dependency between the index and documents no longer exists. @@ -482,13 +453,13 @@ Index a JSON document using a JSON Path expression. ## See also -[`FT.ALTER`](/commands/ft.alter) | [`FT.DROPINDEX`](/commands/ft.dropindex) +[`FT.ALTER`]({{< baseurl >}}/commands/ft.alter) | [`FT.DROPINDEX`]({{< baseurl >}}/commands/ft.dropindex) ## Related topics -- [RediSearch](/docs/stack/search) -- [RedisJSON](/docs/stack/json) +- [RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) +- [RedisJSON]({{< relref "/develop/data-types/json/" >}}) - [Friso](https://github.com/lionsoul2014/friso) -- [Stemming](/docs/interact/search-and-query/advanced-concepts/stemming) -- [Phonetic Matching](/docs/interact/search-and-query/advanced-concepts/phonetic_matching/) +- [Stemming]({{< relref "/develop/interact/search-and-query/advanced-concepts/stemming" >}}) +- [Phonetic Matching]({{< relref "/develop/interact/search-and-query/advanced-concepts/phonetic_matching" >}}) - [RSCoordinator](https://github.com/RedisLabsModules/RSCoordinator) diff --git a/content/commands/ft.cursor-del/index.md b/content/commands/ft.cursor-del/index.md index 3b2fa0488a..fa0d661b72 100644 ---
a/content/commands/ft.cursor-del/index.md +++ b/content/commands/ft.cursor-del/index.md @@ -73,8 +73,8 @@ Check that the cursor is deleted. ## See also -[`FT.CURSOR READ`](/commands/ft.cursor-read) +[`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.cursor-read/index.md b/content/commands/ft.cursor-read/index.md index 4affd41322..824ebb9eb2 100644 --- a/content/commands/ft.cursor-read/index.md +++ b/content/commands/ft.cursor-read/index.md @@ -39,7 +39,7 @@ Read next results from an existing cursor [Examples](#examples) -See [Cursor API](/docs/stack/search/reference/aggregations/#cursor-api) for more details. +See [Cursor API]({{< baseurl >}}/develop/interact/search-and-query/advanced-concepts/aggregations#cursor-api) for more details. ## Required arguments @@ -58,7 +58,7 @@ is id of the cursor.
[COUNT read_size] -is number of results to read. This parameter overrides `COUNT` specified in [`FT.AGGREGATE`](/commands/ft.aggregate). +is number of results to read. This parameter overrides `COUNT` specified in [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate).
## Return @@ -77,8 +77,8 @@ FT.CURSOR READ returns an array reply where each row is an array reply and repre ## See also -[`FT.CURSOR DEL`](/commands/ft.cursor-del) | [`FT.AGGREGATE`](/commands/ft.aggregate) +[`FT.CURSOR DEL`]({{< baseurl >}}/commands/ft.cursor-del) | [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) ## Related topics -[RediSearch](/docs/stack/search) +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.dictadd/index.md b/content/commands/ft.dictadd/index.md index 516b0b92f2..b15cfc877c 100644 --- a/content/commands/ft.dictadd/index.md +++ b/content/commands/ft.dictadd/index.md @@ -67,8 +67,8 @@ FT.DICTADD returns an integer reply, the number of new terms that were added. ## See also -[`FT.DICTDEL`](/commands/ft.dictdel) | [`FT.DICTDUMP`](/commands/ft.dictdump) +[`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel) | [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.dictdel/index.md b/content/commands/ft.dictdel/index.md index 7658bbe8e5..2656f46b0e 100644 --- a/content/commands/ft.dictdel/index.md +++ b/content/commands/ft.dictdel/index.md @@ -67,8 +67,8 @@ FT.DICTDEL returns an integer reply, the number of new terms that were deleted. 
## See also -[`FT.DICTADD`](/commands/ft.dictadd) | [`FT.DICTDUMP`](/commands/ft.dictdump) +[`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd) | [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump) ## Related topics -[RediSearch](/docs/stack/search) +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.dictdump/index.md b/content/commands/ft.dictdump/index.md index a7ec30c875..de27f47bd4 100644 --- a/content/commands/ft.dictdump/index.md +++ b/content/commands/ft.dictdump/index.md @@ -60,10 +60,10 @@ FT.DICTDUMP returns an array, where each element is term (string). ## See also -[`FT.DICTADD`](/commands/ft.dictadd) | [`FT.DICTDEL`](/commands/ft.dictdel) +[`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd) | [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel) ## Related topics -[RediSearch](/docs/stack/search) +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.dropindex/index.md b/content/commands/ft.dropindex/index.md index 28d588aa03..a935fa629a 100644 --- a/content/commands/ft.dropindex/index.md +++ b/content/commands/ft.dropindex/index.md @@ -29,9 +29,7 @@ module: Search since: 2.0.0 stack_path: docs/interact/search-and-query summary: Deletes the index -syntax: "FT.DROPINDEX index - [DD] -" +syntax: 'FT.DROPINDEX index [DD] ' syntax_fmt: FT.DROPINDEX index [DD] syntax_str: '[DD]' title: FT.DROPINDEX @@ -46,7 +44,7 @@ Delete an index
index -is full-text index name. You must first create the index using [`FT.CREATE`](/commands/ft.create). +is full-text index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create).
## Optional arguments @@ -57,9 +55,9 @@ is full-text index name. You must first create the index using [`FT.CREATE`](/co drop operation that, if set, deletes the actual document hashes. By default, FT.DROPINDEX does not delete the documents associated with the index. Adding the `DD` option deletes the documents as well. -If an index creation is still running ([`FT.CREATE`](/commands/ft.create) is running asynchronously), only the document hashes that have already been indexed are deleted. +If an index creation is still running ([`FT.CREATE`]({{< baseurl >}}/commands/ft.create) is running asynchronously), only the document hashes that have already been indexed are deleted. The document hashes left to be indexed remain in the database. -To check the completion of the indexing, use [`FT.INFO`](/commands/ft.info). +To check the completion of the indexing, use [`FT.INFO`]({{< baseurl >}}/commands/ft.info).
@@ -80,9 +78,9 @@ OK ## See also -[`FT.CREATE`](/commands/ft.create) | [`FT.INFO`](/commands/ft.info) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) | [`FT.INFO`]({{< baseurl >}}/commands/ft.info) ## Related topics -[RediSearch](/docs/stack/search) +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.explain/index.md b/content/commands/ft.explain/index.md index a4ccbc1ef3..349fb2a355 100644 --- a/content/commands/ft.explain/index.md +++ b/content/commands/ft.explain/index.md @@ -28,9 +28,7 @@ module: Search since: 1.0.0 stack_path: docs/interact/search-and-query summary: Returns the execution plan for a complex query -syntax: "FT.EXPLAIN index query - [DIALECT dialect] -" +syntax: 'FT.EXPLAIN index query [DIALECT dialect] ' syntax_fmt: "FT.EXPLAIN index query [DIALECT\_dialect]" syntax_str: "query [DIALECT\_dialect]" title: FT.EXPLAIN @@ -45,7 +43,7 @@ Return the execution plan for a complex query
index -is index name. You must first create the index using [`FT.CREATE`](/commands/ft.create). +is index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create).
@@ -59,7 +57,7 @@ is query string, as if sent to FT.SEARCH`.
DIALECT {dialect_version} -is dialect version under which to execute the query. If not specified, the query executes under the default dialect version set during module initial loading or via [`FT.CONFIG SET`](/commands/ft.config-set) command. +is dialect version under which to execute the query. If not specified, the query executes under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) command.
{{% alert title="Notes" color="warning" %}} @@ -103,9 +101,9 @@ INTERSECT { ## See also -[`FT.CREATE`](/commands/ft.create) | [`FT.SEARCH`](/commands/ft.search) | [`FT.CONFIG SET`](/commands/ft.config-set) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) | [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) ## Related topics -[RediSearch](/docs/stack/search) +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.explaincli/index.md b/content/commands/ft.explaincli/index.md index 7dbc31ac70..eb7baf79a6 100644 --- a/content/commands/ft.explaincli/index.md +++ b/content/commands/ft.explaincli/index.md @@ -28,9 +28,7 @@ module: Search since: 1.0.0 stack_path: docs/interact/search-and-query summary: Returns the execution plan for a complex query -syntax: "FT.EXPLAINCLI index query - [DIALECT dialect] -" +syntax: 'FT.EXPLAINCLI index query [DIALECT dialect] ' syntax_fmt: "FT.EXPLAINCLI index query [DIALECT\_dialect]" syntax_str: "query [DIALECT\_dialect]" title: FT.EXPLAINCLI @@ -45,7 +43,7 @@ Return the execution plan for a complex query but formatted for easier reading w
index -is index name. You must first create the index using [`FT.CREATE`](/commands/ft.create). +is index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create).
@@ -59,7 +57,7 @@ is query string, as if sent to FT.SEARCH`.
DIALECT {dialect_version} -is dialect version under which to execute the query. If not specified, the query executes under the default dialect version set during module initial loading or via [`FT.CONFIG SET`](/commands/ft.config-set) command. +is dialect version under which to execute the query. If not specified, the query executes under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) command. {{% alert title="Note" color="warning" %}} @@ -116,9 +114,9 @@ $ redis-cli ## See also -[`FT.CREATE`](/commands/ft.create) | [`FT.SEARCH`](/commands/ft.search) | [`FT.CONFIG SET`](/commands/ft.config-set) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) | [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) ## Related topics -[RediSearch](/docs/stack/search) +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.info/index.md b/content/commands/ft.info/index.md index b828bd5fc0..4244f8a449 100644 --- a/content/commands/ft.info/index.md +++ b/content/commands/ft.info/index.md @@ -38,7 +38,7 @@ Return information and statistics on the index
index -is full-text index name. You must first create the index using [`FT.CREATE`](/commands/ft.create). +is full-text index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create).
## Return @@ -47,7 +47,7 @@ FT.INFO returns an array reply with pairs of keys and values. Returned values include: -- `index_definition`: reflection of [`FT.CREATE`](/commands/ft.create) command parameters. +- `index_definition`: reflection of [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) command parameters. - `fields`: index schema - field names, types, and attributes. - Number of documents. - Number of distinct terms. @@ -168,9 +168,9 @@ Optional statistics include: ## See also -[`FT.CREATE`](/commands/ft.create) | [`FT.SEARCH`](/commands/ft.search) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) ## Related topics -[RediSearch](/docs/stack/search) +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.profile/index.md b/content/commands/ft.profile/index.md index 691b4db393..8fa5b1167e 100644 --- a/content/commands/ft.profile/index.md +++ b/content/commands/ft.profile/index.md @@ -49,7 +49,7 @@ syntax_str: [LIMITED] QUERY query title: FT.PROFILE --- -Apply [`FT.SEARCH`](/commands/ft.search) or [`FT.AGGREGATE`](/commands/ft.aggregate) command to collect performance details +Apply [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) or [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) command to collect performance details [Examples](#examples) @@ -58,13 +58,13 @@ Apply [`FT.SEARCH`](/commands/ft.search) or [`FT.AGGREGATE`](/commands/ft.aggreg
index -is index name, created using [`FT.CREATE`](/commands/ft.create). +is index name, created using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create).
SEARCH | AGGREGATE -is difference between [`FT.SEARCH`](/commands/ft.search) and [`FT.AGGREGATE`](/commands/ft.aggregate). +is difference between [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate).
@@ -76,14 +76,14 @@ removes details of `reader` iterator.
QUERY {query} -is query string, sent to [`FT.SEARCH`](/commands/ft.search). +is query string, sent to [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search).
Note: To reduce the size of the output, use `NOCONTENT` or `LIMIT 0 0` to reduce the reply results or `LIMITED` to not reply with details of `reader iterators` inside built-in unions such as `fuzzy` or `prefix`. ## Return -`FT.PROFILE` returns an array reply, with the first array reply identical to the reply of [`FT.SEARCH`](/commands/ft.search) and [`FT.AGGREGATE`](/commands/ft.aggregate) and a second array reply with information of time in milliseconds (ms) used to create the query and time and count of calls of iterators and result-processors. +`FT.PROFILE` returns an array reply, with the first array reply identical to the reply of [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) and a second array reply with information of time in milliseconds (ms) used to create the query and time and count of calls of iterators and result-processors. Return value has an array with two elements: @@ -172,9 +172,9 @@ Return value has an array with two elements: ## See also -[`FT.SEARCH`](/commands/ft.search) | [`FT.AGGREGATE`](/commands/ft.aggregate) +[`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) | [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) ## Related topics -[RediSearch](/docs/stack/search) +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.search/index.md b/content/commands/ft.search/index.md index 0ccea8625c..197b11a2f9 100644 --- a/content/commands/ft.search/index.md +++ b/content/commands/ft.search/index.md @@ -269,66 +269,27 @@ since: 1.0.0 stack_path: docs/interact/search-and-query summary: Searches the index with a textual query, returning either documents or just ids -syntax: "FT.SEARCH index query - [NOCONTENT] - [VERBATIM] [NOSTOPWORDS] - [WITHSCORES]\ - \ - [WITHPAYLOADS] - [WITHSORTKEYS] - [FILTER numeric_field min max [ FILTER\ - \ numeric_field min max ...]] - [GEOFILTER geo_field lon lat radius m | km | mi\ - \ | ft [ GEOFILTER 
geo_field lon lat radius m | km | mi | ft ...]] - [INKEYS count\ - \ key [key ...]] [ INFIELDS count field [field ...]] - [RETURN count identifier\ - \ [AS property] [ identifier [AS property] ...]] - [SUMMARIZE [ FIELDS count field\ - \ [field ...]] [FRAGS num] [LEN fragsize] [SEPARATOR separator]] - [HIGHLIGHT\ - \ [ FIELDS count field [field ...]] [ TAGS open close]] - [SLOP slop] - [TIMEOUT\ - \ timeout] - [INORDER] - [LANGUAGE language] - [EXPANDER expander] - [SCORER\ - \ scorer] - [EXPLAINSCORE] - [PAYLOAD payload] - [SORTBY sortby [ ASC | DESC]\ - \ [WITHCOUNT]] - [LIMIT offset num] - [PARAMS nargs name value [ name value\ - \ ...]] - [DIALECT dialect] -" -syntax_fmt: "FT.SEARCH index query [NOCONTENT] [VERBATIM] [NOSTOPWORDS] - [WITHSCORES]\ - \ [WITHPAYLOADS] [WITHSORTKEYS] [FILTER\_numeric_field - min max [FILTER\_numeric_field\ - \ min max ...]] [GEOFILTER\_geo_field - lon lat radius [GEOFILTER\_\ - geo_field lon lat - radius ...]] [INKEYS\_count key [key ...]] -\ - \ [INFIELDS\_count field [field ...]] [RETURN\_count identifier - [AS\_property]\ - \ [identifier [AS\_property] ...]] [SUMMARIZE - [FIELDS\_count field [field ...]]\ - \ [FRAGS\_num] [LEN\_fragsize] - [SEPARATOR\_separator]] [HIGHLIGHT [FIELDS\_count\ - \ field [field ...]] - [TAGS open close]] [SLOP\_slop] [TIMEOUT\_timeout] [INORDER] -\ - \ [LANGUAGE\_language] [EXPANDER\_expander] [SCORER\_scorer] - [EXPLAINSCORE]\ - \ [PAYLOAD\_payload] [SORTBY\_sortby [ASC | DESC]] - [LIMIT offset num] [PARAMS\ - \ nargs name value [name value ...]] - [DIALECT\_dialect]" +syntax: 'FT.SEARCH index query [NOCONTENT] [VERBATIM] [NOSTOPWORDS] [WITHSCORES] [WITHPAYLOADS] + [WITHSORTKEYS] [FILTER numeric_field min max [ FILTER numeric_field min max ...]] + [GEOFILTER geo_field lon lat radius m | km | mi | ft [ GEOFILTER geo_field lon lat + radius m | km | mi | ft ...]] [INKEYS count key [key ...]] [ INFIELDS count field + [field ...]] [RETURN count identifier [AS property] [ identifier [AS property] ...]] + 
[SUMMARIZE [ FIELDS count field [field ...]] [FRAGS num] [LEN fragsize] [SEPARATOR + separator]] [HIGHLIGHT [ FIELDS count field [field ...]] [ TAGS open close]] [SLOP + slop] [TIMEOUT timeout] [INORDER] [LANGUAGE language] [EXPANDER expander] [SCORER + scorer] [EXPLAINSCORE] [PAYLOAD payload] [SORTBY sortby [ ASC | DESC] [WITHCOUNT]] + [LIMIT offset num] [PARAMS nargs name value [ name value ...]] [DIALECT dialect] ' +syntax_fmt: "FT.SEARCH index query [NOCONTENT] [VERBATIM] [NOSTOPWORDS] [WITHSCORES]\ + \ [WITHPAYLOADS] [WITHSORTKEYS] [FILTER\_numeric_field min max [FILTER\_numeric_field\ + \ min max ...]] [GEOFILTER\_geo_field lon lat radius [GEOFILTER\_\ + geo_field lon lat radius ...]] [INKEYS\_count key [key ...]]\ + \ [INFIELDS\_count field [field ...]] [RETURN\_count identifier [AS\_property]\ + \ [identifier [AS\_property] ...]] [SUMMARIZE [FIELDS\_count field [field ...]]\ + \ [FRAGS\_num] [LEN\_fragsize] [SEPARATOR\_separator]] [HIGHLIGHT [FIELDS\_count\ + \ field [field ...]] [TAGS open close]] [SLOP\_slop] [TIMEOUT\_timeout] [INORDER]\ + \ [LANGUAGE\_language] [EXPANDER\_expander] [SCORER\_scorer] [EXPLAINSCORE] [PAYLOAD\_\ + payload] [SORTBY\_sortby [ASC | DESC]] [LIMIT offset num] [PARAMS nargs name value\ + \ [name value ...]] [DIALECT\_dialect]" syntax_str: "query [NOCONTENT] [VERBATIM] [NOSTOPWORDS] [WITHSCORES] [WITHPAYLOADS]\ \ [WITHSORTKEYS] [FILTER\_numeric_field min max [FILTER\_numeric_field min max ...]]\ \ [GEOFILTER\_geo_field lon lat radius [GEOFILTER\_geo_field\ @@ -352,13 +313,13 @@ Search the index with a textual query, returning either documents or just ids
index -is index name. You must first create the index using [`FT.CREATE`](/commands/ft.create). +is index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create).
query -is text query to search. If it's more than a single word, put it in quotes. Refer to [Query syntax](/docs/interact/search-and-query/query/) for more details. +is text query to search. If it's more than a single word, put it in quotes. Refer to [Query syntax]({{< relref "/develop/interact/search-and-query/query/" >}}) for more details.
## Optional arguments @@ -384,7 +345,7 @@ also returns the relative internal score of each document. This can be used to m
WITHPAYLOADS -retrieves optional document payloads. See [`FT.CREATE`](/commands/ft.create). The payloads follow the document id and, if `WITHSCORES` is set, the scores. +retrieves optional document payloads. See [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). The payloads follow the document id and, if `WITHSCORES` is set, the scores.
@@ -396,14 +357,14 @@ returns the value of the sorting key, right after the id and score and/or payloa
FILTER numeric_attribute min max -limits results to those having numeric values ranging between `min` and `max`, if numeric_attribute is defined as a numeric attribute in [`FT.CREATE`](/commands/ft.create). - `min` and `max` follow [`ZRANGE`](/commands/zrange) syntax, and can be `-inf`, `+inf`, and use `(` for exclusive ranges. Multiple numeric filters for different attributes are supported in one query. +limits results to those having numeric values ranging between `min` and `max`, if numeric_attribute is defined as a numeric attribute in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). + `min` and `max` follow [`ZRANGE`]({{< relref "/commands/zrange" >}}) syntax, and can be `-inf`, `+inf`, and use `(` for exclusive ranges. Multiple numeric filters for different attributes are supported in one query.
GEOFILTER {geo_attribute} {lon} {lat} {radius} m|km|mi|ft -filter the results to a given `radius` from `lon` and `lat`. Radius is given as a number and units. See [`GEORADIUS`](/commands/georadius) for more details. +filter the results to a given `radius` from `lon` and `lat`. Radius is given as a number and units. See [`GEORADIUS`]({{< relref "/commands/georadius" >}}) for more details.
@@ -429,13 +390,13 @@ limits the attributes returned from the document. `num` is the number of attribu
SUMMARIZE ... -returns only the sections of the attribute that contain the matched text. See [Highlighting](/docs/interact/search-and-query/advanced-concepts/highlight/) for more information. +returns only the sections of the attribute that contain the matched text. See [Highlighting]({{< relref "/develop/interact/search-and-query/advanced-concepts/highlight" >}}) for more information.
HIGHLIGHT ... -formats occurrences of matched text. See [Highlighting](/docs/interact/search-and-query/advanced-concepts/highlight/) for more information. +formats occurrences of matched text. See [Highlighting]({{< relref "/develop/interact/search-and-query/advanced-concepts/highlight" >}}) for more information.
@@ -459,19 +420,19 @@ requires the terms in the document to have the same order as the terms in the qu use a stemmer for the supplied language during search for query expansion. If querying documents in Chinese, set to `chinese` to properly tokenize the query terms. Defaults to English. If an unsupported language is sent, the command returns an error. - See [`FT.CREATE`](/commands/ft.create) for the list of languages. + See [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) for the list of languages.
EXPANDER {expander} -uses a custom query expander instead of the stemmer. See [Extensions](/docs/interact/search-and-query/administration/extensions/). +uses a custom query expander instead of the stemmer. See [Extensions]({{< relref "/develop/interact/search-and-query/administration/extensions" >}}).
SCORER {scorer} -uses a [built-in](/docs/interact/search-and-query/advanced-concepts/scoring/) or a [user-provided](/docs/interact/search-and-query/administration/extensions/) scoring function. +uses a [built-in]({{< relref "/develop/interact/search-and-query/advanced-concepts/scoring" >}}) or a [user-provided]({{< relref "/develop/interact/search-and-query/administration/extensions" >}}) scoring function.
@@ -483,7 +444,7 @@ returns a textual description of how the scores were calculated. Using this opti
PAYLOAD {payload} -adds an arbitrary, binary safe payload that is exposed to custom scoring functions. See [Extensions](/docs/interact/search-and-query/administration/extensions/). +adds an arbitrary, binary safe payload that is exposed to custom scoring functions. See [Extensions]({{< relref "/develop/interact/search-and-query/administration/extensions" >}}).
@@ -525,7 +486,7 @@ You can reference parameters in the `query` by a `$`, followed by the parameter
DIALECT {dialect_version} -selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`](/commands/ft.config-set) command. +selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) command.
## Return @@ -550,7 +511,7 @@ In order to maintain backward compatibility, the default behavior with RediSearc To return all the values, use `DIALECT` 3 (or greater, when available). -The `DIALECT` can be specified as a parameter in the FT.SEARCH command. If it is not specified, the `DEFAULT_DIALECT` is used, which can be set using [`FT.CONFIG SET`](/commands/ft.config-set) or by passing it as an argument to the `redisearch` module when it is loaded. +The `DIALECT` can be specified as a parameter in the FT.SEARCH command. If it is not specified, the `DEFAULT_DIALECT` is used, which can be set using [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) or by passing it as an argument to the `redisearch` module when it is loaded. For example, with the following document and index: @@ -817,7 +778,7 @@ First, create an index using `GEOSHAPE` type with a `FLAT` coordinate system: OK {{< / highlight >}} -Adding a couple of geometries using [`HSET`](/commands/hset): +Adding a couple of geometries using [`HSET`]({{< relref "/commands/hset" >}}): {{< highlight bash >}} @@ -857,11 +818,11 @@ Query with `CONTAINS` operator: ## See also -[`FT.CREATE`](/commands/ft.create) | [`FT.AGGREGATE`](/commands/ft.aggregate) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) | [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) ## Related topics -- [Extensions](/docs/interact/search-and-query/administration/extensions/) -- [Highlighting](/docs/interact/search-and-query/advanced-concepts/highlight/) -- [Query syntax](/docs/interact/search-and-query/query/) -- [RediSearch](/docs/stack/search) +- [Extensions]({{< relref "/develop/interact/search-and-query/administration/extensions" >}}) +- [Highlighting]({{< relref "/develop/interact/search-and-query/advanced-concepts/highlight" >}}) +- [Query syntax]({{< relref "/develop/interact/search-and-query/query/" >}}) +- [RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.spellcheck/index.md 
b/content/commands/ft.spellcheck/index.md index 2fa47a2c3d..a873b89cc8 100644 --- a/content/commands/ft.spellcheck/index.md +++ b/content/commands/ft.spellcheck/index.md @@ -54,15 +54,10 @@ since: 1.4.0 stack_path: docs/interact/search-and-query summary: Performs spelling correction on a query, returning suggestions for misspelled terms -syntax: "FT.SPELLCHECK index query - [DISTANCE distance] - [TERMS INCLUDE | EXCLUDE\ - \ dictionary [terms [terms ...]]] - [DIALECT dialect] -" -syntax_fmt: "FT.SPELLCHECK index query [DISTANCE\_distance] [TERMS\_ dictionary [terms [terms ...]]] [DIALECT\_dialect]" +syntax: 'FT.SPELLCHECK index query [DISTANCE distance] [TERMS INCLUDE | EXCLUDE dictionary + [terms [terms ...]]] [DIALECT dialect] ' +syntax_fmt: "FT.SPELLCHECK index query [DISTANCE\_distance] [TERMS\_\ + \ dictionary [terms [terms ...]]] [DIALECT\_dialect]" syntax_str: "query [DISTANCE\_distance] [TERMS\_ dictionary [terms\ \ [terms ...]]] [DIALECT\_dialect]" title: FT.SPELLCHECK @@ -86,14 +81,14 @@ is index with the indexed terms. is search query.
-See [Spellchecking](/docs/interact/search-and-query/advanced-concepts/spellcheck/) for more details. +See [Spellchecking]({{< relref "/develop/interact/search-and-query/advanced-concepts/spellcheck" >}}) for more details. ## Optional arguments
TERMS -specifies an inclusion (`INCLUDE`) or exclusion (`EXCLUDE`) of a custom dictionary named `{dict}`. Refer to [`FT.DICTADD`](/commands/ft.dictadd), [`FT.DICTDEL`](/commands/ft.dictdel) and [`FT.DICTDUMP`](/commands/ft.dictdump) about managing custom dictionaries. +specifies an inclusion (`INCLUDE`) or exclusion (`EXCLUDE`) of a custom dictionary named `{dict}`. Refer to [`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd), [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel) and [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump) about managing custom dictionaries.
@@ -105,7 +100,7 @@ is maximum Levenshtein distance for spelling suggestions (default: 1, max: 4).
DIALECT {dialect_version} -selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`](/commands/ft.config-set) command. +selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) command.
## Return @@ -133,9 +128,9 @@ The score is calculated by dividing the number of documents in which the suggest ## See also -[`FT.CONFIG SET`](/commands/ft.config-set) | [`FT.DICTADD`](/commands/ft.dictadd) | [`FT.DICTDEL`](/commands/ft.dictdel) | [`FT.DICTDUMP`](/commands/ft.dictdump) +[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) | [`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd) | [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel) | [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump) ## Related topics -- [Spellchecking](/docs/interact/search-and-query/advanced-concepts/spellcheck/) -- [RediSearch](/docs/stack/search) \ No newline at end of file +- [Spellchecking]({{< relref "/develop/interact/search-and-query/advanced-concepts/spellcheck" >}}) +- [RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.sugadd/index.md b/content/commands/ft.sugadd/index.md index bce8b34fbd..19f01581d0 100644 --- a/content/commands/ft.sugadd/index.md +++ b/content/commands/ft.sugadd/index.md @@ -39,10 +39,7 @@ module: Search since: 1.0.0 stack_path: docs/interact/search-and-query summary: Adds a suggestion string to an auto-complete suggestion dictionary -syntax: "FT.SUGADD key string score - [INCR] - [PAYLOAD payload] -" +syntax: 'FT.SUGADD key string score [INCR] [PAYLOAD payload] ' syntax_fmt: "FT.SUGADD key string score [INCR] [PAYLOAD\_payload]" syntax_str: "string score [INCR] [PAYLOAD\_payload]" title: FT.SUGADD @@ -85,7 +82,7 @@ increments the existing entry of the suggestion by the given score, instead of r
PAYLOAD {payload} -saves an extra payload with the suggestion, that can be fetched by adding the `WITHPAYLOADS` argument to [`FT.SUGGET`](/commands/ft.sugget). +saves an extra payload with the suggestion, that can be fetched by adding the `WITHPAYLOADS` argument to [`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget).
## Return @@ -105,8 +102,8 @@ FT.SUGADD returns an integer reply, which is the current size of the suggestion ## See also -[`FT.SUGGET`](/commands/ft.sugget) | [`FT.SUGDEL`](/commands/ft.sugdel) | [`FT.SUGLEN`](/commands/ft.suglen) +[`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget) | [`FT.SUGDEL`]({{< baseurl >}}/commands/ft.sugdel) | [`FT.SUGLEN`]({{< baseurl >}}/commands/ft.suglen) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.sugdel/index.md b/content/commands/ft.sugdel/index.md index ea839ea1cd..1b9cdc9d87 100644 --- a/content/commands/ft.sugdel/index.md +++ b/content/commands/ft.sugdel/index.md @@ -68,8 +68,8 @@ FT.SUGDEL returns an integer reply, 1 if the string was found and deleted, 0 oth ## See also -[`FT.SUGGET`](/commands/ft.sugget) | [`FT.SUGADD`](/commands/ft.sugadd) | [`FT.SUGLEN`](/commands/ft.suglen) +[`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget) | [`FT.SUGADD`]({{< baseurl >}}/commands/ft.sugadd) | [`FT.SUGLEN`]({{< baseurl >}}/commands/ft.suglen) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.sugget/index.md b/content/commands/ft.sugget/index.md index 7a71106e0f..d544e9ef25 100644 --- a/content/commands/ft.sugget/index.md +++ b/content/commands/ft.sugget/index.md @@ -42,13 +42,7 @@ module: Search since: 1.0.0 stack_path: docs/interact/search-and-query summary: Gets completion suggestions for a prefix -syntax: "FT.SUGGET key prefix - [FUZZY] - [WITHSCORES] - [WITHPAYLOADS] - \ - \ [MAX max] -" +syntax: 'FT.SUGGET key prefix [FUZZY] [WITHSCORES] [WITHPAYLOADS] [MAX max] ' syntax_fmt: "FT.SUGGET key prefix [FUZZY] [WITHSCORES] [WITHPAYLOADS] [MAX\_max]" syntax_str: "prefix [FUZZY] [WITHSCORES] [WITHPAYLOADS] [MAX\_max]" title: 
FT.SUGGET @@ -124,8 +118,8 @@ FT.SUGGET returns an array reply, which is a list of the top suggestions matchin ## See also -[`FT.SUGADD`](/commands/ft.sugadd) | [`FT.SUGDEL`](/commands/ft.sugdel) | [`FT.SUGLEN`](/commands/ft.suglen) +[`FT.SUGADD`]({{< baseurl >}}/commands/ft.sugadd) | [`FT.SUGDEL`]({{< baseurl >}}/commands/ft.sugdel) | [`FT.SUGLEN`]({{< baseurl >}}/commands/ft.suglen) ## Related topics -[RediSearch](/docs/stack/search) +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/ft.suglen/index.md b/content/commands/ft.suglen/index.md index c29652f4a6..b999b8953c 100644 --- a/content/commands/ft.suglen/index.md +++ b/content/commands/ft.suglen/index.md @@ -58,8 +58,8 @@ FT.SUGLEN returns an integer reply, which is the current size of the suggestion ## See also -[`FT.SUGADD`](/commands/ft.sugadd) | [`FT.SUGDEL`](/commands/ft.sugdel) | [`FT.SUGGET`](/commands/ft.sugget) +[`FT.SUGADD`]({{< baseurl >}}/commands/ft.sugadd) | [`FT.SUGDEL`]({{< baseurl >}}/commands/ft.sugdel) | [`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.syndump/index.md b/content/commands/ft.syndump/index.md index a45badbe2a..738713c58c 100644 --- a/content/commands/ft.syndump/index.md +++ b/content/commands/ft.syndump/index.md @@ -66,8 +66,8 @@ FT.SYNDUMP returns an array reply, with a pair of `term` and an array of synonym ## See also -[`FT.SYNUPDATE`](/commands/ft.synupdate) +[`FT.SYNUPDATE`]({{< baseurl >}}/commands/ft.synupdate) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.synupdate/index.md b/content/commands/ft.synupdate/index.md index 563351f947..0b597a86ee 100644 --- 
a/content/commands/ft.synupdate/index.md +++ b/content/commands/ft.synupdate/index.md @@ -30,11 +30,8 @@ module: Search since: 1.2.0 stack_path: docs/interact/search-and-query summary: Creates or updates a synonym group with additional terms -syntax: "FT.SYNUPDATE index synonym_group_id - [SKIPINITIALSCAN] term [term ...] -" -syntax_fmt: "FT.SYNUPDATE index synonym_group_id [SKIPINITIALSCAN] term [term - ...]" +syntax: 'FT.SYNUPDATE index synonym_group_id [SKIPINITIALSCAN] term [term ...] ' +syntax_fmt: FT.SYNUPDATE index synonym_group_id [SKIPINITIALSCAN] term [term ...] syntax_str: synonym_group_id [SKIPINITIALSCAN] term [term ...] title: FT.SYNUPDATE --- @@ -89,8 +86,8 @@ OK ## See also -[`FT.SYNDUMP`](/commands/ft.syndump) +[`FT.SYNDUMP`]({{< baseurl >}}/commands/ft.syndump) ## Related topics -[RediSearch](/docs/stack/search) \ No newline at end of file +[RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) \ No newline at end of file diff --git a/content/commands/ft.tagvals/index.md b/content/commands/ft.tagvals/index.md index dd540613e1..4d49fbb41b 100644 --- a/content/commands/ft.tagvals/index.md +++ b/content/commands/ft.tagvals/index.md @@ -40,7 +40,7 @@ Return a distinct set of values indexed in a Tag field
index -is full-text index name. You must first create the index using [`FT.CREATE`](/commands/ft.create). +is full-text index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create).
@@ -53,7 +53,7 @@ Use FT.TAGVALS if your tag indexes things like cities, categories, and so on. ## Limitations -FT.TAGVALS provides no paging or sorting, and the tags are not alphabetically sorted. FT.TAGVALS only operates on [tag fields](/docs/stack/search/reference/tags). +FT.TAGVALS provides no paging or sorting, and the tags are not alphabetically sorted. FT.TAGVALS only operates on [tag fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags" >}}). The returned strings are lowercase with whitespaces removed, but otherwise unchanged. ## Return @@ -74,9 +74,9 @@ FT.TAGVALS returns an array reply of all distinct tags in the tag index. ## See also -[`FT.CREATE`](/commands/ft.create) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) ## Related topics -- [Tag fields](/docs/stack/search/reference/tags) -- [RediSearch](/docs/stack/search) +- [Tag fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/tags" >}}) +- [RediSearch]({{< relref "/develop/interact/search-and-query/" >}}) diff --git a/content/commands/function-dump/index.md b/content/commands/function-dump/index.md index 4ef37545f4..9b6b5b577d 100644 --- a/content/commands/function-dump/index.md +++ b/content/commands/function-dump/index.md @@ -27,14 +27,14 @@ syntax_str: '' title: FUNCTION DUMP --- Return the serialized payload of loaded libraries. -You can restore the serialized payload later with the [`FUNCTION RESTORE`](/commands/function-restore) command. +You can restore the serialized payload later with the [`FUNCTION RESTORE`]({{< relref "/commands/function-restore" >}}) command. For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). ## Examples -The following example shows how to dump loaded libraries using `FUNCTION DUMP` and then it calls [`FUNCTION FLUSH`](/commands/function-flush) deletes all the libraries. 
-Then, it restores the original libraries from the serialized payload with [`FUNCTION RESTORE`](/commands/function-restore). +The following example shows how to dump loaded libraries using `FUNCTION DUMP` and then it calls [`FUNCTION FLUSH`]({{< relref "/commands/function-flush" >}}), which deletes all the libraries. +Then, it restores the original libraries from the serialized payload with [`FUNCTION RESTORE`]({{< relref "/commands/function-restore" >}}). ``` redis> FUNCTION DUMP diff --git a/content/commands/function-list/index.md b/content/commands/function-list/index.md index 05823222c9..3eb0fe7452 100644 --- a/content/commands/function-list/index.md +++ b/content/commands/function-list/index.md @@ -52,7 +52,7 @@ The following information is provided for each of the libraries in the response: Each function has the following fields: * **name:** the name of the function. * **description:** the function's description. - * **flags:** an array of [function flags](/docs/manual/programmability/functions-intro/#function-flags). + * **flags:** an array of [function flags]({{< baseurl >}}/develop/interact/programmability/functions-intro#function-flags). * **library_code:** the library's source code (when given the `WITHCODE` modifier). For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). diff --git a/content/commands/function-load/index.md b/content/commands/function-load/index.md index 53e7ea0721..150c32e65f 100644 --- a/content/commands/function-load/index.md +++ b/content/commands/function-load/index.md @@ -48,7 +48,7 @@ The library payload must start with Shebang statement that provides a metadata a Shebang format: `#! name=`. Currently engine name must be `lua`. For the Lua engine, the implementation should declare one or more entry points to the library with the [`redis.register_function()` API](/topics/lua-api#redis.register_function).
-Once loaded, you can call the functions in the library with the [`FCALL`](/commands/fcall) (or [`FCALL_RO`](/commands/fcall_ro) when applicable) command. +Once loaded, you can call the functions in the library with the [`FCALL`]({{< relref "/commands/fcall" >}}) (or [`FCALL_RO`]({{< relref "/commands/fcall_ro" >}}) when applicable) command. When attempting to load a library with a name that already exists, the Redis server returns an error. The `REPLACE` modifier changes this behavior and overwrites the existing library with the new contents. diff --git a/content/commands/function-stats/index.md b/content/commands/function-stats/index.md index 2614766db5..a697d9d19a 100644 --- a/content/commands/function-stats/index.md +++ b/content/commands/function-stats/index.md @@ -45,6 +45,6 @@ The reply is map with two keys: Engine map contains statistics about the engine like number of functions and number of libraries. -You can use this command to inspect the invocation of a long-running function and decide whether kill it with the [`FUNCTION KILL`](/commands/function-kill) command. +You can use this command to inspect the invocation of a long-running function and decide whether to kill it with the [`FUNCTION KILL`]({{< relref "/commands/function-kill" >}}) command. For more information please refer to [Introduction to Redis Functions](/topics/functions-intro). diff --git a/content/commands/function/index.md b/content/commands/function/index.md index 490535c7a9..f44e79f594 100644 --- a/content/commands/function/index.md +++ b/content/commands/function/index.md @@ -25,4 +25,4 @@ title: FUNCTION --- This is a container command for function commands. -To see the list of available commands you can call [`FUNCTION HELP`](/commands/function-help). \ No newline at end of file +To see the list of available commands you can call [`FUNCTION HELP`]({{< relref "/commands/function-help" >}}).
\ No newline at end of file diff --git a/content/commands/geoadd/index.md b/content/commands/geoadd/index.md index ee578f2d9a..e489763add 100644 --- a/content/commands/geoadd/index.md +++ b/content/commands/geoadd/index.md @@ -80,13 +80,12 @@ linkTitle: GEOADD since: 3.2.0 summary: Adds one or more members to a geospatial index. The key is created if it doesn't exist. -syntax_fmt: "GEOADD key [NX | XX] [CH] longitude latitude member [longitude - latitude\ - \ member ...]" +syntax_fmt: GEOADD key [NX | XX] [CH] longitude latitude member [longitude latitude + member ...] syntax_str: '[NX | XX] [CH] longitude latitude member [longitude latitude member ...]' title: GEOADD --- -Adds the specified geospatial items (longitude, latitude, name) to the specified key. Data is stored into the key as a sorted set, in a way that makes it possible to query the items with the [`GEOSEARCH`](/commands/geosearch) command. +Adds the specified geospatial items (longitude, latitude, name) to the specified key. Data is stored into the key as a sorted set, in a way that makes it possible to query the items with the [`GEOSEARCH`]({{< relref "/commands/geosearch" >}}) command. The command takes arguments in the standard format x,y so the longitude must be specified before the latitude. There are limits to the coordinates that can be indexed: areas very near to the poles are not indexable. @@ -97,7 +96,7 @@ The exact limits, as specified by EPSG:900913 / EPSG:3785 / OSGEO:41001 are the The command will report an error when the user attempts to index coordinates outside the specified ranges. -**Note:** there is no **GEODEL** command because you can use [`ZREM`](/commands/zrem) to remove elements. The Geo index structure is just a sorted set. +**Note:** there is no **GEODEL** command because you can use [`ZREM`]({{< relref "/commands/zrem" >}}) to remove elements. The Geo index structure is just a sorted set. 
## GEOADD options diff --git a/content/commands/geodist/index.md b/content/commands/geodist/index.md index 116d1535ab..f599a13779 100644 --- a/content/commands/geodist/index.md +++ b/content/commands/geodist/index.md @@ -73,7 +73,7 @@ title: GEODIST --- Return the distance between two members in the geospatial index represented by the sorted set. -Given a sorted set representing a geospatial index, populated using the [`GEOADD`](/commands/geoadd) command, the command returns the distance between the two specified members in the specified unit. +Given a sorted set representing a geospatial index, populated using the [`GEOADD`]({{< relref "/commands/geoadd" >}}) command, the command returns the distance between the two specified members in the specified unit. If one or both the members are missing, the command returns NULL. diff --git a/content/commands/geohash/index.md b/content/commands/geohash/index.md index c8e329eda7..fe507c2c1e 100644 --- a/content/commands/geohash/index.md +++ b/content/commands/geohash/index.md @@ -50,7 +50,7 @@ syntax_fmt: GEOHASH key [member [member ...]] syntax_str: '[member [member ...]]' title: GEOHASH --- -Return valid [Geohash](https://en.wikipedia.org/wiki/Geohash) strings representing the position of one or more elements in a sorted set value representing a geospatial index (where elements were added using [`GEOADD`](/commands/geoadd)). +Return valid [Geohash](https://en.wikipedia.org/wiki/Geohash) strings representing the position of one or more elements in a sorted set value representing a geospatial index (where elements were added using [`GEOADD`]({{< relref "/commands/geoadd" >}})). Normally Redis represents positions of elements using a variation of the Geohash technique where positions are encoded using 52 bit integers. 
The encoding is diff --git a/content/commands/geopos/index.md b/content/commands/geopos/index.md index d921e4bb1f..d2d2a3af71 100644 --- a/content/commands/geopos/index.md +++ b/content/commands/geopos/index.md @@ -52,7 +52,7 @@ title: GEOPOS --- Return the positions (longitude,latitude) of all the specified members of the geospatial index represented by the sorted set at *key*. -Given a sorted set representing a geospatial index, populated using the [`GEOADD`](/commands/geoadd) command, it is often useful to obtain back the coordinates of specified members. When the geospatial index is populated via [`GEOADD`](/commands/geoadd) the coordinates are converted into a 52 bit geohash, so the coordinates returned may not be exactly the ones used in order to add the elements, but small errors may be introduced. +Given a sorted set representing a geospatial index, populated using the [`GEOADD`]({{< relref "/commands/geoadd" >}}) command, it is often useful to obtain back the coordinates of specified members. When the geospatial index is populated via [`GEOADD`]({{< relref "/commands/geoadd" >}}) the coordinates are converted into a 52 bit geohash, so the coordinates returned may not be exactly the ones used in order to add the elements, but small errors may be introduced. The command can accept a variable number of arguments so it always returns an array of positions even when a single element is specified. 
diff --git a/content/commands/georadius/index.md b/content/commands/georadius/index.md index 5382e7b3b7..c4435afff5 100644 --- a/content/commands/georadius/index.md +++ b/content/commands/georadius/index.md @@ -161,23 +161,21 @@ key_specs: type: range update: true linkTitle: GEORADIUS -replaced_by: '[`GEOSEARCH`](/commands/geosearch) and [`GEOSEARCHSTORE`](/commands/geosearchstore) +replaced_by: '[`GEOSEARCH`]({{< relref "/commands/geosearch" >}}) and [`GEOSEARCHSTORE`]({{< relref "/commands/geosearchstore" >}}) with the `BYRADIUS` argument' since: 3.2.0 summary: Queries a geospatial index for members within a distance from a coordinate, optionally stores the result. -syntax_fmt: "GEORADIUS key longitude latitude radius - [WITHCOORD]\ - \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC] - [STORE\_key | STOREDIST\_\ +syntax_fmt: "GEORADIUS key longitude latitude radius [WITHCOORD]\ + \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC] [STORE\_key | STOREDIST\_\ key]" syntax_str: "longitude latitude radius [WITHCOORD] [WITHDIST] [WITHHASH]\ \ [COUNT\_count [ANY]] [ASC | DESC] [STORE\_key | STOREDIST\_key]" title: GEORADIUS --- -Return the members of a sorted set populated with geospatial information using [`GEOADD`](/commands/geoadd), which are within the borders of the area specified with the center location and the maximum distance from the center (the radius). +Return the members of a sorted set populated with geospatial information using [`GEOADD`]({{< relref "/commands/geoadd" >}}), which are within the borders of the area specified with the center location and the maximum distance from the center (the radius). -This manual page also covers the [`GEORADIUS_RO`](/commands/georadius_ro) and [`GEORADIUSBYMEMBER_RO`](/commands/georadiusbymember_ro) variants (see the section below for more information). 
+This manual page also covers the [`GEORADIUS_RO`]({{< relref "/commands/georadius_ro" >}}) and [`GEORADIUSBYMEMBER_RO`]({{< relref "/commands/georadiusbymember_ro" >}}) variants (see the section below for more information). The common use case for this command is to retrieve geospatial items near a specified point not farther than a given amount of meters (or other units). This allows, for example, to suggest mobile users of an application nearby places. @@ -212,9 +210,9 @@ By default the command returns the items to the client. It is possible to store ## Read-only variants -Since `GEORADIUS` and [`GEORADIUSBYMEMBER`](/commands/georadiusbymember) have a `STORE` and `STOREDIST` option they are technically flagged as writing commands in the Redis command table. For this reason read-only replicas will flag them, and Redis Cluster replicas will redirect them to the master instance even if the connection is in read-only mode (see the [`READONLY`](/commands/readonly) command of Redis Cluster). +Since `GEORADIUS` and [`GEORADIUSBYMEMBER`]({{< relref "/commands/georadiusbymember" >}}) have a `STORE` and `STOREDIST` option they are technically flagged as writing commands in the Redis command table. For this reason read-only replicas will flag them, and Redis Cluster replicas will redirect them to the master instance even if the connection is in read-only mode (see the [`READONLY`]({{< relref "/commands/readonly" >}}) command of Redis Cluster). -Breaking the compatibility with the past was considered but rejected, at least for Redis 4.0, so instead two read-only variants of the commands were added. They are exactly like the original commands but refuse the `STORE` and `STOREDIST` options. The two variants are called [`GEORADIUS_RO`](/commands/georadius_ro) and [`GEORADIUSBYMEMBER_RO`](/commands/georadiusbymember_ro), and can safely be used in replicas. 
+Breaking the compatibility with the past was considered but rejected, at least for Redis 4.0, so instead two read-only variants of the commands were added. They are exactly like the original commands but refuse the `STORE` and `STOREDIST` options. The two variants are called [`GEORADIUS_RO`]({{< relref "/commands/georadius_ro" >}}) and [`GEORADIUSBYMEMBER_RO`]({{< relref "/commands/georadiusbymember_ro" >}}), and can safely be used in replicas. ## Examples diff --git a/content/commands/georadius_ro/index.md b/content/commands/georadius_ro/index.md index a918782054..2fb342badf 100644 --- a/content/commands/georadius_ro/index.md +++ b/content/commands/georadius_ro/index.md @@ -117,17 +117,16 @@ key_specs: limit: 0 type: range linkTitle: GEORADIUS_RO -replaced_by: '[`GEOSEARCH`](/commands/geosearch) with the `BYRADIUS` argument' +replaced_by: '[`GEOSEARCH`]({{< relref "/commands/geosearch" >}}) with the `BYRADIUS` argument' since: 3.2.10 summary: Returns members from a geospatial index that are within a distance from a coordinate. -syntax_fmt: "GEORADIUS_RO key longitude latitude radius - [WITHCOORD]\ +syntax_fmt: "GEORADIUS_RO key longitude latitude radius [WITHCOORD]\ \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC]" syntax_str: "longitude latitude radius [WITHCOORD] [WITHDIST] [WITHHASH]\ \ [COUNT\_count [ANY]] [ASC | DESC]" title: GEORADIUS_RO --- -Read-only variant of the [`GEORADIUS`](/commands/georadius) command. +Read-only variant of the [`GEORADIUS`]({{< relref "/commands/georadius" >}}) command. -This command is identical to the [`GEORADIUS`](/commands/georadius) command, except that it doesn't support the optional `STORE` and `STOREDIST` parameters. +This command is identical to the [`GEORADIUS`]({{< relref "/commands/georadius" >}}) command, except that it doesn't support the optional `STORE` and `STOREDIST` parameters. 
diff --git a/content/commands/georadiusbymember/index.md b/content/commands/georadiusbymember/index.md index f6670bb1ac..349c0c2cfa 100644 --- a/content/commands/georadiusbymember/index.md +++ b/content/commands/georadiusbymember/index.md @@ -155,28 +155,26 @@ key_specs: type: range update: true linkTitle: GEORADIUSBYMEMBER -replaced_by: '[`GEOSEARCH`](/commands/geosearch) and [`GEOSEARCHSTORE`](/commands/geosearchstore) +replaced_by: '[`GEOSEARCH`]({{< relref "/commands/geosearch" >}}) and [`GEOSEARCHSTORE`]({{< relref "/commands/geosearchstore" >}}) with the `BYRADIUS` and `FROMMEMBER` arguments' since: 3.2.0 summary: Queries a geospatial index for members within a distance from a member, optionally stores the result. -syntax_fmt: "GEORADIUSBYMEMBER key member radius [WITHCOORD] -\ - \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC] [STORE\_key - | STOREDIST\_\ +syntax_fmt: "GEORADIUSBYMEMBER key member radius [WITHCOORD] \ + \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC] [STORE\_key | STOREDIST\_\ key]" syntax_str: "member radius [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT\_\ count [ANY]] [ASC | DESC] [STORE\_key | STOREDIST\_key]" title: GEORADIUSBYMEMBER --- -This command is exactly like [`GEORADIUS`](/commands/georadius) with the sole difference that instead +This command is exactly like [`GEORADIUS`]({{< relref "/commands/georadius" >}}) with the sole difference that instead of taking, as the center of the area to query, a longitude and latitude value, it takes the name of a member already existing inside the geospatial index represented by the sorted set. The position of the specified member is used as the center of the query. -Please check the example below and the [`GEORADIUS`](/commands/georadius) documentation for more information about the command and its options. +Please check the example below and the [`GEORADIUS`]({{< relref "/commands/georadius" >}}) documentation for more information about the command and its options. 
-Note that [`GEORADIUSBYMEMBER_RO`](/commands/georadiusbymember_ro) is also available since Redis 3.2.10 and Redis 4.0.0 in order to provide a read-only command that can be used in replicas. See the [`GEORADIUS`](/commands/georadius) page for more information. +Note that [`GEORADIUSBYMEMBER_RO`]({{< relref "/commands/georadiusbymember_ro" >}}) is also available since Redis 3.2.10 and Redis 4.0.0 in order to provide a read-only command that can be used in replicas. See the [`GEORADIUS`]({{< relref "/commands/georadius" >}}) page for more information. ## Examples diff --git a/content/commands/georadiusbymember_ro/index.md b/content/commands/georadiusbymember_ro/index.md index 84a52cbe57..1823a15faa 100644 --- a/content/commands/georadiusbymember_ro/index.md +++ b/content/commands/georadiusbymember_ro/index.md @@ -110,18 +110,17 @@ key_specs: limit: 0 type: range linkTitle: GEORADIUSBYMEMBER_RO -replaced_by: '[`GEOSEARCH`](/commands/geosearch) with the `BYRADIUS` and `FROMMEMBER` +replaced_by: '[`GEOSEARCH`]({{< relref "/commands/geosearch" >}}) with the `BYRADIUS` and `FROMMEMBER` arguments' since: 3.2.10 summary: Returns members from a geospatial index that are within a distance from a member. -syntax_fmt: "GEORADIUSBYMEMBER_RO key member radius - [WITHCOORD]\ +syntax_fmt: "GEORADIUSBYMEMBER_RO key member radius [WITHCOORD]\ \ [WITHDIST] [WITHHASH] [COUNT\_count [ANY]] [ASC | DESC]" syntax_str: "member radius [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT\_\ count [ANY]] [ASC | DESC]" title: GEORADIUSBYMEMBER_RO --- -Read-only variant of the [`GEORADIUSBYMEMBER`](/commands/georadiusbymember) command. +Read-only variant of the [`GEORADIUSBYMEMBER`]({{< relref "/commands/georadiusbymember" >}}) command. -This command is identical to the [`GEORADIUSBYMEMBER`](/commands/georadiusbymember) command, except that it doesn't support the optional `STORE` and `STOREDIST` parameters. 
+This command is identical to the [`GEORADIUSBYMEMBER`]({{< relref "/commands/georadiusbymember" >}}) command, except that it doesn't support the optional `STORE` and `STOREDIST` parameters. diff --git a/content/commands/geosearch/index.md b/content/commands/geosearch/index.md index 7ab8a294dd..0c5553c3aa 100644 --- a/content/commands/geosearch/index.md +++ b/content/commands/geosearch/index.md @@ -161,20 +161,17 @@ key_specs: linkTitle: GEOSEARCH since: 6.2.0 summary: Queries a geospatial index for members inside an area of a box or a circle. -syntax_fmt: "GEOSEARCH key -\ - \ | BYBOX\_width height > [ASC | DESC] [COUNT\_count [ANY]] [WITHCOORD] [WITHDIST] - [WITHHASH]" +syntax_fmt: "GEOSEARCH key \ + \ | BYBOX\_width height >\ + \ [ASC | DESC] [COUNT\_count [ANY]] [WITHCOORD] [WITHDIST] [WITHHASH]" syntax_str: " | BYBOX\_width height > [ASC | DESC] [COUNT\_\ count [ANY]] [WITHCOORD] [WITHDIST] [WITHHASH]" title: GEOSEARCH --- -Return the members of a sorted set populated with geospatial information using [`GEOADD`](/commands/geoadd), which are within the borders of the area specified by a given shape. This command extends the [`GEORADIUS`](/commands/georadius) command, so in addition to searching within circular areas, it supports searching within rectangular areas. +Return the members of a sorted set populated with geospatial information using [`GEOADD`]({{< relref "/commands/geoadd" >}}), which are within the borders of the area specified by a given shape. This command extends the [`GEORADIUS`]({{< relref "/commands/georadius" >}}) command, so in addition to searching within circular areas, it supports searching within rectangular areas. -This command should be used in place of the deprecated [`GEORADIUS`](/commands/georadius) and [`GEORADIUSBYMEMBER`](/commands/georadiusbymember) commands. 
+This command should be used in place of the deprecated [`GEORADIUS`]({{< relref "/commands/georadius" >}}) and [`GEORADIUSBYMEMBER`]({{< relref "/commands/georadiusbymember" >}}) commands. The query's center point is provided by one of these mandatory options: @@ -183,7 +180,7 @@ The query's center point is provided by one of these mandatory options: The query's shape is provided by one of these mandatory options: -* `BYRADIUS`: Similar to [`GEORADIUS`](/commands/georadius), search inside circular area according to given ``. +* `BYRADIUS`: Similar to [`GEORADIUS`]({{< relref "/commands/georadius" >}}), search inside circular area according to given ``. * `BYBOX`: Search inside an axis-aligned rectangle, determined by `` and ``. The command optionally returns additional information using the following options: diff --git a/content/commands/geosearchstore/index.md b/content/commands/geosearchstore/index.md index c1269428af..551f3322de 100644 --- a/content/commands/geosearchstore/index.md +++ b/content/commands/geosearchstore/index.md @@ -170,20 +170,17 @@ linkTitle: GEOSEARCHSTORE since: 6.2.0 summary: Queries a geospatial index for members inside an area of a box or a circle, optionally stores the result. -syntax_fmt: "GEOSEARCHSTORE destination source - | BYBOX\_width height\ - \ > [ASC | DESC] [COUNT\_count - [ANY]] [STOREDIST]" +syntax_fmt: "GEOSEARCHSTORE destination source | BYBOX\_width height > [ASC | DESC] [COUNT\_count [ANY]] [STOREDIST]" syntax_str: "source | BYBOX\_width height > [ASC | DESC]\ \ [COUNT\_count [ANY]] [STOREDIST]" title: GEOSEARCHSTORE --- -This command is like [`GEOSEARCH`](/commands/geosearch), but stores the result in destination key. +This command is like [`GEOSEARCH`]({{< relref "/commands/geosearch" >}}), but stores the result in destination key. -This command replaces the now deprecated [`GEORADIUS`](/commands/georadius) and [`GEORADIUSBYMEMBER`](/commands/georadiusbymember). 
+This command replaces the now deprecated [`GEORADIUS`]({{< relref "/commands/georadius" >}}) and [`GEORADIUSBYMEMBER`]({{< relref "/commands/georadiusbymember" >}}). By default, it stores the results in the `destination` sorted set with their geospatial information. diff --git a/content/commands/getdel/index.md b/content/commands/getdel/index.md index 46f7c0d142..cea297dc61 100644 --- a/content/commands/getdel/index.md +++ b/content/commands/getdel/index.md @@ -48,7 +48,7 @@ syntax_str: '' title: GETDEL --- Get the value of `key` and delete the key. -This command is similar to [`GET`](/commands/get), except for the fact that it also deletes the key on success (if and only if the key's value type is a string). +This command is similar to [`GET`]({{< relref "/commands/get" >}}), except for the fact that it also deletes the key on success (if and only if the key's value type is a string). ## Examples diff --git a/content/commands/getex/index.md b/content/commands/getex/index.md index 89cd8cac8b..7162197373 100644 --- a/content/commands/getex/index.md +++ b/content/commands/getex/index.md @@ -68,15 +68,14 @@ key_specs: linkTitle: GETEX since: 6.2.0 summary: Returns the string value of a key after setting its expiration time. -syntax_fmt: "GETEX key [EX\_seconds | PX\_milliseconds | EXAT\_unix-time-seconds | -\ - \ PXAT\_unix-time-milliseconds | PERSIST]" +syntax_fmt: "GETEX key [EX\_seconds | PX\_milliseconds | EXAT\_unix-time-seconds |\ + \ PXAT\_unix-time-milliseconds | PERSIST]" syntax_str: "[EX\_seconds | PX\_milliseconds | EXAT\_unix-time-seconds | PXAT\_unix-time-milliseconds\ \ | PERSIST]" title: GETEX --- Get the value of `key` and optionally set its expiration. -`GETEX` is similar to [`GET`](/commands/get), but is a write command with additional options. +`GETEX` is similar to [`GET`]({{< relref "/commands/get" >}}), but is a write command with additional options. 
## Options @@ -86,7 +85,7 @@ The `GETEX` command supports a set of options that modify its behavior: * `PX` *milliseconds* -- Set the specified expire time, in milliseconds. * `EXAT` *timestamp-seconds* -- Set the specified Unix time at which the key will expire, in seconds. * `PXAT` *timestamp-milliseconds* -- Set the specified Unix time at which the key will expire, in milliseconds. -* [`PERSIST`](/commands/persist) -- Remove the time to live associated with the key. +* [`PERSIST`]({{< relref "/commands/persist" >}}) -- Remove the time to live associated with the key. ## Examples diff --git a/content/commands/getset/index.md b/content/commands/getset/index.md index 04eff68405..04ca705fec 100644 --- a/content/commands/getset/index.md +++ b/content/commands/getset/index.md @@ -49,7 +49,7 @@ key_specs: type: range update: true linkTitle: GETSET -replaced_by: '[`SET`](/commands/set) with the `GET` argument' +replaced_by: '[`SET`]({{< relref "/commands/set" >}}) with the `GET` argument' since: 1.0.0 summary: Returns the previous string value of a key after setting it to a new value. syntax_fmt: GETSET key value @@ -59,12 +59,12 @@ title: GETSET Atomically sets `key` to `value` and returns the old value stored at `key`. Returns an error when `key` exists but does not hold a string value. Any previous time to live associated with the key is discarded on successful -[`SET`](/commands/set) operation. +[`SET`]({{< relref "/commands/set" >}}) operation. ## Design pattern -`GETSET` can be used together with [`INCR`](/commands/incr) for counting with atomic reset. -For example: a process may call [`INCR`](/commands/incr) against the key `mycounter` every time +`GETSET` can be used together with [`INCR`]({{< relref "/commands/incr" >}}) for counting with atomic reset. 
+For example: a process may call [`INCR`]({{< relref "/commands/incr" >}}) against the key `mycounter` every time some event occurs, but from time to time we need to get the value of the counter and reset it to zero atomically. This can be done using `GETSET mycounter "0"`: diff --git a/content/commands/hello/index.md b/content/commands/hello/index.md index ce89366e2a..147db937f5 100644 --- a/content/commands/hello/index.md +++ b/content/commands/hello/index.md @@ -65,7 +65,7 @@ connection's name, or provide a contextual client report. Redis version 6 and above supports two protocols: the old protocol, RESP2, and a new one introduced with Redis 6, RESP3. RESP3 has certain advantages since when the connection is in this mode, Redis is able to reply with more semantical -replies: for instance, [`HGETALL`](/commands/hgetall) will return a *map type*, so a client library +replies: for instance, [`HGETALL`]({{< relref "/commands/hgetall" >}}) will return a *map type*, so a client library implementation no longer requires to know in advance to translate the array into a hash before returning it to the caller. For a full coverage of RESP3, please check the [RESP3 specification](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md). @@ -109,10 +109,10 @@ command and specify the value "3" as the `protover` argument, like so: Because `HELLO` replies with useful information, and given that `protover` is optional or can be set to "2", client library authors may consider using this -command instead of the canonical [`PING`](/commands/ping) when setting up the connection. +command instead of the canonical [`PING`]({{< relref "/commands/ping" >}}) when setting up the connection. When called with the optional `protover` argument, this command switches the protocol to the specified version and also accepts the following options: -* `AUTH `: directly authenticate the connection in addition to switching to the specified protocol version. 
This makes calling [`AUTH`](/commands/auth) before `HELLO` unnecessary when setting up a new connection. Note that the `username` can be set to "default" to authenticate against a server that does not use ACLs, but rather the simpler `requirepass` mechanism of Redis prior to version 6. -* `SETNAME `: this is the equivalent of calling [`CLIENT SETNAME`](/commands/client-setname). +* `AUTH `: directly authenticate the connection in addition to switching to the specified protocol version. This makes calling [`AUTH`]({{< relref "/commands/auth" >}}) before `HELLO` unnecessary when setting up a new connection. Note that the `username` can be set to "default" to authenticate against a server that does not use ACLs, but rather the simpler `requirepass` mechanism of Redis prior to version 6. +* `SETNAME `: this is the equivalent of calling [`CLIENT SETNAME`]({{< relref "/commands/client-setname" >}}). diff --git a/content/commands/hincrbyfloat/index.md b/content/commands/hincrbyfloat/index.md index dd421366ba..0dc64c35bd 100644 --- a/content/commands/hincrbyfloat/index.md +++ b/content/commands/hincrbyfloat/index.md @@ -66,8 +66,8 @@ An error is returned if one of the following conditions occur: * The current field content or the specified increment are not parsable as a double precision floating point number. -The exact behavior of this command is identical to the one of the [`INCRBYFLOAT`](/commands/incrbyfloat) -command, please refer to the documentation of [`INCRBYFLOAT`](/commands/incrbyfloat) for further +The exact behavior of this command is identical to the one of the [`INCRBYFLOAT`]({{< relref "/commands/incrbyfloat" >}}) +command, please refer to the documentation of [`INCRBYFLOAT`]({{< relref "/commands/incrbyfloat" >}}) for further information. 
## Examples @@ -84,5 +84,5 @@ HINCRBYFLOAT mykey field 2.0e2 ## Implementation details The command is always propagated in the replication link and the Append Only -File as a [`HSET`](/commands/hset) operation, so that differences in the underlying floating point +File as a [`HSET`]({{< relref "/commands/hset" >}}) operation, so that differences in the underlying floating point math implementation will not be sources of inconsistency. diff --git a/content/commands/hmset/index.md b/content/commands/hmset/index.md index 36cb0e7637..697e89765d 100644 --- a/content/commands/hmset/index.md +++ b/content/commands/hmset/index.md @@ -54,7 +54,7 @@ key_specs: type: range update: true linkTitle: HMSET -replaced_by: '[`HSET`](/commands/hset) with multiple field-value pairs' +replaced_by: '[`HSET`]({{< relref "/commands/hset" >}}) with multiple field-value pairs' since: 2.0.0 summary: Sets the values of multiple fields. syntax_fmt: HMSET key field value [field value ...] diff --git a/content/commands/hrandfield/index.md b/content/commands/hrandfield/index.md index 2673e0a25d..1a131d6e11 100644 --- a/content/commands/hrandfield/index.md +++ b/content/commands/hrandfield/index.md @@ -62,7 +62,7 @@ title: HRANDFIELD When called with just the `key` argument, return a random field from the hash value stored at `key`. If the provided `count` argument is positive, return an array of **distinct fields**. -The array's length is either `count` or the hash's number of fields ([`HLEN`](/commands/hlen)), whichever is lower. +The array's length is either `count` or the hash's number of fields ([`HLEN`]({{< relref "/commands/hlen" >}})), whichever is lower. If called with a negative `count`, the behavior changes and the command is allowed to return the **same field multiple times**. In this case, the number of returned fields is the absolute value of the specified `count`. 
diff --git a/content/commands/hscan/index.md b/content/commands/hscan/index.md index fb826626ce..3b8f289ad7 100644 --- a/content/commands/hscan/index.md +++ b/content/commands/hscan/index.md @@ -62,4 +62,4 @@ syntax_fmt: "HSCAN key cursor [MATCH\_pattern] [COUNT\_count]" syntax_str: "cursor [MATCH\_pattern] [COUNT\_count]" title: HSCAN --- -See [`SCAN`](/commands/scan) for `HSCAN` documentation. +See [`SCAN`]({{< relref "/commands/scan" >}}) for `HSCAN` documentation. diff --git a/content/commands/incr/index.md b/content/commands/incr/index.md index dc48810658..c4a19c3ee0 100644 --- a/content/commands/incr/index.md +++ b/content/commands/incr/index.md @@ -89,12 +89,12 @@ string representing the current date. This simple pattern can be extended in many ways: -* It is possible to use `INCR` and [`EXPIRE`](/commands/expire) together at every page view to have +* It is possible to use `INCR` and [`EXPIRE`]({{< relref "/commands/expire" >}}) together at every page view to have a counter counting only the latest N page views separated by less than the specified amount of seconds. * A client may use GETSET in order to atomically get the current counter value and reset it to zero. -* Using other atomic increment/decrement commands like [`DECR`](/commands/decr) or [`INCRBY`](/commands/incrby) it +* Using other atomic increment/decrement commands like [`DECR`]({{< relref "/commands/decr" >}}) or [`INCRBY`]({{< relref "/commands/incrby" >}}) it is possible to handle values that may get bigger or smaller depending on the operations performed by the user. Imagine for instance the score of different users in an online game. @@ -135,7 +135,7 @@ But these counters are always incremented setting an expire of 10 seconds so tha they'll be removed by Redis automatically when the current second is a different one. 
-Note the used of [`MULTI`](/commands/multi) and [`EXEC`](/commands/exec) in order to make sure that we'll both
+Note the use of [`MULTI`]({{< relref "/commands/multi" >}}) and [`EXEC`]({{< relref "/commands/exec" >}}) in order to make sure that we'll both
 increment and set the expire at every API call.
 
 ## Pattern: Rate limiter 2
@@ -165,10 +165,10 @@ value greater than 10, otherwise it will expire and start again from 0.
 
 **In the above code there is a race condition**.
 If for some reason the client performs the `INCR` command but does not perform
-the [`EXPIRE`](/commands/expire) the key will be leaked until we'll see the same IP address again.
+the [`EXPIRE`]({{< relref "/commands/expire" >}}) the key will be leaked until we'll see the same IP address again.
 
-This can be fixed easily turning the `INCR` with optional [`EXPIRE`](/commands/expire) into a Lua
-script that is send using the [`EVAL`](/commands/eval) command (only available since Redis version
+This can be fixed easily turning the `INCR` with optional [`EXPIRE`]({{< relref "/commands/expire" >}}) into a Lua
+script that is sent using the [`EVAL`]({{< relref "/commands/eval" >}}) command (only available since Redis version
 2.6).
 
 ```
@@ -203,10 +203,10 @@ ELSE
 END
 ```
 
-The [`RPUSHX`](/commands/rpushx) command only pushes the element if the key already exists.
+The [`RPUSHX`]({{< relref "/commands/rpushx" >}}) command only pushes the element if the key already exists.
 
-Note that we have a race here, but it is not a problem: [`EXISTS`](/commands/exists) may return
+Note that we have a race here, but it is not a problem: [`EXISTS`]({{< relref "/commands/exists" >}}) may return
 false but the key may be created by another client before we create it inside
-the [`MULTI`](/commands/multi) / [`EXEC`](/commands/exec) block.
+the [`MULTI`]({{< relref "/commands/multi" >}}) / [`EXEC`]({{< relref "/commands/exec" >}}) block.
However this race will just miss an API call under rare conditions, so the rate limiting will still work correctly. diff --git a/content/commands/incrby/index.md b/content/commands/incrby/index.md index 0d40f2ef11..c1a71d0816 100644 --- a/content/commands/incrby/index.md +++ b/content/commands/incrby/index.md @@ -59,7 +59,7 @@ An error is returned if the key contains a value of the wrong type or contains a string that can not be represented as integer. This operation is limited to 64 bit signed integers. -See [`INCR`](/commands/incr) for extra information on increment/decrement operations. +See [`INCR`]({{< relref "/commands/incr" >}}) for extra information on increment/decrement operations. ## Examples diff --git a/content/commands/incrbyfloat/index.md b/content/commands/incrbyfloat/index.md index 1120951f96..7a0ddc7f55 100644 --- a/content/commands/incrbyfloat/index.md +++ b/content/commands/incrbyfloat/index.md @@ -92,5 +92,5 @@ INCRBYFLOAT mykey 2.0e2 ## Implementation details The command is always propagated in the replication link and the Append Only -File as a [`SET`](/commands/set) operation, so that differences in the underlying floating point +File as a [`SET`]({{< relref "/commands/set" >}}) operation, so that differences in the underlying floating point math implementation will not be sources of inconsistency. diff --git a/content/commands/info/index.md b/content/commands/info/index.md index fa66dc9491..23f78f1921 100644 --- a/content/commands/info/index.md +++ b/content/commands/info/index.md @@ -123,10 +123,10 @@ Here is the meaning of all fields in the **clients** section: `cluster_connections`. 
* `client_recent_max_input_buffer`: Biggest input buffer among current client connections * `client_recent_max_output_buffer`: Biggest output buffer among current client connections -* `blocked_clients`: Number of clients pending on a blocking call ([`BLPOP`](/commands/blpop), - [`BRPOP`](/commands/brpop), [`BRPOPLPUSH`](/commands/brpoplpush), [`BLMOVE`](/commands/blmove), [`BZPOPMIN`](/commands/bzpopmin), [`BZPOPMAX`](/commands/bzpopmax)) -* `tracking_clients`: Number of clients being tracked ([`CLIENT TRACKING`](/commands/client-tracking)) -* `pubsub_clients`: Number of clients in pubsub mode ([`SUBSCRIBE`](/commands/subscribe), [`PSUBSCRIBE`](/commands/psubscribe), [`SSUBSCRIBE`](/commands/ssubscribe)). Added in Redis 8.0 +* `blocked_clients`: Number of clients pending on a blocking call ([`BLPOP`]({{< relref "/commands/blpop" >}}), + [`BRPOP`]({{< relref "/commands/brpop" >}}), [`BRPOPLPUSH`]({{< relref "/commands/brpoplpush" >}}), [`BLMOVE`]({{< relref "/commands/blmove" >}}), [`BZPOPMIN`]({{< relref "/commands/bzpopmin" >}}), [`BZPOPMAX`]({{< relref "/commands/bzpopmax" >}})) +* `tracking_clients`: Number of clients being tracked ([`CLIENT TRACKING`]({{< relref "/commands/client-tracking" >}})) +* `pubsub_clients`: Number of clients in pubsub mode ([`SUBSCRIBE`]({{< relref "/commands/subscribe" >}}), [`PSUBSCRIBE`]({{< relref "/commands/psubscribe" >}}), [`SSUBSCRIBE`]({{< relref "/commands/ssubscribe" >}})). Added in Redis 8.0 * `clients_in_timeout_table`: Number of clients in the clients timeout table * `total_blocking_keys`: Number of blocking keys. Added in Redis 7.2. * `total_blocking_keys_on_nokey`: Number of blocking keys that one or more clients that would like to be unblocked when the key is deleted. Added in Redis 7.2. 
@@ -184,7 +184,7 @@ Here is the meaning of all fields in the **memory** section: * `rss_overhead_bytes`: Delta between `used_memory_rss` (the process RSS) and `allocator_resident` * `allocator_allocated`: Total bytes allocated form the allocator, including internal-fragmentation. Normally the same as `used_memory`. * `allocator_active`: Total bytes in the allocator active pages, this includes external-fragmentation. -* `allocator_resident`: Total bytes resident (RSS) in the allocator, this includes pages that can be released to the OS (by [`MEMORY PURGE`](/commands/memory-purge), or just waiting). +* `allocator_resident`: Total bytes resident (RSS) in the allocator, this includes pages that can be released to the OS (by [`MEMORY PURGE`]({{< relref "/commands/memory-purge" >}}), or just waiting). * `mem_not_counted_for_evict`: Used memory that's not counted for key eviction. This is basically transient replica and AOF buffers. * `mem_clients_slaves`: Memory used by replica clients - Starting Redis 7.0, replica buffers share memory with the replication backlog, so this field can show 0 when replicas don't trigger an increase of memory usage. * `mem_clients_normal`: Memory used by normal clients @@ -195,7 +195,7 @@ Here is the meaning of all fields in the **memory** section: * `mem_allocator`: Memory allocator, chosen at compile time. * `active_defrag_running`: When `activedefrag` is enabled, this indicates whether defragmentation is currently active, and the CPU percentage it intends to utilize. 
* `lazyfree_pending_objects`: The number of objects waiting to be freed (as a - result of calling [`UNLINK`](/commands/unlink), or [`FLUSHDB`](/commands/flushdb) and [`FLUSHALL`](/commands/flushall) with the **ASYNC** + result of calling [`UNLINK`]({{< relref "/commands/unlink" >}}), or [`FLUSHDB`]({{< relref "/commands/flushdb" >}}) and [`FLUSHALL`]({{< relref "/commands/flushall" >}}) with the **ASYNC** option) * `lazyfreed_objects`: The number of objects that have been lazy freed. @@ -218,7 +218,7 @@ used and released by Redis, but not given back to the system. The `used_memory_peak` value is generally useful to check this point. Additional introspective information about the server's memory can be obtained -by referring to the [`MEMORY STATS`](/commands/memory-stats) command and the [`MEMORY DOCTOR`](/commands/memory-doctor). +by referring to the [`MEMORY STATS`]({{< relref "/commands/memory-stats" >}}) command and the [`MEMORY DOCTOR`]({{< relref "/commands/memory-doctor" >}}). Here is the meaning of all fields in the **persistence** section: @@ -264,8 +264,8 @@ Here is the meaning of all fields in the **persistence** section: * `rdb_saves`: Number of RDB snapshots performed since startup `rdb_changes_since_last_save` refers to the number of operations that produced -some kind of changes in the dataset since the last time either [`SAVE`](/commands/save) or -[`BGSAVE`](/commands/bgsave) was called. +some kind of changes in the dataset since the last time either [`SAVE`]({{< relref "/commands/save" >}}) or +[`BGSAVE`]({{< relref "/commands/bgsave" >}}) was called. If AOF is activated, these additional fields will be added: @@ -326,7 +326,7 @@ Here is the meaning of all fields in the **stats** section: * `pubsubshard_channels`: Global number of pub/sub shard channels with client subscriptions. 
Added in Redis 7.0.3 * `latest_fork_usec`: Duration of the latest fork operation in microseconds * `total_forks`: Total number of fork operations since the server start -* `migrate_cached_sockets`: The number of sockets open for [`MIGRATE`](/commands/migrate) purposes +* `migrate_cached_sockets`: The number of sockets open for [`MIGRATE`]({{< relref "/commands/migrate" >}}) purposes * `slave_expires_tracked_keys`: The number of keys tracked for expiry purposes (applicable only to writable replicas) * `active_defrag_hits`: Number of value reallocations performed by active the diff --git a/content/commands/json.arrappend/index.md b/content/commands/json.arrappend/index.md index 2311f2187c..aa5e393e1c 100644 --- a/content/commands/json.arrappend/index.md +++ b/content/commands/json.arrappend/index.md @@ -63,8 +63,8 @@ is JSONPath to specify. Default is root `$`. ## Return value -`JSON.ARRAPEND` returns an [array](/docs/reference/protocol-spec/#resp-arrays) of integer replies for each path, the array's new size, or `nil`, if the matching JSON value is not an array. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +`JSON.ARRAPPEND` returns an [array]({{< baseurl >}}/develop/reference/protocol-spec#resp-arrays) of integer replies for each path, the array's new size, or `nil`, if the matching JSON value is not an array. +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -96,9 +96,9 @@ redis> JSON.GET item:1 ## See also -[`JSON.ARRINDEX`](/commands/json.arrindex) | [`JSON.ARRINSERT`](/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.arrindex/index.md b/content/commands/json.arrindex/index.md index 7c5c30be00..8456fd42e4 100644 --- a/content/commands/json.arrindex/index.md +++ b/content/commands/json.arrindex/index.md @@ -87,8 +87,8 @@ Out-of-range indexes round to the array's start and end. An inverse index range ## Return value -`JSON.ARRINDEX` returns an [array](/docs/reference/protocol-spec/#resp-arrays) of integer replies for each path, the first position in the array of each JSON value that matches the path, `-1` if unfound in the array, or `nil`, if the matching JSON value is not an array. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +`JSON.ARRINDEX` returns an [array]({{< baseurl >}}/develop/reference/protocol-spec#resp-arrays) of integer replies for each path, the first position in the array of each JSON value that matches the path, `-1` if unfound in the array, or `nil`, if the matching JSON value is not an array. +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -147,10 +147,10 @@ redis> JSON.ARRINDEX item:1 $..colors '"silver"' ## See also -[`JSON.ARRAPPEND`](/commands/json.arrappend) | [`JSON.ARRINSERT`](/commands/json.arrinsert) +[`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.arrinsert/index.md b/content/commands/json.arrinsert/index.md index 4c2ff6de9a..d9c708b28a 100644 --- a/content/commands/json.arrinsert/index.md +++ b/content/commands/json.arrinsert/index.md @@ -69,8 +69,8 @@ is JSONPath to specify. Default is root `$`. ## Return value -`JSON.ARRINSERT` returns an [array](/docs/reference/protocol-spec/#resp-arrays) of integer replies for each path, the array's new size, or `nil`, if the matching JSON value is not an array. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +`JSON.ARRINSERT` returns an [array]({{< baseurl >}}/develop/reference/protocol-spec#resp-arrays) of integer replies for each path, the array's new size, or `nil`, if the matching JSON value is not an array. +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -122,9 +122,9 @@ redis> JSON.GET item:1 $.colors ## See also -[`JSON.ARRAPPEND`](/commands/json.arrappend) | [`JSON.ARRINDEX`](/commands/json.arrindex) +[`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend) | [`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.arrlen/index.md b/content/commands/json.arrlen/index.md index fab415f7a8..4d21d0ef1d 100644 --- a/content/commands/json.arrlen/index.md +++ b/content/commands/json.arrlen/index.md @@ -49,8 +49,8 @@ is JSONPath to specify. Default is root `$`, if not provided. Returns null if th ## Return -`JSON.ARRLEN` returns an [array](/docs/reference/protocol-spec/#resp-arrays) of integer replies, an integer for each matching value, each is the array's length, or `nil`, if the matching value is not an array. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +`JSON.ARRLEN` returns an [array]({{< baseurl >}}/develop/reference/protocol-spec#resp-arrays) of integer replies, an integer for each matching value, each is the array's length, or `nil`, if the matching value is not an array. +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -95,9 +95,9 @@ redis> JSON.GET item:2 '$..max_level' ## See also -[`JSON.ARRINDEX`](/commands/json.arrindex) | [`JSON.ARRINSERT`](/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.arrpop/index.md b/content/commands/json.arrpop/index.md index 482fa387e2..b10206ed54 100644 --- a/content/commands/json.arrpop/index.md +++ b/content/commands/json.arrpop/index.md @@ -63,8 +63,8 @@ is JSONPath to specify. Default is root `$`. ## Return -`JSON.ARRPOP` returns an [array](/docs/reference/protocol-spec/#resp-arrays) of bulk string replies for each path, each reply is the popped JSON value, or `nil`, if the matching JSON value is not an array. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +`JSON.ARRPOP` returns an [array]({{< baseurl >}}/develop/reference/protocol-spec#resp-arrays) of bulk string replies for each path, each reply is the popped JSON value, or `nil`, if the matching JSON value is not an array. +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -116,9 +116,9 @@ redis> JSON.GET key $.[1].max_level ## See also -[`JSON.ARRAPPEND`](/commands/json.arrappend) | [`JSON.ARRINDEX`](/commands/json.arrindex) +[`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend) | [`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.arrtrim/index.md b/content/commands/json.arrtrim/index.md index f4befcc3ad..6c7c747c89 100644 --- a/content/commands/json.arrtrim/index.md +++ b/content/commands/json.arrtrim/index.md @@ -77,7 +77,7 @@ Behavior as of RedisJSON v2.0: ## Return JSON.ARRTRIM returns an array of integer replies for each path, the array's new size, or `nil`, if the matching JSON value is not an array. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -123,10 +123,10 @@ redis> JSON.GET key $.[1].max_level ## See also -[`JSON.ARRINDEX`](/commands/json.arrindex) | [`JSON.ARRINSERT`](/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.clear/index.md b/content/commands/json.clear/index.md index aa6e6545f3..dcd28c1b58 100644 --- a/content/commands/json.clear/index.md +++ b/content/commands/json.clear/index.md @@ -52,7 +52,7 @@ is JSONPath to specify. Default is root `$`. Nonexisting paths are ignored. ## Return JSON.CLEAR returns an integer reply specifying the number of matching JSON arrays and objects cleared + number of matching JSON numerical values zeroed. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
{{% alert title="Note" color="warning" %}} @@ -89,10 +89,10 @@ redis> JSON.GET doc $ ## See also -[`JSON.ARRINDEX`](/commands/json.arrindex) | [`JSON.ARRINSERT`](/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.debug-help/index.md b/content/commands/json.debug-help/index.md index 2b00d972d5..ecd5a0e0c2 100644 --- a/content/commands/json.debug-help/index.md +++ b/content/commands/json.debug-help/index.md @@ -22,7 +22,7 @@ syntax_fmt: JSON.DEBUG HELP syntax_str: '' title: JSON.DEBUG HELP --- -Return helpful information about the [`JSON.DEBUG`](/commands/json.debug) command +Return helpful information about the [`JSON.DEBUG`]({{< baseurl >}}/commands/json.debug) command ## Return @@ -30,9 +30,9 @@ JSON.DEBUG HELP returns an array with helpful messages. ## See also -[`JSON.DEBUG`](/commands/json.debug) +[`JSON.DEBUG`]({{< baseurl >}}/commands/json.debug) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.debug-memory/index.md b/content/commands/json.debug-memory/index.md index ff7bdb8452..4d933a6f7f 100644 --- a/content/commands/json.debug-memory/index.md +++ b/content/commands/json.debug-memory/index.md @@ -51,7 +51,7 @@ is JSONPath to specify. Default is root `$`. ## Return JSON.DEBUG MEMORY returns an integer reply specified as the value size in bytes. 
-For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). ## Examples @@ -75,10 +75,10 @@ redis> JSON.DEBUG MEMORY item:2 ## See also -[`JSON.SET`](/commands/json.set) | [`JSON.ARRLEN`](/commands/json.arrlen) +[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.del/index.md b/content/commands/json.del/index.md index 00833c0579..770574bb67 100644 --- a/content/commands/json.del/index.md +++ b/content/commands/json.del/index.md @@ -57,7 +57,7 @@ Deleting an object's root is equivalent to deleting the key from Redis. ## Return JSON.DEL returns an integer reply specified as the number of paths deleted (0 or more). -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -88,12 +88,12 @@ redis> JSON.GET doc $ ## See also -[`JSON.SET`](/commands/json.set) | [`JSON.ARRLEN`](/commands/json.arrlen) +[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.forget/index.md b/content/commands/json.forget/index.md index 9d79edcb99..9359457efb 100644 --- a/content/commands/json.forget/index.md +++ b/content/commands/json.forget/index.md @@ -30,4 +30,4 @@ syntax_fmt: JSON.FORGET key [path] syntax_str: '[path]' title: JSON.FORGET --- -See [`JSON.DEL`](/commands/json.del). \ No newline at end of file +See [`JSON.DEL`]({{< baseurl >}}/commands/json.del). \ No newline at end of file diff --git a/content/commands/json.get/index.md b/content/commands/json.get/index.md index 075b6de315..2c5f82d0db 100644 --- a/content/commands/json.get/index.md +++ b/content/commands/json.get/index.md @@ -39,9 +39,8 @@ module: JSON since: 1.0.0 stack_path: docs/data-types/json summary: Gets the value at one or more paths in JSON serialized form -syntax_fmt: "JSON.GET key [INDENT\_indent] [NEWLINE\_newline] [SPACE\_space] [path -\ - \ [path ...]]" +syntax_fmt: "JSON.GET key [INDENT\_indent] [NEWLINE\_newline] [SPACE\_space] [path\ + \ [path ...]]" syntax_str: "[INDENT\_indent] [NEWLINE\_newline] [SPACE\_space] [path [path ...]]" title: JSON.GET --- @@ -107,7 +106,7 @@ JSON.GET returns a bulk string representing a JSON array of string replies. Each string is the JSON serialization of each JSON value that matches a path. Using multiple paths, JSON.GET returns a bulk string representing a JSON object with string values. 
Each string value is an array of the JSON serialization of each JSON value that matches a path. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). ## Examples @@ -138,9 +137,9 @@ redis> JSON.GET doc ..a $..b ## See also -[`JSON.SET`](/commands/json.set) | [`JSON.MGET`](/commands/json.mget) +[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.merge/index.md b/content/commands/json.merge/index.md index 4df457a8f7..ef6de1b01d 100644 --- a/content/commands/json.merge/index.md +++ b/content/commands/json.merge/index.md @@ -66,7 +66,7 @@ is JSON value to merge with at the specified path. Merging is done according to JSET.MERGE returns a simple string reply: `OK` if executed correctly or `error` if fails to set the new values -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -147,10 +147,10 @@ redis> JSON.GET doc ## See also -[`JSON.GET`](/commands/json.get) | [`JSON.MGET`](/commands/json.mget) | [`JSON.SET`](/commands/json.set) | [`JSON.MSET`](/commands/json.mset) +[`JSON.GET`]({{< baseurl >}}/commands/json.get) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget) | [`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.MSET`]({{< baseurl >}}/commands/json.mset) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.mget/index.md b/content/commands/json.mget/index.md index 9dc862528d..990b258ece 100644 --- a/content/commands/json.mget/index.md +++ b/content/commands/json.mget/index.md @@ -52,7 +52,7 @@ is JSONPath to specify. Returns `null` for nonexistent paths. ## Return JSON.MGET returns an array of bulk string replies specified as the JSON serialization of the value at each key's path. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -79,9 +79,9 @@ redis> JSON.MGET doc1 doc2 $..a ## See also -[`JSON.SET`](/commands/json.set) | [`JSON.GET`](/commands/json.get) +[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.GET`]({{< baseurl >}}/commands/json.get) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.mset/index.md b/content/commands/json.mset/index.md index 385e27dd6c..ce25baf1ec 100644 --- a/content/commands/json.mset/index.md +++ b/content/commands/json.mset/index.md @@ -68,7 +68,7 @@ is value to set at the specified path JSET.MSET returns a simple string reply: `OK` if executed correctly or `error` if fails to set the new values -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -91,9 +91,9 @@ redis> JSON.GET doc3 ## See also -[`JSON.SET`](/commands/json.set) | [`JSON.MGET`](/commands/json.mget) | [`JSON.GET`](/commands/json.get) +[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget) | [`JSON.GET`]({{< baseurl >}}/commands/json.get) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.numincrby/index.md b/content/commands/json.numincrby/index.md index a6f092f162..09a0c714cf 100644 --- a/content/commands/json.numincrby/index.md +++ b/content/commands/json.numincrby/index.md @@ -54,7 +54,7 @@ is number value to increment. ## Return JSON.NUMINCRBY returns a bulk string reply specified as a stringified new value for each path, or `nil`, if the matching JSON value is not a number. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -86,9 +86,9 @@ redis> JSON.NUMINCRBY doc $..a 2 ## See also -[`JSON.ARRINDEX`](/commands/json.arrindex) | [`JSON.ARRINSERT`](/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.nummultby/index.md b/content/commands/json.nummultby/index.md index 7890e803c2..ca0b885bd4 100644 --- a/content/commands/json.nummultby/index.md +++ b/content/commands/json.nummultby/index.md @@ -57,7 +57,7 @@ is JSONPath to specify. Default is root `$`. ## Return JSON.NUMMULTBY returns a bulk string reply specified as a stringified new values for each path, or `nil` element if the matching JSON value is not a number. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -72,9 +72,9 @@ redis> JSON.NUMMULTBY doc $..a 2 ## See also -[`JSON.NUMINCRBY`](/commands/json.numincrby) | [`JSON.ARRINSERT`](/commands/json.arrinsert) +[`JSON.NUMINCRBY`]({{< baseurl >}}/commands/json.numincrby) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.objkeys/index.md b/content/commands/json.objkeys/index.md index 2ef51362c7..3f83def2b2 100644 --- a/content/commands/json.objkeys/index.md +++ b/content/commands/json.objkeys/index.md @@ -52,7 +52,7 @@ is JSONPath to specify. Default is root `$`. Returns `null` for nonexistant path ## Return JSON.OBJKEYS returns an array of array replies for each path, an array of the key names in the object as a bulk string reply, or `nil` if the matching JSON value is not an object. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -67,9 +67,9 @@ redis> JSON.OBJKEYS doc $..a ## See also -[`JSON.ARRINDEX`](/commands/json.arrindex) | [`JSON.ARRINSERT`](/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.objlen/index.md b/content/commands/json.objlen/index.md index 5a796cb6ae..726b8145e4 100644 --- a/content/commands/json.objlen/index.md +++ b/content/commands/json.objlen/index.md @@ -51,7 +51,7 @@ is JSONPath to specify. Default is root `$`. Returns `null` for nonexistant path ## Return JSON.OBJLEN returns an array of integer replies for each path specified as the number of keys in the object or `nil`, if the matching JSON value is not an object. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -65,9 +65,9 @@ redis> JSON.OBJLEN doc $..a ## See also -[`JSON.ARRINDEX`](/commands/json.arrindex) | [`JSON.ARRINSERT`](/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.resp/index.md b/content/commands/json.resp/index.md index bb32a9fb06..2dd9719e88 100644 --- a/content/commands/json.resp/index.md +++ b/content/commands/json.resp/index.md @@ -31,7 +31,7 @@ syntax_fmt: JSON.RESP key [path] syntax_str: '[path]' title: JSON.RESP --- -Return the JSON in `key` in [Redis serialization protocol specification](/docs/reference/protocol-spec) form +Return the JSON in `key` in [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}) form [Examples](#examples) @@ -55,12 +55,12 @@ is JSONPath to specify. Default is root `$`. This command uses the following map * JSON array is represented as an array reply in which the first element is the simple string reply `[`, followed by the array's elements. * JSON object is represented as an array reply in which the first element is the simple string reply `{`. Each successive entry represents a key-value pair as a two-entry array reply of the bulk string reply. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}).
## Return -JSON.RESP returns an array reply specified as the JSON's RESP form detailed in [Redis serialization protocol specification](/docs/reference/protocol-spec). +JSON.RESP returns an array reply specified as the JSON's RESP form detailed in [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). ## Examples @@ -107,9 +107,9 @@ redis> JSON.RESP item:2 ## See also -[`JSON.SET`](/commands/json.set) | [`JSON.ARRLEN`](/commands/json.arrlen) +[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.set/index.md b/content/commands/json.set/index.md index d500ef2c9c..fa7f4c6a96 100644 --- a/content/commands/json.set/index.md +++ b/content/commands/json.set/index.md @@ -80,7 +80,7 @@ sets the key only if it already exists. ## Return value JSET.SET returns a simple string reply: `OK` if executed correctly or `nil` if the specified `NX` or `XX` conditions were not met. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -125,9 +125,9 @@ redis> JSON.GET doc ## See also -[`JSON.GET`](/commands/json.get) | [`JSON.MGET`](/commands/json.mget) +[`JSON.GET`]({{< baseurl >}}/commands/json.get) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.strappend/index.md b/content/commands/json.strappend/index.md index aad2c04308..2625622112 100644 --- a/content/commands/json.strappend/index.md +++ b/content/commands/json.strappend/index.md @@ -61,7 +61,7 @@ is JSONPath to specify. Default is root `$`. ## Return value JSON.STRAPPEND returns an array of integer replies for each path, the string's new length, or `nil`, if the matching JSON value is not a string. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). ## Examples @@ -78,10 +78,10 @@ redis> JSON.GET doc $ ## See also -`JSON.ARRAPEND` | [`JSON.ARRINSERT`](/commands/json.arrinsert) +`JSON.ARRAPPEND` | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.strlen/index.md b/content/commands/json.strlen/index.md index 56b331d499..1f7f6d0be0 100644 --- a/content/commands/json.strlen/index.md +++ b/content/commands/json.strlen/index.md @@ -50,7 +50,7 @@ is JSONPath to specify. 
Default is root `$`, if not provided. Returns null if th ## Return JSON.STRLEN returns by recursive descent an array of integer replies for each path, the array's length, or `nil`, if the matching JSON value is not a string. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). ## Examples @@ -65,9 +65,9 @@ redis> JSON.STRLEN doc $..a ## See also -[`JSON.ARRLEN`](/commands/json.arrlen) | [`JSON.ARRINSERT`](/commands/json.arrinsert) +[`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.toggle/index.md b/content/commands/json.toggle/index.md index d9654d69d5..594cf616f2 100644 --- a/content/commands/json.toggle/index.md +++ b/content/commands/json.toggle/index.md @@ -50,7 +50,7 @@ is JSONPath to specify. Default is root `$`. ## Return JSON.TOGGLE returns an array of integer replies for each path, the new value (`0` if `false` or `1` if `true`), or `nil` for JSON values matching the path that are not Boolean. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -95,10 +95,10 @@ redis> JSON.GET doc $ ## See also -[`JSON.SET`](/commands/json.set) | [`JSON.GET`](/commands/json.get) +[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.GET`]({{< baseurl >}}/commands/json.get) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/json.type/index.md b/content/commands/json.type/index.md index 46f8cb8cc3..6fdad08ac5 100644 --- a/content/commands/json.type/index.md +++ b/content/commands/json.type/index.md @@ -51,7 +51,7 @@ is JSONPath to specify. Default is root `$`. Returns null if the `key` or `path` ## Return JSON.TYPE returns an array of string replies for each path, specified as the value's type. -For more information about replies, see [Redis serialization protocol specification](/docs/reference/protocol-spec). +For more information about replies, see [Redis serialization protocol specification]({{< relref "/develop/reference/protocol-spec" >}}). 
## Examples @@ -69,9 +69,9 @@ redis> JSON.TYPE doc $..dummy ## See also -[`JSON.SET`](/commands/json.set) | [`JSON.ARRLEN`](/commands/json.arrlen) +[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen) ## Related topics -* [RedisJSON](/docs/stack/json) -* [Index and search JSON documents](/docs/stack/search/indexing_json) +* [RedisJSON]({{< relref "/develop/data-types/json/" >}}) +* [Index and search JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) diff --git a/content/commands/keys/index.md b/content/commands/keys/index.md index 8a500f1511..23990e7a91 100644 --- a/content/commands/keys/index.md +++ b/content/commands/keys/index.md @@ -50,7 +50,7 @@ This command is intended for debugging and special operations, such as changing your keyspace layout. Don't use `KEYS` in your regular application code. If you're looking for a way to find keys in a subset of your keyspace, consider -using [`SCAN`](/commands/scan) or [sets][tdts]. +using [`SCAN`]({{< relref "/commands/scan" >}}) or [sets][tdts]. [tdts]: /topics/data-types#sets diff --git a/content/commands/lastsave/index.md b/content/commands/lastsave/index.md index 546e64842e..0772e1d659 100644 --- a/content/commands/lastsave/index.md +++ b/content/commands/lastsave/index.md @@ -32,6 +32,6 @@ syntax_str: '' title: LASTSAVE --- Return the UNIX TIME of the last DB save executed with success. -A client may check if a [`BGSAVE`](/commands/bgsave) command succeeded reading the `LASTSAVE` value, -then issuing a [`BGSAVE`](/commands/bgsave) command and checking at regular intervals every N +A client may check if a [`BGSAVE`]({{< relref "/commands/bgsave" >}}) command succeeded reading the `LASTSAVE` value, +then issuing a [`BGSAVE`]({{< relref "/commands/bgsave" >}}) command and checking at regular intervals every N seconds if `LASTSAVE` changed. Redis considers the database saved successfully at startup. 
diff --git a/content/commands/latency-graph/index.md b/content/commands/latency-graph/index.md
index 4f5f9cfa68..d21c5f03dd 100644
--- a/content/commands/latency-graph/index.md
+++ b/content/commands/latency-graph/index.md
@@ -40,7 +40,7 @@ title: LATENCY GRAPH
---
Produces an ASCII-art style graph for the specified event.

-`LATENCY GRAPH` lets you intuitively understand the latency trend of an `event` via state-of-the-art visualization. It can be used for quickly grasping the situation before resorting to means such parsing the raw data from [`LATENCY HISTORY`](/commands/latency-history) or external tooling.
+`LATENCY GRAPH` lets you intuitively understand the latency trend of an `event` via state-of-the-art visualization. It can be used for quickly grasping the situation before resorting to means such as parsing the raw data from [`LATENCY HISTORY`]({{< relref "/commands/latency-history" >}}) or external tooling.

Valid values for `event` are:
* `active-defrag-cycle`
diff --git a/content/commands/latency-histogram/index.md b/content/commands/latency-histogram/index.md
index afcd39ccc1..a1e45016d2 100644
--- a/content/commands/latency-histogram/index.md
+++ b/content/commands/latency-histogram/index.md
@@ -61,7 +61,7 @@ Each histogram consists of the following fields:
This command requires the extended latency monitoring feature to be enabled, which is the default.
If you need to enable it, call `CONFIG SET latency-tracking yes`.

-To delete the latency histograms' data use the [`CONFIG RESETSTAT`](/commands/config-resetstat) command.
+To delete the latency histograms' data use the [`CONFIG RESETSTAT`]({{< relref "/commands/config-resetstat" >}}) command.
## Examples diff --git a/content/commands/latency-latest/index.md b/content/commands/latency-latest/index.md index 2dd2e6791d..71c539094e 100644 --- a/content/commands/latency-latest/index.md +++ b/content/commands/latency-latest/index.md @@ -44,7 +44,7 @@ Each reported event has the following fields: * All-time maximum latency for this event. "All-time" means the maximum latency since the Redis instance was -started, or the time that events were reset [`LATENCY RESET`](/commands/latency-reset). +started, or the time that events were reset [`LATENCY RESET`]({{< relref "/commands/latency-reset" >}}). ## Examples diff --git a/content/commands/latency/index.md b/content/commands/latency/index.md index f60486c7a6..314cac54ef 100644 --- a/content/commands/latency/index.md +++ b/content/commands/latency/index.md @@ -25,4 +25,4 @@ title: LATENCY --- This is a container command for latency diagnostics commands. -To see the list of available commands you can call [`LATENCY HELP`](/commands/latency-help). \ No newline at end of file +To see the list of available commands you can call [`LATENCY HELP`]({{< relref "/commands/latency-help" >}}). \ No newline at end of file diff --git a/content/commands/lmove/index.md b/content/commands/lmove/index.md index 0bbdbd7a9a..9abba740c0 100644 --- a/content/commands/lmove/index.md +++ b/content/commands/lmove/index.md @@ -104,7 +104,7 @@ removing the first/last element from the list and pushing it as first/last element of the list, so it can be considered as a list rotation command (or a no-op if `wherefrom` is the same as `whereto`). -This command comes in place of the now deprecated [`RPOPLPUSH`](/commands/rpoplpush). Doing +This command comes in place of the now deprecated [`RPOPLPUSH`]({{< relref "/commands/rpoplpush" >}}). Doing `LMOVE RIGHT LEFT` is equivalent. 
## Examples @@ -125,18 +125,18 @@ LRANGE myotherlist 0 -1 Redis is often used as a messaging server to implement processing of background jobs or other kinds of messaging tasks. A simple form of queue is often obtained pushing values into a list in the -producer side, and waiting for this values in the consumer side using [`RPOP`](/commands/rpop) -(using polling), or [`BRPOP`](/commands/brpop) if the client is better served by a blocking +producer side, and waiting for this values in the consumer side using [`RPOP`]({{< relref "/commands/rpop" >}}) +(using polling), or [`BRPOP`]({{< relref "/commands/brpop" >}}) if the client is better served by a blocking operation. However in this context the obtained queue is not _reliable_ as messages can be lost, for example in the case there is a network problem or if the consumer crashes just after the message is received but it is still to process. -`LMOVE` (or [`BLMOVE`](/commands/blmove) for the blocking variant) offers a way to avoid +`LMOVE` (or [`BLMOVE`]({{< relref "/commands/blmove" >}}) for the blocking variant) offers a way to avoid this problem: the consumer fetches the message and at the same time pushes it into a _processing_ list. -It will use the [`LREM`](/commands/lrem) command in order to remove the message from the +It will use the [`LREM`]({{< relref "/commands/lrem" >}}) command in order to remove the message from the _processing_ list once the message has been processed. An additional client may monitor the _processing_ list for items that remain @@ -147,7 +147,7 @@ again if needed. Using `LMOVE` with the same source and destination key, a client can visit all the elements of an N-elements list, one after the other, in O(N) without -transferring the full list from the server to the client using a single [`LRANGE`](/commands/lrange) +transferring the full list from the server to the client using a single [`LRANGE`]({{< relref "/commands/lrange" >}}) operation. 
The above pattern works even in the following conditions: diff --git a/content/commands/lmpop/index.md b/content/commands/lmpop/index.md index ddb3d40207..6cb5b573e0 100644 --- a/content/commands/lmpop/index.md +++ b/content/commands/lmpop/index.md @@ -72,12 +72,12 @@ title: LMPOP --- Pops one or more elements from the first non-empty list key from the list of provided key names. -`LMPOP` and [`BLMPOP`](/commands/blmpop) are similar to the following, more limited, commands: +`LMPOP` and [`BLMPOP`]({{< relref "/commands/blmpop" >}}) are similar to the following, more limited, commands: -- [`LPOP`](/commands/lpop) or [`RPOP`](/commands/rpop) which take only one key, and can return multiple elements. -- [`BLPOP`](/commands/blpop) or [`BRPOP`](/commands/brpop) which take multiple keys, but return only one element from just one key. +- [`LPOP`]({{< relref "/commands/lpop" >}}) or [`RPOP`]({{< relref "/commands/rpop" >}}) which take only one key, and can return multiple elements. +- [`BLPOP`]({{< relref "/commands/blpop" >}}) or [`BRPOP`]({{< relref "/commands/brpop" >}}) which take multiple keys, but return only one element from just one key. -See [`BLMPOP`](/commands/blmpop) for the blocking variant of this command. +See [`BLMPOP`]({{< relref "/commands/blmpop" >}}) for the blocking variant of this command. Elements are popped from either the left or right of the first non-empty list based on the passed argument. The number of returned elements is limited to the lower between the non-empty list's length, and the count argument (which defaults to 1). diff --git a/content/commands/lpushx/index.md b/content/commands/lpushx/index.md index 01c2d4f34a..ec80cdfc05 100644 --- a/content/commands/lpushx/index.md +++ b/content/commands/lpushx/index.md @@ -57,7 +57,7 @@ title: LPUSHX --- Inserts specified values at the head of the list stored at `key`, only if `key` already exists and holds a list. 
-In contrary to [`LPUSH`](/commands/lpush), no operation will be performed when `key` does not yet
+In contrast to [`LPUSH`]({{< relref "/commands/lpush" >}}), no operation will be performed when `key` does not yet
exist.

## Examples
diff --git a/content/commands/lset/index.md b/content/commands/lset/index.md
index 44a9626cb2..e269f1546a 100644
--- a/content/commands/lset/index.md
+++ b/content/commands/lset/index.md
@@ -54,7 +54,7 @@ syntax_str: index element
title: LSET
---
Sets the list element at `index` to `element`.
-For more information on the `index` argument, see [`LINDEX`](/commands/lindex).
+For more information on the `index` argument, see [`LINDEX`]({{< relref "/commands/lindex" >}}).

An error is returned for out of range indexes.
diff --git a/content/commands/ltrim/index.md b/content/commands/ltrim/index.md
index 0d80a5705c..72c8acc840 100644
--- a/content/commands/ltrim/index.md
+++ b/content/commands/ltrim/index.md
@@ -71,7 +71,7 @@ causes `key` to be removed).
If `end` is larger than the end of the list, Redis will treat it like the last
element of the list.

-A common use of `LTRIM` is together with [`LPUSH`](/commands/lpush) / [`RPUSH`](/commands/rpush).
+A common use of `LTRIM` is together with [`LPUSH`]({{< relref "/commands/lpush" >}}) / [`RPUSH`]({{< relref "/commands/rpush" >}}).
For example:

```
diff --git a/content/commands/memory-stats/index.md b/content/commands/memory-stats/index.md
index 0507e577bb..6c859fccdb 100644
--- a/content/commands/memory-stats/index.md
+++ b/content/commands/memory-stats/index.md
@@ -27,25 +27,25 @@ syntax_fmt: MEMORY STATS
syntax_str: ''
title: MEMORY STATS
---
-The `MEMORY STATS` command returns an [Array reply](/docs/reference/protocol-spec#arrays) about the memory usage of the
+The `MEMORY STATS` command returns an [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) about the memory usage of the
server.
The information about memory usage is provided as metrics and their respective values. The following metrics are reported: -* `peak.allocated`: Peak memory consumed by Redis in bytes (see [`INFO`](/commands/info)'s +* `peak.allocated`: Peak memory consumed by Redis in bytes (see [`INFO`]({{< relref "/commands/info" >}})'s `used_memory_peak`) * `total.allocated`: Total number of bytes allocated by Redis using its - allocator (see [`INFO`](/commands/info)'s `used_memory`) + allocator (see [`INFO`]({{< relref "/commands/info" >}})'s `used_memory`) * `startup.allocated`: Initial amount of memory consumed by Redis at startup - in bytes (see [`INFO`](/commands/info)'s `used_memory_startup`) + in bytes (see [`INFO`]({{< relref "/commands/info" >}})'s `used_memory_startup`) * `replication.backlog`: Size in bytes of the replication backlog (see - [`INFO`](/commands/info)'s `repl_backlog_active`) + [`INFO`]({{< relref "/commands/info" >}})'s `repl_backlog_active`) * `clients.slaves`: The total size in bytes of all replicas overheads (output and query buffers, connection contexts) * `clients.normal`: The total size in bytes of all clients overheads (output and query buffers, connection contexts) -* `cluster.links`: Memory usage by cluster links (Added in Redis 7.0, see [`INFO`](/commands/info)'s `mem_cluster_links`). +* `cluster.links`: Memory usage by cluster links (Added in Redis 7.0, see [`INFO`]({{< relref "/commands/info" >}})'s `mem_cluster_links`). * `aof.buffer`: The summed size in bytes of AOF related buffers. * `lua.caches`: the summed size in bytes of the overheads of the Lua scripts' caches @@ -55,16 +55,16 @@ values. The following metrics are reported: * `overhead.total`: The sum of all overheads, i.e. 
`startup.allocated`, `replication.backlog`, `clients.slaves`, `clients.normal`, `aof.buffer` and those of the internal data structures that are used in managing the - Redis keyspace (see [`INFO`](/commands/info)'s `used_memory_overhead`) + Redis keyspace (see [`INFO`]({{< relref "/commands/info" >}})'s `used_memory_overhead`) * `keys.count`: The total number of keys stored across all databases in the server * `keys.bytes-per-key`: The ratio between `dataset.bytes` and `keys.count` * `dataset.bytes`: The size in bytes of the dataset, i.e. `overhead.total` - subtracted from `total.allocated` (see [`INFO`](/commands/info)'s `used_memory_dataset`) + subtracted from `total.allocated` (see [`INFO`]({{< relref "/commands/info" >}})'s `used_memory_dataset`) * `dataset.percentage`: The percentage of `dataset.bytes` out of the total memory usage * `peak.percentage`: The percentage of `total.allocated` out of `peak.allocated` -* `fragmentation`: See [`INFO`](/commands/info)'s `mem_fragmentation_ratio` +* `fragmentation`: See [`INFO`]({{< relref "/commands/info" >}})'s `mem_fragmentation_ratio` **A note about the word slave used in this man page**: Starting with Redis 5, if not for backward compatibility, the Redis project no longer uses the word slave. Unfortunately in this command the word slave is part of the protocol, so we'll be able to remove such occurrences only when this API will be naturally deprecated. diff --git a/content/commands/memory/index.md b/content/commands/memory/index.md index dd4975f16c..9a1d52e536 100644 --- a/content/commands/memory/index.md +++ b/content/commands/memory/index.md @@ -25,4 +25,4 @@ title: MEMORY --- This is a container command for memory introspection and management commands. -To see the list of available commands you can call [`MEMORY HELP`](/commands/memory-help). +To see the list of available commands you can call [`MEMORY HELP`]({{< relref "/commands/memory-help" >}}). 
diff --git a/content/commands/migrate/index.md b/content/commands/migrate/index.md index 43c68b382c..c9e8f0d2b3 100644 --- a/content/commands/migrate/index.md +++ b/content/commands/migrate/index.md @@ -131,9 +131,8 @@ key_specs: linkTitle: MIGRATE since: 2.6.0 summary: Atomically transfers a key from one Redis instance to another. -syntax_fmt: "MIGRATE host port destination-db timeout [COPY] [REPLACE] -\ - \ [AUTH\_password | AUTH2\_username password] [KEYS\_key [key ...]]" +syntax_fmt: "MIGRATE host port destination-db timeout [COPY] [REPLACE]\ + \ [AUTH\_password | AUTH2\_username password] [KEYS\_key [key ...]]" syntax_str: "port destination-db timeout [COPY] [REPLACE] [AUTH\_password\ \ | AUTH2\_username password] [KEYS\_key [key ...]]" title: MIGRATE @@ -149,11 +148,11 @@ instance or in the other instance, unless a timeout error occurs. In 3.2 and above, multiple keys can be pipelined in a single call to `MIGRATE` by passing the empty string ("") as key and adding the `KEYS` clause. -The command internally uses [`DUMP`](/commands/dump) to generate the serialized version of the key -value, and [`RESTORE`](/commands/restore) in order to synthesize the key in the target instance. +The command internally uses [`DUMP`]({{< relref "/commands/dump" >}}) to generate the serialized version of the key +value, and [`RESTORE`]({{< relref "/commands/restore" >}}) in order to synthesize the key in the target instance. The source instance acts as a client for the target instance. -If the target instance returns OK to the [`RESTORE`](/commands/restore) command, the source instance -deletes the key using [`DEL`](/commands/del). +If the target instance returns OK to the [`RESTORE`]({{< relref "/commands/restore" >}}) command, the source instance +deletes the key using [`DEL`]({{< relref "/commands/del" >}}). The timeout specifies the maximum idle time in any moment of the communication with the destination instance in milliseconds. 
diff --git a/content/commands/module-loadex/index.md b/content/commands/module-loadex/index.md index 1d741d8492..f2fa41480f 100644 --- a/content/commands/module-loadex/index.md +++ b/content/commands/module-loadex/index.md @@ -48,15 +48,14 @@ hidden: false linkTitle: MODULE LOADEX since: 7.0.0 summary: Loads a module using extended parameters. -syntax_fmt: "MODULE LOADEX path [CONFIG\_name value [CONFIG name value ...]] - [ARGS\_\ +syntax_fmt: "MODULE LOADEX path [CONFIG\_name value [CONFIG name value ...]] [ARGS\_\ args [args ...]]" syntax_str: "[CONFIG\_name value [CONFIG name value ...]] [ARGS\_args [args ...]]" title: MODULE LOADEX --- Loads a module from a dynamic library at runtime with configuration directives. -This is an extended version of the [`MODULE LOAD`](/commands/module-load) command. +This is an extended version of the [`MODULE LOAD`]({{< relref "/commands/module-load" >}}) command. It loads and initializes the Redis module from the dynamic library specified by the `path` argument. The `path` should be the absolute path of the library, including the full filename. diff --git a/content/commands/module-unload/index.md b/content/commands/module-unload/index.md index 7112bb041c..631a75e37f 100644 --- a/content/commands/module-unload/index.md +++ b/content/commands/module-unload/index.md @@ -36,7 +36,7 @@ title: MODULE UNLOAD Unloads a module. This command unloads the module specified by `name`. Note that the module's name -is reported by the [`MODULE LIST`](/commands/module-list) command, and may differ from the dynamic +is reported by the [`MODULE LIST`]({{< relref "/commands/module-list" >}}) command, and may differ from the dynamic library's filename. 
Known limitations: diff --git a/content/commands/module/index.md b/content/commands/module/index.md index adc0baae3e..c34e594554 100644 --- a/content/commands/module/index.md +++ b/content/commands/module/index.md @@ -25,4 +25,4 @@ title: MODULE --- This is a container command for module management commands. -To see the list of available commands you can call [`MODULE HELP`](/commands/module-help). +To see the list of available commands you can call [`MODULE HELP`]({{< relref "/commands/module-help" >}}). diff --git a/content/commands/monitor/index.md b/content/commands/monitor/index.md index 0f697c10c8..72dc149e0a 100644 --- a/content/commands/monitor/index.md +++ b/content/commands/monitor/index.md @@ -69,15 +69,15 @@ QUIT Connection closed by foreign host. ``` -Manually issue the [`QUIT`](/commands/quit) or [`RESET`](/commands/reset) commands to stop a `MONITOR` stream running +Manually issue the [`QUIT`]({{< relref "/commands/quit" >}}) or [`RESET`]({{< relref "/commands/reset" >}}) commands to stop a `MONITOR` stream running via `telnet`. ## Commands not logged by MONITOR Because of security concerns, no administrative commands are logged -by `MONITOR`'s output and sensitive data is redacted in the command [`AUTH`](/commands/auth). +by `MONITOR`'s output and sensitive data is redacted in the command [`AUTH`]({{< relref "/commands/auth" >}}). -Furthermore, the command [`QUIT`](/commands/quit) is also not logged. +Furthermore, the command [`QUIT`]({{< relref "/commands/quit" >}}) is also not logged. ## Cost of running MONITOR @@ -113,6 +113,6 @@ Running more `MONITOR` clients will reduce throughput even more. ## Behavior change history -* `>= 6.0.0`: [`AUTH`](/commands/auth) excluded from the command's output. -* `>= 6.2.0`: "[`RESET`](/commands/reset) can be called to exit monitor mode. 
-* `>= 6.2.4`: "[`AUTH`](/commands/auth), [`HELLO`](/commands/hello), [`EVAL`](/commands/eval), [`EVAL_RO`](/commands/eval_ro), [`EVALSHA`](/commands/evalsha) and [`EVALSHA_RO`](/commands/evalsha_ro) included in the command's output.
\ No newline at end of file
+* `>= 6.0.0`: [`AUTH`]({{< relref "/commands/auth" >}}) excluded from the command's output.
+* `>= 6.2.0`: [`RESET`]({{< relref "/commands/reset" >}}) can be called to exit monitor mode.
+* `>= 6.2.4`: [`AUTH`]({{< relref "/commands/auth" >}}), [`HELLO`]({{< relref "/commands/hello" >}}), [`EVAL`]({{< relref "/commands/eval" >}}), [`EVAL_RO`]({{< relref "/commands/eval_ro" >}}), [`EVALSHA`]({{< relref "/commands/evalsha" >}}) and [`EVALSHA_RO`]({{< relref "/commands/evalsha_ro" >}}) included in the command's output.
\ No newline at end of file
diff --git a/content/commands/move/index.md b/content/commands/move/index.md
index 2c0d023039..bbb6761f3e 100644
--- a/content/commands/move/index.md
+++ b/content/commands/move/index.md
@@ -50,7 +50,7 @@ syntax_fmt: MOVE key db
syntax_str: db
title: MOVE
---
-Move `key` from the currently selected database (see [`SELECT`](/commands/select)) to the specified
+Move `key` from the currently selected database (see [`SELECT`]({{< relref "/commands/select" >}})) to the specified
destination database.
When `key` already exists in the destination database, or it does not exist in
the source database, it does nothing.
diff --git a/content/commands/mset/index.md b/content/commands/mset/index.md
index 0c87df51c4..d82a2b946b 100644
--- a/content/commands/mset/index.md
+++ b/content/commands/mset/index.md
@@ -57,8 +57,8 @@ syntax_str: ''
title: MSET
---
Sets the given keys to their respective values.
-`MSET` replaces existing values with new values, just as regular [`SET`](/commands/set).
-See [`MSETNX`](/commands/msetnx) if you don't want to overwrite existing values.
+`MSET` replaces existing values with new values, just as regular [`SET`]({{< relref "/commands/set" >}}).
+See [`MSETNX`]({{< relref "/commands/msetnx" >}}) if you don't want to overwrite existing values. `MSET` is atomic, so all given keys are set at once. It is not possible for clients to see that some of the keys were updated while diff --git a/content/commands/multi/index.md b/content/commands/multi/index.md index 96ff277873..86f784b331 100644 --- a/content/commands/multi/index.md +++ b/content/commands/multi/index.md @@ -31,6 +31,6 @@ syntax_str: '' title: MULTI --- Marks the start of a [transaction][tt] block. -Subsequent commands will be queued for atomic execution using [`EXEC`](/commands/exec). +Subsequent commands will be queued for atomic execution using [`EXEC`]({{< relref "/commands/exec" >}}). [tt]: /topics/transactions diff --git a/content/commands/object/index.md b/content/commands/object/index.md index 1136a1ab16..389fd4675d 100644 --- a/content/commands/object/index.md +++ b/content/commands/object/index.md @@ -25,4 +25,4 @@ title: OBJECT --- This is a container command for object introspection commands. -To see the list of available commands you can call [`OBJECT HELP`](/commands/object-help). +To see the list of available commands you can call [`OBJECT HELP`]({{< relref "/commands/object-help" >}}). diff --git a/content/commands/pexpire/index.md b/content/commands/pexpire/index.md index 533d5db170..4c6cc1afad 100644 --- a/content/commands/pexpire/index.md +++ b/content/commands/pexpire/index.md @@ -73,7 +73,7 @@ syntax_fmt: PEXPIRE key milliseconds [NX | XX | GT | LT] syntax_str: milliseconds [NX | XX | GT | LT] title: PEXPIRE --- -This command works exactly like [`EXPIRE`](/commands/expire) but the time to live of the key is +This command works exactly like [`EXPIRE`]({{< relref "/commands/expire" >}}) but the time to live of the key is specified in milliseconds instead of seconds. 
## Options diff --git a/content/commands/pexpireat/index.md b/content/commands/pexpireat/index.md index eac7543001..325740b6e6 100644 --- a/content/commands/pexpireat/index.md +++ b/content/commands/pexpireat/index.md @@ -73,7 +73,7 @@ syntax_fmt: PEXPIREAT key unix-time-milliseconds [NX | XX | GT | LT] syntax_str: unix-time-milliseconds [NX | XX | GT | LT] title: PEXPIREAT --- -`PEXPIREAT` has the same effect and semantic as [`EXPIREAT`](/commands/expireat), but the Unix time at +`PEXPIREAT` has the same effect and semantic as [`EXPIREAT`]({{< relref "/commands/expireat" >}}), but the Unix time at which the key will expire is specified in milliseconds instead of seconds. ## Options diff --git a/content/commands/pexpiretime/index.md b/content/commands/pexpiretime/index.md index c7683fb53b..d3f764abe4 100644 --- a/content/commands/pexpiretime/index.md +++ b/content/commands/pexpiretime/index.md @@ -46,7 +46,7 @@ syntax_fmt: PEXPIRETIME key syntax_str: '' title: PEXPIRETIME --- -`PEXPIRETIME` has the same semantic as [`EXPIRETIME`](/commands/expiretime), but returns the absolute Unix expiration timestamp in milliseconds instead of seconds. +`PEXPIRETIME` has the same semantic as [`EXPIRETIME`]({{< relref "/commands/expiretime" >}}), but returns the absolute Unix expiration timestamp in milliseconds instead of seconds. ## Examples diff --git a/content/commands/pfadd/index.md b/content/commands/pfadd/index.md index 7fb140e49e..309e4b82a2 100644 --- a/content/commands/pfadd/index.md +++ b/content/commands/pfadd/index.md @@ -60,7 +60,7 @@ If the approximated cardinality estimated by the HyperLogLog changed after execu To call the command without elements but just the variable name is valid, this will result into no operation performed if the variable already exists, or just the creation of the data structure if the key does not exist (in the latter case 1 is returned). 
-For an introduction to HyperLogLog data structure check the [`PFCOUNT`](/commands/pfcount) command page. +For an introduction to HyperLogLog data structure check the [`PFCOUNT`]({{< relref "/commands/pfcount" >}}) command page. ## Examples diff --git a/content/commands/pfcount/index.md b/content/commands/pfcount/index.md index 8102069009..01b4f412c5 100644 --- a/content/commands/pfcount/index.md +++ b/content/commands/pfcount/index.md @@ -60,7 +60,7 @@ The HyperLogLog data structure can be used in order to count **unique** elements The returned cardinality of the observed set is not exact, but approximated with a standard error of 0.81%. -For example in order to take the count of all the unique search queries performed in a day, a program needs to call [`PFADD`](/commands/pfadd) every time a query is processed. The estimated number of unique queries can be retrieved with `PFCOUNT` at any time. +For example in order to take the count of all the unique search queries performed in a day, a program needs to call [`PFADD`]({{< relref "/commands/pfadd" >}}) every time a query is processed. The estimated number of unique queries can be retrieved with `PFCOUNT` at any time. Note: as a side effect of calling this function, it is possible that the HyperLogLog is modified, since the last 8 bytes encode the latest computed cardinality for caching purposes. So `PFCOUNT` is technically a write command. @@ -83,7 +83,7 @@ Performances When `PFCOUNT` is called with a single key, performances are excellent even if in theory constant times to process a dense HyperLogLog are high. This is possible because the `PFCOUNT` uses caching in order to remember the cardinality -previously computed, that rarely changes because most [`PFADD`](/commands/pfadd) operations will +previously computed, that rarely changes because most [`PFADD`]({{< relref "/commands/pfadd" >}}) operations will not update any register. Hundreds of operations per second are possible. 
When `PFCOUNT` is called with multiple keys, an on-the-fly merge of the
@@ -103,7 +103,7 @@ The sparse representation uses a run-length encoding optimized to store efficien
Both representations are prefixed with a 16 bytes header, that includes a magic, an encoding / version field, and the cached cardinality estimation computed, stored in little endian format (the most significant bit is 1 if the estimation is invalid since the HyperLogLog was updated since the cardinality was computed).

-The HyperLogLog, being a Redis string, can be retrieved with [`GET`](/commands/get) and restored with [`SET`](/commands/set). Calling [`PFADD`](/commands/pfadd), `PFCOUNT` or [`PFMERGE`](/commands/pfmerge) commands with a corrupted HyperLogLog is never a problem, it may return random values but does not affect the stability of the server. Most of the times when corrupting a sparse representation, the server recognizes the corruption and returns an error.
+The HyperLogLog, being a Redis string, can be retrieved with [`GET`]({{< relref "/commands/get" >}}) and restored with [`SET`]({{< relref "/commands/set" >}}). Calling [`PFADD`]({{< relref "/commands/pfadd" >}}), `PFCOUNT` or [`PFMERGE`]({{< relref "/commands/pfmerge" >}}) commands with a corrupted HyperLogLog is never a problem, it may return random values but does not affect the stability of the server. Most of the time when corrupting a sparse representation, the server recognizes the corruption and returns an error.

The representation is neutral from the point of view of the processor word size and endianness, so the same representation is used by 32 bit and 64 bit processor, big endian or little endian.
diff --git a/content/commands/psetex/index.md b/content/commands/psetex/index.md index d15c0e2f69..13c8fd3d91 100644 --- a/content/commands/psetex/index.md +++ b/content/commands/psetex/index.md @@ -50,7 +50,7 @@ key_specs: type: range update: true linkTitle: PSETEX -replaced_by: '[`SET`](/commands/set) with the `PX` argument' +replaced_by: '[`SET`]({{< relref "/commands/set" >}}) with the `PX` argument' since: 2.6.0 summary: Sets both string value and expiration time in milliseconds of a key. The key is created if it doesn't exist. @@ -58,7 +58,7 @@ syntax_fmt: PSETEX key milliseconds value syntax_str: milliseconds value title: PSETEX --- -`PSETEX` works exactly like [`SETEX`](/commands/setex) with the sole difference that the expire +`PSETEX` works exactly like [`SETEX`]({{< relref "/commands/setex" >}}) with the sole difference that the expire time is specified in milliseconds instead of seconds. ## Examples diff --git a/content/commands/psubscribe/index.md b/content/commands/psubscribe/index.md index 747c0151ba..e70b305f61 100644 --- a/content/commands/psubscribe/index.md +++ b/content/commands/psubscribe/index.md @@ -44,11 +44,11 @@ Supported glob-style patterns: Use `\` to escape special characters if you want to match them verbatim. -Once the client enters the subscribed state it is not supposed to issue any other commands, except for additional [`SUBSCRIBE`](/commands/subscribe), [`SSUBSCRIBE`](/commands/ssubscribe), `PSUBSCRIBE`, [`UNSUBSCRIBE`](/commands/unsubscribe), [`SUNSUBSCRIBE`](/commands/sunsubscribe), [`PUNSUBSCRIBE`](/commands/punsubscribe), [`PING`](/commands/ping), [`RESET`](/commands/reset) and [`QUIT`](/commands/quit) commands. -However, if RESP3 is used (see [`HELLO`](/commands/hello)) it is possible for a client to issue any commands while in subscribed state. 
+Once the client enters the subscribed state it is not supposed to issue any other commands, except for additional [`SUBSCRIBE`]({{< relref "/commands/subscribe" >}}), [`SSUBSCRIBE`]({{< relref "/commands/ssubscribe" >}}), `PSUBSCRIBE`, [`UNSUBSCRIBE`]({{< relref "/commands/unsubscribe" >}}), [`SUNSUBSCRIBE`]({{< relref "/commands/sunsubscribe" >}}), [`PUNSUBSCRIBE`]({{< relref "/commands/punsubscribe" >}}), [`PING`]({{< relref "/commands/ping" >}}), [`RESET`]({{< relref "/commands/reset" >}}) and [`QUIT`]({{< relref "/commands/quit" >}}) commands. +However, if RESP3 is used (see [`HELLO`]({{< relref "/commands/hello" >}})) it is possible for a client to issue any commands while in subscribed state. -For more information, see [Pub/sub](/docs/interact/pubsub/). +For more information, see [Pub/sub]({{< relref "/develop/interact/pubsub" >}}). ## Behavior change history -* `>= 6.2.0`: [`RESET`](/commands/reset) can be called to exit subscribed state. +* `>= 6.2.0`: [`RESET`]({{< relref "/commands/reset" >}}) can be called to exit subscribed state. diff --git a/content/commands/pttl/index.md b/content/commands/pttl/index.md index 063686d772..14a867e46b 100644 --- a/content/commands/pttl/index.md +++ b/content/commands/pttl/index.md @@ -51,8 +51,8 @@ syntax_fmt: PTTL key syntax_str: '' title: PTTL --- -Like [`TTL`](/commands/ttl) this command returns the remaining time to live of a key that has an -expire set, with the sole difference that [`TTL`](/commands/ttl) returns the amount of remaining +Like [`TTL`]({{< relref "/commands/ttl" >}}) this command returns the remaining time to live of a key that has an +expire set, with the sole difference that [`TTL`]({{< relref "/commands/ttl" >}}) returns the amount of remaining time in seconds while `PTTL` returns it in milliseconds. In Redis 2.6 or older the command returns `-1` if the key does not exist or if the key exist but has no associated expire. 
diff --git a/content/commands/pubsub-channels/index.md b/content/commands/pubsub-channels/index.md index 2ff3be97e7..168326273a 100644 --- a/content/commands/pubsub-channels/index.md +++ b/content/commands/pubsub-channels/index.md @@ -40,4 +40,4 @@ An active channel is a Pub/Sub channel with one or more subscribers (excluding c If no `pattern` is specified, all the channels are listed, otherwise if pattern is specified only channels matching the specified glob-style pattern are listed. -Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, [`PUBSUB`](/commands/pubsub)'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. +Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, [`PUBSUB`]({{< relref "/commands/pubsub" >}})'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. diff --git a/content/commands/pubsub-numpat/index.md b/content/commands/pubsub-numpat/index.md index 1d192b2f2b..688220c1ce 100644 --- a/content/commands/pubsub-numpat/index.md +++ b/content/commands/pubsub-numpat/index.md @@ -28,8 +28,8 @@ syntax_fmt: PUBSUB NUMPAT syntax_str: '' title: PUBSUB NUMPAT --- -Returns the number of unique patterns that are subscribed to by clients (that are performed using the [`PSUBSCRIBE`](/commands/psubscribe) command). +Returns the number of unique patterns that are subscribed to by clients (that are performed using the [`PSUBSCRIBE`]({{< relref "/commands/psubscribe" >}}) command). Note that this isn't the count of clients subscribed to patterns, but the total number of unique patterns all the clients are subscribed to. 
-Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, [`PUBSUB`](/commands/pubsub)'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. +Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, [`PUBSUB`]({{< relref "/commands/pubsub" >}})'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. diff --git a/content/commands/pubsub-numsub/index.md b/content/commands/pubsub-numsub/index.md index 05ce2b7d2b..9fa6213eaf 100644 --- a/content/commands/pubsub-numsub/index.md +++ b/content/commands/pubsub-numsub/index.md @@ -38,4 +38,4 @@ Returns the number of subscribers (exclusive of clients subscribed to patterns) Note that it is valid to call this command without channels. In this case it will just return an empty list. -Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, [`PUBSUB`](/commands/pubsub)'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. +Cluster note: in a Redis Cluster clients can subscribe to every node, and can also publish to every other node. The cluster will make sure that published messages are forwarded as needed. That said, [`PUBSUB`]({{< relref "/commands/pubsub" >}})'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. 
diff --git a/content/commands/pubsub-shardnumsub/index.md b/content/commands/pubsub-shardnumsub/index.md index 9d867fb6f8..ff35a76e98 100644 --- a/content/commands/pubsub-shardnumsub/index.md +++ b/content/commands/pubsub-shardnumsub/index.md @@ -39,7 +39,7 @@ Returns the number of subscribers for the specified shard channels. Note that it is valid to call this command without channels, in this case it will just return an empty list. -Cluster note: in a Redis Cluster, [`PUBSUB`](/commands/pubsub)'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. +Cluster note: in a Redis Cluster, [`PUBSUB`]({{< relref "/commands/pubsub" >}})'s replies in a cluster only report information from the node's Pub/Sub context, rather than the entire cluster. ## Examples diff --git a/content/commands/pubsub/index.md b/content/commands/pubsub/index.md index d774ac1542..67932cfa00 100644 --- a/content/commands/pubsub/index.md +++ b/content/commands/pubsub/index.md @@ -25,4 +25,4 @@ title: PUBSUB --- This is a container command for Pub/Sub introspection commands. -To see the list of available commands you can call [`PUBSUB HELP`](/commands/pubsub-help). +To see the list of available commands you can call [`PUBSUB HELP`]({{< relref "/commands/pubsub-help" >}}). diff --git a/content/commands/readwrite/index.md b/content/commands/readwrite/index.md index 6a0012f4c6..3aea260732 100644 --- a/content/commands/readwrite/index.md +++ b/content/commands/readwrite/index.md @@ -32,6 +32,6 @@ title: READWRITE Disables read queries for a connection to a Redis Cluster replica node. Read queries against a Redis Cluster replica node are disabled by default, -but you can use the [`READONLY`](/commands/readonly) command to change this behavior on a per- +but you can use the [`READONLY`]({{< relref "/commands/readonly" >}}) command to change this behavior on a per- connection basis. 
The `READWRITE` command resets the readonly mode flag of a connection back to readwrite. diff --git a/content/commands/rename/index.md b/content/commands/rename/index.md index 7b1131e58b..2a9e0e0803 100644 --- a/content/commands/rename/index.md +++ b/content/commands/rename/index.md @@ -64,7 +64,7 @@ title: RENAME --- Renames `key` to `newkey`. It returns an error when `key` does not exist. -If `newkey` already exists it is overwritten, when this happens `RENAME` executes an implicit [`DEL`](/commands/del) operation, so if the deleted key contains a very big value it may cause high latency even if `RENAME` itself is usually a constant-time operation. +If `newkey` already exists it is overwritten, when this happens `RENAME` executes an implicit [`DEL`]({{< relref "/commands/del" >}}) operation, so if the deleted key contains a very big value it may cause high latency even if `RENAME` itself is usually a constant-time operation. In Cluster mode, both `key` and `newkey` must be in the same **hash slot**, meaning that in practice only keys that have the same hash tag can be reliably renamed in cluster. diff --git a/content/commands/reset/index.md b/content/commands/reset/index.md index d16d4816d0..4fd07264c5 100644 --- a/content/commands/reset/index.md +++ b/content/commands/reset/index.md @@ -37,18 +37,18 @@ mimicking the effect of disconnecting and reconnecting again. When the command is called from a regular client connection, it does the following: -* Discards the current [`MULTI`](/commands/multi) transaction block, if one exists. -* Unwatches all keys [`WATCH`](/commands/watch)ed by the connection. -* Disables [`CLIENT TRACKING`](/commands/client-tracking), if in use. -* Sets the connection to [`READWRITE`](/commands/readwrite) mode. -* Cancels the connection's [`ASKING`](/commands/asking) mode, if previously set. -* Sets [`CLIENT REPLY`](/commands/client-reply) to `ON`. 
+* Discards the current [`MULTI`]({{< relref "/commands/multi" >}}) transaction block, if one exists. +* Unwatches all keys [`WATCH`]({{< relref "/commands/watch" >}})ed by the connection. +* Disables [`CLIENT TRACKING`]({{< relref "/commands/client-tracking" >}}), if in use. +* Sets the connection to [`READWRITE`]({{< relref "/commands/readwrite" >}}) mode. +* Cancels the connection's [`ASKING`]({{< relref "/commands/asking" >}}) mode, if previously set. +* Sets [`CLIENT REPLY`]({{< relref "/commands/client-reply" >}}) to `ON`. * Sets the protocol version to RESP2. -* [`SELECT`](/commands/select)s database 0. -* Exits [`MONITOR`](/commands/monitor) mode, when applicable. -* Aborts Pub/Sub's subscription state ([`SUBSCRIBE`](/commands/subscribe) and [`PSUBSCRIBE`](/commands/psubscribe)), when +* [`SELECT`]({{< relref "/commands/select" >}})s database 0. +* Exits [`MONITOR`]({{< relref "/commands/monitor" >}}) mode, when applicable. +* Aborts Pub/Sub's subscription state ([`SUBSCRIBE`]({{< relref "/commands/subscribe" >}}) and [`PSUBSCRIBE`]({{< relref "/commands/psubscribe" >}})), when appropriate. -* Deauthenticates the connection, requiring a call [`AUTH`](/commands/auth) to reauthenticate when +* Deauthenticates the connection, requiring a call [`AUTH`]({{< relref "/commands/auth" >}}) to reauthenticate when authentication is enabled. * Turns off `NO-EVICT` mode. * Turns off `NO-TOUCH` mode. diff --git a/content/commands/restore-asking/index.md b/content/commands/restore-asking/index.md index 1ee2a140c1..396ce33a9e 100644 --- a/content/commands/restore-asking/index.md +++ b/content/commands/restore-asking/index.md @@ -87,8 +87,7 @@ key_specs: linkTitle: RESTORE-ASKING since: 3.0.0 summary: An internal command for migrating keys in a cluster. 
-syntax_fmt: "RESTORE-ASKING key ttl serialized-value [REPLACE] [ABSTTL] - [IDLETIME\_\ +syntax_fmt: "RESTORE-ASKING key ttl serialized-value [REPLACE] [ABSTTL] [IDLETIME\_\ seconds] [FREQ\_frequency]" syntax_str: "ttl serialized-value [REPLACE] [ABSTTL] [IDLETIME\_seconds] [FREQ\_frequency]" title: RESTORE-ASKING diff --git a/content/commands/restore/index.md b/content/commands/restore/index.md index 6c75a9b03a..c2352a10d8 100644 --- a/content/commands/restore/index.md +++ b/content/commands/restore/index.md @@ -84,14 +84,13 @@ key_specs: linkTitle: RESTORE since: 2.6.0 summary: Creates a key from the serialized representation of a value. -syntax_fmt: "RESTORE key ttl serialized-value [REPLACE] [ABSTTL] - [IDLETIME\_seconds]\ +syntax_fmt: "RESTORE key ttl serialized-value [REPLACE] [ABSTTL] [IDLETIME\_seconds]\ \ [FREQ\_frequency]" syntax_str: "ttl serialized-value [REPLACE] [ABSTTL] [IDLETIME\_seconds] [FREQ\_frequency]" title: RESTORE --- Create a key associated with a value that is obtained by deserializing the -provided serialized value (obtained via [`DUMP`](/commands/dump)). +provided serialized value (obtained via [`DUMP`]({{< relref "/commands/dump" >}})). If `ttl` is 0 the key is created without any expire, otherwise the specified expire time (in milliseconds) is set. @@ -102,7 +101,7 @@ If the `ABSTTL` modifier was used, `ttl` should represent an absolute [hewowu]: http://en.wikipedia.org/wiki/Unix_time For eviction purposes, you may use the `IDLETIME` or `FREQ` modifiers. See -[`OBJECT`](/commands/object) for more information. +[`OBJECT`]({{< relref "/commands/object" >}}) for more information. `RESTORE` will return a "Target key name is busy" error when `key` already exists unless you use the `REPLACE` modifier. 
diff --git a/content/commands/rpoplpush/index.md b/content/commands/rpoplpush/index.md index b31325e7f0..93f9843587 100644 --- a/content/commands/rpoplpush/index.md +++ b/content/commands/rpoplpush/index.md @@ -61,7 +61,7 @@ key_specs: type: range insert: true linkTitle: RPOPLPUSH -replaced_by: '[`LMOVE`](/commands/lmove) with the `RIGHT` and `LEFT` arguments' +replaced_by: '[`LMOVE`]({{< relref "/commands/lmove" >}}) with the `RIGHT` and `LEFT` arguments' since: 1.2.0 summary: Returns the last element of a list after removing and pushing it to another list. Deletes the list if the last element was popped. @@ -101,18 +101,18 @@ LRANGE myotherlist 0 -1 Redis is often used as a messaging server to implement processing of background jobs or other kinds of messaging tasks. A simple form of queue is often obtained pushing values into a list in the -producer side, and waiting for this values in the consumer side using [`RPOP`](/commands/rpop) -(using polling), or [`BRPOP`](/commands/brpop) if the client is better served by a blocking +producer side, and waiting for these values in the consumer side using [`RPOP`]({{< relref "/commands/rpop" >}}) +(using polling), or [`BRPOP`]({{< relref "/commands/brpop" >}}) if the client is better served by a blocking operation. However in this context the obtained queue is not _reliable_ as messages can be lost, for example in the case there is a network problem or if the consumer crashes just after the message is received but before it can be processed. -`RPOPLPUSH` (or [`BRPOPLPUSH`](/commands/brpoplpush) for the blocking variant) offers a way to avoid +`RPOPLPUSH` (or [`BRPOPLPUSH`]({{< relref "/commands/brpoplpush" >}}) for the blocking variant) offers a way to avoid this problem: the consumer fetches the message and at the same time pushes it into a _processing_ list.
-It will use the [`LREM`](/commands/lrem) command in order to remove the message from the +It will use the [`LREM`]({{< relref "/commands/lrem" >}}) command in order to remove the message from the _processing_ list once the message has been processed. An additional client may monitor the _processing_ list for items that remain @@ -123,7 +123,7 @@ again if needed. Using `RPOPLPUSH` with the same source and destination key, a client can visit all the elements of an N-elements list, one after the other, in O(N) without -transferring the full list from the server to the client using a single [`LRANGE`](/commands/lrange) +transferring the full list from the server to the client using a single [`LRANGE`]({{< relref "/commands/lrange" >}}) operation. The above pattern works even if one or both of the following conditions occur: diff --git a/content/commands/rpushx/index.md b/content/commands/rpushx/index.md index be7529733c..e5089e6ef7 100644 --- a/content/commands/rpushx/index.md +++ b/content/commands/rpushx/index.md @@ -57,7 +57,7 @@ title: RPUSHX --- Inserts specified values at the tail of the list stored at `key`, only if `key` already exists and holds a list. -In contrary to [`RPUSH`](/commands/rpush), no operation will be performed when `key` does not yet +Contrary to [`RPUSH`]({{< relref "/commands/rpush" >}}), no operation will be performed when `key` does not yet exist. ## Examples diff --git a/content/commands/save/index.md b/content/commands/save/index.md index e61711c099..d1aa1b4762 100644 --- a/content/commands/save/index.md +++ b/content/commands/save/index.md @@ -36,7 +36,7 @@ of an RDB file. You almost never want to call `SAVE` in production environments where it will block all the other clients. -Instead usually [`BGSAVE`](/commands/bgsave) is used. +Instead usually [`BGSAVE`]({{< relref "/commands/bgsave" >}}) is used.
However in case of issues preventing Redis to create the background saving child (for instance errors in the fork(2) system call), the `SAVE` command can be a good last resort to perform the dump of the latest dataset. diff --git a/content/commands/scan/index.md b/content/commands/scan/index.md index 75e0cefcc4..d252a9abd2 100644 --- a/content/commands/scan/index.md +++ b/content/commands/scan/index.md @@ -56,18 +56,18 @@ syntax_fmt: "SCAN cursor [MATCH\_pattern] [COUNT\_count] [TYPE\_type]" syntax_str: "[MATCH\_pattern] [COUNT\_count] [TYPE\_type]" title: SCAN --- -The `SCAN` command and the closely related commands [`SSCAN`](/commands/sscan), [`HSCAN`](/commands/hscan) and [`ZSCAN`](/commands/zscan) are used in order to incrementally iterate over a collection of elements. +The `SCAN` command and the closely related commands [`SSCAN`]({{< relref "/commands/sscan" >}}), [`HSCAN`]({{< relref "/commands/hscan" >}}) and [`ZSCAN`]({{< relref "/commands/zscan" >}}) are used in order to incrementally iterate over a collection of elements. * `SCAN` iterates the set of keys in the currently selected Redis database. -* [`SSCAN`](/commands/sscan) iterates elements of Sets types. -* [`HSCAN`](/commands/hscan) iterates fields of Hash types and their associated values. -* [`ZSCAN`](/commands/zscan) iterates elements of Sorted Set types and their associated scores. +* [`SSCAN`]({{< relref "/commands/sscan" >}}) iterates elements of Sets types. +* [`HSCAN`]({{< relref "/commands/hscan" >}}) iterates fields of Hash types and their associated values. +* [`ZSCAN`]({{< relref "/commands/zscan" >}}) iterates elements of Sorted Set types and their associated scores. 
-Since these commands allow for incremental iteration, returning only a small number of elements per call, they can be used in production without the downside of commands like [`KEYS`](/commands/keys) or [`SMEMBERS`](/commands/smembers) that may block the server for a long time (even several seconds) when called against big collections of keys or elements. +Since these commands allow for incremental iteration, returning only a small number of elements per call, they can be used in production without the downside of commands like [`KEYS`]({{< relref "/commands/keys" >}}) or [`SMEMBERS`]({{< relref "/commands/smembers" >}}) that may block the server for a long time (even several seconds) when called against big collections of keys or elements. -However while blocking commands like [`SMEMBERS`](/commands/smembers) are able to provide all the elements that are part of a Set in a given moment, The SCAN family of commands only offer limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process. +However while blocking commands like [`SMEMBERS`]({{< relref "/commands/smembers" >}}) are able to provide all the elements that are part of a Set in a given moment, The SCAN family of commands only offer limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process. -Note that `SCAN`, [`SSCAN`](/commands/sscan), [`HSCAN`](/commands/hscan) and [`ZSCAN`](/commands/zscan) all work very similarly, so this documentation covers all four commands. However an obvious difference is that in the case of [`SSCAN`](/commands/sscan), [`HSCAN`](/commands/hscan) and [`ZSCAN`](/commands/zscan) the first argument is the name of the key holding the Set, Hash or Sorted Set value. The `SCAN` command does not need any key name argument as it iterates keys in the current database, so the iterated object is the database itself. 
+Note that `SCAN`, [`SSCAN`]({{< relref "/commands/sscan" >}}), [`HSCAN`]({{< relref "/commands/hscan" >}}) and [`ZSCAN`]({{< relref "/commands/zscan" >}}) all work very similarly, so this documentation covers all four commands. However an obvious difference is that in the case of [`SSCAN`]({{< relref "/commands/sscan" >}}), [`HSCAN`]({{< relref "/commands/hscan" >}}) and [`ZSCAN`]({{< relref "/commands/zscan" >}}) the first argument is the name of the key holding the Set, Hash or Sorted Set value. The `SCAN` command does not need any key name argument as it iterates keys in the current database, so the iterated object is the database itself. ## SCAN basic usage @@ -110,12 +110,12 @@ Since in the second call the returned cursor is 0, the server signaled to the ca ## Return value -`SCAN`, [`SSCAN`](/commands/sscan), [`HSCAN`](/commands/hscan) and [`ZSCAN`](/commands/zscan) return a two element multi-bulk reply, where the first element is a string representing an unsigned 64 bit number (the cursor), and the second element is a multi-bulk with an array of elements. +`SCAN`, [`SSCAN`]({{< relref "/commands/sscan" >}}), [`HSCAN`]({{< relref "/commands/hscan" >}}) and [`ZSCAN`]({{< relref "/commands/zscan" >}}) return a two element multi-bulk reply, where the first element is a string representing an unsigned 64 bit number (the cursor), and the second element is a multi-bulk with an array of elements. * `SCAN` array of elements is a list of keys. -* [`SSCAN`](/commands/sscan) array of elements is a list of Set members. -* [`HSCAN`](/commands/hscan) array of elements contain two elements, a field and a value, for every returned element of the Hash. -* [`ZSCAN`](/commands/zscan) array of elements contain two elements, a member and its associated score, for every returned element of the Sorted Set. +* [`SSCAN`]({{< relref "/commands/sscan" >}}) array of elements is a list of Set members. 
+* [`HSCAN`]({{< relref "/commands/hscan" >}}) array of elements contains two elements, a field and a value, for every returned element of the Hash. +* [`ZSCAN`]({{< relref "/commands/zscan" >}}) array of elements contains two elements, a member and its associated score, for every returned element of the Sorted Set. ## Scan guarantees @@ -149,7 +149,7 @@ Important: **there is no need to use the same COUNT value** for every iteration. ## The MATCH option -It is possible to only iterate elements matching a given glob-style pattern, similarly to the behavior of the [`KEYS`](/commands/keys) command that takes a pattern as its only argument. +It is possible to only iterate elements matching a given glob-style pattern, similarly to the behavior of the [`KEYS`]({{< relref "/commands/keys" >}}) command that takes a pattern as its only argument. To do so, just append the `MATCH ` arguments at the end of the `SCAN` command (it works with all the `SCAN` family commands). @@ -209,9 +209,9 @@ As you can see most of the calls returned zero elements, but the last call where ## The TYPE option -You can use the `TYPE` option to ask `SCAN` to only return objects that match a given `type`, allowing you to iterate through the database looking for keys of a specific type. The **TYPE** option is only available on the whole-database `SCAN`, not [`HSCAN`](/commands/hscan) or [`ZSCAN`](/commands/zscan) etc. +You can use the `TYPE` option to ask `SCAN` to only return objects that match a given `type`, allowing you to iterate through the database looking for keys of a specific type. The **TYPE** option is only available on the whole-database `SCAN`, not [`HSCAN`]({{< relref "/commands/hscan" >}}) or [`ZSCAN`]({{< relref "/commands/zscan" >}}) etc. -The `type` argument is the same string name that the [`TYPE`](/commands/type) command returns.
Note a quirk where some Redis types, such as GeoHashes, HyperLogLogs, Bitmaps, and Bitfields, may internally be implemented using other Redis types, such as a string or zset, so can't be distinguished from other keys of that same type by `SCAN`. For example, a ZSET and GEOHASH: +The `type` argument is the same string name that the [`TYPE`]({{< relref "/commands/type" >}}) command returns. Note a quirk where some Redis types, such as GeoHashes, HyperLogLogs, Bitmaps, and Bitfields, may internally be implemented using other Redis types, such as a string or zset, so can't be distinguished from other keys of that same type by `SCAN`. For example, a ZSET and GEOHASH: ``` redis 127.0.0.1:6379> GEOADD geokey 0 0 value @@ -259,11 +259,11 @@ In the `COUNT` option documentation, we state that sometimes this family of comm However once the data structures are bigger and are promoted to use real hash tables, the `SCAN` family of commands will resort to the normal behavior. Note that since this special behavior of returning all the elements is true only for small aggregates, it has no effects on the command complexity or latency. However the exact limits to get converted into real hash tables are [user configurable](/topics/memory-optimization), so the maximum number of elements you can see returned in a single call depends on how big an aggregate data type could be and still use the packed representation. -Also note that this behavior is specific of [`SSCAN`](/commands/sscan), [`HSCAN`](/commands/hscan) and [`ZSCAN`](/commands/zscan). `SCAN` itself never shows this behavior because the key space is always represented by hash tables. +Also note that this behavior is specific of [`SSCAN`]({{< relref "/commands/sscan" >}}), [`HSCAN`]({{< relref "/commands/hscan" >}}) and [`ZSCAN`]({{< relref "/commands/zscan" >}}). `SCAN` itself never shows this behavior because the key space is always represented by hash tables. 
## Further reading -For more information about managing keys, please refer to the [The Redis Keyspace](/docs/manual/keyspace) tutorial. +For more information about managing keys, please refer to the [The Redis Keyspace]({{< relref "/develop/use/keyspace" >}}) tutorial. ## Additional examples diff --git a/content/commands/script-debug/index.md b/content/commands/script-debug/index.md index be77979572..e7946b8173 100644 --- a/content/commands/script-debug/index.md +++ b/content/commands/script-debug/index.md @@ -42,7 +42,7 @@ syntax_fmt: SCRIPT DEBUG syntax_str: '' title: SCRIPT DEBUG --- -Set the debug mode for subsequent scripts executed with [`EVAL`](/commands/eval). Redis includes a +Set the debug mode for subsequent scripts executed with [`EVAL`]({{< relref "/commands/eval" >}}). Redis includes a complete Lua debugger, codename LDB, that can be used to make the task of writing complex scripts much simpler. In debug mode Redis acts as a remote debugging server and a client, such as `redis-cli`, can execute scripts step by @@ -63,4 +63,4 @@ is active and retains all changes to the data set once it ends. * `SYNC`. Enable blocking synchronous debugging of Lua scripts (saves changes to data). * `NO`. Disables scripts debug mode. -For more information about [`EVAL`](/commands/eval) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). +For more information about [`EVAL`]({{< relref "/commands/eval" >}}) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). diff --git a/content/commands/script-exists/index.md b/content/commands/script-exists/index.md index 580302fc34..2c9f646b11 100644 --- a/content/commands/script-exists/index.md +++ b/content/commands/script-exists/index.md @@ -41,8 +41,8 @@ This command accepts one or more SHA1 digests and returns a list of ones or zeros to signal if the scripts are already defined or not inside the script cache. 
This can be useful before a pipelining operation to ensure that scripts are -loaded (and if not, to load them using [`SCRIPT LOAD`](/commands/script-load)) so that the pipelining -operation can be performed solely using [`EVALSHA`](/commands/evalsha) instead of [`EVAL`](/commands/eval) to save +loaded (and if not, to load them using [`SCRIPT LOAD`]({{< relref "/commands/script-load" >}})) so that the pipelining +operation can be performed solely using [`EVALSHA`]({{< relref "/commands/evalsha" >}}) instead of [`EVAL`]({{< relref "/commands/eval" >}}) to save bandwidth. -For more information about [`EVAL`](/commands/eval) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). +For more information about [`EVAL`]({{< relref "/commands/eval" >}}) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). diff --git a/content/commands/script-flush/index.md b/content/commands/script-flush/index.md index b0c276d2cd..f77f064c91 100644 --- a/content/commands/script-flush/index.md +++ b/content/commands/script-flush/index.md @@ -56,7 +56,7 @@ It is possible to use one of the following modifiers to dictate the flushing mod * `ASYNC`: flushes the cache asynchronously * `SYNC`: flushes the cache synchronously -For more information about [`EVAL`](/commands/eval) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). +For more information about [`EVAL`]({{< relref "/commands/eval" >}}) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). 
## Behavior change history diff --git a/content/commands/script-kill/index.md b/content/commands/script-kill/index.md index c579cd795e..8afdcdeda6 100644 --- a/content/commands/script-kill/index.md +++ b/content/commands/script-kill/index.md @@ -30,7 +30,7 @@ syntax_fmt: SCRIPT KILL syntax_str: '' title: SCRIPT KILL --- -Kills the currently executing [`EVAL`](/commands/eval) script, assuming no write operation was yet +Kills the currently executing [`EVAL`]({{< relref "/commands/eval" >}}) script, assuming no write operation was yet performed by the script. This command is mainly useful to kill a script that is running for too much @@ -44,4 +44,4 @@ In such a case, only `SHUTDOWN NOSAVE` can kill the script, killing the Redis process in a hard way and preventing it from persisting with half-written information. -For more information about [`EVAL`](/commands/eval) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). +For more information about [`EVAL`]({{< relref "/commands/eval" >}}) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). diff --git a/content/commands/script-load/index.md b/content/commands/script-load/index.md index dfab0383dc..2eb2abaf00 100644 --- a/content/commands/script-load/index.md +++ b/content/commands/script-load/index.md @@ -36,8 +36,8 @@ title: SCRIPT LOAD --- Load a script into the scripts cache, without executing it. After the specified command is loaded into the script cache it will be callable -using [`EVALSHA`](/commands/evalsha) with the correct SHA1 digest of the script, exactly like after -the first successful invocation of [`EVAL`](/commands/eval). +using [`EVALSHA`]({{< relref "/commands/evalsha" >}}) with the correct SHA1 digest of the script, exactly like after +the first successful invocation of [`EVAL`]({{< relref "/commands/eval" >}}). The script is guaranteed to stay in the script cache forever (unless `SCRIPT FLUSH` is called). @@ -45,4 +45,4 @@ FLUSH` is called). 
The command works in the same way even if the script was already present in the script cache. -For more information about [`EVAL`](/commands/eval) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). +For more information about [`EVAL`]({{< relref "/commands/eval" >}}) scripts please refer to [Introduction to Eval Scripts](/topics/eval-intro). diff --git a/content/commands/script/index.md b/content/commands/script/index.md index 5b37c3a6d1..3d797bad91 100644 --- a/content/commands/script/index.md +++ b/content/commands/script/index.md @@ -25,4 +25,4 @@ title: SCRIPT --- This is a container command for script management commands. -To see the list of available commands you can call [`SCRIPT HELP`](/commands/script-help). +To see the list of available commands you can call [`SCRIPT HELP`]({{< relref "/commands/script-help" >}}). diff --git a/content/commands/sdiffstore/index.md b/content/commands/sdiffstore/index.md index abee79e310..56e5d962bf 100644 --- a/content/commands/sdiffstore/index.md +++ b/content/commands/sdiffstore/index.md @@ -63,7 +63,7 @@ syntax_fmt: SDIFFSTORE destination key [key ...] syntax_str: key [key ...] title: SDIFFSTORE --- -This command is equal to [`SDIFF`](/commands/sdiff), but instead of returning the resulting set, it +This command is equal to [`SDIFF`]({{< relref "/commands/sdiff" >}}), but instead of returning the resulting set, it is stored in `destination`. If `destination` already exists, it is overwritten. diff --git a/content/commands/select/index.md b/content/commands/select/index.md index ec25f3f7d1..89ca5a1ce4 100644 --- a/content/commands/select/index.md +++ b/content/commands/select/index.md @@ -35,10 +35,10 @@ title: SELECT Select the Redis logical database having the specified zero-based numeric index. New connections always use the database 0. -Selectable Redis databases are a form of namespacing: all databases are still persisted in the same RDB / AOF file. 
However different databases can have keys with the same name, and commands like [`FLUSHDB`](/commands/flushdb), [`SWAPDB`](/commands/swapdb) or [`RANDOMKEY`](/commands/randomkey) work on specific databases. +Selectable Redis databases are a form of namespacing: all databases are still persisted in the same RDB / AOF file. However different databases can have keys with the same name, and commands like [`FLUSHDB`]({{< relref "/commands/flushdb" >}}), [`SWAPDB`]({{< relref "/commands/swapdb" >}}) or [`RANDOMKEY`]({{< relref "/commands/randomkey" >}}) work on specific databases. In practical terms, Redis databases should be used to separate different keys belonging to the same application (if needed), and not to use a single Redis instance for multiple unrelated applications. When using Redis Cluster, the `SELECT` command cannot be used, since Redis Cluster only supports database zero. In the case of a Redis Cluster, having multiple databases would be useless and an unnecessary source of complexity. Commands operating atomically on a single database would not be possible with the Redis Cluster design and goals. -Since the currently selected database is a property of the connection, clients should track the currently selected database and re-select it on reconnection. While there is no command in order to query the selected database in the current connection, the [`CLIENT LIST`](/commands/client-list) output shows, for each client, the currently selected database. +Since the currently selected database is a property of the connection, clients should track the currently selected database and re-select it on reconnection. While there is no command in order to query the selected database in the current connection, the [`CLIENT LIST`]({{< relref "/commands/client-list" >}}) output shows, for each client, the currently selected database. 
diff --git a/content/commands/set/index.md b/content/commands/set/index.md index 0de5bdea01..ca3890f626 100644 --- a/content/commands/set/index.md +++ b/content/commands/set/index.md @@ -107,8 +107,7 @@ linkTitle: SET since: 1.0.0 summary: Sets the string value of a key, ignoring its type. The key is created if it doesn't exist. -syntax_fmt: "SET key value [NX | XX] [GET] [EX\_seconds | PX\_milliseconds | - EXAT\_\ +syntax_fmt: "SET key value [NX | XX] [GET] [EX\_seconds | PX\_milliseconds | EXAT\_\ unix-time-seconds | PXAT\_unix-time-milliseconds | KEEPTTL]" syntax_str: "value [NX | XX] [GET] [EX\_seconds | PX\_milliseconds | EXAT\_unix-time-seconds\ \ | PXAT\_unix-time-milliseconds | KEEPTTL]" @@ -131,7 +130,7 @@ The `SET` command supports a set of options that modify its behavior: * `KEEPTTL` -- Retain the time to live associated with the key. * `GET` -- Return the old string stored at key, or nil if key did not exist. An error is returned and `SET` aborted if the value stored at key is not a string. -Note: Since the `SET` command options can replace [`SETNX`](/commands/setnx), [`SETEX`](/commands/setex), [`PSETEX`](/commands/psetex), [`GETSET`](/commands/getset), it is possible that in future versions of Redis these commands will be deprecated and finally removed. +Note: Since the `SET` command options can replace [`SETNX`]({{< relref "/commands/setnx" >}}), [`SETEX`]({{< relref "/commands/setex" >}}), [`PSETEX`]({{< relref "/commands/psetex" >}}), [`GETSET`]({{< relref "/commands/getset" >}}), it is possible that in future versions of Redis these commands will be deprecated and finally removed. ## Examples @@ -153,14 +152,14 @@ SET anotherkey "will expire in a minute" EX 60 The command `SET resource-name anystring NX EX max-lock-time` is a simple way to implement a locking system with Redis. 
-A client can acquire the lock if the above command returns `OK` (or retry after some time if the command returns Nil), and remove the lock just using [`DEL`](/commands/del). +A client can acquire the lock if the above command returns `OK` (or retry after some time if the command returns Nil), and remove the lock just using [`DEL`]({{< relref "/commands/del" >}}). The lock will be auto-released after the expire time is reached. It is possible to make this system more robust modifying the unlock schema as follows: * Instead of setting a fixed string, set a non-guessable large random string, called token. -* Instead of releasing the lock with [`DEL`](/commands/del), send a script that only removes the key if the value matches. +* Instead of releasing the lock with [`DEL`]({{< relref "/commands/del" >}}), send a script that only removes the key if the value matches. This avoids that a client will try to release the lock after the expire time deleting the key created by another client that acquired the lock later. diff --git a/content/commands/setbit/index.md b/content/commands/setbit/index.md index 9c4b1f0bb3..9bc93830ab 100644 --- a/content/commands/setbit/index.md +++ b/content/commands/setbit/index.md @@ -91,14 +91,14 @@ GET mykey There are cases when you need to set all the bits of single bitmap at once, for example when initializing it to a default non-zero value. It is possible to do this with multiple calls to the `SETBIT` command, one for each bit that needs to -be set. However, so as an optimization you can use a single [`SET`](/commands/set) command to set +be set. However, so as an optimization you can use a single [`SET`]({{< relref "/commands/set" >}}) command to set the entire bitmap. Bitmaps are not an actual data type, but a set of bit-oriented operations defined on the String type (for more information refer to the [Bitmaps section of the Data Types Introduction page][ti]). 
This means that -bitmaps can be used with string commands, and most importantly with [`SET`](/commands/set) and -[`GET`](/commands/get). +bitmaps can be used with string commands, and most importantly with [`SET`]({{< relref "/commands/set" >}}) and +[`GET`]({{< relref "/commands/get" >}}). Because Redis' strings are binary-safe, a bitmap is trivially encoded as a bytes stream. The first byte of the string corresponds to offsets 0..7 of @@ -121,7 +121,7 @@ would look like this: By getting the string representation of a bitmap, the client can then parse the response's bytes by extracting the bit values using native bit operations in its native programming language. Symmetrically, it is also possible to set an entire -bitmap by performing the bits-to-bytes encoding in the client and calling [`SET`](/commands/set) +bitmap by performing the bits-to-bytes encoding in the client and calling [`SET`]({{< relref "/commands/set" >}}) with the resultant string. [ti]: /topics/data-types-intro#bitmaps @@ -130,7 +130,7 @@ with the resultant string. `SETBIT` excels at setting single bits, and can be called several times when multiple bits need to be set. To optimize this operation you can replace -multiple `SETBIT` calls with a single call to the variadic [`BITFIELD`](/commands/bitfield) command +multiple `SETBIT` calls with a single call to the variadic [`BITFIELD`]({{< relref "/commands/bitfield" >}}) command and the use of fields of type `u1`. For example, the example above could be replaced by: @@ -141,9 +141,9 @@ For example, the example above could be replaced by: ## Advanced Pattern: accessing bitmap ranges -It is also possible to use the [`GETRANGE`](/commands/getrange) and [`SETRANGE`](/commands/setrange) string commands to +It is also possible to use the [`GETRANGE`]({{< relref "/commands/getrange" >}}) and [`SETRANGE`]({{< relref "/commands/setrange" >}}) string commands to efficiently access a range of bit offsets in a bitmap. 
Below is a sample -implementation in idiomatic Redis Lua scripting that can be run with the [`EVAL`](/commands/eval) +implementation in idiomatic Redis Lua scripting that can be run with the [`EVAL`]({{< relref "/commands/eval" >}}) command: ``` @@ -151,7 +151,7 @@ command: Sets a bitmap range Bitmaps are stored as Strings in Redis. A range spans one or more bytes, -so we can call [`SETRANGE`](/commands/setrange) when entire bytes need to be set instead of flipping +so we can call [`SETRANGE`]({{< relref "/commands/setrange" >}}) when entire bytes need to be set instead of flipping individual bits. Also, to avoid multiple internal memory allocations in Redis, we traverse in reverse. Expected input: diff --git a/content/commands/setex/index.md b/content/commands/setex/index.md index b5abea9cb2..2eb1d4f91f 100644 --- a/content/commands/setex/index.md +++ b/content/commands/setex/index.md @@ -50,7 +50,7 @@ key_specs: type: range update: true linkTitle: SETEX -replaced_by: '[`SET`](/commands/set) with the `EX` argument' +replaced_by: '[`SET`]({{< relref "/commands/set" >}}) with the `EX` argument' since: 2.0.0 summary: Sets the string value and expiration time of a key. Creates the key if it doesn't exist. @@ -78,4 +78,4 @@ GET mykey ## See also -[`TTL`](/commands/ttl) \ No newline at end of file +[`TTL`]({{< relref "/commands/ttl" >}}) \ No newline at end of file diff --git a/content/commands/setnx/index.md b/content/commands/setnx/index.md index 339850a80b..ecd754a182 100644 --- a/content/commands/setnx/index.md +++ b/content/commands/setnx/index.md @@ -47,7 +47,7 @@ key_specs: type: range insert: true linkTitle: SETNX -replaced_by: '[`SET`](/commands/set) with the `NX` argument' +replaced_by: '[`SET`]({{< relref "/commands/set" >}}) with the `NX` argument' since: 1.0.0 summary: Set the string value of a key only when the key doesn't exist. 
syntax_fmt: SETNX key value @@ -55,7 +55,7 @@ syntax_str: value title: SETNX --- Set `key` to hold string `value` if `key` does not exist. -In that case, it is equal to [`SET`](/commands/set). +In that case, it is equal to [`SET`]({{< relref "/commands/set" >}}). When `key` already holds a value, no operation is performed. `SETNX` is short for "**SET** if **N**ot e**X**ists". @@ -74,7 +74,7 @@ GET mykey 1. The following pattern is discouraged in favor of [the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit more complex to implement, but offers better guarantees and is fault tolerant. 2. We document the old pattern anyway because certain existing implementations link to this page as a reference. Moreover it is an interesting example of how Redis commands can be used in order to mount programming primitives. -3. Anyway even assuming a single-instance locking primitive, starting with 2.6.12 it is possible to create a much simpler locking primitive, equivalent to the one discussed here, using the [`SET`](/commands/set) command to acquire the lock, and a simple Lua script to release the lock. The pattern is documented in the [`SET`](/commands/set) command page. +3. Anyway even assuming a single-instance locking primitive, starting with 2.6.12 it is possible to create a much simpler locking primitive, equivalent to the one discussed here, using the [`SET`]({{< relref "/commands/set" >}}) command to acquire the lock, and a simple Lua script to release the lock. The pattern is documented in the [`SET`]({{< relref "/commands/set" >}}) command page. That said, `SETNX` can be used, and was historically used, as a locking primitive. For example, to acquire the lock of the key `foo`, the client could try the following: @@ -100,7 +100,7 @@ timestamp. If such a timestamp is equal to the current Unix time the lock is no longer valid. 
-When this happens we can't just call [`DEL`](/commands/del) against the key to remove the lock +When this happens we can't just call [`DEL`]({{< relref "/commands/del" >}}) against the key to remove the lock and then try to issue a `SETNX`, as there is a race condition here, when multiple clients detected an expired lock and are trying to release it. @@ -130,12 +130,12 @@ Let's see how C4, our sane client, uses the good algorithm: GETSET lock.foo ``` -* Because of the [`GETSET`](/commands/getset) semantic, C4 can check if the old value stored at +* Because of the [`GETSET`]({{< relref "/commands/getset" >}}) semantic, C4 can check if the old value stored at `key` is still an expired timestamp. If it is, the lock was acquired. * If another client, for instance C5, was faster than C4 and acquired the lock - with the [`GETSET`](/commands/getset) operation, the C4 [`GETSET`](/commands/getset) operation will return a non + with the [`GETSET`]({{< relref "/commands/getset" >}}) operation, the C4 [`GETSET`]({{< relref "/commands/getset" >}}) operation will return a non expired timestamp. C4 will simply restart from the first step. Note that even if C4 set the key a bit a few seconds in the future this is @@ -143,7 +143,7 @@ Let's see how C4, our sane client, uses the good algorithm: In order to make this locking algorithm more robust, a client holding a lock should always check the timeout didn't expire before -unlocking the key with [`DEL`](/commands/del) because client failures can be complex, not just +unlocking the key with [`DEL`]({{< relref "/commands/del" >}}) because client failures can be complex, not just crashing but also blocking a lot of time against some operations and trying -to issue [`DEL`](/commands/del) after a lot of time (when the LOCK is already held by another +to issue [`DEL`]({{< relref "/commands/del" >}}) after a lot of time (when the LOCK is already held by another client). 
diff --git a/content/commands/setrange/index.md b/content/commands/setrange/index.md index 33e9ddc3c3..001f7182b7 100644 --- a/content/commands/setrange/index.md +++ b/content/commands/setrange/index.md @@ -80,7 +80,7 @@ the same _key_ will not have the allocation overhead. ## Patterns -Thanks to `SETRANGE` and the analogous [`GETRANGE`](/commands/getrange) commands, you can use Redis +Thanks to `SETRANGE` and the analogous [`GETRANGE`]({{< relref "/commands/getrange" >}}) commands, you can use Redis strings as a linear array with O(1) random access. This is a very fast and efficient storage in many real world use cases. diff --git a/content/commands/shutdown/index.md b/content/commands/shutdown/index.md index b22bdcc3ae..495588ee83 100644 --- a/content/commands/shutdown/index.md +++ b/content/commands/shutdown/index.md @@ -71,7 +71,7 @@ title: SHUTDOWN The command behavior is the following: * If there are any replicas lagging behind in replication: - * Pause clients attempting to write by performing a [`CLIENT PAUSE`](/commands/client-pause) with the `WRITE` option. + * Pause clients attempting to write by performing a [`CLIENT PAUSE`]({{< relref "/commands/client-pause" >}}) with the `WRITE` option. * Wait up to the configured `shutdown-timeout` (default 10 seconds) for replicas to catch up the replication offset. * Stop all the clients. * Perform a blocking SAVE if at least one **save point** is configured. @@ -130,7 +130,7 @@ The second command will not have any problem to execute since the AOF is no long Since Redis 7.0, the server waits for lagging replicas up to a configurable `shutdown-timeout`, by default 10 seconds, before shutting down. This provides a best effort minimizing the risk of data loss in a situation where no save points are configured and AOF is disabled. Before version 7.0, shutting down a heavily loaded master node in a diskless setup was more likely to result in data loss. 
-To minimize the risk of data loss in such setups, it's advised to trigger a manual [`FAILOVER`](/commands/failover) (or [`CLUSTER FAILOVER`](/commands/cluster-failover)) to demote the master to a replica and promote one of the replicas to be the new master, before shutting down a master node. +To minimize the risk of data loss in such setups, it's advised to trigger a manual [`FAILOVER`]({{< relref "/commands/failover" >}}) (or [`CLUSTER FAILOVER`]({{< relref "/commands/cluster-failover" >}})) to demote the master to a replica and promote one of the replicas to be the new master, before shutting down a master node. ## Behavior change history diff --git a/content/commands/sintercard/index.md b/content/commands/sintercard/index.md index b9016dd073..a046f88ad6 100644 --- a/content/commands/sintercard/index.md +++ b/content/commands/sintercard/index.md @@ -56,7 +56,7 @@ syntax_fmt: "SINTERCARD numkeys key [key ...] [LIMIT\_limit]" syntax_str: "key [key ...] [LIMIT\_limit]" title: SINTERCARD --- -This command is similar to [`SINTER`](/commands/sinter), but instead of returning the result set, it returns just the cardinality of the result. +This command is similar to [`SINTER`]({{< relref "/commands/sinter" >}}), but instead of returning the result set, it returns just the cardinality of the result. Returns the cardinality of the set which would result from the intersection of all the given sets. Keys that do not exist are considered to be empty sets. diff --git a/content/commands/sinterstore/index.md b/content/commands/sinterstore/index.md index 2b04e9d5d8..503947daa2 100644 --- a/content/commands/sinterstore/index.md +++ b/content/commands/sinterstore/index.md @@ -64,7 +64,7 @@ syntax_fmt: SINTERSTORE destination key [key ...] syntax_str: key [key ...] 
title: SINTERSTORE --- -This command is equal to [`SINTER`](/commands/sinter), but instead of returning the resulting set, +This command is equal to [`SINTER`]({{< relref "/commands/sinter" >}}), but instead of returning the resulting set, it is stored in `destination`. If `destination` already exists, it is overwritten. diff --git a/content/commands/slaveof/index.md b/content/commands/slaveof/index.md index a8a45d0bf0..bed6d2ed64 100644 --- a/content/commands/slaveof/index.md +++ b/content/commands/slaveof/index.md @@ -52,14 +52,14 @@ doc_flags: group: server hidden: false linkTitle: SLAVEOF -replaced_by: '[`REPLICAOF`](/commands/replicaof)' +replaced_by: '[`REPLICAOF`]({{< relref "/commands/replicaof" >}})' since: 1.0.0 summary: Sets a Redis server as a replica of another, or promotes it to being a master. syntax_fmt: SLAVEOF syntax_str: '' title: SLAVEOF --- -**A note about the word slave used in this man page and command name**: starting with Redis version 5, if not for backward compatibility, the Redis project no longer uses the word slave. Please use the new command [`REPLICAOF`](/commands/replicaof). The command `SLAVEOF` will continue to work for backward compatibility. +**A note about the word slave used in this man page and command name**: starting with Redis version 5, if not for backward compatibility, the Redis project no longer uses the word slave. Please use the new command [`REPLICAOF`]({{< relref "/commands/replicaof" >}}). The command `SLAVEOF` will continue to work for backward compatibility. The `SLAVEOF` command can change the replication settings of a replica on the fly. 
If a Redis server is already acting as replica, the command `SLAVEOF` NO ONE will diff --git a/content/commands/slowlog-get/index.md b/content/commands/slowlog-get/index.md index 297a89cac8..52b5df6d0e 100644 --- a/content/commands/slowlog-get/index.md +++ b/content/commands/slowlog-get/index.md @@ -57,7 +57,7 @@ Each entry from the slow log is comprised of the following six values: 3. The amount of time needed for its execution, in microseconds. 4. The array composing the arguments of the command. 5. Client IP address and port. -6. Client name if set via the [`CLIENT SETNAME`](/commands/client-setname) command. +6. Client name if set via the [`CLIENT SETNAME`]({{< relref "/commands/client-setname" >}}) command. The entry's unique ID can be used in order to avoid processing slow log entries multiple times (for instance you may have a script sending you an email alert for every new slow log entry). The ID is never reset in the course of the Redis server execution, only a server diff --git a/content/commands/slowlog-len/index.md b/content/commands/slowlog-len/index.md index b2ed094218..6476aebf60 100644 --- a/content/commands/slowlog-len/index.md +++ b/content/commands/slowlog-len/index.md @@ -38,4 +38,4 @@ This command returns the current number of entries in the slow log. A new entry is added to the slow log whenever a command exceeds the execution time threshold defined by the `slowlog-log-slower-than` configuration directive. The maximum number of entries in the slow log is governed by the `slowlog-max-len` configuration directive. Once the slog log reaches its maximal size, the oldest entry is removed whenever a new entry is created. -The slow log can be cleared with the [`SLOWLOG RESET`](/commands/slowlog-reset) command. +The slow log can be cleared with the [`SLOWLOG RESET`]({{< relref "/commands/slowlog-reset" >}}) command. 
diff --git a/content/commands/slowlog/index.md b/content/commands/slowlog/index.md index 8942045d6b..0313889f83 100644 --- a/content/commands/slowlog/index.md +++ b/content/commands/slowlog/index.md @@ -25,4 +25,4 @@ title: SLOWLOG --- This is a container command for slow log management commands. -To see the list of available commands you can call [`SLOWLOG HELP`](/commands/slowlog-help). +To see the list of available commands you can call [`SLOWLOG HELP`]({{< relref "/commands/slowlog-help" >}}). diff --git a/content/commands/smembers/index.md b/content/commands/smembers/index.md index 71c58f5fc7..a4d74fd3ed 100644 --- a/content/commands/smembers/index.md +++ b/content/commands/smembers/index.md @@ -49,7 +49,7 @@ title: SMEMBERS --- Returns all the members of the set value stored at `key`. -This has the same effect as running [`SINTER`](/commands/sinter) with one argument `key`. +This has the same effect as running [`SINTER`]({{< relref "/commands/sinter" >}}) with one argument `key`. ## Examples diff --git a/content/commands/sort/index.md b/content/commands/sort/index.md index c5bc6ad6ed..ada01ae8f2 100644 --- a/content/commands/sort/index.md +++ b/content/commands/sort/index.md @@ -118,9 +118,8 @@ linkTitle: SORT since: 1.0.0 summary: Sorts the elements in a list, a set, or a sorted set, optionally storing the result. -syntax_fmt: "SORT key [BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET pattern -\ - \ ...]] [ASC | DESC] [ALPHA] [STORE\_destination]" +syntax_fmt: "SORT key [BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET pattern\ + \ ...]] [ASC | DESC] [ALPHA] [STORE\_destination]" syntax_str: "[BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET pattern ...]]\ \ [ASC | DESC] [ALPHA] [STORE\_destination]" title: SORT @@ -128,7 +127,7 @@ title: SORT Returns or stores the elements contained in the [list][tdtl], [set][tdts] or [sorted set][tdtss] at `key`. -There is also the [`SORT_RO`](/commands/sort_ro) read-only variant of this command. 
+There is also the [`SORT_RO`]({{< relref "/commands/sort_ro" >}}) read-only variant of this command. By default, sorting is numeric and elements are compared by their value interpreted as double precision floating point number. @@ -233,11 +232,11 @@ SORT mylist BY weight_* GET object_* GET # ## Restrictions for using external keys When enabling `Redis cluster-mode` there is no way to guarantee the existence of the external keys on the node which the command is processed on. -In this case, any use of [`GET`](/commands/get) or `BY` which reference external key pattern will cause the command to fail with an error. +In this case, any use of [`GET`]({{< relref "/commands/get" >}}) or `BY` which reference external key pattern will cause the command to fail with an error. -Starting from Redis 7.0, any use of [`GET`](/commands/get) or `BY` which reference external key pattern will only be allowed in case the current user running the command has full key read permissions. +Starting from Redis 7.0, any use of [`GET`]({{< relref "/commands/get" >}}) or `BY` which reference external key pattern will only be allowed in case the current user running the command has full key read permissions. Full key read permissions can be set for the user by, for example, specifying `'%R~*'` or `'~*` with the relevant command access rules. -You can check the [`ACL SETUSER`](/commands/acl-setuser) command manual for more information on setting ACL access rules. +You can check the [`ACL SETUSER`]({{< relref "/commands/acl-setuser" >}}) command manual for more information on setting ACL access rules. If full key read permissions aren't set, the command will fail with an error. ## Storing the result of a SORT operation @@ -251,7 +250,7 @@ SORT mylist BY weight_* STORE resultkey ``` An interesting pattern using `SORT ... 
STORE` consists in associating an -[`EXPIRE`](/commands/expire) timeout to the resulting key so that in applications where the result +[`EXPIRE`]({{< relref "/commands/expire" >}}) timeout to the resulting key so that in applications where the result of a `SORT` operation can be cached for some time. Other clients will use the cached list instead of calling `SORT` for every request. @@ -260,7 +259,7 @@ calling `SORT ... STORE` again. Note that for correctly implementing this pattern it is important to avoid multiple clients rebuilding the cache at the same time. -Some kind of locking is needed here (for instance using [`SETNX`](/commands/setnx)). +Some kind of locking is needed here (for instance using [`SETNX`]({{< relref "/commands/setnx" >}})). ## Using hashes in `BY` and `GET` diff --git a/content/commands/sort_ro/index.md b/content/commands/sort_ro/index.md index 2b05ec1df4..ff2073c4de 100644 --- a/content/commands/sort_ro/index.md +++ b/content/commands/sort_ro/index.md @@ -99,20 +99,19 @@ key_specs: linkTitle: SORT_RO since: 7.0.0 summary: Returns the sorted elements of a list, a set, or a sorted set. -syntax_fmt: "SORT_RO key [BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET -\ - \ pattern ...]] [ASC | DESC] [ALPHA]" +syntax_fmt: "SORT_RO key [BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET \ + \ pattern ...]] [ASC | DESC] [ALPHA]" syntax_str: "[BY\_pattern] [LIMIT\_offset count] [GET\_pattern [GET pattern ...]]\ \ [ASC | DESC] [ALPHA]" title: SORT_RO --- -Read-only variant of the [`SORT`](/commands/sort) command. It is exactly like the original [`SORT`](/commands/sort) but refuses the `STORE` option and can safely be used in read-only replicas. +Read-only variant of the [`SORT`]({{< relref "/commands/sort" >}}) command. It is exactly like the original [`SORT`]({{< relref "/commands/sort" >}}) but refuses the `STORE` option and can safely be used in read-only replicas. 
-Since the original [`SORT`](/commands/sort) has a `STORE` option it is technically flagged as a writing command in the Redis command table. For this reason read-only replicas in a Redis Cluster will redirect it to the master instance even if the connection is in read-only mode (see the [`READONLY`](/commands/readonly) command of Redis Cluster). +Since the original [`SORT`]({{< relref "/commands/sort" >}}) has a `STORE` option it is technically flagged as a writing command in the Redis command table. For this reason read-only replicas in a Redis Cluster will redirect it to the master instance even if the connection is in read-only mode (see the [`READONLY`]({{< relref "/commands/readonly" >}}) command of Redis Cluster). -The `SORT_RO` variant was introduced in order to allow [`SORT`](/commands/sort) behavior in read-only replicas without breaking compatibility on command flags. +The `SORT_RO` variant was introduced in order to allow [`SORT`]({{< relref "/commands/sort" >}}) behavior in read-only replicas without breaking compatibility on command flags. -See original [`SORT`](/commands/sort) for more details. +See original [`SORT`]({{< relref "/commands/sort" >}}) for more details. ## Examples diff --git a/content/commands/spop/index.md b/content/commands/spop/index.md index cd7841ed46..8bc9ae4533 100644 --- a/content/commands/spop/index.md +++ b/content/commands/spop/index.md @@ -62,7 +62,7 @@ title: SPOP --- Removes and returns one or more random members from the set value store at `key`. -This operation is similar to [`SRANDMEMBER`](/commands/srandmember), that returns one or more random elements from a set but does not remove it. +This operation is similar to [`SRANDMEMBER`]({{< relref "/commands/srandmember" >}}), that returns one or more random elements from a set but does not remove it. By default, the command pops a single member from the set. 
When provided with the optional `count` argument, the reply will consist of up to `count` members, diff --git a/content/commands/srandmember/index.md b/content/commands/srandmember/index.md index ec52e780e6..60b8cea921 100644 --- a/content/commands/srandmember/index.md +++ b/content/commands/srandmember/index.md @@ -59,7 +59,7 @@ title: SRANDMEMBER When called with just the `key` argument, return a random element from the set value stored at `key`. If the provided `count` argument is positive, return an array of **distinct elements**. -The array's length is either `count` or the set's cardinality ([`SCARD`](/commands/scard)), whichever is lower. +The array's length is either `count` or the set's cardinality ([`SCARD`]({{< relref "/commands/scard" >}})), whichever is lower. If called with a negative `count`, the behavior changes and the command is allowed to return the **same element multiple times**. In this case, the number of returned elements is the absolute value of the specified `count`. diff --git a/content/commands/sscan/index.md b/content/commands/sscan/index.md index 7c0f7f08b9..28b3c1a5d2 100644 --- a/content/commands/sscan/index.md +++ b/content/commands/sscan/index.md @@ -62,4 +62,4 @@ syntax_fmt: "SSCAN key cursor [MATCH\_pattern] [COUNT\_count]" syntax_str: "cursor [MATCH\_pattern] [COUNT\_count]" title: SSCAN --- -See [`SCAN`](/commands/scan) for `SSCAN` documentation. +See [`SCAN`]({{< relref "/commands/scan" >}}) for `SSCAN` documentation. diff --git a/content/commands/subscribe/index.md b/content/commands/subscribe/index.md index b3e37ef2af..6fd373977b 100644 --- a/content/commands/subscribe/index.md +++ b/content/commands/subscribe/index.md @@ -37,12 +37,12 @@ title: SUBSCRIBE Subscribes the client to the specified channels. 
Once the client enters the subscribed state it is not supposed to issue any -other commands, except for additional `SUBSCRIBE`, [`SSUBSCRIBE`](/commands/ssubscribe), [`PSUBSCRIBE`](/commands/psubscribe), [`UNSUBSCRIBE`](/commands/unsubscribe), [`SUNSUBSCRIBE`](/commands/sunsubscribe), -[`PUNSUBSCRIBE`](/commands/punsubscribe), [`PING`](/commands/ping), [`RESET`](/commands/reset) and [`QUIT`](/commands/quit) commands. -However, if RESP3 is used (see [`HELLO`](/commands/hello)) it is possible for a client to issue any commands while in subscribed state. +other commands, except for additional `SUBSCRIBE`, [`SSUBSCRIBE`]({{< relref "/commands/ssubscribe" >}}), [`PSUBSCRIBE`]({{< relref "/commands/psubscribe" >}}), [`UNSUBSCRIBE`]({{< relref "/commands/unsubscribe" >}}), [`SUNSUBSCRIBE`]({{< relref "/commands/sunsubscribe" >}}), +[`PUNSUBSCRIBE`]({{< relref "/commands/punsubscribe" >}}), [`PING`]({{< relref "/commands/ping" >}}), [`RESET`]({{< relref "/commands/reset" >}}) and [`QUIT`]({{< relref "/commands/quit" >}}) commands. +However, if RESP3 is used (see [`HELLO`]({{< relref "/commands/hello" >}})) it is possible for a client to issue any commands while in subscribed state. -For more information, see [Pub/sub](/docs/interact/pubsub/). +For more information, see [Pub/sub]({{< relref "/develop/interact/pubsub" >}}). ## Behavior change history -* `>= 6.2.0`: [`RESET`](/commands/reset) can be called to exit subscribed state. +* `>= 6.2.0`: [`RESET`]({{< relref "/commands/reset" >}}) can be called to exit subscribed state. diff --git a/content/commands/substr/index.md b/content/commands/substr/index.md index 77206d469d..d80b7f9cb8 100644 --- a/content/commands/substr/index.md +++ b/content/commands/substr/index.md @@ -50,7 +50,7 @@ key_specs: limit: 0 type: range linkTitle: SUBSTR -replaced_by: '[`GETRANGE`](/commands/getrange)' +replaced_by: '[`GETRANGE`]({{< relref "/commands/getrange" >}})' since: 1.0.0 summary: Returns a substring from a string value. 
syntax_fmt: SUBSTR key start end diff --git a/content/commands/sunionstore/index.md b/content/commands/sunionstore/index.md index c2ae426d2c..69c4b3a49b 100644 --- a/content/commands/sunionstore/index.md +++ b/content/commands/sunionstore/index.md @@ -63,7 +63,7 @@ syntax_fmt: SUNIONSTORE destination key [key ...] syntax_str: key [key ...] title: SUNIONSTORE --- -This command is equal to [`SUNION`](/commands/sunion), but instead of returning the resulting set, +This command is equal to [`SUNION`]({{< relref "/commands/sunion" >}}), but instead of returning the resulting set, it is stored in `destination`. If `destination` already exists, it is overwritten. diff --git a/content/commands/sync/index.md b/content/commands/sync/index.md index 3db39c81cd..cf771898f7 100644 --- a/content/commands/sync/index.md +++ b/content/commands/sync/index.md @@ -33,7 +33,7 @@ Initiates a replication stream from the master. The `SYNC` command is called by Redis replicas for initiating a replication stream from the master. It has been replaced in newer versions of Redis by - [`PSYNC`](/commands/psync). + [`PSYNC`]({{< relref "/commands/psync" >}}). For more information about replication in Redis please check the [replication page][tr]. diff --git a/content/commands/tdigest.add/index.md b/content/commands/tdigest.add/index.md index b7441a7908..d67142328e 100644 --- a/content/commands/tdigest.add/index.md +++ b/content/commands/tdigest.add/index.md @@ -45,7 +45,7 @@ is value of an observation (floating-point). ## Return value -[Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly, or [] otherwise. +[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly, or [] otherwise. 
## Examples diff --git a/content/commands/tdigest.byrank/index.md b/content/commands/tdigest.byrank/index.md index b2e83ae1d1..1023c0a214 100644 --- a/content/commands/tdigest.byrank/index.md +++ b/content/commands/tdigest.byrank/index.md @@ -52,7 +52,7 @@ _n_-1 is the rank of the value of the largest observation; _n_ denotes the numbe ## Return value -[Array reply](/docs/reference/protocol-spec#arrays) - an array of floating-points populated with value_1, value_2, ..., value_R: +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) - an array of floating-points populated with value_1, value_2, ..., value_R: - Return an accurate result when `rank` is 0 (the value of the smallest observation) - Return an accurate result when `rank` is _n_-1 (the value of the largest observation), where _n_ denotes the number of observations added to the sketch. diff --git a/content/commands/tdigest.byrevrank/index.md b/content/commands/tdigest.byrevrank/index.md index 5f1a390a0e..3d1d1c1416 100644 --- a/content/commands/tdigest.byrevrank/index.md +++ b/content/commands/tdigest.byrevrank/index.md @@ -51,7 +51,7 @@ _n_-1 is the reverse rank of the value of the smallest observation; _n_ denotes ## Return value -[Array reply](/docs/reference/protocol-spec#arrays) - an array of floating-points populated with value_1, value_2, ..., value_R: +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) - an array of floating-points populated with value_1, value_2, ..., value_R: - Return an accurate result when `revrank` is 0 (the value of the largest observation) - Return an accurate result when `revrank` is _n_-1 (the value of the smallest observation), where _n_ denotes the number of observations added to the sketch. 
diff --git a/content/commands/tdigest.cdf/index.md b/content/commands/tdigest.cdf/index.md index a512bdeeaa..854696a07c 100644 --- a/content/commands/tdigest.cdf/index.md +++ b/content/commands/tdigest.cdf/index.md @@ -48,7 +48,7 @@ is value for which the CDF (Cumulative Distribution Function) should be retrieve ## Return value -[Array reply](/docs/reference/protocol-spec#arrays) - the command returns an array of floating-points populated with fraction_1, fraction_2, ..., fraction_N. +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) - the command returns an array of floating-points populated with fraction_1, fraction_2, ..., fraction_N. All values are 'nan' if the sketch is empty. diff --git a/content/commands/tdigest.create/index.md b/content/commands/tdigest.create/index.md index 16fd1aeb86..c178d22909 100644 --- a/content/commands/tdigest.create/index.md +++ b/content/commands/tdigest.create/index.md @@ -47,7 +47,7 @@ is a controllable tradeoff between accuracy and memory consumption. 100 is a com ## Return value -[Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly, or [] otherwise. +[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly, or [] otherwise. ## Examples diff --git a/content/commands/tdigest.info/index.md b/content/commands/tdigest.info/index.md index 248fd8f623..c80879f1ab 100644 --- a/content/commands/tdigest.info/index.md +++ b/content/commands/tdigest.info/index.md @@ -36,19 +36,19 @@ is key name for an existing t-digest sketch. ## Return value -[Array reply](/docs/reference/protocol-spec#arrays) with information about the sketch (name-value pairs): +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with information about the sketch (name-value pairs): -| Name
[Simple string reply](/docs/reference/protocol-spec#simple-strings) | Description +| Name
[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) | Description | ---------------------------- | - -| `Compression` | [Integer reply](/docs/reference/protocol-spec#integers)
The compression (controllable trade-off between accuracy and memory consumption) of the sketch -| `Capacity` | [Integer reply](/docs/reference/protocol-spec#integers)
Size of the buffer used for storing the centroids and for the incoming unmerged observations -| `Merged nodes` | [Integer reply](/docs/reference/protocol-spec#integers)
Number of merged observations -| `Unmerged nodes` | [Integer reply](/docs/reference/protocol-spec#integers)
Number of buffered nodes (uncompressed observations) -| `Merged weight` | [Integer reply](/docs/reference/protocol-spec#integers)
Weight of values of the merged nodes -| `Unmerged weight` | [Integer reply](/docs/reference/protocol-spec#integers)
Weight of values of the unmerged nodes (uncompressed observations) -| `Observations` | [Integer reply](/docs/reference/protocol-spec#integers)
Number of observations added to the sketch -| `Total compressions` | [Integer reply](/docs/reference/protocol-spec#integers)
Number of times this sketch compressed data together -| `Memory usage` | [Integer reply](/docs/reference/protocol-spec#integers)
Number of bytes allocated for the sketch +| `Compression` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
The compression (controllable trade-off between accuracy and memory consumption) of the sketch +| `Capacity` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Size of the buffer used for storing the centroids and for the incoming unmerged observations +| `Merged nodes` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Number of merged observations +| `Unmerged nodes` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Number of buffered nodes (uncompressed observations) +| `Merged weight` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Weight of values of the merged nodes +| `Unmerged weight` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Weight of values of the unmerged nodes (uncompressed observations) +| `Observations` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Number of observations added to the sketch +| `Total compressions` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Number of times this sketch compressed data together +| `Memory usage` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Number of bytes allocated for the sketch ## Examples diff --git a/content/commands/tdigest.max/index.md b/content/commands/tdigest.max/index.md index 4b2322d811..16fbaecb58 100644 --- a/content/commands/tdigest.max/index.md +++ b/content/commands/tdigest.max/index.md @@ -35,7 +35,7 @@ is key name for an existing t-digest sketch. ## Return value -[Simple string reply](/docs/reference/protocol-spec#simple-strings) of maximum observation value from a sketch. The result is always accurate. 'nan' if the sketch is empty. +[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) of maximum observation value from a sketch. The result is always accurate. 'nan' if the sketch is empty. ## Examples diff --git a/content/commands/tdigest.merge/index.md b/content/commands/tdigest.merge/index.md index 45bdbaddcb..6e75fe1fe5 100644 --- a/content/commands/tdigest.merge/index.md +++ b/content/commands/tdigest.merge/index.md @@ -40,9 +40,8 @@ module: Bloom since: 2.4.0 stack_path: docs/data-types/probabilistic summary: Merges multiple t-digest sketches into a single sketch -syntax_fmt: "TDIGEST.MERGE destination-key numkeys source-key [source-key ...] - \ - \ [COMPRESSION compression] [OVERRIDE]" +syntax_fmt: TDIGEST.MERGE destination-key numkeys source-key [source-key ...] [COMPRESSION + compression] [OVERRIDE] syntax_str: numkeys source-key [source-key ...] [COMPRESSION compression] [OVERRIDE] title: TDIGEST.MERGE --- diff --git a/content/commands/tdigest.min/index.md b/content/commands/tdigest.min/index.md index 77f4fc8244..416efbe181 100644 --- a/content/commands/tdigest.min/index.md +++ b/content/commands/tdigest.min/index.md @@ -34,7 +34,7 @@ is key name for an existing t-digest sketch. ## Return value -[Simple string reply](/docs/reference/protocol-spec#simple-strings) of minimum observation value from a sketch. The result is always accurate. 'nan' if the sketch is empty. 
+[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) of minimum observation value from a sketch. The result is always accurate. 'nan' if the sketch is empty. ## Examples diff --git a/content/commands/tdigest.quantile/index.md b/content/commands/tdigest.quantile/index.md index 4eae93b76b..7cf0e1707f 100644 --- a/content/commands/tdigest.quantile/index.md +++ b/content/commands/tdigest.quantile/index.md @@ -46,7 +46,7 @@ is the input fraction (between 0 and 1 inclusively) ## Return value -[Array reply](/docs/reference/protocol-spec#arrays) - an array of estimates (floating-point) populated with value_1, value_2, ..., value_N. +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) - an array of estimates (floating-point) populated with value_1, value_2, ..., value_N. - Return an accurate result when `quantile` is 0 (the value of the smallest observation) - Return an accurate result when `quantile` is 1 (the value of the largest observation) diff --git a/content/commands/tdigest.rank/index.md b/content/commands/tdigest.rank/index.md index 43d7a2cebc..8cea5c2cec 100644 --- a/content/commands/tdigest.rank/index.md +++ b/content/commands/tdigest.rank/index.md @@ -46,7 +46,7 @@ is input value for which the rank should be estimated. ## Return value -[Array reply](/docs/reference/protocol-spec#arrays) - an array of integers populated with rank_1, rank_2, ..., rank_V: +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) - an array of integers populated with rank_1, rank_2, ..., rank_V: - -1 - when `value` is smaller than the value of the smallest observation. - The number of observations - when `value` is larger than the value of the largest observation. 
diff --git a/content/commands/tdigest.reset/index.md b/content/commands/tdigest.reset/index.md index 4b7a489fcd..78bc11e6db 100644 --- a/content/commands/tdigest.reset/index.md +++ b/content/commands/tdigest.reset/index.md @@ -34,7 +34,7 @@ is key name for an existing t-digest sketch. ## Return value -[Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly, or [] otherwise. +[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly, or [] otherwise. ## Examples diff --git a/content/commands/tdigest.revrank/index.md b/content/commands/tdigest.revrank/index.md index 0e8f39700b..7b486491a7 100644 --- a/content/commands/tdigest.revrank/index.md +++ b/content/commands/tdigest.revrank/index.md @@ -46,7 +46,7 @@ is input value for which the reverse rank should be estimated. ## Return value -[Array reply](/docs/reference/protocol-spec#arrays) - an array of integers populated with revrank_1, revrank_2, ..., revrank_V: +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) - an array of integers populated with revrank_1, revrank_2, ..., revrank_V: - -1 - when `value` is larger than the value of the largest observation. - The number of observations - when `value` is smaller than the value of the smallest observation. diff --git a/content/commands/tdigest.trimmed_mean/index.md b/content/commands/tdigest.trimmed_mean/index.md index 010d6e68b8..1f8516a9a6 100644 --- a/content/commands/tdigest.trimmed_mean/index.md +++ b/content/commands/tdigest.trimmed_mean/index.md @@ -59,7 +59,7 @@ When equal to 1: No high cut. ## Return value -[Simple string reply](/docs/reference/protocol-spec#simple-strings) estimation of the mean value. 'nan' if the sketch is empty. +[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) estimation of the mean value. 'nan' if the sketch is empty. 
## Examples diff --git a/content/commands/tfcall/index.md b/content/commands/tfcall/index.md index b5f3a8114d..cbbed00868 100644 --- a/content/commands/tfcall/index.md +++ b/content/commands/tfcall/index.md @@ -91,7 +91,7 @@ The arguments passed to the function. `TFCALL` returns either * The return value of the function. -* [Error reply](/docs/reference/protocol-spec/#resp-errors) when the function execution failed. +* [Error reply]({{< baseurl >}}/develop/reference/protocol-spec#resp-errors) when the function execution failed. ## Examples diff --git a/content/commands/tfcallasync/index.md b/content/commands/tfcallasync/index.md index 492b2c0cb5..6102407162 100644 --- a/content/commands/tfcallasync/index.md +++ b/content/commands/tfcallasync/index.md @@ -91,7 +91,7 @@ The arguments passed to the function. `TFCALLASYNC` returns either * The return value of the function. -* [Error reply](/docs/reference/protocol-spec/#resp-errors) when the function execution failed. +* [Error reply]({{< baseurl >}}/develop/reference/protocol-spec#resp-errors) when the function execution failed. ## Examples diff --git a/content/commands/tfunction-delete/index.md b/content/commands/tfunction-delete/index.md index 679751e5ba..133ad7de93 100644 --- a/content/commands/tfunction-delete/index.md +++ b/content/commands/tfunction-delete/index.md @@ -33,8 +33,7 @@ module: Triggers and functions since: 2.0.0 stack_path: docs/interact/programmability/triggers-and-functions summary: Delete a JavaScript library from Redis by name -syntax: "TFUNCTION DELETE \"\" -" +syntax: 'TFUNCTION DELETE "" ' syntax_fmt: TFUNCTION DELETE library name syntax_str: '' title: TFUNCTION DELETE @@ -54,8 +53,8 @@ The name of the library to delete. `TFUNCTION DELETE` returns either -* ["OK"](/docs/reference/protocol-spec/#resp-simple-strings) when the library was deleted correctly. -* [Error reply](/docs/reference/protocol-spec/#resp-errors) when the library could not be deleted. 
+* ["OK"]({{< baseurl >}}/develop/reference/protocol-spec#resp-simple-strings) when the library was deleted correctly. +* [Error reply]({{< baseurl >}}/develop/reference/protocol-spec#resp-errors) when the library could not be deleted. ## Examples diff --git a/content/commands/tfunction-list/index.md b/content/commands/tfunction-list/index.md index 4f0ac06fb8..f0123229b6 100644 --- a/content/commands/tfunction-list/index.md +++ b/content/commands/tfunction-list/index.md @@ -52,8 +52,7 @@ module: Triggers and functions since: 2.0.0 stack_path: docs/interact/programmability/triggers-and-functions summary: List all JavaScript libraries loaded into Redis -syntax: "TFUNCTION LIST [WITHCODE] [VERBOSE] [v] [LIBRARY ] -" +syntax: 'TFUNCTION LIST [WITHCODE] [VERBOSE] [v] [LIBRARY ] ' syntax_fmt: "TFUNCTION LIST [LIBRARYNAME\_library name] [WITHCODE] [VERBOSE] [V]" syntax_str: '[WITHCODE] [VERBOSE] [V]' title: TFUNCTION LIST diff --git a/content/commands/tfunction-load/index.md b/content/commands/tfunction-load/index.md index 725c65b18e..005a14771c 100644 --- a/content/commands/tfunction-load/index.md +++ b/content/commands/tfunction-load/index.md @@ -43,8 +43,7 @@ module: Triggers and functions since: 2.0.0 stack_path: docs/interact/programmability/triggers-and-functions summary: Load a new JavaScript library into Redis -syntax: "TFUNCTION LOAD [REPLACE] [CONFIG ] \"\" -" +syntax: 'TFUNCTION LOAD [REPLACE] [CONFIG ] "" ' syntax_fmt: "TFUNCTION LOAD [REPLACE] [CONFIG\_config] library code" syntax_str: "[CONFIG\_config] library code" title: TFUNCTION LOAD @@ -78,8 +77,8 @@ A string representation of a JSON object that will be provided to the library on TFUNCTION LOAD returns either -* ["OK"](/docs/reference/protocol-spec/#resp-simple-strings) when the library was loaded correctly. -* [Error reply](/docs/reference/protocol-spec/#resp-errors) when the library could not be loaded. 
+* ["OK"]({{< baseurl >}}/develop/reference/protocol-spec#resp-simple-strings) when the library was loaded correctly. +* [Error reply]({{< baseurl >}}/develop/reference/protocol-spec#resp-errors) when the library could not be loaded. ## Examples diff --git a/content/commands/topk.add/index.md b/content/commands/topk.add/index.md index 4d948c13d7..f35ebe2268 100644 --- a/content/commands/topk.add/index.md +++ b/content/commands/topk.add/index.md @@ -41,7 +41,7 @@ This allows dynamic heavy-hitter detection of items being entered or expelled fr ### Return -[Array reply](/docs/reference/protocol-spec#arrays) of [Simple string reply](/docs/reference/protocol-spec#simple-strings) - if an element was dropped from the TopK list, [Nil reply](/docs/reference/protocol-spec#bulk-strings) otherwise.. +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - if an element was dropped from the TopK list, [Nil reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}) otherwise.. #### Example diff --git a/content/commands/topk.count/index.md b/content/commands/topk.count/index.md index fdfbd45522..33300c952d 100644 --- a/content/commands/topk.count/index.md +++ b/content/commands/topk.count/index.md @@ -43,7 +43,7 @@ the number of appearances of an item. ## Return -[Array reply](/docs/reference/protocol-spec#arrays) of [Integer reply](/docs/reference/protocol-spec#integers) - count for responding item. +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - count for responding item. 
## Examples diff --git a/content/commands/topk.incrby/index.md b/content/commands/topk.incrby/index.md index 15336e6f6c..21889447f3 100644 --- a/content/commands/topk.incrby/index.md +++ b/content/commands/topk.incrby/index.md @@ -46,7 +46,7 @@ If an item enters the Top-K list, the item which is expelled is returned. ## Return -[Array reply](/docs/reference/protocol-spec#arrays) of [Simple string reply](/docs/reference/protocol-spec#simple-strings) - if an element was dropped from the TopK list, [Nil reply](/docs/reference/protocol-spec#bulk-strings) otherwise.. +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - if an element was dropped from the TopK list, [Nil reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}) otherwise.. @example diff --git a/content/commands/topk.info/index.md b/content/commands/topk.info/index.md index 640ba823ac..fb700db931 100644 --- a/content/commands/topk.info/index.md +++ b/content/commands/topk.info/index.md @@ -33,7 +33,7 @@ Returns number of required items (k), width, depth and decay values. ## Return -[Array reply](/docs/reference/protocol-spec#arrays) with information of the filter. +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with information of the filter. ## Examples diff --git a/content/commands/topk.list/index.md b/content/commands/topk.list/index.md index 77cb2ad45b..bb3941961d 100644 --- a/content/commands/topk.list/index.md +++ b/content/commands/topk.list/index.md @@ -40,9 +40,9 @@ Return full list of items in Top K list. k (or less) items in Top K list. -[Array reply](/docs/reference/protocol-spec#arrays) of [Simple string reply](/docs/reference/protocol-spec#simple-strings) - the names of items in the TopK list. 
-If `WITHCOUNT` is requested, [Array reply](/docs/reference/protocol-spec#arrays) of [Simple string reply](/docs/reference/protocol-spec#simple-strings) and -[Integer reply](/docs/reference/protocol-spec#integers) pairs of the names of items in the TopK list and their count. +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - the names of items in the TopK list. +If `WITHCOUNT` is requested, [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) and +[Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) pairs of the names of items in the TopK list and their count. ## Examples diff --git a/content/commands/topk.query/index.md b/content/commands/topk.query/index.md index 532ed92d75..5a87b05aa1 100644 --- a/content/commands/topk.query/index.md +++ b/content/commands/topk.query/index.md @@ -38,7 +38,7 @@ Multiple items can be checked at once. ## Return -[Array reply](/docs/reference/protocol-spec#arrays) of [Integer reply](/docs/reference/protocol-spec#integers) - "1" if item is in Top-K, otherwise "0". +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - "1" if item is in Top-K, otherwise "0". ## Examples diff --git a/content/commands/topk.reserve/index.md b/content/commands/topk.reserve/index.md index 73e0404d26..a01b83080f 100644 --- a/content/commands/topk.reserve/index.md +++ b/content/commands/topk.reserve/index.md @@ -51,7 +51,7 @@ Optional parameters ## Return -[Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly, or [] otherwise. +[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly, or [] otherwise. 
## Examples diff --git a/content/commands/ts.add/index.md b/content/commands/ts.add/index.md index b68c106e52..11b490f9e9 100644 --- a/content/commands/ts.add/index.md +++ b/content/commands/ts.add/index.md @@ -77,21 +77,11 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Append a sample to a time series -syntax: "TS.ADD key timestamp value - [RETENTION retentionPeriod] - [ENCODING\ - \ [COMPRESSED|UNCOMPRESSED]] - [CHUNK_SIZE size] - [ON_DUPLICATE policy] -\ - \ [LABELS {label value}...] -" -syntax_fmt: "TS.ADD key timestamp value [RETENTION\_retentionPeriod] - [ENCODING\_\ - ] [CHUNK_SIZE\_size] - [ON_DUPLICATE\_] - [LABELS\_label value [label value ...]]" +syntax: 'TS.ADD key timestamp value [RETENTION retentionPeriod] [ENCODING [COMPRESSED|UNCOMPRESSED]] + [CHUNK_SIZE size] [ON_DUPLICATE policy] [LABELS {label value}...] ' +syntax_fmt: "TS.ADD key timestamp value [RETENTION\_retentionPeriod] [ENCODING\_] [CHUNK_SIZE\_size] [ON_DUPLICATE\_] [LABELS\_label value [label value ...]]" syntax_str: "timestamp value [RETENTION\_retentionPeriod] [ENCODING\_] [CHUNK_SIZE\_size] [ON_DUPLICATE\_] [LABELS\_label value [label value ...]]" @@ -124,43 +114,43 @@ is (double) numeric data value of the sample. The double number should follow [R Notes: - When specified key does not exist, a new time series is created. - if a [COMPACTION_POLICY](/docs/stack/timeseries/configuration/#compaction_policy) configuration parameter is defined, compacted time series would be created as well. + if a [COMPACTION_POLICY]({{< baseurl >}}/develop/data-types/timeseries/configuration#compaction_policy) configuration parameter is defined, compacted time series would be created as well. - If `timestamp` is older than the retention period compared to the maximum existing timestamp, the sample is discarded and an error is returned. 
- When adding a sample to a time series for which compaction rules are defined: - If all the original samples for an affected aggregated time bucket are available, the compacted value is recalculated based on the reported sample and the original samples. - If only a part of the original samples for an affected aggregated time bucket is available due to trimming caused in accordance with the time series RETENTION policy, the compacted value is recalculated based on the reported sample and the available original samples. - If the original samples for an affected aggregated time bucket are not available due to trimming caused in accordance with the time series RETENTION policy, the compacted value bucket is not updated. -- Explicitly adding samples to a compacted time series (using `TS.ADD`, [`TS.MADD`](/commands/ts.madd), [`TS.INCRBY`](/commands/ts.incrby), or [`TS.DECRBY`](/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. +- Explicitly adding samples to a compacted time series (using `TS.ADD`, [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. ## Optional arguments -The following arguments are optional because they can be set by [`TS.CREATE`](/commands/ts.create). +The following arguments are optional because they can be set by [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
RETENTION retentionPeriod is maximum retention period, compared to the maximum existing timestamp, in milliseconds. -Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`](/commands/ts.create). +Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
ENCODING enc specifies the series sample's encoding format. -Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`](/commands/ts.create). +Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
CHUNK_SIZE size is memory size, in bytes, allocated for each data chunk. -Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`](/commands/ts.create). +Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
ON_DUPLICATE policy -is overwrite key and database configuration for [DUPLICATE_POLICY](/docs/stack/timeseries/configuration/#duplicate_policy), the policy for handling samples with identical timestamps. -This override is effective only for this single command and does not set the time series duplication policy (which can be set with [`TS.ALTER`](/commands/ts.alter)). +is overwrite key and database configuration for [DUPLICATE_POLICY]({{< baseurl >}}/develop/data-types/timeseries/configuration#duplicate_policy), the policy for handling samples with identical timestamps. +This override is effective only for this single command and does not set the time series duplication policy (which can be set with [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter)). `policy` can be one of the following values: - `BLOCK`: ignore any newly reported value and reply with an error @@ -177,7 +167,7 @@ This argument has no effect when a new time series is created by this command. is set of label-value pairs that represent metadata labels of the time series. -Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`](/commands/ts.create). +Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
Notes: @@ -190,7 +180,7 @@ Use it only if you are creating a new time series. It is ignored if you are addi Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers) - the timestamp of the upserted sample +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - the timestamp of the upserted sample - [] on error (invalid arguments, wrong key type, etc.), when duplication policy is `BLOCK`, or when `timestamp` is older than the retention period compared to the maximum existing timestamp ## Complexity @@ -221,8 +211,8 @@ Add a sample to the time series, setting the sample's timestamp to the current U ## See also -[`TS.CREATE`](/commands/ts.create) +[`TS.CREATE`]({{< baseurl >}}/commands/ts.create) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.alter/index.md b/content/commands/ts.alter/index.md index f44c5859af..58672c4ef9 100644 --- a/content/commands/ts.alter/index.md +++ b/content/commands/ts.alter/index.md @@ -64,17 +64,10 @@ since: 1.0.0 stack_path: docs/data-types/timeseries summary: Update the retention, chunk size, duplicate policy, and labels of an existing time series -syntax: "TS.ALTER key - [RETENTION retentionPeriod] - [CHUNK_SIZE size] - [DUPLICATE_POLICY\ - \ policy] - [LABELS [{label value}...]] -" -syntax_fmt: "TS.ALTER key [RETENTION\_retentionPeriod] [CHUNK_SIZE\_size] - [DUPLICATE_POLICY\_\ - ] - [LABELS\_label value [label value ...]]" +syntax: 'TS.ALTER key [RETENTION retentionPeriod] [CHUNK_SIZE size] [DUPLICATE_POLICY + policy] [LABELS [{label value}...]] ' +syntax_fmt: "TS.ALTER key [RETENTION\_retentionPeriod] [CHUNK_SIZE\_size] [DUPLICATE_POLICY\_\ + ] [LABELS\_label value [label value ...]]" syntax_str: "[RETENTION\_retentionPeriod] [CHUNK_SIZE\_size] [DUPLICATE_POLICY\_] [LABELS\_label value [label value ...]]" title: TS.ALTER @@ -97,31 +90,31 @@ is key name for the time 
series.
RETENTION retentionPeriod -is maximum retention period, compared to the maximum existing timestamp, in milliseconds. See `RETENTION` in [`TS.CREATE`](/commands/ts.create). +is maximum retention period, compared to the maximum existing timestamp, in milliseconds. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
CHUNK_SIZE size -is the initial allocation size, in bytes, for the data part of each new chunk. Actual chunks may consume more memory. See `CHUNK_SIZE` in [`TS.CREATE`](/commands/ts.create). Changing this value does not affect existing chunks. +is the initial allocation size, in bytes, for the data part of each new chunk. Actual chunks may consume more memory. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). Changing this value does not affect existing chunks.
DUPLICATE_POLICY policy -is policy for handling multiple samples with identical timestamps. See `DUPLICATE_POLICY` in [`TS.CREATE`](/commands/ts.create). +is policy for handling multiple samples with identical timestamps. See `DUPLICATE_POLICY` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
LABELS [{label value}...] is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. -If `LABELS` is specified, the given label list is applied. Labels that are not present in the given list are removed implicitly. Specifying `LABELS` with no label-value pairs removes all existing labels. See `LABELS` in [`TS.CREATE`](/commands/ts.create). +If `LABELS` is specified, the given label list is applied. Labels that are not present in the given list are removed implicitly. Specifying `LABELS` with no label-value pairs removes all existing labels. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
## Return value Returns one of these replies: -- [Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly +- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly - [] on error (invalid arguments, wrong key type, key does not exist, etc.) ## Examples @@ -145,8 +138,8 @@ OK ## See also -[`TS.CREATE`](/commands/ts.create) +[`TS.CREATE`]({{< baseurl >}}/commands/ts.create) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.create/index.md b/content/commands/ts.create/index.md index 7035d3bb99..fb35aba866 100644 --- a/content/commands/ts.create/index.md +++ b/content/commands/ts.create/index.md @@ -73,19 +73,11 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Create a new time series -syntax: "TS.CREATE key - [RETENTION retentionPeriod] - [ENCODING [UNCOMPRESSED|COMPRESSED]]\ - \ - [CHUNK_SIZE size] - [DUPLICATE_POLICY policy] - [LABELS {label value}...] -" -syntax_fmt: "TS.CREATE key [RETENTION\_retentionPeriod] [ENCODING\_] [CHUNK_SIZE\_size] [DUPLICATE_POLICY\_] [LABELS\_label value [label value ...]]" +syntax: 'TS.CREATE key [RETENTION retentionPeriod] [ENCODING [UNCOMPRESSED|COMPRESSED]] [CHUNK_SIZE + size] [DUPLICATE_POLICY policy] [LABELS {label value}...] ' +syntax_fmt: "TS.CREATE key [RETENTION\_retentionPeriod] [ENCODING\_] [CHUNK_SIZE\_size] [DUPLICATE_POLICY\_] [LABELS\_label value [label value ...]]" syntax_str: "[RETENTION\_retentionPeriod] [ENCODING\_]\ \ [CHUNK_SIZE\_size] [DUPLICATE_POLICY\_]\ \ [LABELS\_label value [label value ...]]" @@ -105,17 +97,17 @@ is key name for the time series. Notes: -- If a key already exists, you get a Redis error reply, `TSDB: key already exists`. You can check for the existence of a key with the [`EXISTS`](/commands/exists) command. 
-- Other commands that also create a new time series when called with a key that does not exist are [`TS.ADD`](/commands/ts.add), [`TS.INCRBY`](/commands/ts.incrby), and [`TS.DECRBY`](/commands/ts.decrby). +- If a key already exists, you get a Redis error reply, `TSDB: key already exists`. You can check for the existence of a key with the [`EXISTS`]({{< relref "/commands/exists" >}}) command. +- Other commands that also create a new time series when called with a key that does not exist are [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), and [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby). ## Optional arguments
RETENTION retentionPeriod -is maximum age for samples compared to the highest reported timestamp, in milliseconds. Samples are expired based solely on the difference between their timestamp and the timestamps passed to subsequent [`TS.ADD`](/commands/ts.add), [`TS.MADD`](/commands/ts.madd), [`TS.INCRBY`](/commands/ts.incrby), and [`TS.DECRBY`](/commands/ts.decrby) calls with this key. +is maximum age for samples compared to the highest reported timestamp, in milliseconds. Samples are expired based solely on the difference between their timestamp and the timestamps passed to subsequent [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), and [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby) calls with this key. -When set to 0, samples never expire. When not specified, the option is set to the global [RETENTION_POLICY](/docs/stack/timeseries/configuration/#retention_policy) configuration of the database, which by default is 0. +When set to 0, samples never expire. When not specified, the option is set to the global [RETENTION_POLICY]({{< baseurl >}}/develop/data-types/timeseries/configuration#retention_policy) configuration of the database, which by default is 0.
ENCODING enc @@ -131,9 +123,9 @@ When not specified, the option is set to `COMPRESSED`.
CHUNK_SIZE size -is initial allocation size, in bytes, for the data part of each new chunk. Actual chunks may consume more memory. Changing chunkSize (using [`TS.ALTER`](/commands/ts.alter)) does not affect existing chunks. +is initial allocation size, in bytes, for the data part of each new chunk. Actual chunks may consume more memory. Changing chunkSize (using [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter)) does not affect existing chunks. -Must be a multiple of 8 in the range [48 .. 1048576]. When not specified, it is set to the global [CHUNK_SIZE_BYTES](/docs/stack/timeseries/configuration/#chunk_size_bytes) configuration of the database, which by default is 4096 (a single memory page). +Must be a multiple of 8 in the range [48 .. 1048576]. When not specified, it is set to the global [CHUNK_SIZE_BYTES]({{< baseurl >}}/develop/data-types/timeseries/configuration#chunk_size_bytes) configuration of the database, which by default is 4096 (a single memory page). Note: Before v1.6.10 no minimum was enforced. Between v1.6.10 and v1.6.17 and in v1.8.0 the minimum value was 128. Since v1.8.1 the minimum value is 48. @@ -148,7 +140,7 @@ The data in each key is stored in chunks. Each chunk contains header and data fo
DUPLICATE_POLICY policy -is policy for handling insertion ([`TS.ADD`](/commands/ts.add) and [`TS.MADD`](/commands/ts.madd)) of multiple samples with identical timestamps, with one of the following values: +is policy for handling insertion ([`TS.ADD`]({{< baseurl >}}/commands/ts.add) and [`TS.MADD`]({{< baseurl >}}/commands/ts.madd)) of multiple samples with identical timestamps, with one of the following values: - `BLOCK`: ignore any newly reported value and reply with an error - `FIRST`: ignore any newly reported value - `LAST`: override with the newly reported value @@ -156,21 +148,21 @@ is policy for handling insertion ([`TS.ADD`](/commands/ts.add) and [`TS.MADD`](/ - `MAX`: only override if the value is higher than the existing value - `SUM`: If a previous sample exists, add the new sample to it so that the updated value is equal to (previous + new). If no previous sample exists, set the updated value equal to the new value. - When not specified: set to the global [DUPLICATE_POLICY](/docs/stack/timeseries/configuration/#duplicate_policy) configuration of the database (which, by default, is `BLOCK`). + When not specified: set to the global [DUPLICATE_POLICY]({{< baseurl >}}/develop/data-types/timeseries/configuration#duplicate_policy) configuration of the database (which, by default, is `BLOCK`).
LABELS {label value}... is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. -The [`TS.MGET`](/commands/ts.mget), [`TS.MRANGE`](/commands/ts.mrange), and [`TS.MREVRANGE`](/commands/ts.mrevrange) commands operate on multiple time series based on their labels. The [`TS.QUERYINDEX`](/commands/ts.queryindex) command returns all time series keys matching a given filter based on their labels. +The [`TS.MGET`]({{< baseurl >}}/commands/ts.mget), [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange), and [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) commands operate on multiple time series based on their labels. The [`TS.QUERYINDEX`]({{< baseurl >}}/commands/ts.queryindex) command returns all time series keys matching a given filter based on their labels.
## Return value Returns one of these replies: -- [Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly +- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly - [] on error (invalid arguments, key already exists, etc.) ## Examples @@ -185,9 +177,9 @@ OK ## See also -[`TS.ADD`](/commands/ts.add) | [`TS.INCRBY`](/commands/ts.incrby) | [`TS.DECRBY`](/commands/ts.decrby) | [`TS.MGET`](/commands/ts.mget) | [`TS.MRANGE`](/commands/ts.mrange) | [`TS.MREVRANGE`](/commands/ts.mrevrange) | [`TS.QUERYINDEX`](/commands/ts.queryindex) +[`TS.ADD`]({{< baseurl >}}/commands/ts.add) | [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby) | [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby) | [`TS.MGET`]({{< baseurl >}}/commands/ts.mget) | [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) | [`TS.QUERYINDEX`]({{< baseurl >}}/commands/ts.queryindex) ## Related topics -- [RedisTimeSeries](/docs/stack/timeseries) +- [RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) - [RedisTimeSeries Version 1.2 Is Here!](https://redis.com/blog/redistimeseries-version-1-2-is-here/) diff --git a/content/commands/ts.createrule/index.md b/content/commands/ts.createrule/index.md index 78e4c225f3..5b6c1d9780 100644 --- a/content/commands/ts.createrule/index.md +++ b/content/commands/ts.createrule/index.md @@ -73,15 +73,9 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Create a compaction rule -syntax: "TS.CREATERULE sourceKey destKey - AGGREGATION aggregator bucketDuration\ - \ - [alignTimestamp] -" -syntax_fmt: "TS.CREATERULE sourceKey destKey AGGREGATION\_ bucketDuration\ +syntax: 'TS.CREATERULE sourceKey destKey AGGREGATION aggregator bucketDuration [alignTimestamp] ' +syntax_fmt: "TS.CREATERULE sourceKey destKey AGGREGATION\_ bucketDuration\ \ [alignTimestamp]" syntax_str: "destKey AGGREGATION\_ 
bucketDuration [alignTimestamp]" @@ -132,7 +126,7 @@ aggregates results into time buckets. - Only new samples that are added into the source series after the creation of the rule will be aggregated. - Calling `TS.CREATERULE` with a nonempty `destKey` may result in inconsistencies between the raw and the compacted data. -- Explicitly adding samples to a compacted time series (using [`TS.ADD`](/commands/ts.add), [`TS.MADD`](/commands/ts.madd), [`TS.INCRBY`](/commands/ts.incrby), or [`TS.DECRBY`](/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. +- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. - If no samples are added to the source time series during a bucket period. no _compacted sample_ is added to the destination time series. - The timestamp of a compacted sample added to the destination time series is set to the start timestamp the appropriate compaction bucket. For example, for a 10-minute compaction bucket with no alignment, the compacted samples timestamps are `x:00`, `x:10`, `x:20`, and so on. - Deleting `destKey` will cause the compaction rule to be deleted as well. 
@@ -153,7 +147,7 @@ For example, if `bucketDuration` is 24 hours (`24 * 3600 * 1000`), setting `alig Returns one of these replies: -- [Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly +- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly - [] on error (invalid arguments, wrong key type, etc.), when `sourceKey` does not exist, when `destKey` does not exist, when `sourceKey` is already a destination of a compaction rule, when `destKey` is already a source or a destination of a compaction rule, or when `sourceKey` and `destKey` are identical ## Examples @@ -187,8 +181,8 @@ Now, also create a compacted time series named _dailyDiffTemp_. This time series ## See also -[`TS.DELETERULE`](/commands/ts.deleterule) +[`TS.DELETERULE`]({{< baseurl >}}/commands/ts.deleterule) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.decrby/index.md b/content/commands/ts.decrby/index.md index e3ce620ff0..d210e9b682 100644 --- a/content/commands/ts.decrby/index.md +++ b/content/commands/ts.decrby/index.md @@ -53,18 +53,10 @@ stack_path: docs/data-types/timeseries summary: Decrease the value of the sample with the maximum existing timestamp, or create a new sample with a value equal to the value of the sample with the maximum existing timestamp with a given decrement -syntax: "TS.DECRBY key subtrahend - [TIMESTAMP timestamp] - [RETENTION retentionPeriod]\ - \ - [UNCOMPRESSED] - [CHUNK_SIZE size] - [LABELS {label value}...] -" -syntax_fmt: "TS.DECRBY key value [TIMESTAMP\_timestamp] - [RETENTION\_retentionPeriod]\ - \ [UNCOMPRESSED] [CHUNK_SIZE\_size] - [LABELS\_label value [label value ...]]" +syntax: 'TS.DECRBY key subtrahend [TIMESTAMP timestamp] [RETENTION retentionPeriod] [UNCOMPRESSED] + [CHUNK_SIZE size] [LABELS {label value}...] 
' +syntax_fmt: "TS.DECRBY key value [TIMESTAMP\_timestamp] [RETENTION\_retentionPeriod]\ + \ [UNCOMPRESSED] [CHUNK_SIZE\_size] [LABELS\_label value [label value ...]]" syntax_str: "value [TIMESTAMP\_timestamp] [RETENTION\_retentionPeriod] [UNCOMPRESSED]\ \ [CHUNK_SIZE\_size] [LABELS\_label value [label value ...]]" title: TS.DECRBY @@ -89,7 +81,7 @@ is numeric value of the subtrahend (double). Notes - When specified key does not exist, a new time series is created. - You can use this command as a counter or gauge that automatically gets history as a time series. -- Explicitly adding samples to a compacted time series (using [`TS.ADD`](/commands/ts.add), [`TS.MADD`](/commands/ts.madd), [`TS.INCRBY`](/commands/ts.incrby), or `TS.DECRBY`) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. +- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), or `TS.DECRBY`) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. ## Optional arguments @@ -109,22 +101,22 @@ When not specified, the timestamp is set to the Unix time of the server's clock.
RETENTION retentionPeriod -is maximum retention period, compared to the maximum existing timestamp, in milliseconds. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`](/commands/ts.create). +is maximum retention period, compared to the maximum existing timestamp, in milliseconds. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
UNCOMPRESSED -changes data storage from compressed (default) to uncompressed. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`](/commands/ts.create). +changes data storage from compressed (default) to uncompressed. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
CHUNK_SIZE size -is memory size, in bytes, allocated for each data chunk. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`](/commands/ts.create). +is memory size, in bytes, allocated for each data chunk. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
LABELS [{label value}...] -is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`](/commands/ts.create). +is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
Notes @@ -139,13 +131,13 @@ is set of label-value pairs that represent metadata labels of the key and serve Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers) - the timestamp of the upserted sample +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - the timestamp of the upserted sample - [] on error (invalid arguments, wrong key type, etc.), or when `timestamp` is not equal to or higher than the maximum existing timestamp ## See also -[`TS.INCRBY`](/commands/ts.incrby) | [`TS.CREATE`](/commands/ts.create) +[`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby) | [`TS.CREATE`]({{< baseurl >}}/commands/ts.create) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.del/index.md b/content/commands/ts.del/index.md index bfb7b7295f..df7db06947 100644 --- a/content/commands/ts.del/index.md +++ b/content/commands/ts.del/index.md @@ -69,7 +69,7 @@ The given timestamp interval is closed (inclusive), meaning that samples whose t Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers) - the number of samples that were deleted +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - the number of samples that were deleted - [] on error (invalid arguments, wrong key type, etc.), when `timestamp` is older than the retention period compared to the maximum existing timestamp, or when an affected compaction bucket cannot be recalculated ## Examples @@ -105,8 +105,8 @@ Delete the range of data points for temperature in Tel Aviv. 
## See also -[`TS.ADD`](/commands/ts.add) +[`TS.ADD`]({{< baseurl >}}/commands/ts.add) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.deleterule/index.md b/content/commands/ts.deleterule/index.md index 465a9cb47a..9eab5396b7 100644 --- a/content/commands/ts.deleterule/index.md +++ b/content/commands/ts.deleterule/index.md @@ -51,13 +51,13 @@ is key name for destination (compacted) time series. Returns one of these replies: -- [Simple string reply](/docs/reference/protocol-spec#simple-strings) - `OK` if executed correctly +- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) - `OK` if executed correctly - [] on error (invalid arguments, etc.), or when such rule does not exist ## See also -[`TS.CREATERULE`](/commands/ts.createrule) +[`TS.CREATERULE`]({{< baseurl >}}/commands/ts.createrule) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.get/index.md b/content/commands/ts.get/index.md index e79ac87a3b..8d0054ef23 100644 --- a/content/commands/ts.get/index.md +++ b/content/commands/ts.get/index.md @@ -25,9 +25,7 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Get the sample with the highest timestamp from a given time series -syntax: "TS.GET key - [LATEST] -" +syntax: 'TS.GET key [LATEST] ' syntax_fmt: TS.GET key [LATEST] syntax_str: '[LATEST]' title: TS.GET @@ -57,8 +55,8 @@ The data in the latest bucket of a compaction is possibly partial. 
A bucket is _ Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) of a single ([Integer reply](/docs/reference/protocol-spec#integers), [Simple string reply](/docs/reference/protocol-spec#simple-strings)) pair representing (timestamp, value(double)) of the sample with the highest timestamp -- An empty [Array reply](/docs/reference/protocol-spec#arrays) - when the time series is empty +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of a single ([Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}})) pair representing (timestamp, value(double)) of the sample with the highest timestamp +- An empty [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) - when the time series is empty - [] (invalid arguments, wrong key type, key does not exist, etc.) ## Examples @@ -136,8 +134,8 @@ Get the latest maximum daily temperature (the temperature with the highest times ## See also -[`TS.MGET`](/commands/ts.mget) +[`TS.MGET`]({{< baseurl >}}/commands/ts.mget) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.incrby/index.md b/content/commands/ts.incrby/index.md index 72d7ae7604..c7f8dd79d7 100644 --- a/content/commands/ts.incrby/index.md +++ b/content/commands/ts.incrby/index.md @@ -53,18 +53,10 @@ stack_path: docs/data-types/timeseries summary: Increase the value of the sample with the maximum existing timestamp, or create a new sample with a value equal to the value of the sample with the maximum existing timestamp with a given increment -syntax: "TS.INCRBY key addend - [TIMESTAMP timestamp] - [RETENTION retentionPeriod]\ - \ - [UNCOMPRESSED] - [CHUNK_SIZE size] - [LABELS {label value}...] 
-" -syntax_fmt: "TS.INCRBY key value [TIMESTAMP\_timestamp] - [RETENTION\_retentionPeriod]\ - \ [UNCOMPRESSED] [CHUNK_SIZE\_size] - [LABELS\_label value [label value ...]]" +syntax: 'TS.INCRBY key addend [TIMESTAMP timestamp] [RETENTION retentionPeriod] [UNCOMPRESSED] + [CHUNK_SIZE size] [LABELS {label value}...] ' +syntax_fmt: "TS.INCRBY key value [TIMESTAMP\_timestamp] [RETENTION\_retentionPeriod]\ + \ [UNCOMPRESSED] [CHUNK_SIZE\_size] [LABELS\_label value [label value ...]]" syntax_str: "value [TIMESTAMP\_timestamp] [RETENTION\_retentionPeriod] [UNCOMPRESSED]\ \ [CHUNK_SIZE\_size] [LABELS\_label value [label value ...]]" title: TS.INCRBY @@ -89,7 +81,7 @@ is numeric value of the addend (double). Notes - When specified key does not exist, a new time series is created. - You can use this command as a counter or gauge that automatically gets history as a time series. -- Explicitly adding samples to a compacted time series (using [`TS.ADD`](/commands/ts.add), [`TS.MADD`](/commands/ts.madd), `TS.INCRBY`, or [`TS.DECRBY`](/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. +- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), `TS.INCRBY`, or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. ## Optional arguments @@ -109,23 +101,23 @@ When not specified, the timestamp is set to the Unix time of the server's clock.
RETENTION retentionPeriod -is maximum retention period, compared to the maximum existing timestamp, in milliseconds. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`](/commands/ts.create). +is maximum retention period, compared to the maximum existing timestamp, in milliseconds. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
UNCOMPRESSED -changes data storage from compressed (default) to uncompressed. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`](/commands/ts.create). +changes data storage from compressed (default) to uncompressed. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
CHUNK_SIZE size -is memory size, in bytes, allocated for each data chunk. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`](/commands/ts.create). +is memory size, in bytes, allocated for each data chunk. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
LABELS [{label value}...] -is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`](/commands/ts.create). +is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create).
Notes @@ -137,7 +129,7 @@ is set of label-value pairs that represent metadata labels of the key and serve Returns one of these replies: -- [Integer reply](/docs/reference/protocol-spec#integers) - the timestamp of the upserted sample +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - the timestamp of the upserted sample - [] on error (invalid arguments, wrong key type, etc.), or when `timestamp` is not equal to or higher than the maximum existing timestamp ## Examples @@ -179,8 +171,8 @@ The timestamp is filled automatically. ## See also -[`TS.DECRBY`](/commands/ts.decrby) | [`TS.CREATE`](/commands/ts.create) +[`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby) | [`TS.CREATE`]({{< baseurl >}}/commands/ts.create) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.info/index.md b/content/commands/ts.info/index.md index 6b0f028b42..1395a70cab 100644 --- a/content/commands/ts.info/index.md +++ b/content/commands/ts.info/index.md @@ -24,9 +24,7 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Returns information and statistics for a time series -syntax: "TS.INFO key - [DEBUG] -" +syntax: 'TS.INFO key [DEBUG] ' syntax_fmt: TS.INFO key [DEBUG] syntax_str: '[DEBUG]' title: TS.INFO @@ -53,29 +51,29 @@ is an optional flag to get a more detailed information about the chunks. ## Return value -[Array reply](/docs/reference/protocol-spec#arrays) with information about the time series (name-value pairs): +[Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with information about the time series (name-value pairs): -| Name
[Simple string reply](/docs/reference/protocol-spec#simple-strings) | Description +| Name
[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) | Description | ---------------------------- | - -| `totalSamples` | [Integer reply](/docs/reference/protocol-spec#integers)
Total number of samples in this time series -| `memoryUsage` | [Integer reply](/docs/reference/protocol-spec#integers)
Total number of bytes allocated for this time series, which is the sum of
- The memory used for storing the series' configuration parameters (retention period, duplication policy, etc.)
- The memory used for storing the series' compaction rules
- The memory used for storing the series' labels (key-value pairs)
- The memory used for storing the chunks (chunk header + compressed/uncompressed data) -| `firstTimestamp` | [Integer reply](/docs/reference/protocol-spec#integers)
First timestamp present in this time series (Unix timestamp in milliseconds) -| `lastTimestamp` | [Integer reply](/docs/reference/protocol-spec#integers)
Last timestamp present in this time series (Unix timestamp in milliseconds) -| `retentionTime` | [Integer reply](/docs/reference/protocol-spec#integers)
The retention period, in milliseconds, for this time series -| `chunkCount` | [Integer reply](/docs/reference/protocol-spec#integers)
Number of chunks used for this time series -| `chunkSize` | [Integer reply](/docs/reference/protocol-spec#integers)
The initial allocation size, in bytes, for the data part of each new chunk.
Actual chunks may consume more memory. Changing the chunk size (using [`TS.ALTER`](/commands/ts.alter)) does not affect existing chunks. -| `chunkType` | [Simple string reply](/docs/reference/protocol-spec#simple-strings)
The chunks type: `compressed` or `uncompressed` -| `duplicatePolicy` | [Simple string reply](/docs/reference/protocol-spec#simple-strings) or [Nil reply](/docs/reference/protocol-spec#bulk-strings)
The [duplicate policy](/docs/stack/timeseries/configuration/#duplicate_policy) of this time series -| `labels` | [Array reply](/docs/reference/protocol-spec#arrays) or [Nil reply](/docs/reference/protocol-spec#bulk-strings)
Metadata labels of this time series
Each element is a 2-elements [Array reply](/docs/reference/protocol-spec#arrays) of ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)) representing (label, value) -| `sourceKey` | [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) or [Nil reply](/docs/reference/protocol-spec#bulk-strings)
Key name for source time series in case the current series is a target of a [compaction rule](/commands/ts.createrule/) -| `rules` | [Array reply](/docs/reference/protocol-spec#arrays)
[Compaction rules](/commands/ts.createrule/) defined in this time series
Each rule is an [Array reply](/docs/reference/protocol-spec#arrays) with 4 elements:
- [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): The compaction key
- [Integer reply](/docs/reference/protocol-spec#integers): The bucket duration
- [Simple string reply](/docs/reference/protocol-spec#simple-strings): The aggregator
- [Integer reply](/docs/reference/protocol-spec#integers): The alignment (since RedisTimeSeries v1.8) - -When [`DEBUG`](/commands/debug) is specified, the response also contains: - -| Name
[Simple string reply](/docs/reference/protocol-spec#simple-strings) | Description +| `totalSamples` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Total number of samples in this time series +| `memoryUsage` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Total number of bytes allocated for this time series, which is the sum of
- The memory used for storing the series' configuration parameters (retention period, duplication policy, etc.)
- The memory used for storing the series' compaction rules
- The memory used for storing the series' labels (key-value pairs)
- The memory used for storing the chunks (chunk header + compressed/uncompressed data) +| `firstTimestamp` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
First timestamp present in this time series (Unix timestamp in milliseconds) +| `lastTimestamp` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Last timestamp present in this time series (Unix timestamp in milliseconds) +| `retentionTime` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
The retention period, in milliseconds, for this time series +| `chunkCount` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Number of chunks used for this time series +| `chunkSize` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
The initial allocation size, in bytes, for the data part of each new chunk.
Actual chunks may consume more memory. Changing the chunk size (using [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter)) does not affect existing chunks. +| `chunkType` | [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}})
The chunks type: `compressed` or `uncompressed` +| `duplicatePolicy` | [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) or [Nil reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})
The [duplicate policy]({{< baseurl >}}/develop/data-types/timeseries/configuration#duplicate_policy) of this time series +| `labels` | [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) or [Nil reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})
Metadata labels of this time series
Each element is a 2-element [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})) representing (label, value) +| `sourceKey` | [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}) or [Nil reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})
Key name for source time series in case the current series is a target of a [compaction rule]({{< baseurl >}}/commands/ts.createrule/) +| `rules` | [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}})
[Compaction rules]({{< baseurl >}}/commands/ts.createrule/) defined in this time series
Each rule is an [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with 4 elements:
- [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}): The compaction key
- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}): The bucket duration
- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}): The aggregator
- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}): The alignment (since RedisTimeSeries v1.8) + +When [`DEBUG`]({{< relref "/commands/debug" >}}) is specified, the response also contains: + +| Name
[Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) | Description | ---------------------------- | - -| `keySelfName` | [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)
Name of the key -| `Chunks` | [Array reply](/docs/reference/protocol-spec#arrays) with information about the chunks
Each element is an [Array reply](/docs/reference/protocol-spec#arrays) of information about a single chunk in a name([Simple string reply](/docs/reference/protocol-spec#simple-strings))-value pairs:
- `startTimestamp` - [Integer reply](/docs/reference/protocol-spec#integers) - First timestamp present in the chunk
- `endTimestamp` - [Integer reply](/docs/reference/protocol-spec#integers) - Last timestamp present in the chunk
- `samples` - [Integer reply](/docs/reference/protocol-spec#integers) - Total number of samples in the chunk
- `size` - [Integer reply](/docs/reference/protocol-spec#integers) - the chunk's internal data size (without overheads) in bytes
- `bytesPerSample` - [Bulk string reply](/docs/reference/protocol-spec#bulk-strings) (double) - Ratio of `size` and `samples` +| `keySelfName` | [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})
Name of the key +| `Chunks` | [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with information about the chunks
Each element is an [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of information about a single chunk as name([Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}))-value pairs:
- `startTimestamp` - [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - First timestamp present in the chunk
- `endTimestamp` - [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - Last timestamp present in the chunk
- `samples` - [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - Total number of samples in the chunk
- `size` - [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - the chunk's internal data size (without overheads) in bytes
- `bytesPerSample` - [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}) (double) - Ratio of `size` and `samples` ## Examples @@ -178,8 +176,8 @@ Query the time series using DEBUG to get more information about the chunks. ## See also -[`TS.RANGE`](/commands/ts.range) | [`TS.QUERYINDEX`](/commands/ts.queryindex) | [`TS.GET`](/commands/ts.get) +[`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.QUERYINDEX`]({{< baseurl >}}/commands/ts.queryindex) | [`TS.GET`]({{< baseurl >}}/commands/ts.get) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.madd/index.md b/content/commands/ts.madd/index.md index 1c7556cd01..d24c51f9d9 100644 --- a/content/commands/ts.madd/index.md +++ b/content/commands/ts.madd/index.md @@ -66,14 +66,14 @@ is numeric data value of the sample (double). The double number should follow Notes: - If `timestamp` is older than the retention period compared to the maximum existing timestamp, the sample is discarded and an error is returned. -- Explicitly adding samples to a compacted time series (using [`TS.ADD`](/commands/ts.add), `TS.MADD`, [`TS.INCRBY`](/commands/ts.incrby), or [`TS.DECRBY`](/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. +- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add), `TS.MADD`, [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples.
## Return value Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays), where each element is an [Integer reply](/docs/reference/protocol-spec#integers) representing the timestamp of a upserted sample or an [] (when duplication policy is `BLOCK`, or when `timestamp` is older than the retention period compared to the maximum existing timestamp) +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}), where each element is an [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) representing the timestamp of an upserted sample or an [] (when duplication policy is `BLOCK`, or when `timestamp` is older than the retention period compared to the maximum existing timestamp) - [] (invalid arguments, wrong key type, etc.) ## Complexity @@ -105,8 +105,8 @@ OK ## See also -[`TS.MRANGE`](/commands/ts.mrange) | [`TS.RANGE`](/commands/ts.range) | [`TS.MREVRANGE`](/commands/ts.mrevrange) | [`TS.REVRANGE`](/commands/ts.revrange) +[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.mget/index.md b/content/commands/ts.mget/index.md index b19da5525e..954f715690 100644 --- a/content/commands/ts.mget/index.md +++ b/content/commands/ts.mget/index.md @@ -61,11 +61,9 @@ summary: Get the sample with the highest timestamp from each time series matchin syntax: 'TS.MGET [LATEST] [WITHLABELS | SELECTED_LABELS label...] FILTER filterExpr... 
' -syntax_fmt: "TS.MGET [LATEST] [WITHLABELS | SELECTED_LABELS label1 [label1 ...]] -\ - \ FILTER\_" +syntax_fmt: "TS.MGET [LATEST] [WITHLABELS | SELECTED_LABELS label1 [label1 ...]] \ + \ FILTER\_" syntax_str: "[WITHLABELS | SELECTED_LABELS label1 [label1 ...]] FILTER\_" @@ -124,17 +122,17 @@ If `WITHLABELS` or `SELECTED_LABELS` are not specified, by default, an empty lis
-Note: The [`MGET`](/commands/mget) command cannot be part of a transaction when running on a Redis cluster. +Note: The [`MGET`]({{< relref "/commands/mget" >}}) command cannot be part of a transaction when running on a Redis cluster. ## Return value -- [Array reply](/docs/reference/protocol-spec#arrays): for each time series matching the specified filters, the following is reported: +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): for each time series matching the specified filters, the following is reported: - bulk-string-reply: The time series key name - - [Array reply](/docs/reference/protocol-spec#arrays): label-value pairs ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)) + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): label-value pairs ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})) - By default, an empty array is reported - If `WITHLABELS` is specified, all labels associated with this time series are reported - If `SELECTED_LABELS label...` is specified, the selected labels are reported (null value when no such label defined) - - [Array reply](/docs/reference/protocol-spec#arrays): a single timestamp-value pair ([Integer reply](/docs/reference/protocol-spec#integers), [Simple string reply](/docs/reference/protocol-spec#simple-strings) (double)) + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): a single timestamp-value pair ([Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) (double)) ## Examples @@ -199,9 +197,9 @@ To get only the `location` label for each last sample, use `SELECTED_LABELS`. 
## See also -[`TS.MRANGE`](/commands/ts.mrange) | [`TS.RANGE`](/commands/ts.range) | [`TS.MREVRANGE`](/commands/ts.mrevrange) | [`TS.REVRANGE`](/commands/ts.revrange) +[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.mrange/index.md b/content/commands/ts.mrange/index.md index a1faea70a0..e61bb76b11 100644 --- a/content/commands/ts.mrange/index.md +++ b/content/commands/ts.mrange/index.md @@ -158,34 +158,17 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Query a range across multiple time series by filters in forward direction -syntax: "TS.MRANGE fromTimestamp toTimestamp - [LATEST] - [FILTER_BY_TS ts...] -\ - \ [FILTER_BY_VALUE min max] - [WITHLABELS | SELECTED_LABELS label...] - [COUNT\ - \ count] - [[ALIGN align] AGGREGATION aggregator bucketDuration [BUCKETTIMESTAMP\ - \ bt] [EMPTY]] - FILTER filterExpr... - [GROUPBY label REDUCE reducer] -" -syntax_fmt: "TS.MRANGE fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp -\ - \ [Timestamp ...]] [FILTER_BY_VALUE min max] [WITHLABELS | - SELECTED_LABELS label1\ - \ [label1 ...]] [COUNT\_count] [[ALIGN\_value] - AGGREGATION\_ bucketDuration -\ - \ [BUCKETTIMESTAMP] [EMPTY]] FILTER\_\ - \ [GROUPBY label REDUCE - reducer]" +syntax: 'TS.MRANGE fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS ts...] [FILTER_BY_VALUE + min max] [WITHLABELS | SELECTED_LABELS label...] [COUNT count] [[ALIGN align] AGGREGATION + aggregator bucketDuration [BUCKETTIMESTAMP bt] [EMPTY]] FILTER filterExpr... 
[GROUPBY + label REDUCE reducer] ' +syntax_fmt: "TS.MRANGE fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp\ + \ [Timestamp ...]] [FILTER_BY_VALUE min max] [WITHLABELS | SELECTED_LABELS label1\ + \ [label1 ...]] [COUNT\_count] [[ALIGN\_value] AGGREGATION\_ bucketDuration\ + \ [BUCKETTIMESTAMP] [EMPTY]] FILTER\_ [GROUPBY\ + \ label REDUCE reducer]" syntax_str: "toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp [Timestamp ...]] [FILTER_BY_VALUE\ \ min max] [WITHLABELS | SELECTED_LABELS label1 [label1 ...]] [COUNT\_count] [[ALIGN\_\ value] AGGREGATION\_}}): for each time series matching the specified filters, the following is reported: - bulk-string-reply: The time series key name - - [Array reply](/docs/reference/protocol-spec#arrays): label-value pairs ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)) + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): label-value pairs ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})) - By default, an empty array is reported - If `WITHLABELS` is specified, all labels associated with this time series are reported - If `SELECTED_LABELS label...` is specified, the selected labels are reported (null value when no such label defined) - - [Array reply](/docs/reference/protocol-spec#arrays): timestamp-value pairs ([Integer reply](/docs/reference/protocol-spec#integers), [Simple string reply](/docs/reference/protocol-spec#simple-strings) (double)): all samples/aggregations matching the range + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): timestamp-value pairs ([Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) (double)): all samples/aggregations matching the range If 
`GROUPBY label REDUCE reducer` is specified: -- [Array reply](/docs/reference/protocol-spec#arrays): for each group of time series matching the specified filters, the following is reported: +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): for each group of time series matching the specified filters, the following is reported: - bulk-string-reply with the format `label=value` where `label` is the `GROUPBY` label argument - - [Array reply](/docs/reference/protocol-spec#arrays): label-value pairs ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)): + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): label-value pairs ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})): - By default, an empty array is reported - If `WITHLABELS` is specified, the `GROUPBY` label argument and value are reported - If `SELECTED_LABELS label...` is specified, the selected labels are reported (null value when no such label defined or label does not have the same value for all grouped time series) - - [Array reply](/docs/reference/protocol-spec#arrays): either a single pair ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)): the `GROUPBY` label argument and value, or empty array if - - [Array reply](/docs/reference/protocol-spec#arrays): a single pair ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)): the string `__reducer__` and the reducer argument - - [Array reply](/docs/reference/protocol-spec#arrays): a single pair ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)): the string `__source__` and the time 
series key names separated by `,` - - [Array reply](/docs/reference/protocol-spec#arrays): timestamp-value pairs ([Integer reply](/docs/reference/protocol-spec#integers), [Simple string reply](/docs/reference/protocol-spec#simple-strings) (double)): all samples/aggregations matching the range + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): either a single pair ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})): the `GROUPBY` label argument and value, or empty array if + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): a single pair ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})): the string `__reducer__` and the reducer argument + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): a single pair ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})): the string `__source__` and the time series key names separated by `,` + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): timestamp-value pairs ([Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) (double)): all samples/aggregations matching the range ## Examples @@ -610,8 +593,8 @@ Query all time series with the metric label equal to `cpu`, but only return the ## See also -[`TS.RANGE`](/commands/ts.range) | [`TS.MREVRANGE`](/commands/ts.mrevrange) | [`TS.REVRANGE`](/commands/ts.revrange) +[`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange) ## 
Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.mrevrange/index.md b/content/commands/ts.mrevrange/index.md index e95616b8d7..37ebba8cbc 100644 --- a/content/commands/ts.mrevrange/index.md +++ b/content/commands/ts.mrevrange/index.md @@ -158,34 +158,17 @@ module: TimeSeries since: 1.4.0 stack_path: docs/data-types/timeseries summary: Query a range across multiple time-series by filters in reverse direction -syntax: "TS.MREVRANGE fromTimestamp toTimestamp - [LATEST] - [FILTER_BY_TS TS...] -\ - \ [FILTER_BY_VALUE min max] - [WITHLABELS | SELECTED_LABELS label...] - [COUNT\ - \ count] - [[ALIGN align] AGGREGATION aggregator bucketDuration [BUCKETTIMESTAMP\ - \ bt] [EMPTY]] - FILTER filterExpr... - [GROUPBY label REDUCE reducer] -" -syntax_fmt: "TS.MREVRANGE fromTimestamp toTimestamp [LATEST] - [FILTER_BY_TS\_Timestamp\ - \ [Timestamp ...]] [FILTER_BY_VALUE min max] - [WITHLABELS | SELECTED_LABELS label1\ - \ [label1 ...]] [COUNT\_count] - [[ALIGN\_value] AGGREGATION\_ -\ - \ bucketDuration [BUCKETTIMESTAMP] [EMPTY]] FILTER\_ [GROUPBY label REDUCE - reducer]" +syntax: 'TS.MREVRANGE fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS TS...] [FILTER_BY_VALUE + min max] [WITHLABELS | SELECTED_LABELS label...] [COUNT count] [[ALIGN align] AGGREGATION + aggregator bucketDuration [BUCKETTIMESTAMP bt] [EMPTY]] FILTER filterExpr... 
[GROUPBY + label REDUCE reducer] ' +syntax_fmt: "TS.MREVRANGE fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp\ + \ [Timestamp ...]] [FILTER_BY_VALUE min max] [WITHLABELS | SELECTED_LABELS label1\ + \ [label1 ...]] [COUNT\_count] [[ALIGN\_value] AGGREGATION\_ bucketDuration\ + \ [BUCKETTIMESTAMP] [EMPTY]] FILTER\_ [GROUPBY label REDUCE\ + \ reducer]" syntax_str: "toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp [Timestamp ...]] [FILTER_BY_VALUE\ \ min max] [WITHLABELS | SELECTED_LABELS label1 [label1 ...]] [COUNT\_count] [[ALIGN\_\ value] AGGREGATION\_}}): for each time series matching the specified filters, the following is reported: - bulk-string-reply: The time series key name - - [Array reply](/docs/reference/protocol-spec#arrays): label-value pairs ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)) + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): label-value pairs ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})) - By default, an empty list is reported - If `WITHLABELS` is specified, all labels associated with this time series are reported - If `SELECTED_LABELS label...` is specified, the selected labels are reported - - [Array reply](/docs/reference/protocol-spec#arrays): timestamp-value pairs ([Integer reply](/docs/reference/protocol-spec#integers), [Simple string reply](/docs/reference/protocol-spec#simple-strings) (double)): all samples/aggregations matching the range + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): timestamp-value pairs ([Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) (double)): all samples/aggregations matching the range If `GROUPBY label REDUCE reducer` is 
specified: -- [Array reply](/docs/reference/protocol-spec#arrays): for each group of time series matching the specified filters, the following is reported: +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): for each group of time series matching the specified filters, the following is reported: - bulk-string-reply with the format `label=value` where `label` is the `GROUPBY` label argument - - [Array reply](/docs/reference/protocol-spec#arrays): a single pair ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)): the `GROUPBY` label argument and value - - [Array reply](/docs/reference/protocol-spec#arrays): a single pair ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)): the string `__reducer__` and the reducer argument - - [Array reply](/docs/reference/protocol-spec#arrays): a single pair ([Bulk string reply](/docs/reference/protocol-spec#bulk-strings), [Bulk string reply](/docs/reference/protocol-spec#bulk-strings)): the string `__source__` and the time series key names separated by `,` - - [Array reply](/docs/reference/protocol-spec#arrays): timestamp-value pairs ([Integer reply](/docs/reference/protocol-spec#integers), [Simple string reply](/docs/reference/protocol-spec#simple-strings) (double)): all samples/aggregations matching the range + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): a single pair ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})): the `GROUPBY` label argument and value + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): a single pair ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref 
"/develop/reference/protocol-spec#bulk-strings" >}})): the string `__reducer__` and the reducer argument + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): a single pair ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})): the string `__source__` and the time series key names separated by `,` + - [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}): timestamp-value pairs ([Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) (double)): all samples/aggregations matching the range ## Examples @@ -605,8 +588,8 @@ Query all time series with the metric label equal to `cpu`, but only return the ## See also -[`TS.MRANGE`](/commands/ts.mrange) | [`TS.RANGE`](/commands/ts.range) | [`TS.REVRANGE`](/commands/ts.revrange) +[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.queryindex/index.md b/content/commands/ts.queryindex/index.md index e3d28c3329..814a462688 100644 --- a/content/commands/ts.queryindex/index.md +++ b/content/commands/ts.queryindex/index.md @@ -38,10 +38,8 @@ summary: Get all time series keys matching a filter list syntax: 'TS.QUERYINDEX filterExpr... ' -syntax_fmt: "TS.QUERYINDEX " +syntax_fmt: TS.QUERYINDEX syntax_str: '' title: TS.QUERYINDEX --- @@ -77,7 +75,7 @@ filters time series based on their labels and label values. 
Each filter expressi Returns one of these replies: -- [Array reply](/docs/reference/protocol-spec#arrays) where each element is a [Bulk string reply](/docs/reference/protocol-spec#bulk-strings): a time series key. The array is empty if no time series matches the filter. +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) where each element is a [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}): a time series key. The array is empty if no time series matches the filter. - [] (e.g., on invalid filter expression) ## Examples @@ -117,8 +115,8 @@ To retrieve the keys of all time series representing sensors that measure temper ## See also -[`TS.CREATE`](/commands/ts.create) | [`TS.MRANGE`](/commands/ts.mrange) | [`TS.MREVRANGE`](/commands/ts.mrevrange) | [`TS.MGET`](/commands/ts.mget) +[`TS.CREATE`]({{< baseurl >}}/commands/ts.create) | [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) | [`TS.MGET`]({{< baseurl >}}/commands/ts.mget) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.range/index.md b/content/commands/ts.range/index.md index ae87acc468..05f8384d56 100644 --- a/content/commands/ts.range/index.md +++ b/content/commands/ts.range/index.md @@ -114,23 +114,13 @@ module: TimeSeries since: 1.0.0 stack_path: docs/data-types/timeseries summary: Query a range in forward direction -syntax: "TS.RANGE key fromTimestamp toTimestamp - [LATEST] - [FILTER_BY_TS ts...] 
-\ - \ [FILTER_BY_VALUE min max] - [COUNT count] - [[ALIGN align] AGGREGATION aggregator\ - \ bucketDuration [BUCKETTIMESTAMP bt] [EMPTY]] -" -syntax_fmt: "TS.RANGE key fromTimestamp toTimestamp [LATEST] - [FILTER_BY_TS\_Timestamp\ - \ [Timestamp ...]] [FILTER_BY_VALUE min max] - [COUNT\_count] [[ALIGN\_value] AGGREGATION\_\ - - bucketDuration [BUCKETTIMESTAMP] [EMPTY]]" +syntax: 'TS.RANGE key fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS ts...] [FILTER_BY_VALUE + min max] [COUNT count] [[ALIGN align] AGGREGATION aggregator bucketDuration [BUCKETTIMESTAMP + bt] [EMPTY]] ' +syntax_fmt: "TS.RANGE key fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp\ + \ [Timestamp ...]] [FILTER_BY_VALUE min max] [COUNT\_count] [[ALIGN\_value] AGGREGATION\_\ + bucketDuration [BUCKETTIMESTAMP] [EMPTY]]" syntax_str: "fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp [Timestamp\ \ ...]] [FILTER_BY_VALUE min max] [COUNT\_count] [[ALIGN\_value] AGGREGATION\_}}) of ([Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}})) pairs representing (timestamp, value(double)) - [] (e.g., on invalid filter value) ## Complexity @@ -398,8 +388,8 @@ Similarly, when the end timestamp for the range query is explicitly stated, you ## See also -[`TS.MRANGE`](/commands/ts.mrange) | [`TS.REVRANGE`](/commands/ts.revrange) | [`TS.MREVRANGE`](/commands/ts.mrevrange) +[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git a/content/commands/ts.revrange/index.md b/content/commands/ts.revrange/index.md index febecc0786..4b296544e6 100644 --- a/content/commands/ts.revrange/index.md +++ b/content/commands/ts.revrange/index.md @@ -114,23 
+114,13 @@ module: TimeSeries since: 1.4.0 stack_path: docs/data-types/timeseries summary: Query a range in reverse direction -syntax: "TS.REVRANGE key fromTimestamp toTimestamp - [LATEST] - [FILTER_BY_TS TS...] -\ - \ [FILTER_BY_VALUE min max] - [COUNT count] - [[ALIGN align] AGGREGATION aggregator\ - \ bucketDuration [BUCKETTIMESTAMP bt] [EMPTY]] -" -syntax_fmt: "TS.REVRANGE key fromTimestamp toTimestamp [LATEST] - [FILTER_BY_TS\_\ - Timestamp [Timestamp ...]] [FILTER_BY_VALUE min max] - [COUNT\_count] [[ALIGN\_\ - value] AGGREGATION\_ - bucketDuration [BUCKETTIMESTAMP] [EMPTY]]" +syntax: 'TS.REVRANGE key fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS TS...] [FILTER_BY_VALUE + min max] [COUNT count] [[ALIGN align] AGGREGATION aggregator bucketDuration [BUCKETTIMESTAMP + bt] [EMPTY]] ' +syntax_fmt: "TS.REVRANGE key fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp\ + \ [Timestamp ...]] [FILTER_BY_VALUE min max] [COUNT\_count] [[ALIGN\_value] AGGREGATION\_\ + bucketDuration [BUCKETTIMESTAMP] [EMPTY]]" syntax_str: "fromTimestamp toTimestamp [LATEST] [FILTER_BY_TS\_Timestamp [Timestamp\ \ ...]] [FILTER_BY_VALUE min max] [COUNT\_count] [[ALIGN\_value] AGGREGATION\_}}) of ([Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}})) pairs representing (timestamp, value(double)) - [] (e.g., on invalid filter value) ## Complexity @@ -401,8 +391,8 @@ Similarly, when the end timestamp for the range query is explicitly stated, you ## See also -[`TS.RANGE`](/commands/ts.range) | [`TS.MRANGE`](/commands/ts.mrange) | [`TS.MREVRANGE`](/commands/ts.mrevrange) +[`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) ## Related topics -[RedisTimeSeries](/docs/stack/timeseries) +[RedisTimeSeries]({{< relref "/develop/data-types/timeseries/" >}}) diff --git 
a/content/commands/ttl/index.md b/content/commands/ttl/index.md index 821588b90a..b545991351 100644 --- a/content/commands/ttl/index.md +++ b/content/commands/ttl/index.md @@ -62,7 +62,7 @@ Starting with Redis 2.8 the return value in case of error changed: * The command returns `-2` if the key does not exist. * The command returns `-1` if the key exists but has no associated expire. -See also the [`PTTL`](/commands/pttl) command that returns the same information with milliseconds resolution (Only available in Redis 2.6 or greater). +See also the [`PTTL`]({{< relref "/commands/pttl" >}}) command that returns the same information with milliseconds resolution (Only available in Redis 2.6 or greater). ## Examples diff --git a/content/commands/unlink/index.md b/content/commands/unlink/index.md index 3bae8e1448..e25e92d114 100644 --- a/content/commands/unlink/index.md +++ b/content/commands/unlink/index.md @@ -52,10 +52,10 @@ syntax_fmt: UNLINK key [key ...] syntax_str: '' title: UNLINK --- -This command is very similar to [`DEL`](/commands/del): it removes the specified keys. -Just like [`DEL`](/commands/del) a key is ignored if it does not exist. However the command +This command is very similar to [`DEL`]({{< relref "/commands/del" >}}): it removes the specified keys. +Just like [`DEL`]({{< relref "/commands/del" >}}) a key is ignored if it does not exist. However the command performs the actual memory reclaiming in a different thread, so it is not -blocking, while [`DEL`](/commands/del) is. This is where the command name comes from: the +blocking, while [`DEL`]({{< relref "/commands/del" >}}) is. This is where the command name comes from: the command just **unlinks** the keys from the keyspace. The actual removal will happen later asynchronously. 
diff --git a/content/commands/unwatch/index.md b/content/commands/unwatch/index.md index 256c85b3d2..b0c3e26b0c 100644 --- a/content/commands/unwatch/index.md +++ b/content/commands/unwatch/index.md @@ -34,4 +34,4 @@ Flushes all the previously watched keys for a [transaction][tt]. [tt]: /topics/transactions -If you call [`EXEC`](/commands/exec) or [`DISCARD`](/commands/discard), there's no need to manually call `UNWATCH`. +If you call [`EXEC`]({{< relref "/commands/exec" >}}) or [`DISCARD`]({{< relref "/commands/discard" >}}), there's no need to manually call `UNWATCH`. diff --git a/content/commands/wait/index.md b/content/commands/wait/index.md index b575f79aec..ab3e4d5c0a 100644 --- a/content/commands/wait/index.md +++ b/content/commands/wait/index.md @@ -48,7 +48,7 @@ the specified number of replicas are reached, or when the timeout is reached. A few remarks: 1. When `WAIT` returns, all the previous write commands sent in the context of the current connection are guaranteed to be received by the number of replicas returned by `WAIT`. -2. If the command is sent as part of a [`MULTI`](/commands/multi) transaction (since Redis 7.0, any context that does not allow blocking, such as inside scripts), the command does not block but instead just return ASAP the number of replicas that acknowledged the previous write commands. +2. If the command is sent as part of a [`MULTI`]({{< relref "/commands/multi" >}}) transaction (since Redis 7.0, any context that does not allow blocking, such as inside scripts), the command does not block but instead just return ASAP the number of replicas that acknowledged the previous write commands. 3. A timeout of 0 means to block forever. 4. Since `WAIT` returns the number of replicas reached both in case of failure and success, the client should check that the returned value is equal or greater to the replication level it demanded. 
diff --git a/content/commands/waitaof/index.md b/content/commands/waitaof/index.md index b1cd29bcba..223a5c85c6 100644 --- a/content/commands/waitaof/index.md +++ b/content/commands/waitaof/index.md @@ -54,7 +54,7 @@ The command **will always return** the number of masters and replicas that have A few remarks: 1. When `WAITAOF` returns, all the previous write commands sent in the context of the current connection are guaranteed to be fsynced to the AOF of at least the number of masters and replicas returned by `WAITAOF`. -2. If the command is sent as part of a [`MULTI`](/commands/multi) transaction (or any other context that does not allow blocking, such as inside scripts), the command does not block but instead returns immediately the number of masters and replicas that fsynced all previous write commands. +2. If the command is sent as part of a [`MULTI`]({{< relref "/commands/multi" >}}) transaction (or any other context that does not allow blocking, such as inside scripts), the command does not block but instead returns immediately the number of masters and replicas that fsynced all previous write commands. 3. A timeout of 0 means to block forever. 4. Since `WAITAOF` returns the number of fsyncs completed both in case of success and timeout, the client should check that the returned values are equal or greater than the persistence level required. 5. `WAITAOF` cannot be used on replica instances, and the `numlocal` argument cannot be non-zero if the local Redis does not have AOF enabled. @@ -69,7 +69,7 @@ These features are incompatible with the `WAITAOF` command as it is currently im Consistency and WAITAOF --- -Note that, similarly to [`WAIT`](/commands/wait), `WAITAOF` does not make Redis a strongly-consistent store. +Note that, similarly to [`WAIT`]({{< relref "/commands/wait" >}}), `WAITAOF` does not make Redis a strongly-consistent store. 
Unless waiting for all members of a cluster to fsync writes to disk, data can still be lost during a failover or a Redis restart. However, `WAITAOF` does improve real-world data safety. diff --git a/content/commands/xack/index.md b/content/commands/xack/index.md index b0337a371f..6a1b42644b 100644 --- a/content/commands/xack/index.md +++ b/content/commands/xack/index.md @@ -58,12 +58,12 @@ title: XACK The `XACK` command removes one or multiple messages from the *Pending Entries List* (PEL) of a stream consumer group. A message is pending, and as such stored inside the PEL, when it was delivered to some consumer, -normally as a side effect of calling [`XREADGROUP`](/commands/xreadgroup), or when a consumer took -ownership of a message calling [`XCLAIM`](/commands/xclaim). The pending message was delivered to +normally as a side effect of calling [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}), or when a consumer took +ownership of a message calling [`XCLAIM`]({{< relref "/commands/xclaim" >}}). The pending message was delivered to some consumer but the server is yet not sure it was processed at least once. -So new calls to [`XREADGROUP`](/commands/xreadgroup) to grab the messages history for a consumer +So new calls to [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) to grab the messages history for a consumer (for instance using an ID of 0), will return such message. -Similarly the pending message will be listed by the [`XPENDING`](/commands/xpending) command, +Similarly the pending message will be listed by the [`XPENDING`]({{< relref "/commands/xpending" >}}) command, that inspects the PEL. Once a consumer *successfully* processes a message, it should call `XACK` diff --git a/content/commands/xadd/index.md b/content/commands/xadd/index.md index d754e28646..50fdfb97a6 100644 --- a/content/commands/xadd/index.md +++ b/content/commands/xadd/index.md @@ -115,9 +115,8 @@ key_specs: linkTitle: XADD since: 5.0.0 summary: Appends a new message to a stream. 
Creates the key if it doesn't exist. -syntax_fmt: "XADD key [NOMKSTREAM] [ [= | ~] threshold - [LIMIT\_\ - count]] <* | id> field value [field value ...]" +syntax_fmt: "XADD key [NOMKSTREAM] [ [= | ~] threshold [LIMIT\_count]]\ + \ <* | id> field value [field value ...]" syntax_str: "[NOMKSTREAM] [ [= | ~] threshold [LIMIT\_count]] <* |\ \ id> field value [field value ...]" title: XADD @@ -129,10 +128,10 @@ disabled with the `NOMKSTREAM` option. An entry is composed of a list of field-value pairs. The field-value pairs are stored in the same order they are given by the user. -Commands that read the stream, such as [`XRANGE`](/commands/xrange) or [`XREAD`](/commands/xread), are guaranteed to return the fields and values exactly in the same order they were added by `XADD`. +Commands that read the stream, such as [`XRANGE`]({{< relref "/commands/xrange" >}}) or [`XREAD`]({{< relref "/commands/xread" >}}), are guaranteed to return the fields and values exactly in the same order they were added by `XADD`. `XADD` is the *only Redis command* that can add data to a stream, but -there are other commands, such as [`XDEL`](/commands/xdel) and [`XTRIM`](/commands/xtrim), that are able to +there are other commands, such as [`XDEL`]({{< relref "/commands/xdel" >}}) and [`XTRIM`]({{< relref "/commands/xtrim" >}}), that are able to remove data from a stream. ## Specifying a Stream ID as an argument @@ -181,7 +180,7 @@ IDs to match the one of this other system. ## Capped streams -`XADD` incorporates the same semantics as the [`XTRIM`](/commands/xtrim) command - refer to its documentation page for more information. +`XADD` incorporates the same semantics as the [`XTRIM`]({{< relref "/commands/xtrim" >}}) command - refer to its documentation page for more information. This allows adding new entries and keeping the stream's size in check with a single call to `XADD`, effectively capping the stream with an arbitrary threshold. 
Although exact trimming is possible and is the default, due to the internal representation of streams it is more efficient to add an entry and trim stream with `XADD` using **almost exact** trimming (the `~` argument).
It transfers ownership to `` of messages pending for more than `` milliseconds and having an equal or greater ID than ``. The optional `` argument, which defaults to 100, is the upper limit of the number of entries that the command attempts to claim. @@ -98,7 +97,7 @@ However, note that you may want to continue calling `XAUTOCLAIM` even after the Note that only messages that are idle longer than `` are claimed, and claiming a message resets its idle time. This ensures that only a single consumer can successfully claim a given pending message at a specific instant of time and trivially reduces the probability of processing the same message multiple times. -While iterating the PEL, if `XAUTOCLAIM` stumbles upon a message which doesn't exist in the stream anymore (either trimmed or deleted by [`XDEL`](/commands/xdel)) it does not claim it, and deletes it from the PEL in which it was found. This feature was introduced in Redis 7.0. +While iterating the PEL, if `XAUTOCLAIM` stumbles upon a message which doesn't exist in the stream anymore (either trimmed or deleted by [`XDEL`]({{< relref "/commands/xdel" >}})) it does not claim it, and deletes it from the PEL in which it was found. This feature was introduced in Redis 7.0. These message IDs are returned to the caller as a part of `XAUTOCLAIM`s reply. Lastly, claiming a message with `XAUTOCLAIM` also increments the attempted deliveries count for that message, unless the `JUSTID` option has been specified (which only delivers the message ID, not the message itself). diff --git a/content/commands/xclaim/index.md b/content/commands/xclaim/index.md index 904e5f1440..93d910d9ec 100644 --- a/content/commands/xclaim/index.md +++ b/content/commands/xclaim/index.md @@ -90,10 +90,8 @@ linkTitle: XCLAIM since: 5.0.0 summary: Changes, or acquires, ownership of a message in a consumer group, as if the message was delivered a consumer group member. -syntax_fmt: "XCLAIM key group consumer min-idle-time id [id ...] 
[IDLE\_ms] - [TIME\_\ - unix-time-milliseconds] [RETRYCOUNT\_count] [FORCE] [JUSTID] - [LASTID\_lastid]" +syntax_fmt: "XCLAIM key group consumer min-idle-time id [id ...] [IDLE\_ms] [TIME\_\ + unix-time-milliseconds] [RETRYCOUNT\_count] [FORCE] [JUSTID] [LASTID\_lastid]" syntax_str: "group consumer min-idle-time id [id ...] [IDLE\_ms] [TIME\_unix-time-milliseconds]\ \ [RETRYCOUNT\_count] [FORCE] [JUSTID] [LASTID\_lastid]" title: XCLAIM @@ -103,10 +101,10 @@ of a pending message, so that the new owner is the consumer specified as the command argument. Normally this is what happens: 1. There is a stream with an associated consumer group. -2. Some consumer A reads a message via [`XREADGROUP`](/commands/xreadgroup) from a stream, in the context of that consumer group. -3. As a side effect a pending message entry is created in the Pending Entries List (PEL) of the consumer group: it means the message was delivered to a given consumer, but it was not yet acknowledged via [`XACK`](/commands/xack). +2. Some consumer A reads a message via [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) from a stream, in the context of that consumer group. +3. As a side effect a pending message entry is created in the Pending Entries List (PEL) of the consumer group: it means the message was delivered to a given consumer, but it was not yet acknowledged via [`XACK`]({{< relref "/commands/xack" >}}). 4. Then suddenly that consumer fails forever. -5. Other consumers may inspect the list of pending messages, that are stale for quite some time, using the [`XPENDING`](/commands/xpending) command. In order to continue processing such messages, they use `XCLAIM` to acquire the ownership of the message and continue. Consumers can also use the [`XAUTOCLAIM`](/commands/xautoclaim) command to automatically scan and claim stale pending messages. +5. 
Other consumers may inspect the list of pending messages, that are stale for quite some time, using the [`XPENDING`]({{< relref "/commands/xpending" >}}) command. In order to continue processing such messages, they use `XCLAIM` to acquire the ownership of the message and continue. Consumers can also use the [`XAUTOCLAIM`]({{< relref "/commands/xautoclaim" >}}) command to automatically scan and claim stale pending messages. This dynamic is clearly explained in the [Stream intro documentation](/topics/streams-intro). @@ -117,7 +115,7 @@ Moreover, as a side effect, `XCLAIM` will increment the count of attempted deliv `XCLAIM` will not claim a message in the following cases: 1. The message doesn't exist in the group PEL (i.e. it was never read by any consumer) -2. The message exists in the group PEL but not in the stream itself (i.e. the message was read but never acknowledged, and then was deleted from the stream, either by trimming or by [`XDEL`](/commands/xdel)) +2. The message exists in the group PEL but not in the stream itself (i.e. the message was read but never acknowledged, and then was deleted from the stream, either by trimming or by [`XDEL`]({{< relref "/commands/xdel" >}})) In both cases the reply will not contain a corresponding entry to that message (i.e. the length of the reply array may be smaller than the number of IDs provided to `XCLAIM`). In the latter case, the message will also be deleted from the PEL in which it was found. This feature was introduced in Redis 7.0. diff --git a/content/commands/xgroup-create/index.md b/content/commands/xgroup-create/index.md index fda2ac8f73..e8ca64a484 100644 --- a/content/commands/xgroup-create/index.md +++ b/content/commands/xgroup-create/index.md @@ -68,8 +68,7 @@ key_specs: linkTitle: XGROUP CREATE since: 5.0.0 summary: Creates a consumer group. 
-syntax_fmt: "XGROUP CREATE key group [MKSTREAM] - [ENTRIESREAD\_entries-read]" +syntax_fmt: "XGROUP CREATE key group [MKSTREAM] [ENTRIESREAD\_entries-read]" syntax_str: "group [MKSTREAM] [ENTRIESREAD\_entries-read]" title: XGROUP CREATE --- diff --git a/content/commands/xgroup-createconsumer/index.md b/content/commands/xgroup-createconsumer/index.md index 64f7857175..bf5ec547af 100644 --- a/content/commands/xgroup-createconsumer/index.md +++ b/content/commands/xgroup-createconsumer/index.md @@ -54,5 +54,5 @@ title: XGROUP CREATECONSUMER --- Create a consumer named `` in the consumer group `` of the stream that's stored at ``. -Consumers are also created automatically whenever an operation, such as [`XREADGROUP`](/commands/xreadgroup), references a consumer that doesn't exist. -This is valid for [`XREADGROUP`](/commands/xreadgroup) only when there is data in the stream. +Consumers are also created automatically whenever an operation, such as [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}), references a consumer that doesn't exist. +This is valid for [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) only when there is data in the stream. diff --git a/content/commands/xgroup-setid/index.md b/content/commands/xgroup-setid/index.md index de10d70f7b..00a46c2e35 100644 --- a/content/commands/xgroup-setid/index.md +++ b/content/commands/xgroup-setid/index.md @@ -68,7 +68,7 @@ title: XGROUP SETID --- Set the **last delivered ID** for a consumer group. -Normally, a consumer group's last delivered ID is set when the group is created with [`XGROUP CREATE`](/commands/xgroup-create). +Normally, a consumer group's last delivered ID is set when the group is created with [`XGROUP CREATE`]({{< relref "/commands/xgroup-create" >}}). The `XGROUP SETID` command allows modifying the group's last delivered ID, without having to delete and recreate the group. 
For instance if you want the consumers in a consumer group to re-process all the messages in a stream, you may want to set its next ID to 0: diff --git a/content/commands/xgroup/index.md b/content/commands/xgroup/index.md index 128c643e29..06ba07cacb 100644 --- a/content/commands/xgroup/index.md +++ b/content/commands/xgroup/index.md @@ -25,4 +25,4 @@ title: XGROUP --- This is a container command for stream consumer group management commands. -To see the list of available commands you can call [`XGROUP HELP`](/commands/xgroup-help). +To see the list of available commands you can call [`XGROUP HELP`]({{< relref "/commands/xgroup-help" >}}). diff --git a/content/commands/xinfo-consumers/index.md b/content/commands/xinfo-consumers/index.md index b530528602..6d89f97ca1 100644 --- a/content/commands/xinfo-consumers/index.md +++ b/content/commands/xinfo-consumers/index.md @@ -59,8 +59,8 @@ The following information is provided for each consumer in the group: * **name**: the consumer's name * **pending**: the number of entries in the PEL: pending messages for the consumer, which are messages that were delivered but are yet to be acknowledged -* **idle**: the number of milliseconds that have passed since the consumer's last attempted interaction (Examples: [`XREADGROUP`](/commands/xreadgroup), [`XCLAIM`](/commands/xclaim), [`XAUTOCLAIM`](/commands/xautoclaim)) -* **inactive**: the number of milliseconds that have passed since the consumer's last successful interaction (Examples: [`XREADGROUP`](/commands/xreadgroup) that actually read some entries into the PEL, [`XCLAIM`](/commands/xclaim)/[`XAUTOCLAIM`](/commands/xautoclaim) that actually claimed some entries) +* **idle**: the number of milliseconds that have passed since the consumer's last attempted interaction (Examples: [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}), [`XCLAIM`]({{< relref "/commands/xclaim" >}}), [`XAUTOCLAIM`]({{< relref "/commands/xautoclaim" >}})) +* **inactive**: the number of milliseconds 
that have passed since the consumer's last successful interaction (Examples: [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) that actually read some entries into the PEL, [`XCLAIM`]({{< relref "/commands/xclaim" >}})/[`XAUTOCLAIM`]({{< relref "/commands/xautoclaim" >}}) that actually claimed some entries) ## Examples diff --git a/content/commands/xinfo-groups/index.md b/content/commands/xinfo-groups/index.md index 4590f979e6..51c8382b6f 100644 --- a/content/commands/xinfo-groups/index.md +++ b/content/commands/xinfo-groups/index.md @@ -70,7 +70,7 @@ You can address high lag values by adding more consumers to the group, whereas l Redis reports the lag of a consumer group by keeping two counters: the number of all entries added to the stream and the number of logical reads made by the consumer group. The lag is the difference between these two. -The stream's counter (the `entries_added` field of the [`XINFO STREAM`](/commands/xinfo-stream) command) is incremented by one with every [`XADD`](/commands/xadd) and counts all of the entries added to the stream during its lifetime. +The stream's counter (the `entries_added` field of the [`XINFO STREAM`]({{< relref "/commands/xinfo-stream" >}}) command) is incremented by one with every [`XADD`]({{< relref "/commands/xadd" >}}) and counts all of the entries added to the stream during its lifetime. The consumer group's counter, `entries_read`, is the logical counter of entries the group had read. It is important to note that this counter is only a heuristic rather than an accurate counter, and therefore the use of the term "logical". @@ -79,9 +79,9 @@ The `entries_read` counter is accurate only in a perfect world, where a consumer There are two special cases in which this mechanism is unable to report the lag: -1. A consumer group is created or set with an arbitrary last delivered ID (the [`XGROUP CREATE`](/commands/xgroup-create) and [`XGROUP SETID`](/commands/xgroup-setid) commands, respectively). +1. 
A consumer group is created or set with an arbitrary last delivered ID (the [`XGROUP CREATE`]({{< relref "/commands/xgroup-create" >}}) and [`XGROUP SETID`]({{< relref "/commands/xgroup-setid" >}}) commands, respectively). An arbitrary ID is any ID that isn't the ID of the stream's first entry, its last entry or the zero ("0-0") ID. -2. One or more entries between the group's `last-delivered-id` and the stream's `last-generated-id` were deleted (with [`XDEL`](/commands/xdel) or a trimming operation). +2. One or more entries between the group's `last-delivered-id` and the stream's `last-generated-id` were deleted (with [`XDEL`]({{< relref "/commands/xdel" >}}) or a trimming operation). In both cases, the group's read counter is considered invalid, and the returned value is set to NULL to signal that the lag isn't currently available. diff --git a/content/commands/xinfo-stream/index.md b/content/commands/xinfo-stream/index.md index be326d684c..f3b352f7d4 100644 --- a/content/commands/xinfo-stream/index.md +++ b/content/commands/xinfo-stream/index.md @@ -70,7 +70,7 @@ This command returns information about the stream stored at ``. The informative details provided by this command are: -* **length**: the number of entries in the stream (see [`XLEN`](/commands/xlen)) +* **length**: the number of entries in the stream (see [`XLEN`]({{< relref "/commands/xlen" >}})) * **radix-tree-keys**: the number of keys in the underlying radix data structure * **radix-tree-nodes**: the number of nodes in the underlying radix data structure * **groups**: the number of consumer groups defined for the stream @@ -82,7 +82,7 @@ The informative details provided by this command are: The optional `FULL` modifier provides a more verbose reply. When provided, the `FULL` reply includes an **entries** array that consists of the stream entries (ID and field-value tuples) in ascending order. 
-Furthermore, **groups** is also an array, and for each of the consumer groups it consists of the information reported by [`XINFO GROUPS`](/commands/xinfo-groups) and [`XINFO CONSUMERS`](/commands/xinfo-consumers). +Furthermore, **groups** is also an array, and for each of the consumer groups it consists of the information reported by [`XINFO GROUPS`]({{< relref "/commands/xinfo-groups" >}}) and [`XINFO CONSUMERS`]({{< relref "/commands/xinfo-consumers" >}}). The `COUNT` option can be used to limit the number of stream and PEL entries that are returned (The first `` entries are returned). The default `COUNT` is 10 and a `COUNT` of 0 means that all entries will be returned (execution time may be long if the stream has a lot of entries). diff --git a/content/commands/xinfo/index.md b/content/commands/xinfo/index.md index 3cdff2d893..aab40f11ce 100644 --- a/content/commands/xinfo/index.md +++ b/content/commands/xinfo/index.md @@ -25,4 +25,4 @@ title: XINFO --- This is a container command for stream introspection commands. -To see the list of available commands you can call [`XINFO HELP`](/commands/xinfo-help). +To see the list of available commands you can call [`XINFO HELP`]({{< relref "/commands/xinfo-help" >}}). diff --git a/content/commands/xlen/index.md b/content/commands/xlen/index.md index a693c02429..54e7bfb7d6 100644 --- a/content/commands/xlen/index.md +++ b/content/commands/xlen/index.md @@ -48,11 +48,11 @@ title: XLEN Returns the number of entries inside a stream. If the specified key does not exist the command returns zero, as if the stream was empty. However note that unlike other Redis types, zero-length streams are -possible, so you should call [`TYPE`](/commands/type) or [`EXISTS`](/commands/exists) in order to check if +possible, so you should call [`TYPE`]({{< relref "/commands/type" >}}) or [`EXISTS`]({{< relref "/commands/exists" >}}) in order to check if a key exists or not. 
Streams are not auto-deleted once they have no entries inside (for instance -after an [`XDEL`](/commands/xdel) call), because the stream may have consumer groups +after an [`XDEL`]({{< relref "/commands/xdel" >}}) call), because the stream may have consumer groups associated with it. ## Examples diff --git a/content/commands/xpending/index.md b/content/commands/xpending/index.md index 11d46b57f0..5cd19b335b 100644 --- a/content/commands/xpending/index.md +++ b/content/commands/xpending/index.md @@ -84,8 +84,8 @@ title: XPENDING --- Fetching data from a stream via a consumer group, and not acknowledging such data, has the effect of creating *pending entries*. This is -well explained in the [`XREADGROUP`](/commands/xreadgroup) command, and even better in our -[introduction to Redis Streams](/topics/streams-intro). The [`XACK`](/commands/xack) command +well explained in the [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) command, and even better in our +[introduction to Redis Streams](/topics/streams-intro). The [`XACK`]({{< relref "/commands/xack" >}}) command will immediately remove the pending entry from the Pending Entries List (PEL) since once a message is successfully processed, there is no longer need for the consumer group to track it and to remember the current owner @@ -95,12 +95,12 @@ The `XPENDING` command is the interface to inspect the list of pending messages, and is as thus a very important command in order to observe and understand what is happening with a streams consumer groups: what clients are active, what messages are pending to be consumed, or to see -if there are idle messages. Moreover this command, together with [`XCLAIM`](/commands/xclaim) +if there are idle messages. 
Moreover this command, together with [`XCLAIM`]({{< relref "/commands/xclaim" >}}) is used in order to implement recovering of consumers that are failing for a long time, and as a result certain messages are not processed: a different consumer can claim the message and continue. This is better explained in the [streams intro](/topics/streams-intro) and in the -[`XCLAIM`](/commands/xclaim) command page, and is not covered here. +[`XCLAIM`]({{< relref "/commands/xclaim" >}}) command page, and is not covered here. ## Summary form of XPENDING @@ -108,7 +108,7 @@ When `XPENDING` is called with just a key name and a consumer group name, it just outputs a summary about the pending messages in a given consumer group. In the following example, we create a consumer group and immediately create a pending message by reading from the group with -[`XREADGROUP`](/commands/xreadgroup). +[`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}). ``` > XGROUP CREATE mystream group55 0-0 @@ -149,7 +149,7 @@ at least one pending message, and the number of pending messages it has. The summary provides a good overview, but sometimes we are interested in the details. In order to see all the pending messages with more associated information we need to also pass a range of IDs, in a similar way we do it with -[`XRANGE`](/commands/xrange), and a non optional *count* argument, to limit the number +[`XRANGE`]({{< relref "/commands/xrange" >}}), and a non optional *count* argument, to limit the number of messages returned per call: ``` @@ -170,9 +170,9 @@ each message four attributes are returned: 4. The number of times this message was delivered. 
The deliveries counter, that is the fourth element in the array, is incremented -when some other consumer *claims* the message with [`XCLAIM`](/commands/xclaim), or when the -message is delivered again via [`XREADGROUP`](/commands/xreadgroup), when accessing the history -of a consumer in a consumer group (see the [`XREADGROUP`](/commands/xreadgroup) page for more info). +when some other consumer *claims* the message with [`XCLAIM`]({{< relref "/commands/xclaim" >}}), or when the +message is delivered again via [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}), when accessing the history +of a consumer in a consumer group (see the [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) page for more info). It is possible to pass an additional argument to the command, in order to see the messages having a specific owner: @@ -192,7 +192,7 @@ a single consumer. ## Idle time filter It is also possible to filter pending stream entries by their idle-time, -given in milliseconds (useful for [`XCLAIM`](/commands/xclaim)ing entries that have not been +given in milliseconds (useful for [`XCLAIM`]({{< relref "/commands/xclaim" >}})ing entries that have not been processed for some time): ``` @@ -207,7 +207,7 @@ that are idle for over 9 seconds, whereas in the second case only those of ## Exclusive ranges and iterating the PEL The `XPENDING` command allows iterating over the pending entries just like -[`XRANGE`](/commands/xrange) and [`XREVRANGE`](/commands/xrevrange) allow for the stream's entries. You can do this by +[`XRANGE`]({{< relref "/commands/xrange" >}}) and [`XREVRANGE`]({{< relref "/commands/xrevrange" >}}) allow for the stream's entries. You can do this by prefixing the ID of the last-read pending entry with the `(` character that denotes an open (exclusive) range, and proving it to the subsequent call to the command. 
diff --git a/content/commands/xrange/index.md b/content/commands/xrange/index.md index 07109e4129..0c536a82dc 100644 --- a/content/commands/xrange/index.md +++ b/content/commands/xrange/index.md @@ -71,12 +71,12 @@ The `XRANGE` command has a number of applications: Stream IDs are [related to time](/topics/streams-intro). * Iterating a stream incrementally, returning just a few items at every iteration. However it is semantically much more - robust than the [`SCAN`](/commands/scan) family of functions. + robust than the [`SCAN`]({{< relref "/commands/scan" >}}) family of functions. * Fetching a single entry from a stream, providing the ID of the entry to fetch two times: as start and end of the query interval. The command also has a reciprocal command returning items in the -reverse order, called [`XREVRANGE`](/commands/xrevrange), which is otherwise identical. +reverse order, called [`XREVRANGE`]({{< relref "/commands/xrevrange" >}}), which is otherwise identical. ## `-` and `+` special IDs @@ -204,8 +204,8 @@ a specific time, by providing a given incomplete start ID. Moreover, we can limit the iteration to a given ID or time, by providing an end ID or incomplete ID instead of `+`. -The command [`XREAD`](/commands/xread) is also able to iterate the stream. -The command [`XREVRANGE`](/commands/xrevrange) can iterate the stream reverse, from higher IDs +The command [`XREAD`]({{< relref "/commands/xread" >}}) is also able to iterate the stream. +The command [`XREVRANGE`]({{< relref "/commands/xrevrange" >}}) can iterate the stream reverse, from higher IDs (or times) to lower IDs (or times). ### Iterating with earlier versions of Redis @@ -234,7 +234,7 @@ Also, note that once the sequence part of the last ID equals sequence part to 0. For example, incrementing the ID `1526985685298-18446744073709551615` should result in `1526985685299-0`. -A symmetrical pattern applies to iterating the stream with [`XREVRANGE`](/commands/xrevrange). 
The +A symmetrical pattern applies to iterating the stream with [`XREVRANGE`]({{< relref "/commands/xrevrange" >}}). The only difference is that the client needs to decrement the ID for the subsequent calls. When decrementing an ID with a sequence part of 0, the timestamp needs to be decremented by 1 and the sequence set to 18446744073709551615. diff --git a/content/commands/xread/index.md b/content/commands/xread/index.md index 042decfba2..c27464b361 100644 --- a/content/commands/xread/index.md +++ b/content/commands/xread/index.md @@ -65,16 +65,15 @@ linkTitle: XREAD since: 5.0.0 summary: Returns messages from multiple streams with IDs greater than the ones requested. Blocks until a message is available otherwise. -syntax_fmt: "XREAD [COUNT\_count] [BLOCK\_milliseconds] STREAMS\_key [key ...] id -\ - \ [id ...]" +syntax_fmt: "XREAD [COUNT\_count] [BLOCK\_milliseconds] STREAMS\_key [key ...] id\ + \ [id ...]" syntax_str: "[BLOCK\_milliseconds] STREAMS\_key [key ...] id [id ...]" title: XREAD --- Read data from one or multiple streams, only returning entries with an ID greater than the last received ID reported by the caller. This command has an option to block if items are not available, in a similar -fashion to [`BRPOP`](/commands/brpop) or [`BZPOPMIN`](/commands/bzpopmin) and others. +fashion to [`BRPOP`]({{< relref "/commands/brpop" >}}) or [`BZPOPMIN`]({{< relref "/commands/bzpopmin" >}}) and others. Please note that before reading this page, if you are new to streams, we recommend to read [our introduction to Redis Streams](/topics/streams-intro). @@ -82,15 +81,15 @@ we recommend to read [our introduction to Redis Streams](/topics/streams-intro). 
## Non-blocking usage If the **BLOCK** option is not used, the command is synchronous, and can -be considered somewhat related to [`XRANGE`](/commands/xrange): it will return a range of items -inside streams, however it has two fundamental differences compared to [`XRANGE`](/commands/xrange) +be considered somewhat related to [`XRANGE`]({{< relref "/commands/xrange" >}}): it will return a range of items +inside streams, however it has two fundamental differences compared to [`XRANGE`]({{< relref "/commands/xrange" >}}) even if we just consider the synchronous usage: * This command can be called with multiple streams if we want to read at the same time from a number of keys. This is a key feature of `XREAD` because especially when blocking with **BLOCK**, to be able to listen with a single connection to multiple keys is a vital feature. -* While [`XRANGE`](/commands/xrange) returns items in a range of IDs, `XREAD` is more suited in +* While [`XRANGE`]({{< relref "/commands/xrange" >}}) returns items in a range of IDs, `XREAD` is more suited in order to consume the stream starting from the first entry which is greater than any other entry we saw so far. So what we pass to `XREAD` is, for each stream, the ID of the last element that we received from that stream. @@ -177,7 +176,7 @@ also supports a blocking mode). ## Incomplete IDs -To use incomplete IDs is valid, like it is valid for [`XRANGE`](/commands/xrange). However +To use incomplete IDs is valid, like it is valid for [`XRANGE`]({{< relref "/commands/xrange" >}}). However here the sequence part of the ID, if missing, is always interpreted as zero, so the command: @@ -195,7 +194,7 @@ is exactly equivalent to In its synchronous form, the command can get new data as long as there are more items available. 
However, at some point, we'll have to wait for -producers of data to use [`XADD`](/commands/xadd) to push new entries inside the streams +producers of data to use [`XADD`]({{< relref "/commands/xadd" >}}) to push new entries inside the streams we are consuming. In order to avoid polling at a fixed or adaptive interval the command is able to block if it could not return any data, according to the specified streams and IDs, and automatically unblock once one of @@ -229,7 +228,7 @@ a null reply because the timeout has elapsed without new data arriving: ## The special `$` ID. When blocking sometimes we want to receive just entries that are added -to the stream via [`XADD`](/commands/xadd) starting from the moment we block. In such a case +to the stream via [`XADD`]({{< relref "/commands/xadd" >}}) starting from the moment we block. In such a case we are not interested in the history of already added entries. For this use case, we would have to check the stream top element ID, and use such ID in the `XREAD` command line. This is not clean and requires to @@ -267,7 +266,7 @@ use cases. However note that with streams this is not a problem: stream entries are not removed from the stream when clients are served, so every -client waiting will be served as soon as an [`XADD`](/commands/xadd) command provides +client waiting will be served as soon as an [`XADD`]({{< relref "/commands/xadd" >}}) command provides data to the stream. Reading the [Redis Streams introduction](/topics/streams-intro) is highly diff --git a/content/commands/xreadgroup/index.md b/content/commands/xreadgroup/index.md index 2acc027737..a9dc8d902b 100644 --- a/content/commands/xreadgroup/index.md +++ b/content/commands/xreadgroup/index.md @@ -84,16 +84,15 @@ linkTitle: XREADGROUP since: 5.0.0 summary: Returns new or historical messages from a stream for a consumer in a group. Blocks until a message is available otherwise. 
-syntax_fmt: "XREADGROUP GROUP\_group consumer [COUNT\_count] [BLOCK\_milliseconds]
-\
- \ [NOACK] STREAMS\_key [key ...] id [id ...]"
+syntax_fmt: "XREADGROUP GROUP\_group consumer [COUNT\_count] [BLOCK\_milliseconds]\
+ \ [NOACK] STREAMS\_key [key ...] id [id ...]"
syntax_str: "[COUNT\_count] [BLOCK\_milliseconds] [NOACK] STREAMS\_key [key ...] id\
\ [id ...]"
title: XREADGROUP
---
-The `XREADGROUP` command is a special version of the [`XREAD`](/commands/xread) command
+The `XREADGROUP` command is a special version of the [`XREAD`]({{< relref "/commands/xread" >}}) command
with support for consumer groups. Probably you will have to understand the
-[`XREAD`](/commands/xread) command before reading this page will makes sense.
+[`XREAD`]({{< relref "/commands/xread" >}}) command before reading this page will make sense.
Moreover, if you are new to streams, we recommend to read our
[introduction to Redis Streams](/topics/streams-intro).
@@ -102,14 +101,14 @@ so that following how this command works will be simpler.
## Consumer groups in 30 seconds
-The difference between this command and the vanilla [`XREAD`](/commands/xread) is that this
+The difference between this command and the vanilla [`XREAD`]({{< relref "/commands/xread" >}}) is that this
one supports consumer groups.
-Without consumer groups, just using [`XREAD`](/commands/xread), all the clients are served with all the entries arriving in a stream. Instead using consumer groups with `XREADGROUP`, it is possible to create groups of clients that consume different parts of the messages arriving in a given stream. If, for instance, the stream gets the new entries A, B, and C and there are two consumers reading via a consumer group, one client will get, for instance, the messages A and C, and the other the message B, and so forth.
+Without consumer groups, just using [`XREAD`]({{< relref "/commands/xread" >}}), all the clients are served with all the entries arriving in a stream. 
Instead using consumer groups with `XREADGROUP`, it is possible to create groups of clients that consume different parts of the messages arriving in a given stream. If, for instance, the stream gets the new entries A, B, and C and there are two consumers reading via a consumer group, one client will get, for instance, the messages A and C, and the other the message B, and so forth. Within a consumer group, a given consumer (that is, just a client consuming messages from the stream), has to identify with a unique *consumer name*. Which is just a string. -One of the guarantees of consumer groups is that a given consumer can only see the history of messages that were delivered to it, so a message has just a single owner. However there is a special feature called *message claiming* that allows other consumers to claim messages in case there is a non recoverable failure of some consumer. In order to implement such semantics, consumer groups require explicit acknowledgment of the messages successfully processed by the consumer, via the [`XACK`](/commands/xack) command. This is needed because the stream will track, for each consumer group, who is processing what message. +One of the guarantees of consumer groups is that a given consumer can only see the history of messages that were delivered to it, so a message has just a single owner. However there is a special feature called *message claiming* that allows other consumers to claim messages in case there is a non recoverable failure of some consumer. In order to implement such semantics, consumer groups require explicit acknowledgment of the messages successfully processed by the consumer, via the [`XACK`]({{< relref "/commands/xack" >}}) command. This is needed because the stream will track, for each consumer group, who is processing what message. 
This is how to understand if you want to use a consumer group or not: @@ -124,7 +123,7 @@ however `XREADGROUP` *requires* a special and mandatory option: GROUP The group name is just the name of a consumer group associated to the stream. -The group is created using the [`XGROUP`](/commands/xgroup) command. The consumer name is the +The group is created using the [`XGROUP`]({{< relref "/commands/xgroup" >}}) command. The consumer name is the string that is used by the client to identify itself inside the group. The consumer is auto created inside the consumer group the first time it is saw. Different clients should select a different consumer name. @@ -134,9 +133,9 @@ message was delivered to you: the message will be stored inside the consumer group in what is called a Pending Entries List (PEL), that is a list of message IDs delivered but not yet acknowledged. -The client will have to acknowledge the message processing using [`XACK`](/commands/xack) +The client will have to acknowledge the message processing using [`XACK`]({{< relref "/commands/xack" >}}) in order for the pending entry to be removed from the PEL. The PEL -can be inspected using the [`XPENDING`](/commands/xpending) command. +can be inspected using the [`XPENDING`]({{< relref "/commands/xpending" >}}) command. The `NOACK` subcommand can be used to avoid adding the message to the PEL in cases where reliability is not a requirement and the occasional message loss @@ -148,7 +147,7 @@ be one of the following two: * The special `>` ID, which means that the consumer want to receive only messages that were *never delivered to any other consumer*. It just means, give me new messages. * Any other ID, that is, 0 or any other valid ID or incomplete ID (just the millisecond time part), will have the effect of returning entries that are pending for the consumer sending the command with IDs greater than the one provided. 
So basically if the ID is not `>`, then the command will just let the client access its pending entries: messages delivered to it, but not yet acknowledged. Note that in this case, both `BLOCK` and `NOACK` are ignored. -Like [`XREAD`](/commands/xread) the `XREADGROUP` command can be used in a blocking way. There +Like [`XREAD`]({{< relref "/commands/xread" >}}) the `XREADGROUP` command can be used in a blocking way. There are no differences in this regard. ## What happens when a message is delivered to a consumer? @@ -156,7 +155,7 @@ are no differences in this regard. Two things: 1. If the message was never delivered to anyone, that is, if we are talking about a new message, then a PEL (Pending Entries List) is created. -2. If instead the message was already delivered to this consumer, and it is just re-fetching the same message again, then the *last delivery counter* is updated to the current time, and the *number of deliveries* is incremented by one. You can access those message properties using the [`XPENDING`](/commands/xpending) command. +2. If instead the message was already delivered to this consumer, and it is just re-fetching the same message again, then the *last delivery counter* is updated to the current time, and the *number of deliveries* is incremented by one. You can access those message properties using the [`XPENDING`]({{< relref "/commands/xpending" >}}) command. ## Usage example @@ -183,7 +182,7 @@ END ``` In this way the example consumer code will fetch only new messages, process -them, and acknowledge them via [`XACK`](/commands/xack). However the example code above is +them, and acknowledge them via [`XACK`]({{< relref "/commands/xack" >}}). However the example code above is not complete, because it does not handle recovering after a crash. 
What will happen if we crash in the middle of processing messages, is that our messages will remain in the pending entries list, so we can access our @@ -193,11 +192,11 @@ know that we processed and acknowledged all the pending messages: we can start to use `>` as ID, in order to get the new messages and rejoin the consumers that are processing new things. -To see how the command actually replies, please check the [`XREAD`](/commands/xread) command page. +To see how the command actually replies, please check the [`XREAD`]({{< relref "/commands/xread" >}}) command page. ## What happens when a pending message is deleted? -Entries may be deleted from the stream due to trimming or explicit calls to [`XDEL`](/commands/xdel) at any time. +Entries may be deleted from the stream due to trimming or explicit calls to [`XDEL`]({{< relref "/commands/xdel" >}}) at any time. By design, Redis doesn't prevent the deletion of entries that are present in the stream's PELs. When this happens, the PELs retain the deleted entries' IDs, but the actual entry payload is no longer available. Therefore, when reading such PEL entries, Redis will return a null value in place of their respective data. 
diff --git a/content/commands/xrevrange/index.md b/content/commands/xrevrange/index.md index 5bb7e5ac90..a93e6498c7 100644 --- a/content/commands/xrevrange/index.md +++ b/content/commands/xrevrange/index.md @@ -60,7 +60,7 @@ syntax_fmt: "XREVRANGE key end start [COUNT\_count]" syntax_str: "end start [COUNT\_count]" title: XREVRANGE --- -This command is exactly like [`XRANGE`](/commands/xrange), but with the notable difference of +This command is exactly like [`XRANGE`]({{< relref "/commands/xrange" >}}), but with the notable difference of returning the entries in reverse order, and also taking the start-end range in reverse order: in `XREVRANGE` you need to state the *end* ID and later the *start* ID, and the command will produce all the element diff --git a/content/commands/xsetid/index.md b/content/commands/xsetid/index.md index b4b4fd4f8e..e9e49c0096 100644 --- a/content/commands/xsetid/index.md +++ b/content/commands/xsetid/index.md @@ -61,8 +61,7 @@ key_specs: linkTitle: XSETID since: 5.0.0 summary: An internal command for replicating stream values. -syntax_fmt: "XSETID key last-id [ENTRIESADDED\_entries-added] - [MAXDELETEDID\_max-deleted-id]" +syntax_fmt: "XSETID key last-id [ENTRIESADDED\_entries-added] [MAXDELETEDID\_max-deleted-id]" syntax_str: "last-id [ENTRIESADDED\_entries-added] [MAXDELETEDID\_max-deleted-id]" title: XSETID --- diff --git a/content/commands/zadd/index.md b/content/commands/zadd/index.md index 9f4b431186..2f09f1b804 100644 --- a/content/commands/zadd/index.md +++ b/content/commands/zadd/index.md @@ -101,9 +101,7 @@ linkTitle: ZADD since: 1.2.0 summary: Adds one or more members to a sorted set, or updates their scores. Creates the key if it doesn't exist. -syntax_fmt: "ZADD key [NX | XX] [GT | LT] [CH] [INCR] score member [score member -\ - \ ...]" +syntax_fmt: ZADD key [NX | XX] [GT | LT] [CH] [INCR] score member [score member ...] 
syntax_str: '[NX | XX] [GT | LT] [CH] [INCR] score member [score member ...]' title: ZADD --- @@ -130,7 +128,7 @@ the first score argument. Options are: * **LT**: Only update existing elements if the new score is **less than** the current score. This flag doesn't prevent adding new elements. * **GT**: Only update existing elements if the new score is **greater than** the current score. This flag doesn't prevent adding new elements. * **CH**: Modify the return value from the number of new elements added, to the total number of elements changed (CH is an abbreviation of *changed*). Changed elements are **new elements added** and elements already existing for which **the score was updated**. So elements specified in the command line having the same score as they had in the past are not counted. Note: normally the return value of `ZADD` only counts the number of new elements added. -* **INCR**: When this option is specified `ZADD` acts like [`ZINCRBY`](/commands/zincrby). Only one score-element pair can be specified in this mode. +* **INCR**: When this option is specified `ZADD` acts like [`ZINCRBY`]({{< relref "/commands/zincrby" >}}). Only one score-element pair can be specified in this mode. Note: The **GT**, **LT** and **NX** options are mutually exclusive. @@ -146,10 +144,10 @@ Sorted sets are sorted by their score in an ascending way. The same element only exists a single time, no repeated elements are permitted. The score can be modified both by `ZADD` that will update the element score, and as a side effect, its position on the sorted set, and -by [`ZINCRBY`](/commands/zincrby) that can be used in order to update the score relatively to its +by [`ZINCRBY`]({{< relref "/commands/zincrby" >}}) that can be used in order to update the score relatively to its previous value. 
-The current score of an element can be retrieved using the [`ZSCORE`](/commands/zscore) command, +The current score of an element can be retrieved using the [`ZSCORE`]({{< relref "/commands/zscore" >}}) command, that can also be used to verify if an element already exists or not. For an introduction to sorted sets, see the data types page on [sorted @@ -165,7 +163,7 @@ is unique, it is possible to add multiple different elements *having the same sc The lexicographic ordering used is binary, it compares strings as array of bytes. -If the user inserts all the elements in a sorted set with the same score (for example 0), all the elements of the sorted set are sorted lexicographically, and range queries on elements are possible using the command [`ZRANGEBYLEX`](/commands/zrangebylex) (Note: it is also possible to query sorted sets by range of scores using [`ZRANGEBYSCORE`](/commands/zrangebyscore)). +If the user inserts all the elements in a sorted set with the same score (for example 0), all the elements of the sorted set are sorted lexicographically, and range queries on elements are possible using the command [`ZRANGEBYLEX`]({{< relref "/commands/zrangebylex" >}}) (Note: it is also possible to query sorted sets by range of scores using [`ZRANGEBYSCORE`]({{< relref "/commands/zrangebyscore" >}})). ## Examples diff --git a/content/commands/zcount/index.md b/content/commands/zcount/index.md index 263f73de57..76fe1e4359 100644 --- a/content/commands/zcount/index.md +++ b/content/commands/zcount/index.md @@ -57,9 +57,9 @@ Returns the number of elements in the sorted set at `key` with a score between `min` and `max`. The `min` and `max` arguments have the same semantic as described for -[`ZRANGEBYSCORE`](/commands/zrangebyscore). +[`ZRANGEBYSCORE`]({{< relref "/commands/zrangebyscore" >}}). -Note: the command has a complexity of just O(log(N)) because it uses elements ranks (see [`ZRANK`](/commands/zrank)) to get an idea of the range. 
Because of this there is no need to do a work proportional to the size of the range. +Note: the command has a complexity of just O(log(N)) because it uses elements ranks (see [`ZRANK`]({{< relref "/commands/zrank" >}})) to get an idea of the range. Because of this there is no need to do a work proportional to the size of the range. ## Examples diff --git a/content/commands/zdiff/index.md b/content/commands/zdiff/index.md index 96ebe038f0..835a975e89 100644 --- a/content/commands/zdiff/index.md +++ b/content/commands/zdiff/index.md @@ -57,7 +57,7 @@ syntax_fmt: ZDIFF numkeys key [key ...] [WITHSCORES] syntax_str: key [key ...] [WITHSCORES] title: ZDIFF --- -This command is similar to [`ZDIFFSTORE`](/commands/zdiffstore), but instead of storing the resulting +This command is similar to [`ZDIFFSTORE`]({{< relref "/commands/zdiffstore" >}}), but instead of storing the resulting sorted set, it is returned to the client. ## Examples diff --git a/content/commands/zinter/index.md b/content/commands/zinter/index.md index fef7d43f51..c0a71cf33e 100644 --- a/content/commands/zinter/index.md +++ b/content/commands/zinter/index.md @@ -76,17 +76,16 @@ key_specs: linkTitle: ZINTER since: 6.2.0 summary: Returns the intersect of multiple sorted sets. -syntax_fmt: "ZINTER numkeys key [key ...] [WEIGHTS\_weight [weight ...]] - [AGGREGATE\_\ +syntax_fmt: "ZINTER numkeys key [key ...] [WEIGHTS\_weight [weight ...]] [AGGREGATE\_\ ] [WITHSCORES]" syntax_str: "key [key ...] [WEIGHTS\_weight [weight ...]] [AGGREGATE\_] [WITHSCORES]" title: ZINTER --- -This command is similar to [`ZINTERSTORE`](/commands/zinterstore), but instead of storing the resulting +This command is similar to [`ZINTERSTORE`]({{< relref "/commands/zinterstore" >}}), but instead of storing the resulting sorted set, it is returned to the client. -For a description of the `WEIGHTS` and `AGGREGATE` options, see [`ZUNIONSTORE`](/commands/zunionstore). 
+For a description of the `WEIGHTS` and `AGGREGATE` options, see [`ZUNIONSTORE`]({{< relref "/commands/zunionstore" >}}). ## Examples diff --git a/content/commands/zintercard/index.md b/content/commands/zintercard/index.md index 0ef60ded42..e2c14a4921 100644 --- a/content/commands/zintercard/index.md +++ b/content/commands/zintercard/index.md @@ -56,7 +56,7 @@ syntax_fmt: "ZINTERCARD numkeys key [key ...] [LIMIT\_limit]" syntax_str: "key [key ...] [LIMIT\_limit]" title: ZINTERCARD --- -This command is similar to [`ZINTER`](/commands/zinter), but instead of returning the result set, it returns just the cardinality of the result. +This command is similar to [`ZINTER`]({{< relref "/commands/zinter" >}}), but instead of returning the result set, it returns just the cardinality of the result. Keys that do not exist are considered to be empty sets. With one of the keys being an empty set, the resulting set is also empty (since set intersection with an empty set always results in an empty set). diff --git a/content/commands/zinterstore/index.md b/content/commands/zinterstore/index.md index e6267cff96..da25adef9f 100644 --- a/content/commands/zinterstore/index.md +++ b/content/commands/zinterstore/index.md @@ -88,8 +88,7 @@ key_specs: linkTitle: ZINTERSTORE since: 2.0.0 summary: Stores the intersect of multiple sorted sets in a key. -syntax_fmt: "ZINTERSTORE destination numkeys key [key ...] [WEIGHTS\_weight - [weight\ +syntax_fmt: "ZINTERSTORE destination numkeys key [key ...] [WEIGHTS\_weight [weight\ \ ...]] [AGGREGATE\_]" syntax_str: "numkeys key [key ...] [WEIGHTS\_weight [weight ...]] [AGGREGATE\_]" @@ -106,7 +105,7 @@ Because intersection requires an element to be a member of every given sorted set, this results in the score of every element in the resulting sorted set to be equal to the number of input sorted sets. -For a description of the `WEIGHTS` and `AGGREGATE` options, see [`ZUNIONSTORE`](/commands/zunionstore). 
+For a description of the `WEIGHTS` and `AGGREGATE` options, see [`ZUNIONSTORE`]({{< relref "/commands/zunionstore" >}}). If `destination` already exists, it is overwritten. diff --git a/content/commands/zlexcount/index.md b/content/commands/zlexcount/index.md index 82fc1b50f6..98e5ad2b7b 100644 --- a/content/commands/zlexcount/index.md +++ b/content/commands/zlexcount/index.md @@ -56,9 +56,9 @@ title: ZLEXCOUNT When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns the number of elements in the sorted set at `key` with a value between `min` and `max`. The `min` and `max` arguments have the same meaning as described for -[`ZRANGEBYLEX`](/commands/zrangebylex). +[`ZRANGEBYLEX`]({{< relref "/commands/zrangebylex" >}}). -Note: the command has a complexity of just O(log(N)) because it uses elements ranks (see [`ZRANK`](/commands/zrank)) to get an idea of the range. Because of this there is no need to do a work proportional to the size of the range. +Note: the command has a complexity of just O(log(N)) because it uses elements ranks (see [`ZRANK`]({{< relref "/commands/zrank" >}})) to get an idea of the range. Because of this there is no need to do a work proportional to the size of the range. ## Examples diff --git a/content/commands/zmpop/index.md b/content/commands/zmpop/index.md index 754d7a1ea2..e3382c5b7d 100644 --- a/content/commands/zmpop/index.md +++ b/content/commands/zmpop/index.md @@ -72,12 +72,12 @@ title: ZMPOP --- Pops one or more elements, that are member-score pairs, from the first non-empty sorted set in the provided list of key names. -`ZMPOP` and [`BZMPOP`](/commands/bzmpop) are similar to the following, more limited, commands: +`ZMPOP` and [`BZMPOP`]({{< relref "/commands/bzmpop" >}}) are similar to the following, more limited, commands: -- [`ZPOPMIN`](/commands/zpopmin) or [`ZPOPMAX`](/commands/zpopmax) which take only one key, and can return multiple elements. 
-- [`BZPOPMIN`](/commands/bzpopmin) or [`BZPOPMAX`](/commands/bzpopmax) which take multiple keys, but return only one element from just one key. +- [`ZPOPMIN`]({{< relref "/commands/zpopmin" >}}) or [`ZPOPMAX`]({{< relref "/commands/zpopmax" >}}) which take only one key, and can return multiple elements. +- [`BZPOPMIN`]({{< relref "/commands/bzpopmin" >}}) or [`BZPOPMAX`]({{< relref "/commands/bzpopmax" >}}) which take multiple keys, but return only one element from just one key. -See [`BZMPOP`](/commands/bzmpop) for the blocking variant of this command. +See [`BZMPOP`]({{< relref "/commands/bzmpop" >}}) for the blocking variant of this command. When the `MIN` modifier is used, the elements popped are those with the lowest scores from the first non-empty sorted set. The `MAX` modifier causes elements with the highest scores to be popped. The optional `COUNT` can be used to specify the number of elements to pop, and is set to 1 by default. diff --git a/content/commands/zrandmember/index.md b/content/commands/zrandmember/index.md index e436ce63b8..dc43e7c317 100644 --- a/content/commands/zrandmember/index.md +++ b/content/commands/zrandmember/index.md @@ -62,7 +62,7 @@ title: ZRANDMEMBER When called with just the `key` argument, return a random element from the sorted set value stored at `key`. If the provided `count` argument is positive, return an array of **distinct elements**. -The array's length is either `count` or the sorted set's cardinality ([`ZCARD`](/commands/zcard)), whichever is lower. +The array's length is either `count` or the sorted set's cardinality ([`ZCARD`]({{< relref "/commands/zcard" >}})), whichever is lower. If called with a negative `count`, the behavior changes and the command is allowed to return the **same element multiple times**. In this case, the number of returned elements is the absolute value of the specified `count`. 
diff --git a/content/commands/zrange/index.md b/content/commands/zrange/index.md index 49e2e7d7cd..a4588ed6b8 100644 --- a/content/commands/zrange/index.md +++ b/content/commands/zrange/index.md @@ -87,8 +87,7 @@ key_specs: linkTitle: ZRANGE since: 1.2.0 summary: Returns members in a sorted set within a range of indexes. -syntax_fmt: "ZRANGE key start stop [BYSCORE | BYLEX] [REV] [LIMIT\_offset count] -\ +syntax_fmt: "ZRANGE key start stop [BYSCORE | BYLEX] [REV] [LIMIT\_offset count] \ \ [WITHSCORES]" syntax_str: "start stop [BYSCORE | BYLEX] [REV] [LIMIT\_offset count] [WITHSCORES]" title: ZRANGE @@ -97,7 +96,7 @@ Returns the specified range of elements in the sorted set stored at ``. `ZRANGE` can perform different types of range queries: by index (rank), by the score, or by lexicographical order. -Starting with Redis 6.2.0, this command can replace the following commands: [`ZREVRANGE`](/commands/zrevrange), [`ZRANGEBYSCORE`](/commands/zrangebyscore), [`ZREVRANGEBYSCORE`](/commands/zrevrangebyscore), [`ZRANGEBYLEX`](/commands/zrangebylex) and [`ZREVRANGEBYLEX`](/commands/zrevrangebylex). +Starting with Redis 6.2.0, this command can replace the following commands: [`ZREVRANGE`]({{< relref "/commands/zrevrange" >}}), [`ZRANGEBYSCORE`]({{< relref "/commands/zrangebyscore" >}}), [`ZREVRANGEBYSCORE`]({{< relref "/commands/zrevrangebyscore" >}}), [`ZRANGEBYLEX`]({{< relref "/commands/zrangebylex" >}}) and [`ZREVRANGEBYLEX`]({{< relref "/commands/zrevrangebylex" >}}). ## Common behavior and options @@ -124,7 +123,7 @@ If `` is greater than the end index of the sorted set, Redis will use the ## Score ranges -When the `BYSCORE` option is provided, the command behaves like [`ZRANGEBYSCORE`](/commands/zrangebyscore) and returns the range of elements from the sorted set having scores equal or between `` and ``. 
+When the `BYSCORE` option is provided, the command behaves like [`ZRANGEBYSCORE`]({{< relref "/commands/zrangebyscore" >}}) and returns the range of elements from the sorted set having scores equal or between `` and ``. `` and `` can be `-inf` and `+inf`, denoting the negative and positive infinities, respectively. This means that you are not required to know the highest or lowest score in the sorted set to get all elements from or up to a certain score. @@ -169,7 +168,7 @@ Will return all elements with scores less than 10 and greater than 5. ## Lexicographical ranges -When the `BYLEX` option is used, the command behaves like [`ZRANGEBYLEX`](/commands/zrangebylex) and returns the range of elements from the sorted set between the `` and `` lexicographical closed range intervals. +When the `BYLEX` option is used, the command behaves like [`ZRANGEBYLEX`]({{< relref "/commands/zrangebylex" >}}) and returns the range of elements from the sorted set between the `` and `` lexicographical closed range intervals. Note that lexicographical ordering relies on all elements having the same score. The reply is unspecified when the elements have different scores. diff --git a/content/commands/zrangebylex/index.md b/content/commands/zrangebylex/index.md index f01fd7a0a0..1c3232cef8 100644 --- a/content/commands/zrangebylex/index.md +++ b/content/commands/zrangebylex/index.md @@ -61,7 +61,7 @@ key_specs: limit: 0 type: range linkTitle: ZRANGEBYLEX -replaced_by: '[`ZRANGE`](/commands/zrange) with the `BYLEX` argument' +replaced_by: '[`ZRANGE`]({{< relref "/commands/zrange" >}}) with the `BYLEX` argument' since: 2.8.9 summary: Returns members in a sorted set within a lexicographical range. 
syntax_fmt: "ZRANGEBYLEX key min max [LIMIT\_offset count]" diff --git a/content/commands/zrangebyscore/index.md b/content/commands/zrangebyscore/index.md index 7a613a618d..56f6df8c9c 100644 --- a/content/commands/zrangebyscore/index.md +++ b/content/commands/zrangebyscore/index.md @@ -70,7 +70,7 @@ key_specs: limit: 0 type: range linkTitle: ZRANGEBYSCORE -replaced_by: '[`ZRANGE`](/commands/zrange) with the `BYSCORE` argument' +replaced_by: '[`ZRANGE`]({{< relref "/commands/zrange" >}}) with the `BYSCORE` argument' since: 1.0.5 summary: Returns members in a sorted set within a range of scores. syntax_fmt: "ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT\_offset count]" diff --git a/content/commands/zrangestore/index.md b/content/commands/zrangestore/index.md index df82edfe63..fde30e79eb 100644 --- a/content/commands/zrangestore/index.md +++ b/content/commands/zrangestore/index.md @@ -93,13 +93,12 @@ key_specs: linkTitle: ZRANGESTORE since: 6.2.0 summary: Stores a range of members from sorted set in a key. -syntax_fmt: "ZRANGESTORE dst src min max [BYSCORE | BYLEX] [REV] [LIMIT\_offset -\ - \ count]" +syntax_fmt: "ZRANGESTORE dst src min max [BYSCORE | BYLEX] [REV] [LIMIT\_offset \ + \ count]" syntax_str: "src min max [BYSCORE | BYLEX] [REV] [LIMIT\_offset count]" title: ZRANGESTORE --- -This command is like [`ZRANGE`](/commands/zrange), but stores the result in the `` destination key. +This command is like [`ZRANGE`]({{< relref "/commands/zrange" >}}), but stores the result in the `` destination key. ## Examples diff --git a/content/commands/zrank/index.md b/content/commands/zrank/index.md index d2abe39001..feb1deb2e8 100644 --- a/content/commands/zrank/index.md +++ b/content/commands/zrank/index.md @@ -64,7 +64,7 @@ score has rank `0`. The optional `WITHSCORE` argument supplements the command's reply with the score of the element returned. 
-Use [`ZREVRANK`](/commands/zrevrank) to get the rank of an element with the scores ordered from high +Use [`ZREVRANK`]({{< relref "/commands/zrevrank" >}}) to get the rank of an element with the scores ordered from high to low. ## Examples diff --git a/content/commands/zremrangebylex/index.md b/content/commands/zremrangebylex/index.md index e4fbbed799..cf882d5610 100644 --- a/content/commands/zremrangebylex/index.md +++ b/content/commands/zremrangebylex/index.md @@ -56,7 +56,7 @@ title: ZREMRANGEBYLEX --- When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command removes all elements in the sorted set stored at `key` between the lexicographical range specified by `min` and `max`. -The meaning of `min` and `max` are the same of the [`ZRANGEBYLEX`](/commands/zrangebylex) command. Similarly, this command actually removes the same elements that [`ZRANGEBYLEX`](/commands/zrangebylex) would return if called with the same `min` and `max` arguments. +The meaning of `min` and `max` are the same of the [`ZRANGEBYLEX`]({{< relref "/commands/zrangebylex" >}}) command. Similarly, this command actually removes the same elements that [`ZRANGEBYLEX`]({{< relref "/commands/zrangebylex" >}}) would return if called with the same `min` and `max` arguments. ## Examples diff --git a/content/commands/zrevrange/index.md b/content/commands/zrevrange/index.md index 827f151a41..f8e391c1ae 100644 --- a/content/commands/zrevrange/index.md +++ b/content/commands/zrevrange/index.md @@ -55,7 +55,7 @@ key_specs: limit: 0 type: range linkTitle: ZREVRANGE -replaced_by: '[`ZRANGE`](/commands/zrange) with the `REV` argument' +replaced_by: '[`ZRANGE`]({{< relref "/commands/zrange" >}}) with the `REV` argument' since: 1.2.0 summary: Returns members in a sorted set within a range of indexes in reverse order. 
syntax_fmt: ZREVRANGE key start stop [WITHSCORES] @@ -66,7 +66,7 @@ Returns the specified range of elements in the sorted set stored at `key`. The elements are considered to be ordered from the highest to the lowest score. Descending lexicographical order is used for elements with equal score. -Apart from the reversed ordering, `ZREVRANGE` is similar to [`ZRANGE`](/commands/zrange). +Apart from the reversed ordering, `ZREVRANGE` is similar to [`ZRANGE`]({{< relref "/commands/zrange" >}}). ## Examples diff --git a/content/commands/zrevrangebylex/index.md b/content/commands/zrevrangebylex/index.md index 9aca773491..789b9e58bf 100644 --- a/content/commands/zrevrangebylex/index.md +++ b/content/commands/zrevrangebylex/index.md @@ -62,7 +62,7 @@ key_specs: limit: 0 type: range linkTitle: ZREVRANGEBYLEX -replaced_by: '[`ZRANGE`](/commands/zrange) with the `REV` and `BYLEX` arguments' +replaced_by: '[`ZRANGE`]({{< relref "/commands/zrange" >}}) with the `REV` and `BYLEX` arguments' since: 2.8.9 summary: Returns members in a sorted set within a lexicographical range in reverse order. @@ -72,7 +72,7 @@ title: ZREVRANGEBYLEX --- When all the elements in a sorted set are inserted with the same score, in order to force lexicographical ordering, this command returns all the elements in the sorted set at `key` with a value between `max` and `min`. -Apart from the reversed ordering, `ZREVRANGEBYLEX` is similar to [`ZRANGEBYLEX`](/commands/zrangebylex). +Apart from the reversed ordering, `ZREVRANGEBYLEX` is similar to [`ZRANGEBYLEX`]({{< relref "/commands/zrangebylex" >}}). 
## Examples diff --git a/content/commands/zrevrangebyscore/index.md b/content/commands/zrevrangebyscore/index.md index 9c26ea08a9..4199915a4b 100644 --- a/content/commands/zrevrangebyscore/index.md +++ b/content/commands/zrevrangebyscore/index.md @@ -69,7 +69,7 @@ key_specs: limit: 0 type: range linkTitle: ZREVRANGEBYSCORE -replaced_by: '[`ZRANGE`](/commands/zrange) with the `REV` and `BYSCORE` arguments' +replaced_by: '[`ZRANGE`]({{< relref "/commands/zrange" >}}) with the `REV` and `BYSCORE` arguments' since: 2.2.0 summary: Returns members in a sorted set within a range of scores in reverse order. syntax_fmt: "ZREVRANGEBYSCORE key max min [WITHSCORES] [LIMIT\_offset count]" @@ -85,7 +85,7 @@ The elements having the same score are returned in reverse lexicographical order. Apart from the reversed ordering, `ZREVRANGEBYSCORE` is similar to -[`ZRANGEBYSCORE`](/commands/zrangebyscore). +[`ZRANGEBYSCORE`]({{< relref "/commands/zrangebyscore" >}}). ## Examples diff --git a/content/commands/zrevrank/index.md b/content/commands/zrevrank/index.md index d6ec45c909..f1ddbd708c 100644 --- a/content/commands/zrevrank/index.md +++ b/content/commands/zrevrank/index.md @@ -64,7 +64,7 @@ score has rank `0`. The optional `WITHSCORE` argument supplements the command's reply with the score of the element returned. -Use [`ZRANK`](/commands/zrank) to get the rank of an element with the scores ordered from low to +Use [`ZRANK`]({{< relref "/commands/zrank" >}}) to get the rank of an element with the scores ordered from low to high. ## Examples diff --git a/content/commands/zscan/index.md b/content/commands/zscan/index.md index ed674050c9..c41659913d 100644 --- a/content/commands/zscan/index.md +++ b/content/commands/zscan/index.md @@ -62,4 +62,4 @@ syntax_fmt: "ZSCAN key cursor [MATCH\_pattern] [COUNT\_count]" syntax_str: "cursor [MATCH\_pattern] [COUNT\_count]" title: ZSCAN --- -See [`SCAN`](/commands/scan) for `ZSCAN` documentation. 
+See [`SCAN`]({{< relref "/commands/scan" >}}) for `ZSCAN` documentation. diff --git a/content/commands/zunion/index.md b/content/commands/zunion/index.md index 8db2507c4f..5e04505f73 100644 --- a/content/commands/zunion/index.md +++ b/content/commands/zunion/index.md @@ -75,17 +75,16 @@ key_specs: linkTitle: ZUNION since: 6.2.0 summary: Returns the union of multiple sorted sets. -syntax_fmt: "ZUNION numkeys key [key ...] [WEIGHTS\_weight [weight ...]] - [AGGREGATE\_\ +syntax_fmt: "ZUNION numkeys key [key ...] [WEIGHTS\_weight [weight ...]] [AGGREGATE\_\ ] [WITHSCORES]" syntax_str: "key [key ...] [WEIGHTS\_weight [weight ...]] [AGGREGATE\_] [WITHSCORES]" title: ZUNION --- -This command is similar to [`ZUNIONSTORE`](/commands/zunionstore), but instead of storing the resulting +This command is similar to [`ZUNIONSTORE`]({{< relref "/commands/zunionstore" >}}), but instead of storing the resulting sorted set, it is returned to the client. -For a description of the `WEIGHTS` and `AGGREGATE` options, see [`ZUNIONSTORE`](/commands/zunionstore). +For a description of the `WEIGHTS` and `AGGREGATE` options, see [`ZUNIONSTORE`]({{< relref "/commands/zunionstore" >}}). ## Examples diff --git a/content/commands/zunionstore/index.md b/content/commands/zunionstore/index.md index e8d56160aa..2f0ccb3a60 100644 --- a/content/commands/zunionstore/index.md +++ b/content/commands/zunionstore/index.md @@ -87,8 +87,7 @@ key_specs: linkTitle: ZUNIONSTORE since: 2.0.0 summary: Stores the union of multiple sorted sets in a key. -syntax_fmt: "ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS\_weight - [weight\ +syntax_fmt: "ZUNIONSTORE destination numkeys key [key ...] [WEIGHTS\_weight [weight\ \ ...]] [AGGREGATE\_]" syntax_str: "numkeys key [key ...] 
[WEIGHTS\_weight [weight ...]] [AGGREGATE\_]" diff --git a/content/develop/_index.md b/content/develop/_index.md index e939c970da..c0af2cf267 100644 --- a/content/develop/_index.md +++ b/content/develop/_index.md @@ -1,30 +1,4 @@ --- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: Learn how to develop with Redis -hideListLinks: true -linkTitle: Develop title: Develop ---- - -The following gets you started with Redis: - -1. Try one of our [quick start guides](/develop/get-started) -2. [Connect](/develop/connect) to your Redis database -3. Understand [data types](/develop/data-types) and [commands](/commands) -4. Learn how to [interact with your data](/develop/interact) beyond simple data structures - -If you already know how to use Redis and you are searching for the installation instructions, the the following resources will help you: - -* [Get started with Redis Cloud](/operate/rc/rc-quickstart) -* [Install Redis OSS or Stack](/operate/oss_and_stack/install) -* [Install Redis Enterprise Software](/operate/rs/installing-upgrading/install) -* [Get started with Redis Enterprise on Kubernetes](/operate/kubernetes/deployment/quick-start) \ No newline at end of file +description: Learn how to develop with Redis +--- \ No newline at end of file diff --git a/content/develop/connect/cli.md b/content/develop/connect/cli.md index 9f64661f99..885b124584 100644 --- a/content/develop/connect/cli.md +++ b/content/develop/connect/cli.md @@ -103,7 +103,7 @@ You can change the port using several command line options. 
To specify a differe PONG If your instance is password protected, the `-a ` option will -perform authentication saving the need of explicitly using the [`AUTH`](/commands/auth) command: +perform authentication saving the need of explicitly using the [`AUTH`]({{< relref "/commands/auth" >}}) command: $ redis-cli -a myUnguessablePazzzzzword123 PING PONG @@ -191,7 +191,7 @@ arguments with spaces, newlines, or other special characters: It is possible to execute a single command a specified number of times with a user-selected pause between executions. This is useful in different contexts - for example when we want to continuously monitor some -key content or [`INFO`](/commands/info) field output, or when we want to simulate some +key content or [`INFO`]({{< relref "/commands/info" >}}) field output, or when we want to simulate some recurring write event, such as pushing a new item into a list every 5 seconds. This feature is controlled by two options: `-r ` and `-i `. @@ -248,15 +248,15 @@ run scripts from a file as an argument: $ redis-cli --eval /tmp/script.lua location:hastings:temp , 23 OK -The Redis [`EVAL`](/commands/eval) command takes the list of keys the script uses, and the -other non key arguments, as different arrays. When calling [`EVAL`](/commands/eval) you +The Redis [`EVAL`]({{< relref "/commands/eval" >}}) command takes the list of keys the script uses, and the +other non key arguments, as different arrays. When calling [`EVAL`]({{< relref "/commands/eval" >}}) you provide the number of keys as a number. When calling `redis-cli` with the `--eval` option above, there is no need to specify the number of keys explicitly. Instead it uses the convention of separating keys and arguments with a comma. This is why in the above call you see `location:hastings:temp , 23` as arguments. -So `location:hastings:temp` will populate the [`KEYS`](/commands/keys) array, and `23` the `ARGV` array. 
+So `location:hastings:temp` will populate the [`KEYS`]({{< relref "/commands/keys" >}}) array, and `23` the `ARGV` array. The `--eval` option is useful when writing simple scripts. For more complex work, the Lua debugger is recommended. It is possible to mix the two approaches, since the debugger can also execute scripts from an external file. @@ -397,7 +397,7 @@ name by a number: ## Showing help about Redis commands -`redis-cli` provides online help for most Redis [commands](/commands), using the `HELP` command. The command can be used +`redis-cli` provides online help for most Redis [commands]({{< relref "/commands" >}}), using the `HELP` command. The command can be used in two forms: * `HELP @` shows all the commands about a given category. The @@ -419,7 +419,7 @@ categories are: - `@stream` * `HELP ` shows specific help for the command given as argument. -For example in order to show help for the [`PFADD`](/commands/pfadd) command, use: +For example in order to show help for the [`PFADD`]({{< relref "/commands/pfadd" >}}) command, use: 127.0.0.1:6379> HELP PFADD @@ -516,10 +516,10 @@ In the first part of the output, each new key larger than the previous larger key (of the same type) encountered is reported. The summary section provides general stats about the data inside the Redis instance. -The program uses the [`SCAN`](/commands/scan) command, so it can be executed against a busy +The program uses the [`SCAN`]({{< relref "/commands/scan" >}}) command, so it can be executed against a busy server without impacting the operations, however the `-i` option can be used in order to throttle the scanning process of the specified fraction -of second for each [`SCAN`](/commands/scan) command. +of second for each [`SCAN`]({{< relref "/commands/scan" >}}) command. For example, `-i 0.01` will slow down the program execution considerably, but will also reduce the load on the server to a negligible amount. @@ -533,7 +533,7 @@ ASAP if running against a very large data set. 
It is also possible to scan the key space, again in a way that does not block the Redis server (which does happen when you use a command like `KEYS *`), and print all the key names, or filter them for specific -patterns. This mode, like the `--bigkeys` option, uses the [`SCAN`](/commands/scan) command, +patterns. This mode, like the `--bigkeys` option, uses the [`SCAN`]({{< relref "/commands/scan" >}}) command, so keys may be reported multiple times if the dataset is changing, but no key would ever be missing, if that key was present since the start of the iteration. Because of the command that it uses this option is called `--scan`. @@ -554,7 +554,7 @@ Note that `head -10` is used in order to print only the first ten lines of the output. Scanning is able to use the underlying pattern matching capability of -the [`SCAN`](/commands/scan) command with the `--pattern` option. +the [`SCAN`]({{< relref "/commands/scan" >}}) command with the `--pattern` option. $ redis-cli --scan --pattern '*-11*' key-114 @@ -575,17 +575,17 @@ kind of objects, by key name: $ redis-cli --scan --pattern 'user:*' | wc -l 3829433 -You can use `-i 0.01` to add a delay between calls to the [`SCAN`](/commands/scan) command. +You can use `-i 0.01` to add a delay between calls to the [`SCAN`]({{< relref "/commands/scan" >}}) command. This will make the command slower but will significantly reduce load on the server. ## Pub/sub mode The CLI is able to publish messages in Redis Pub/Sub channels using -the [`PUBLISH`](/commands/publish) command. Subscribing to channels in order to receive +the [`PUBLISH`]({{< relref "/commands/publish" >}}) command. Subscribing to channels in order to receive messages is different - the terminal is blocked and waits for messages, so this is implemented as a special mode in `redis-cli`. 
Unlike other special modes this mode is not enabled by using a special option, -but simply by using the [`SUBSCRIBE`](/commands/subscribe) or [`PSUBSCRIBE`](/commands/psubscribe) command, which are available in +but simply by using the [`SUBSCRIBE`]({{< relref "/commands/subscribe" >}}) or [`PSUBSCRIBE`]({{< relref "/commands/psubscribe" >}}) command, which are available in interactive or command mode: $ redis-cli PSUBSCRIBE '*' @@ -608,7 +608,7 @@ To exit the Pub/Sub mode just process `CTRL-C`. ## Monitoring commands executed in Redis Similarly to the Pub/Sub mode, the monitoring mode is entered automatically -once you use the [`MONITOR`](/commands/monitor) command. All commands received by the active Redis instance will be printed to the standard output: +once you use the [`MONITOR`]({{< relref "/commands/monitor" >}}) command. All commands received by the active Redis instance will be printed to the standard output: $ redis-cli MONITOR OK @@ -628,7 +628,7 @@ The `redis-cli` has multiple facilities for studying the latency of a Redis instance and understanding the latency's maximum, average and distribution. The basic latency-checking tool is the `--latency` option. Using this -option the CLI runs a loop where the [`PING`](/commands/ping) command is sent to the Redis +option the CLI runs a loop where the [`PING`]({{< relref "/commands/ping" >}}) command is sent to the Redis instance and the time to receive a reply is measured. This happens 100 times per second, and stats are updated in a real time in the console: diff --git a/content/develop/connect/clients/_index.md b/content/develop/connect/clients/_index.md index 80f7309059..ff01b667d0 100644 --- a/content/develop/connect/clients/_index.md +++ b/content/develop/connect/clients/_index.md @@ -17,7 +17,7 @@ weight: 45 Here, you will learn how to connect your application to a Redis database. 
If you're new to Redis, you might first want to [install Redis with Redis Stack and RedisInsight]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}). -For more Redis topics, see [Using]({{< relref "/develop/manual/" >}}) and [Managing]({{< relref "/operate/oss_and_stack/management/" >}}) Redis. +For more Redis topics, see [Using]({{< relref "/develop/use/" >}}) and [Managing]({{< relref "/operate/oss_and_stack/management/" >}}) Redis. If you're ready to get started, see the following guides for the official client libraries you can use with Redis. For a complete list of community-driven clients, see [Clients](/resources/clients/). diff --git a/content/develop/connect/clients/dotnet.md b/content/develop/connect/clients/dotnet.md index e9e469c636..d060095016 100644 --- a/content/develop/connect/clients/dotnet.md +++ b/content/develop/connect/clients/dotnet.md @@ -234,7 +234,7 @@ ft.Create( schema); ``` -Use [`JSON.SET`](/commands/json.set) to set each user value at the specified path. +Use [`JSON.SET`]({{< baseurl >}}/commands/json.set) to set each user value at the specified path. ```csharp json.Set("user:1", "$", user1); diff --git a/content/develop/connect/clients/java.md b/content/develop/connect/clients/java.md index 97dcfbc9f3..d1a28473af 100644 --- a/content/develop/connect/clients/java.md +++ b/content/develop/connect/clients/java.md @@ -288,7 +288,7 @@ In general, Jedis can throw the following exceptions while executing commands: - `JedisException` - this exception is a catch-all exception that can be thrown for any other unexpected errors. 
Conditions when `JedisException` can be thrown: -- Bad return from a health check with the [`PING`](/commands/ping) command +- Bad return from a health check with the [`PING`]({{< relref "/commands/ping" >}}) command - Failure during SHUTDOWN - Pub/Sub failure when issuing commands (disconnect) - Any unknown server messages diff --git a/content/develop/connect/clients/om-clients/stack-python.md b/content/develop/connect/clients/om-clients/stack-python.md index bdb2b4060b..8d03c20f45 100644 --- a/content/develop/connect/clients/om-clients/stack-python.md +++ b/content/develop/connect/clients/om-clients/stack-python.md @@ -723,7 +723,7 @@ The server responds with an `ok` response regardless of whether the ID provided This is an example of how to run arbitrary Redis commands against instances of a model saved in Redis. Let's see how we can set the time to live (TTL) on a person, so that Redis will expire the JSON document after a configurable number of seconds have passed. -The function `expire_by_id` in `app.py` handles this as follows. It takes two parameters: `id` - the ID of a person to expire, and `seconds` - the number of seconds in the future to expire the person after. This requires us to run the Redis [`EXPIRE`](/commands/expire) command against the person's key. To do this, we need to access the Redis connection from the `Person` model like so: +The function `expire_by_id` in `app.py` handles this as follows. It takes two parameters: `id` - the ID of a person to expire, and `seconds` - the number of seconds in the future to expire the person after. This requires us to run the Redis [`EXPIRE`]({{< relref "/commands/expire" >}}) command against the person's key. 
To do this, we need to access the Redis connection from the `Person` model like so: ```py person_to_expire = Person.get(id) diff --git a/content/develop/connect/clients/om-clients/stack-spring.md b/content/develop/connect/clients/om-clients/stack-spring.md index 13d461ad28..174127329d 100644 --- a/content/develop/connect/clients/om-clients/stack-spring.md +++ b/content/develop/connect/clients/om-clients/stack-spring.md @@ -343,7 +343,7 @@ Several Redis commands were executed on application startup. Let’s break them ### Index Creation -The first one is a call to [`FT.CREATE`](/commands/ft.create), which happens after Redis OM Spring scanned the `@Document` annotations. As you can see, since it encountered the annotation on `Person`, it creates the `PersonIdx` index. +The first one is a call to [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), which happens after Redis OM Spring scanned the `@Document` annotations. As you can see, since it encountered the annotation on `Person`, it creates the `PersonIdx` index. {{< highlight bash >}} "FT.CREATE" @@ -391,7 +391,7 @@ Let's break it down: * The first call uses the generated ULID to check if the id is in the set of primary keys (if it is, it’ll be removed) * The second call checks if JSON document exists (if it is, it’ll be removed) -* The third call uses the [`JSON.SET`](/commands/json.set) command to save the JSON payload +* The third call uses the [`JSON.SET`]({{< baseurl >}}/commands/json.set) command to save the JSON payload * The last call adds the primary key of the saved document to the set of primary keys Now that we’ve seen the repository in action via the `.save` method, we know that the trip from Java to Redis work. Now let’s add some more data to make the interactions more interesting: @@ -494,7 +494,7 @@ Optional byId(@PathVariable String id) { } {{< / highlight >}} -Refreshing the Swagger UI, we should see the newly added endpoint. 
We can grab an id using the [`SRANDMEMBER`](/commands/srandmember) command on the RedisInsight CLI like this: +Refreshing the Swagger UI, we should see the newly added endpoint. We can grab an id using the [`SRANDMEMBER`]({{< relref "/commands/srandmember" >}}) command on the RedisInsight CLI like this: {{< highlight bash >}} SRANDMEMBER com.redis.om.skeleton.models.Person diff --git a/content/develop/connect/clients/python.md b/content/develop/connect/clients/python.md index 66535e802e..92cdf0d22b 100644 --- a/content/develop/connect/clients/python.md +++ b/content/develop/connect/clients/python.md @@ -183,7 +183,7 @@ rs.create_index( # b'OK' ``` -Use [`JSON.SET`](/commands/json.set) to set each user value at the specified path. +Use [`JSON.SET`]({{< baseurl >}}/commands/json.set) to set each user value at the specified path. ```python r.json().set("user:1", Path.root_path(), user1) @@ -209,7 +209,7 @@ rs.search( # [Document {'id': 'user:1', 'payload': None, 'city': 'London'}, Document {'id': 'user:3', 'payload': None, 'city': 'Tel Aviv'}] ``` -Aggregate your results using [`FT.AGGREGATE`](/commands/ft.aggregate). +Aggregate your results using [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate). ```python req = aggregations.AggregateRequest("*").group_by('@city', reducers.count().alias('count')) diff --git a/content/develop/connect/insight/tutorials/insight-stream-consumer.md b/content/develop/connect/insight/tutorials/insight-stream-consumer.md index 4d43e0e84a..5e354a6ee6 100644 --- a/content/develop/connect/insight/tutorials/insight-stream-consumer.md +++ b/content/develop/connect/insight/tutorials/insight-stream-consumer.md @@ -36,13 +36,13 @@ For example, if the temperature is above a certain threshold, it puts a message It is possible to have multiple consumers doing different jobs, one measuring humidity, and another taking temperature measurements over periods of time. Redis stores a copy of the entire dataset in memory, which is a finite resource. 
To avoid runaway data, streams can be trimmed when you add something to them. -When adding to a stream with [`XADD`](/commands/xadd), you can optionally specify that the stream should be trimmed to a specific or approximate number of the newest entries, or to only include entries whose ID is higher than the ID specified. +When adding to a stream with [`XADD`]({{< relref "/commands/xadd" >}}), you can optionally specify that the stream should be trimmed to a specific or approximate number of the newest entries, or to only include entries whose ID is higher than the ID specified. You can also manage the storage required for streaming data using key expiry. For example, by writing each day's data to its own stream in Redis and expiring each stream's key after a period of time, say a week. An ID can be any number, but each new entry in the stream must have an ID whose value is higher than the last ID added to the stream. ## Adding new entries -Use [`XADD`](/commands/xadd) with `*` for the ID to have Redis automatically generate a new ID for you consisting of a millisecond precision timestamp, a dash and a sequence number. For example `1656416957625-0`. Then supply the field names and values to store in the new stream entry. +Use [`XADD`]({{< relref "/commands/xadd" >}}) with `*` for the ID to have Redis automatically generate a new ID for you consisting of a millisecond precision timestamp, a dash and a sequence number. For example `1656416957625-0`. Then supply the field names and values to store in the new stream entry. There are a couple of ways of retrieving things. You can retrieve entries by time range or you could ask for everything that's happened since a timestamp or ID that you specify. Using a single command you can ask for anything from 10:30 until 11:15 am on a given day. 
@@ -74,7 +74,7 @@ Then, enter fields and values using + to add more than one (for example, name an Now you have a stream that appears in the **Streams** view and you can continue adding fields and values to it. RedisInsight runs read commands for you so you can see the stream entries in the **Streams** view. -And the **Consumer Groups** view shows each consumers in a given consumer group and the last time Redis allocated a message, what the ID of it was and how many times that process has happened, and whether a consumer has you have told Redis that you are finished working with that task using the [`XACK`](/commands/xack) command. +And the **Consumer Groups** view shows each consumer in a given consumer group and the last time Redis allocated a message, what the ID of it was and how many times that process has happened, and whether you have told Redis that you are finished working with that task using the [`XACK`]({{< relref "/commands/xack" >}}) command. ## Monitor temperature and humidity from sensors in RedisInsight @@ -197,9 +197,9 @@ Note that in this model, each consumer instance does not receive all of the entr You can now toggle between **Stream** and **Consumer Groups** views to see your data. As mentioned earlier in this topic, a stream is an append-only log so you can't modify the contents of an entry, but you can delete an entire entry. -A case when that's useful is in the event of a so-called _poison-pill message_ that can cause consumers to crash. You can physically remove such messages in the **Streams** view or use the [`XDEL`](/commands/xdel) command at the command-line interface (CLI). +A case when that's useful is in the event of a so-called _poison-pill message_ that can cause consumers to crash. You can physically remove such messages in the **Streams** view or use the [`XDEL`]({{< relref "/commands/xdel" >}}) command at the command-line interface (CLI). -You can continue interacting with your stream at the CLI. 
For example, to get the current length of a stream, use the [`XLEN`](/commands/xlen) command: +You can continue interacting with your stream at the CLI. For example, to get the current length of a stream, use the [`XLEN`]({{< relref "/commands/xlen" >}}) command: {{< highlight bash >}} XLEN ingest:temphumidity diff --git a/content/develop/data-types/_index.md b/content/develop/data-types/_index.md index f00d11f595..545f0c1d41 100644 --- a/content/develop/data-types/_index.md +++ b/content/develop/data-types/_index.md @@ -30,7 +30,7 @@ If you'd like to try a comprehensive tutorial for each data structure, see their For more information, see: * [Overview of Redis strings]({{< relref "/develop/data-types/strings" >}}) -* [Redis string command reference](/commands/?group=string) +* [Redis string command reference]({{< baseurl >}}/commands/?group=string) ### Lists @@ -38,7 +38,7 @@ For more information, see: For more information, see: * [Overview of Redis lists]({{< relref "/develop/data-types/lists" >}}) -* [Redis list command reference](/commands/?group=list) +* [Redis list command reference]({{< baseurl >}}/commands/?group=list) ### Sets @@ -47,7 +47,7 @@ With a Redis set, you can add, remove, and test for existence in O(1) time (in o For more information, see: * [Overview of Redis sets]({{< relref "/develop/data-types/sets" >}}) -* [Redis set command reference](/commands/?group=set) +* [Redis set command reference]({{< baseurl >}}/commands/?group=set) ### Hashes @@ -56,7 +56,7 @@ As such, Redis hashes resemble [Python dictionaries](https://docs.python.org/3/t For more information, see: * [Overview of Redis hashes]({{< relref "/develop/data-types/hashes" >}}) -* [Redis hashes command reference](/commands/?group=hash) +* [Redis hashes command reference]({{< baseurl >}}/commands/?group=hash) ### Sorted sets @@ -64,7 +64,7 @@ For more information, see: For more information, see: * [Overview of Redis sorted sets]({{< relref "/develop/data-types/sorted-sets" >}}) 
-* [Redis sorted set command reference](/commands/?group=sorted-set) +* [Redis sorted set command reference]({{< baseurl >}}/commands/?group=sorted-set) ### Streams @@ -73,7 +73,7 @@ Streams help record events in the order they occur and then syndicate them for p For more information, see: * [Overview of Redis Streams]({{< relref "/develop/data-types/streams" >}}) -* [Redis Streams command reference](/commands/?group=stream) +* [Redis Streams command reference]({{< baseurl >}}/commands/?group=stream) ### Geospatial indexes @@ -81,7 +81,7 @@ For more information, see: For more information, see: * [Overview of Redis geospatial indexes]({{< relref "/develop/data-types/geospatial" >}}) -* [Redis geospatial indexes command reference](/commands/?group=geo) +* [Redis geospatial indexes command reference]({{< baseurl >}}/commands/?group=geo) ### Bitmaps @@ -89,7 +89,7 @@ For more information, see: For more information, see: * [Overview of Redis bitmaps]({{< relref "/develop/data-types/bitmaps" >}}) -* [Redis bitmap command reference](/commands/?group=bitmap) +* [Redis bitmap command reference]({{< baseurl >}}/commands/?group=bitmap) ### Bitfields @@ -98,14 +98,14 @@ Bitfields provide atomic get, set, and increment operations and support differen For more information, see: * [Overview of Redis bitfields]({{< relref "/develop/data-types/bitfields" >}}) -* The [`BITFIELD`](/commands/bitfield) command. +* The [`BITFIELD`]({{< relref "/commands/bitfield" >}}) command. ### HyperLogLog The [Redis HyperLogLog]({{< relref "/develop/data-types/probabilistic/hyperloglogs" >}}) data structures provide probabilistic estimates of the cardinality (i.e., number of elements) of large sets. 
For more information, see: * [Overview of Redis HyperLogLog]({{< relref "/develop/data-types/probabilistic/hyperloglogs" >}}) -* [Redis HyperLogLog command reference](/commands/?group=hyperloglog) +* [Redis HyperLogLog command reference]({{< baseurl >}}/commands/?group=hyperloglog) ## Extensions diff --git a/content/develop/data-types/bitfields.md b/content/develop/data-types/bitfields.md index e36de9b457..7baf748628 100644 --- a/content/develop/data-types/bitfields.md +++ b/content/develop/data-types/bitfields.md @@ -26,8 +26,8 @@ Bitfields support atomic read, write and increment operations, making them a goo ## Basic commands -* [`BITFIELD`](/commands/bitfield) atomically sets, increments and reads one or more values. -* [`BITFIELD_RO`](/commands/bitfield_ro) is a read-only variant of [`BITFIELD`](/commands/bitfield). +* [`BITFIELD`]({{< relref "/commands/bitfield" >}}) atomically sets, increments and reads one or more values. +* [`BITFIELD_RO`]({{< relref "/commands/bitfield_ro" >}}) is a read-only variant of [`BITFIELD`]({{< relref "/commands/bitfield" >}}). ## Examples @@ -55,4 +55,4 @@ Suppose you want to maintain two metrics for various bicycles: the current price ## Performance -[`BITFIELD`](/commands/bitfield) is O(n), where _n_ is the number of counters accessed. +[`BITFIELD`]({{< relref "/commands/bitfield" >}}) is O(n), where _n_ is the number of counters accessed. diff --git a/content/develop/data-types/bitmaps.md b/content/develop/data-types/bitmaps.md index 69e0ef53ee..e80a203f51 100644 --- a/content/develop/data-types/bitmaps.md +++ b/content/develop/data-types/bitmaps.md @@ -30,8 +30,8 @@ Some examples of bitmap use cases include: ## Basic commands -* [`SETBIT`](/commands/setbit) sets a bit at the provided offset to 0 or 1. -* [`GETBIT`](/commands/getbit) returns the value of a bit at a given offset. +* [`SETBIT`]({{< relref "/commands/setbit" >}}) sets a bit at the provided offset to 0 or 1. 
+* [`GETBIT`]({{< relref "/commands/getbit" >}}) returns the value of a bit at a given offset. See the [complete list of bitmap commands](https://redis.io/commands/?group=bitmap). @@ -68,22 +68,22 @@ where different users are represented by incremental user IDs, it is possible to remember a single bit information (for example, knowing whether a user wants to receive a newsletter) of 4 billion users using just 512 MB of memory. -The [`SETBIT`](/commands/setbit) command takes as its first argument the bit number, and as its second +The [`SETBIT`]({{< relref "/commands/setbit" >}}) command takes as its first argument the bit number, and as its second argument the value to set the bit to, which is 1 or 0. The command automatically enlarges the string if the addressed bit is outside the current string length. -[`GETBIT`](/commands/getbit) just returns the value of the bit at the specified index. +[`GETBIT`]({{< relref "/commands/getbit" >}}) just returns the value of the bit at the specified index. Out of range bits (addressing a bit that is outside the length of the string stored into the target key) are always considered to be zero. There are three commands operating on group of bits: -1. [`BITOP`](/commands/bitop) performs bit-wise operations between different strings. The provided operations are AND, OR, XOR and NOT. -2. [`BITCOUNT`](/commands/bitcount) performs population counting, reporting the number of bits set to 1. -3. [`BITPOS`](/commands/bitpos) finds the first bit having the specified value of 0 or 1. +1. [`BITOP`]({{< relref "/commands/bitop" >}}) performs bit-wise operations between different strings. The provided operations are AND, OR, XOR and NOT. +2. [`BITCOUNT`]({{< relref "/commands/bitcount" >}}) performs population counting, reporting the number of bits set to 1. +3. [`BITPOS`]({{< relref "/commands/bitpos" >}}) finds the first bit having the specified value of 0 or 1. 
-Both [`BITPOS`](/commands/bitpos) and [`BITCOUNT`](/commands/bitcount) are able to operate with byte ranges of the +Both [`BITPOS`]({{< relref "/commands/bitpos" >}}) and [`BITCOUNT`]({{< relref "/commands/bitcount" >}}) are able to operate with byte ranges of the string, instead of running for the whole length of the string. We can trivially see the number of bits that have been set in a bitmap. {{< clients-example bitmap_tutorial bitcount >}} @@ -93,15 +93,15 @@ string, instead of running for the whole length of the string. We can trivially For example imagine you want to know the longest streak of daily visits of your web site users. You start counting days starting from zero, that is the -day you made your web site public, and set a bit with [`SETBIT`](/commands/setbit) every time +day you made your web site public, and set a bit with [`SETBIT`]({{< relref "/commands/setbit" >}}) every time the user visits the web site. As a bit index you simply take the current unix time, subtract the initial offset, and divide by the number of seconds in a day (normally, 3600\*24). This way for each user you have a small string containing the visit -information for each day. With [`BITCOUNT`](/commands/bitcount) it is possible to easily get +information for each day. With [`BITCOUNT`]({{< relref "/commands/bitcount" >}}) it is possible to easily get the number of days a given user visited the web site, while with -a few [`BITPOS`](/commands/bitpos) calls, or simply fetching and analyzing the bitmap client-side, +a few [`BITPOS`]({{< relref "/commands/bitpos" >}}) calls, or simply fetching and analyzing the bitmap client-side, it is possible to easily compute the longest streak. Bitmaps are trivial to split into multiple keys, for example for @@ -115,8 +115,8 @@ the Nth bit to address inside the key with `bit-number MOD M`. ## Performance -[`SETBIT`](/commands/setbit) and [`GETBIT`](/commands/getbit) are O(1). 
-[`BITOP`](/commands/bitop) is O(n), where _n_ is the length of the longest string in the comparison. +[`SETBIT`]({{< relref "/commands/setbit" >}}) and [`GETBIT`]({{< relref "/commands/getbit" >}}) are O(1). +[`BITOP`]({{< relref "/commands/bitop" >}}) is O(n), where _n_ is the length of the longest string in the comparison. ## Learn more diff --git a/content/develop/data-types/geospatial.md b/content/develop/data-types/geospatial.md index 9eff1179b1..79e6e79e81 100644 --- a/content/develop/data-types/geospatial.md +++ b/content/develop/data-types/geospatial.md @@ -22,8 +22,8 @@ This data structure is useful for finding nearby points within a given radius or ## Basic commands -* [`GEOADD`](/commands/geoadd) adds a location to a given geospatial index (note that longitude comes before latitude with this command). -* [`GEOSEARCH`](/commands/geosearch) returns locations with a given radius or a bounding box. +* [`GEOADD`]({{< relref "/commands/geoadd" >}}) adds a location to a given geospatial index (note that longitude comes before latitude with this command). +* [`GEOSEARCH`]({{< relref "/commands/geosearch" >}}) returns locations with a given radius or a bounding box. See the [complete list of geospatial index commands](https://redis.io/commands/?group=geo). diff --git a/content/develop/data-types/hashes.md b/content/develop/data-types/hashes.md index 87e23ed170..8897be2bea 100644 --- a/content/develop/data-types/hashes.md +++ b/content/develop/data-types/hashes.md @@ -43,8 +43,8 @@ While hashes are handy to represent *objects*, actually the number of fields you put inside a hash has no practical limits (other than available memory), so you can use hashes in many different ways inside your application. -The command [`HSET`](/commands/hset) sets multiple fields of the hash, while [`HGET`](/commands/hget) retrieves -a single field. 
[`HMGET`](/commands/hmget) is similar to [`HGET`](/commands/hget) but returns an array of values: +The command [`HSET`]({{< relref "/commands/hset" >}}) sets multiple fields of the hash, while [`HGET`]({{< relref "/commands/hget" >}}) retrieves +a single field. [`HMGET`]({{< relref "/commands/hmget" >}}) is similar to [`HGET`]({{< relref "/commands/hget" >}}) but returns an array of values: {{< clients-example hash_tutorial hmget >}} > HMGET bike:1 model price no-such-field @@ -54,7 +54,7 @@ a single field. [`HMGET`](/commands/hmget) is similar to [`HGET`](/commands/hget {{< /clients-example >}} There are commands that are able to perform operations on individual fields -as well, like [`HINCRBY`](/commands/hincrby): +as well, like [`HINCRBY`]({{< relref "/commands/hincrby" >}}): {{< clients-example hash_tutorial hincrby >}} > HINCRBY bike:1 price 100 @@ -70,10 +70,10 @@ encoded in special way in memory that make them very memory efficient. ## Basic commands -* [`HSET`](/commands/hset) sets the value of one or more fields on a hash. -* [`HGET`](/commands/hget) returns the value at a given field. -* [`HMGET`](/commands/hmget) returns the values at one or more given fields. -* [`HINCRBY`](/commands/hincrby) increments the value at a given field by the integer provided. +* [`HSET`]({{< relref "/commands/hset" >}}) sets the value of one or more fields on a hash. +* [`HGET`]({{< relref "/commands/hget" >}}) returns the value at a given field. +* [`HMGET`]({{< relref "/commands/hmget" >}}) returns the values at one or more given fields. +* [`HINCRBY`]({{< relref "/commands/hincrby" >}}) increments the value at a given field by the integer provided. See the [complete list of hash commands](https://redis.io/commands/?group=hash). @@ -104,7 +104,7 @@ See the [complete list of hash commands](https://redis.io/commands/?group=hash). Most Redis hash commands are O(1). 
-A few commands - such as [`HKEYS`](/commands/hkeys), [`HVALS`](/commands/hvals), and [`HGETALL`](/commands/hgetall) - are O(n), where _n_ is the number of field-value pairs. +A few commands - such as [`HKEYS`]({{< relref "/commands/hkeys" >}}), [`HVALS`]({{< relref "/commands/hvals" >}}), and [`HGETALL`]({{< relref "/commands/hgetall" >}}) - are O(n), where _n_ is the number of field-value pairs. ## Limits diff --git a/content/develop/data-types/json/_index.md b/content/develop/data-types/json/_index.md index 415f8b570b..e32b268154 100644 --- a/content/develop/data-types/json/_index.md +++ b/content/develop/data-types/json/_index.md @@ -36,7 +36,7 @@ To learn how to use JSON, it's best to start with the Redis CLI. The following e First, start [`redis-cli`](http://redis.io/topics/rediscli) in interactive mode. -The first JSON command to try is [`JSON.SET`](/commands/json.set), which sets a Redis key with a JSON value. [`JSON.SET`](/commands/json.set) accepts all JSON value types. This example creates a JSON string: +The first JSON command to try is [`JSON.SET`]({{< baseurl >}}/commands/json.set), which sets a Redis key with a JSON value. [`JSON.SET`]({{< baseurl >}}/commands/json.set) accepts all JSON value types. This example creates a JSON string: ```sh > JSON.SET animal $ '"dog"' @@ -49,7 +49,7 @@ The first JSON command to try is [`JSON.SET`](/commands/json.set), which sets a Note how the commands include the dollar sign character `$`. This is the [path]({{< relref "/develop/data-types/json/path" >}}) to the value in the JSON document (in this case it just means the root). -Here are a few more string operations. [`JSON.STRLEN`](/commands/json.strlen) tells you the length of the string, and you can append another string to it with [`JSON.STRAPPEND`](/commands/json.strappend). +Here are a few more string operations. 
[`JSON.STRLEN`]({{< baseurl >}}/commands/json.strlen) tells you the length of the string, and you can append another string to it with [`JSON.STRAPPEND`]({{< baseurl >}}/commands/json.strappend). ```sh > JSON.STRLEN animal $ @@ -60,7 +60,7 @@ Here are a few more string operations. [`JSON.STRLEN`](/commands/json.strlen) te "[\"dog (Canis familiaris)\"]" ``` -Numbers can be [incremented](/commands/json.numincrby) and [multiplied](/commands/json.nummultby): +Numbers can be [incremented]({{< baseurl >}}/commands/json.numincrby) and [multiplied]({{< baseurl >}}/commands/json.nummultby): ``` > JSON.SET num $ 0 @@ -90,7 +90,7 @@ OK "[[true,{\"answer\":42}]]" ``` -The [`JSON.DEL`](/commands/json.del) command deletes any JSON value you specify with the `path` parameter. +The [`JSON.DEL`]({{< baseurl >}}/commands/json.del) command deletes any JSON value you specify with the `path` parameter. You can manipulate arrays with a dedicated subset of JSON commands: @@ -128,7 +128,7 @@ OK 3) "loggedOut" ``` -To return a JSON response in a more human-readable format, run `redis-cli` in raw output mode and include formatting keywords such as `INDENT`, `NEWLINE`, and `SPACE` with the [`JSON.GET`](/commands/json.get) command: +To return a JSON response in a more human-readable format, run `redis-cli` in raw output mode and include formatting keywords such as `INDENT`, `NEWLINE`, and `SPACE` with the [`JSON.GET`]({{< baseurl >}}/commands/json.get) command: ```sh $ redis-cli --raw @@ -225,7 +225,7 @@ To load the RedisJSON module, use one of the following methods: * [Makefile recipe](#makefile-recipe) * [Configuration file](#configuration-file) * [Command-line option](#command-line-option) -* [MODULE LOAD command](/commands/module-load/) +* [MODULE LOAD command]({{< relref "/commands/module-load" >}}) #### Makefile recipe @@ -265,9 +265,9 @@ Alternatively, you can have Redis load the module using the following command-li In the above lines replace `/path/to/module/` with the actual path to 
the module's library. -#### [`MODULE LOAD`](/commands/module-load) command +#### [`MODULE LOAD`]({{< relref "/commands/module-load" >}}) command -You can also use the [`MODULE LOAD`](/commands/module-load) command to load RedisJSON. Note that [`MODULE LOAD`](/commands/module-load) is a **dangerous command** and may be blocked/deprecated in the future due to security considerations. +You can also use the [`MODULE LOAD`]({{< relref "/commands/module-load" >}}) command to load RedisJSON. Note that [`MODULE LOAD`]({{< relref "/commands/module-load" >}}) is a **dangerous command** and may be blocked/deprecated in the future due to security considerations. After the module has been loaded successfully, the Redis log should have lines similar to: diff --git a/content/develop/data-types/json/developer.md b/content/develop/data-types/json/developer.md index 794f942c68..4a50f72947 100644 --- a/content/develop/data-types/json/developer.md +++ b/content/develop/data-types/json/developer.md @@ -165,7 +165,7 @@ $ REDIS_PORT=6379 make test ``` ## Debugging -To include debugging information, you need to set the [`DEBUG`](/commands/debug) environment variable before you compile RedisJSON. For example, run `export DEBUG=1`. +To include debugging information, you need to set the [`DEBUG`]({{< relref "/commands/debug" >}}) environment variable before you compile RedisJSON. For example, run `export DEBUG=1`. You can add breakpoints to Python tests in single-test mode. To set a breakpoint, call the ```BB()``` function inside a test. diff --git a/content/develop/data-types/json/path.md b/content/develop/data-types/json/path.md index c5dde8e104..fb32361a1b 100644 --- a/content/develop/data-types/json/path.md +++ b/content/develop/data-types/json/path.md @@ -30,7 +30,7 @@ RedisJSON v2.0 introduced [JSONPath](http://goessner.net/articles/JsonPath/) sup A JSONPath query can resolve to several locations in a JSON document. 
In this case, the JSON commands apply the operation to every possible location. This is a major improvement over [legacy path](#legacy-path-syntax) queries, which only operate on the first path. -Notice that the structure of the command response often differs when using JSONPath. See the [Commands](/commands/?group=json) page for more details. +Notice that the structure of the command response often differs when using JSONPath. See the [Commands]({{< baseurl >}}/commands/?group=json) page for more details. The new syntax supports bracket notation, which allows the use of special characters like colon ":" or whitespace in key names. @@ -133,7 +133,7 @@ JSON.SET store $ '{"inventory":{"headphones":[{"id":12345,"name":"Noise-cancelli #### Access JSON examples -The following examples use the [`JSON.GET`](/commands/json.get) command to retrieve data from various paths in the JSON document. +The following examples use the [`JSON.GET`]({{< baseurl >}}/commands/json.get) command to retrieve data from various paths in the JSON document. You can use the wildcard operator `*` to return a list of all items in the inventory: @@ -225,7 +225,7 @@ Now we can match against the value of `regex_pat` instead of a hard-coded regula You can also use JSONPath queries when you want to update specific sections of a JSON document. -For example, you can pass a JSONPath to the [`JSON.SET`](/commands/json.set) command to update a specific field. This example changes the price of the first item in the headphones list: +For example, you can pass a JSONPath to the [`JSON.SET`]({{< baseurl >}}/commands/json.set) command to update a specific field. 
This example changes the price of the first item in the headphones list: ```sh 127.0.0.1:6379> JSON.GET store $..headphones[0].price @@ -245,7 +245,7 @@ You can use filter expressions to update only JSON elements that match certain c "[\"Noise-cancelling Bluetooth headphones\",\"Wireless earbuds\"]" ``` -JSONPath queries also work with other JSON commands that accept a path as an argument. For example, you can add a new color option for a set of headphones with [`JSON.ARRAPPEND`](/commands/json.arrappend): +JSONPath queries also work with other JSON commands that accept a path as an argument. For example, you can add a new color option for a set of headphones with [`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend): ```sh 127.0.0.1:6379> JSON.GET store $..headphones[0].colors diff --git a/content/develop/data-types/json/performance/_index.md b/content/develop/data-types/json/performance/_index.md index accb6468b5..d2a13a9092 100644 --- a/content/develop/data-types/json/performance/_index.md +++ b/content/develop/data-types/json/performance/_index.md @@ -69,7 +69,7 @@ Last but not least, some adding and multiplying: ### Baseline -To establish a baseline, we'll use the Redis [`PING`](/commands/ping) command. +To establish a baseline, we'll use the Redis [`PING`]({{< relref "/commands/ping" >}}) command. First, lets see what `redis-benchmark` reports: ``` diff --git a/content/develop/data-types/json/ram.md b/content/develop/data-types/json/ram.md index 192505e599..df61870734 100644 --- a/content/develop/data-types/json/ram.md +++ b/content/develop/data-types/json/ram.md @@ -24,7 +24,7 @@ RAM. Redis JSON stores JSON values as binary data after deserializing them. This representation is often more expensive, size-wize, than the serialized form. 
The JSON data type uses at least 24 bytes (on 64-bit architectures) for every value, as can be seen by sampling an empty string with the -[`JSON.DEBUG MEMORY`](/commands/json.debug-memory) command: +[`JSON.DEBUG MEMORY`]({{< baseurl >}}/commands/json.debug-memory) command: ``` 127.0.0.1:6379> JSON.SET emptystring . '""' diff --git a/content/develop/data-types/lists.md b/content/develop/data-types/lists.md index 3939e6609d..f8c0112f73 100644 --- a/content/develop/data-types/lists.md +++ b/content/develop/data-types/lists.md @@ -25,20 +25,20 @@ Redis lists are frequently used to: ## Basic commands -* [`LPUSH`](/commands/lpush) adds a new element to the head of a list; [`RPUSH`](/commands/rpush) adds to the tail. -* [`LPOP`](/commands/lpop) removes and returns an element from the head of a list; [`RPOP`](/commands/rpop) does the same but from the tails of a list. -* [`LLEN`](/commands/llen) returns the length of a list. -* [`LMOVE`](/commands/lmove) atomically moves elements from one list to another. -* [`LTRIM`](/commands/ltrim) reduces a list to the specified range of elements. +* [`LPUSH`]({{< relref "/commands/lpush" >}}) adds a new element to the head of a list; [`RPUSH`]({{< relref "/commands/rpush" >}}) adds to the tail. +* [`LPOP`]({{< relref "/commands/lpop" >}}) removes and returns an element from the head of a list; [`RPOP`]({{< relref "/commands/rpop" >}}) does the same but from the tails of a list. +* [`LLEN`]({{< relref "/commands/llen" >}}) returns the length of a list. +* [`LMOVE`]({{< relref "/commands/lmove" >}}) atomically moves elements from one list to another. +* [`LTRIM`]({{< relref "/commands/ltrim" >}}) reduces a list to the specified range of elements. ### Blocking commands Lists support several blocking commands. For example: -* [`BLPOP`](/commands/blpop) removes and returns an element from the head of a list. +* [`BLPOP`]({{< relref "/commands/blpop" >}}) removes and returns an element from the head of a list. 
If the list is empty, the command blocks until an element becomes available or until the specified timeout is reached. -* [`BLMOVE`](/commands/blmove) atomically moves elements from a source list to a target list. +* [`BLMOVE`]({{< relref "/commands/blmove" >}}) atomically moves elements from a source list to a target list. If the source list is empty, the command will block until a new element becomes available. See the [complete series of list commands](https://redis.io/commands/?group=list). @@ -89,7 +89,7 @@ See the [complete series of list commands](https://redis.io/commands/?group=list 1) "bike:2" {{< /clients-example >}} -* To limit the length of a list you can call [`LTRIM`](/commands/ltrim): +* To limit the length of a list you can call [`LTRIM`]({{< relref "/commands/ltrim" >}}): {{< clients-example list_tutorial ltrim.1 >}} > RPUSH bikes:repairs bike:1 bike:2 bike:3 bike:4 bike:5 (integer) 5 @@ -116,7 +116,7 @@ an Array are very different from the properties of a List implemented using a Redis lists are implemented via Linked Lists. This means that even if you have millions of elements inside a list, the operation of adding a new element in the head or in the tail of the list is performed *in constant time*. The speed of adding a -new element with the [`LPUSH`](/commands/lpush) command to the head of a list with ten +new element with the [`LPUSH`]({{< relref "/commands/lpush" >}}) command to the head of a list with ten elements is the same as adding an element to the head of list with 10 million elements. 
@@ -136,10 +136,10 @@ Sorted sets are covered in the [Sorted sets]({{< relref "/develop/data-types/sor ### First steps with Redis Lists -The [`LPUSH`](/commands/lpush) command adds a new element into a list, on the -left (at the head), while the [`RPUSH`](/commands/rpush) command adds a new +The [`LPUSH`]({{< relref "/commands/lpush" >}}) command adds a new element into a list, on the +left (at the head), while the [`RPUSH`]({{< relref "/commands/rpush" >}}) command adds a new element into a list, on the right (at the tail). Finally the -[`LRANGE`](/commands/lrange) command extracts ranges of elements from lists: +[`LRANGE`]({{< relref "/commands/lrange" >}}) command extracts ranges of elements from lists: {{< clients-example list_tutorial lpush_rpush >}} > RPUSH bikes:repairs bike:1 @@ -154,13 +154,13 @@ element into a list, on the right (at the tail). Finally the 3) "bike:2" {{< /clients-example >}} -Note that [`LRANGE`](/commands/lrange) takes two indexes, the first and the last +Note that [`LRANGE`]({{< relref "/commands/lrange" >}}) takes two indexes, the first and the last element of the range to return. Both the indexes can be negative, telling Redis to start counting from the end: so -1 is the last element, -2 is the penultimate element of the list, and so forth. -As you can see [`RPUSH`](/commands/rpush) appended the elements on the right of the list, while -the final [`LPUSH`](/commands/lpush) appended the element on the left. +As you can see [`RPUSH`]({{< relref "/commands/rpush" >}}) appended the elements on the right of the list, while +the final [`LPUSH`]({{< relref "/commands/lpush" >}}) appended the element on the left. Both commands are *variadic commands*, meaning that you are free to push multiple elements into a list in a single call: @@ -219,7 +219,7 @@ posted by users into Redis lists. 
To describe a common use case step by step, imagine your home page shows the latest photos published in a photo sharing social network and you want to speedup access. -* Every time a user posts a new photo, we add its ID into a list with [`LPUSH`](/commands/lpush). +* Every time a user posts a new photo, we add its ID into a list with [`LPUSH`]({{< relref "/commands/lpush" >}}). * When users visit the home page, we use `LRANGE 0 9` in order to get the latest 10 posted items. ### Capped lists @@ -228,9 +228,9 @@ In many use cases we just want to use lists to store the *latest items*, whatever they are: social network updates, logs, or anything else. Redis allows us to use lists as a capped collection, only remembering the latest -N items and discarding all the oldest items using the [`LTRIM`](/commands/ltrim) command. +N items and discarding all the oldest items using the [`LTRIM`]({{< relref "/commands/ltrim" >}}) command. -The [`LTRIM`](/commands/ltrim) command is similar to [`LRANGE`](/commands/lrange), but **instead of displaying the +The [`LTRIM`]({{< relref "/commands/ltrim" >}}) command is similar to [`LRANGE`]({{< relref "/commands/lrange" >}}), but **instead of displaying the specified range of elements** it sets this range as the new list value. All the elements outside the given range are removed. @@ -248,11 +248,11 @@ OK 3) "bike:3" {{< /clients-example >}} -The above [`LTRIM`](/commands/ltrim) command tells Redis to keep just list elements from index +The above [`LTRIM`]({{< relref "/commands/ltrim" >}}) command tells Redis to keep just list elements from index 0 to 2, everything else will be discarded. This allows for a very simple but useful pattern: doing a List push operation + a List trim operation together to add a new element and discard elements exceeding a limit. 
Using -[`LTRIM`](/commands/ltrim) with negative indexes can then be used to keep only the 3 most recently added: +[`LTRIM`]({{< relref "/commands/ltrim" >}}) with negative indexes can then be used to keep only the 3 most recently added: {{< clients-example list_tutorial ltrim_end_of_list >}} > RPUSH bikes:repairs bike:1 bike:2 bike:3 bike:4 bike:5 @@ -266,10 +266,10 @@ OK {{< /clients-example >}} The above combination adds new elements and keeps only the 3 -newest elements into the list. With [`LRANGE`](/commands/lrange) you can access the top items +newest elements into the list. With [`LRANGE`]({{< relref "/commands/lrange" >}}) you can access the top items without any need to remember very old data. -Note: while [`LRANGE`](/commands/lrange) is technically an O(N) command, accessing small ranges +Note: while [`LRANGE`]({{< relref "/commands/lrange" >}}) is technically an O(N) command, accessing small ranges towards the head or the tail of the list is a constant time operation. Blocking operations on lists @@ -284,23 +284,23 @@ a different process in order to actually do some kind of work with those items. This is the usual producer / consumer setup, and can be implemented in the following simple way: -* To push items into the list, producers call [`LPUSH`](/commands/lpush). -* To extract / process items from the list, consumers call [`RPOP`](/commands/rpop). +* To push items into the list, producers call [`LPUSH`]({{< relref "/commands/lpush" >}}). +* To extract / process items from the list, consumers call [`RPOP`]({{< relref "/commands/rpop" >}}). However it is possible that sometimes the list is empty and there is nothing -to process, so [`RPOP`](/commands/rpop) just returns NULL. In this case a consumer is forced to wait -some time and retry again with [`RPOP`](/commands/rpop). This is called *polling*, and is not +to process, so [`RPOP`]({{< relref "/commands/rpop" >}}) just returns NULL. 
In this case a consumer is forced to wait +some time and retry again with [`RPOP`]({{< relref "/commands/rpop" >}}). This is called *polling*, and is not a good idea in this context because it has several drawbacks: 1. Forces Redis and clients to process useless commands (all the requests when the list is empty will get no actual work done, they'll just return NULL). -2. Adds a delay to the processing of items, since after a worker receives a NULL, it waits some time. To make the delay smaller, we could wait less between calls to [`RPOP`](/commands/rpop), with the effect of amplifying problem number 1, i.e. more useless calls to Redis. +2. Adds a delay to the processing of items, since after a worker receives a NULL, it waits some time. To make the delay smaller, we could wait less between calls to [`RPOP`]({{< relref "/commands/rpop" >}}), with the effect of amplifying problem number 1, i.e. more useless calls to Redis. -So Redis implements commands called [`BRPOP`](/commands/brpop) and [`BLPOP`](/commands/blpop) which are versions -of [`RPOP`](/commands/rpop) and [`LPOP`](/commands/lpop) able to block if the list is empty: they'll return to +So Redis implements commands called [`BRPOP`]({{< relref "/commands/brpop" >}}) and [`BLPOP`]({{< relref "/commands/blpop" >}}) which are versions +of [`RPOP`]({{< relref "/commands/rpop" >}}) and [`LPOP`]({{< relref "/commands/lpop" >}}) able to block if the list is empty: they'll return to the caller only when a new element is added to the list, or when a user-specified timeout is reached. 
-This is an example of a [`BRPOP`](/commands/brpop) call we could use in the worker: +This is an example of a [`BRPOP`]({{< relref "/commands/brpop" >}}) call we could use in the worker: {{< clients-example list_tutorial brpop >}} > RPUSH bikes:repairs bike:1 bike:2 @@ -324,17 +324,17 @@ also specify multiple lists and not just one, in order to wait on multiple lists at the same time, and get notified when the first list receives an element. -A few things to note about [`BRPOP`](/commands/brpop): +A few things to note about [`BRPOP`]({{< relref "/commands/brpop" >}}): 1. Clients are served in an ordered way: the first client that blocked waiting for a list, is served first when an element is pushed by some other client, and so forth. -2. The return value is different compared to [`RPOP`](/commands/rpop): it is a two-element array since it also includes the name of the key, because [`BRPOP`](/commands/brpop) and [`BLPOP`](/commands/blpop) are able to block waiting for elements from multiple lists. +2. The return value is different compared to [`RPOP`]({{< relref "/commands/rpop" >}}): it is a two-element array since it also includes the name of the key, because [`BRPOP`]({{< relref "/commands/brpop" >}}) and [`BLPOP`]({{< relref "/commands/blpop" >}}) are able to block waiting for elements from multiple lists. 3. If the timeout is reached, NULL is returned. There are more things you should know about lists and blocking ops. We suggest that you read more on the following: -* It is possible to build safer queues or rotating queues using [`LMOVE`](/commands/lmove). -* There is also a blocking variant of the command, called [`BLMOVE`](/commands/blmove). +* It is possible to build safer queues or rotating queues using [`LMOVE`]({{< relref "/commands/lmove" >}}). +* There is also a blocking variant of the command, called [`BLMOVE`]({{< relref "/commands/blmove" >}}). 
## Automatic creation and removal of keys @@ -342,7 +342,7 @@ So far in our examples we never had to create empty lists before pushing elements, or removing empty lists when they no longer have elements inside. It is Redis' responsibility to delete keys when lists are left empty, or to create an empty list if the key does not exist and we are trying to add elements -to it, for example, with [`LPUSH`](/commands/lpush). +to it, for example, with [`LPUSH`]({{< relref "/commands/lpush" >}}). This is not specific to lists, it applies to all the Redis data types composed of multiple elements -- Streams, Sets, Sorted Sets and Hashes. @@ -351,7 +351,7 @@ Basically we can summarize the behavior with three rules: 1. When we add an element to an aggregate data type, if the target key does not exist, an empty aggregate data type is created before adding the element. 2. When we remove elements from an aggregate data type, if the value remains empty, the key is automatically destroyed. The Stream data type is the only exception to this rule. -3. Calling a read-only command such as [`LLEN`](/commands/llen) (which returns the length of the list), or a write command removing elements, with an empty key, always produces the same result as if the key is holding an empty aggregate type of the type the command expects to find. +3. Calling a read-only command such as [`LLEN`]({{< relref "/commands/llen" >}}) (which returns the length of the list), or a write command removing elements, with an empty key, always produces the same result as if the key is holding an empty aggregate type of the type the command expects to find. Examples of rule 1: @@ -413,7 +413,7 @@ The max length of a Redis list is 2^32 - 1 (4,294,967,295) elements. List operations that access its head or tail are O(1), which means they're highly efficient. However, commands that manipulate elements within a list are usually O(n). 
-Examples of these include [`LINDEX`](/commands/lindex), [`LINSERT`](/commands/linsert), and [`LSET`](/commands/lset). +Examples of these include [`LINDEX`]({{< relref "/commands/lindex" >}}), [`LINSERT`]({{< relref "/commands/linsert" >}}), and [`LSET`]({{< relref "/commands/lset" >}}). Exercise caution when running these commands, mainly when operating on large lists. ## Alternatives diff --git a/content/develop/data-types/probabilistic/Configuration.md b/content/develop/data-types/probabilistic/Configuration.md index e99c2e1d02..2500c0ee7c 100644 --- a/content/develop/data-types/probabilistic/Configuration.md +++ b/content/develop/data-types/probabilistic/Configuration.md @@ -28,7 +28,7 @@ In [redis.conf]({{< relref "/operate/oss_and_stack/management/config" >}}): loadmodule ./redisbloom.so [OPT VAL]... ``` -From the [Redis CLI]({{< relref "/develop/connect/cli" >}}), using the [MODULE LOAD](/commands/module-load/) command: +From the [Redis CLI]({{< relref "/develop/connect/cli" >}}), using the [MODULE LOAD]({{< relref "/commands/module-load" >}}) command: ``` 127.0.0.6379> MODULE LOAD redisbloom.so [OPT VAL]... diff --git a/content/develop/data-types/probabilistic/count-min-sketch.md b/content/develop/data-types/probabilistic/count-min-sketch.md index 8c181ba0af..6627571147 100644 --- a/content/develop/data-types/probabilistic/count-min-sketch.md +++ b/content/develop/data-types/probabilistic/count-min-sketch.md @@ -102,7 +102,7 @@ or error = threshold/total_count ``` -where `total_count` is the sum of the count of all elements that can be obtained from the `count` key of the result of the [`CMS.INFO`](/commands/cms.info) command and is of course dynamic - it changes with every new increment in the sketch. At creation time you can approximate the `total_count` ratio as a product of the average count you'll be expecting in the sketch and the average number of elements. 
+where `total_count` is the sum of the count of all elements that can be obtained from the `count` key of the result of the [`CMS.INFO`]({{< baseurl >}}/commands/cms.info) command and is of course dynamic - it changes with every new increment in the sketch. At creation time you can approximate the `total_count` ratio as a product of the average count you'll be expecting in the sketch and the average number of elements. Since the threshold is a function of the total count in the filter it's very important to note that it will grow as the count grows, but knowing the total count we can always dynamically calculate the threshold. If a result is below it - it can be discarded. diff --git a/content/develop/data-types/probabilistic/cuckoo-filter.md b/content/develop/data-types/probabilistic/cuckoo-filter.md index 24c41989b6..3c036b364e 100644 --- a/content/develop/data-types/probabilistic/cuckoo-filter.md +++ b/content/develop/data-types/probabilistic/cuckoo-filter.md @@ -49,7 +49,7 @@ Note> In addition to these two cases, Cuckoo filters serve very well all the Blo ## Examples -> You'll learn how to create an empty cuckoo filter with an initial capacity for 1,000 items, add items, check their existence, and remove them. Even though the [`CF.ADD`](/commands/cf.add) command can create a new filter if one isn't present, it might not be optimally sized for your needs. It's better to use the [`CF.RESERVE`](/commands/cf.reserve) command to set up a filter with your preferred capacity. +> You'll learn how to create an empty cuckoo filter with an initial capacity for 1,000 items, add items, check their existence, and remove them. Even though the [`CF.ADD`]({{< baseurl >}}/commands/cf.add) command can create a new filter if one isn't present, it might not be optimally sized for your needs. It's better to use the [`CF.RESERVE`]({{< baseurl >}}/commands/cf.reserve) command to set up a filter with your preferred capacity. 
{{< clients-example cuckoo_tutorial cuckoo >}} > CF.RESERVE bikes:models 1000000 diff --git a/content/develop/data-types/probabilistic/hyperloglogs.md b/content/develop/data-types/probabilistic/hyperloglogs.md index a63fb0a07b..94d68c2dcc 100644 --- a/content/develop/data-types/probabilistic/hyperloglogs.md +++ b/content/develop/data-types/probabilistic/hyperloglogs.md @@ -34,20 +34,20 @@ constant amount of memory; 12k bytes in the worst case, or a lot less if your HyperLogLog (We'll just call them HLL from now) has seen very few elements. HLLs in Redis, while technically a different data structure, are encoded -as a Redis string, so you can call [`GET`](/commands/get) to serialize a HLL, and [`SET`](/commands/set) +as a Redis string, so you can call [`GET`]({{< relref "/commands/get" >}}) to serialize a HLL, and [`SET`]({{< relref "/commands/set" >}}) to deserialize it back to the server. Conceptually the HLL API is like using Sets to do the same task. You would -[`SADD`](/commands/sadd) every observed element into a set, and would use [`SCARD`](/commands/scard) to check the -number of elements inside the set, which are unique since [`SADD`](/commands/sadd) will not +[`SADD`]({{< relref "/commands/sadd" >}}) every observed element into a set, and would use [`SCARD`]({{< relref "/commands/scard" >}}) to check the +number of elements inside the set, which are unique since [`SADD`]({{< relref "/commands/sadd" >}}) will not re-add an existing element. While you don't really *add items* into an HLL, because the data structure only contains a state that does not include actual elements, the API is the same: -* Every time you see a new element, you add it to the count with [`PFADD`](/commands/pfadd). -* When you want to retrieve the current approximation of unique elements added using the [`PFADD`](/commands/pfadd) command, you can use the [`PFCOUNT`](/commands/pfcount) command. 
If you need to merge two different HLLs, the [`PFMERGE`](/commands/pfmerge) command is available. Since HLLs provide approximate counts of unique elements, the result of the merge will give you an approximation of the number of unique elements across both source HLLs. +* Every time you see a new element, you add it to the count with [`PFADD`]({{< relref "/commands/pfadd" >}}). +* When you want to retrieve the current approximation of unique elements added using the [`PFADD`]({{< relref "/commands/pfadd" >}}) command, you can use the [`PFCOUNT`]({{< relref "/commands/pfcount" >}}) command. If you need to merge two different HLLs, the [`PFMERGE`]({{< relref "/commands/pfmerge" >}}) command is available. Since HLLs provide approximate counts of unique elements, the result of the merge will give you an approximation of the number of unique elements across both source HLLs. {{< clients-example hll_tutorial pfadd >}} > PFADD bikes Hyperion Deimos Phoebe Quaoar @@ -66,7 +66,7 @@ Some examples of use cases for this data structure is counting unique queries performed by users in a search form every day, number of unique visitors to a web page and other similar cases. Redis is also able to perform the union of HLLs, please check the -[full documentation](/commands#hyperloglog) for more information. +[full documentation]({{< relref "/commands#hyperloglog" >}}) for more information. ## Use cases @@ -88,15 +88,15 @@ One HyperLogLog is created per page (video/song) per period, and every IP/identi ## Basic commands -* [`PFADD`](/commands/pfadd) adds an item to a HyperLogLog. -* [`PFCOUNT`](/commands/pfcount) returns an estimate of the number of items in the set. -* [`PFMERGE`](/commands/pfmerge) combines two or more HyperLogLogs into one. +* [`PFADD`]({{< relref "/commands/pfadd" >}}) adds an item to a HyperLogLog. +* [`PFCOUNT`]({{< relref "/commands/pfcount" >}}) returns an estimate of the number of items in the set. 
+* [`PFMERGE`]({{< relref "/commands/pfmerge" >}}) combines two or more HyperLogLogs into one. See the [complete list of HyperLogLog commands](https://redis.io/commands/?group=hyperloglog). ## Performance -Writing ([`PFADD`](/commands/pfadd)) to and reading from ([`PFCOUNT`](/commands/pfcount)) the HyperLogLog is done in constant time and space. +Writing ([`PFADD`]({{< relref "/commands/pfadd" >}})) to and reading from ([`PFCOUNT`]({{< relref "/commands/pfcount" >}})) the HyperLogLog is done in constant time and space. Merging HLLs is O(n), where _n_ is the number of sketches. ## Limits diff --git a/content/develop/data-types/probabilistic/t-digest.md b/content/develop/data-types/probabilistic/t-digest.md index e6602ace5a..19b45a3abd 100644 --- a/content/develop/data-types/probabilistic/t-digest.md +++ b/content/develop/data-types/probabilistic/t-digest.md @@ -72,7 +72,7 @@ You measure the IP packets transferred over your network each second and try to ## Examples -In the following example, you'll create a t-digest with a compression of 100 and add items to it. The `COMPRESSION` argument is used to specify the tradeoff between accuracy and memory consumption. The default value is 100. Higher values mean more accuracy. Note: unlike some of the other probabilistic data structures, the [`TDIGEST.ADD`](/commands/tdigest.add) command will not create a new structure if the key does not exist. +In the following example, you'll create a t-digest with a compression of 100 and add items to it. The `COMPRESSION` argument is used to specify the tradeoff between accuracy and memory consumption. The default value is 100. Higher values mean more accuracy. Note: unlike some of the other probabilistic data structures, the [`TDIGEST.ADD`]({{< baseurl >}}/commands/tdigest.add) command will not create a new structure if the key does not exist. 
 
 {{< clients-example tdigest_tutorial tdig_start >}}
 > TDIGEST.CREATE bikes:sales COMPRESSION 100
@@ -90,7 +90,7 @@ You can repeat calling [TDIGEST.ADD](https://redis.io/commands/tdigest.add/) whe
 
 Another helpful feature in t-digest is CDF (definition of rank) which gives us the fraction of observations smaller or equal to a certain value. This command is very useful to answer questions like "*What's the percentage of observations with a value lower or equal to X*".
 
->More precisely, [`TDIGEST.CDF`](/commands/tdigest.cdf) will return the estimated fraction of observations in the sketch that are smaller than X plus half the number of observations that are equal to X. We can also use the [`TDIGEST.RANK`](/commands/tdigest.rank) command, which is very similar. Instead of returning a fraction, it returns the ----estimated---- rank of a value. The [`TDIGEST.RANK`](/commands/tdigest.rank) command is also variadic, meaning you can use a single command to retrieve estimations for one or more values.
+>More precisely, [`TDIGEST.CDF`]({{< baseurl >}}/commands/tdigest.cdf) will return the estimated fraction of observations in the sketch that are smaller than X plus half the number of observations that are equal to X. We can also use the [`TDIGEST.RANK`]({{< baseurl >}}/commands/tdigest.rank) command, which is very similar. Instead of returning a fraction, it returns the *estimated* rank of a value. The [`TDIGEST.RANK`]({{< baseurl >}}/commands/tdigest.rank) command is also variadic, meaning you can use a single command to retrieve estimations for one or more values.
 
 Here's an example. Given a set of biker's ages, you can ask a question like "What's the percentage of bike racers that are younger than 50 years?"
@@ -143,7 +143,7 @@ If `destKey` is an existing sketch, its values are merged with the values of the #### Retrieving sketch information -Use [`TDIGEST.MIN`](/commands/tdigest.min) and [`TDIGEST.MAX`](/commands/tdigest.max) to retrieve the minimal and maximal values in the sketch, respectively. +Use [`TDIGEST.MIN`]({{< baseurl >}}/commands/tdigest.min) and [`TDIGEST.MAX`]({{< baseurl >}}/commands/tdigest.max) to retrieve the minimal and maximal values in the sketch, respectively. {{< clients-example tdigest_tutorial tdig_min >}} > TDIGEST.MIN racer_ages diff --git a/content/develop/data-types/probabilistic/top-k.md b/content/develop/data-types/probabilistic/top-k.md index f88fff5e86..1a7fd90e09 100644 --- a/content/develop/data-types/probabilistic/top-k.md +++ b/content/develop/data-types/probabilistic/top-k.md @@ -37,25 +37,25 @@ This application answers these questions: Data flow is the incoming social media posts from which you parse out the different hashtags. -The [`TOPK.LIST`](/commands/topk.list) command has a time complexity of `O(K)` so if `K` is small, there is no need to keep a separate set or sorted set of all the hashtags. You can query directly from the Top K itself. +The [`TOPK.LIST`]({{< baseurl >}}/commands/topk.list) command has a time complexity of `O(K)` so if `K` is small, there is no need to keep a separate set or sorted set of all the hashtags. You can query directly from the Top K itself. ## Example This example will show you how to track key words used "bike" when shopping online; e.g., "bike store" and "bike handlebars". Proceed as follows. ​ -* Use [`TOPK.RESERVE`](/commands/topk.reserve) to initialize a top K sketch with specific parameters. Note: the `width`, `depth`, and `decay_constant` parameters can be omitted, as they will be set to the default values 7, 8, and 0.9, respectively, if not present. +* Use [`TOPK.RESERVE`]({{< baseurl >}}/commands/topk.reserve) to initialize a top K sketch with specific parameters. 
Note: the `width`, `depth`, and `decay_constant` parameters can be omitted, as they will be set to the default values 7, 8, and 0.9, respectively, if not present. ​ ``` > TOPK.RESERVE key k width depth decay_constant ``` - * Use [`TOPK.ADD`](/commands/topk.add) to add items to the sketch. As you can see, multiple items can be added at the same time. If an item is returned when adding additional items, it means that item was demoted out of the min heap of the top items, below it will mean the returned item is no longer in the top 5, otherwise `nil` is returned. This allows dynamic heavy-hitter detection of items being entered or expelled from top K list. + * Use [`TOPK.ADD`]({{< baseurl >}}/commands/topk.add) to add items to the sketch. As you can see, multiple items can be added at the same time. If an item is returned when adding additional items, it means that item was demoted out of the min heap of the top items, below it will mean the returned item is no longer in the top 5, otherwise `nil` is returned. This allows dynamic heavy-hitter detection of items being entered or expelled from top K list. ​ In the example below, "pedals" displaces "handlebars", which is returned after "pedals" is added. Also note that the addition of both "store" and "seat" a second time don't return anything, as they're already in the top K. - * Use [`TOPK.LIST`](/commands/topk.list) to list the items entered thus far. + * Use [`TOPK.LIST`]({{< baseurl >}}/commands/topk.list) to list the items entered thus far. ​ - * Use [`TOPK.QUERY`](/commands/topk.query) to see if an item is on the top K list. Just like [`TOPK.ADD`](/commands/topk.add) multiple items can be queried at the same time. + * Use [`TOPK.QUERY`]({{< baseurl >}}/commands/topk.query) to see if an item is on the top K list. Just like [`TOPK.ADD`]({{< baseurl >}}/commands/topk.add) multiple items can be queried at the same time. 
{{< clients-example topk_tutorial topk >}} > TOPK.RESERVE bikes:keywords 5 2000 7 0.925 OK diff --git a/content/develop/data-types/sets.md b/content/develop/data-types/sets.md index e3623ad944..b595affeed 100644 --- a/content/develop/data-types/sets.md +++ b/content/develop/data-types/sets.md @@ -26,11 +26,11 @@ You can use Redis sets to efficiently: ## Basic commands -* [`SADD`](/commands/sadd) adds a new member to a set. -* [`SREM`](/commands/srem) removes the specified member from the set. -* [`SISMEMBER`](/commands/sismember) tests a string for set membership. -* [`SINTER`](/commands/sinter) returns the set of members that two or more sets have in common (i.e., the intersection). -* [`SCARD`](/commands/scard) returns the size (a.k.a. cardinality) of a set. +* [`SADD`]({{< relref "/commands/sadd" >}}) adds a new member to a set. +* [`SREM`]({{< relref "/commands/srem" >}}) removes the specified member from the set. +* [`SISMEMBER`]({{< relref "/commands/sismember" >}}) tests a string for set membership. +* [`SINTER`]({{< relref "/commands/sinter" >}}) returns the set of members that two or more sets have in common (i.e., the intersection). +* [`SCARD`]({{< relref "/commands/scard" >}}) returns the size (a.k.a. cardinality) of a set. See the [complete list of set commands](https://redis.io/commands/?group=set). @@ -70,7 +70,7 @@ if you add a member that already exists, it will be ignored. {{< /clients-example >}} ## Tutorial -The [`SADD`](/commands/sadd) command adds new elements to a set. It's also possible +The [`SADD`]({{< relref "/commands/sadd" >}}) command adds new elements to a set. It's also possible to do a number of other operations against sets like testing if a given element already exists, performing the intersection, union or difference between multiple sets, and so forth. 
@@ -113,7 +113,7 @@ to know which bikes are racing in France but not in the USA: There are other non trivial operations that are still easy to implement using the right Redis commands. For instance we may want a list of all the bikes racing in France, the USA, and some other races. We can do this using -the [`SINTER`](/commands/sinter) command, which performs the intersection between different +the [`SINTER`]({{< relref "/commands/sinter" >}}) command, which performs the intersection between different sets. In addition to intersection you can also perform unions, difference, and more. For example if we add a third race we can see some of these commands in action: @@ -141,14 +141,14 @@ if we add a third race we can see some of these commands in action: 1) "bike:4" {{< /clients-example >}} -You'll note that the [`SDIFF`](/commands/sdiff) command returns an empty array when the +You'll note that the [`SDIFF`]({{< relref "/commands/sdiff" >}}) command returns an empty array when the difference between all sets is empty. You'll also note that the order of sets -passed to [`SDIFF`](/commands/sdiff) matters, since the difference is not commutative. +passed to [`SDIFF`]({{< relref "/commands/sdiff" >}}) matters, since the difference is not commutative. -When you want to remove items from a set, you can use the [`SREM`](/commands/srem) command to -remove one or more items from a set, or you can use the [`SPOP`](/commands/spop) command to +When you want to remove items from a set, you can use the [`SREM`]({{< relref "/commands/srem" >}}) command to +remove one or more items from a set, or you can use the [`SPOP`]({{< relref "/commands/spop" >}}) command to remove a random item from a set. 
You can also _return_ a random item from a -set without removing it using the [`SRANDMEMBER`](/commands/srandmember) command: +set without removing it using the [`SRANDMEMBER`]({{< relref "/commands/srandmember" >}}) command: {{< clients-example sets_tutorial srem >}} > SADD bikes:racing:france bike:1 bike:2 bike:3 bike:4 bike:5 @@ -173,9 +173,9 @@ The max size of a Redis set is 2^32 - 1 (4,294,967,295) members. Most set operations, including adding, removing, and checking whether an item is a set member, are O(1). This means that they're highly efficient. -However, for large sets with hundreds of thousands of members or more, you should exercise caution when running the [`SMEMBERS`](/commands/smembers) command. +However, for large sets with hundreds of thousands of members or more, you should exercise caution when running the [`SMEMBERS`]({{< relref "/commands/smembers" >}}) command. This command is O(n) and returns the entire set in a single response. -As an alternative, consider the [`SSCAN`](/commands/sscan), which lets you retrieve all members of a set iteratively. +As an alternative, consider the [`SSCAN`]({{< relref "/commands/sscan" >}}), which lets you retrieve all members of a set iteratively. ## Alternatives diff --git a/content/develop/data-types/sorted-sets.md b/content/develop/data-types/sorted-sets.md index e1d68e068f..84f564b396 100644 --- a/content/develop/data-types/sorted-sets.md +++ b/content/develop/data-types/sorted-sets.md @@ -52,9 +52,9 @@ Let's start with a simple example, we'll add all our racers and the score they g {{< /clients-example >}} -As you can see [`ZADD`](/commands/zadd) is similar to [`SADD`](/commands/sadd), but takes one additional argument +As you can see [`ZADD`]({{< relref "/commands/zadd" >}}) is similar to [`SADD`]({{< relref "/commands/sadd" >}}), but takes one additional argument (placed before the element to be added) which is the score. 
-[`ZADD`](/commands/zadd) is also variadic, so you are free to specify multiple score-value +[`ZADD`]({{< relref "/commands/zadd" >}}) is also variadic, so you are free to specify multiple score-value pairs, even if this is not used in the example above. With sorted sets it is trivial to return a list of hackers sorted by their @@ -64,7 +64,7 @@ Implementation note: Sorted sets are implemented via a dual-ported data structure containing both a skip list and a hash table, so every time we add an element Redis performs an O(log(N)) operation. That's good, but when we ask for sorted elements Redis does not have to do any work at -all, it's already sorted. Note that the [`ZRANGE`](/commands/zrange) order is low to high, while the [`ZREVRANGE`](/commands/zrevrange) order is high to low: +all, it's already sorted. Note that the [`ZRANGE`]({{< relref "/commands/zrange" >}}) order is low to high, while the [`ZREVRANGE`]({{< relref "/commands/zrevrange" >}}) order is high to low: {{< clients-example ss_tutorial zrange >}} > ZRANGE racer_scores 0 -1 @@ -84,7 +84,7 @@ all, it's already sorted. Note that the [`ZRANGE`](/commands/zrange) order is lo {{< /clients-example >}} Note: 0 and -1 means from element index 0 to the last element (-1 works -here just as it does in the case of the [`LRANGE`](/commands/lrange) command). +here just as it does in the case of the [`LRANGE`]({{< relref "/commands/lrange" >}}) command). It is possible to return scores as well, using the `WITHSCORES` argument: @@ -108,7 +108,7 @@ It is possible to return scores as well, using the `WITHSCORES` argument: Sorted sets are more powerful than this. They can operate on ranges. Let's get all the racers with 10 or fewer points. 
We -use the [`ZRANGEBYSCORE`](/commands/zrangebyscore) command to do it: +use the [`ZRANGEBYSCORE`]({{< relref "/commands/zrangebyscore" >}}) command to do it: {{< clients-example ss_tutorial zrangebyscore >}} > ZRANGEBYSCORE racer_scores -inf 10 @@ -121,7 +121,7 @@ use the [`ZRANGEBYSCORE`](/commands/zrangebyscore) command to do it: We asked Redis to return all the elements with a score between negative infinity and 10 (both extremes are included). -To remove an element we'd simply call [`ZREM`](/commands/zrem) with the racer's name. +To remove an element we'd simply call [`ZREM`]({{< relref "/commands/zrem" >}}) with the racer's name. It's also possible to remove ranges of elements. Let's remove racer Castilla along with all the racers with strictly fewer than 10 points: @@ -136,13 +136,13 @@ the racers with strictly fewer than 10 points: 3) "Prickett" {{< /clients-example >}} -[`ZREMRANGEBYSCORE`](/commands/zremrangebyscore) is perhaps not the best command name, +[`ZREMRANGEBYSCORE`]({{< relref "/commands/zremrangebyscore" >}}) is perhaps not the best command name, but it can be very useful, and returns the number of removed elements. Another extremely useful operation defined for sorted set elements is the get-rank operation. It is possible to ask what is the position of an element in the set of ordered elements. -The [`ZREVRANK`](/commands/zrevrank) command is also available in order to get the rank, considering +The [`ZREVRANK`]({{< relref "/commands/zrevrank" >}}) command is also available in order to get the rank, considering the elements sorted in a descending way. {{< clients-example ss_tutorial zrank >}} @@ -160,11 +160,11 @@ inserted with the same identical score (elements are compared with the C `memcmp` function, so it is guaranteed that there is no collation, and every Redis instance will reply with the same output). 
-The main commands to operate with lexicographical ranges are [`ZRANGEBYLEX`](/commands/zrangebylex), -[`ZREVRANGEBYLEX`](/commands/zrevrangebylex), [`ZREMRANGEBYLEX`](/commands/zremrangebylex) and [`ZLEXCOUNT`](/commands/zlexcount). +The main commands to operate with lexicographical ranges are [`ZRANGEBYLEX`]({{< relref "/commands/zrangebylex" >}}), +[`ZREVRANGEBYLEX`]({{< relref "/commands/zrevrangebylex" >}}), [`ZREMRANGEBYLEX`]({{< relref "/commands/zremrangebylex" >}}) and [`ZLEXCOUNT`]({{< relref "/commands/zlexcount" >}}). For example, let's add again our list of famous hackers, but this time -using a score of zero for all the elements. We'll see that because of the sorted sets ordering rules, they are already sorted lexicographically. Using [`ZRANGEBYLEX`](/commands/zrangebylex) we can ask for lexicographical ranges: +using a score of zero for all the elements. We'll see that because of the sorted sets ordering rules, they are already sorted lexicographically. Using [`ZRANGEBYLEX`]({{< relref "/commands/zrangebylex" >}}) we can ask for lexicographical ranges: {{< clients-example ss_tutorial zadd_lex >}} > ZADD racer_scores 0 "Norem" 0 "Sam-Bodden" 0 "Royce" 0 "Castilla" 0 "Prickett" 0 "Ford" @@ -201,7 +201,7 @@ Updating the score: leaderboards --- Just a final note about sorted sets before switching to the next topic. -Sorted sets' scores can be updated at any time. Just calling [`ZADD`](/commands/zadd) against +Sorted sets' scores can be updated at any time. Just calling [`ZADD`]({{< relref "/commands/zadd" >}}) against an element already included in the sorted set will update its score (and position) with O(log(N)) time complexity. As such, sorted sets are suitable when there are tons of updates. @@ -214,7 +214,7 @@ the #4932 best score here"). ## Examples -* There are two ways we can use a sorted set to represent a leaderbaord. If we know a racer's new score, we can update it directly via the [`ZADD`](/commands/zadd) command. 
However, if we want to add points to an existing score, we can use the [`ZINCRBY`](/commands/zincrby) command. +* There are two ways we can use a sorted set to represent a leaderboard. If we know a racer's new score, we can update it directly via the [`ZADD`]({{< relref "/commands/zadd" >}}) command. However, if we want to add points to an existing score, we can use the [`ZINCRBY`]({{< relref "/commands/zincrby" >}}) command. {{< clients-example ss_tutorial leaderboard >}} > ZADD racer_scores 100 "Wood" (integer) 1 @@ -228,14 +228,14 @@ the #4932 best score here"). "200" {{< /clients-example >}} -You'll see that [`ZADD`](/commands/zadd) returns 0 when the member already exists (the score is updated), while [`ZINCRBY`](/commands/zincrby) returns the new score. The score for racer Henshaw went from 100, was changed to 150 with no regard for what score was there before, and then was incremented by 50 to 200. +You'll see that [`ZADD`]({{< relref "/commands/zadd" >}}) returns 0 when the member already exists (the score is updated), while [`ZINCRBY`]({{< relref "/commands/zincrby" >}}) returns the new score. The score for racer Henshaw went from 100, was changed to 150 with no regard for what score was there before, and then was incremented by 50 to 200. ## Basic commands -* [`ZADD`](/commands/zadd) adds a new member and associated score to a sorted set. If the member already exists, the score is updated. -* [`ZRANGE`](/commands/zrange) returns members of a sorted set, sorted within a given range. -* [`ZRANK`](/commands/zrank) returns the rank of the provided member, assuming the sorted is in ascending order. -* [`ZREVRANK`](/commands/zrevrank) returns the rank of the provided member, assuming the sorted set is in descending order. +* [`ZADD`]({{< relref "/commands/zadd" >}}) adds a new member and associated score to a sorted set. If the member already exists, the score is updated. 
+* [`ZRANGE`]({{< relref "/commands/zrange" >}}) returns members of a sorted set, sorted within a given range. +* [`ZRANK`]({{< relref "/commands/zrank" >}}) returns the rank of the provided member, assuming the sorted set is in ascending order. +* [`ZREVRANK`]({{< relref "/commands/zrevrank" >}}) returns the rank of the provided member, assuming the sorted set is in descending order. See the [complete list of sorted set commands](https://redis.io/commands/?group=sorted-set). @@ -243,7 +243,7 @@ See the [complete list of sorted set commands](https://redis.io/commands/?group= Most sorted set operations are O(log(n)), where _n_ is the number of members. -Exercise some caution when running the [`ZRANGE`](/commands/zrange) command with large returns values (e.g., in the tens of thousands or more). +Exercise some caution when running the [`ZRANGE`]({{< relref "/commands/zrange" >}}) command with large return values (e.g., in the tens of thousands or more). This command's time complexity is O(log(n) + m), where _m_ is the number of results returned. ## Alternatives diff --git a/content/develop/data-types/streams.md b/content/develop/data-types/streams.md index a47bfc921d..096203ba71 100644 --- a/content/develop/data-types/streams.md +++ b/content/develop/data-types/streams.md @@ -28,13 +28,13 @@ Examples of Redis stream use cases include: Redis generates a unique ID for each stream entry. You can use these IDs to retrieve their associated entries later or to read and process all subsequent entries in the stream. Note that because these IDs are related to time, the ones shown here may vary and will be different from the IDs you see in your own Redis instance. -Redis streams support several trimming strategies (to prevent streams from growing unbounded) and more than one consumption strategy (see [`XREAD`](/commands/xread), [`XREADGROUP`](/commands/xreadgroup), and [`XRANGE`](/commands/xrange)). 
+Redis streams support several trimming strategies (to prevent streams from growing unbounded) and more than one consumption strategy (see [`XREAD`]({{< relref "/commands/xread" >}}), [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}), and [`XRANGE`]({{< relref "/commands/xrange" >}})). ## Basic commands -* [`XADD`](/commands/xadd) adds a new entry to a stream. -* [`XREAD`](/commands/xread) reads one or more entries, starting at a given position and moving forward in time. -* [`XRANGE`](/commands/xrange) returns a range of entries between two supplied entry IDs. -* [`XLEN`](/commands/xlen) returns the length of a stream. +* [`XADD`]({{< relref "/commands/xadd" >}}) adds a new entry to a stream. +* [`XREAD`]({{< relref "/commands/xread" >}}) reads one or more entries, starting at a given position and moving forward in time. +* [`XRANGE`]({{< relref "/commands/xrange" >}}) returns a range of entries between two supplied entry IDs. +* [`XLEN`]({{< relref "/commands/xlen" >}}) returns the length of a stream. See the [complete list of stream commands](https://redis.io/commands/?group=stream). @@ -93,7 +93,7 @@ See each command's time complexity for the details. ## Streams basics -Streams are an append-only data structure. The fundamental write command, called [`XADD`](/commands/xadd), appends a new entry to the specified stream. +Streams are an append-only data structure. The fundamental write command, called [`XADD`]({{< relref "/commands/xadd" >}}), appends a new entry to the specified stream. 
Each stream entry consists of one or more field-value pairs, somewhat like a dictionary or a Redis hash: @@ -102,9 +102,9 @@ Each stream entry consists of one or more field-value pairs, somewhat like a dic "1692632147973-0" {{< /clients-example >}} -The above call to the [`XADD`](/commands/xadd) command adds an entry `rider: Castilla, speed: 29.9, position: 1, location_id: 2` to the stream at key `race:france`, using an auto-generated entry ID, which is the one returned by the command, specifically `1692632147973-0`. It gets as its first argument the key name `race:france`, the second argument is the entry ID that identifies every entry inside a stream. However, in this case, we passed `*` because we want the server to generate a new ID for us. Every new ID will be monotonically increasing, so in more simple terms, every new entry added will have a higher ID compared to all the past entries. Auto-generation of IDs by the server is almost always what you want, and the reasons for specifying an ID explicitly are very rare. We'll talk more about this later. The fact that each Stream entry has an ID is another similarity with log files, where line numbers, or the byte offset inside the file, can be used in order to identify a given entry. Returning back at our [`XADD`](/commands/xadd) example, after the key name and ID, the next arguments are the field-value pairs composing our stream entry. +The above call to the [`XADD`]({{< relref "/commands/xadd" >}}) command adds an entry `rider: Castilla, speed: 29.9, position: 1, location_id: 2` to the stream at key `race:france`, using an auto-generated entry ID, which is the one returned by the command, specifically `1692632147973-0`. It gets as its first argument the key name `race:france`, the second argument is the entry ID that identifies every entry inside a stream. However, in this case, we passed `*` because we want the server to generate a new ID for us. 
Every new ID will be monotonically increasing, so in more simple terms, every new entry added will have a higher ID compared to all the past entries. Auto-generation of IDs by the server is almost always what you want, and the reasons for specifying an ID explicitly are very rare. We'll talk more about this later. The fact that each Stream entry has an ID is another similarity with log files, where line numbers, or the byte offset inside the file, can be used in order to identify a given entry. Returning back at our [`XADD`]({{< relref "/commands/xadd" >}}) example, after the key name and ID, the next arguments are the field-value pairs composing our stream entry. -It is possible to get the number of items inside a Stream just using the [`XLEN`](/commands/xlen) command: +It is possible to get the number of items inside a Stream just using the [`XLEN`]({{< relref "/commands/xlen" >}}) command: {{< clients-example stream_tutorial xlen >}} > XLEN race:france @@ -113,7 +113,7 @@ It is possible to get the number of items inside a Stream just using the [`XLEN` ### Entry IDs -The entry ID returned by the [`XADD`](/commands/xadd) command, and identifying univocally each entry inside a given stream, is composed of two parts: +The entry ID returned by the [`XADD`]({{< relref "/commands/xadd" >}}) command, and identifying univocally each entry inside a given stream, is composed of two parts: ``` - @@ -121,9 +121,9 @@ The entry ID returned by the [`XADD`](/commands/xadd) command, and identifying u The milliseconds time part is actually the local time in the local Redis node generating the stream ID, however if the current milliseconds time happens to be smaller than the previous entry time, then the previous entry time is used instead, so if a clock jumps backward the monotonically incrementing ID property still holds. The sequence number is used for entries created in the same millisecond. 
Since the sequence number is 64 bit wide, in practical terms there is no limit to the number of entries that can be generated within the same millisecond. -The format of such IDs may look strange at first, and the gentle reader may wonder why the time is part of the ID. The reason is that Redis streams support range queries by ID. Because the ID is related to the time the entry is generated, this gives the ability to query for time ranges basically for free. We will see this soon while covering the [`XRANGE`](/commands/xrange) command. +The format of such IDs may look strange at first, and the gentle reader may wonder why the time is part of the ID. The reason is that Redis streams support range queries by ID. Because the ID is related to the time the entry is generated, this gives the ability to query for time ranges basically for free. We will see this soon while covering the [`XRANGE`]({{< relref "/commands/xrange" >}}) command. -If for some reason the user needs incremental IDs that are not related to time but are actually associated to another external system ID, as previously mentioned, the [`XADD`](/commands/xadd) command can take an explicit ID instead of the `*` wildcard ID that triggers auto-generation, like in the following examples: +If for some reason the user needs incremental IDs that are not related to time but are actually associated to another external system ID, as previously mentioned, the [`XADD`]({{< relref "/commands/xadd" >}}) command can take an explicit ID instead of the `*` wildcard ID that triggers auto-generation, like in the following examples: {{< clients-example stream_tutorial xadd_id >}} > XADD race:usa 0-1 racer Castilla @@ -148,7 +148,7 @@ If you're running Redis 7 or later, you can also provide an explicit ID consisti ## Getting data from Streams -Now we are finally able to append entries in our stream via [`XADD`](/commands/xadd). 
However, while appending data to a stream is quite obvious, the way streams can be queried in order to extract data is not so obvious. If we continue with the analogy of the log file, one obvious way is to mimic what we normally do with the Unix command `tail -f`, that is, we may start to listen in order to get the new messages that are appended to the stream. Note that unlike the blocking list operations of Redis, where a given element will reach a single client which is blocking in a *pop style* operation like [`BLPOP`](/commands/blpop), with streams we want multiple consumers to see the new messages appended to the stream (the same way many `tail -f` processes can see what is added to a log). Using the traditional terminology we want the streams to be able to *fan out* messages to multiple clients. +Now we are finally able to append entries in our stream via [`XADD`]({{< relref "/commands/xadd" >}}). However, while appending data to a stream is quite obvious, the way streams can be queried in order to extract data is not so obvious. If we continue with the analogy of the log file, one obvious way is to mimic what we normally do with the Unix command `tail -f`, that is, we may start to listen in order to get the new messages that are appended to the stream. Note that unlike the blocking list operations of Redis, where a given element will reach a single client which is blocking in a *pop style* operation like [`BLPOP`]({{< relref "/commands/blpop" >}}), with streams we want multiple consumers to see the new messages appended to the stream (the same way many `tail -f` processes can see what is added to a log). Using the traditional terminology we want the streams to be able to *fan out* messages to multiple clients. However, this is just one potential access mode. We could also see a stream in quite a different way: not as a messaging system, but as a *time series store*. 
In this case, maybe it's also useful to get the new messages appended, but another natural query mode is to get messages by ranges of time, or alternatively to iterate the messages using a cursor to incrementally check all the history. This is definitely another useful access mode. @@ -200,7 +200,7 @@ To query the stream by range we are only required to specify two IDs, *start* an 8) "2" {{< /clients-example >}} -Each entry returned is an array of two items: the ID and the list of field-value pairs. We already said that the entry IDs have a relation with the time, because the part at the left of the `-` character is the Unix time in milliseconds of the local node that created the stream entry, at the moment the entry was created (however note that streams are replicated with fully specified [`XADD`](/commands/xadd) commands, so the replicas will have identical IDs to the master). This means that I could query a range of time using [`XRANGE`](/commands/xrange). In order to do so, however, I may want to omit the sequence part of the ID: if omitted, in the start of the range it will be assumed to be 0, while in the end part it will be assumed to be the maximum sequence number available. This way, querying using just two milliseconds Unix times, we get all the entries that were generated in that range of time, in an inclusive way. For instance, if I want to query a two milliseconds period I could use: +Each entry returned is an array of two items: the ID and the list of field-value pairs. We already said that the entry IDs have a relation with the time, because the part at the left of the `-` character is the Unix time in milliseconds of the local node that created the stream entry, at the moment the entry was created (however note that streams are replicated with fully specified [`XADD`]({{< relref "/commands/xadd" >}}) commands, so the replicas will have identical IDs to the master). 
This means that I could query a range of time using [`XRANGE`]({{< relref "/commands/xrange" >}}). In order to do so, however, I may want to omit the sequence part of the ID: if omitted, in the start of the range it will be assumed to be 0, while in the end part it will be assumed to be the maximum sequence number available. This way, querying using just two milliseconds Unix times, we get all the entries that were generated in that range of time, in an inclusive way. For instance, if I want to query a two milliseconds period I could use: {{< clients-example stream_tutorial xrange_time >}} > XRANGE race:france 1692632086369 1692632086371 @@ -215,7 +215,7 @@ Each entry returned is an array of two items: the ID and the list of field-value 8) "1" {{< /clients-example >}} -I have only a single entry in this range. However in real data sets, I could query for ranges of hours, or there could be many items in just two milliseconds, and the result returned could be huge. For this reason, [`XRANGE`](/commands/xrange) supports an optional **COUNT** option at the end. By specifying a count, I can just get the first *N* items. If I want more, I can get the last ID returned, increment the sequence part by one, and query again. Let's see this in the following example. Let's assume that the stream `race:france` was populated with 4 items. To start my iteration, getting 2 items per command, I start with the full range, but with a count of 2. +I have only a single entry in this range. However in real data sets, I could query for ranges of hours, or there could be many items in just two milliseconds, and the result returned could be huge. For this reason, [`XRANGE`]({{< relref "/commands/xrange" >}}) supports an optional **COUNT** option at the end. By specifying a count, I can just get the first *N* items. If I want more, I can get the last ID returned, increment the sequence part by one, and query again. Let's see this in the following example. 
Let's assume that the stream `race:france` was populated with 4 items. To start my iteration, getting 2 items per command, I start with the full range, but with a count of 2. {{< clients-example stream_tutorial xrange_step_1 >}} > XRANGE race:france - + COUNT 2 @@ -239,7 +239,7 @@ I have only a single entry in this range. However in real data sets, I could que 8) "1" {{< /clients-example >}} -To continue the iteration with the next two items, I have to pick the last ID returned, that is `1692632094485-0`, and add the prefix `(` to it. The resulting exclusive range interval, that is `(1692632094485-0` in this case, can now be used as the new *start* argument for the next [`XRANGE`](/commands/xrange) call: +To continue the iteration with the next two items, I have to pick the last ID returned, that is `1692632094485-0`, and add the prefix `(` to it. The resulting exclusive range interval, that is `(1692632094485-0` in this case, can now be used as the new *start* argument for the next [`XRANGE`]({{< relref "/commands/xrange" >}}) call: {{< clients-example stream_tutorial xrange_step_2 >}} > XRANGE race:france (1692632094485-0 + COUNT 2 @@ -270,9 +270,9 @@ Now that we've retrieved 4 items out of a stream that only had 4 entries in it, (empty array) {{< /clients-example >}} -Since [`XRANGE`](/commands/xrange) complexity is *O(log(N))* to seek, and then *O(M)* to return M elements, with a small count the command has a logarithmic time complexity, which means that each step of the iteration is fast. So [`XRANGE`](/commands/xrange) is also the de facto *streams iterator* and does not require an **XSCAN** command. +Since [`XRANGE`]({{< relref "/commands/xrange" >}}) complexity is *O(log(N))* to seek, and then *O(M)* to return M elements, with a small count the command has a logarithmic time complexity, which means that each step of the iteration is fast. 
So [`XRANGE`]({{< relref "/commands/xrange" >}}) is also the de facto *streams iterator* and does not require an **XSCAN** command. -The command [`XREVRANGE`](/commands/xrevrange) is the equivalent of [`XRANGE`](/commands/xrange) but returning the elements in inverted order, so a practical use for [`XREVRANGE`](/commands/xrevrange) is to check what is the last item in a Stream: +The command [`XREVRANGE`]({{< relref "/commands/xrevrange" >}}) is the equivalent of [`XRANGE`]({{< relref "/commands/xrange" >}}) but returning the elements in inverted order, so a practical use for [`XREVRANGE`]({{< relref "/commands/xrevrange" >}}) is to check what is the last item in a Stream: {{< clients-example stream_tutorial xrevrange >}} > XREVRANGE race:france + - COUNT 1 @@ -287,7 +287,7 @@ The command [`XREVRANGE`](/commands/xrevrange) is the equivalent of [`XRANGE`](/ 8) "2" {{< /clients-example >}} -Note that the [`XREVRANGE`](/commands/xrevrange) command takes the *start* and *stop* arguments in reverse order. +Note that the [`XREVRANGE`]({{< relref "/commands/xrevrange" >}}) command takes the *start* and *stop* arguments in reverse order. ## Listening for new items with XREAD @@ -297,7 +297,7 @@ When we do not want to access items by a range in a stream, usually what we want 2. While in Pub/Sub messages are *fire and forget* and are never stored anyway, and while when using blocking lists, when a message is received by the client it is *popped* (effectively removed) from the list, streams work in a fundamentally different way. All the messages are appended in the stream indefinitely (unless the user explicitly asks to delete entries): different consumers will know what is a new message from its point of view by remembering the ID of the last message received. 3. 
Streams Consumer Groups provide a level of control that Pub/Sub or blocking lists cannot achieve, with different groups for the same stream, explicit acknowledgment of processed items, ability to inspect the pending items, claiming of unprocessed messages, and coherent history visibility for each single client, that is only able to see its private past history of messages. -The command that provides the ability to listen for new messages arriving into a stream is called [`XREAD`](/commands/xread). It's a bit more complex than [`XRANGE`](/commands/xrange), so we'll start showing simple forms, and later the whole command layout will be provided. +The command that provides the ability to listen for new messages arriving into a stream is called [`XREAD`]({{< relref "/commands/xread" >}}). It's a bit more complex than [`XRANGE`]({{< relref "/commands/xrange" >}}), so we'll start showing simple forms, and later the whole command layout will be provided. {{< clients-example stream_tutorial xread >}} > XREAD COUNT 2 STREAMS race:france 0 @@ -322,30 +322,30 @@ The command that provides the ability to listen for new messages arriving into a 8) "1" {{< /clients-example >}} -The above is the non-blocking form of [`XREAD`](/commands/xread). Note that the **COUNT** option is not mandatory, in fact the only mandatory option of the command is the **STREAMS** option, that specifies a list of keys together with the corresponding maximum ID already seen for each stream by the calling consumer, so that the command will provide the client only with messages with an ID greater than the one we specified. +The above is the non-blocking form of [`XREAD`]({{< relref "/commands/xread" >}}). 
Note that the **COUNT** option is not mandatory, in fact the only mandatory option of the command is the **STREAMS** option, that specifies a list of keys together with the corresponding maximum ID already seen for each stream by the calling consumer, so that the command will provide the client only with messages with an ID greater than the one we specified. In the above command we wrote `STREAMS race:france 0` so we want all the messages in the Stream `race:france` having an ID greater than `0-0`. As you can see in the example above, the command returns the key name, because actually it is possible to call this command with more than one key to read from different streams at the same time. I could write, for instance: `STREAMS race:france race:italy 0 0`. Note how after the **STREAMS** option we need to provide the key names, and later the IDs. For this reason, the **STREAMS** option must always be the last option. Any other options must come before the **STREAMS** option. -Apart from the fact that [`XREAD`](/commands/xread) can access multiple streams at once, and that we are able to specify the last ID we own to just get newer messages, in this simple form the command is not doing something so different compared to [`XRANGE`](/commands/xrange). However, the interesting part is that we can turn [`XREAD`](/commands/xread) into a *blocking command* easily, by specifying the **BLOCK** argument: +Apart from the fact that [`XREAD`]({{< relref "/commands/xread" >}}) can access multiple streams at once, and that we are able to specify the last ID we own to just get newer messages, in this simple form the command is not doing something so different compared to [`XRANGE`]({{< relref "/commands/xrange" >}}). 
However, the interesting part is that we can turn [`XREAD`]({{< relref "/commands/xread" >}}) into a *blocking command* easily, by specifying the **BLOCK** argument: ``` > XREAD BLOCK 0 STREAMS race:france $ ``` -Note that in the example above, other than removing **COUNT**, I specified the new **BLOCK** option with a timeout of 0 milliseconds (that means to never timeout). Moreover, instead of passing a normal ID for the stream `mystream` I passed the special ID `$`. This special ID means that [`XREAD`](/commands/xread) should use as last ID the maximum ID already stored in the stream `mystream`, so that we will receive only *new* messages, starting from the time we started listening. This is similar to the `tail -f` Unix command in some way. +Note that in the example above, other than removing **COUNT**, I specified the new **BLOCK** option with a timeout of 0 milliseconds (that means to never timeout). Moreover, instead of passing a normal ID for the stream `mystream` I passed the special ID `$`. This special ID means that [`XREAD`]({{< relref "/commands/xread" >}}) should use as last ID the maximum ID already stored in the stream `mystream`, so that we will receive only *new* messages, starting from the time we started listening. This is similar to the `tail -f` Unix command in some way. Note that when the **BLOCK** option is used, we do not have to use the special ID `$`. We can use any valid ID. If the command is able to serve our request immediately without blocking, it will do so, otherwise it will block. Normally if we want to consume the stream starting from new entries, we start with the ID `$`, and after that we continue using the ID of the last message received to make the next call, and so forth. -The blocking form of [`XREAD`](/commands/xread) is also able to listen to multiple Streams, just by specifying multiple key names. 
If the request can be served synchronously because there is at least one stream with elements greater than the corresponding ID we specified, it returns with the results. Otherwise, the command will block and will return the items of the first stream which gets new data (according to the specified ID). +The blocking form of [`XREAD`]({{< relref "/commands/xread" >}}) is also able to listen to multiple Streams, just by specifying multiple key names. If the request can be served synchronously because there is at least one stream with elements greater than the corresponding ID we specified, it returns with the results. Otherwise, the command will block and will return the items of the first stream which gets new data (according to the specified ID). Similarly to blocking list operations, blocking stream reads are *fair* from the point of view of clients waiting for data, since the semantics is FIFO style. The first client that blocked for a given stream will be the first to be unblocked when new items are available. -[`XREAD`](/commands/xread) has no other options than **COUNT** and **BLOCK**, so it's a pretty basic command with a specific purpose to attach consumers to one or multiple streams. More powerful features to consume streams are available using the consumer groups API, however reading via consumer groups is implemented by a different command called [`XREADGROUP`](/commands/xreadgroup), covered in the next section of this guide. +[`XREAD`]({{< relref "/commands/xread" >}}) has no other options than **COUNT** and **BLOCK**, so it's a pretty basic command with a specific purpose to attach consumers to one or multiple streams. More powerful features to consume streams are available using the consumer groups API, however reading via consumer groups is implemented by a different command called [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}), covered in the next section of this guide. 
## Consumer groups -When the task at hand is to consume the same stream from different clients, then [`XREAD`](/commands/xread) already offers a way to *fan-out* to N clients, potentially also using replicas in order to provide more read scalability. However in certain problems what we want to do is not to provide the same stream of messages to many clients, but to provide a *different subset* of messages from the same stream to many clients. An obvious case where this is useful is that of messages which are slow to process: the ability to have N different workers that will receive different parts of the stream allows us to scale message processing, by routing different messages to different workers that are ready to do more work. +When the task at hand is to consume the same stream from different clients, then [`XREAD`]({{< relref "/commands/xread" >}}) already offers a way to *fan-out* to N clients, potentially also using replicas in order to provide more read scalability. However in certain problems what we want to do is not to provide the same stream of messages to many clients, but to provide a *different subset* of messages from the same stream to many clients. An obvious case where this is useful is that of messages which are slow to process: the ability to have N different workers that will receive different parts of the stream allows us to scale message processing, by routing different messages to different workers that are ready to do more work. 
In practical terms, if we imagine having three consumers C1, C2, C3, and a stream that contains the messages 1, 2, 3, 4, 5, 6, 7 then what we want is to serve the messages according to the following diagram: @@ -386,13 +386,13 @@ In a way, a consumer group can be imagined as some *amount of state* about a str +----------------------------------------+ ``` -If you see this from this point of view, it is very simple to understand what a consumer group can do, how it is able to just provide consumers with their history of pending messages, and how consumers asking for new messages will just be served with message IDs greater than `last_delivered_id`. At the same time, if you look at the consumer group as an auxiliary data structure for Redis streams, it is obvious that a single stream can have multiple consumer groups, that have a different set of consumers. Actually, it is even possible for the same stream to have clients reading without consumer groups via [`XREAD`](/commands/xread), and clients reading via [`XREADGROUP`](/commands/xreadgroup) in different consumer groups. +If you see this from this point of view, it is very simple to understand what a consumer group can do, how it is able to just provide consumers with their history of pending messages, and how consumers asking for new messages will just be served with message IDs greater than `last_delivered_id`. At the same time, if you look at the consumer group as an auxiliary data structure for Redis streams, it is obvious that a single stream can have multiple consumer groups, that have a different set of consumers. Actually, it is even possible for the same stream to have clients reading without consumer groups via [`XREAD`]({{< relref "/commands/xread" >}}), and clients reading via [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) in different consumer groups. Now it's time to zoom in to see the fundamental consumer group commands. 
They are the following: -* [`XGROUP`](/commands/xgroup) is used in order to create, destroy and manage consumer groups. -* [`XREADGROUP`](/commands/xreadgroup) is used to read from a stream via a consumer group. -* [`XACK`](/commands/xack) is the command that allows a consumer to mark a pending message as correctly processed. +* [`XGROUP`]({{< relref "/commands/xgroup" >}}) is used in order to create, destroy and manage consumer groups. +* [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) is used to read from a stream via a consumer group. +* [`XACK`]({{< relref "/commands/xack" >}}) is the command that allows a consumer to mark a pending message as correctly processed. ## Creating a consumer group @@ -405,16 +405,16 @@ OK As you can see in the command above when creating the consumer group we have to specify an ID, which in the example is just `$`. This is needed because the consumer group, among the other states, must have an idea about what message to serve next at the first consumer connecting, that is, what was the *last message ID* when the group was just created. If we provide `$` as we did, then only new messages arriving in the stream from now on will be provided to the consumers in the group. If we specify `0` instead the consumer group will consume *all* the messages in the stream history to start with. Of course, you can specify any other valid ID. What you know is that the consumer group will start delivering messages that are greater than the ID you specify. Because `$` means the current greatest ID in the stream, specifying `$` will have the effect of consuming only new messages. 
-[`XGROUP CREATE`](/commands/xgroup-create) also supports creating the stream automatically, if it doesn't exist, using the optional `MKSTREAM` subcommand as the last argument: +[`XGROUP CREATE`]({{< relref "/commands/xgroup-create" >}}) also supports creating the stream automatically, if it doesn't exist, using the optional `MKSTREAM` subcommand as the last argument: {{< clients-example stream_tutorial xgroup_create_mkstream >}} > XGROUP CREATE race:italy italy_riders $ MKSTREAM OK {{< /clients-example >}} -Now that the consumer group is created we can immediately try to read messages via the consumer group using the [`XREADGROUP`](/commands/xreadgroup) command. We'll read from consumers, that we will call Alice and Bob, to see how the system will return different messages to Alice or Bob. +Now that the consumer group is created we can immediately try to read messages via the consumer group using the [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) command. We'll read from consumers, that we will call Alice and Bob, to see how the system will return different messages to Alice or Bob. -[`XREADGROUP`](/commands/xreadgroup) is very similar to [`XREAD`](/commands/xread) and provides the same **BLOCK** option, otherwise it is a synchronous command. However there is a *mandatory* option that must be always specified, which is **GROUP** and has two arguments: the name of the consumer group, and the name of the consumer that is attempting to read. The option **COUNT** is also supported and is identical to the one in [`XREAD`](/commands/xread). +[`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) is very similar to [`XREAD`]({{< relref "/commands/xread" >}}) and provides the same **BLOCK** option, otherwise it is a synchronous command. However there is a *mandatory* option that must be always specified, which is **GROUP** and has two arguments: the name of the consumer group, and the name of the consumer that is attempting to read. 
The option **COUNT** is also supported and is identical to the one in [`XREAD`]({{< relref "/commands/xread" >}}). We'll add riders to the race:italy stream and try reading something using the consumer group: Note: *here rider is the field name, and the name is the associated value. Remember that stream items are small dictionaries.* @@ -437,14 +437,14 @@ Note: *here rider is the field name, and the name is the associated value. Remem 2) "Castilla" {{< /clients-example >}} -[`XREADGROUP`](/commands/xreadgroup) replies are just like [`XREAD`](/commands/xread) replies. Note however the `GROUP ` provided above. It states that I want to read from the stream using the consumer group `mygroup` and I'm the consumer `Alice`. Every time a consumer performs an operation with a consumer group, it must specify its name, uniquely identifying this consumer inside the group. +[`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) replies are just like [`XREAD`]({{< relref "/commands/xread" >}}) replies. Note however the `GROUP ` provided above. It states that I want to read from the stream using the consumer group `mygroup` and I'm the consumer `Alice`. Every time a consumer performs an operation with a consumer group, it must specify its name, uniquely identifying this consumer inside the group. There is another very important detail in the command line above, after the mandatory **STREAMS** option the ID requested for the key `mystream` is the special ID `>`. This special ID is only valid in the context of consumer groups, and it means: **messages never delivered to other consumers so far**. -This is almost always what you want, however it is also possible to specify a real ID, such as `0` or any other valid ID, in this case, however, what happens is that we request from [`XREADGROUP`](/commands/xreadgroup) to just provide us with the **history of pending messages**, and in such case, will never see new messages in the group. 
So basically [`XREADGROUP`](/commands/xreadgroup) has the following behavior based on the ID we specify: +This is almost always what you want, however it is also possible to specify a real ID, such as `0` or any other valid ID, in this case, however, what happens is that we request from [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) to just provide us with the **history of pending messages**, and in such case, will never see new messages in the group. So basically [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) has the following behavior based on the ID we specify: * If the ID is the special ID `>` then the command will return only new messages never delivered to other consumers so far, and as a side effect, will update the consumer group's *last ID*. -* If the ID is any other valid numerical ID, then the command will let us access our *history of pending messages*. That is, the set of messages that were delivered to this specified consumer (identified by the provided name), and never acknowledged so far with [`XACK`](/commands/xack). +* If the ID is any other valid numerical ID, then the command will let us access our *history of pending messages*. That is, the set of messages that were delivered to this specified consumer (identified by the provided name), and never acknowledged so far with [`XACK`]({{< relref "/commands/xack" >}}). We can test this behavior immediately specifying an ID of 0, without any **COUNT** option: we'll just see the only pending message, that is, the one about Castilla: @@ -466,7 +466,7 @@ However, if we acknowledge the message as processed, it will no longer be part o 2) (empty array) {{< /clients-example >}} -Don't worry if you yet don't know how [`XACK`](/commands/xack) works, the idea is just that processed messages are no longer part of the history that we can access. 
+Don't worry if you yet don't know how [`XACK`]({{< relref "/commands/xack" >}}) works, the idea is just that processed messages are no longer part of the history that we can access. Now it's Bob's turn to read something: @@ -488,8 +488,8 @@ This way Alice, Bob, and any other consumer in the group, are able to read diffe There are a few things to keep in mind: * Consumers are auto-created the first time they are mentioned, no need for explicit creation. -* Even with [`XREADGROUP`](/commands/xreadgroup) you can read from multiple keys at the same time, however for this to work, you need to create a consumer group with the same name in every stream. This is not a common need, but it is worth mentioning that the feature is technically available. -* [`XREADGROUP`](/commands/xreadgroup) is a *write command* because even if it reads from the stream, the consumer group is modified as a side effect of reading, so it can only be called on master instances. +* Even with [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) you can read from multiple keys at the same time, however for this to work, you need to create a consumer group with the same name in every stream. This is not a common need, but it is worth mentioning that the feature is technically available. +* [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) is a *write command* because even if it reads from the stream, the consumer group is modified as a side effect of reading, so it can only be called on master instances. An example of a consumer implementation, using consumer groups, written in the Ruby language could be the following. 
The Ruby code is aimed to be readable by virtually any experienced programmer, even if they do not know Ruby: @@ -558,7 +558,7 @@ The example above allows us to write consumers that participate in the same cons Redis consumer groups offer a feature that is used in these situations in order to *claim* the pending messages of a given consumer so that such messages will change ownership and will be re-assigned to a different consumer. The feature is very explicit. A consumer has to inspect the list of pending messages, and will have to claim specific messages using a special command, otherwise the server will leave the messages pending forever and assigned to the old consumer. In this way different applications can choose if to use such a feature or not, and exactly how to use it. -The first step of this process is just a command that provides observability of pending entries in the consumer group and is called [`XPENDING`](/commands/xpending). +The first step of this process is just a command that provides observability of pending entries in the consumer group and is called [`XPENDING`]({{< relref "/commands/xpending" >}}). This is a read-only command which is always safe to call and will not change ownership of any message. In its simplest form, the command is called with two arguments, which are the name of the stream and the name of the consumer group. @@ -572,15 +572,15 @@ In its simplest form, the command is called with two arguments, which are the na {{< /clients-example >}} When called in this way, the command outputs the total number of pending messages in the consumer group (two in this case), the lower and higher message ID among the pending messages, and finally a list of consumers and the number of pending messages they have. -We have only Bob with two pending messages because the single message that Alice requested was acknowledged using [`XACK`](/commands/xack). 
+We have only Bob with two pending messages because the single message that Alice requested was acknowledged using [`XACK`]({{< relref "/commands/xack" >}}). -We can ask for more information by giving more arguments to [`XPENDING`](/commands/xpending), because the full command signature is the following: +We can ask for more information by giving more arguments to [`XPENDING`]({{< relref "/commands/xpending" >}}), because the full command signature is the following: ``` XPENDING [[IDLE ] []] ``` -By providing a start and end ID (that can be just `-` and `+` as in [`XRANGE`](/commands/xrange)) and a count to control the amount of information returned by the command, we are able to know more about the pending messages. The optional final argument, the consumer name, is used if we want to limit the output to just messages pending for a given consumer, but won't use this feature in the following example. +By providing a start and end ID (that can be just `-` and `+` as in [`XRANGE`]({{< relref "/commands/xrange" >}})) and a count to control the amount of information returned by the command, we are able to know more about the pending messages. The optional final argument, the consumer name, is used if we want to limit the output to just messages pending for a given consumer, but won't use this feature in the following example. {{< clients-example stream_tutorial xpending_plus_minus >}} > XPENDING race:italy italy_riders - + 10 @@ -597,7 +597,7 @@ By providing a start and end ID (that can be just `-` and `+` as in [`XRANGE`](/ Now we have the details for each message: the ID, the consumer name, the *idle time* in milliseconds, which is how many milliseconds have passed since the last time the message was delivered to some consumer, and finally the number of times that a given message was delivered. We have two messages from Bob, and they are idle for 60000+ milliseconds, about a minute. 
-Note that nobody prevents us from checking what the first message content was by just using [`XRANGE`](/commands/xrange). +Note that nobody prevents us from checking what the first message content was by just using [`XRANGE`]({{< relref "/commands/xrange" >}}). {{< clients-example stream_tutorial xrange_pending >}} > XRANGE race:italy 1692632647899-0 1692632647899-0 @@ -606,7 +606,7 @@ Note that nobody prevents us from checking what the first message content was by 2) "Royce" {{< /clients-example >}} -We have just to repeat the same ID twice in the arguments. Now that we have some ideas, Alice may decide that after 1 minute of not processing messages, Bob will probably not recover quickly, and it's time to *claim* such messages and resume the processing in place of Bob. To do so, we use the [`XCLAIM`](/commands/xclaim) command. +We have just to repeat the same ID twice in the arguments. Now that we have some ideas, Alice may decide that after 1 minute of not processing messages, Bob will probably not recover quickly, and it's time to *claim* such messages and resume the processing in place of Bob. To do so, we use the [`XCLAIM`]({{< relref "/commands/xclaim" >}}) command. This command is very complex and full of options in its full form, since it is used for replication of consumer groups changes, but we'll use just the arguments that we need normally. In this case it is as simple as: @@ -634,17 +634,17 @@ This is the result of the command execution: The message was successfully claimed by Alice, who can now process the message and acknowledge it, and move things forward even if the original consumer is not recovering. -It is clear from the example above that as a side effect of successfully claiming a given message, the [`XCLAIM`](/commands/xclaim) command also returns it. However this is not mandatory. The **JUSTID** option can be used in order to return just the IDs of the message successfully claimed. 
This is useful if you want to reduce the bandwidth used between the client and the server (and also the performance of the command) and you are not interested in the message because your consumer is implemented in a way that it will rescan the history of pending messages from time to time. +It is clear from the example above that as a side effect of successfully claiming a given message, the [`XCLAIM`]({{< relref "/commands/xclaim" >}}) command also returns it. However this is not mandatory. The **JUSTID** option can be used in order to return just the IDs of the message successfully claimed. This is useful if you want to reduce the bandwidth used between the client and the server (and also the performance of the command) and you are not interested in the message because your consumer is implemented in a way that it will rescan the history of pending messages from time to time. Claiming may also be implemented by a separate process: one that just checks the list of pending messages, and assigns idle messages to consumers that appear to be active. Active consumers can be obtained using one of the observability features of Redis streams. This is the topic of the next section. ## Automatic claiming -The [`XAUTOCLAIM`](/commands/xautoclaim) command, added in Redis 6.2, implements the claiming process that we've described above. -[`XPENDING`](/commands/xpending) and [`XCLAIM`](/commands/xclaim) provide the basic building blocks for different types of recovery mechanisms. +The [`XAUTOCLAIM`]({{< relref "/commands/xautoclaim" >}}) command, added in Redis 6.2, implements the claiming process that we've described above. +[`XPENDING`]({{< relref "/commands/xpending" >}}) and [`XCLAIM`]({{< relref "/commands/xclaim" >}}) provide the basic building blocks for different types of recovery mechanisms. This command optimizes the generic process by having Redis manage it and offers a simple solution for most recovery needs. 
-[`XAUTOCLAIM`](/commands/xautoclaim) identifies idle pending messages and transfers ownership of them to a consumer. +[`XAUTOCLAIM`]({{< relref "/commands/xautoclaim" >}}) identifies idle pending messages and transfers ownership of them to a consumer. The command's signature looks like this: ``` @@ -661,7 +661,7 @@ So, in the example above, I could have used automatic claiming to claim a single 2) "Sam-Bodden" {{< /clients-example >}} -Like [`XCLAIM`](/commands/xclaim), the command replies with an array of the claimed messages, but it also returns a stream ID that allows iterating the pending entries. +Like [`XCLAIM`]({{< relref "/commands/xclaim" >}}), the command replies with an array of the claimed messages, but it also returns a stream ID that allows iterating the pending entries. The stream ID is a cursor, and I can use it in my next call to continue in claiming idle pending messages: {{< clients-example stream_tutorial xautoclaim_cursor >}} @@ -672,20 +672,20 @@ The stream ID is a cursor, and I can use it in my next call to continue in claim 2) "Royce" {{< /clients-example >}} -When [`XAUTOCLAIM`](/commands/xautoclaim) returns the "0-0" stream ID as a cursor, that means that it reached the end of the consumer group pending entries list. -That doesn't mean that there are no new idle pending messages, so the process continues by calling [`XAUTOCLAIM`](/commands/xautoclaim) from the beginning of the stream. +When [`XAUTOCLAIM`]({{< relref "/commands/xautoclaim" >}}) returns the "0-0" stream ID as a cursor, that means that it reached the end of the consumer group pending entries list. +That doesn't mean that there are no new idle pending messages, so the process continues by calling [`XAUTOCLAIM`]({{< relref "/commands/xautoclaim" >}}) from the beginning of the stream. ## Claiming and the delivery counter -The counter that you observe in the [`XPENDING`](/commands/xpending) output is the number of deliveries of each message. 
The counter is incremented in two ways: when a message is successfully claimed via [`XCLAIM`](/commands/xclaim) or when an [`XREADGROUP`](/commands/xreadgroup) call is used in order to access the history of pending messages. +The counter that you observe in the [`XPENDING`]({{< relref "/commands/xpending" >}}) output is the number of deliveries of each message. The counter is incremented in two ways: when a message is successfully claimed via [`XCLAIM`]({{< relref "/commands/xclaim" >}}) or when an [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) call is used in order to access the history of pending messages. When there are failures, it is normal that messages will be delivered multiple times, but eventually they usually get processed and acknowledged. However there might be a problem processing some specific message, because it is corrupted or crafted in a way that triggers a bug in the processing code. In such a case what happens is that consumers will continuously fail to process this particular message. Because we have the counter of the delivery attempts, we can use that counter to detect messages that for some reason are not processable. So once the deliveries counter reaches a given large number that you chose, it is probably wiser to put such messages in another stream and send a notification to the system administrator. This is basically the way that Redis Streams implements the *dead letter* concept. ## Streams observability -Messaging systems that lack observability are very hard to work with. Not knowing who is consuming messages, what messages are pending, the set of consumer groups active in a given stream, makes everything opaque. For this reason, Redis Streams and consumer groups have different ways to observe what is happening. We already covered [`XPENDING`](/commands/xpending), which allows us to inspect the list of messages that are under processing at a given moment, together with their idle time and number of deliveries. 
+Messaging systems that lack observability are very hard to work with. Not knowing who is consuming messages, what messages are pending, the set of consumer groups active in a given stream, makes everything opaque. For this reason, Redis Streams and consumer groups have different ways to observe what is happening. We already covered [`XPENDING`]({{< relref "/commands/xpending" >}}), which allows us to inspect the list of messages that are under processing at a given moment, together with their idle time and number of deliveries. -However we may want to do more than that, and the [`XINFO`](/commands/xinfo) command is an observability interface that can be used with sub-commands in order to get information about streams or consumer groups. +However we may want to do more than that, and the [`XINFO`]({{< relref "/commands/xinfo" >}}) command is an observability interface that can be used with sub-commands in order to get information about streams or consumer groups. This command uses subcommands in order to show different information about the status of the stream and its consumer groups. For instance **XINFO STREAM ** reports information about the stream itself. @@ -725,7 +725,7 @@ The output shows information about how the stream is encoded internally, and als 8) "1692632662819-0" {{< /clients-example >}} -As you can see in this and in the previous output, the [`XINFO`](/commands/xinfo) command outputs a sequence of field-value items. Because it is an observability command this allows the human user to immediately understand what information is reported, and allows the command to report more information in the future by adding more fields without breaking compatibility with older clients. Other commands that must be more bandwidth efficient, like [`XPENDING`](/commands/xpending), just report the information without the field names. 
+As you can see in this and in the previous output, the [`XINFO`]({{< relref "/commands/xinfo" >}}) command outputs a sequence of field-value items. Because it is an observability command this allows the human user to immediately understand what information is reported, and allows the command to report more information in the future by adding more fields without breaking compatibility with older clients. Other commands that must be more bandwidth efficient, like [`XPENDING`]({{< relref "/commands/xpending" >}}), just report the information without the field names. The output of the example above, where the **GROUPS** subcommand is used, should be clear observing the field names. We can check in more detail the state of a specific consumer group by checking the consumers that are registered in the group. @@ -784,7 +784,7 @@ So basically Kafka partitions are more similar to using N different Redis keys, ## Capped Streams -Many applications do not want to collect data into a stream forever. Sometimes it is useful to have at maximum a given number of items inside a stream, other times once a given size is reached, it is useful to move data from Redis to a storage which is not in memory and not as fast but suited to store the history for, potentially, decades to come. Redis streams have some support for this. One is the **MAXLEN** option of the [`XADD`](/commands/xadd) command. This option is very simple to use: +Many applications do not want to collect data into a stream forever. Sometimes it is useful to have at maximum a given number of items inside a stream, other times once a given size is reached, it is useful to move data from Redis to a storage which is not in memory and not as fast but suited to store the history for, potentially, decades to come. Redis streams have some support for this. One is the **MAXLEN** option of the [`XADD`]({{< relref "/commands/xadd" >}}) command. 
This option is very simple to use: {{< clients-example stream_tutorial maxlen >}} > XADD race:italy MAXLEN 2 * rider Jones @@ -814,39 +814,39 @@ XADD race:italy MAXLEN ~ 1000 * ... entry fields here ... The `~` argument between the **MAXLEN** option and the actual count means, I don't really need this to be exactly 1000 items. It can be 1000 or 1010 or 1030, just make sure to save at least 1000 items. With this argument, the trimming is performed only when we can remove a whole node. This makes it much more efficient, and it is usually what you want. You'll note here that the client libraries have various implementations of this. For example, the Python client defaults to approximate and has to be explicitly set to a true length. -There is also the [`XTRIM`](/commands/xtrim) command, which performs something very similar to what the **MAXLEN** option does above, except that it can be run by itself: +There is also the [`XTRIM`]({{< relref "/commands/xtrim" >}}) command, which performs something very similar to what the **MAXLEN** option does above, except that it can be run by itself: {{< clients-example stream_tutorial xtrim >}} > XTRIM race:italy MAXLEN 10 (integer) 0 {{< /clients-example >}} -Or, as for the [`XADD`](/commands/xadd) option: +Or, as for the [`XADD`]({{< relref "/commands/xadd" >}}) option: {{< clients-example stream_tutorial xtrim2 >}} > XTRIM mystream MAXLEN ~ 10 (integer) 0 {{< /clients-example >}} -However, [`XTRIM`](/commands/xtrim) is designed to accept different trimming strategies. Another trimming strategy is **MINID**, that evicts entries with IDs lower than the one specified. +However, [`XTRIM`]({{< relref "/commands/xtrim" >}}) is designed to accept different trimming strategies. Another trimming strategy is **MINID**, that evicts entries with IDs lower than the one specified. -As [`XTRIM`](/commands/xtrim) is an explicit command, the user is expected to know about the possible shortcomings of different trimming strategies. 
+As [`XTRIM`]({{< relref "/commands/xtrim" >}}) is an explicit command, the user is expected to know about the possible shortcomings of different trimming strategies. -Another useful eviction strategy that may be added to [`XTRIM`](/commands/xtrim) in the future, is to remove by a range of IDs to ease use of [`XRANGE`](/commands/xrange) and [`XTRIM`](/commands/xtrim) to move data from Redis to other storage systems if needed. +Another useful eviction strategy that may be added to [`XTRIM`]({{< relref "/commands/xtrim" >}}) in the future, is to remove by a range of IDs to ease use of [`XRANGE`]({{< relref "/commands/xrange" >}}) and [`XTRIM`]({{< relref "/commands/xtrim" >}}) to move data from Redis to other storage systems if needed. ## Special IDs in the streams API You may have noticed that there are several special IDs that can be used in the Redis API. Here is a short recap, so that they can make more sense in the future. -The first two special IDs are `-` and `+`, and are used in range queries with the [`XRANGE`](/commands/xrange) command. Those two IDs respectively mean the smallest ID possible (that is basically `0-1`) and the greatest ID possible (that is `18446744073709551615-18446744073709551615`). As you can see it is a lot cleaner to write `-` and `+` instead of those numbers. +The first two special IDs are `-` and `+`, and are used in range queries with the [`XRANGE`]({{< relref "/commands/xrange" >}}) command. Those two IDs respectively mean the smallest ID possible (that is basically `0-1`) and the greatest ID possible (that is `18446744073709551615-18446744073709551615`). As you can see it is a lot cleaner to write `-` and `+` instead of those numbers. -Then there are APIs where we want to say, the ID of the item with the greatest ID inside the stream. This is what `$` means. 
So for instance if I want only new entries with [`XREADGROUP`](/commands/xreadgroup) I use this ID to signify I already have all the existing entries, but not the new ones that will be inserted in the future. Similarly when I create or set the ID of a consumer group, I can set the last delivered item to `$` in order to just deliver new entries to the consumers in the group. +Then there are APIs where we want to say, the ID of the item with the greatest ID inside the stream. This is what `$` means. So for instance if I want only new entries with [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) I use this ID to signify I already have all the existing entries, but not the new ones that will be inserted in the future. Similarly when I create or set the ID of a consumer group, I can set the last delivered item to `$` in order to just deliver new entries to the consumers in the group. As you can see `$` does not mean `+`, they are two different things, as `+` is the greatest ID possible in every possible stream, while `$` is the greatest ID in a given stream containing given entries. Moreover APIs will usually only understand `+` or `$`, yet it was useful to avoid loading a given symbol with multiple meanings. -Another special ID is `>`, that is a special meaning only related to consumer groups and only when the [`XREADGROUP`](/commands/xreadgroup) command is used. This special ID means that we want only entries that were never delivered to other consumers so far. So basically the `>` ID is the *last delivered ID* of a consumer group. +Another special ID is `>`, that has a special meaning only related to consumer groups and only when the [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) command is used. This special ID means that we want only entries that were never delivered to other consumers so far. So basically the `>` ID is the *last delivered ID* of a consumer group. 
-Finally the special ID `*`, that can be used only with the [`XADD`](/commands/xadd) command, means to auto select an ID for us for the new entry. +Finally the special ID `*`, that can be used only with the [`XADD`]({{< relref "/commands/xadd" >}}) command, means to auto select an ID for us for the new entry. So we have `-`, `+`, `$`, `>` and `*`, and all have a different meaning, and most of the time, can be used in different contexts. @@ -857,14 +857,14 @@ A Stream, like any other Redis data structure, is asynchronously replicated to r However note that Redis streams and consumer groups are persisted and replicated using the Redis default replication, so: * AOF must be used with a strong fsync policy if persistence of messages is important in your application. -* By default the asynchronous replication will not guarantee that [`XADD`](/commands/xadd) commands or consumer groups state changes are replicated: after a failover something can be missing depending on the ability of replicas to receive the data from the master. -* The [`WAIT`](/commands/wait) command may be used in order to force the propagation of the changes to a set of replicas. However note that while this makes it very unlikely that data is lost, the Redis failover process as operated by Sentinel or Redis Cluster performs only a *best effort* check to failover to the replica which is the most updated, and under certain specific failure conditions may promote a replica that lacks some data. +* By default the asynchronous replication will not guarantee that [`XADD`]({{< relref "/commands/xadd" >}}) commands or consumer groups state changes are replicated: after a failover something can be missing depending on the ability of replicas to receive the data from the master. +* The [`WAIT`]({{< relref "/commands/wait" >}}) command may be used in order to force the propagation of the changes to a set of replicas. 
However note that while this makes it very unlikely that data is lost, the Redis failover process as operated by Sentinel or Redis Cluster performs only a *best effort* check to failover to the replica which is the most updated, and under certain specific failure conditions may promote a replica that lacks some data. So when designing an application using Redis streams and consumer groups, make sure to understand the semantical properties your application should have during failures, and configure things accordingly, evaluating whether it is safe enough for your use case. ## Removing single items from a stream -Streams also have a special command for removing items from the middle of a stream, just by ID. Normally for an append only data structure this may look like an odd feature, but it is actually useful for applications involving, for instance, privacy regulations. The command is called [`XDEL`](/commands/xdel) and receives the name of the stream followed by the IDs to delete: +Streams also have a special command for removing items from the middle of a stream, just by ID. Normally for an append only data structure this may look like an odd feature, but it is actually useful for applications involving, for instance, privacy regulations. The command is called [`XDEL`]({{< relref "/commands/xdel" >}}) and receives the name of the stream followed by the IDs to delete: {{< clients-example stream_tutorial xdel >}} > XRANGE race:italy - + COUNT 2 @@ -886,27 +886,27 @@ However in the current implementation, memory is not really reclaimed until a ma ## Zero length streams -A difference between streams and other Redis data structures is that when the other data structures no longer have any elements, as a side effect of calling commands that remove elements, the key itself will be removed. So for instance, a sorted set will be completely removed when a call to [`ZREM`](/commands/zrem) will remove the last element in the sorted set. 
Streams, on the other hand, are allowed to stay at zero elements, both as a result of using a **MAXLEN** option with a count of zero ([`XADD`](/commands/xadd) and [`XTRIM`](/commands/xtrim) commands), or because [`XDEL`](/commands/xdel) was called. +A difference between streams and other Redis data structures is that when the other data structures no longer have any elements, as a side effect of calling commands that remove elements, the key itself will be removed. So for instance, a sorted set will be completely removed when a call to [`ZREM`]({{< relref "/commands/zrem" >}}) will remove the last element in the sorted set. Streams, on the other hand, are allowed to stay at zero elements, both as a result of using a **MAXLEN** option with a count of zero ([`XADD`]({{< relref "/commands/xadd" >}}) and [`XTRIM`]({{< relref "/commands/xtrim" >}}) commands), or because [`XDEL`]({{< relref "/commands/xdel" >}}) was called. The reason why such an asymmetry exists is because Streams may have associated consumer groups, and we do not want to lose the state that the consumer groups defined just because there are no longer any items in the stream. Currently the stream is not deleted even when it has no associated consumer groups. ## Total latency of consuming a message -Non blocking stream commands like [`XRANGE`](/commands/xrange) and [`XREAD`](/commands/xread) or [`XREADGROUP`](/commands/xreadgroup) without the BLOCK option are served synchronously like any other Redis command, so to discuss latency of such commands is meaningless: it is more interesting to check the time complexity of the commands in the Redis documentation. It should be enough to say that stream commands are at least as fast as sorted set commands when extracting ranges, and that [`XADD`](/commands/xadd) is very fast and can easily insert from half a million to one million items per second in an average machine if pipelining is used. 
+Non blocking stream commands like [`XRANGE`]({{< relref "/commands/xrange" >}}) and [`XREAD`]({{< relref "/commands/xread" >}}) or [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) without the BLOCK option are served synchronously like any other Redis command, so to discuss latency of such commands is meaningless: it is more interesting to check the time complexity of the commands in the Redis documentation. It should be enough to say that stream commands are at least as fast as sorted set commands when extracting ranges, and that [`XADD`]({{< relref "/commands/xadd" >}}) is very fast and can easily insert from half a million to one million items per second in an average machine if pipelining is used. -However latency becomes an interesting parameter if we want to understand the delay of processing a message, in the context of blocking consumers in a consumer group, from the moment the message is produced via [`XADD`](/commands/xadd), to the moment the message is obtained by the consumer because [`XREADGROUP`](/commands/xreadgroup) returned with the message. +However latency becomes an interesting parameter if we want to understand the delay of processing a message, in the context of blocking consumers in a consumer group, from the moment the message is produced via [`XADD`]({{< relref "/commands/xadd" >}}), to the moment the message is obtained by the consumer because [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) returned with the message. ## How serving blocked consumers works Before providing the results of performed tests, it is interesting to understand what model Redis uses in order to route stream messages (and in general actually how any blocking operation waiting for data is managed). * The blocked client is referenced in a hash table that maps keys for which there is at least one blocking consumer, to a list of consumers that are waiting for such key. 
This way, given a key that received data, we can resolve all the clients that are waiting for such data. -* When a write happens, in this case when the [`XADD`](/commands/xadd) command is called, it calls the `signalKeyAsReady()` function. This function will put the key into a list of keys that need to be processed, because such keys may have new data for blocked consumers. Note that such *ready keys* will be processed later, so in the course of the same event loop cycle, it is possible that the key will receive other writes. +* When a write happens, in this case when the [`XADD`]({{< relref "/commands/xadd" >}}) command is called, it calls the `signalKeyAsReady()` function. This function will put the key into a list of keys that need to be processed, because such keys may have new data for blocked consumers. Note that such *ready keys* will be processed later, so in the course of the same event loop cycle, it is possible that the key will receive other writes. * Finally, before returning into the event loop, the *ready keys* are finally processed. For each key the list of clients waiting for data is scanned, and if applicable, such clients will receive the new data that arrived. In the case of streams the data is the messages in the applicable range requested by the consumer. -As you can see, basically, before returning to the event loop both the client calling [`XADD`](/commands/xadd) and the clients blocked to consume messages, will have their reply in the output buffers, so the caller of [`XADD`](/commands/xadd) should receive the reply from Redis at about the same time the consumers will receive the new messages. 
+As you can see, basically, before returning to the event loop both the client calling [`XADD`]({{< relref "/commands/xadd" >}}) and the clients blocked to consume messages, will have their reply in the output buffers, so the caller of [`XADD`]({{< relref "/commands/xadd" >}}) should receive the reply from Redis at about the same time the consumers will receive the new messages. -This model is *push-based*, since adding data to the consumers buffers will be performed directly by the action of calling [`XADD`](/commands/xadd), so the latency tends to be quite predictable. +This model is *push-based*, since adding data to the consumers buffers will be performed directly by the action of calling [`XADD`]({{< relref "/commands/xadd" >}}), so the latency tends to be quite predictable. ## Latency tests results @@ -928,7 +928,7 @@ Adding a few million unacknowledged messages to the stream does not change the g A few remarks: -* Here we processed up to 10k messages per iteration, this means that the `COUNT` parameter of [`XREADGROUP`](/commands/xreadgroup) was set to 10000. This adds a lot of latency but is needed in order to allow the slow consumers to be able to keep with the message flow. So you can expect a real world latency that is a lot smaller. +* Here we processed up to 10k messages per iteration, this means that the `COUNT` parameter of [`XREADGROUP`]({{< relref "/commands/xreadgroup" >}}) was set to 10000. This adds a lot of latency but is needed in order to allow the slow consumers to be able to keep with the message flow. So you can expect a real world latency that is a lot smaller. * The system used for this benchmark is very slow compared to today's standards. diff --git a/content/develop/data-types/strings.md b/content/develop/data-types/strings.md index 7b4c4d863c..252b8c53e7 100644 --- a/content/develop/data-types/strings.md +++ b/content/develop/data-types/strings.md @@ -33,16 +33,16 @@ for a number of use cases, like caching HTML fragments or pages. 
"Deimos" {{< /clients-example >}} -As you can see using the [`SET`](/commands/set) and the [`GET`](/commands/get) commands are the way we set -and retrieve a string value. Note that [`SET`](/commands/set) will replace any existing value +As you can see using the [`SET`]({{< relref "/commands/set" >}}) and the [`GET`]({{< relref "/commands/get" >}}) commands are the way we set +and retrieve a string value. Note that [`SET`]({{< relref "/commands/set" >}}) will replace any existing value already stored into the key, in the case that the key already exists, even if -the key is associated with a non-string value. So [`SET`](/commands/set) performs an assignment. +the key is associated with a non-string value. So [`SET`]({{< relref "/commands/set" >}}) performs an assignment. Values can be strings (including binary data) of every kind, for instance you can store a jpeg image inside a value. A value can't be bigger than 512 MB. -The [`SET`](/commands/set) command has interesting options, that are provided as additional -arguments. For example, I may ask [`SET`](/commands/set) to fail if the key already exists, +The [`SET`]({{< relref "/commands/set" >}}) command has interesting options, that are provided as additional +arguments. For example, I may ask [`SET`]({{< relref "/commands/set" >}}) to fail if the key already exists, or the opposite, that it only succeed if the key already exists: {{< clients-example set_tutorial setnx_xx >}} @@ -53,17 +53,17 @@ or the opposite, that it only succeed if the key already exists: {{< /clients-example >}} There are a number of other commands for operating on strings. For example -the [`GETSET`](/commands/getset) command sets a key to a new value, returning the old value as the +the [`GETSET`]({{< relref "/commands/getset" >}}) command sets a key to a new value, returning the old value as the result. 
You can use this command, for example, if you have a -system that increments a Redis key using [`INCR`](/commands/incr) +system that increments a Redis key using [`INCR`]({{< relref "/commands/incr" >}}) every time your web site receives a new visitor. You may want to collect this information once every hour, without losing a single increment. -You can [`GETSET`](/commands/getset) the key, assigning it the new value of "0" and reading the +You can [`GETSET`]({{< relref "/commands/getset" >}}) the key, assigning it the new value of "0" and reading the old value back. The ability to set or retrieve the value of multiple keys in a single command is also useful for reduced latency. For this reason there are -the [`MSET`](/commands/mset) and [`MGET`](/commands/mget) commands: +the [`MSET`]({{< relref "/commands/mset" >}}) and [`MGET`]({{< relref "/commands/mget" >}}) commands: {{< clients-example set_tutorial mset >}} > mset bike:1 "Deimos" bike:2 "Ares" bike:3 "Vanth" @@ -74,7 +74,7 @@ the [`MSET`](/commands/mset) and [`MGET`](/commands/mget) commands: 3) "Vanth" {{< /clients-example >}} -When [`MGET`](/commands/mget) is used, Redis returns an array of values. +When [`MGET`]({{< relref "/commands/mget" >}}) is used, Redis returns an array of values. ### Strings as counters Even if strings are the basic values of Redis, there are interesting operations @@ -89,10 +89,10 @@ you can perform with them. For instance, one is atomic increment: (integer) 11 {{< /clients-example >}} -The [`INCR`](/commands/incr) command parses the string value as an integer, +The [`INCR`]({{< relref "/commands/incr" >}}) command parses the string value as an integer, increments it by one, and finally sets the obtained value as the new value. -There are other similar commands like [`INCRBY`](/commands/incrby), -[`DECR`](/commands/decr) and [`DECRBY`](/commands/decrby). 
Internally it's +There are other similar commands like [`INCRBY`]({{< relref "/commands/incrby" >}}), +[`DECR`]({{< relref "/commands/decr" >}}) and [`DECRBY`]({{< relref "/commands/decrby" >}}). Internally it's always the same command, acting in a slightly different way. What does it mean that INCR is atomic? @@ -112,26 +112,26 @@ By default, a single Redis string can be a maximum of 512 MB. ### Getting and setting Strings -* [`SET`](/commands/set) stores a string value. -* [`SETNX`](/commands/setnx) stores a string value only if the key doesn't already exist. Useful for implementing locks. -* [`GET`](/commands/get) retrieves a string value. -* [`MGET`](/commands/mget) retrieves multiple string values in a single operation. +* [`SET`]({{< relref "/commands/set" >}}) stores a string value. +* [`SETNX`]({{< relref "/commands/setnx" >}}) stores a string value only if the key doesn't already exist. Useful for implementing locks. +* [`GET`]({{< relref "/commands/get" >}}) retrieves a string value. +* [`MGET`]({{< relref "/commands/mget" >}}) retrieves multiple string values in a single operation. ### Managing counters -* [`INCRBY`](/commands/incrby) atomically increments (and decrements when passing a negative number) counters stored at a given key. -* Another command exists for floating point counters: [`INCRBYFLOAT`](/commands/incrbyfloat). +* [`INCRBY`]({{< relref "/commands/incrby" >}}) atomically increments (and decrements when passing a negative number) counters stored at a given key. +* Another command exists for floating point counters: [`INCRBYFLOAT`]({{< relref "/commands/incrbyfloat" >}}). ### Bitwise operations To perform bitwise operations on a string, see the [bitmaps data type]({{< relref "/develop/data-types/bitmaps" >}}) docs. -See the [complete list of string commands](/commands/?group=string). +See the [complete list of string commands]({{< relref "/commands/?group=string" >}}). 
## Performance Most string operations are O(1), which means they're highly efficient. -However, be careful with the [`SUBSTR`](/commands/substr), [`GETRANGE`](/commands/getrange), and [`SETRANGE`](/commands/setrange) commands, which can be O(n). +However, be careful with the [`SUBSTR`]({{< relref "/commands/substr" >}}), [`GETRANGE`]({{< relref "/commands/getrange" >}}), and [`SETRANGE`]({{< relref "/commands/setrange" >}}) commands, which can be O(n). These random-access string commands may cause performance issues when dealing with large strings. ## Alternatives diff --git a/content/develop/data-types/timeseries/configuration.md b/content/develop/data-types/timeseries/configuration.md index 18ad929852..fd5c4241eb 100644 --- a/content/develop/data-types/timeseries/configuration.md +++ b/content/develop/data-types/timeseries/configuration.md @@ -28,7 +28,7 @@ In [redis.conf]({{< relref "/operate/oss_and_stack/management/config" >}}): loadmodule ./redistimeseries.so [OPT VAL]... ``` -From the [Redis CLI]({{< relref "/develop/connect/cli" >}}), using the [MODULE LOAD](/commands/module-load/) command: +From the [Redis CLI]({{< relref "/develop/connect/cli" >}}), using the [MODULE LOAD]({{< relref "/commands/module-load" >}}) command: ``` 127.0.0.6379> MODULE LOAD redistimeseries.so [OPT VAL]... @@ -70,9 +70,9 @@ $ redis-server --loadmodule ./redistimeseries.so NUM_THREADS 3 ### COMPACTION_POLICY -Default compaction rules for newly created key with [`TS.ADD`](/commands/ts.add). +Default compaction rules for newly created key with [`TS.ADD`]({{< baseurl >}}/commands/ts.add). -Note that `COMPACTION_POLICY` has no effect on keys created with [`TS.CREATE`](/commands/ts.create). 
To understand the motivation for this behavior, consider the following scenario: Suppose a `COMPACTION_POLICY` is defined, but then one wants to manually create an additional compaction rule (using [`TS.CREATERULE`](/commands/ts.createrule)) which requires first creating an empty destination key (using [`TS.CREATE`](/commands/ts.create)). But now there is a problem: due to the `COMPACTION_POLICY`, automatic compactions would be undesirably created for that destination key. +Note that `COMPACTION_POLICY` has no effect on keys created with [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). To understand the motivation for this behavior, consider the following scenario: Suppose a `COMPACTION_POLICY` is defined, but then one wants to manually create an additional compaction rule (using [`TS.CREATERULE`]({{< baseurl >}}/commands/ts.createrule)) which requires first creating an empty destination key (using [`TS.CREATE`]({{< baseurl >}}/commands/ts.create)). But now there is a problem: due to the `COMPACTION_POLICY`, automatic compactions would be undesirably created for that destination key. Each rule is separated by a semicolon (`;`), the rule consists of multiple fields that are separated by a colon (`:`): @@ -152,7 +152,7 @@ $ redis-server --loadmodule ./redistimeseries.so COMPACTION_POLICY max:1m:1h;min Default retention period, in milliseconds, for newly created keys. -Retention period is the maximum age of samples compared to highest reported timestamp, per key. Samples are expired based solely on the difference between their timestamp and the timestamps passed to subsequent [`TS.ADD`](/commands/ts.add), [`TS.MADD`](/commands/ts.madd), [`TS.INCRBY`](/commands/ts.incrby), and [`TS.DECRBY`](/commands/ts.decrby) calls. +Retention period is the maximum age of samples compared to highest reported timestamp, per key. 
Samples are expired based solely on the difference between their timestamp and the timestamps passed to subsequent [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), and [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby) calls. The value `0` means no expiration. @@ -172,7 +172,7 @@ $ redis-server --loadmodule ./redistimeseries.so RETENTION_POLICY 25920000000 ### DUPLICATE_POLICY -Is policy for handling insertion ([`TS.ADD`](/commands/ts.add) and [`TS.MADD`](/commands/ts.madd)) of multiple samples with identical timestamps, with one of the following values: +Is policy for handling insertion ([`TS.ADD`]({{< baseurl >}}/commands/ts.add) and [`TS.MADD`]({{< baseurl >}}/commands/ts.madd)) of multiple samples with identical timestamps, with one of the following values: | policy | description | | ---------- | ---------------------------------------------------------------- | @@ -186,8 +186,8 @@ Is policy for handling insertion ([`TS.ADD`](/commands/ts.add) and [`TS.MADD`](/ #### Precedence order Since the duplication policy can be provided at different levels, the actual precedence of the used policy will be: -1. [`TS.ADD`](/commands/ts.add)'s `ON_DUPLICATE_policy` optional argument -2. Key-level policy (as set with [`TS.CREATE`](/commands/ts.create)'s and [`TS.ALTER`](/commands/ts.alter)'s `DUPLICATE_POLICY` optional argument) +1. [`TS.ADD`]({{< baseurl >}}/commands/ts.add)'s `ON_DUPLICATE_policy` optional argument +2. Key-level policy (as set with [`TS.CREATE`]({{< baseurl >}}/commands/ts.create)'s and [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter)'s `DUPLICATE_POLICY` optional argument) 3. The `DUPLICATE_POLICY` module configuration parameter 4. 
The default policy diff --git a/content/develop/data-types/timeseries/quickstart.md b/content/develop/data-types/timeseries/quickstart.md index 733f571421..006cebbbf8 100644 --- a/content/develop/data-types/timeseries/quickstart.md +++ b/content/develop/data-types/timeseries/quickstart.md @@ -107,7 +107,7 @@ OK ## Creating a timeseries -A new timeseries can be created with the [`TS.CREATE`](/commands/ts.create) command; for example, to create a timeseries named `sensor1` run the following: +A new timeseries can be created with the [`TS.CREATE`]({{< baseurl >}}/commands/ts.create) command; for example, to create a timeseries named `sensor1` run the following: ``` TS.CREATE sensor1 @@ -122,7 +122,7 @@ This will create a timeseries called `sensor1` and trim it to values of up to on ## Adding data points -For adding new data points to a timeseries we use the [`TS.ADD`](/commands/ts.add) command: +For adding new data points to a timeseries we use the [`TS.ADD`]({{< baseurl >}}/commands/ts.add) command: ``` TS.ADD key timestamp value @@ -141,14 +141,14 @@ To **add a datapoint with the current timestamp** you can use a `*` instead of a TS.ADD sensor1 * 26 ``` -You can **append data points to multiple timeseries** at the same time with the [`TS.MADD`](/commands/ts.madd) command: +You can **append data points to multiple timeseries** at the same time with the [`TS.MADD`]({{< baseurl >}}/commands/ts.madd) command: ``` TS.MADD key timestamp value [key timestamp value ...] 
``` ## Deleting data points -Data points between two timestamps (inclusive) can be deleted with the [`TS.DEL`](/commands/ts.del) command: +Data points between two timestamps (inclusive) can be deleted with the [`TS.DEL`]({{< baseurl >}}/commands/ts.del) command: ``` TS.DEL key fromTimestamp toTimestamp ``` @@ -175,7 +175,7 @@ TS.CREATE sensor1 LABELS region east ## Compaction -Another useful feature of Redis Time Series is compacting data by creating a rule for compaction ([`TS.CREATERULE`](/commands/ts.createrule)). For example, if you have collected more than one billion data points in a day, you could aggregate the data by every minute in order to downsample it, thereby reducing the dataset size to 24 * 60 = 1,440 data points. You can choose one of the many available aggregation types in order to aggregate multiple data points from a certain minute into a single one. The currently supported aggregation types are: `avg, sum, min, max, range, count, first, last, std.p, std.s, var.p, var.s and twa`. +Another useful feature of Redis Time Series is compacting data by creating a rule for compaction ([`TS.CREATERULE`]({{< baseurl >}}/commands/ts.createrule)). For example, if you have collected more than one billion data points in a day, you could aggregate the data by every minute in order to downsample it, thereby reducing the dataset size to 24 * 60 = 1,440 data points. You can choose one of the many available aggregation types in order to aggregate multiple data points from a certain minute into a single one. The currently supported aggregation types are: `avg, sum, min, max, range, count, first, last, std.p, std.s, var.p, var.s and twa`. It's important to point out that there is no data rewriting on the original timeseries; the compaction happens in a new series, while the original one stays the same. In order to prevent the original timeseries from growing indefinitely, you can use the retention option, which will trim it down to a certain period of time. 
@@ -207,7 +207,7 @@ TS.MRANGE - + FILTER area_id=32 This query will show data from all sensors (timeseries) that have a label of `area_id` with a value of `32`. The results will be grouped by timeseries. -Or we can also use the [`TS.MGET`](/commands/ts.mget) command to get the last sample that matches the specific filter: +Or we can also use the [`TS.MGET`]({{< baseurl >}}/commands/ts.mget) command to get the last sample that matches the specific filter: ``` TS.MGET FILTER area_id=32 diff --git a/content/develop/get-started/data-store.md b/content/develop/get-started/data-store.md index 7b593f63cf..845913246b 100644 --- a/content/develop/get-started/data-store.md +++ b/content/develop/get-started/data-store.md @@ -44,7 +44,7 @@ The first step is to connect to Redis. You can find further details about the co {{< /clients-example>}}
{{% alert title="Tip" color="warning" %}} -You can copy and paste the connection details from the Redis Cloud database configuration page. Here is an example connection string of a Cloud database that is hosted in the AWS region `us-east-1` and listens on port 16379: `redis-16379.c283.us-east-1-4.ec2.cloud.redislabs.com:16379`. The connection string has the format `host:port`. You must also copy and paste the username and password of your Cloud database and then either pass the credentials to your client or use the [AUTH command](/commands/auth/) after the connection is established. +You can copy and paste the connection details from the Redis Cloud database configuration page. Here is an example connection string of a Cloud database that is hosted in the AWS region `us-east-1` and listens on port 16379: `redis-16379.c283.us-east-1-4.ec2.cloud.redislabs.com:16379`. The connection string has the format `host:port`. You must also copy and paste the username and password of your Cloud database and then either pass the credentials to your client or use the [AUTH command]({{< relref "/commands/auth" >}}) after the connection is established. {{% /alert %}} ## Store and retrieve data @@ -78,17 +78,17 @@ Hashes are the equivalent of dictionaries (dicts or hash maps). Among other thin 8) "4972" {{< /clients-example >}} -You can get a complete overview of available data types in this documentation site's [data types section]({{< relref "/develop/data-types/" >}}). Each data type has commands allowing you to manipulate or retrieve data. The [commands reference](/commands/) provides a sophisticated explanation. +You can get a complete overview of available data types in this documentation site's [data types section]({{< relref "/develop/data-types/" >}}). Each data type has commands allowing you to manipulate or retrieve data. The [commands reference]({{< relref "/commands/" >}}) provides a sophisticated explanation. 
## Scan the keyspace -Each item within Redis has a unique key. All items live within the Redis [keyspace]({{< relref "/develop/use/keyspace" >}}). You can scan the Redis keyspace via the [SCAN command](/commands/scan/). Here is an example that scans for the first 100 keys that have the prefix `bike:`: +Each item within Redis has a unique key. All items live within the Redis [keyspace]({{< relref "/develop/use/keyspace" >}}). You can scan the Redis keyspace via the [SCAN command]({{< relref "/commands/scan" >}}). Here is an example that scans for the first 100 keys that have the prefix `bike:`: {{< clients-example scan_example >}} SCAN 0 MATCH "bike:*" COUNT 100 {{< /clients-example >}} -[SCAN](/commands/scan/) returns a cursor position, allowing you to scan iteratively for the next batch of keys until you reach the cursor value 0. +[SCAN]({{< relref "/commands/scan" >}}) returns a cursor position, allowing you to scan iteratively for the next batch of keys until you reach the cursor value 0. ## Next steps diff --git a/content/develop/get-started/document-database.md b/content/develop/get-started/document-database.md index 792400a091..16cd3f284f 100644 --- a/content/develop/get-started/document-database.md +++ b/content/develop/get-started/document-database.md @@ -59,7 +59,7 @@ The first step is to connect to your Redis Stack database. You can find further
{{% alert title="Tip" color="warning" %}} -You can copy and paste the connection details from the Redis Cloud database configuration page. Here is an example connection string of a Cloud database that is hosted in the AWS region `us-east-1` and listens on port 16379: `redis-16379.c283.us-east-1-4.ec2.cloud.redislabs.com:16379`. The connection string has the format `host:port`. You must also copy and paste your Cloud database's username and password and then pass the credentials to your client or use the [AUTH command](/commands/auth/) after the connection is established. +You can copy and paste the connection details from the Redis Cloud database configuration page. Here is an example connection string of a Cloud database that is hosted in the AWS region `us-east-1` and listens on port 16379: `redis-16379.c283.us-east-1-4.ec2.cloud.redislabs.com:16379`. The connection string has the format `host:port`. You must also copy and paste your Cloud database's username and password and then pass the credentials to your client or use the [AUTH command]({{< relref "/commands/auth" >}}) after the connection is established. {{% /alert %}} @@ -67,7 +67,7 @@ You can copy and paste the connection details from the Redis Cloud database conf As explained in the [in-memory data store]({{< relref "/develop/get-started/data-store" >}}) quick start guide, Redis allows you to access an item directly via its key. You also learned how to scan the keyspace. Whereby you can use other data structures (e.g., hashes and sorted sets) as secondary indexes, your application would need to maintain those indexes manually. Redis Stack turns Redis into a document database by allowing you to declare which fields are auto-indexed. Redis Stack currently supports secondary index creation on the [hashes]({{< relref "/develop/data-types/hashes" >}}) and [JSON]({{< relref "/develop/data-types/json" >}}) documents. 
-The following example shows an [FT.CREATE](/commands/ft.create/) command that creates an index with some text fields, a numeric field (price), and a tag field (condition). The text fields have a weight of 1.0, meaning they have the same relevancy in the context of full-text searches. The field names follow the [JSONPath]({{< relref "/develop/data-types/json/path" >}}) notion. Each such index field maps to a property within the JSON document. +The following example shows an [FT.CREATE]({{< baseurl >}}/commands/ft.create/) command that creates an index with some text fields, a numeric field (price), and a tag field (condition). The text fields have a weight of 1.0, meaning they have the same relevancy in the context of full-text searches. The field names follow the [JSONPath]({{< relref "/develop/data-types/json/path" >}}) notion. Each such index field maps to a property within the JSON document. {{< clients-example search_quickstart create_index >}} @@ -79,7 +79,7 @@ Any pre-existing JSON documents with a key prefix `bicycle:` are automatically a ## Add JSON documents -The example below shows you how to use the [JSON.SET](/commands/json.set/) command to create new JSON documents: +The example below shows you how to use the [JSON.SET]({{< baseurl >}}/commands/json.set/) command to create new JSON documents: {{< clients-example search_quickstart add_documents "" 2 >}} > JSON.SET "bicycle:0" "." "{\"brand\": \"Velorim\", \"model\": \"Jigger\", \"price\": 270, \"description\": \"Small and powerful, the Jigger is the best ride for the smallest of tikes! This is the tiniest kids\\u2019 pedal bike on the market available without a coaster brake, the Jigger is the vehicle of choice for the rare tenacious little rider raring to go.\", \"condition\": \"new\"}" @@ -108,7 +108,7 @@ OK ### Wildcard query -You can retrieve all indexed documents using the [FT.SEARCH](/commands/ft.search/) command. Note the `LIMIT` clause below, which allows result pagination. 
+You can retrieve all indexed documents using the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command. Note the `LIMIT` clause below, which allows result pagination. {{< clients-example search_quickstart wildcard_query "" 10 >}} > FT.SEARCH "idx:bicycle" "*" LIMIT 0 10 diff --git a/content/develop/get-started/faq.md b/content/develop/get-started/faq.md index 4f79e7dae9..a5df3d50d2 100644 --- a/content/develop/get-started/faq.md +++ b/content/develop/get-started/faq.md @@ -150,7 +150,7 @@ If you use keys with limited time to live (Redis expires) this is normal behavio * The primary generates an RDB file on the first synchronization with the replica. * The RDB file will not include keys already expired in the primary but which are still in memory. -* These keys are still in the memory of the Redis primary, even if logically expired. They'll be considered non-existent, and their memory will be reclaimed later, either incrementally or explicitly on access. While these keys are not logically part of the dataset, they are accounted for in the [`INFO`](/commands/info) output and in the [`DBSIZE`](/commands/dbsize) command. +* These keys are still in the memory of the Redis primary, even if logically expired. They'll be considered non-existent, and their memory will be reclaimed later, either incrementally or explicitly on access. While these keys are not logically part of the dataset, they are accounted for in the [`INFO`]({{< relref "/commands/info" >}}) output and in the [`DBSIZE`]({{< relref "/commands/dbsize" >}}) command. * When the replica reads the RDB file generated by the primary, this set of keys will not be loaded. Because of this, it's common for users with many expired keys to see fewer keys in the replicas. However, logically, the primary and replica will have the same content. 
diff --git a/content/develop/get-started/vector-database.md b/content/develop/get-started/vector-database.md index e3d2480799..cd53d3cda6 100644 --- a/content/develop/get-started/vector-database.md +++ b/content/develop/get-started/vector-database.md @@ -213,7 +213,7 @@ query = ( ``` {{% alert title="Note" color="warning" %}} -To utilize a vector query with the [`FT.SEARCH`](/commands/ft.search) command, you must specify DIALECT 2 or greater. +To utilize a vector query with the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command, you must specify DIALECT 2 or greater. {{% /alert %}} You must pass the vectorized query as `$query_vector` as a byte array. The following code shows an example of creating a Python NumPy array from a vectorized query prompt (`encoded_query`) as a single precision floating point array and converting it into a compact, byte-level representation that can be passed as a parameter to the query: diff --git a/content/develop/interact/programmability/_index.md b/content/develop/interact/programmability/_index.md index e327b2ba4f..3fe461f13a 100644 --- a/content/develop/interact/programmability/_index.md +++ b/content/develop/interact/programmability/_index.md @@ -22,7 +22,7 @@ Redis provides a programming interface that lets you execute custom scripts on t ## Background Redis is, by [definition](https://github.com/redis/redis/blob/unstable/MANIFESTO#L7), a _"domain-specific language for abstract data types"_. -The language that Redis speaks consists of its [commands](/commands). +The language that Redis speaks consists of its [commands]({{< relref "/commands" >}}). Most the commands specialize at manipulating core [data types](/topics/data-types-intro) in different ways. In many cases, these commands provide all the functionality that a developer requires for managing application data in Redis. @@ -42,7 +42,7 @@ Please refer to the [Redis Lua API Reference](/topics/lua-api) page for complete Redis provides two means for running scripts. 
-Firstly, and ever since Redis 2.6.0, the [`EVAL`](/commands/eval) command enables running server-side scripts. +Firstly, and ever since Redis 2.6.0, the [`EVAL`]({{< relref "/commands/eval" >}}) command enables running server-side scripts. Eval scripts provide a quick and straightforward way to have Redis run your scripts ad-hoc. However, using them means that the scripted logic is a part of your application (not an extension of the Redis server). Every applicative instance that runs a script must have the script's source code readily available for loading at any time. @@ -71,15 +71,15 @@ However, if you intend to use a slow script in your application, be aware that a ## Read-only scripts A read-only script is a script that only executes commands that don't modify any keys within Redis. -Read-only scripts can be executed either by adding the `no-writes` [flag](/topics/lua-api#script_flags) to the script or by executing the script with one of the read-only script command variants: [`EVAL_RO`](/commands/eval_ro), [`EVALSHA_RO`](/commands/evalsha_ro), or [`FCALL_RO`](/commands/fcall_ro). +Read-only scripts can be executed either by adding the `no-writes` [flag](/topics/lua-api#script_flags) to the script or by executing the script with one of the read-only script command variants: [`EVAL_RO`]({{< relref "/commands/eval_ro" >}}), [`EVALSHA_RO`]({{< relref "/commands/evalsha_ro" >}}), or [`FCALL_RO`]({{< relref "/commands/fcall_ro" >}}). They have the following properties: * They can always be executed on replicas. -* They can always be killed by the [`SCRIPT KILL`](/commands/script-kill) command. +* They can always be killed by the [`SCRIPT KILL`]({{< relref "/commands/script-kill" >}}) command. * They never fail with OOM error when redis is over the memory limit. * They are not blocked during write pauses, such as those that occur during coordinated failovers. * They cannot execute any command that may modify the data set. 
-* Currently [`PUBLISH`](/commands/publish), [`SPUBLISH`](/commands/spublish) and [`PFCOUNT`](/commands/pfcount) are also considered write commands in scripts, because they could attempt to propagate commands to replicas and AOF file. +* Currently [`PUBLISH`]({{< relref "/commands/publish" >}}), [`SPUBLISH`]({{< relref "/commands/spublish" >}}) and [`PFCOUNT`]({{< relref "/commands/pfcount" >}}) are also considered write commands in scripts, because they could attempt to propagate commands to replicas and AOF file. In addition to the benefits provided by all read-only scripts, the read-only script commands have the following advantages: @@ -90,7 +90,7 @@ In addition to the benefits provided by all read-only scripts, the read-only scr Read-only scripts and read-only script commands were introduced in Redis 7.0 -* Before Redis 7.0.1 [`PUBLISH`](/commands/publish), [`SPUBLISH`](/commands/spublish) and [`PFCOUNT`](/commands/pfcount) were not considered write commands in scripts +* Before Redis 7.0.1 [`PUBLISH`]({{< relref "/commands/publish" >}}), [`SPUBLISH`]({{< relref "/commands/spublish" >}}) and [`PFCOUNT`]({{< relref "/commands/pfcount" >}}) were not considered write commands in scripts * Before Redis 7.0.1 the `no-writes` [flag](/topics/lua-api#script_flags) did not imply `allow-oom` * Before Redis 7.0.1 the `no-writes` flag did not permit the script to run during write pauses. @@ -113,7 +113,7 @@ This default timeout is enormous since a script usually runs in less than a mill The limit is in place to handle accidental infinite loops created during development. It is possible to modify the maximum time a script can be executed with millisecond precision, -either via `redis.conf` or by using the [`CONFIG SET`](/commands/config-set) command. +either via `redis.conf` or by using the [`CONFIG SET`]({{< relref "/commands/config-set" >}}) command. The configuration parameter affecting max execution time is called `busy-reply-threshold`. 
When a script reaches the timeout threshold, it isn't terminated by Redis automatically. @@ -123,6 +123,6 @@ Interrupting the execution of a script has the potential of leaving the dataset Therefore, when a script executes longer than the configured timeout, the following happens: * Redis logs that a script is running for too long. -* It starts accepting commands again from other clients but will reply with a BUSY error to all the clients sending normal commands. The only commands allowed in this state are [`SCRIPT KILL`](/commands/script-kill), [`FUNCTION KILL`](/commands/function-kill), and `SHUTDOWN NOSAVE`. -* It is possible to terminate a script that only executes read-only commands using the [`SCRIPT KILL`](/commands/script-kill) and [`FUNCTION KILL`](/commands/function-kill) commands. These commands do not violate the scripting semantic as no data was written to the dataset by the script yet. +* It starts accepting commands again from other clients but will reply with a BUSY error to all the clients sending normal commands. The only commands allowed in this state are [`SCRIPT KILL`]({{< relref "/commands/script-kill" >}}), [`FUNCTION KILL`]({{< relref "/commands/function-kill" >}}), and `SHUTDOWN NOSAVE`. +* It is possible to terminate a script that only executes read-only commands using the [`SCRIPT KILL`]({{< relref "/commands/script-kill" >}}) and [`FUNCTION KILL`]({{< relref "/commands/function-kill" >}}) commands. These commands do not violate the scripting semantic as no data was written to the dataset by the script yet. * If the script had already performed even a single write operation, the only command allowed is `SHUTDOWN NOSAVE` that stops the server without saving the current data set on disk (basically, the server is aborted). 
diff --git a/content/develop/interact/programmability/eval-intro.md b/content/develop/interact/programmability/eval-intro.md index e8e21d0f6a..d18eef25f6 100644 --- a/content/develop/interact/programmability/eval-intro.md +++ b/content/develop/interact/programmability/eval-intro.md @@ -18,7 +18,7 @@ weight: 2 --- Redis lets users upload and execute Lua scripts on the server. -Scripts can employ programmatic control structures and use most of the [commands](/commands) while executing to access the database. +Scripts can employ programmatic control structures and use most of the [commands]({{< relref "/commands" >}}) while executing to access the database. Because scripts execute in the server, reading and writing data from scripts is very efficient. Redis guarantees the script's atomic execution. @@ -45,7 +45,7 @@ As of version 7.0, [Redis Functions](/topics/functions-intro) offer an alternati ## Getting started -We'll start scripting with Redis by using the [`EVAL`](/commands/eval) command. +We'll start scripting with Redis by using the [`EVAL`]({{< relref "/commands/eval" >}}) command. Here's our first example: @@ -54,7 +54,7 @@ Here's our first example: "Hello, scripting!" ``` -In this example, [`EVAL`](/commands/eval) takes two arguments. +In this example, [`EVAL`]({{< relref "/commands/eval" >}}) takes two arguments. The first argument is a string that consists of the script's Lua source code. The script doesn't need to include any definitions of Lua function. It is just a Lua program that will run in the Redis engine's context. @@ -136,26 +136,26 @@ For example, consider the following: OK ``` The above script accepts one key name and one value as its input arguments. -When executed, the script calls the [`SET`](/commands/set) command to set the input key, _foo_, with the string value "bar". +When executed, the script calls the [`SET`]({{< relref "/commands/set" >}}) command to set the input key, _foo_, with the string value "bar". 
## Script cache -Until this point, we've used the [`EVAL`](/commands/eval) command to run our script. +Until this point, we've used the [`EVAL`]({{< relref "/commands/eval" >}}) command to run our script. -Whenever we call [`EVAL`](/commands/eval), we also include the script's source code with the request. -Repeatedly calling [`EVAL`](/commands/eval) to execute the same set of parameterized scripts, wastes both network bandwidth and also has some overheads in Redis. +Whenever we call [`EVAL`]({{< relref "/commands/eval" >}}), we also include the script's source code with the request. +Repeatedly calling [`EVAL`]({{< relref "/commands/eval" >}}) to execute the same set of parameterized scripts, wastes both network bandwidth and also has some overheads in Redis. Naturally, saving on network and compute resources is key, so, instead, Redis provides a caching mechanism for scripts. -Every script you execute with [`EVAL`](/commands/eval) is stored in a dedicated cache that the server keeps. +Every script you execute with [`EVAL`]({{< relref "/commands/eval" >}}) is stored in a dedicated cache that the server keeps. The cache's contents are organized by the scripts' SHA1 digest sums, so the SHA1 digest sum of a script uniquely identifies it in the cache. -You can verify this behavior by running [`EVAL`](/commands/eval) and calling [`INFO`](/commands/info) afterward. +You can verify this behavior by running [`EVAL`]({{< relref "/commands/eval" >}}) and calling [`INFO`]({{< relref "/commands/info" >}}) afterward. You'll notice that the _used_memory_scripts_eval_ and _number_of_cached_scripts_ metrics grow with every new script that's executed. As mentioned above, dynamically-generated scripts are an anti-pattern. Generating scripts during the application's runtime may, and probably will, exhaust the host's memory resources for caching them. Instead, scripts should be as generic as possible and provide customized execution via their arguments. 
-A script is loaded to the server's cache by calling the [`SCRIPT LOAD`](/commands/script-load) command and providing its source code. +A script is loaded to the server's cache by calling the [`SCRIPT LOAD`]({{< relref "/commands/script-load" >}}) command and providing its source code. The server doesn't execute the script, but instead just compiles and loads it to the server's cache. Once loaded, you can execute the cached script with the SHA1 digest returned from the server. @@ -172,10 +172,10 @@ redis> EVALSHA c664a3bf70bd1d45c4284ffebb65a6f2299bfc9f 0 The Redis script cache is **always volatile**. It isn't considered as a part of the database and is **not persisted**. -The cache may be cleared when the server restarts, during fail-over when a replica assumes the master role, or explicitly by [`SCRIPT FLUSH`](/commands/script-flush). +The cache may be cleared when the server restarts, during fail-over when a replica assumes the master role, or explicitly by [`SCRIPT FLUSH`]({{< relref "/commands/script-flush" >}}). That means that cached scripts are ephemeral, and the cache's contents can be lost at any time. -Applications that use scripts should always call [`EVALSHA`](/commands/evalsha) to execute them. +Applications that use scripts should always call [`EVALSHA`]({{< relref "/commands/evalsha" >}}) to execute them. The server returns an error if the script's SHA1 digest is not in the cache. For example: @@ -184,17 +184,17 @@ redis> EVALSHA ffffffffffffffffffffffffffffffffffffffff 0 (error) NOSCRIPT No matching script ``` -In this case, the application should first load it with [`SCRIPT LOAD`](/commands/script-load) and then call [`EVALSHA`](/commands/evalsha) once more to run the cached script by its SHA1 sum. +In this case, the application should first load it with [`SCRIPT LOAD`]({{< relref "/commands/script-load" >}}) and then call [`EVALSHA`]({{< relref "/commands/evalsha" >}}) once more to run the cached script by its SHA1 sum. 
Most of [Redis' clients](/clients) already provide utility APIs for doing that automatically. Please consult your client's documentation regarding the specific details. ### `EVALSHA` in the context of pipelining -Special care should be given executing [`EVALSHA`](/commands/evalsha) in the context of a [pipelined request](/topics/pipelining). +Special care should be given when executing [`EVALSHA`]({{< relref "/commands/evalsha" >}}) in the context of a [pipelined request](/topics/pipelining). The commands in a pipelined request run in the order they are sent, but other clients' commands may be interleaved for execution between these. Because of that, the `NOSCRIPT` error can return from a pipelined request but can't be handled. -Therefore, a client library's implementation should revert to using plain [`EVAL`](/commands/eval) of parameterized in the context of a pipeline. +Therefore, a client library's implementation should revert to using plain [`EVAL`]({{< relref "/commands/eval" >}}) of parameterized scripts in the context of a pipeline. ### Script cache semantics @@ -202,7 +202,7 @@ During normal operation, an application's scripts are meant to stay indefinitely The underlying reasoning is that the script cache contents of a well-written application are unlikely to grow continuously. Even large applications that use hundreds of cached scripts shouldn't be an issue in terms of cache memory usage. -The only way to flush the script cache is by explicitly calling the [`SCRIPT FLUSH`](/commands/script-flush) command. +The only way to flush the script cache is by explicitly calling the [`SCRIPT FLUSH`]({{< relref "/commands/script-flush" >}}) command. Running the command will _completely flush_ the scripts cache, removing all the scripts executed so far. Typically, this is only needed when the instance is going to be instantiated for another customer or application in a cloud environment. 
@@ -210,31 +210,31 @@ Also, as already mentioned, restarting a Redis instance flushes the non-persiste However, from the point of view of the Redis client, there are only two ways to make sure that a Redis instance was not restarted between two different commands: * The connection we have with the server is persistent and was never closed so far. -* The client explicitly checks the `run_id` field in the [`INFO`](/commands/info) command to ensure the server was not restarted and is still the same process. +* The client explicitly checks the `run_id` field in the [`INFO`]({{< relref "/commands/info" >}}) command to ensure the server was not restarted and is still the same process. -Practically speaking, it is much simpler for the client to assume that in the context of a given connection, cached scripts are guaranteed to be there unless the administrator explicitly invoked the [`SCRIPT FLUSH`](/commands/script-flush) command. +Practically speaking, it is much simpler for the client to assume that in the context of a given connection, cached scripts are guaranteed to be there unless the administrator explicitly invoked the [`SCRIPT FLUSH`]({{< relref "/commands/script-flush" >}}) command. The fact that the user can count on Redis to retain cached scripts is semantically helpful in the context of pipelining. ## The `SCRIPT` command -The Redis [`SCRIPT`](/commands/script) provides several ways for controlling the scripting subsystem. +The Redis [`SCRIPT`]({{< relref "/commands/script" >}}) provides several ways for controlling the scripting subsystem. These are: -* [`SCRIPT FLUSH`](/commands/script-flush): this command is the only way to force Redis to flush the scripts cache. +* [`SCRIPT FLUSH`]({{< relref "/commands/script-flush" >}}): this command is the only way to force Redis to flush the scripts cache. It is most useful in environments where the same Redis instance is reassigned to different uses. 
It is also helpful for testing client libraries' implementations of the scripting feature. -* [`SCRIPT EXISTS`](/commands/script-exists): given one or more SHA1 digests as arguments, this command returns an array of _1_'s and _0_'s. - _1_ means the specific SHA1 is recognized as a script already present in the scripting cache. _0_'s meaning is that a script with this SHA1 wasn't loaded before (or at least never since the latest call to [`SCRIPT FLUSH`](/commands/script-flush)). +* [`SCRIPT EXISTS`]({{< relref "/commands/script-exists" >}}): given one or more SHA1 digests as arguments, this command returns an array of _1_'s and _0_'s. + _1_ means the specific SHA1 is recognized as a script already present in the scripting cache. _0_'s meaning is that a script with this SHA1 wasn't loaded before (or at least never since the latest call to [`SCRIPT FLUSH`]({{< relref "/commands/script-flush" >}})). * `SCRIPT LOAD script`: this command registers the specified script in the Redis script cache. - It is a useful command in all the contexts where we want to ensure that [`EVALSHA`](/commands/evalsha) doesn't not fail (for instance, in a pipeline or when called from a [[`MULTI`](/commands/multi)/[`EXEC`](/commands/exec) transaction](/topics/transactions)), without the need to execute the script. + It is a useful command in all the contexts where we want to ensure that [`EVALSHA`]({{< relref "/commands/evalsha" >}}) doesn't fail (for instance, in a pipeline or when called from a [[`MULTI`]({{< relref "/commands/multi" >}})/[`EXEC`]({{< relref "/commands/exec" >}}) transaction](/topics/transactions)), without the need to execute the script. -* [`SCRIPT KILL`](/commands/script-kill): this command is the only way to interrupt a long-running script (a.k.a slow script), short of shutting down the server. 
+* [`SCRIPT KILL`]({{< relref "/commands/script-kill" >}}): this command is the only way to interrupt a long-running script (a.k.a slow script), short of shutting down the server. A script is deemed as slow once its execution's duration exceeds the configured [maximum execution time](/topics/programmability#maximum-execution-time) threshold. - The [`SCRIPT KILL`](/commands/script-kill) command can be used only with scripts that did not modify the dataset during their execution (since stopping a read-only script does not violate the scripting engine's guaranteed atomicity). + The [`SCRIPT KILL`]({{< relref "/commands/script-kill" >}}) command can be used only with scripts that did not modify the dataset during their execution (since stopping a read-only script does not violate the scripting engine's guaranteed atomicity). -* [`SCRIPT DEBUG`](/commands/script-debug): controls use of the built-in [Redis Lua scripts debugger](/topics/ldb). +* [`SCRIPT DEBUG`]({{< relref "/commands/script-debug" >}}): controls use of the built-in [Redis Lua scripts debugger](/topics/ldb). ## Script replication @@ -270,14 +270,14 @@ We call this **script effects replication**. starting with Redis 5.0, script effects replication is the default mode and does not need to be explicitly enabled. In this replication mode, while Lua scripts are executed, Redis collects all the commands executed by the Lua scripting engine that actually modify the dataset. -When the script execution finishes, the sequence of commands that the script generated are wrapped into a [[`MULTI`](/commands/multi)/[`EXEC`](/commands/exec) transaction](/topics/transactions) and are sent to the replicas and AOF. +When the script execution finishes, the sequence of commands that the script generated are wrapped into a [[`MULTI`]({{< relref "/commands/multi" >}})/[`EXEC`]({{< relref "/commands/exec" >}}) transaction](/topics/transactions) and are sent to the replicas and AOF. 
This is useful in several ways depending on the use case: * When the script is slow to compute, but the effects can be summarized by a few write commands, it is a shame to re-compute the script on the replicas or when reloading the AOF. In this case, it is much better to replicate just the effects of the script. * When script effects replication is enabled, the restrictions on non-deterministic functions are removed. - You can, for example, use the [`TIME`](/commands/time) or [`SRANDMEMBER`](/commands/srandmember) commands inside your scripts freely at any place. + You can, for example, use the [`TIME`]({{< relref "/commands/time" >}}) or [`SRANDMEMBER`]({{< relref "/commands/srandmember" >}}) commands inside your scripts freely at any place. * The Lua PRNG in this mode is seeded randomly on every call. Unless already enabled by the server's configuration or defaults (before Redis 7.0), you need to issue the following Lua command before the script performs a write: @@ -317,15 +317,15 @@ the script **always must** execute the same Redis _write_ commands with the same Operations performed by the script can't depend on any hidden (non-explicit) information or state that may change as the script execution proceeds or between different executions of the script. Nor can it depend on any external input from I/O devices. -Acts such as using the system time, calling Redis commands that return random values (e.g., [`RANDOMKEY`](/commands/randomkey)), or using Lua's random number generator, could result in scripts that will not evaluate consistently. +Acts such as using the system time, calling Redis commands that return random values (e.g., [`RANDOMKEY`]({{< relref "/commands/randomkey" >}})), or using Lua's random number generator, could result in scripts that will not evaluate consistently. To enforce the deterministic behavior of scripts, Redis does the following: * Lua does not export commands to access the system time or other external states. 
-* Redis will block the script with an error if a script calls a Redis command able to alter the data set **after** a Redis _random_ command like [`RANDOMKEY`](/commands/randomkey), [`SRANDMEMBER`](/commands/srandmember), [`TIME`](/commands/time). +* Redis will block the script with an error if a script calls a Redis command able to alter the data set **after** a Redis _random_ command like [`RANDOMKEY`]({{< relref "/commands/randomkey" >}}), [`SRANDMEMBER`]({{< relref "/commands/srandmember" >}}), [`TIME`]({{< relref "/commands/time" >}}). That means that read-only scripts that don't modify the dataset can call those commands. - Note that a _random command_ does not necessarily mean a command that uses random numbers: any non-deterministic command is considered as a random command (the best example in this regard is the [`TIME`](/commands/time) command). -* In Redis version 4.0, commands that may return elements in random order, such as [`SMEMBERS`](/commands/smembers) (because Redis Sets are _unordered_), exhibit a different behavior when called from Lua, + Note that a _random command_ does not necessarily mean a command that uses random numbers: any non-deterministic command is considered as a random command (the best example in this regard is the [`TIME`]({{< relref "/commands/time" >}}) command). +* In Redis version 4.0, commands that may return elements in random order, such as [`SMEMBERS`]({{< relref "/commands/smembers" >}}) (because Redis Sets are _unordered_), exhibit a different behavior when called from Lua, and undergo a silent lexicographical sorting filter before returning data to Lua scripts. So `redis.call("SMEMBERS",KEYS[1])` will always return the Set elements in the same order, while the same command invoked by normal clients may return different results even if the key contains exactly the same elements. However, starting with Redis 5.0, this ordering is no longer performed because replicating effects circumvents this type of non-determinism. 
@@ -415,7 +415,7 @@ The Lua debugger is described in the [Lua scripts debugging](/topics/ldb) sectio When memory usage in Redis exceeds the `maxmemory` limit, the first write command encountered in the script that uses additional memory will cause the script to abort (unless [`redis.pcall`](/topics/lua-api#redis.pcall) was used). -However, an exception to the above is when the script's first write command does not use additional memory, as is the case with (for example, [`DEL`](/commands/del) and [`LREM`](/commands/lrem)). +However, an exception to the above is when the script's first write command does not use additional memory, as is the case with (for example, [`DEL`]({{< relref "/commands/del" >}}) and [`LREM`]({{< relref "/commands/lrem" >}})). In this case, Redis will allow all commands in the script to run to ensure atomicity. If subsequent writes in the script consume additional memory, Redis' memory usage can exceed the threshold set by the `maxmemory` configuration directive. diff --git a/content/develop/interact/programmability/functions-intro.md b/content/develop/interact/programmability/functions-intro.md index ca4f2a9ea4..462bd137f8 100644 --- a/content/develop/interact/programmability/functions-intro.md +++ b/content/develop/interact/programmability/functions-intro.md @@ -21,15 +21,15 @@ Redis Functions is an API for managing code to be executed on the server. This f ## Prologue (or, what's wrong with Eval Scripts?) -Prior versions of Redis made scripting available only via the [`EVAL`](/commands/eval) command, which allows a Lua script to be sent for execution by the server. +Prior versions of Redis made scripting available only via the [`EVAL`]({{< relref "/commands/eval" >}}) command, which allows a Lua script to be sent for execution by the server. The core use cases for [Eval Scripts](/topics/eval-intro) is executing part of your application logic inside Redis, efficiently and atomically. 
Such script can perform conditional updates across multiple keys, possibly combining several different data types. -Using [`EVAL`](/commands/eval) requires that the application sends the entire script for execution every time. -Because this results in network and script compilation overheads, Redis provides an optimization in the form of the [`EVALSHA`](/commands/evalsha) command. By first calling [`SCRIPT LOAD`](/commands/script-load) to obtain the script's SHA1, the application can invoke it repeatedly afterward with its digest alone. +Using [`EVAL`]({{< relref "/commands/eval" >}}) requires that the application sends the entire script for execution every time. +Because this results in network and script compilation overheads, Redis provides an optimization in the form of the [`EVALSHA`]({{< relref "/commands/evalsha" >}}) command. By first calling [`SCRIPT LOAD`]({{< relref "/commands/script-load" >}}) to obtain the script's SHA1, the application can invoke it repeatedly afterward with its digest alone. By design, Redis only caches the loaded scripts. -That means that the script cache can become lost at any time, such as after calling [`SCRIPT FLUSH`](/commands/script-flush), after restarting the server, or when failing over to a replica. +That means that the script cache can become lost at any time, such as after calling [`SCRIPT FLUSH`]({{< relref "/commands/script-flush" >}}), after restarting the server, or when failing over to a replica. The application is responsible for reloading scripts during runtime if any are missing. The underlying assumption is that scripts are a part of the application and not maintained by the Redis server. @@ -37,8 +37,8 @@ This approach suits many light-weight scripting use cases, but introduces severa 1. All client application instances must maintain a copy of all scripts. That means having some mechanism that applies script updates to all of the application's instances. 1. 
Calling cached scripts within the context of a [transaction](/topics/transactions) increases the probability of the transaction failing because of a missing script. Being more likely to fail makes using cached scripts as building blocks of workflows less attractive. -1. SHA1 digests are meaningless, making debugging the system extremely hard (e.g., in a [`MONITOR`](/commands/monitor) session). -1. When used naively, [`EVAL`](/commands/eval) promotes an anti-pattern in which scripts the client application renders verbatim scripts instead of responsibly using the [`KEYS` and `ARGV` Lua APIs](/topics/lua-api#runtime-globals). +1. SHA1 digests are meaningless, making debugging the system extremely hard (e.g., in a [`MONITOR`]({{< relref "/commands/monitor" >}}) session). +1. When used naively, [`EVAL`]({{< relref "/commands/eval" >}}) promotes an anti-pattern in which scripts the client application renders verbatim scripts instead of responsibly using the [`KEYS` and `ARGV` Lua APIs](/topics/lua-api#runtime-globals). 1. Because they are ephemeral, a script can't call another script. This makes sharing and reusing code between scripts nearly impossible, short of client-side preprocessing (see the first point). To address these needs while avoiding breaking changes to already-established and well-liked ephemeral scripts, Redis v7.0 introduces Redis Functions. @@ -93,7 +93,7 @@ Let's explore Redis Functions via some tangible examples and Lua snippets. At this point, if you're unfamiliar with Lua in general and specifically in Redis, you may benefit from reviewing some of the examples in [Introduction to Eval Scripts](/topics/eval-intro) and [Lua API](/topics/lua-api) pages for a better grasp of the language. Every Redis function belongs to a single library that's loaded to Redis. -Loading a library to the database is done with the [`FUNCTION LOAD`](/commands/function-load) command. 
+Loading a library to the database is done with the [`FUNCTION LOAD`]({{< relref "/commands/function-load" >}}) command. The command gets the library payload as input, the library payload must start with Shebang statement that provides a metadata about the library (like the engine to use and the library name). The Shebang format is: @@ -110,7 +110,7 @@ redis> FUNCTION LOAD "#!lua name=mylib\n" The error is expected, as there are no functions in the loaded library. Every library needs to include at least one registered function to load successfully. A registered function is named and acts as an entry point to the library. -When the target execution engine handles the [`FUNCTION LOAD`](/commands/function-load) command, it registers the library's functions. +When the target execution engine handles the [`FUNCTION LOAD`]({{< relref "/commands/function-load" >}}) command, it registers the library's functions. The Lua engine compiles and evaluates the library source code when loaded, and expects functions to be registered by calling the `redis.register_function()` API. @@ -126,7 +126,7 @@ redis.register_function( In the example above, we provide two arguments about the function to Lua's `redis.register_function()` API: its registered name and a callback. -We can load our library and use [`FCALL`](/commands/fcall) to call the registered function: +We can load our library and use [`FCALL`]({{< relref "/commands/fcall" >}}) to call the registered function: ``` redis> FUNCTION LOAD "#!lua name=mylib\nredis.register_function('knockknock', function() return 'Who\\'s there?' end)" @@ -135,9 +135,9 @@ redis> FCALL knockknock 0 "Who's there?" ``` -Notice that the [`FUNCTION LOAD`](/commands/function-load) command returns the name of the loaded library, this name can later be used [`FUNCTION LIST`](/commands/function-list) and [`FUNCTION DELETE`](/commands/function-delete). 
+Notice that the [`FUNCTION LOAD`]({{< relref "/commands/function-load" >}}) command returns the name of the loaded library, this name can later be used [`FUNCTION LIST`]({{< relref "/commands/function-list" >}}) and [`FUNCTION DELETE`]({{< relref "/commands/function-delete" >}}). -We've provided [`FCALL`](/commands/fcall) with two arguments: the function's registered name and the numeric value `0`. This numeric value indicates the number of key names that follow it (the same way [`EVAL`](/commands/eval) and [`EVALSHA`](/commands/evalsha) work). +We've provided [`FCALL`]({{< relref "/commands/fcall" >}}) with two arguments: the function's registered name and the numeric value `0`. This numeric value indicates the number of key names that follow it (the same way [`EVAL`]({{< relref "/commands/eval" >}}) and [`EVALSHA`]({{< relref "/commands/evalsha" >}}) work). We'll explain immediately how key names and additional arguments are available to the function. As this simple example doesn't involve keys, we simply use 0 for now. @@ -154,10 +154,10 @@ To ensure the correct execution of Redis Functions, both in standalone and clust Any input to the function that isn't the name of a key is a regular input argument. Now, let's pretend that our application stores some of its data in Redis Hashes. -We want an [`HSET`](/commands/hset)-like way to set and update fields in said Hashes and store the last modification time in a new field named `_last_modified_`. +We want an [`HSET`]({{< relref "/commands/hset" >}})-like way to set and update fields in said Hashes and store the last modification time in a new field named `_last_modified_`. We can implement a function to do all that. -Our function will call [`TIME`](/commands/time) to get the server's clock reading and update the target Hash with the new fields' values and the modification's timestamp. 
+Our function will call [`TIME`]({{< relref "/commands/time" >}}) to get the server's clock reading and update the target Hash with the new fields' values and the modification's timestamp. The function we'll implement accepts the following input arguments: the Hash's key name and the field-value pairs to update. The Lua API for Redis Functions makes these inputs accessible as the first and second arguments to the function's callback. @@ -184,7 +184,7 @@ If we create a new file named _mylib.lua_ that consists of the library's definit $ cat mylib.lua | redis-cli -x FUNCTION LOAD REPLACE ``` -We've added the `REPLACE` modifier to the call to [`FUNCTION LOAD`](/commands/function-load) to tell Redis that we want to overwrite the existing library definition. +We've added the `REPLACE` modifier to the call to [`FUNCTION LOAD`]({{< relref "/commands/function-load" >}}) to tell Redis that we want to overwrite the existing library definition. Otherwise, we would have gotten an error from Redis complaining that the library already exists. Now that the library's updated code is loaded to Redis, we can proceed and call our function: @@ -201,7 +201,7 @@ redis> HGETALL myhash 6) "another value" ``` -In this case, we had invoked [`FCALL`](/commands/fcall) with _1_ as the number of key name arguments. +In this case, we had invoked [`FCALL`]({{< relref "/commands/fcall" >}}) with _1_ as the number of key name arguments. That means that the function's first input argument is a name of a key (and is therefore included in the callback's `keys` table). After that first argument, all following input arguments are considered regular arguments and constitute the `args` table passed to the callback as its second argument. 
@@ -255,7 +255,7 @@ Assuming you've saved the library's implementation in the _mylib.lua_ file, you $ cat mylib.lua | redis-cli -x FUNCTION LOAD REPLACE ``` -Once loaded, you can call the library's functions with [`FCALL`](/commands/fcall): +Once loaded, you can call the library's functions with [`FCALL`]({{< relref "/commands/fcall" >}}): ``` redis> FCALL my_hgetall 1 myhash @@ -267,7 +267,7 @@ redis> FCALL my_hlastmodified 1 myhash "1640772721" ``` -You can also get the library's details with the [`FUNCTION LIST`](/commands/function-list) command: +You can also get the library's details with the [`FUNCTION LIST`]({{< relref "/commands/function-list" >}}) command: ``` redis> FUNCTION LIST @@ -406,11 +406,11 @@ To do that, it is possible to use `redis-cli --functions-rdb` to extract the fun Redis needs to have some information about how a function is going to behave when executed, in order to properly enforce resource usage policies and maintain data consistency. -For example, Redis needs to know that a certain function is read-only before permitting it to execute using [`FCALL_RO`](/commands/fcall_ro) on a read-only replica. +For example, Redis needs to know that a certain function is read-only before permitting it to execute using [`FCALL_RO`]({{< relref "/commands/fcall_ro" >}}) on a read-only replica. By default, Redis assumes that all functions may perform arbitrary read or write operations. Function Flags make it possible to declare more specific function behavior at the time of registration. Let's see how this works. -In our previous example, we defined two functions that only read data. We can try executing them using [`FCALL_RO`](/commands/fcall_ro) against a read-only replica. +In our previous example, we defined two functions that only read data. We can try executing them using [`FCALL_RO`]({{< relref "/commands/fcall_ro" >}}) against a read-only replica. 
``` redis > FCALL_RO my_hgetall 1 myhash @@ -421,8 +421,8 @@ Redis returns this error because a function can, in theory, perform both read an As a safeguard and by default, Redis assumes that the function does both, so it blocks its execution. The server will reply with this error in the following cases: -1. Executing a function with [`FCALL`](/commands/fcall) against a read-only replica. -2. Using [`FCALL_RO`](/commands/fcall_ro) to execute a function. +1. Executing a function with [`FCALL`]({{< relref "/commands/fcall" >}}) against a read-only replica. +2. Using [`FCALL_RO`]({{< relref "/commands/fcall_ro" >}}) to execute a function. 3. A disk error was detected (Redis is unable to persist so it rejects writes). In these cases, you can add the `no-writes` flag to the function's registration, disable the safeguard and allow them to run. @@ -444,7 +444,7 @@ redis.register_function{ } ``` -Once we've replaced the library, Redis allows running both `my_hgetall` and `my_hlastmodified` with [`FCALL_RO`](/commands/fcall_ro) against a read-only replica: +Once we've replaced the library, Redis allows running both `my_hgetall` and `my_hlastmodified` with [`FCALL_RO`]({{< relref "/commands/fcall_ro" >}}) against a read-only replica: ``` redis> FCALL_RO my_hgetall 1 myhash diff --git a/content/develop/interact/programmability/lua-api.md b/content/develop/interact/programmability/lua-api.md index 3889746963..b2f937b558 100644 --- a/content/develop/interact/programmability/lua-api.md +++ b/content/develop/interact/programmability/lua-api.md @@ -135,14 +135,14 @@ Following is the API provided by the _redis_ object instance. The `redis.call()` function calls a given Redis command and returns its reply. Its inputs are the command and arguments, and once called, it executes the command in Redis and returns the reply. 
-For example, we can call the [`ECHO`](/commands/echo) command from a script and return its reply like so: +For example, we can call the [`ECHO`]({{< relref "/commands/echo" >}}) command from a script and return its reply like so: ```lua return redis.call('ECHO', 'Echo, echo... eco... o...') ``` If and when `redis.call()` triggers a runtime exception, the raw exception is raised back to the user as an error, automatically. -Therefore, attempting to execute the following ephemeral script will fail and generate a runtime exception because [`ECHO`](/commands/echo) accepts exactly one argument: +Therefore, attempting to execute the following ephemeral script will fail and generate a runtime exception because [`ECHO`]({{< relref "/commands/echo" >}}) accepts exactly one argument: ```lua redis> EVAL "return redis.call('ECHO', 'Echo,', 'echo... ', 'eco... ', 'o...')" 0 @@ -335,12 +335,12 @@ By default, all write commands that a script executes are replicated. Sometimes, however, better control over this behavior can be helpful. This can be the case, for example, when storing intermediate values in the master alone. -Consider a script that intersects two sets and stores the result in a temporary key with [`SUNIONSTORE`](/commands/sunionstore). -It then picks five random elements ([`SRANDMEMBER`](/commands/srandmember)) from the intersection and stores ([`SADD`](/commands/sadd)) them in another set. +Consider a script that intersects two sets and stores the result in a temporary key with [`SUNIONSTORE`]({{< relref "/commands/sunionstore" >}}). +It then picks five random elements ([`SRANDMEMBER`]({{< relref "/commands/srandmember" >}})) from the intersection and stores ([`SADD`]({{< relref "/commands/sadd" >}})) them in another set. Finally, before returning, it deletes the temporary key that stores the intersection of the two source sets. In this case, only the new set with its five randomly-chosen elements needs to be replicated. 
-Replicating the [`SUNIONSTORE`](/commands/sunionstore) command and the [`DEL`](/commands/del)ition of the temporary key is unnecessary and wasteful. +Replicating the [`SUNIONSTORE`]({{< relref "/commands/sunionstore" >}}) command and the [`DEL`]({{< relref "/commands/del" >}})ition of the temporary key is unnecessary and wasteful. The `redis.set_repl()` function instructs the server how to treat subsequent write commands in terms of replication. It accepts a single input argument that only be one of the following: @@ -416,7 +416,7 @@ The function will raise an error if the passed command or its arguments are inva * Available in scripts: no * Available in functions: yes -This function is only available from the context of the [`FUNCTION LOAD`](/commands/function-load) command. +This function is only available from the context of the [`FUNCTION LOAD`]({{< relref "/commands/function-load" >}}) command. When called, it registers a function to the loaded library. The function can be called either with positional or named arguments. @@ -452,7 +452,7 @@ redis> FUNCTION LOAD "#!lua name=mylib\n redis.register_function{function_name=' **Important:** Use script flags with care, which may negatively impact if misused. -Note that the default for Eval scripts are different than the default for functions that are mentioned below, see [Eval Flags]({{< relref "/develop/interact/programmability/eval-intro" >}}) +Note that the default for Eval scripts are different than the default for functions that are mentioned below, see [Eval Flags]({{< baseurl >}}/develop/interact/programmability/eval-intro#eval-flags) When you register a function or load an Eval script, the server does not know how it accesses the database. By default, Redis assumes that all scripts read and write data. @@ -468,25 +468,25 @@ You can use the following flags and instruct the server to treat the scripts' ex * `no-writes`: this flag indicates that the script only reads data but never writes. 
By default, Redis will deny the execution of flagged scripts (Functions and Eval scripts with [shebang](/topics/eval-intro#eval-flags)) against read-only replicas, as they may attempt to perform writes. - Similarly, the server will not allow calling scripts with [`FCALL_RO`](/commands/fcall_ro) / [`EVAL_RO`](/commands/eval_ro). + Similarly, the server will not allow calling scripts with [`FCALL_RO`]({{< relref "/commands/fcall_ro" >}}) / [`EVAL_RO`]({{< relref "/commands/eval_ro" >}}). Lastly, when data persistence is at risk due to a disk error, execution is blocked as well. Using this flag allows executing the script: - 1. With [`FCALL_RO`](/commands/fcall_ro) / [`EVAL_RO`](/commands/eval_ro) + 1. With [`FCALL_RO`]({{< relref "/commands/fcall_ro" >}}) / [`EVAL_RO`]({{< relref "/commands/eval_ro" >}}) 2. On read-only replicas. 3. Even if there's a disk error (Redis is unable to persist so it rejects writes). 4. When over the memory limit since it implies the script doesn't increase memory consumption (see `allow-oom` below) However, note that the server will return an error if the script attempts to call a write command. - Also note that currently [`PUBLISH`](/commands/publish), [`SPUBLISH`](/commands/spublish) and [`PFCOUNT`](/commands/pfcount) are also considered write commands in scripts, because they could attempt to propagate commands to replicas and AOF file. + Also note that currently [`PUBLISH`]({{< relref "/commands/publish" >}}), [`SPUBLISH`]({{< relref "/commands/spublish" >}}) and [`PFCOUNT`]({{< relref "/commands/pfcount" >}}) are also considered write commands in scripts, because they could attempt to propagate commands to replicas and AOF file. 
- For more information please refer to [Read-only scripts]({{< relref "/develop/interact/programmability/" >}}) + For more information please refer to [Read-only scripts]({{< baseurl >}}/develop/interact/programmability/#read-only_scripts) * `allow-oom`: use this flag to allow a script to execute when the server is out of memory (OOM). Unless used, Redis will deny the execution of flagged scripts (Functions and Eval scripts with [shebang](/topics/eval-intro#eval-flags)) when in an OOM state. Furthermore, when you use this flag, the script can call any Redis command, including commands that aren't usually allowed in this state. - Specifying `no-writes` or using [`FCALL_RO`](/commands/fcall_ro) / [`EVAL_RO`](/commands/eval_ro) also implies the script can run in OOM state (without specifying `allow-oom`) + Specifying `no-writes` or using [`FCALL_RO`]({{< relref "/commands/fcall_ro" >}}) / [`EVAL_RO`]({{< relref "/commands/eval_ro" >}}) also implies the script can run in OOM state (without specifying `allow-oom`) * `allow-stale`: a flag that enables running the flagged scripts (Functions and Eval scripts with [shebang](/topics/eval-intro#eval-flags)) against a stale replica when the `replica-serve-stale-data` config is set to `no` . @@ -508,7 +508,7 @@ You can use the following flags and instruct the server to treat the scripts' ex This flag has no effect when cluster mode is disabled. -Please refer to [Function Flags]({{< relref "/develop/interact/programmability/functions-intro" >}}) and [Eval Flags]({{< relref "/develop/interact/programmability/eval-intro" >}}) for a detailed example. +Please refer to [Function Flags]({{< baseurl >}}/develop/interact/programmability/functions-intro#function-flags) and [Eval Flags]({{< baseurl >}}/develop/interact/programmability/eval-intro#eval-flags) for a detailed example. ###
`redis.REDIS_VERSION` @@ -551,7 +551,7 @@ Type conversion from Redis protocol replies (i.e., the replies from `redis.call( The default protocol version during script executions is RESP2. The script may switch the replies' protocol versions by calling the `redis.setresp()` function. -Type conversion from a script's returned Lua data type depends on the user's choice of protocol (see the [`HELLO`](/commands/hello) command). +Type conversion from a script's returned Lua data type depends on the user's choice of protocol (see the [`HELLO`]({{< relref "/commands/hello" >}}) command). The following sections describe the type conversion rules between Lua and Redis per the protocol's version. @@ -587,7 +587,7 @@ There are three additional rules to note about converting Lua to Redis data type There is no distinction between integers and floats. So we always convert Lua numbers into integer replies, removing the decimal part of the number, if any. **If you want to return a Lua float, it should be returned as a string**, - exactly like Redis itself does (see, for instance, the [`ZSCORE`](/commands/zscore) command). + exactly like Redis itself does (see, for instance, the [`ZSCORE`]({{< relref "/commands/zscore" >}}) command). * There's [no simple way to have nils inside Lua arrays](http://www.lua.org/pil/19.1.html) due to Lua's table semantics. Therefore, when Redis converts a Lua array to RESP, the conversion stops when it encounters a Lua `nil` value. @@ -661,9 +661,9 @@ That means, for example, that returning the RESP3 map type to a RESP2 connection ## Additional notes about scripting -### Using [`SELECT`](/commands/select) inside scripts +### Using [`SELECT`]({{< relref "/commands/select" >}}) inside scripts -You can call the [`SELECT`](/commands/select) command from your Lua scripts, like you can with any normal client connection. +You can call the [`SELECT`]({{< relref "/commands/select" >}}) command from your Lua scripts, like you can with any normal client connection. 
However, one subtle aspect of the behavior changed between Redis versions 2.8.11 and 2.8.12. Prior to Redis version 2.8.12, the database selected by the Lua script was *set as the current database* for the client connection that had called it. As of Redis version 2.8.12, the database selected by the Lua script only affects the execution context of the script, and does not modify the database that's selected by the client calling the script. diff --git a/content/develop/interact/programmability/triggers-and-functions/Configuration.md b/content/develop/interact/programmability/triggers-and-functions/Configuration.md index 0a6554565a..427f8256ae 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Configuration.md +++ b/content/develop/interact/programmability/triggers-and-functions/Configuration.md @@ -24,11 +24,11 @@ The following sections describe the configuration options and how to set them. ## Bootstrap configuration You can set your configuration options when the module is loaded. -When the module is loaded at start time, the module configuration can be defined in the Redis configuration file. When loading the module at runtime the configuration can be given to the [`MODULE LOADEX`](/commands/module-loadex) command. Each configuration must be prefixed with the module name, `redisgears_2.`. +When the module is loaded at start time, the module configuration can be defined in the Redis configuration file. When loading the module at runtime the configuration can be given to the [`MODULE LOADEX`]({{< relref "/commands/module-loadex" >}}) command. Each configuration must be prefixed with the module name, `redisgears_2.`. ## Runtime configuration -You may set certain configuration options at runtime. Setting a configuration at runtime is done using [`CONFIG SET`](/commands/config-set) command. Here each configuration must be prefixed with the module name, `redisgears_2.`. +You may set certain configuration options at runtime. 
Setting a configuration at runtime is done using [`CONFIG SET`]({{< relref "/commands/config-set" >}}) command. Here each configuration must be prefixed with the module name, `redisgears_2.`. Example: @@ -187,7 +187,7 @@ No ## lock-redis-timeout The `lock-redis-timeout` configuration option controls the maximum amount of time (in MS) a library can lock Redis. Exceeding this limit is considered a fatal error and will be handled based on the [library-fatal-failure-policy](#library-fatal-failure-policy) configuration value. This -configuration only affects library loading at runtime with [`TFUNCTION LOAD`](/commands/tfunction-load). +configuration only affects library loading at runtime with [`TFUNCTION LOAD`]({{< relref "/commands/tfunction-load" >}}). The timeout for loading a library from RDB is set separately via [db-loading-lock-redis-timeout](#db-loading-lock-redis-timeout). diff --git a/content/develop/interact/programmability/triggers-and-functions/Debugging.md b/content/develop/interact/programmability/triggers-and-functions/Debugging.md index 7be528b2ba..3a2307d302 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Debugging.md +++ b/content/develop/interact/programmability/triggers-and-functions/Debugging.md @@ -38,7 +38,7 @@ redis.registerFunction('hello', ()=> { }) ``` -After loading the library and executing the function with [`TFCALL`](/commands/tfcall), you'll see something like the following in your Redis log file: +After loading the library and executing the function with [`TFCALL`]({{< relref "/commands/tfcall" >}}), you'll see something like the following in your Redis log file: ``` 45718:M 01 Nov 2023 07:02:40.593 * Hello log @@ -46,7 +46,7 @@ After loading the library and executing the function with [`TFCALL`](/commands/t ### Use Redis pub/sub -If you don't have access to your Redis database log files, you can use pub/sub. 
The following example demonstrates how to use the [client.call]({{< relref "/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API" >}}) API to publish to a pub/sub channel. +If you don't have access to your Redis database log files, you can use pub/sub. The following example demonstrates how to use the [client.call]({{< baseurl >}}/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API#clientcall) API to publish to a pub/sub channel. ```javascript #!js api_version=1.0 name=lib diff --git a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md index 358b84906b..3e561ec87f 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md +++ b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md @@ -34,7 +34,7 @@ Use the `TFUNCION LOAD` command to create a new library in your Redis instance. OK ``` -When the library is created successfully, an `OK` response is returned. Run the [`TFUNCTION LIST`](/commands/tfunction-list) command to confirm your library was added to Redis. +When the library is created successfully, an `OK` response is returned. Run the [`TFUNCTION LIST`]({{< relref "/commands/tfunction-list" >}}) command to confirm your library was added to Redis. ```shell > TFUNCTION LIST @@ -62,14 +62,14 @@ When the library is created successfully, an `OK` response is returned. Run the 22) "default" ``` -The [`TFCALL`](/commands/tfcall) command is used to execute the JavaScript Function. If the command fails, an error will be returned. +The [`TFCALL`]({{< relref "/commands/tfcall" >}}) command is used to execute the JavaScript Function. If the command fails, an error will be returned. 
```Shell 127.0.0.1:6379> TFCALL myFirstLibrary.hello 0 "Hello World" ``` -To update the library run the [`TFUNCTION LOAD`](/commands/tfunction-load) command with the additional parameter `REPLACE`. +To update the library run the [`TFUNCTION LOAD`]({{< relref "/commands/tfunction-load" >}}) command with the additional parameter `REPLACE`. ```Shell 127.0.0.1:6379> TFUNCTION LOAD REPLACE "#!js api_version=1.0 name=myFirstLibrary\n redis.registerFunction('hello', ()=>{ return 'Hello World updated'})" diff --git a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md index c76ded42af..018d978922 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md +++ b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md @@ -41,7 +41,7 @@ You'll see the following when the library was added: -The [`TFCALL`](/commands/tfcall) command is used to execute the JavaScript Function. If the command fails, an error will be returned. Click on the **>_ CLI** button in the lower left-hand corner to open a console window and then run the command shown below. +The [`TFCALL`]({{< relref "/commands/tfcall" >}}) command is used to execute the JavaScript Function. If the command fails, an error will be returned. Click on the **>_ CLI** button in the lower left-hand corner to open a console window and then run the command shown below. @@ -84,7 +84,7 @@ Update the existing library as before and then, using the RedisInsight console, > HSET fellowship:1 name "Frodo Baggins" title "The One Ring Bearer" ``` -Run the [`HGETALL`](/commands/hgetall) command to check if the last updated time is added to the example. +Run the [`HGETALL`]({{< relref "/commands/hgetall" >}}) command to check if the last updated time is added to the example. 
```Shell > HGETALL fellowship:1 diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md index 9d96b668b6..a58009ad65 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Cluster_Support.md @@ -109,7 +109,7 @@ The remote function arguments and results are serialized in the following way: ## Execution timeout -Remote functions will not be permitted to run forever and will timeout. The timeout period can be configured using [remote-task-default-timeout]({{< relref "/develop/interact/programmability/triggers-and-functions/Configuration" >}}). When using `async_client.runOnShards` API, the timeout will be added as error to the error array. When using `async_client.runOnKey`, a timeout will cause an exception to be raised. +Remote functions will not be permitted to run forever and will timeout. The timeout period can be configured using [remote-task-default-timeout]({{< baseurl >}}/develop/interact/programmability/triggers-and-functions/Configuration#remote-task-default-timeout). When using `async_client.runOnShards` API, the timeout will be added as error to the error array. When using `async_client.runOnKey`, a timeout will cause an exception to be raised. 
## Remote function limitations

diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API.md b/content/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API.md
index 8440a0bc0c..413a6e643d 100644
--- a/content/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API.md
+++ b/content/develop/interact/programmability/triggers-and-functions/concepts/JavaScript_API.md
@@ -28,7 +28,7 @@ The triggers and functions JavaScript API provides a singleton instance of an ob

 * Since version: 2.0.0

-Register a new function that can later be invoke using [`TFCALL`](/commands/tfcall) command.
+Register a new function that can later be invoked using the [`TFCALL`]({{< relref "/commands/tfcall" >}}) command.

 ```JavaScript
 //name and callback mandatory
 redis.registerFunction(
@@ -47,7 +47,7 @@ redis.registerFunction(

 * Since version: 2.0.0

-Register a new async function that can later be invoke using [`TFCALLASYNC`](/commands/tfcallasync) command.
+Register a new async function that can later be invoked using the [`TFCALLASYNC`]({{< relref "/commands/tfcallasync" >}}) command.

 ```JavaScript
 redis.registerAsyncFunction(
diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md
index d5579ddb2e..81516880d2 100644
--- a/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md
+++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Library_Configuration.md
@@ -41,7 +41,7 @@ Run example:
 4) "1658653125"
 ```

-The problem with the above code is that the `__last_update__` field is hard coded. What if we want to allow the user to configure it at runtime?
Triggers and functions provide for specifying a library configuration at load time using a [`CONFIG`](/commands/config) argument that is passed to the [`TFUNCTION LOAD`](/commands/tfunction-load) command. The configuration argument accepts a string representation of a JSON object. The JSON will be provided to the library as a JS object under the `redis.config` variable. We can change the above example to accept the `__last_update__` field name as a library configuration. The code will look like this: +The problem with the above code is that the `__last_update__` field is hard coded. What if we want to allow the user to configure it at runtime? Triggers and functions provide for specifying a library configuration at load time using a [`CONFIG`]({{< relref "/commands/config" >}}) argument that is passed to the [`TFUNCTION LOAD`]({{< relref "/commands/tfunction-load" >}}) command. The configuration argument accepts a string representation of a JSON object. The JSON will be provided to the library as a JS object under the `redis.config` variable. We can change the above example to accept the `__last_update__` field name as a library configuration. The code will look like this: ```js #!js api_version=1.0 name=lib @@ -62,7 +62,7 @@ redis.registerFunction("hset", function(client, key, field, val){ }); ``` -Notice that in the above example we first set `last_update_field_name` to `__last_update__`, the default value in cases where a value is not provided by the configuration. Then we check if we have `last_update_field_name` in our configuration and if we do we use it. We can now load our function with a [`CONFIG`](/commands/config) argument: +Notice that in the above example we first set `last_update_field_name` to `__last_update__`, the default value in cases where a value is not provided by the configuration. Then we check if we have `last_update_field_name` in our configuration and if we do we use it. 
We can now load our function with a [`CONFIG`]({{< relref "/commands/config" >}}) argument: ```bash > redis-cli -x TFUNCTION LOAD REPLACE CONFIG '{"last_update_field_name":"last_update"}' < diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md index ae4117b3f2..ec1fe65b72 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md @@ -57,9 +57,9 @@ Running this function will return a `pong` reply: "PONG" ``` -Notice that this time, in order to invoke the function, we used [`TFCALLASYNC`](/commands/tfcallasync). **We can only invoke async functions using [`TFCALLASYNC`](/commands/tfcallasync)**. +Notice that this time, in order to invoke the function, we used [`TFCALLASYNC`]({{< relref "/commands/tfcallasync" >}}). **We can only invoke async functions using [`TFCALLASYNC`]({{< relref "/commands/tfcallasync" >}})**. -Now let's look at a more complex example. Assume we want to write a function that counts the number of hashes in Redis that have a `name` property with some value. As a first attempt, we'll write a synchronous function that uses the [`SCAN`](/commands/scan) command to scan the key space: +Now let's look at a more complex example. Assume we want to write a function that counts the number of hashes in Redis that have a `name` property with some value. 
As a first attempt, we'll write a synchronous function that uses the [`SCAN`]({{< relref "/commands/scan" >}}) command to scan the key space:

 ```js
 #!js api_version=1.0 name=lib
@@ -105,7 +105,7 @@ redis.registerAsyncFunction('test', async function(async_client, expected_name){
 });
 ```

-Both implementations return the same result, but the second function runs in the background and blocks Redis just to analyze the next batch of keys that are returned from the [`SCAN`](/commands/scan) command. Other commands will be processed in between [`SCAN`](/commands/scan) batches. Notice that the coroutine approach allows the key space to be changed while the scanning it. The function writer will need to decide if this is acceptable.
+Both implementations return the same result, but the second function runs in the background and blocks Redis just to analyze the next batch of keys that are returned from the [`SCAN`]({{< relref "/commands/scan" >}}) command. Other commands will be processed in between [`SCAN`]({{< relref "/commands/scan" >}}) batches. Notice that the coroutine approach allows the key space to be changed while scanning it. The function writer will need to decide if this is acceptable.

 # Start sync and move to async
diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md
index f91dc83f96..8b301d0705 100644
--- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md
+++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md
@@ -78,7 +78,7 @@ The `data` argument, which is passed to the consumer callback, are in the follow

 Notice that the `key` field is given only if the key can be decoded as a `JS` `String`, otherwise the value will be `null`.
-We can display trigger information using [`TFUNCTION LIST`](/commands/tfunction-list) command: +We can display trigger information using [`TFUNCTION LIST`]({{< relref "/commands/tfunction-list" >}}) command: ```bash 127.0.0.1:6379> TFUNCTION list vvv @@ -125,7 +125,7 @@ If the callback is a Coroutine, it will be executed in the background and there ## Upgrades -When upgrading existing trigger code using the `REPLACE` option of [`TFUNCTION LOAD`](/commands/tfunction-load) command, all trigger parameters can be modified. +When upgrading existing trigger code using the `REPLACE` option of [`TFUNCTION LOAD`]({{< relref "/commands/tfunction-load" >}}) command, all trigger parameters can be modified. ## Advanced usage diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md index bd68331e07..c9cb053d00 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md @@ -90,7 +90,7 @@ The reason why the record is a list of touples and not an object is because the Notice that `stream_name` and `record` fields might contains `null`'s if the data can not be decoded as string. the `*_raw` fields will always be provided and will contains the data as `JS` `ArrayBuffer`. 
-We can observe the streams which are tracked by our registered consumer using [`TFUNCTION LIST`](/commands/tfunction-list) command:
+We can observe the streams which are tracked by our registered consumer using [`TFUNCTION LIST`]({{< relref "/commands/tfunction-list" >}}) command:

 ```
 127.0.0.1:6379> TFUNCTION LIST LIBRARY lib vvv
@@ -188,7 +188,7 @@ The default values are:
 * `isStreamTrimmed` - `false`
 * `window` - 1

-It is enough that a single consumer will enable trimming so that the stream will be trimmed. The stream will be trim according to the slowest consumer that consume the stream at a given time (even if this is not the consumer that enabled the trimming). Raising exception during the callback invocation will **not prevent the trimming**. The callback should decide how to handle failures by invoke a retry or write some error log. The error will be added to the `last_error` field on [`TFUNCTION LIST`](/commands/tfunction-list) command.
+It is enough that a single consumer will enable trimming so that the stream will be trimmed. The stream will be trimmed according to the slowest consumer that consumes the stream at a given time (even if this is not the consumer that enabled the trimming). Raising an exception during the callback invocation will **not prevent the trimming**. The callback should decide how to handle failures by invoking a retry or writing some error log. The error will be added to the `last_error` field on [`TFUNCTION LIST`]({{< relref "/commands/tfunction-list" >}}) command.
## Data processing guarantees

@@ -196,7 +196,7 @@ As long as the primary shard is up and running we guarantee exactly once propert

 ## Upgrades

-When upgrading the consumer code (using the `REPLACE` option of [`TFUNCTION LOAD`](/commands/tfunction-load) command) the following consumer parameters can be updated:
+When upgrading the consumer code (using the `REPLACE` option of [`TFUNCTION LOAD`]({{< relref "/commands/tfunction-load" >}}) command) the following consumer parameters can be updated:

 * Window
 * Trimming
diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/User_Functions.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/User_Functions.md
index 4d5c89ec9e..fcd6eaf826 100644
--- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/User_Functions.md
+++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/User_Functions.md
@@ -18,7 +18,7 @@ weight: 1

 ---

-All [[`TFCALL`](/commands/tfcall)](docs/commands.md#tfcall) command arguments that follow the function name are passed to the function callback. The following example shows how to implement a simple function that returns the value of a key of type string or hash:
+All [`TFCALL`]({{< relref "/commands/tfcall" >}}) command arguments that follow the function name are passed to the function callback.
The following example shows how to implement a simple function that returns the value of a key of type string or hash: ```js #!js api_version=1.0 name=lib diff --git a/content/develop/interact/pubsub.md b/content/develop/interact/pubsub.md index 7aaa6662ae..8aae53bcb8 100644 --- a/content/develop/interact/pubsub.md +++ b/content/develop/interact/pubsub.md @@ -15,12 +15,12 @@ title: Redis Pub/Sub weight: 40 --- -[`SUBSCRIBE`](/commands/subscribe), [`UNSUBSCRIBE`](/commands/unsubscribe) and [`PUBLISH`](/commands/publish) implement the [Publish/Subscribe messaging paradigm](http://en.wikipedia.org/wiki/Publish/subscribe) where (citing Wikipedia) senders (publishers) are not programmed to send their messages to specific receivers (subscribers). +[`SUBSCRIBE`]({{< relref "/commands/subscribe" >}}), [`UNSUBSCRIBE`]({{< relref "/commands/unsubscribe" >}}) and [`PUBLISH`]({{< relref "/commands/publish" >}}) implement the [Publish/Subscribe messaging paradigm](http://en.wikipedia.org/wiki/Publish/subscribe) where (citing Wikipedia) senders (publishers) are not programmed to send their messages to specific receivers (subscribers). Rather, published messages are characterized into channels, without knowledge of what (if any) subscribers there may be. Subscribers express interest in one or more channels and only receive messages that are of interest, without knowledge of what (if any) publishers there are. This decoupling of publishers and subscribers allows for greater scalability and a more dynamic network topology. 
-For instance, to subscribe to channels "channel11" and "ch:00" the client issues a [`SUBSCRIBE`](/commands/subscribe) providing the names of the channels: +For instance, to subscribe to channels "channel11" and "ch:00" the client issues a [`SUBSCRIBE`]({{< relref "/commands/subscribe" >}}) providing the names of the channels: ```bash SUBSCRIBE channel11 ch:00 @@ -29,23 +29,23 @@ SUBSCRIBE channel11 ch:00 Messages sent by other clients to these channels will be pushed by Redis to all the subscribed clients. Subscribers receive the messages in the order that the messages are published. -A client subscribed to one or more channels shouldn't issue commands, although it can [`SUBSCRIBE`](/commands/subscribe) and [`UNSUBSCRIBE`](/commands/unsubscribe) to and from other channels. +A client subscribed to one or more channels shouldn't issue commands, although it can [`SUBSCRIBE`]({{< relref "/commands/subscribe" >}}) and [`UNSUBSCRIBE`]({{< relref "/commands/unsubscribe" >}}) to and from other channels. The replies to subscription and unsubscribing operations are sent in the form of messages so that the client can just read a coherent stream of messages where the first element indicates the type of message. 
The commands that are allowed in the context of a subscribed RESP2 client are: -* [`PING`](/commands/ping) -* [`PSUBSCRIBE`](/commands/psubscribe) -* [`PUNSUBSCRIBE`](/commands/punsubscribe) -* [`QUIT`](/commands/quit) -* [`RESET`](/commands/reset) -* [`SSUBSCRIBE`](/commands/ssubscribe) -* [`SUBSCRIBE`](/commands/subscribe) -* [`SUNSUBSCRIBE`](/commands/sunsubscribe) -* [`UNSUBSCRIBE`](/commands/unsubscribe) +* [`PING`]({{< relref "/commands/ping" >}}) +* [`PSUBSCRIBE`]({{< relref "/commands/psubscribe" >}}) +* [`PUNSUBSCRIBE`]({{< relref "/commands/punsubscribe" >}}) +* [`QUIT`]({{< relref "/commands/quit" >}}) +* [`RESET`]({{< relref "/commands/reset" >}}) +* [`SSUBSCRIBE`]({{< relref "/commands/ssubscribe" >}}) +* [`SUBSCRIBE`]({{< relref "/commands/subscribe" >}}) +* [`SUNSUBSCRIBE`]({{< relref "/commands/sunsubscribe" >}}) +* [`UNSUBSCRIBE`]({{< relref "/commands/unsubscribe" >}}) -However, if RESP3 is used (see [`HELLO`](/commands/hello)), a client can issue any commands while in the subscribed state. +However, if RESP3 is used (see [`HELLO`]({{< relref "/commands/hello" >}})), a client can issue any commands while in the subscribed state. -Please note that when using `redis-cli`, in subscribed mode commands such as [`UNSUBSCRIBE`](/commands/unsubscribe) and [`PUNSUBSCRIBE`](/commands/punsubscribe) cannot be used because `redis-cli` will not accept any commands and can only quit the mode with `Ctrl-C`. +Please note that when using `redis-cli`, in subscribed mode commands such as [`UNSUBSCRIBE`]({{< relref "/commands/unsubscribe" >}}) and [`PUNSUBSCRIBE`]({{< relref "/commands/punsubscribe" >}}) cannot be used because `redis-cli` will not accept any commands and can only quit the mode with `Ctrl-C`. ## Delivery semantics @@ -70,7 +70,7 @@ The first element is the kind of message: The third argument represents the number of channels we are currently subscribed to. 
When the last argument is zero, we are no longer subscribed to any channel, and the client can issue any kind of Redis command as we are outside the Pub/Sub state. -* `message`: it is a message received as a result of a [`PUBLISH`](/commands/publish) command issued by another client. +* `message`: it is a message received as a result of a [`PUBLISH`]({{< relref "/commands/publish" >}}) command issued by another client. The second element is the name of the originating channel, and the third argument is the actual message payload. ## Database & Scoping @@ -100,7 +100,7 @@ second :2 ``` -At this point, from another client we issue a [`PUBLISH`](/commands/publish) operation against the channel named `second`: +At this point, from another client we issue a [`PUBLISH`]({{< relref "/commands/publish" >}}) operation against the channel named `second`: ``` > PUBLISH second Hello @@ -118,7 +118,7 @@ $5 Hello ``` -Now the client unsubscribes itself from all the channels using the [`UNSUBSCRIBE`](/commands/unsubscribe) command without additional arguments: +Now the client unsubscribes itself from all the channels using the [`UNSUBSCRIBE`]({{< relref "/commands/unsubscribe" >}}) command without additional arguments: ``` UNSUBSCRIBE @@ -159,10 +159,10 @@ No other subscriptions will be affected by this call. Messages received as a result of pattern matching are sent in a different format: -* The type of the message is `pmessage`: it is a message received as a result from a [`PUBLISH`](/commands/publish) command issued by another client, matching a pattern-matching subscription. +* The type of the message is `pmessage`: it is a message received as a result from a [`PUBLISH`]({{< relref "/commands/publish" >}}) command issued by another client, matching a pattern-matching subscription. The second element is the original pattern matched, the third element is the name of the originating channel, and the last element is the actual message payload. 
-Similarly to [`SUBSCRIBE`](/commands/subscribe) and [`UNSUBSCRIBE`](/commands/unsubscribe), [`PSUBSCRIBE`](/commands/psubscribe) and [`PUNSUBSCRIBE`](/commands/punsubscribe) commands are acknowledged by the system sending a message of type `psubscribe` and `punsubscribe` using the same format as the `subscribe` and `unsubscribe` message format. +Similarly to [`SUBSCRIBE`]({{< relref "/commands/subscribe" >}}) and [`UNSUBSCRIBE`]({{< relref "/commands/unsubscribe" >}}), [`PSUBSCRIBE`]({{< relref "/commands/psubscribe" >}}) and [`PUNSUBSCRIBE`]({{< relref "/commands/punsubscribe" >}}) commands are acknowledged by the system sending a message of type `psubscribe` and `punsubscribe` using the same format as the `subscribe` and `unsubscribe` message format. ## Messages matching both a pattern and a channel subscription @@ -187,7 +187,7 @@ So the client will exit the Pub/Sub state only when this count drops to zero as From Redis 7.0, sharded Pub/Sub is introduced in which shard channels are assigned to slots by the same algorithm used to assign keys to slots. A shard message must be sent to a node that owns the slot the shard channel is hashed to. The cluster makes sure the published shard messages are forwarded to all nodes in the shard, so clients can subscribe to a shard channel by connecting to either the master responsible for the slot, or to any of its replicas. -[`SSUBSCRIBE`](/commands/ssubscribe), [`SUNSUBSCRIBE`](/commands/sunsubscribe) and [`SPUBLISH`](/commands/spublish) are used to implement sharded Pub/Sub. +[`SSUBSCRIBE`]({{< relref "/commands/ssubscribe" >}}), [`SUNSUBSCRIBE`]({{< relref "/commands/sunsubscribe" >}}) and [`SPUBLISH`]({{< relref "/commands/spublish" >}}) are used to implement sharded Pub/Sub. Sharded Pub/Sub helps to scale the usage of Pub/Sub in cluster mode. It restricts the propagation of messages to be within the shard of a cluster. 
diff --git a/content/develop/interact/search-and-query/administration/design.md b/content/develop/interact/search-and-query/administration/design.md index 2c7e9fcdd4..27ea98feeb 100644 --- a/content/develop/interact/search-and-query/administration/design.md +++ b/content/develop/interact/search-and-query/administration/design.md @@ -75,7 +75,7 @@ When searching, priority queue of the top N results requested is maintained, whi ## Index ppecs and field weights -When creating an "index" using [`FT.CREATE`](/commands/ft.create), the user specifies the fields to be indexed and their respective weights. This can be used to give some document fields, like a title, more weight in ranking results. +When creating an "index" using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), the user specifies the fields to be indexed and their respective weights. This can be used to give some document fields, like a title, more weight in ranking results. For example: @@ -91,7 +91,7 @@ When documents are indexed, the weights are taken from the saved *index Spec* th It is not mandatory to save the document data when indexing a document. Specifying `NOSAVE` for `FT.ADD` will cause the document to be indexed but not saved. -If the user does save the document, a HASH key is created in Redis that contains all fields (including ones not indexed), and upon search, perform an [`HGETALL`](/commands/hgetall) query on each retrieved document to retrieve all of its data. +If the user does save the document, a HASH key is created in Redis that contains all fields (including ones not indexed), and upon search, perform an [`HGETALL`]({{< relref "/commands/hgetall" >}}) query on each retrieved document to retrieve all of its data. **TODO**: Document snippets should be implemented down the road. 
diff --git a/content/develop/interact/search-and-query/administration/overview.md b/content/develop/interact/search-and-query/administration/overview.md index ed439f5207..d69d94755e 100644 --- a/content/develop/interact/search-and-query/administration/overview.md +++ b/content/develop/interact/search-and-query/administration/overview.md @@ -135,7 +135,7 @@ Optionally, you can choose not to save any one of those attributes besides the I ### Numeric index -Numeric properties are indexed in a special data structure that enables filtering by numeric ranges in an efficient way. One could view a numeric value as a term operating just like an inverted index. For example, all the products with the price $100 are in a specific list, which is intersected with the rest of the query. See [query execution engine]({{< relref "/develop/interact/search-and-query/administration/design" >}}) for more information. +Numeric properties are indexed in a special data structure that enables filtering by numeric ranges in an efficient way. One could view a numeric value as a term operating just like an inverted index. For example, all the products with the price $100 are in a specific list, which is intersected with the rest of the query. See [query execution engine]({{< baseurl >}}/develop/interact/search-and-query/administration/design#query-execution-engine) for more information. However, in order to filter by a range of prices, you would have to intersect the query with all the distinct prices within that range, or perform a union query. If the range has many values in it, this becomes highly inefficient. @@ -163,7 +163,7 @@ The auto-complete engine (see below for a fuller description) utilizes a compact ## Query language -Simple syntax is supported for complex queries that can be combined together to express complex filtering and matching rules. The query is a text string in the [`FT.SEARCH`](/commands/ft.search) request that is parsed using a complex query processor. 
+Simple syntax is supported for complex queries that can be combined together to express complex filtering and matching rules. The query is a text string in the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) request that is parsed using a complex query processor. * Multi-word phrases are lists of tokens, e.g., `foo bar baz`, and imply intersection (logical AND) of the terms. * Exact phrases are wrapped in quotes, e.g `"hello world"`. @@ -258,7 +258,7 @@ These are the pre-bundled scoring functions available in Redis Stack: It is possible to bypass the scoring function mechanism and order search results by the value of different document properties (fields) directly, even if the sorting field is not used by the query. For example, you can search for first name and sort by the last name. -When creating the index with [`FT.CREATE`](/commands/ft.create), you can declare `TEXT`, `TAG`, `NUMERIC`, and `GEO` properties as `SORTABLE`. When a property is sortable, you can later decide to order the results by its values with relatively low latency. When a property is not sortable, it can still be sorted by its values, but may increase latency. For example, the following schema: +When creating the index with [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), you can declare `TEXT`, `TAG`, `NUMERIC`, and `GEO` properties as `SORTABLE`. When a property is sortable, you can later decide to order the results by its values with relatively low latency. When a property is not sortable, it can still be sorted by its values, but may increase latency. For example, the following schema: ``` FT.CREATE users SCHEMA first_name TEXT last_name TEXT SORTABLE age NUMERIC SORTABLE @@ -303,7 +303,7 @@ Redis Stack's auto-completer supports Unicode, allowing for fuzzy matches in non RediSearch is implemented using the [Redis module API](https://redis.io/topics/modules-intro) and is loaded into Redis as an extension module at start-up. 
-Redis modules make it possible to extend Redis's core functionality, implementing new Redis commands, data structures, and capabilities with similar performance to native core Redis itself. Redis modules are dynamic libraries that can be loaded into Redis at start-up or loaded at run-time using the [`MODULE LOAD`](/commands/module-load) command. Redis exports a C API, in the form of a single C header file called `redismodule.h`. +Redis modules make it possible to extend Redis's core functionality, implementing new Redis commands, data structures, and capabilities with similar performance to native core Redis itself. Redis modules are dynamic libraries that can be loaded into Redis at start-up or loaded at run-time using the [`MODULE LOAD`]({{< relref "/commands/module-load" >}}) command. Redis exports a C API, in the form of a single C header file called `redismodule.h`. While the logic of RediSearch and its algorithms are mostly independent, and it could be ported quite easily to run as a stand-alone server, it still takes advantage of Redis as a robust infrastructure for a database server. Building on top of Redis means that, by default, modules are afforded: diff --git a/content/develop/interact/search-and-query/advanced-concepts/_index.md b/content/develop/interact/search-and-query/advanced-concepts/_index.md index 17b0d05e41..115ba0a595 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/_index.md +++ b/content/develop/interact/search-and-query/advanced-concepts/_index.md @@ -38,7 +38,7 @@ Redis Stack supports the following search and query features. 
This article provi * Retrieval of full document contents or only their IDs * Exact phrase search and slop-based search * [Numeric filters]({{< relref "/develop/interact/search-and-query/query/#numeric-filters-in-query" >}}) and ranges -* [Geo-filtering]({{< relref "/develop/interact/search-and-query/query/#geo-filters-in-query" >}}) using Redis [geo commands](/commands/?group=geo) +* [Geo-filtering]({{< relref "/develop/interact/search-and-query/query/#geo-filters-in-query" >}}) using Redis [geo commands]({{< relref "/commands/?group=geo" >}}) * [Vector similartiy search]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}) @@ -46,8 +46,8 @@ Redis Stack supports the following search and query features. This article provi * [Prefix-based searches]({{< relref "/develop/interact/search-and-query/query/#prefix-matching" >}}) * Field weights -* [Auto-complete]({{< relref "/develop/interact/search-and-query/administration/overview" >}}) and fuzzy prefix suggestions -* [Stemming]({{< relref "/develop/interact/search-and-query/advanced-concepts/stemming" >}})-based query expansion for [many languages]({{< relref "/develop/interact/search-and-query/advanced-concepts/stemming" >}}) using [Snowball](http://snowballstem.org/) +* [Auto-complete]({{< baseurl >}}/develop/interact/search-and-query/administration/overview#auto-complete) and fuzzy prefix suggestions +* [Stemming]({{< relref "/develop/interact/search-and-query/advanced-concepts/stemming" >}})-based query expansion for [many languages]({{< baseurl >}}/develop/interact/search-and-query/advanced-concepts/stemming#supported-languages) using [Snowball](http://snowballstem.org/) * Support for custom functions for query expansion and scoring (see [Extensions]({{< relref "/develop/interact/search-and-query/administration/extensions" >}})) * Unicode support (UTF-8 input required) * Document ranking diff --git a/content/develop/interact/search-and-query/advanced-concepts/aggregations.md 
b/content/develop/interact/search-and-query/advanced-concepts/aggregations.md index 971fda91c0..6c1d7acb63 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/aggregations.md +++ b/content/develop/interact/search-and-query/advanced-concepts/aggregations.md @@ -481,12 +481,12 @@ FT.CURSOR READ {idx} {cid} [COUNT {read size}] FT.CURSOR DEL {idx} {cid} ``` -You can use cursors with [`FT.AGGREGATE`](/commands/ft.aggregate), with the `WITHCURSOR` keyword. Cursors allow you to +You can use cursors with [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate), with the `WITHCURSOR` keyword. Cursors allow you to consume only part of the response, allowing you to fetch additional results as needed. This is much quicker than using `LIMIT` with offset, since the query is executed only once, and its state is stored on the server. -To use cursors, specify the `WITHCURSOR` keyword in [`FT.AGGREGATE`](/commands/ft.aggregate). For example: +To use cursors, specify the `WITHCURSOR` keyword in [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate). For example: ``` FT.AGGREGATE idx * WITHCURSOR @@ -494,16 +494,16 @@ FT.AGGREGATE idx * WITHCURSOR This will return a response of an array with two elements. The first element is the actual (partial) result, and the second is the cursor ID. The cursor ID -can then be fed to [`FT.CURSOR READ`](/commands/ft.cursor-read) repeatedly until the cursor ID is 0, in +can then be fed to [`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read) repeatedly until the cursor ID is 0, in which case all results have been returned. -To read from an existing cursor, use [`FT.CURSOR READ`](/commands/ft.cursor-read). For example: +To read from an existing cursor, use [`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read). 
For example: ``` FT.CURSOR READ idx 342459320 ``` -Assuming `342459320` is the cursor ID returned from the [`FT.AGGREGATE`](/commands/ft.aggregate) request, here is an example in pseudo-code: +Assuming `342459320` is the cursor ID returned from the [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) request, here is an example in pseudo-code: ``` response, cursor = FT.AGGREGATE "idx" "redis" "WITHCURSOR"; @@ -523,8 +523,8 @@ Note that even if the cursor is 0, a partial result may still be returned. #### Read size You can control how many rows are read for each cursor fetch by using the -`COUNT` parameter. This parameter can be specified both in [`FT.AGGREGATE`](/commands/ft.aggregate) -(immediately after `WITHCURSOR`) or in [`FT.CURSOR READ`](/commands/ft.cursor-read). +`COUNT` parameter. This parameter can be specified both in [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) +(immediately after `WITHCURSOR`) or in [`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read). The following example will read 10 rows at a time: ``` diff --git a/content/develop/interact/search-and-query/advanced-concepts/dialects.md b/content/develop/interact/search-and-query/advanced-concepts/dialects.md index 98b14c5b1c..96b331bb67 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/dialects.md +++ b/content/develop/interact/search-and-query/advanced-concepts/dialects.md @@ -17,7 +17,7 @@ title: Query dialects weight: 5 --- -Redis Stack currently supports four query dialects for use with the [`FT.SEARCH`](/commands/ft.search), [`FT.AGGREGATE`](/commands/ft.aggregate), and other search and query commands. +Redis Stack currently supports four query dialects for use with the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search), [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate), and other search and query commands. 
Dialects provide for enhancing the query API incrementally, introducing innovative behaviors and new features that support new use cases in a way that does not break the API for existing applications.``` ## `DIALECT 1` @@ -82,7 +82,7 @@ With `DIALECT 2` you can use un-escaped spaces in tag queries, even with stopwor Dialect version 3 was introduced in the [2.6](https://github.com/RediSearch/RediSearch/releases/tag/v2.6.3) release. This version introduced support for multi-value indexing and querying of attributes for any attribute type ( [TEXT](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-text), [TAG](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-tag), [NUMERIC](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-numeric), [GEO](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-geo) and [VECTOR](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-vector)) defined by a [JSONPath](https://redis.io/docs/stack/json/path/) leading to an array or multiple scalar values. Support for [GEOSHAPE](https://redis.io/docs/interact/search-and-query/query/geo-spatial/) queries was also introduced in this dialect. -The primary difference between dialects version 2 and version 3 is that JSON is returned rather than scalars for multi-value attributes. Apart from specifying `DIALECT 3` at the end of a [`FT.SEARCH`](/commands/ft.search) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. To use dialect version 3, append `DIALECT 3` to your query command. +The primary difference between dialects version 2 and version 3 is that JSON is returned rather than scalars for multi-value attributes. Apart from specifying `DIALECT 3` at the end of a [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. 
To use dialect version 3, append `DIALECT 3` to your query command. `FT.SEARCH ... DIALECT 3` @@ -146,7 +146,7 @@ DIALECT 3 is required for shape-based (`POINT` or `POLYGON`) geospatial queries. ## `DIALECT 4` -Dialect version 4 was introduced in the [2.8](https://github.com/RediSearch/RediSearch/releases/tag/v2.8.4) release. It introduces performance optimizations for sorting operations on [`FT.SEARCH`](/commands/ft.search) and [`FT.AGGREGATE`](/commands/ft.aggregate). Apart from specifying `DIALECT 4` at the end of a [`FT.SEARCH`](/commands/ft.search) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. To use dialect version 4, append `DIALECT 4` to your query command. +Dialect version 4 was introduced in the [2.8](https://github.com/RediSearch/RediSearch/releases/tag/v2.8.4) release. It introduces performance optimizations for sorting operations on [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate). Apart from specifying `DIALECT 4` at the end of a [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. To use dialect version 4, append `DIALECT 4` to your query command. `FT.SEARCH ... DIALECT 4` @@ -156,17 +156,17 @@ Dialect version 4 will improve performance in four different scenarios: 1. **Partial range** - applied when there is a `SORTBY` on a numeric field, either with no filter or with a filter by the same numeric field. Such queries will iterate on a range large enough to satisfy the `LIMIT` of requested results. 1. **Hybrid** - applied when there is a `SORTBY` on a numeric field in addition to another non-numeric filter. It could be the case that some results will get filtered, leaving too small a range to satisfy any specified `LIMIT`. In such cases, the iterator then is re-wound and additional iterations occur to collect result up to the requested `LIMIT`. 1. 
**No optimization** - If there is a sort by score or by a non-numeric field, there is no other option but to retrieve all results and compare their values to the search parameters. -## Use [`FT.EXPLAINCLI`](/commands/ft.explaincli) to compare dialects +## Use [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli) to compare dialects -The [[`FT.EXPLAINCLI`](/commands/ft.explaincli)](https://redis.io/commands/ft.explaincli/) is a powerful tool that provides a window into the inner workings of your queries. It's like a roadmap that details your query's journey from start to finish. +The [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli) command is a powerful tool that provides a window into the inner workings of your queries. It's like a roadmap that details your query's journey from start to finish. -When you run [`FT.EXPLAINCLI`](/commands/ft.explaincli), it returns an array representing the execution plan of a complex query. This plan is a step-by-step guide of how Redis interprets your query and how it plans to fetch results. It's a behind-the-scenes look at the process, giving you insights into how the search engine works. +When you run [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli), it returns an array representing the execution plan of a complex query. This plan is a step-by-step guide of how Redis interprets your query and how it plans to fetch results. It's a behind-the-scenes look at the process, giving you insights into how the search engine works. -The [`FT.EXPLAINCLI`](/commands/ft.explaincli) accepts a `DIALECT` argument, allowing you to execute the query using different dialect versions, allowing you to compare the resulting query plans. +The [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli) accepts a `DIALECT` argument, allowing you to execute the query using different dialect versions, allowing you to compare the resulting query plans. 
-To use [`FT.EXPLAINCLI`](/commands/ft.explaincli), you need to provide an index and a query predicate. The index is the name of the index you created using [`FT.CREATE`](/commands/ft.create), and the query predicate is the same as if you were sending it to [`FT.SEARCH`](/commands/ft.search) or [`FT.AGGREGATE`](/commands/ft.aggregate). +To use [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli), you need to provide an index and a query predicate. The index is the name of the index you created using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), and the query predicate is the same as if you were sending it to [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) or [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate). -Here's an example of how to use [`FT.EXPLAINCLI`](/commands/ft.explaincli) to understand differences in dialect versions 1 and 2. +Here's an example of how to use [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli) to understand differences in dialect versions 1 and 2. Negation of the intersection between tokens `hello` and `world`: @@ -210,7 +210,7 @@ FT.EXPLAINCLI idx:dialects "-(hello world)" DIALECT 2 ``` {{% alert title=Note %}} -[`FT.EXPLAIN`](/commands/ft.explain) doesn't execute the query. It only explains the plan. It's a way to understand how your query is interpreted by the query engine, which can be invaluable when you're trying to optimize your searches. +[`FT.EXPLAIN`]({{< baseurl >}}/commands/ft.explain) doesn't execute the query. It only explains the plan. It's a way to understand how your query is interpreted by the query engine, which can be invaluable when you're trying to optimize your searches. 
{{% /alert %}} ## Change the default dialect diff --git a/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md b/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md index c85c658f7e..5516319370 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md +++ b/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md @@ -144,7 +144,7 @@ Tag clauses can be combined into any subclause, used as negative expressions, op ## Geo filters -As of v0.21, it is possible to add geo radius queries directly into the query language with the syntax `@field:[{lon} {lat} {radius} {m|km|mi|ft}]`. This filters the result to a given radius from a lon,lat point, defined in meters, kilometers, miles or feet. See Redis's own [`GEORADIUS`](/commands/georadius) command for more details. +As of v0.21, it is possible to add geo radius queries directly into the query language with the syntax `@field:[{lon} {lat} {radius} {m|km|mi|ft}]`. This filters the result to a given radius from a lon,lat point, defined in meters, kilometers, miles or feet. See Redis's own [`GEORADIUS`]({{< relref "/commands/georadius" >}}) command for more details. Radius filters can be added into the query just like numeric filters. For example, in a database of businesses, looking for Chinese restaurants near San Francisco (within a 5km radius) would be expressed as: `chinese restaurant @location:[-122.41 37.77 5 km]`. @@ -160,13 +160,13 @@ There is a new schema field type called `GEOSHAPE`, which can be specified as ei - `FLAT` for Cartesian X Y coordinates - `SPHERICAL` for geographic longitude and latitude coordinates. This is the default coordinate system. -Finally, there's new [`FT.SEARCH`](/commands/ft.search) syntax that allows you to query for polygons that either contain or are within a given geoshape. 
+Finally, there's new [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) syntax that allows you to query for polygons that either contain or are within a given geoshape. `@field:[{WITHIN|CONTAINS} $geometry] PARAMS 2 geometry {geometry}` Here's an example using two stacked polygons that represent a box contained within a house. -![two stacked polygons]({{< relref "/develop/interact/search-and-query/img/polygons.png" >}}) +![two stacked polygons]({{< baseurl >}}/develop/interact/search-and-query/img/polygons.png) First, create an index using a `FLAT` `GEOSHAPE`, representing a 2D X Y coordinate system. @@ -178,7 +178,7 @@ Next, create the data structures that represent the geometries in the picture. HSET shape:1 t "this is my house" g "POLYGON((2 2, 2 8, 6 11, 10 8, 10 2, 2 2))" HSET shape:2 t "this is a square in my house" g "POLYGON((4 4, 4 6, 6 6, 6 4, 4 4))" ``` -Finally, use [`FT.SEARCH`](/commands/ft.search) to query the geometries. Note the use of `DIALECT 3`, which is required. Here are a few examples. +Finally, use [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) to query the geometries. Note the use of `DIALECT 3`, which is required. Here are a few examples. Search for a polygon that contains a specified point: @@ -239,7 +239,7 @@ Note that both the house and box shapes were returned. GEOSHAPE does not support JSON multi-value or SORTABLE options. {{< /alert >}} -For more examples, see the [`FT.CREATE`](/commands/ft.create) and [`FT.SEARCH`](/commands/ft.search) command pages. +For more examples, see the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) and [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command pages. ## Vector search @@ -265,7 +265,7 @@ The general syntax for hybrid query is `{some filter query}=>[ KNN {num|$num} @v `@vector_field:[VECTOR_RANGE 0.5 $query_vec]` -As of v2.4, the KNN vector search can be used at most once in a query, while, as of v2.6, the vector range filter can be used multiple times in a query. 
For more information on vector similarity syntax, see [Querying vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}), and [Vector search examples]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}) sections. +As of v2.4, the KNN vector search can be used at most once in a query, while, as of v2.6, the vector range filter can be used multiple times in a query. For more information on vector similarity syntax, see [Querying vector fields]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}), and [Vector search examples]({{< baseurl >}}/develop/interact/search-and-query/advanced-concepts/vectors#vector-search-examples) sections. ## Prefix matching @@ -370,7 +370,7 @@ The supported attributes are: As of v2.6.1, the query attributes syntax supports these additional attributes: * **$yield_distance_as**: specifies the distance field name, used for later sorting and/or returning, for clauses that yield some distance metric. It is currently supported for vector queries only (both KNN and range). -* **vector query params**: pass optional parameters for [vector queries]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}) in key-value format. +* **vector query params**: pass optional parameters for [vector queries]({{< baseurl >}}/develop/interact/search-and-query/advanced-concepts/vectors#querying-vector-fields) in key-value format. ## A few query examples @@ -460,4 +460,4 @@ As of v2.6.1, the query attributes syntax supports these additional attributes: The query parser is built using the Lemon Parser Generator and a Ragel based lexer. You can see the `DIALECT 2` grammar definition [at this git repo](https://github.com/RediSearch/RediSearch/blob/master/src/query_parser/v2/parser.y). -You can also see the [DEFAULT_DIALECT]({{< relref "/develop/interact/search-and-query/basic-constructs/configuration-parameters" >}}) configuration parameter. 
+You can also see the [DEFAULT_DIALECT]({{< baseurl >}}/develop/interact/search-and-query/basic-constructs/configuration-parameters#default_dialect) configuration parameter. diff --git a/content/develop/interact/search-and-query/advanced-concepts/scoring.md b/content/develop/interact/search-and-query/advanced-concepts/scoring.md index 2fe5e892b9..ff33c9a2f4 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/scoring.md +++ b/content/develop/interact/search-and-query/advanced-concepts/scoring.md @@ -23,7 +23,7 @@ Redis Stack comes with a few very basic scoring functions to evaluate document r If you prefer a custom scoring function, it is possible to add more functions using the [extension API]({{< relref "/develop/interact/search-and-query/administration/extensions" >}}). -The following is a list of the pre-bundled scoring functions available in Redis Stack and a short explanation about how they work. Each function is mentioned by registered name, which can be passed as a `SCORER` argument in [`FT.SEARCH`](/commands/ft.search). +The following is a list of the pre-bundled scoring functions available in Redis Stack and a short explanation about how they work. Each function is mentioned by registered name, which can be passed as a `SCORER` argument in [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search). ## TFIDF (default) diff --git a/content/develop/interact/search-and-query/advanced-concepts/sorting.md b/content/develop/interact/search-and-query/advanced-concepts/sorting.md index 6e75120751..8dabf01bd3 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/sorting.md +++ b/content/develop/interact/search-and-query/advanced-concepts/sorting.md @@ -21,7 +21,7 @@ As of RediSearch 0.15, you can bypass the scoring function mechanism and order s ## Declaring sortable fields -When creating an index with [`FT.CREATE`](/commands/ft.create), you can declare `TEXT`, `TAG`, `NUMERIC`, and `GEO` attributes as `SORTABLE`. 
When an attribute is sortable, you can order the results by its values with relatively low latency. When an attribute is not sortable, it can still be sorted by its values, but with increased latency. For example, in the following schema: +When creating an index with [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), you can declare `TEXT`, `TAG`, `NUMERIC`, and `GEO` attributes as `SORTABLE`. When an attribute is sortable, you can order the results by its values with relatively low latency. When an attribute is not sortable, it can still be sorted by its values, but with increased latency. For example, in the following schema: ``` FT.CREATE users SCHEMA first_name TEXT last_name TEXT SORTABLE age NUMERIC SORTABLE diff --git a/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md b/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md index b62284ad5b..ce202a2691 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md +++ b/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md @@ -27,11 +27,11 @@ In such cases, and as of v1.4, RediSearch can be used for generating alternative The alternatives for a misspelled term are generated from the corpus of already-indexed terms and, optionally, one or more custom dictionaries. Alternatives become spelling suggestions based on their respective Levenshtein distances from the misspelled term. Each spelling suggestion is given a normalized score based on its occurrences in the index. -To obtain the spelling corrections for a query, refer to the documentation of the [`FT.SPELLCHECK`](/commands/ft.spellcheck) command. +To obtain the spelling corrections for a query, refer to the documentation of the [`FT.SPELLCHECK`]({{< baseurl >}}/commands/ft.spellcheck) command. ## Custom dictionaries -A dictionary is a set of terms. 
Dictionaries can be added with terms, have terms deleted from them, and have their entire contents dumped using the [`FT.DICTADD`](/commands/ft.dictadd), [`FT.DICTDEL`](/commands/ft.dictdel) and [`FT.DICTDUMP`](/commands/ft.dictdump) commands, respectively. +A dictionary is a set of terms. Dictionaries can be added with terms, have terms deleted from them, and have their entire contents dumped using the [`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd), [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel) and [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump) commands, respectively. Dictionaries can be used to modify the behavior of spelling corrections by including or excluding their contents from potential spelling correction suggestions. diff --git a/content/develop/interact/search-and-query/advanced-concepts/stemming.md b/content/develop/interact/search-and-query/advanced-concepts/stemming.md index c16a596d53..8f6deebd6c 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/stemming.md +++ b/content/develop/interact/search-and-query/advanced-concepts/stemming.md @@ -29,7 +29,7 @@ For further details see the [Snowball Stemmer website](https://snowballstem.org/ Stemming maps different forms of the same word to a common root - "stem" - for example, the English stemmer maps *studied* ,*studies* and *study* to *studi* . So a searching for *studied* would also find documents which only have the other forms. -In order to define which language the Stemmer should apply when building the index, you need to specify the `LANGUAGE` parameter for the entire index or for the specific field. For more details check the [FT.CREATE]({{< relref "/commands/FT.CREATE" >}}) syntax. +In order to define which language the Stemmer should apply when building the index, you need to specify the `LANGUAGE` parameter for the entire index or for the specific field. For more details check the [FT.CREATE]({{< baseurl >}}/commands/ft.create) syntax. 
**Create a index with language definition** diff --git a/content/develop/interact/search-and-query/advanced-concepts/stopwords.md b/content/develop/interact/search-and-query/advanced-concepts/stopwords.md index 6ed14a0a26..6060347d94 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/stopwords.md +++ b/content/develop/interact/search-and-query/advanced-concepts/stopwords.md @@ -35,7 +35,7 @@ The following words are treated as stop words by default: ## Overriding the default stop word list -Stop words for an index can be defined (or disabled completely) on index creation using the `STOPWORDS` argument with the [[`FT.CREATE`](/commands/ft.create) command. +Stop words for an index can be defined (or disabled completely) on index creation using the `STOPWORDS` argument with the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) command. The format is `STOPWORDS {number} {stopword} ...` where number is the number of stop words given. The `STOPWORDS` argument must come before the `SCHEMA` argument. For example: @@ -45,7 +45,7 @@ FT.CREATE myIndex STOPWORDS 3 foo bar baz SCHEMA title TEXT body TEXT ## Disable the use of stop words -Disabling stop words completely can be done by passing `STOPWORDS 0` to [`FT.CREATE`](/commands/ft.create). +Disabling stop words completely can be done by passing `STOPWORDS 0` to [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). ## Avoiding stop word detection in search queries diff --git a/content/develop/interact/search-and-query/advanced-concepts/tags.md b/content/develop/interact/search-and-query/advanced-concepts/tags.md index de490bec4b..3a3df2c9b1 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/tags.md +++ b/content/develop/interact/search-and-query/advanced-concepts/tags.md @@ -112,7 +112,7 @@ FT.SEARCH myIndex "@cities:{ New York } @cities:{Los Angeles} @cities:{ Barcelon ## Including punctuation in tags -A tag can include punctuation other than the field's separator. 
You do not need to escape punctuation when using the [`HSET`](/commands/hset) command to add the value to a Redis Hash. +A tag can include punctuation other than the field's separator. You do not need to escape punctuation when using the [`HSET`]({{< relref "/commands/hset" >}}) command to add the value to a Redis Hash. For example, given the following index: diff --git a/content/develop/interact/search-and-query/advanced-concepts/vectors.md b/content/develop/interact/search-and-query/advanced-concepts/vectors.md index d154020d4f..27e30f76a5 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/vectors.md +++ b/content/develop/interact/search-and-query/advanced-concepts/vectors.md @@ -16,7 +16,7 @@ title: Vectors weight: 14 --- -*Vector fields* allow you to use vector similarity queries in the [`FT.SEARCH`](/commands/ft.search) command. +*Vector fields* allow you to use vector similarity queries in the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command. *Vector similarity* enables you to load, index, and query vectors stored as fields in Redis hashes or in JSON documents (via integration with the [JSON module]({{< relref "/develop/data-types/json/" >}})) Vector similarity provides these functionalities: @@ -70,7 +70,7 @@ For example: FT.CREATE my_idx SCHEMA vec_field VECTOR FLAT 6 TYPE FLOAT32 DIM 128 DISTANCE_METRIC L2 ``` -Here, three parameters are passed for the index ([`TYPE`](/commands/type), `DIM`, `DISTANCE_METRIC`), and `count` counts the total number of attributes (6). +Here, three parameters are passed for the index ([`TYPE`]({{< relref "/commands/type" >}}), `DIM`, `DISTANCE_METRIC`), and `count` counts the total number of attributes (6). * `{attribute_name} {attribute_value}` are algorithm attributes for the creation of the vector index. Every algorithm has its own mandatory and optional attributes. 
@@ -80,7 +80,7 @@ Here, three parameters are passed for the index ([`TYPE`](/commands/type), `DIM` Mandatory parameters are: -* [`TYPE`](/commands/type) - Vector type. Current supported types are `FLOAT32` and `FLOAT64`. +* [`TYPE`]({{< relref "/commands/type" >}}) - Vector type. Current supported types are `FLOAT32` and `FLOAT64`. * `DIM` - Vector dimension specified as a positive integer. @@ -112,7 +112,7 @@ BLOCK_SIZE 1000 Mandatory parameters are: -* [`TYPE`](/commands/type) - Vector type. Current supported types are `FLOAT32` and `FLOAT64`. +* [`TYPE`]({{< relref "/commands/type" >}}) - Vector type. Current supported types are `FLOAT32` and `FLOAT64`. * `DIM` - Vector dimension, specified as a positive integer. @@ -180,7 +180,7 @@ Unlike in hashes, vectors are stored in JSON documents as arrays (not as blobs). JSON.SET 1 $ '{"vec":[1,2,3,4]}' ``` -As of v2.6.1, JSON supports multi-value indexing. This capability accounts for vectors as well. Thus, it is possible to index multiple vectors under the same JSONPath. Additional information is available in the [Indexing JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}) section. +As of v2.6.1, JSON supports multi-value indexing. This capability accounts for vectors as well. Thus, it is possible to index multiple vectors under the same JSONPath. Additional information is available in the [Indexing JSON documents]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-vector) section. **Example** ``` @@ -190,7 +190,7 @@ JSON.SET 1 $ '{"foo":{"vec":[1,2,3,4]}, "bar":{"vec":[5,6,7,8]}}' ## Querying vector fields -You can use vector similarity queries in the [`FT.SEARCH`](/commands/ft.search) query command. 
To use a vector similarity query, you must specify the option `DIALECT 2` or greater in the command itself, or set the `DEFAULT_DIALECT` option to `2` or greater, by either using the command [`FT.CONFIG SET`](/commands/ft.config-set) or when loading the `redisearch` module and passing it the argument `DEFAULT_DIALECT 2`. +You can use vector similarity queries in the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) query command. To use a vector similarity query, you must specify the option `DIALECT 2` or greater in the command itself, or set the `DEFAULT_DIALECT` option to `2` or greater, by either using the command [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) or when loading the `redisearch` module and passing it the argument `DEFAULT_DIALECT 2`. There are two types of vector queries: *KNN* and *range*: @@ -213,7 +213,7 @@ The `` part inside the square brackets needs to be in t KNN ( | $) @ $ [ |$] [...]] [ AS | $] ``` -Every `*_attribute` parameter should refer to an attribute in the [`PARAMS`](/commands/ft.search) section. +Every `*_attribute` parameter should refer to an attribute in the [`PARAMS`]({{< baseurl >}}/commands/ft.search) section. * ` | $` - Number of requested results ("K"). @@ -221,11 +221,11 @@ Every `*_attribute` parameter should refer to an attribute in the [`PARAMS`](/co * `$` - An attribute that holds the query vector as blob and must be passed through the `PARAMS` section. The blob's byte size should match the vector field dimension and type. -* `[ |$ [...]]` - An optional part for passing one or more vector similarity query parameters. Parameters should come in key-value pairs and should be valid parameters for the query. See which [runtime parameters]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}) are valid for each algorithm. +* `[ |$ [...]]` - An optional part for passing one or more vector similarity query parameters. 
Parameters should come in key-value pairs and should be valid parameters for the query. See which [runtime parameters]({{< baseurl >}}/develop/interact/search-and-query/advanced-concepts/vectors#runtime-attributes) are valid for each algorithm. * `[AS | $]` - An optional part for specifying a distance field name, for later sorting by the similarity metric and/or returning it. By default, the distance field name is "`___score`" and it can be used for sorting without using `AS ` in the query. -**Note:** As of v2.6, vector query params and distance field name can be specified in [query attributes]({{< relref "/develop/interact/search-and-query/advanced-concepts/query_syntax" >}}) like syntax as well. Thus, the following format is also supported: +**Note:** As of v2.6, vector query params and distance field name can be specified in [query attributes]({{< baseurl >}}/develop/interact/search-and-query/advanced-concepts/query_syntax#query-attributes) like syntax as well. Thus, the following format is also supported: ``` =>[]=>{$: ( | $); ... 
} @@ -312,7 +312,7 @@ Now, sort the results by their distance from the query vector: ``` FT.SEARCH idx "*=>[KNN 10 @vec $BLOB]" PARAMS 2 BLOB "\x12\xa9\xf5\x6c" SORTBY __vec_score DIALECT 2 ``` -Return the top 10 similar documents, use *query params* (see "params" section in [FT.SEARCH command](/commands/ft.search/)) for specifying `K` and `EF_RUNTIME` parameter, and set `EF_RUNTIME` value to 150 (assuming `vec` is an HNSW index): +Return the top 10 similar documents, use *query params* (see "params" section in [FT.SEARCH command]({{< baseurl >}}/commands/ft.search/)) for specifying `K` and `EF_RUNTIME` parameter, and set `EF_RUNTIME` value to 150 (assuming `vec` is an HNSW index): ``` FT.SEARCH idx "*=>[KNN $K @vec $BLOB EF_RUNTIME $EF]" PARAMS 6 BLOB "\x12\xa9\xf5\x6c" K 10 EF 150 DIALECT 2 ``` diff --git a/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md b/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md index a86eec4d73..b530414adc 100644 --- a/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md +++ b/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md @@ -29,7 +29,7 @@ In [redis.conf]({{< relref "/operate/oss_and_stack/management/config" >}}): loadmodule ./redisearch.so [OPT VAL]... ``` -From the [Redis CLI]({{< relref "/develop/connect/cli" >}}), using the [MODULE LOAD](/commands/module-load/) command: +From the [Redis CLI]({{< relref "/develop/connect/cli" >}}), using the [MODULE LOAD]({{< relref "/commands/module-load" >}}) command: ``` 127.0.0.6379> MODULE LOAD redisearch.so [OPT VAL]... @@ -58,7 +58,7 @@ FT.CONFIG GET OPT1 FT.CONFIG GET * ``` -Values set using [`FT.CONFIG SET`](/commands/ft.config-set) are not persisted after server restart. +Values set using [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) are not persisted after server restart. 
## RediSearch configuration parameters @@ -248,7 +248,7 @@ $ redis-server --loadmodule ./redisearch.so MAXDOCTABLESIZE 3000000 ### MAXSEARCHRESULTS -The maximum number of results to be returned by the [`FT.SEARCH`](/commands/ft.search) command if LIMIT is used. +The maximum number of results to be returned by the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command if LIMIT is used. Setting value to `-1` will remove the limit. #### Default @@ -265,7 +265,7 @@ $ redis-server --loadmodule ./redisearch.so MAXSEARCHRESULTS 3000000 ### MAXAGGREGATERESULTS -The maximum number of results to be returned by the [`FT.AGGREGATE`](/commands/ft.aggregate) command if LIMIT is used. +The maximum number of results to be returned by the [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) command if LIMIT is used. Setting value to `-1` will remove the limit. #### Default @@ -282,7 +282,7 @@ $ redis-server --loadmodule ./redisearch.so MAXAGGREGATERESULTS 3000000 ### FRISOINI -If present, load the custom Chinese dictionary from the specified path. See [Using custom dictionaries]({{< relref "/develop/interact/search-and-query/advanced-concepts/chinese" >}}) for more details. +If present, load the custom Chinese dictionary from the specified path. See [Using custom dictionaries]({{< baseurl >}}/develop/interact/search-and-query/advanced-concepts/chinese#using-custom-dictionaries) for more details. #### Default @@ -298,7 +298,7 @@ $ redis-server --loadmodule ./redisearch.so FRISOINI /opt/dict/friso.ini ### CURSOR_MAX_IDLE -The maximum idle time (in ms) that can be set to the [cursor api]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}}). +The maximum idle time (in ms) that can be set to the [cursor api]({{< baseurl >}}/develop/interact/search-and-query/advanced-concepts/aggregations#cursor-api). 
#### Default @@ -479,11 +479,11 @@ $ redis-server --loadmodule ./redisearch.so GC_POLICY FORK FORK_GC_CLEAN_THRESHO ### UPGRADE_INDEX -This configuration is a special configuration option introduced to upgrade indices from v1.x RediSearch versions, otherwise known as legacy indices. This configuration option needs to be given for each legacy index, followed by the index name and all valid options for the index description (also referred to as the `ON` arguments for following hashes) as described on [ft.create api](/commands/ft.create). +This configuration is a special configuration option introduced to upgrade indices from v1.x RediSearch versions, otherwise known as legacy indices. This configuration option needs to be given for each legacy index, followed by the index name and all valid options for the index description (also referred to as the `ON` arguments for following hashes) as described on [ft.create api]({{< baseurl >}}/commands/ft.create). #### Default -There is no default for index name, and the other arguments have the same defaults as with the [`FT.CREATE`](/commands/ft.create) API. +There is no default for index name, and the other arguments have the same defaults as with the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) API. #### Example @@ -525,7 +525,7 @@ $ redis-server --loadmodule ./redisearch.so OSS_GLOBAL_PASSWORD password ### DEFAULT_DIALECT -The default DIALECT to be used by [`FT.CREATE`](/commands/ft.create), [`FT.AGGREGATE`](/commands/ft.aggregate), [`FT.EXPLAIN`](/commands/ft.explain), [`FT.EXPLAINCLI`](/commands/ft.explaincli), and [`FT.SPELLCHECK`](/commands/ft.spellcheck). +The default DIALECT to be used by [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate), [`FT.EXPLAIN`]({{< baseurl >}}/commands/ft.explain), [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli), and [`FT.SPELLCHECK`]({{< baseurl >}}/commands/ft.spellcheck). 
#### Default @@ -540,7 +540,7 @@ $ redis-server --loadmodule ./redisearch.so DEFAULT_DIALECT 2 {{% alert title="Notes" color="info" %}} * Vector search, added in v2.4.3, requires `DIALECT 2` or greater. -* Returning multiple values from [`FT.SEARCH`](/commands/ft.search) and [`FT.AGGREGATE`](/commands/ft.aggregate) requires `DIALECT 3` or greater. +* Returning multiple values from [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) requires `DIALECT 3` or greater. {{% /alert %}} @@ -548,7 +548,7 @@ $ redis-server --loadmodule ./redisearch.so DEFAULT_DIALECT 2 ### VSS_MAX_RESIZE -The maximum memory resize for vector similarity indexes in bytes. This value will override default memory limits if you need to allow for a large [`BLOCK_SIZE`]({{< relref "/develop/get-started/vector-database" >}}). +The maximum memory resize for vector similarity indexes in bytes. This value will override default memory limits if you need to allow for a large [`BLOCK_SIZE`]({{< baseurl >}}/develop/get-started/vector-database#creation-attributes-per-algorithm). #### Default diff --git a/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md b/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md index 9e7a705581..50318944cd 100644 --- a/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md +++ b/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md @@ -24,7 +24,7 @@ Redis Stack provides various field types that allow you to store and search diff Number fields are used to store non-textual, countable values. They can hold integer or floating-point values. Number fields are sortable, meaning you can perform range-based queries and retrieve documents based on specific numeric conditions. 
For example, you can search for documents with a price between a certain range or retrieve documents with a specific rating value. -You can add number fields to a schema in [`FT.CREATE`](/commands/ft.create) using this syntax: +You can add number fields to a schema in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) using this syntax: ``` FT.CREATE ... SCHEMA ... {field_name} NUMBER [SORTABLE] [NOINDEX] @@ -57,7 +57,7 @@ You can also use the following query syntax to perform more complex numeric quer Geo fields are used to store geographical coordinates such as longitude and latitude. They enable geospatial radius queries, which allow you to implement location-based search functionality in your applications such as finding nearby restaurants, stores, or any other points of interest. -You can add geo fields to the schema in [`FT.CREATE`](/commands/ft.create) using this syntax: +You can add geo fields to the schema in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) using this syntax: ``` FT.CREATE ... SCHEMA ... {field_name} GEO [SORTABLE] [NOINDEX] @@ -77,7 +77,7 @@ FT.SEARCH cities "@coords:[2.34 48.86 1000 km]" Vector fields are floating-point vectors that are typically generated by external machine learning models. These vectors represent unstructured data such as text, images, or other complex features. Redis Stack allows you to search for similar vectors using vector search algorithms like cosine similarity, Euclidean distance, and inner product. This enables you to build advanced search applications, recommendation systems, or content similarity analysis. -You can add vector fields to the schema in [`FT.CREATE`](/commands/ft.create) using this syntax: +You can add vector fields to the schema in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) using this syntax: ``` FT.CREATE ... SCHEMA ... {field_name} VECTOR {algorithm} {count} [{attribute_name} {attribute_value} ...] 
@@ -101,7 +101,7 @@ Notice that `{count}` represents the total number of attribute pairs passed in t FT.CREATE my_idx SCHEMA vec_field VECTOR FLAT 6 TYPE FLOAT32 DIM 128 DISTANCE_METRIC L2 ``` - Here, three parameters are passed for the index ([`TYPE`](/commands/type), `DIM`, `DISTANCE_METRIC`), and `count` is the total number of attributes (6). + Here, three parameters are passed for the index ([`TYPE`]({{< relref "/commands/type" >}}), `DIM`, `DISTANCE_METRIC`), and `count` is the total number of attributes (6). * `{attribute_name} {attribute_value}` are algorithm attributes for the creation of the vector index. Every algorithm has its own mandatory and optional attributes. diff --git a/content/develop/interact/search-and-query/basic-constructs/schema-definition.md b/content/develop/interact/search-and-query/basic-constructs/schema-definition.md index d07865348e..de4464062d 100644 --- a/content/develop/interact/search-and-query/basic-constructs/schema-definition.md +++ b/content/develop/interact/search-and-query/basic-constructs/schema-definition.md @@ -34,7 +34,7 @@ SCHEMA In this example, a schema is defined for an index named `idx` that will index all hash documents whose keyname starts with `blog:post:`. The schema includes the fields `title`, `content`, `author`, `created_date`, and `views`. The `TEXT` type indicates that the `title` and `content` fields are text-based, the `TAG` type is used for the `author` field, and the `NUMERIC` type is used for the `created_date` and `views` fields. Additionally, a weight of 5.0 is assigned to the `title` field to give it more relevance in search results, and `created_date` is marked as `SORTABLE` to enable sorting based on this field. -You can learn more about the available field types and options on the [`FT.CREATE`](/commands/ft.create) page. +You can learn more about the available field types and options on the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) page. 
## More schema definition examples @@ -116,4 +116,4 @@ SCHEMA ``` -You can learn more about the available field types and options on the [`FT.CREATE`](/commands/ft.create) page. \ No newline at end of file +You can learn more about the available field types and options on the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) page. \ No newline at end of file diff --git a/content/develop/interact/search-and-query/deprecated/payloads.md b/content/develop/interact/search-and-query/deprecated/payloads.md index b6da68a161..1e6678ec0d 100644 --- a/content/develop/interact/search-and-query/deprecated/payloads.md +++ b/content/develop/interact/search-and-query/deprecated/payloads.md @@ -63,7 +63,7 @@ If no payload was set to the document, it is simply NULL. If it is not, you can When searching, it is possible to request the document payloads from the engine. -This is done by adding the keyword `WITHPAYLOADS` to [`FT.SEARCH`](/commands/ft.search). +This is done by adding the keyword `WITHPAYLOADS` to [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search). If `WITHPAYLOADS` is set, the payloads follow the document id in the returned result. If `WITHSCORES` is set as well, the payloads follow the scores. e.g.: diff --git a/content/develop/interact/search-and-query/indexing/_index.md b/content/develop/interact/search-and-query/indexing/_index.md index b3a77ca2cf..2dd7848c9b 100644 --- a/content/develop/interact/search-and-query/indexing/_index.md +++ b/content/develop/interact/search-and-query/indexing/_index.md @@ -28,14 +28,14 @@ Before you can index and search JSON documents, you need a database with either: ## Create index with JSON schema -When you create an index with the [`FT.CREATE`](/commands/ft.create) command, include the `ON JSON` keyword to index any existing and future JSON documents stored in the database. 
+When you create an index with the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) command, include the `ON JSON` keyword to index any existing and future JSON documents stored in the database. To define the `SCHEMA`, you can provide [JSONPath]({{< relref "/develop/data-types/json/path" >}}) expressions. The result of each JSONPath expression is indexed and associated with a logical name called an `attribute` (previously known as a `field`). You can use these attributes in queries. {{% alert title="Note" color="info" %}} -Note: `attribute` is optional for [`FT.CREATE`](/commands/ft.create). +Note: `attribute` is optional for [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). {{% /alert %}} Use the following syntax to create a JSON index: @@ -56,7 +56,7 @@ See [Index limitations](#index-limitations) for more details about JSON index `S After you create an index, Redis Stack automatically indexes any existing, modified, or newly created JSON documents stored in the database. For existing documents, indexing runs asynchronously in the background, so it can take some time before the document is available. Modified and newly created documents are indexed synchronously, so the document will be available by the time the add or modify command finishes. -You can use any JSON write command, such as [`JSON.SET`](/commands/json.set) and [`JSON.ARRAPPEND`](/commands/json.arrappend), to create or modify JSON documents. +You can use any JSON write command, such as [`JSON.SET`]({{< baseurl >}}/commands/json.set) and [`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend), to create or modify JSON documents. The following examples use these JSON documents to represent individual inventory items. 
@@ -100,7 +100,7 @@ Item 2 JSON document: } ``` -Use [`JSON.SET`](/commands/json.set) to store these documents in the database: +Use [`JSON.SET`]({{< baseurl >}}/commands/json.set) to store these documents in the database: ```sql 127.0.0.1:6379> JSON.SET item:1 $ '{"name":"Noise-cancelling Bluetooth headphones","description":"Wireless Bluetooth headphones with noise-cancelling technology","connection":{"wireless":true,"type":"Bluetooth"},"price":99.98,"stock":25,"colors":["black","silver"],"embedding":[0.87,-0.15,0.55,0.03]}' @@ -109,12 +109,12 @@ Use [`JSON.SET`](/commands/json.set) to store these documents in the database: "OK" ``` -Because indexing is synchronous in this case, the documents will be available on the index as soon as the [`JSON.SET`](/commands/json.set) command returns. +Because indexing is synchronous in this case, the documents will be available on the index as soon as the [`JSON.SET`]({{< baseurl >}}/commands/json.set) command returns. Any subsequent queries that match the indexed content will return the document. ## Search the index -To search the index for JSON documents, use the [`FT.SEARCH`](/commands/ft.search) command. +To search the index for JSON documents, use the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command. You can search any attribute defined in the `SCHEMA`. For example, use this query to search for items with the word "earbuds" in the name: @@ -170,7 +170,7 @@ And lastly, search for the Bluetooth headphones that are most similar to an imag For more information about search queries, see [Search query syntax]({{< relref "/develop/interact/search-and-query/advanced-concepts/query_syntax" >}}). {{% alert title="Note" color="info" %}} -[`FT.SEARCH`](/commands/ft.search) queries require `attribute` modifiers. Don't use JSONPath expressions in queries because the query parser doesn't fully support them. +[`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) queries require `attribute` modifiers. 
Don't use JSONPath expressions in queries because the query parser doesn't fully support them. {{% /alert %}} ## Index JSON arrays as TAG @@ -223,7 +223,7 @@ Now you can do full text search for light colored headphones: ``` ### Limitations -- When a JSONPath may lead to multiple values and not only to a single array, e.g., when a JSONPath contains wildcards, etc., specifying `SLOP` or `INORDER` in [`FT.SEARCH`](/commands/ft.search) will return an error, since the order of the values matching the JSONPath is not well defined, leading to potentially inconsistent results. +- When a JSONPath may lead to multiple values and not only to a single array, e.g., when a JSONPath contains wildcards, etc., specifying `SLOP` or `INORDER` in [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) will return an error, since the order of the values matching the JSONPath is not well defined, leading to potentially inconsistent results. For example, using a JSONPath such as `$..b[*]` on a JSON value such as ```json @@ -252,7 +252,7 @@ Now you can do full text search for light colored headphones: ### Handling phrases in different array slots: -When indexing, a predefined delta is used to increase positional offsets between array slots for multiple text values. This delta controls the level of separation between phrases in different array slots (related to the `SLOP` parameter of [`FT.SEARCH`](/commands/ft.search)). +When indexing, a predefined delta is used to increase positional offsets between array slots for multiple text values. This delta controls the level of separation between phrases in different array slots (related to the `SLOP` parameter of [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search)). This predefined value is set by the configuration parameter `MULTI_TEXT_SLOP` (at module load-time). The default value is 100. 
## Index JSON arrays as NUMERIC @@ -502,7 +502,7 @@ You can also search for items with a Bluetooth connection type: ## Field projection -[`FT.SEARCH`](/commands/ft.search) returns the entire JSON document by default. If you want to limit the returned search results to specific attributes, you can use field projection. +[`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) returns the entire JSON document by default. If you want to limit the returned search results to specific attributes, you can use field projection. ### Return specific attributes @@ -579,7 +579,7 @@ This query returns the field as the alias `"stock"` instead of the JSONPath expr You can [highlight]({{< relref "/develop/interact/search-and-query/advanced-concepts/highlight" >}}) relevant search terms in any indexed `TEXT` attribute. -For [`FT.SEARCH`](/commands/ft.search), you have to explicitly set which attributes you want highlighted after the `RETURN` and `HIGHLIGHT` parameters. +For [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search), you have to explicitly set which attributes you want highlighted after the `RETURN` and `HIGHLIGHT` parameters. Use the optional `TAGS` keyword to specify the strings that will surround (or highlight) the matching search terms. @@ -630,7 +630,7 @@ This example uses aggregation to calculate a 10% price discount for each item an ``` {{% alert title="Note" color="info" %}} -[`FT.AGGREGATE`](/commands/ft.aggregate) queries require `attribute` modifiers. Don't use JSONPath expressions in queries, except with the `LOAD` option, because the query parser doesn't fully support them. +[`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) queries require `attribute` modifiers. Don't use JSONPath expressions in queries, except with the `LOAD` option, because the query parser doesn't fully support them. 
{{% /alert %}} ## Index limitations diff --git a/content/develop/interact/search-and-query/query/_index.md b/content/develop/interact/search-and-query/query/_index.md index e07dc795ab..2e984bc123 100644 --- a/content/develop/interact/search-and-query/query/_index.md +++ b/content/develop/interact/search-and-query/query/_index.md @@ -16,7 +16,7 @@ title: Query data weight: 5 --- -Redis Stack distinguishes between the [FT.SEARCH](/commands/ft.search/) and [FT.AGGREGATE](/commands/ft.aggregate/) query commands. You should use [FT.SEARCH](/commands/ft.search/) if you want to perform selections and projections only. If you also need to apply mapping functions, group, or aggregate data, use the [FT.AGGREGATE](/commands/ft.aggregate/) command. +Redis Stack distinguishes between the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) and [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) query commands. You should use [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) if you want to perform selections and projections only. If you also need to apply mapping functions, group, or aggregate data, use the [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) command. * **Selection**: A selection allows you to return all documents that fulfill specific criteria. * **Projection**: Projections are used to return specific fields of the result set. You can also map/project to calculated field values. 
@@ -31,7 +31,7 @@ Here is a short SQL comparison using the [bicycle dataset](./data/bicycles.txt): | Calculated projection| `SELECT id, price-price*0.1 AS discounted FROM bicycles`| `FT.AGGREGATE idx:bicycle "*" LOAD 2 __key price APPLY "@price-@price*0.1" AS discounted`| | Aggregation | `SELECT condition, AVG(price) AS avg_price FROM bicycles GROUP BY condition` | `FT.AGGREGATE idx:bicycle "*" GROUPBY 1 @condition REDUCE AVG 1 @price AS avg_price` | -The following articles provide an overview of how to query data with the [FT.SEARCH](/commands/ft.search/) command: +The following articles provide an overview of how to query data with the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command: * [Exact match queries]({{< relref "/develop/interact/search-and-query/query/exact-match" >}}) * [Range queries]({{< relref "/develop/interact/search-and-query/query/range" >}}) @@ -40,6 +40,6 @@ The following articles provide an overview of how to query data with the [FT.SEA * [Vector search]({{< relref "/develop/interact/search-and-query/query/vector-search" >}}) * [Combined queries]({{< relref "/develop/interact/search-and-query/query/combined" >}}) -You can find further details about aggregation queries with [FT.AGGREGATE](/commands/ft.aggregate/) in the following article: +You can find further details about aggregation queries with [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) in the following article: * [Aggregation queries]({{< relref "/develop/interact/search-and-query/query/aggregation" >}}) \ No newline at end of file diff --git a/content/develop/interact/search-and-query/query/aggregation.md b/content/develop/interact/search-and-query/query/aggregation.md index d13c37a718..1e2efc15de 100644 --- a/content/develop/interact/search-and-query/query/aggregation.md +++ b/content/develop/interact/search-and-query/query/aggregation.md @@ -21,7 +21,7 @@ An aggregation query allows you to perform the following actions: - Group data based on field values. 
- Apply aggregation functions on the grouped data. -This article explains the basic usage of the [FT.AGGREGATE](/commands/ft.aggregate/) command. For further details, see the [command specification](/commands/ft.aggregate/) and the [aggregations reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}}). +This article explains the basic usage of the [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) command. For further details, see the [command specification]({{< baseurl >}}/commands/ft.aggregate/) and the [aggregations reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}}). The examples in this article use a schema with the following fields: @@ -40,7 +40,7 @@ FT.AGGREGATE index "query_expr" LOAD n "field_1" .. "field_n" APPLY "function_ex Here is a more detailed explanation of the query syntax: -1. **Query expression**: you can use the same query expressions as you would use with the [`FT.SEARCH`](/commands/ft.search) command. You can substitute `query_expr` with any of the expressions explained in the articles of this [query topic]({{< relref "/develop/interact/search-and-query/query/" >}}). Vector search queries are an exception. You can't combine a vector search with an aggregation query. +1. **Query expression**: you can use the same query expressions as you would use with the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command. You can substitute `query_expr` with any of the expressions explained in the articles of this [query topic]({{< relref "/develop/interact/search-and-query/query/" >}}). Vector search queries are an exception. You can't combine a vector search with an aggregation query. 2. **Loaded fields**: if field values weren't already loaded into the aggregation pipeline, you can force their presence via the `LOAD` clause. This clause takes the number of fields (`n`), followed by the field names (`"field_1" .. "field_n"`). 3. 
**Mapping function**: this mapping function operates on the field values. A specific field is referenced as `@field_name` within the function expression. The result is returned as `result_field`. @@ -113,7 +113,7 @@ FT.AGGREGATE idx:bicycle "*" LOAD 1 price APPLY "@price<1000" AS price_category ``` {{% alert title="Note" color="warning" %}} -You can also create more complex aggregation pipelines with [FT.AGGREGATE](/commands/ft.aggregate/). Applying multiple reduction functions under one `GROUPBY` clause is possible. In addition, you can also chain groupings and mix in additional mapping steps (e.g., `GROUPBY ... REDUCE ... APPLY ... GROUPBY ... REDUCE`) +You can also create more complex aggregation pipelines with [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/). Applying multiple reduction functions under one `GROUPBY` clause is possible. In addition, you can also chain groupings and mix in additional mapping steps (e.g., `GROUPBY ... REDUCE ... APPLY ... GROUPBY ... REDUCE`) {{% /alert %}} diff --git a/content/develop/interact/search-and-query/query/combined.md b/content/develop/interact/search-and-query/query/combined.md index 4075b896f4..075c6ad0cb 100644 --- a/content/develop/interact/search-and-query/query/combined.md +++ b/content/develop/interact/search-and-query/query/combined.md @@ -26,7 +26,7 @@ A combined query is a combination of several query types, such as: You can use logical query operators to combine query expressions for numeric, tag, and text fields. For vector fields, you can combine a KNN query with a pre-filter. {{% alert title="Note" color="warning" %}} -The operators are interpreted slightly differently depending on the query dialect used. The default dialect is `DIALECT 1`; see [this article]({{< relref "/develop/interact/search-and-query/basic-constructs/configuration-parameters" >}}) for information on how to change the dialect version. 
This article uses the second version of the query dialect, `DIALECT 2`, and uses additional brackets (`(...)`) to help clarify the examples. Further details can be found in the [query syntax documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/query_syntax" >}}). +The operators are interpreted slightly differently depending on the query dialect used. The default dialect is `DIALECT 1`; see [this article]({{< baseurl >}}/develop/interact/search-and-query/basic-constructs/configuration-parameters#default_dialect) for information on how to change the dialect version. This article uses the second version of the query dialect, `DIALECT 2`, and uses additional brackets (`(...)`) to help clarify the examples. Further details can be found in the [query syntax documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/query_syntax" >}}). {{% /alert %}} The examples in this article use the following schema: @@ -121,7 +121,7 @@ FT.SEARCH idx:bicycle "@price:[500 1000] -@condition:{new}" ## Numeric filter -The [FT.SEARCH](/commands/ft.search/) command allows you to combine any query expression with a numeric filter. +The [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command allows you to combine any query expression with a numeric filter. ``` FT.SEARCH index "expr" FILTER numeric_field start end diff --git a/content/develop/interact/search-and-query/query/geo-spatial.md b/content/develop/interact/search-and-query/query/geo-spatial.md index aba54ead78..13539a1901 100644 --- a/content/develop/interact/search-and-query/query/geo-spatial.md +++ b/content/develop/interact/search-and-query/query/geo-spatial.md @@ -31,7 +31,7 @@ Redis Stack version 7.2.0 or higher is required to use the `GEOSHAPE` field type ## Radius -You can construct a radius query by passing the center coordinates (longitude, latitude), the radius, and the distance unit to the [FT.SEARCH](/commands/ft.search/) command. 
+You can construct a radius query by passing the center coordinates (longitude, latitude), the radius, and the distance unit to the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command. ``` FT.SEARCH index "@geo_field:[lon lat radius unit]" ``` diff --git a/content/develop/interact/search-and-query/query/range.md b/content/develop/interact/search-and-query/query/range.md index c510f88ae2..0e6e672938 100644 --- a/content/develop/interact/search-and-query/query/range.md +++ b/content/develop/interact/search-and-query/query/range.md @@ -37,13 +37,13 @@ The values `-inf`, `inf`, and `+inf` are valid values that allow you to define o An open-range query can lead to a large result set. -By default, [`FT.SEARCH`](/commands/ft.search) returns only the first ten results. The `LIMIT` argument helps you to scroll through the result set. The `SORTBY` argument ensures that the documents in the result set are returned in the specified order. +By default, [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) returns only the first ten results. The `LIMIT` argument helps you to scroll through the result set. The `SORTBY` argument ensures that the documents in the result set are returned in the specified order. ``` FT.SEARCH index "@field:[start end]" SORTBY field LIMIT page_start page_end ``` -You can find further details about using the `LIMIT` and `SORTBY` in the [[`FT.SEARCH`](/commands/ft.search) command reference](/commands/ft.search/). +You can find further details about using the `LIMIT` and `SORTBY` arguments in the [`FT.SEARCH` command reference]({{< baseurl >}}/commands/ft.search/). 
## Examples diff --git a/content/develop/interact/search-and-query/query/vector-search.md b/content/develop/interact/search-and-query/query/vector-search.md index f5cf678e81..344c18b09b 100644 --- a/content/develop/interact/search-and-query/query/vector-search.md +++ b/content/develop/interact/search-and-query/query/vector-search.md @@ -28,7 +28,7 @@ The examples in this article use a schema with the following fields: ## K-nearest neighbours (KNN) -The Redis command [FT.SEARCH](/commands/ft.search/) takes the index name, the query string, and additional query parameters as arguments. You need to pass the number of nearest neighbors, the vector field name, and the vector's binary representation in the following way: +The Redis command [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) takes the index name, the query string, and additional query parameters as arguments. You need to pass the number of nearest neighbors, the vector field name, and the vector's binary representation in the following way: ``` FT.SEARCH index "(*)=>[KNN num_neighbours @field $vector]" PARAMS 2 vector "binary_data" DIALECT 2 @@ -42,7 +42,7 @@ Here is a more detailed explanation of this query: 4. **Vector binary data**: You need to use the `PARAMS` argument to substitute `$vector` with the binary representation of the vector. The value `2` indicates that `PARAMS` is followed by two arguments, the parameter name `vector` and the parameter value. 5. **Dialect**: The vector search feature has been available since version two of the query dialect. -You can read more about the `PARAMS` argument in the [FT.SEARCH](/commands/ft.search/) command reference. +You can read more about the `PARAMS` argument in the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command reference. The following example shows you how to query for three bikes based on their description embeddings, and by using the field alias `vector`. The result is returned in ascending order based on the distance. 
You can see that the query only returns the fields `__vector_score` and `description`. The field `__vector_score` is present by default. Because you can have multiple vector fields in your schema, the vector score field name depends on the name of the vector field. If you change the field name `@vector` to `@foo`, the score field name changes to `__foo_score`. @@ -88,7 +88,7 @@ Here is a more detailed explanation of this query: {{% alert title="Note" color="warning" %}} -By default, [`FT.SEARCH`](/commands/ft.search) returns only the first ten results. The [range query article]({{< relref "/develop/interact/search-and-query/query/range" >}}) explains to you how to scroll through the result set. +By default, [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) returns only the first ten results. The [range query article]({{< relref "/develop/interact/search-and-query/query/range" >}}) explains to you how to scroll through the result set. {{% /alert %}} The example below shows a radius query that returns the description and the distance within a radius of `0.5`. The result is sorted by the distance. diff --git a/content/develop/interact/transactions.md b/content/develop/interact/transactions.md index 1dbeb2c542..1f38b5823d 100644 --- a/content/develop/interact/transactions.md +++ b/content/develop/interact/transactions.md @@ -17,7 +17,7 @@ weight: 30 Redis Transactions allow the execution of a group of commands in a single step, they are centered around the commands -[`MULTI`](/commands/multi), [`EXEC`](/commands/exec), [`DISCARD`](/commands/discard) and [`WATCH`](/commands/watch). +[`MULTI`]({{< relref "/commands/multi" >}}), [`EXEC`]({{< relref "/commands/exec" >}}), [`DISCARD`]({{< relref "/commands/discard" >}}) and [`WATCH`]({{< relref "/commands/watch" >}}). Redis Transactions make two important guarantees: * All the commands in a transaction are serialized and executed @@ -26,11 +26,11 @@ served **in the middle** of the execution of a Redis Transaction. 
This guarantees that the commands are executed as a single isolated operation. -* The [`EXEC`](/commands/exec) command +* The [`EXEC`]({{< relref "/commands/exec" >}}) command triggers the execution of all the commands in the transaction, so if a client loses the connection to the server in the context of a -transaction before calling the [`EXEC`](/commands/exec) command none of the operations -are performed, instead if the [`EXEC`](/commands/exec) command is called, all the +transaction before calling the [`EXEC`]({{< relref "/commands/exec" >}}) command none of the operations +are performed, instead if the [`EXEC`]({{< relref "/commands/exec" >}}) command is called, all the operations are performed. When using the [append-only file](/topics/persistence#append-only-file) Redis makes sure to use a single write(2) syscall to write the transaction on disk. @@ -48,12 +48,12 @@ This is documented [later](#cas) on this page. ## Usage -A Redis Transaction is entered using the [`MULTI`](/commands/multi) command. The command +A Redis Transaction is entered using the [`MULTI`]({{< relref "/commands/multi" >}}) command. The command always replies with `OK`. At this point the user can issue multiple commands. Instead of executing these commands, Redis will queue -them. All the commands are executed once [`EXEC`](/commands/exec) is called. +them. All the commands are executed once [`EXEC`]({{< relref "/commands/exec" >}}) is called. -Calling [`DISCARD`](/commands/discard) instead will flush the transaction queue and will exit +Calling [`DISCARD`]({{< relref "/commands/discard" >}}) instead will flush the transaction queue and will exit the transaction. The following example increments keys `foo` and `bar` atomically. 
@@ -70,37 +70,37 @@ QUEUED 2) (integer) 1 ``` -As is clear from the session above, [`EXEC`](/commands/exec) returns an +As is clear from the session above, [`EXEC`]({{< relref "/commands/exec" >}}) returns an array of replies, where every element is the reply of a single command in the transaction, in the same order the commands were issued. -When a Redis connection is in the context of a [`MULTI`](/commands/multi) request, +When a Redis connection is in the context of a [`MULTI`]({{< relref "/commands/multi" >}}) request, all commands will reply with the string `QUEUED` (sent as a Status Reply from the point of view of the Redis protocol). A queued command is -simply scheduled for execution when [`EXEC`](/commands/exec) is called. +simply scheduled for execution when [`EXEC`]({{< relref "/commands/exec" >}}) is called. ## Errors inside a transaction During a transaction it is possible to encounter two kind of command errors: -* A command may fail to be queued, so there may be an error before [`EXEC`](/commands/exec) is called. +* A command may fail to be queued, so there may be an error before [`EXEC`]({{< relref "/commands/exec" >}}) is called. For instance the command may be syntactically wrong (wrong number of arguments, wrong command name, ...), or there may be some critical condition like an out of memory condition (if the server is configured to have a memory limit using the `maxmemory` directive). -* A command may fail *after* [`EXEC`](/commands/exec) is called, for instance since we performed +* A command may fail *after* [`EXEC`]({{< relref "/commands/exec" >}}) is called, for instance since we performed an operation against a key with the wrong value (like calling a list operation against a string value). Starting with Redis 2.6.5, the server will detect an error during the accumulation of commands. -It will then refuse to execute the transaction returning an error during [`EXEC`](/commands/exec), discarding the transaction. 
+It will then refuse to execute the transaction returning an error during [`EXEC`]({{< relref "/commands/exec" >}}), discarding the transaction. -> **Note for Redis < 2.6.5:** Prior to Redis 2.6.5 clients needed to detect errors occurring prior to [`EXEC`](/commands/exec) by checking +> **Note for Redis < 2.6.5:** Prior to Redis 2.6.5 clients needed to detect errors occurring prior to [`EXEC`]({{< relref "/commands/exec" >}}) by checking the return value of the queued command: if the command replies with QUEUED it was queued correctly, otherwise Redis returns an error. If there is an error while queueing a command, most clients will abort and discard the transaction. Otherwise, if the client elected to proceed with the transaction -the [`EXEC`](/commands/exec) command would execute all commands queued successfully regardless of previous errors. +the [`EXEC`]({{< relref "/commands/exec" >}}) command would execute all commands queued successfully regardless of previous errors. -Errors happening *after* [`EXEC`](/commands/exec) instead are not handled in a special way: +Errors happening *after* [`EXEC`]({{< relref "/commands/exec" >}}) instead are not handled in a special way: all the other commands will be executed even if some command fails during the transaction. This is more clear on the protocol level. In the following example one @@ -122,7 +122,7 @@ EXEC -WRONGTYPE Operation against a key holding the wrong kind of value ``` -[`EXEC`](/commands/exec) returned two-element [bulk string reply](/topics/protocol#bulk-string-reply) where one is an `OK` code and +[`EXEC`]({{< relref "/commands/exec" >}}) returned two-element [bulk string reply](/topics/protocol#bulk-string-reply) where one is an `OK` code and the other an error reply. It's up to the client library to find a sensible way to provide the error to the user. 
@@ -140,7 +140,7 @@ INCR a b c -ERR wrong number of arguments for 'incr' command ``` -This time due to the syntax error the bad [`INCR`](/commands/incr) command is not queued +This time due to the syntax error the bad [`INCR`]({{< relref "/commands/incr" >}}) command is not queued at all. ## What about rollbacks? @@ -150,7 +150,7 @@ would have a significant impact on the simplicity and performance of Redis. ## Discarding the command queue -[`DISCARD`](/commands/discard) can be used in order to abort a transaction. In this case, no +[`DISCARD`]({{< relref "/commands/discard" >}}) can be used in order to abort a transaction. In this case, no commands are executed and the state of the connection is restored to normal. @@ -170,16 +170,16 @@ OK ## Optimistic locking using check-and-set -[`WATCH`](/commands/watch) is used to provide a check-and-set (CAS) behavior to Redis +[`WATCH`]({{< relref "/commands/watch" >}}) is used to provide a check-and-set (CAS) behavior to Redis transactions. -[`WATCH`](/commands/watch)ed keys are monitored in order to detect changes against them. If -at least one watched key is modified before the [`EXEC`](/commands/exec) command, the -whole transaction aborts, and [`EXEC`](/commands/exec) returns a [Null reply](/topics/protocol#nil-reply) to notify that +[`WATCH`]({{< relref "/commands/watch" >}})ed keys are monitored in order to detect changes against them. If +at least one watched key is modified before the [`EXEC`]({{< relref "/commands/exec" >}}) command, the +whole transaction aborts, and [`EXEC`]({{< relref "/commands/exec" >}}) returns a [Null reply](/topics/protocol#nil-reply) to notify that the transaction failed. For example, imagine we have the need to atomically increment the value -of a key by 1 (let's suppose Redis doesn't have [`INCR`](/commands/incr)). +of a key by 1 (let's suppose Redis doesn't have [`INCR`]({{< relref "/commands/incr" >}})). 
The first try may be the following: @@ -193,10 +193,10 @@ This will work reliably only if we have a single client performing the operation in a given time. If multiple clients try to increment the key at about the same time there will be a race condition. For instance, client A and B will read the old value, for instance, 10. The value will -be incremented to 11 by both the clients, and finally [`SET`](/commands/set) as the value +be incremented to 11 by both the clients, and finally [`SET`]({{< relref "/commands/set" >}}) as the value of the key. So the final value will be 11 instead of 12. -Thanks to [`WATCH`](/commands/watch) we are able to model the problem very well: +Thanks to [`WATCH`]({{< relref "/commands/watch" >}}) we are able to model the problem very well: ``` WATCH mykey @@ -208,8 +208,8 @@ EXEC ``` Using the above code, if there are race conditions and another client -modifies the result of `val` in the time between our call to [`WATCH`](/commands/watch) and -our call to [`EXEC`](/commands/exec), the transaction will fail. +modifies the result of `val` in the time between our call to [`WATCH`]({{< relref "/commands/watch" >}}) and +our call to [`EXEC`]({{< relref "/commands/exec" >}}), the transaction will fail. We just have to repeat the operation hoping this time we'll not get a new race. This form of locking is called _optimistic locking_. @@ -218,42 +218,42 @@ so collisions are unlikely – usually there's no need to repeat the operation. ## WATCH explained -So what is [`WATCH`](/commands/watch) really about? It is a command that will -make the [`EXEC`](/commands/exec) conditional: we are asking Redis to perform -the transaction only if none of the [`WATCH`](/commands/watch)ed keys were modified. This includes +So what is [`WATCH`]({{< relref "/commands/watch" >}}) really about? 
It is a command that will +make the [`EXEC`]({{< relref "/commands/exec" >}}) conditional: we are asking Redis to perform +the transaction only if none of the [`WATCH`]({{< relref "/commands/watch" >}})ed keys were modified. This includes modifications made by the client, like write commands, and by Redis itself, like expiration or eviction. If keys were modified between when they were -[`WATCH`](/commands/watch)ed and when the [`EXEC`](/commands/exec) was received, the entire transaction will be aborted +[`WATCH`]({{< relref "/commands/watch" >}})ed and when the [`EXEC`]({{< relref "/commands/exec" >}}) was received, the entire transaction will be aborted instead. **NOTE** * In Redis versions before 6.0.9, an expired key would not cause a transaction to be aborted. [More on this](https://github.com/redis/redis/pull/7920) -* Commands within a transaction won't trigger the [`WATCH`](/commands/watch) condition since they -are only queued until the [`EXEC`](/commands/exec) is sent. +* Commands within a transaction won't trigger the [`WATCH`]({{< relref "/commands/watch" >}}) condition since they +are only queued until the [`EXEC`]({{< relref "/commands/exec" >}}) is sent. -[`WATCH`](/commands/watch) can be called multiple times. Simply all the [`WATCH`](/commands/watch) calls will +[`WATCH`]({{< relref "/commands/watch" >}}) can be called multiple times. Simply all the [`WATCH`]({{< relref "/commands/watch" >}}) calls will have the effects to watch for changes starting from the call, up to -the moment [`EXEC`](/commands/exec) is called. You can also send any number of keys to a -single [`WATCH`](/commands/watch) call. +the moment [`EXEC`]({{< relref "/commands/exec" >}}) is called. You can also send any number of keys to a +single [`WATCH`]({{< relref "/commands/watch" >}}) call. 
-When [`EXEC`](/commands/exec) is called, all keys are [`UNWATCH`](/commands/unwatch)ed, regardless of whether +When [`EXEC`]({{< relref "/commands/exec" >}}) is called, all keys are [`UNWATCH`]({{< relref "/commands/unwatch" >}})ed, regardless of whether the transaction was aborted or not. Also when a client connection is -closed, everything gets [`UNWATCH`](/commands/unwatch)ed. +closed, everything gets [`UNWATCH`]({{< relref "/commands/unwatch" >}})ed. -It is also possible to use the [`UNWATCH`](/commands/unwatch) command (without arguments) +It is also possible to use the [`UNWATCH`]({{< relref "/commands/unwatch" >}}) command (without arguments) in order to flush all the watched keys. Sometimes this is useful as we optimistically lock a few keys, since possibly we need to perform a transaction to alter those keys, but after reading the current content of the keys we don't want to proceed. When this happens we just call -[`UNWATCH`](/commands/unwatch) so that the connection can already be used freely for new +[`UNWATCH`]({{< relref "/commands/unwatch" >}}) so that the connection can already be used freely for new transactions. ### Using WATCH to implement ZPOP -A good example to illustrate how [`WATCH`](/commands/watch) can be used to create new +A good example to illustrate how [`WATCH`]({{< relref "/commands/watch" >}}) can be used to create new atomic operations otherwise not supported by Redis is to implement ZPOP -([`ZPOPMIN`](/commands/zpopmin), [`ZPOPMAX`](/commands/zpopmax) and their blocking variants have only been added +([`ZPOPMIN`]({{< relref "/commands/zpopmin" >}}), [`ZPOPMAX`]({{< relref "/commands/zpopmax" >}}) and their blocking variants have only been added in version 5.0), that is a command that pops the element with the lower score from a sorted set in an atomic way. This is the simplest implementation: @@ -266,11 +266,11 @@ ZREM zset element EXEC ``` -If [`EXEC`](/commands/exec) fails (i.e. 
returns a [Null reply](/topics/protocol#nil-reply)) we just repeat the operation. +If [`EXEC`]({{< relref "/commands/exec" >}}) fails (i.e. returns a [Null reply](/topics/protocol#nil-reply)) we just repeat the operation. ## Redis scripting and transactions Something else to consider for transaction like operations in redis are -[redis scripts](/commands/eval) which are transactional. Everything +[redis scripts]({{< relref "/commands/eval" >}}) which are transactional. Everything you can do with a Redis Transaction, you can also do with a script, and usually the script will be both simpler and faster. diff --git a/content/develop/manual/_index.md b/content/develop/manual/_index.md deleted file mode 100644 index 33b9f80dbd..0000000000 --- a/content/develop/manual/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: A developer's guide to Redis -linkTitle: Use Redis -title: Use Redis -weight: 50 ---- diff --git a/content/develop/manual/client-side-caching.md b/content/develop/manual/client-side-caching.md deleted file mode 100644 index 1e0e472971..0000000000 --- a/content/develop/manual/client-side-caching.md +++ /dev/null @@ -1,344 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'Server-assisted, client-side caching in Redis - - ' -linkTitle: Client-side caching -title: Client-side caching in Redis -weight: 2 ---- - -Client-side caching is a technique used to create high performance services. -It exploits the memory available on application servers, servers that are -usually distinct computers compared to the database nodes, to store some subset -of the database information directly in the application side. 
- -Normally when data is required, the application servers ask the database about -such information, like in the following diagram: - - - +-------------+ +----------+ - | | ------- GET user:1234 -------> | | - | Application | | Database | - | | <---- username = Alice ------- | | - +-------------+ +----------+ - -When client-side caching is used, the application will store the reply of -popular queries directly inside the application memory, so that it can -reuse such replies later, without contacting the database again: - - +-------------+ +----------+ - | | | | - | Application | ( No chat needed ) | Database | - | | | | - +-------------+ +----------+ - | Local cache | - | | - | user:1234 = | - | username | - | Alice | - +-------------+ - -While the application memory used for the local cache may not be very big, -the time needed in order to access the local computer memory is orders of -magnitude smaller compared to accessing a networked service like a database. -Since often the same small percentage of data are accessed frequently, -this pattern can greatly reduce the latency for the application to get data -and, at the same time, the load in the database side. - -Moreover there are many datasets where items change very infrequently. -For instance, most user posts in a social network are either immutable or -rarely edited by the user. Adding to this the fact that usually a small -percentage of the posts are very popular, either because a small set of users -have a lot of followers and/or because recent posts have a lot more -visibility, it is clear why such a pattern can be very useful. - -Usually the two key advantages of client-side caching are: - -1. Data is available with a very small latency. -2. The database system receives less queries, allowing it to serve the same dataset with a smaller number of nodes. - -## There are two hard problems in computer science... 
- -A problem with the above pattern is how to invalidate the information that -the application is holding, in order to avoid presenting stale data to the -user. For example after the application above locally cached the information -for user:1234, Alice may update her username to Flora. Yet the application -may continue to serve the old username for user:1234. - -Sometimes, depending on the exact application we are modeling, this isn't a -big deal, so the client will just use a fixed maximum "time to live" for the -cached information. Once a given amount of time has elapsed, the information -will no longer be considered valid. More complex patterns, when using Redis, -leverage the Pub/Sub system in order to send invalidation messages to -listening clients. This can be made to work but is tricky and costly from -the point of view of the bandwidth used, because often such patterns involve -sending the invalidation messages to every client in the application, even -if certain clients may not have any copy of the invalidated data. Moreover -every application query altering the data requires to use the [`PUBLISH`](/commands/publish) -command, costing the database more CPU time to process this command. - -Regardless of what schema is used, there is a simple fact: many very large -applications implement some form of client-side caching, because it is the -next logical step to having a fast store or a fast cache server. For this -reason Redis 6 implements direct support for client-side caching, in order -to make this pattern much simpler to implement, more accessible, reliable, -and efficient. - -## The Redis implementation of client-side caching - -The Redis client-side caching support is called _Tracking_, and has two modes: - -* In the default mode, the server remembers what keys a given client accessed, and sends invalidation messages when the same keys are modified. 
This costs memory in the server side, but sends invalidation messages only for the set of keys that the client might have in memory. -* In the _broadcasting_ mode, the server does not attempt to remember what keys a given client accessed, so this mode costs no memory at all in the server side. Instead clients subscribe to key prefixes such as `object:` or `user:`, and receive a notification message every time a key matching a subscribed prefix is touched. - -To recap, for now let's forget for a moment about the broadcasting mode, to -focus on the first mode. We'll describe broadcasting in more detail later. - -1. Clients can enable tracking if they want. Connections start without tracking enabled. -2. When tracking is enabled, the server remembers what keys each client requested during the connection lifetime (by sending read commands about such keys). -3. When a key is modified by some client, or is evicted because it has an associated expire time, or evicted because of a _maxmemory_ policy, all the clients with tracking enabled that may have the key cached, are notified with an _invalidation message_. -4. When clients receive invalidation messages, they are required to remove the corresponding keys, in order to avoid serving stale data. - -This is an example of the protocol: - -* Client 1 `->` Server: CLIENT TRACKING ON -* Client 1 `->` Server: GET foo -* (The server remembers that Client 1 may have the key "foo" cached) -* (Client 1 may remember the value of "foo" inside its local memory) -* Client 2 `->` Server: SET foo SomeOtherValue -* Server `->` Client 1: INVALIDATE "foo" - -This looks great superficially, but if you imagine 10k connected clients all -asking for millions of keys over long living connection, the server ends up -storing too much information. 
For this reason Redis uses two key ideas in -order to limit the amount of memory used server-side and the CPU cost of -handling the data structures implementing the feature: - -* The server remembers the list of clients that may have cached a given key in a single global table. This table is called the **Invalidation Table**. The invalidation table can contain a maximum number of entries. If a new key is inserted, the server may evict an older entry by pretending that such key was modified (even if it was not), and sending an invalidation message to the clients. Doing so, it can reclaim the memory used for this key, even if this will force the clients having a local copy of the key to evict it. -* Inside the invalidation table we don't really need to store pointers to clients' structures, that would force a garbage collection procedure when the client disconnects: instead what we do is just store client IDs (each Redis client has a unique numerical ID). If a client disconnects, the information will be incrementally garbage collected as caching slots are invalidated. -* There is a single keys namespace, not divided by database numbers. So if a client is caching the key `foo` in database 2, and some other client changes the value of the key `foo` in database 3, an invalidation message will still be sent. This way we can ignore database numbers reducing both the memory usage and the implementation complexity. - -## Two connections mode - -Using the new version of the Redis protocol, RESP3, supported by Redis 6, it is possible to run the data queries and receive the invalidation messages in the same connection. However many client implementations may prefer to implement client-side caching using two separated connections: one for data, and one for invalidation messages. For this reason when a client enables tracking, it can specify to redirect the invalidation messages to another connection by specifying the "client ID" of a different connection. 
Many data connections can redirect invalidation messages to the same connection, this is useful for clients implementing connection pooling. The two connections model is the only one that is also supported for RESP2 (which lacks the ability to multiplex different kind of information in the same connection). - -Here's an example of a complete session using the Redis protocol in the old RESP2 mode involving the following steps: enabling tracking redirecting to another connection, asking for a key, and getting an invalidation message once the key gets modified. - -To start, the client opens a first connection that will be used for invalidations, requests the connection ID, and subscribes via Pub/Sub to the special channel that is used to get invalidation messages when in RESP2 modes (remember that RESP2 is the usual Redis protocol, and not the more advanced protocol that you can use, optionally, with Redis 6 using the [`HELLO`](/commands/hello) command): - -``` -(Connection 1 -- used for invalidations) -CLIENT ID -:4 -SUBSCRIBE __redis__:invalidate -*3 -$9 -subscribe -$20 -__redis__:invalidate -:1 -``` - -Now we can enable tracking from the data connection: - -``` -(Connection 2 -- data connection) -CLIENT TRACKING on REDIRECT 4 -+OK - -GET foo -$3 -bar -``` - -The client may decide to cache `"foo" => "bar"` in the local memory. - -A different client will now modify the value of the "foo" key: - -``` -(Some other unrelated connection) -SET foo bar -+OK -``` - -As a result, the invalidations connection will receive a message that invalidates the specified key. - -``` -(Connection 1 -- used for invalidations) -*3 -$7 -message -$20 -__redis__:invalidate -*1 -$3 -foo -``` -The client will check if there are cached keys in this caching slot, and will evict the information that is no longer valid. - -Note that the third element of the Pub/Sub message is not a single key but -is a Redis array with just a single element. 
Since we send an array, if there -are groups of keys to invalidate, we can do that in a single message. -In case of a flush ([`FLUSHALL`](/commands/flushall) or [`FLUSHDB`](/commands/flushdb)), a `null` message will be sent. - -A very important thing to understand about client-side caching used with -RESP2 and a Pub/Sub connection in order to read the invalidation messages, -is that using Pub/Sub is entirely a trick **in order to reuse old client -implementations**, but actually the message is not really sent to a channel -and received by all the clients subscribed to it. Only the connection we -specified in the `REDIRECT` argument of the [`CLIENT`](/commands/client) command will actually -receive the Pub/Sub message, making the feature a lot more scalable. - -When RESP3 is used instead, invalidation messages are sent (either in the -same connection, or in the secondary connection when redirection is used) -as `push` messages (read the RESP3 specification for more information). - -## What tracking tracks - -As you can see clients do not need, by default, to tell the server what keys -they are caching. Every key that is mentioned in the context of a read-only -command is tracked by the server, because it *could be cached*. - -This has the obvious advantage of not requiring the client to tell the server -what it is caching. Moreover in many clients implementations, this is what -you want, because a good solution could be to just cache everything that is not -already cached, using a first-in first-out approach: we may want to cache a -fixed number of objects, every new data we retrieve, we could cache it, -discarding the oldest cached object. More advanced implementations may instead -drop the least used object or alike. - -Note that anyway if there is write traffic on the server, caching slots -will get invalidated during the course of the time. In general when the -server assumes that what we get we also cache, we are making a tradeoff: - -1. 
It is more efficient when the client tends to cache many things with a policy that welcomes new objects. -2. The server will be forced to retain more data about the client keys. -3. The client will receive useless invalidation messages about objects it did not cache. - -So there is an alternative described in the next section. - -## Opt-in caching - -Clients implementations may want to cache only selected keys, and communicate -explicitly to the server what they'll cache and what they will not. This will -require more bandwidth when caching new objects, but at the same time reduces -the amount of data that the server has to remember and the amount of -invalidation messages received by the client. - -In order to do this, tracking must be enabled using the OPTIN option: - - CLIENT TRACKING on REDIRECT 1234 OPTIN - -In this mode, by default, keys mentioned in read queries *are not supposed to be cached*, instead when a client wants to cache something, it must send a special command immediately before the actual command to retrieve the data: - - CLIENT CACHING YES - +OK - GET foo - "bar" - -The `CACHING` command affects the command executed immediately after it, -however in case the next command is [`MULTI`](/commands/multi), all the commands in the -transaction will be tracked. Similarly in case of Lua scripts, all the -commands executed by the script will be tracked. - -## Broadcasting mode - -So far we described the first client-side caching model that Redis implements. -There is another one, called broadcasting, that sees the problem from the -point of view of a different tradeoff, does not consume any memory on the -server side, but instead sends more invalidation messages to clients. -In this mode we have the following main behaviors: - -* Clients enable client-side caching using the `BCAST` option, specifying one or more prefixes using the `PREFIX` option. For instance: `CLIENT TRACKING on REDIRECT 10 BCAST PREFIX object: PREFIX user:`. 
If no prefix is specified at all, the prefix is assumed to be the empty string, so the client will receive invalidation messages for every key that gets modified. Instead if one or more prefixes are used, only keys matching one of the specified prefixes will be sent in the invalidation messages. -* The server does not store anything in the invalidation table. Instead it uses a different **Prefixes Table**, where each prefix is associated to a list of clients. -* No two prefixes can track overlapping parts of the keyspace. For instance, having the prefix "foo" and "foob" would not be allowed, since they would both trigger an invalidation for the key "foobar". However, just using the prefix "foo" is sufficient. -* Every time a key matching any of the prefixes is modified, all the clients subscribed to that prefix, will receive the invalidation message. -* The server will consume CPU proportional to the number of registered prefixes. If you have just a few, it is hard to see any difference. With a big number of prefixes the CPU cost can become quite large. -* In this mode the server can perform the optimization of creating a single reply for all the clients subscribed to a given prefix, and send the same reply to all. This helps to lower the CPU usage. - -## The NOLOOP option - -By default client-side tracking will send invalidation messages to the -client that modified the key. Sometimes clients want this, since they -implement very basic logic that does not involve automatically caching -writes locally. However, more advanced clients may want to cache even the -writes they are doing in the local in-memory table. In such case receiving -an invalidation message immediately after the write is a problem, since it -will force the client to evict the value it just cached. - -In this case it is possible to use the `NOLOOP` option: it works both -in normal and broadcasting mode. 
Using this option, clients are able to -tell the server they don't want to receive invalidation messages for keys -that they modified. - -## Avoiding race conditions - -When implementing client-side caching redirecting the invalidation messages -to a different connection, you should be aware that there is a possible -race condition. See the following example interaction, where we'll call -the data connection "D" and the invalidation connection "I": - - [D] client -> server: GET foo - [I] server -> client: Invalidate foo (somebody else touched it) - [D] server -> client: "bar" (the reply of "GET foo") - -As you can see, because the reply to the GET was slower to reach the -client, we received the invalidation message before the actual data that -is already no longer valid. So we'll keep serving a stale version of the -foo key. To avoid this problem, it is a good idea to populate the cache -when we send the command with a placeholder: - - Client cache: set the local copy of "foo" to "caching-in-progress" - [D] client-> server: GET foo. - [I] server -> client: Invalidate foo (somebody else touched it) - Client cache: delete "foo" from the local cache. - [D] server -> client: "bar" (the reply of "GET foo") - Client cache: don't set "bar" since the entry for "foo" is missing. - -Such a race condition is not possible when using a single connection for both -data and invalidation messages, since the order of the messages is always known -in that case. - -## What to do when losing connection with the server - -Similarly, if we lost the connection with the socket we use in order to -get the invalidation messages, we may end with stale data. In order to avoid -this problem, we need to do the following things: - -1. Make sure that if the connection is lost, the local cache is flushed. -2. Both when using RESP2 with Pub/Sub, or RESP3, ping the invalidation channel periodically (you can send PING commands even when the connection is in Pub/Sub mode!). 
If the connection looks broken and we are not able to receive ping backs, after a maximum amount of time, close the connection and flush the cache. - -## What to cache - -Clients may want to run internal statistics about the number of times -a given cached key was actually served in a request, to understand in the -future what is good to cache. In general: - -* We don't want to cache many keys that change continuously. -* We don't want to cache many keys that are requested very rarely. -* We want to cache keys that are requested often and change at a reasonable rate. For an example of key not changing at a reasonable rate, think of a global counter that is continuously [`INCR`](/commands/incr)emented. - -However simpler clients may just evict data using some random sampling just -remembering the last time a given cached value was served, trying to evict -keys that were not served recently. - -## Other hints for implementing client libraries - -* Handling TTLs: make sure you also request the key TTL and set the TTL in the local cache if you want to support caching keys with a TTL. -* Putting a max TTL on every key is a good idea, even if it has no TTL. This protects against bugs or connection issues that would make the client have old data in the local copy. -* Limiting the amount of memory used by clients is absolutely needed. There must be a way to evict old keys when new ones are added. - -## Limiting the amount of memory used by Redis - -Be sure to configure a suitable value for the maximum number of keys remembered by Redis or alternatively use the BCAST mode that consumes no memory at all on the Redis side. Note that the memory consumed by Redis when BCAST is not used, is proportional both to the number of keys tracked and the number of clients requesting such keys. 
- diff --git a/content/develop/manual/keyspace-notifications.md b/content/develop/manual/keyspace-notifications.md deleted file mode 100644 index 2577c62dea..0000000000 --- a/content/develop/manual/keyspace-notifications.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'Monitor changes to Redis keys and values in real time - - ' -linkTitle: Keyspace notifications -title: Redis keyspace notifications -weight: 4 ---- - -Keyspace notifications allow clients to subscribe to Pub/Sub channels in order -to receive events affecting the Redis data set in some way. - -Examples of events that can be received are: - -* All the commands affecting a given key. -* All the keys receiving an LPUSH operation. -* All the keys expiring in the database 0. - -Note: Redis Pub/Sub is *fire and forget* that is, if your Pub/Sub client disconnects, -and reconnects later, all the events delivered during the time the client was -disconnected are lost. - -### Type of events - -Keyspace notifications are implemented by sending two distinct types of events -for every operation affecting the Redis data space. For instance a [`DEL`](/commands/del) -operation targeting the key named `mykey` in database `0` will trigger -the delivering of two messages, exactly equivalent to the following two -[`PUBLISH`](/commands/publish) commands: - - PUBLISH __keyspace@0__:mykey del - PUBLISH __keyevent@0__:del mykey - -The first channel listens to all the events targeting -the key `mykey` and the other channel listens only to `del` operation -events on the key `mykey` - -The first kind of event, with `keyspace` prefix in the channel is called -a **Key-space notification**, while the second, with the `keyevent` prefix, -is called a **Key-event notification**. 
- -In the previous example a `del` event was generated for the key `mykey` resulting -in two messages: - -* The Key-space channel receives as message the name of the event. -* The Key-event channel receives as message the name of the key. - -It is possible to enable only one kind of notification in order to deliver -just the subset of events we are interested in. - -### Configuration - -By default keyspace event notifications are disabled because while not -very sensible the feature uses some CPU power. Notifications are enabled -using the `notify-keyspace-events` of redis.conf or via the **CONFIG SET**. - -Setting the parameter to the empty string disables notifications. -In order to enable the feature a non-empty string is used, composed of multiple -characters, where every character has a special meaning according to the -following table: - - K Keyspace events, published with __keyspace@__ prefix. - E Keyevent events, published with __keyevent@__ prefix. - g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... - $ String commands - l List commands - s Set commands - h Hash commands - z Sorted set commands - t Stream commands - d Module key type events - x Expired events (events generated every time a key expires) - e Evicted events (events generated when a key is evicted for maxmemory) - m Key miss events (events generated when a key that doesn't exist is accessed) - n New key events (Note: not included in the 'A' class) - A Alias for "g$lshztxed", so that the "AKE" string means all the events except "m" and "n". - -At least `K` or `E` should be present in the string, otherwise no event -will be delivered regardless of the rest of the string. - -For instance to enable just Key-space events for lists, the configuration -parameter must be set to `Kl`, and so forth. - -You can use the string `KEA` to enable most types of events. 
-
-### Events generated by different commands
-
-Different commands generate different kinds of events according to the following list.
-
-* [`DEL`](/commands/del) generates a `del` event for every deleted key.
-* [`RENAME`](/commands/rename) generates two events, a `rename_from` event for the source key, and a `rename_to` event for the destination key.
-* [`MOVE`](/commands/move) generates two events, a `move_from` event for the source key, and a `move_to` event for the destination key.
-* [`COPY`](/commands/copy) generates a `copy_to` event.
-* [`MIGRATE`](/commands/migrate) generates a `del` event if the source key is removed.
-* [`RESTORE`](/commands/restore) generates a `restore` event for the key.
-* [`EXPIRE`](/commands/expire) and all its variants ([`PEXPIRE`](/commands/pexpire), [`EXPIREAT`](/commands/expireat), [`PEXPIREAT`](/commands/pexpireat)) generate an `expire` event when called with a positive timeout (or a future timestamp). Note that when these commands are called with a negative timeout value or timestamp in the past, the key is deleted and only a `del` event is generated instead.
-* [`SORT`](/commands/sort) generates a `sortstore` event when `STORE` is used to set a new key. If the resulting list is empty, and the `STORE` option is used, and there was already an existing key with that name, the result is that the key is deleted, so a `del` event is generated in this condition.
-* [`SET`](/commands/set) and all its variants ([`SETEX`](/commands/setex), [`SETNX`](/commands/setnx), [`GETSET`](/commands/getset)) generate `set` events. However [`SETEX`](/commands/setex) will also generate an `expire` event.
-* [`MSET`](/commands/mset) generates a separate `set` event for every key.
-* [`SETRANGE`](/commands/setrange) generates a `setrange` event.
-* [`INCR`](/commands/incr), [`DECR`](/commands/decr), [`INCRBY`](/commands/incrby), [`DECRBY`](/commands/decrby) commands all generate `incrby` events. 
-* [`INCRBYFLOAT`](/commands/incrbyfloat) generates an `incrbyfloat` events. -* [`APPEND`](/commands/append) generates an `append` event. -* [`LPUSH`](/commands/lpush) and [`LPUSHX`](/commands/lpushx) generates a single `lpush` event, even in the variadic case. -* [`RPUSH`](/commands/rpush) and [`RPUSHX`](/commands/rpushx) generates a single `rpush` event, even in the variadic case. -* [`RPOP`](/commands/rpop) generates an `rpop` event. Additionally a `del` event is generated if the key is removed because the last element from the list was popped. -* [`LPOP`](/commands/lpop) generates an `lpop` event. Additionally a `del` event is generated if the key is removed because the last element from the list was popped. -* [`LINSERT`](/commands/linsert) generates an `linsert` event. -* [`LSET`](/commands/lset) generates an `lset` event. -* [`LREM`](/commands/lrem) generates an `lrem` event, and additionally a `del` event if the resulting list is empty and the key is removed. -* [`LTRIM`](/commands/ltrim) generates an `ltrim` event, and additionally a `del` event if the resulting list is empty and the key is removed. -* [`RPOPLPUSH`](/commands/rpoplpush) and [`BRPOPLPUSH`](/commands/brpoplpush) generate an `rpop` event and an `lpush` event. In both cases the order is guaranteed (the `lpush` event will always be delivered after the `rpop` event). Additionally a `del` event will be generated if the resulting list is zero length and the key is removed. -* [`LMOVE`](/commands/lmove) and [`BLMOVE`](/commands/blmove) generate an `lpop`/`rpop` event (depending on the wherefrom argument) and an `lpush`/`rpush` event (depending on the whereto argument). In both cases the order is guaranteed (the `lpush`/`rpush` event will always be delivered after the `lpop`/`rpop` event). Additionally a `del` event will be generated if the resulting list is zero length and the key is removed. 
-* [`HSET`](/commands/hset), [`HSETNX`](/commands/hsetnx) and [`HMSET`](/commands/hmset) all generate a single `hset` event.
-* [`HINCRBY`](/commands/hincrby) generates an `hincrby` event.
-* [`HINCRBYFLOAT`](/commands/hincrbyfloat) generates an `hincrbyfloat` event.
-* [`HDEL`](/commands/hdel) generates a single `hdel` event, and an additional `del` event if the resulting hash is empty and the key is removed.
-* [`SADD`](/commands/sadd) generates a single `sadd` event, even in the variadic case.
-* [`SREM`](/commands/srem) generates a single `srem` event, and an additional `del` event if the resulting set is empty and the key is removed.
-* [`SMOVE`](/commands/smove) generates an `srem` event for the source key, and an `sadd` event for the destination key.
-* [`SPOP`](/commands/spop) generates an `spop` event, and an additional `del` event if the resulting set is empty and the key is removed.
-* [`SINTERSTORE`](/commands/sinterstore), [`SUNIONSTORE`](/commands/sunionstore), [`SDIFFSTORE`](/commands/sdiffstore) generate `sinterstore`, `sunionstore`, `sdiffstore` events respectively. In the special case the resulting set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed.
-* `ZINCR` generates a `zincr` event.
-* [`ZADD`](/commands/zadd) generates a single `zadd` event even when multiple elements are added.
-* [`ZREM`](/commands/zrem) generates a single `zrem` event even when multiple elements are deleted. When the resulting sorted set is empty and the key is removed, an additional `del` event is generated.
-* `ZREMBYSCORE` generates a single `zrembyscore` event. When the resulting sorted set is empty and the key is removed, an additional `del` event is generated.
-* `ZREMBYRANK` generates a single `zrembyrank` event. When the resulting sorted set is empty and the key is removed, an additional `del` event is generated. 
-* [`ZDIFFSTORE`](/commands/zdiffstore), [`ZINTERSTORE`](/commands/zinterstore) and [`ZUNIONSTORE`](/commands/zunionstore) respectively generate `zdiffstore`, `zinterstore` and `zunionstore` events. In the special case the resulting sorted set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. -* [`XADD`](/commands/xadd) generates an `xadd` event, possibly followed an `xtrim` event when used with the `MAXLEN` subcommand. -* [`XDEL`](/commands/xdel) generates a single `xdel` event even when multiple entries are deleted. -* [`XGROUP CREATE`](/commands/xgroup-create) generates an `xgroup-create` event. -* [`XGROUP CREATECONSUMER`](/commands/xgroup-createconsumer) generates an `xgroup-createconsumer` event. -* [`XGROUP DELCONSUMER`](/commands/xgroup-delconsumer) generates an `xgroup-delconsumer` event. -* [`XGROUP DESTROY`](/commands/xgroup-destroy) generates an `xgroup-destroy` event. -* [`XGROUP SETID`](/commands/xgroup-setid) generates an `xgroup-setid` event. -* [`XSETID`](/commands/xsetid) generates an `xsetid` event. -* [`XTRIM`](/commands/xtrim) generates an `xtrim` event. -* [`PERSIST`](/commands/persist) generates a `persist` event if the expiry time associated with key has been successfully deleted. -* Every time a key with a time to live associated is removed from the data set because it expired, an `expired` event is generated. -* Every time a key is evicted from the data set in order to free memory as a result of the `maxmemory` policy, an `evicted` event is generated. -* Every time a new key is added to the data set, a `new` event is generated. - -**IMPORTANT** all the commands generate events only if the target key is really modified. For instance an [`SREM`](/commands/srem) deleting a non-existing element from a Set will not actually change the value of the key, so no event will be generated. 
- -If in doubt about how events are generated for a given command, the simplest -thing to do is to watch yourself: - - $ redis-cli config set notify-keyspace-events KEA - $ redis-cli --csv psubscribe '__key*__:*' - Reading messages... (press Ctrl-C to quit) - "psubscribe","__key*__:*",1 - -At this point use `redis-cli` in another terminal to send commands to the -Redis server and watch the events generated: - - "pmessage","__key*__:*","__keyspace@0__:foo","set" - "pmessage","__key*__:*","__keyevent@0__:set","foo" - ... - -### Timing of expired events - -Keys with a time to live associated are expired by Redis in two ways: - -* When the key is accessed by a command and is found to be expired. -* Via a background system that looks for expired keys in the background, incrementally, in order to be able to also collect keys that are never accessed. - -The `expired` events are generated when a key is accessed and is found to be expired by one of the above systems, as a result there are no guarantees that the Redis server will be able to generate the `expired` event at the time the key time to live reaches the value of zero. - -If no command targets the key constantly, and there are many keys with a TTL associated, there can be a significant delay between the time the key time to live drops to zero, and the time the `expired` event is generated. - -Basically `expired` events **are generated when the Redis server deletes the key** and not when the time to live theoretically reaches the value of zero. - -### Events in a cluster - -Every node of a Redis cluster generates events about its own subset of the keyspace as described above. However, unlike regular Pub/Sub communication in a cluster, events' notifications **are not** broadcasted to all nodes. Put differently, keyspace events are node-specific. This means that to receive all keyspace events of a cluster, clients need to subscribe to each of the nodes. - -@history - -* `>= 6.0`: Key miss events were added. 
-* `>= 7.0`: Event type `new` added - diff --git a/content/develop/manual/keyspace.md b/content/develop/manual/keyspace.md deleted file mode 100644 index 56dfce68ac..0000000000 --- a/content/develop/manual/keyspace.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'Managing keys in Redis: Key expiration, scanning, altering and querying - the key space - - ' -linkTitle: Keyspace -title: Keyspace -weight: 1 ---- - -Redis keys are binary safe; this means that you can use any binary sequence as a -key, from a string like "foo" to the content of a JPEG file. -The empty string is also a valid key. - -A few other rules about keys: - -* Very long keys are not a good idea. For instance a key of 1024 bytes is a bad - idea not only memory-wise, but also because the lookup of the key in the - dataset may require several costly key-comparisons. Even when the task at hand - is to match the existence of a large value, hashing it (for example - with SHA1) is a better idea, especially from the perspective of memory - and bandwidth. -* Very short keys are often not a good idea. There is little point in writing - "u1000flw" as a key if you can instead write "user:1000:followers". The latter - is more readable and the added space is minor compared to the space used by - the key object itself and the value object. While short keys will obviously - consume a bit less memory, your job is to find the right balance. -* Try to stick with a schema. For instance "object-type:id" is a good - idea, as in "user:1000". Dots or dashes are often used for multi-word - fields, as in "comment:4321:reply.to" or "comment:4321:reply-to". -* The maximum allowed key size is 512 MB. - -## Altering and querying the key space - -There are commands that are not defined on particular types, but are useful -in order to interact with the space of keys, and thus, can be used with -keys of any type. 
- -For example the [`EXISTS`](/commands/exists) command returns 1 or 0 to signal if a given key -exists or not in the database, while the [`DEL`](/commands/del) command deletes a key -and associated value, whatever the value is. - - > set mykey hello - OK - > exists mykey - (integer) 1 - > del mykey - (integer) 1 - > exists mykey - (integer) 0 - -From the examples you can also see how [`DEL`](/commands/del) itself returns 1 or 0 depending on whether -the key was removed (it existed) or not (there was no such key with that -name). - -There are many key space related commands, but the above two are the -essential ones together with the [`TYPE`](/commands/type) command, which returns the kind -of value stored at the specified key: - - > set mykey x - OK - > type mykey - string - > del mykey - (integer) 1 - > type mykey - none - -## Key expiration - -Before moving on, we should look at an important Redis feature that works regardless of the type of value you're storing: key expiration. Key expiration lets you set a timeout for a key, also known as a "time to live", or "TTL". When the time to live elapses, the key is automatically destroyed. - -A few important notes about key expiration: - -* They can be set both using seconds or milliseconds precision. -* However the expire time resolution is always 1 millisecond. -* Information about expires are replicated and persisted on disk, the time virtually passes when your Redis server remains stopped (this means that Redis saves the date at which a key will expire). - -Use the [`EXPIRE`](/commands/expire) command to set a key's expiration: - - > set key some-value - OK - > expire key 5 - (integer) 1 - > get key (immediately) - "some-value" - > get key (after some time) - (nil) - -The key vanished between the two [`GET`](/commands/get) calls, since the second call was -delayed more than 5 seconds. 
In the example above we used [`EXPIRE`](/commands/expire) in -order to set the expire (it can also be used in order to set a different -expire to a key already having one, like [`PERSIST`](/commands/persist) can be used in order -to remove the expire and make the key persistent forever). However we -can also create keys with expires using other Redis commands. For example -using [`SET`](/commands/set) options: - - > set key 100 ex 10 - OK - > ttl key - (integer) 9 - -The example above sets a key with the string value `100`, having an expire -of ten seconds. Later the [`TTL`](/commands/ttl) command is called in order to check the -remaining time to live for the key. - -In order to set and check expires in milliseconds, check the [`PEXPIRE`](/commands/pexpire) and -the [`PTTL`](/commands/pttl) commands, and the full list of [`SET`](/commands/set) options. - -## Navigating the keyspace - -### Scan -To incrementally iterate over the keys in a Redis database in an efficient manner, you can use the [`SCAN`](/commands/scan) command. - -Since [`SCAN`](/commands/scan) allows for incremental iteration, returning only a small number of elements per call, it can be used in production without the downside of commands like [`KEYS`](/commands/keys) or [`SMEMBERS`](/commands/smembers) that may block the server for a long time (even several seconds) when called against big collections of keys or elements. - -However while blocking commands like [`SMEMBERS`](/commands/smembers) are able to provide all the elements that are part of a Set in a given moment. -The [`SCAN`](/commands/scan) family of commands only offer limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process. - -### Keys - -Another way to iterate over the keyspace is to use the [`KEYS`](/commands/keys) command, but this approach should be used with care, since [`KEYS`](/commands/keys) will block the Redis server until all keys are returned. 
-
-**Warning**: consider [`KEYS`](/commands/keys) as a command that should only be used in production
-environments with extreme care.
-
-[`KEYS`](/commands/keys) may ruin performance when it is executed against large databases.
-This command is intended for debugging and special operations, such as changing
-your keyspace layout.
-Don't use [`KEYS`](/commands/keys) in your regular application code.
-If you're looking for a way to find keys in a subset of your keyspace, consider
-using [`SCAN`](/commands/scan) or [sets][tdts].
-
-[tdts]: /topics/data-types#sets
-
-Supported glob-style patterns:
-
-* `h?llo` matches `hello`, `hallo` and `hxllo`
-* `h*llo` matches `hllo` and `heeeello`
-* `h[ae]llo` matches `hello` and `hallo`, but not `hillo`
-* `h[^e]llo` matches `hallo`, `hbllo`, ... but not `hello`
-* `h[a-b]llo` matches `hallo` and `hbllo`
-
-Use `\` to escape special characters if you want to match them verbatim.
diff --git a/content/develop/manual/patterns/_index.md b/content/develop/manual/patterns/_index.md
deleted file mode 100644
index 714a33b1c3..0000000000
--- a/content/develop/manual/patterns/_index.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-categories:
-- docs
-- develop
-- stack
-- oss
-- rs
-- rc
-- oss
-- kubernetes
-- clients
-description: Novel patterns for working with Redis data structures
-linkTitle: Patterns
-title: Redis programming patterns
-weight: 6
----
-
-The following documents describe some novel development patterns you can use with Redis. 
diff --git a/content/develop/manual/patterns/bulk-loading.md b/content/develop/manual/patterns/bulk-loading.md deleted file mode 100644 index 3ce70b0d98..0000000000 --- a/content/develop/manual/patterns/bulk-loading.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'Writing data in bulk using the Redis protocol - - ' -linkTitle: Bulk loading -title: Bulk loading -weight: 1 ---- - -Bulk loading is the process of loading Redis with a large amount of pre-existing data. Ideally, you want to perform this operation quickly and efficiently. This document describes some strategies for bulk loading data in Redis. - -## Bulk loading using the Redis protocol - -Using a normal Redis client to perform bulk loading is not a good idea -for a few reasons: the naive approach of sending one command after the other -is slow because you have to pay for the round trip time for every command. -It is possible to use pipelining, but for bulk loading of many records -you need to write new commands while you read replies at the same time to -make sure you are inserting as fast as possible. - -Only a small percentage of clients support non-blocking I/O, and not all the -clients are able to parse the replies in an efficient way in order to maximize -throughput. For all of these reasons the preferred way to mass import data into -Redis is to generate a text file containing the Redis protocol, in raw format, -in order to call the commands needed to insert the required data. - -For instance if I need to generate a large data set where there are billions -of keys in the form: `keyN -> ValueN' I will create a file containing the -following commands in the Redis protocol format: - - SET Key0 Value0 - SET Key1 Value1 - ... - SET KeyN ValueN - -Once this file is created, the remaining action is to feed it to Redis -as fast as possible. 
In the past the way to do this was to use the
-`netcat` with the following command:
-
-    (cat data.txt; sleep 10) | nc localhost 6379 > /dev/null
-
-However this is not a very reliable way to perform mass import because netcat
-does not really know when all the data was transferred and can't check for
-errors. In 2.6 or later versions of Redis the `redis-cli` utility
-supports a new mode called **pipe mode** that was designed in order to perform
-bulk loading.
-
-Using the pipe mode the command to run looks like the following:
-
-    cat data.txt | redis-cli --pipe
-
-That will produce an output similar to this:
-
-    All data transferred. Waiting for the last reply...
-    Last reply received from server.
-    errors: 0, replies: 1000000
-
-The redis-cli utility will also make sure to only redirect errors received
-from the Redis instance to the standard output.
-
-### Generating Redis Protocol
-
-The Redis protocol is extremely simple to generate and parse, and is
-[Documented here](/topics/protocol). However in order to generate protocol for
-the goal of bulk loading you don't need to understand every detail of the
-protocol, but just that every command is represented in the following way:
-
-    *<args><cr><lf>
-    $<len><cr><lf>
-    <arg0><cr><lf>
-    <arg1><cr><lf>
-    ...
-    <argN><cr><lf>
-
-Where `<cr>` means "\r" (or ASCII character 13) and `<lf>` means "\n" (or ASCII character 10).
-
-For instance the command **SET key value** is represented by the following protocol:
-
-    *3
-    $3
-    SET
-    $3
-    key
-    $5
-    value
-
-Or represented as a quoted string:
-
-    "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n"
-
-The file you need to generate for bulk loading is just composed of commands
-represented in the above way, one after the other. 
- -The following Ruby function generates valid protocol: - - def gen_redis_proto(*cmd) - proto = "" - proto << "*"+cmd.length.to_s+"\r\n" - cmd.each{|arg| - proto << "$"+arg.to_s.bytesize.to_s+"\r\n" - proto << arg.to_s+"\r\n" - } - proto - end - - puts gen_redis_proto("SET","mykey","Hello World!").inspect - -Using the above function it is possible to easily generate the key value pairs -in the above example, with this program: - - (0...1000).each{|n| - STDOUT.write(gen_redis_proto("SET","Key#{n}","Value#{n}")) - } - -We can run the program directly in pipe to redis-cli in order to perform our -first mass import session. - - $ ruby proto.rb | redis-cli --pipe - All data transferred. Waiting for the last reply... - Last reply received from server. - errors: 0, replies: 1000 - -### How the pipe mode works under the hood - -The magic needed inside the pipe mode of redis-cli is to be as fast as netcat -and still be able to understand when the last reply was sent by the server -at the same time. - -This is obtained in the following way: - -+ redis-cli --pipe tries to send data as fast as possible to the server. -+ At the same time it reads data when available, trying to parse it. -+ Once there is no more data to read from stdin, it sends a special **ECHO** -command with a random 20 byte string: we are sure this is the latest command -sent, and we are sure we can match the reply checking if we receive the same -20 bytes as a bulk reply. -+ Once this special final command is sent, the code receiving replies starts -to match replies with these 20 bytes. When the matching reply is reached it -can exit with success. - -Using this trick we don't need to parse the protocol we send to the server -in order to understand how many commands we are sending, but just the replies. - -However while parsing the replies we take a counter of all the replies parsed -so that at the end we are able to tell the user the amount of commands -transferred to the server by the mass insert session. 
diff --git a/content/develop/manual/patterns/distributed-locks.md b/content/develop/manual/patterns/distributed-locks.md deleted file mode 100644 index 55526d4fc2..0000000000 --- a/content/develop/manual/patterns/distributed-locks.md +++ /dev/null @@ -1,242 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'A distributed lock pattern with Redis - - ' -linkTitle: Distributed locks -title: Distributed Locks with Redis -weight: 1 ---- -Distributed locks are a very useful primitive in many environments where -different processes must operate with shared resources in a mutually -exclusive way. - -There are a number of libraries and blog posts describing how to implement -a DLM (Distributed Lock Manager) with Redis, but every library uses a different -approach, and many use a simple approach with lower guarantees compared to -what can be achieved with slightly more complex designs. - -This page describes a more canonical algorithm to implement -distributed locks with Redis. We propose an algorithm, called **Redlock**, -which implements a DLM which we believe to be safer than the vanilla single -instance approach. We hope that the community will analyze it, provide -feedback, and use it as a starting point for the implementations or more -complex or alternative designs. - -## Implementations - -Before describing the algorithm, here are a few links to implementations -already available that can be used for reference. - -* [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). There is also a [fork of Redlock-rb](https://github.com/leandromoreira/redlock-rb) that adds a gem for easy distribution. -* [Redlock-py](https://github.com/SPSCommerce/redlock-py) (Python implementation). -* [Pottery](https://github.com/brainix/pottery#redlock) (Python implementation). -* [Aioredlock](https://github.com/joanvila/aioredlock) (Asyncio Python implementation). 
-* [Redlock-php](https://github.com/ronnylt/redlock-php) (PHP implementation). -* [PHPRedisMutex](https://github.com/malkusch/lock#phpredismutex) (further PHP implementation). -* [cheprasov/php-redis-lock](https://github.com/cheprasov/php-redis-lock) (PHP library for locks). -* [rtckit/react-redlock](https://github.com/rtckit/reactphp-redlock) (Async PHP implementation). -* [Redsync](https://github.com/go-redsync/redsync) (Go implementation). -* [Redisson](https://github.com/mrniko/redisson) (Java implementation). -* [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). -* [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (C++ implementation). -* [Redis-plus-plus](https://github.com/sewenew/redis-plus-plus/#redlock) (C++ implementation). -* [Redlock-cs](https://github.com/kidfashion/redlock-cs) (C#/.NET implementation). -* [RedLock.net](https://github.com/samcook/RedLock.net) (C#/.NET implementation). Includes async and lock extension support. -* [ScarletLock](https://github.com/psibernetic/scarletlock) (C# .NET implementation with configurable datastore). -* [Redlock4Net](https://github.com/LiZhenNet/Redlock4Net) (C# .NET implementation). -* [node-redlock](https://github.com/mike-marcacci/node-redlock) (NodeJS implementation). Includes support for lock extension. -* [Deno DLM](https://github.com/oslabs-beta/Deno-Redlock) (Deno implementation) -* [Rslock](https://github.com/hexcowboy/rslock) (Rust implementation). Includes async and lock extension support. - -## Safety and Liveness Guarantees - -We are going to model our design with just three properties that, from our point of view, are the minimum guarantees needed to use distributed locks in an effective way. - -1. Safety property: Mutual exclusion. At any given moment, only one client can hold a lock. -2. Liveness property A: Deadlock free. Eventually it is always possible to acquire a lock, even if the client that locked a resource crashes or gets partitioned. -3. 
Liveness property B: Fault tolerance. As long as the majority of Redis nodes are up, clients are able to acquire and release locks. - -## Why Failover-based Implementations Are Not Enough - -To understand what we want to improve, let’s analyze the current state of affairs with most Redis-based distributed lock libraries. - -The simplest way to use Redis to lock a resource is to create a key in an instance. The key is usually created with a limited time to live, using the Redis expires feature, so that eventually it will get released (property 2 in our list). When the client needs to release the resource, it deletes the key. - -Superficially this works well, but there is a problem: this is a single point of failure in our architecture. What happens if the Redis master goes down? -Well, let’s add a replica! And use it if the master is unavailable. This is unfortunately not viable. By doing so we can’t implement our safety property of mutual exclusion, because Redis replication is asynchronous. - -There is a race condition with this model: - -1. Client A acquires the lock in the master. -2. The master crashes before the write to the key is transmitted to the replica. -3. The replica gets promoted to master. -4. Client B acquires the lock to the same resource A already holds a lock for. **SAFETY VIOLATION!** - -Sometimes it is perfectly fine that, under special circumstances, for example during a failure, multiple clients can hold the lock at the same time. -If this is the case, you can use your replication based solution. Otherwise we suggest to implement the solution described in this document. 
- -## Correct Implementation with a Single Instance - -Before trying to overcome the limitation of the single instance setup described above, let’s check how to do it correctly in this simple case, since this is actually a viable solution in applications where a race condition from time to time is acceptable, and because locking into a single instance is the foundation we’ll use for the distributed algorithm described here. - -To acquire the lock, the way to go is the following: - - SET resource_name my_random_value NX PX 30000 - -The command will set the key only if it does not already exist (`NX` option), with an expire of 30000 milliseconds (`PX` option). -The key is set to a value “my\_random\_value”. This value must be unique across all clients and all lock requests. - -Basically the random value is used in order to release the lock in a safe way, with a script that tells Redis: remove the key only if it exists and the value stored at the key is exactly the one I expect to be. This is accomplished by the following Lua script: - - if redis.call("get",KEYS[1]) == ARGV[1] then - return redis.call("del",KEYS[1]) - else - return 0 - end - -This is important in order to avoid removing a lock that was created by another client. For example a client may acquire the lock, get blocked performing some operation for longer than the lock validity time (the time at which the key will expire), and later remove the lock, that was already acquired by some other client. -Using just [`DEL`](/commands/del) is not safe as a client may remove another client's lock. With the above script instead every lock is “signed” with a random string, so the lock will be removed only if it is still the one that was set by the client trying to remove it. - -What should this random string be? We assume it’s 20 bytes from `/dev/urandom`, but you can find cheaper ways to make it unique enough for your tasks. 
-For example a safe pick is to seed RC4 with `/dev/urandom`, and generate a pseudo random stream from that. -A simpler solution is to use a UNIX timestamp with microsecond precision, concatenating the timestamp with a client ID. It is not as safe, but probably sufficient for most environments. - -The "lock validity time" is the time we use as the key's time to live. It is both the auto release time, and the time the client has in order to perform the operation required before another client may be able to acquire the lock again, without technically violating the mutual exclusion guarantee, which is only limited to a given window of time from the moment the lock is acquired. - -So now we have a good way to acquire and release the lock. With this system, reasoning about a non-distributed system composed of a single, always available, instance, is safe. Let’s extend the concept to a distributed system where we don’t have such guarantees. - -## The Redlock Algorithm - -In the distributed version of the algorithm we assume we have N Redis masters. Those nodes are totally independent, so we don’t use replication or any other implicit coordination system. We already described how to acquire and release the lock safely in a single instance. We take for granted that the algorithm will use this method to acquire and release the lock in a single instance. In our examples we set N=5, which is a reasonable value, so we need to run 5 Redis masters on different computers or virtual machines in order to ensure that they’ll fail in a mostly independent way. - -In order to acquire the lock, the client performs the following operations: - -1. It gets the current time in milliseconds. -2. It tries to acquire the lock in all the N instances sequentially, using the same key name and random value in all the instances. During step 2, when setting the lock in each instance, the client uses a timeout which is small compared to the total lock auto-release time in order to acquire it. 
For example if the auto-release time is 10 seconds, the timeout could be in the ~ 5-50 milliseconds range. This prevents the client from remaining blocked for a long time trying to talk with a Redis node which is down: if an instance is not available, we should try to talk with the next instance ASAP. -3. The client computes how much time elapsed in order to acquire the lock, by subtracting from the current time the timestamp obtained in step 1. If and only if the client was able to acquire the lock in the majority of the instances (at least 3), and the total time elapsed to acquire the lock is less than lock validity time, the lock is considered to be acquired. -4. If the lock was acquired, its validity time is considered to be the initial validity time minus the time elapsed, as computed in step 3. -5. If the client failed to acquire the lock for some reason (either it was not able to lock N/2+1 instances or the validity time is negative), it will try to unlock all the instances (even the instances it believed it was not able to lock). - -### Is the Algorithm Asynchronous? - -The algorithm relies on the assumption that while there is no synchronized clock across the processes, the local time in every process updates at approximately the same rate, with a small margin of error compared to the auto-release time of the lock. This assumption closely resembles a real-world computer: every computer has a local clock and we can usually rely on different computers to have a clock drift which is small. - -At this point we need to better specify our mutual exclusion rule: it is guaranteed only as long as the client holding the lock terminates its work within the lock validity time (as obtained in step 3), minus some time (just a few milliseconds in order to compensate for clock drift between processes). 
 - -This paper contains more information about similar systems requiring a bounded *clock drift*: [Leases: an efficient fault-tolerant mechanism for distributed file cache consistency](http://dl.acm.org/citation.cfm?id=74870). - -### Retry on Failure - -When a client is unable to acquire the lock, it should try again after a random delay in order to try to desynchronize multiple clients trying to acquire the lock for the same resource at the same time (this may result in a split brain condition where nobody wins). Also the faster a client tries to acquire the lock in the majority of Redis instances, the smaller the window for a split brain condition (and the need for a retry), so ideally the client should try to send the [`SET`](/commands/set) commands to the N instances at the same time using multiplexing. - -It is worth stressing how important it is for clients that fail to acquire the majority of locks, to release the (partially) acquired locks ASAP, so that there is no need to wait for key expiry in order for the lock to be acquired again (however if a network partition happens and the client is no longer able to communicate with the Redis instances, there is an availability penalty to pay as it waits for key expiration). - -### Releasing the Lock - -Releasing the lock is simple, and can be performed whether or not the client believes it was able to successfully lock a given instance. - -### Safety Arguments - -Is the algorithm safe? Let's examine what happens in different scenarios. - -To start let’s assume that a client is able to acquire the lock in the majority of instances. All the instances will contain a key with the same time to live. However, the key was set at different times, so the keys will also expire at different times. 
But if the first key was set at worst at time T1 (the time we sample before contacting the first server) and the last key was set at worst at time T2 (the time we obtained the reply from the last server), we are sure that the first key to expire in the set will exist for at least `MIN_VALIDITY=TTL-(T2-T1)-CLOCK_DRIFT`. All the other keys will expire later, so we are sure that the keys will be simultaneously set for at least this time. - -During the time that the majority of keys are set, another client will not be able to acquire the lock, since N/2+1 SET NX operations can’t succeed if N/2+1 keys already exist. So if a lock was acquired, it is not possible to re-acquire it at the same time (violating the mutual exclusion property). - -However we want to also make sure that multiple clients trying to acquire the lock at the same time can’t simultaneously succeed. - -If a client locked the majority of instances using a time near, or greater, than the lock maximum validity time (the TTL we use for SET basically), it will consider the lock invalid and will unlock the instances, so we only need to consider the case where a client was able to lock the majority of instances in a time which is less than the validity time. In this case for the argument already expressed above, for `MIN_VALIDITY` no client should be able to re-acquire the lock. So multiple clients will be able to lock N/2+1 instances at the same time (with "time" being the end of Step 2) only when the time to lock the majority was greater than the TTL time, making the lock invalid. - -### Liveness Arguments - -The system liveness is based on three main features: - -1. The auto release of the lock (since keys expire): eventually keys are available again to be locked. -2. The fact that clients, usually, will cooperate removing the locks when the lock was not acquired, or when the lock was acquired and the work terminated, making it likely that we don’t have to wait for keys to expire to re-acquire the lock. 
 -3. The fact that when a client needs to retry a lock, it waits a time which is comparably greater than the time needed to acquire the majority of locks, in order to probabilistically make split brain conditions during resource contention unlikely. - -However, we pay an availability penalty equal to [`TTL`](/commands/ttl) time on network partitions, so if there are continuous partitions, we can pay this penalty indefinitely. -This happens every time a client acquires a lock and gets partitioned away before being able to remove the lock. - -Basically if there are infinite continuous network partitions, the system may become unavailable for an infinite amount of time. - -### Performance, Crash Recovery and fsync - -Many users using Redis as a lock server need high performance in terms of both latency to acquire and release a lock, and number of acquire / release operations that it is possible to perform per second. In order to meet this requirement, the strategy to talk with the N Redis servers to reduce latency is definitely multiplexing (putting the socket in non-blocking mode, send all the commands, and read all the commands later, assuming that the RTT between the client and each instance is similar). - -However there is another consideration around persistence if we want to target a crash-recovery system model. - -Basically to see the problem here, let’s assume we configure Redis without persistence at all. A client acquires the lock in 3 of 5 instances. One of the instances where the client was able to acquire the lock is restarted, at this point there are again 3 instances that we can lock for the same resource, and another client can lock it again, violating the safety property of exclusivity of lock. - -If we enable AOF persistence, things will improve quite a bit. For example we can upgrade a server by sending it a [`SHUTDOWN`](/commands/shutdown) command and restarting it. 
Because Redis expires are semantically implemented so that time still elapses when the server is off, all our requirements are fine. -However everything is fine as long as it is a clean shutdown. What about a power outage? If Redis is configured, as by default, to fsync on disk every second, it is possible that after a restart our key is missing. In theory, if we want to guarantee the lock safety in the face of any kind of instance restart, we need to enable `fsync=always` in the persistence settings. This will affect performance due to the additional sync overhead. - -However things are better than they look at first glance. Basically, -the algorithm safety is retained as long as when an instance restarts after a -crash, it no longer participates in any **currently active** lock. This means that the -set of currently active locks when the instance restarts were all obtained -by locking instances other than the one which is rejoining the system. - -To guarantee this we just need to make an instance, after a crash, unavailable -for at least a bit more than the max [`TTL`](/commands/ttl) we use. This is the time needed -for all the keys about the locks that existed when the instance crashed to -become invalid and be automatically released. - -Using *delayed restarts* it is basically possible to achieve safety even -without any kind of Redis persistence available, however note that this may -translate into an availability penalty. For example if a majority of instances -crash, the system will become globally unavailable for [`TTL`](/commands/ttl) (here globally means -that no resource at all will be lockable during this time). - -### Making the algorithm more reliable: Extending the lock - -If the work performed by clients consists of small steps, it is possible to -use smaller lock validity times by default, and extend the algorithm implementing -a lock extension mechanism. 
Basically the client, if in the middle of the -computation while the lock validity is approaching a low value, may extend the -lock by sending a Lua script to all the instances that extends the TTL of the key -if the key exists and its value is still the random value the client assigned -when the lock was acquired. - -The client should only consider the lock re-acquired if it was able to extend -the lock in the majority of instances, and within the validity time -(basically the algorithm to use is very similar to the one used when acquiring -the lock). - -However this does not technically change the algorithm, so the maximum number -of lock reacquisition attempts should be limited, otherwise one of the liveness -properties is violated. - -### Disclaimer about consistency - -Please consider thoroughly reviewing the [Analysis of Redlock](#analysis-of-redlock) section at the end of this page. -Martin Kleppmann's article and antirez's answer to it are very relevant. -If you are concerned about consistency and correctness, you should pay attention to the following topics: - -1. You should implement fencing tokens. - This is especially important for processes that can take significant time and applies to any distributed locking system. - Extending locks' lifetime is also an option, but don't assume that a lock is retained as long as the process that had acquired it is alive. -2. Redis is not using a monotonic clock for the TTL expiration mechanism. - That means that a wall-clock shift may result in a lock being acquired by more than one process. - Even though the problem can be mitigated by preventing admins from manually setting the server's time and setting up NTP properly, there's still a chance of this issue occurring in real life and compromising consistency. - -## Want to help? - -If you are into distributed systems, it would be great to have your opinion / analysis. Also reference implementations in other languages could be great. - -Thanks in advance! 
- -## Analysis of Redlock ---- - -1. Martin Kleppmann [analyzed Redlock here](http://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html). A counterpoint to this analysis can be [found here](http://antirez.com/news/101). diff --git a/content/develop/manual/patterns/indexes/2idx_0.png b/content/develop/manual/patterns/indexes/2idx_0.png deleted file mode 100644 index 8cf870885fb73615e20407d248e51860fd1aedcc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23007 zcmZU)1yCGMv^~r&3t0%Vc(AazySoOrKyZQ+Ah^3M4iN}W&_GCnyW8Tff#9wQ?hfCQ z{NDS&s*ftFW~QgRXL|bHd(OFcqtsR9FwsfSk&uuu735_!kdS~N#6L0^6>-J)2`U>B z5_+kPw6wZ{v@}%R#qo`eoh1^Id{ko6lUJHHggvbn5opC8OOKzeG^UKY59U zMwwNGy$JSmlmRMNC=1FcUX}@SceisKPmPH4dsYjOI{@ z#E3lX)BBjQRipRu^VW^4E$}V_=Q#jL?M-D^IZ~_`mAkAEJ>hTZZ4I=FA?H#zk4$$X=r?-QYos(Jz?JS^=GbR^v7U*6V(FOOQ7ovo6%DiMX(tghe4ee zEBgn6I(WN9pmSirs-mOwgLP6$uI6iGLV@tDZvExmI(RIK>K00=;qpW%QfU|O@M?ql zI(w#|>^5CM_t%N88G+p{-@GmqAC_SVQ)UF|-YQecNl51oijC@OX0S|a*A?DE*V)7= z(owZd@6#Iik)2v=msr5+9e@o^;6+V=3I=|Ilchjm%87&-f;XzjTNaW@u|%ZF-6mAu z={~ouy1q~1QvRSa2uKkPISD+9xUa8789RDcz5X1D#TMs>QBgTHz*^EnTHCz*n>!g& zpEy!o?@DCo?-SnBT_RM$g}5M8{2=sEiQzg*t}K)?l#jignQ$UdFh?1olLVPOP%0MS z0w;{cMsIT{!%l#8EHnH7e1{W{Vz316Ng$z1)H89#h~04ARO;U;+8^0jd7cC%yH;%ZoTj#H4i$u*ca;= z>u+61uW+uA!=%Q(323x4+9YHo==W>&yY|zg6@-2QDFri^E94mFD6E!r8Yl#RO`_3s zDYmGwh+5a{6OJ?205RG#S~1p4$mYA`pJ@1N;Elhs!Lu>`P4k<4oc^n$VodL{MU|zY zWgV9{mz-ruKgP?KuP_U^6QvgvMP-|1$-kM}2=NfIW@^g|zu5WqVW7X4Eg3UuD1kM( znL$`WZmjw@iw~7Q(H+Je<=wmV3NlSLeTPQF=K0t4j90YBACJe5QI0W>vDaG}IPeV# zaPiL>c$h^rWtjgkmuL*Ci+uG`akfLVkJ`f>jti!>N)piyPCQM#P5i}7uJuRbv%2<6 z4;?}V3iXx(k#ABu&aa|$7`5xaauq0QRqJfz6BZh2`wzuhau7$o*N}Z;Zm6O#B(3t* zL#EikUjMb6sJy67v*cYW>dOvAYS~=f+}GxYnp!{QxaBHUPs#};2NaoAZCka!|4OL& z=n;9&xA$fbhuoVyNwr2bV_5TbbV-Oqv_sy$zi3frqEYaoWb<=>-6cbD)mFoYj{rLp zQ#iMP^=Ko0<%ADopv}g(m~;i@PmYDMVW-hZhjse|+X=fsP|Fd6-pEe`+PP1H9y+a|MusOPEDJkt0b)S9KcMHV;8S#T1tPrZ{9N0v~Zgi zmKQdfvuHM<@sn%3qP=1y`pt$_b;qA@-tO5xpZGJBoe@4lJ{vwOyZb8MDwQ%(Q{f6J 
zs~4xy=FH){6MNGha=WOz#OI&R#pkdEeh8M4Gm#q!ycKXEFC;VOJWDwmj32C3?N*ig zF8iI)igXKe*q)7n$TJz3?4UzD1)buW+H&~gQ10OFu(rRszq2qW!tvbbxv$8zZa)iF zVG{4Y&F}iQ%8qxt1j|~RXYB?{2HOjNkZ-)Mch9^VG;C=DEi2g%EwA>^{5QX^C9huW zFzqt6YHbu6U8h}cU3#LbBmY842d7}Tq0bXs(#Z46<4-(iAtu8yBo!f8#nr&)f;6zW zNpJ6Qba5EzDhaTO2sAPInN{!n@h^;^9;7~|eqJ)B7hk`*sozx{Wy@X?Z`fnx$%l(a zjO$Hx%siL;x-T=*?1h%h2iJ7L8$rjqkMm~x)>W_FUY-^{YfQ;W`J~%8-CUmQjO9%0 zY%XGDM11hNN$z&#G9R=Q`X@BCGqy9F#!tgy+`9!s9JchSC@wv9o|yx({uEWiPUAf9 z{nO@9t$g)ApC^)dNX$GMWEyfPWI}$Q|K9kW#TCpw{6^90n7gs!w6eykx!+?ztmS!g zW@U@09IKVm@G|OVI5p{)4=+jQ_-*X^=XjcC{9T6#SCc|nTQ^9y2sZ-eNh+#&)dU?~ z7e}%8#V0nD)}Nl-)XX>t-%?-8EygU&4b@Og-!}&tI2Df8m3=+j!5aQ(Af9^r>iXy4 za7xBPBZ~+5?1zP+r8}pXj?B+gzjJnTzBG&ZrbQEXp>$DP6wi59T6p)Z+;!gN+#%;J z=CMBE!QK@+tY#_Mm8{qFmJZYCI*-l|nGf;A7JObHUgTGIc$l2!Eq+1aEI$4!|FxbA zzAw++_JDa?k5G&Od1A0rGJPmLExm>kLHhJ__P3&MN1Jtwp>g(Xyu;X_XM4qpyjQSgSTm^5AWzw?kcNKRNznr`8sg;BqH7}JH)L!O^syg0=?b-j^$PAlefA#ite8b74QsrEEeL6q6M zfx_SKOY2JWmQGu$11_%b-RNxI$ifLLwu+F@(FEvFgDxhK)Z3A=sak>= zI1fH;frf6b1%VGyTmGl%EJ#1xK_1GP`vbS?^ z74j0L`g?~E;`s40I~DZrEpE1=RJtnaP-#aOODHcJ4;u%S7&;UR6>+h6BcvfC`(Jm& znJAUDo12pmJG-Z+Cz~fXo1=>rJEx$aAUg*aI~Nx#;tp0VJd$&p0xc zuI4T_PHr}i4$#MOP0bwL-9)LV9w++WzkknZ>1Ff(W^!=-@3jyw$o|;E&dJ8X{=Z`* zx{5qr6;ikHvb58cv9Y&wa7D}^#>>ek^7sD#U(5fS@&EMH`@cPT|8LL#)AC+}>>h=$SM84v>wN-mFNiPwH~(p+Kv zyhI*&e|M2}7&9f#j#UQ5dz@iVZu#M79eyFh$34pzlFSsd`g6J)N`E4+EUO2YjK^a} zrxuiKlgmc?DbDl|Eg@{SX$DshcLzPE@3uS_Y6OLRy&My6i0|(XS_5eu zv2@v%8cxlVyardFc;`J=GmY&_K|eS!DhOUKIpUgQ$3h*r4Di4ww4(A0M*@l9M&h45 zA38I^LxDvwn1oOOw-iEw4B~vG(nB)}EmS_b(MaA}BJ=p5u218Xe;3A$UV_sR3xEP6 zCdT|m2dCk3_i}as0=Kl9&?n`1d) z5HwZ_ia)Di_r2<3dUN=D;~oix%k10h14_JTT=aOR^*MYHH=QL5nOva4;xoqcF0h?< zRaR=(dV7ocuxMWe1<(`ZaLnLm$#c8L%MTz*#yD4qDRA7k>ktMfP;smoQi^3{L73+> zr#Grp#*)Xv*CbGzq*V5pLHq<36Pd9I$h2=Jh=ztIZznk1+BJ}-Z=u- z$N1yJYEl9RBnkx1WU3*`hGB@UgZNTjbP!+tA6IdJu%IpVQd`onKMmzQuHqn{j~$r} zcQVis+(~rFq8x#O(Z5 znk#-^h(Df0j1+bVR9!nHKA5?W-jSA%c+LokH8$R^YxJb42z-el#~x$`C4*!ae#k>B 
zZF#SGNXtz^JSoevx9Tte|L*|?;;Nqpv$*jfz9_T|XPL(22T!ch(~8dm2}k+e4qb6Y zhMf(%jnE%2U;f(WCX_rc=A~qJJJfSoeeNt$+ux_{Bl}*GGHuUTnB}Wiw1ZTzVw~y< zL=|B!Z-6T_L3jY}8Y4_2{%*WdrCZxlh#{5fniB|>c%IR!$=IIh>N>AS5?5paX#*~! z3yDL4R@s3Z64GPU(pibI%`DQnkRv3dr1fgI7) z%VzCT7rZ1xJfi{MrhmHu6CkJ^Y4_V)c6Y4nP8-~5a4&@f>;i}Ot&4oY-%0-nMS}eS zNdeRwz@sF^4G~c7mt+Kexj5CwUy7?gXf1yDc(H*~-{Ge#{t=)SnU&@>xE#U4Y-H$T zj*$uUJNm@=;{nE`>u%yM^oC4Rfc|+;!0>?{r2yUvHwcw(^kldFy^3q`D{6CXWn^9e z_O0l2Hh?cW1IPFkGw9R#ZZHza2d7S#c-s=)hY|^PgjOcS5rA=o1T@D5qYP=j%>Cqj z91zT1OHWj|$KEDBC4kLm?f0x}IFcP7gd?!vIhon}_F{<~ikbqR=#MB~qT(0&RWSm} z&plMUFI~6oNf0Xh1{J`|ze=SkRY-2pp@ejQ+Td+VP3wAXK^dzUe0d5iLi0jdj; zYpI-=$NMh|yy?nykZ?FFY}P#A?IG``5v2ob3S_3nnX&n;T_Y^WW7K-1$x@b4HS?AU z=N+!Umsv0?L;x?vc{j^{-Ss&@OA>-`-+wk#&5Hf3ZSBny)y62rY;GaMg|D@*@Wr)% zQkKh`T1M6P>R;(^P~3r);41`43QMI1hXOHmrsC25HgcPLSP4O?11>g9e>f&XIL;hAQu*T=58-$kBgdI&4-V|DO-Y|kCf)K zRG@YX8oY5Twlz8slmi=frVX}eXVaA&j>q!iVgdARwFXAG13D;URV2D4J>*)+i`+Um znL(}2TjZc)grWRs4^6PbFQy!Y!97;1*{eO(WBPf3JH-$V)9;q(#Vz1nyyt>vUE!FJ zAh;LEjTvM}O3q%PiveH1_;}g&KFA{ah zU3>pr{P8BZ6+eEOWzNoQTDLv0=lF1>YS}RzP3L(Z6WwYw!De9RhBtl{EEQI zIyE>OiMins?!?LkQL)5O6c^(5vV&0-oRWqMH*qpzWF$(w2bMnB^=N~?;?@aQ4hx1O zV&+-ys-jkaWb7aOy>5CZet7o<1~ybZ0_lY=dC8WMFWHX~K8#Dfv+9l?#*}T|2p9f{ zcSP{OR=;#wQ9rw44A|_%BKj|nGAh57S#}8G)rcVhi{OH-qE~olI`7sJDj!74=@u3G z+Gs;)?0EF!kOpFv!pXl2!?lOs~B=YaBsul$gsrdtS9)p;S%n^+>iV*Vefy@{j*0sN`5=*{|UoPqOL|IW6h4^ zl$$-DobCd)=S2-Ep3lZeTOUe`^TQQK6g39t(iuFu?hDX^b)W2pNN@rpLR8DeeuIJW zc(Ik@4(X>j)_OCSYaQtn%nAHKu9{!IXPZ6+X`oHC0GNxweFf?TAF4u-K!$kP&|WmP zJq3ZDn#oz*cau~fTOENevJDUOhuap<1{SE~B}Xwuqbb#6;Y?PC&#?)g+cTx;yH#TW z(1X);OS#atI4Y`!R#JcdGNZ15ILR+pD?+;A80_6HwG1wrru?T2i1)8C6lFV7|aQ6dlROn#;VQl1iq zm66agYuv~#)={c+i-lc#yo59*4O^+MOSQSgPd@_!z;#q*#sO}YlWi%ut|n1pSae6{ zX+iITm*h*PhI1-_>^^Di|NG7G3(FRvL zI|mZR4n&b8*YgWJEpcKlm zcgL5a`<5Lu@!kQ=i^)3XN!;3v!P*P}76tYqc^6`m(F%P=14&6$Fga_Bc{nM_E`B~Y zm=D|Z0e0La!4`RGC?6vR!k`6kBSp262!g2@7p387FsD@J3k{tpvGip)23R5+06Pj8 
zkjT7hQb{S*(N*>J>f=r~PaG?MTKF#n%0=Akt#X&6w}k4b?u%7dZ{}!;O*^Tvh#BN*F})ar#1Fp{+1Gu!D;v^Ty~Xg{0|BM7KnN(4rIPN~pTUw*2P6KE zPho?)S-~QsjTMyX3*kg41K1G>52nddANUYK0s|!UTzZoo%vTp!L$mTlj5PFP)Gzn+ zl0Tk0+ur>bq)fg<5g9H$A0E6-d8&=Ztnji?+fRrQ%4b^Yrx*Ic3S7%=PN__Pp8e6r4;S# z*2c#niZT~gOmE;>$ATaS)9RCiIDQ;1TFx^-taWGf=JP~`kcP;yg@1>LBtmcZ=n0r;N$agYS1xaXOrm9uGYs6 z#Qd2T)jjT{+JWi6HFDTnzemG%7&MVy2sWweOsC9Y^@EnMGVHj2RsG?EDUaA!)nEZ8 zyQI1ZlTxY<7|R6#-O?e`*p!;uEjEk~^md(P?TgCpn@N|!tkyt#BZn&%{=*p(Du21` zee|{r?V1~E2FleN{prSWFw23RoLwXTBOdO+r>%$UcoYMZ45$`*=kIsHU>)Yb5910*Z4uk)>=T?LAeU94wVxRByzL)B0 z5dBOz#J4YEUA3RjA}pIl%_juT3**Q?<{5D0GnIO;5^Xw;@gUNg>33)+TysXd%+ksT ze*3*cKECTs;gsdxtZ7aln1^zC7iqN9Dgi?}NC~wg6kVEPrnp59$GRlE06~}m9%BMOr zTW4UYuiScPP*d~fO^e}G+aLX7+Z|giN!9U|^NKd9AOhf1)=!CTtXWDAiR&fB->Vi* zpCFugHa=ieQn*j~4T8hY6_Ye}wLX;GbhSL(-w1mgmIdenUG7pG%`r&ZbHd}XJ<>_u zlxGo|ge`|(ML+3IeEKDg$K)gj#Dlk!WBs|F9K8b)aTr7OGD0*tz5p7jg)hgw5zg~} z2+xOoIdta214f}DgcN1Wsl$?MxdU?ieZ)2B z+t3)xf8m1SWse|M`!vE)1c0@pulBYh${Mc=nZP9^6_|)cfmeYrx`wMTLyHi44 zU0#pBG+Ow$xn+GWD@P=ZAk&m+b|h!=(tZ@zE*E`~~9Z&4QUncoV?P4h7Gea#9TMvb|MARL8w{~#Gy zy@f9)5!=4lKIgdN)>l6`8lgKn`78~^#FHP7{fW>rIt^`=TZ%cjpgreb!zcg6r+sD8 z+At)Hc&f`oUN?O2eD|iCvi^uSh!$@*zs+mmSG9pfe#OVqJ!?DWKxCkf z4&w$KpD0^=7{TlEFG)l101VDa4$j4b`paEygza}!9;`yh5 zf3Oj!_>idx(9oA=Nxdy3_}uzcV60P;`sX|Vzk$Z2uTTRkHp`f1L+oq5+5}znjUhA0 z_;+tOJCM?wUU8ZIVH$xF2GyXRd9DFc3ePnlG^V*PIf3{LUFzdwFeGpHps|DEcUI)5 zu7LtWYlNUzksY*io>Y3gu(D#;V*-9JV^U+@vA|g-rF=^S zqW^wjIW-d;aO^r8L-y<@+@sU^=UXYVPby1xQa2VL#2pypeHvJve`$b(O7;q8ECqi< zB)YhR8^98#QgKZa!<2f8qjH375Tx&arbEV=lrW@sPPy-I6Fco|A8=A0O(aR zh8aQEm|MM#;$OvDq|f+wz+2bTmCO5r(zW|F-$E<|4JMe~eGy|u%=jYW^6S~_yfrsW z-AP#t?;CQlxAy22j6&`G5~D}-ZrNUW+u(&?=W8!}ggrY)Oz#A`ti8l7lpWV@Im4eU z6$eCo`@3xfo^3^t33jhlU9II7KrgLV&O$KgO3zJ+K-$z!OduUsia<&Z5?DhrMtdkU zzTMg)eO7oqzE!H6PpM5-RhXPO1qA?J$Nf<&wOj-#W2xSf$`gGQN>lQ{G$|0>P>Fl= zwx7hB`3frvBUFI2;i0irp`UMi-1yH>>CV3& zbtS^tnwH#gep^c3?PHZi0sLa}?Djf|B1IPn>jQI$D-({!3-_1aYe3TLj&S}4JVMH{ 
zL+Qv8s-gx9r^#6Rr;wEO;TcM5+Fkm3z8NZ17xMJPGQz4cPJEa>iags*=k7P?UuvgK z4-G?0PCHjlsX5f;{I!0TIkkd`()I*P%nxExcDve}!BTsEgPBEALUnR+P>NaDqFIqF z-0V4Ac>HFHNbk#~;!IU-|k9M*F9PiFB5 z8CvX;lD+u9h~4(&Z@!|XM~=nn(KG+?YI3Z?;C{tOPJ5?)g9BPB)d-Le)Cn9r$~e$T zZ?E}N)Sj20ZPl1BnB>d;r6bU6%Jk=yftKXqWWi>5ZRI@WYdkK0>1-%Wt97WXU_S0bPJ`d$X{1|Iw*fW^6H(4d6=Msk4I#)KrjMdYFvP#JbJ?&et zI(TzqNRy?G+-~XF2KcT9mrT+8y8Lw2@WUgdUg|Tx zCI}ug>Fpe!b71#SeiXlbwnAn{BbPD6WaiDMFaeu!q#S5zrru~O#t=h3E9ytnmjW@k zp~JqCg1_RV)Itxr>L+SK6!}?CY*1ih;E7Bn*|ksR>@4iR&sf1VAu~HS@OjKYVluvO zcuwv*P7S2@1{$%~F=H5naaJlhy((cU96GOXnO+wxLXR_wT2iUtUNVFBaui~?mcKHSShu;F%e=F!;js8MxjM6b@tAmB}+>ztO zx5-*YYZ22_ftqgeb4p7~$#m`h6vv8VrU>qiUGu0;FZ}jm&+Gr{O)QS~`Budy9{vl3 z*+nPk{@QlPv+(w1DiaYlCd=`oTHQuyo2KMlKjG}^eNiz4>$@t}601t)+iLlkrG?T~ z-uCLiF_=3sMj&2x*%ax!yCO5VNZN#5HOnx``|@CkzR%;P>zfD3YwOi-QbeY3FxUbY zjF;k$t7H>zLpMZOMgx=VCP8V7n~C}vgGfZdV*(gZjHZ+=wPa>wX$h~3#!3|hr64Jh z5+UU>k{mBvmeK#ILC{2mxXL6rD`|ocjOKnfb!N0f`{cvJyW%SASST`#28h{>iz2CE zVGa7HgrvNK8=}Bgm90w-S1>{NAd+-Fh9U_Na0--CTUzfnEu~oHnIeR ze{waCJq7)uhCd^r+H@rVrK4efa(jmMD8$plo0@aC@4B}_qSkl$Xwcb7Vbt7e*wdf^v9RzuX$g8%KCL)HC|h= zq}`%(9w~JHg%hC zuk&_LW9}@P-N*hv2T>4Dp*GqdQS*zh@wWBh{zrgrwBnUB`cZxq#axV)wQ=J?KLXC> zlIt&Ny6HXw_w#-G1n-Ig+iAt&X7;`&x7OOO9GmeYtnf#V@}s<_yx()|i|#C;c(A|k zy+~C&U!$KjnYZ4Cw9YtP)6_PO_XoRe?xshUuszzfPw>9`2$G=_xYdwgs)VgIMm-rq3+;pHJcoVwGC!C& z_43V*etFTPX8UxJqlD4d_D53>gq#S)aEz_P!J^B;@H07NW2cpl`+`izM}=mW*rsZ^66-wQ?Nhqty(Vp%yIvvet^OM!t}BRE74#X?2Lk}>rkJ{4b*yV;mu zTK^2gjIj>}L?mm8@l+$edZ9bnZRxjM{=$%$Cz@i5`u?^3g=@}b=;SCpSFpmS>f`Eu z{uu8MOE0`Tu8;3bmHqpkp7t86r0p|%T98SSwIYWlE%o zWT<7ADj5yi+ffMcNSOeD$VDdlxe}5lwBb#TKs+psB{)%;Ls0vKE&dyZln`u}(lnjn zyxr9}NyfNSWSBNck`u7sHtbJ(f{}Q8Y{gaF-xsf8EscUG401s%@7V8uy@`t z>+q(s9ZtAJf`q{I06?6ncbFKA$j?gSTf$X!%ZDMEt74F?Rr%MR+oFq$d#>b$#@&g` z;F_%F3PnpsR9yQik9cjN0b z>xS~7#Eoaj2nkd%$lGe7_k&IC+px#{^+J*__&?oG?1fyN#nsuGEcO&;w6P_r5a(fH1f}dZHmHG|?7!{N3jdZpkfN`CrwV9^mKuo*n@ zS;zKl?p-{8bE=;nkK zxpSqE1Tkusg{5a5PKM02UVRqgC3w16u&2OOD=Q+=^SM{HM$J2(=7>CqZvRQDS&peo 
zkIqkzc+)Tmw1RqFFbg{(POt$hGhTA5sU&9MgEYfVex^3Bz8MV&Ucm#sh3J~5Pcm8a zc^ogN=X}wlq(gz_XTH)^UxK_II#v#31Xn4YR$eYJ_Z2ny&O{j@vU<=S&AuxeB42b? zW>COtf?eMNbdnI*7i|LufLUGy30m$u6(X`;FPuB6mBju-1X(%Kek4ONmCcMrMJ?aA z%MkYcy|XFu1t3n^I7Z1k1Jlr9V6s^lhov6@_1cX+JjQA7T z+{}hjcv=M1AjbA0+TBwP+Ob%W<|qabz38cGS$zf~q#wPvjd+LLbEgVwhpAvWWGFL~ z@ZksJhjf*fjIY>UW!>u0&NGSYQL3AMmY2th&c~BSTIe5DDJ7hWYeq*8Z36T(j+57EX1b)46nTZQt#vNEDHOF(=hjG^4En9Ow z1WT|iv31TMOyrU#Hn0z>jJnb_GldY(BjBIl(Q6^~%PD}11D(MSz9hS2xYOOqeg;x= zGR!^bu%uVE_34#t!>a~zw)XyvS4r_pwmurIP6j!M;j#rOUd(FQao4*U;7TSNn_Y z)S?81E@995?xu}xGb5Y)SZ~ydPAeZ)n?11JCslu&62#;2q}>%;fU z72g}9J5YJoSbkbP2oCh-)sckD7(;h{w@+?hwgX^BAVLA8fb5OS zBW!+x01FTW0P4<=j5e|=_YQas$EZIy`F)gVm*ngJO3{ggSkdc`5 z^L+p)Yu~5{GXKdGQGbU7lLR-hc`5=#BZsI{uk`%|1L7@d)-Y={AkR#Sp$+pD=70DF zYCK-+=FK)r9j{V@WT}>&SZ)}>WbMR^wPgfA&<;OuwVS>U{3;%hM zJd|v`{hi_U3j6`0n*o38Dz!O3WfcZDuv{O?oP5H0zlG@+S#5Ok9CvNSIdcu%$-|{0O^30BWPi+Gb6Ofnh7|Fw0RE5 z0tHG(!*#GF(kWv#UmK_-BQUs4BlbZq+gz+dio>YEgcEd&2aeb8iDE_Xt+apH!S3N; z73pj0TArp7kH?TcPy|uVWPh>nbU<@u!#(GreE;l0OaaSer_FAWj|RGgd-YSDzQ6$- z+Wuyamb3nonX4u6wr&i?#8Ld%)zmCeMJ-@J(j!z-IIdVLSmC<@s5gzqZsx;sW;jQf z!EX6b5#(!^X`ePVJ`gwMlN~|Y)r}I=GUvvAsBsL8P-sNE>uWq5yhEyd5uo0_6E~dN!QuCv;kvBTC&5qUdu}?qZSJF z4$#CVY-RC!%tx450O$N#jM*4=pCn_TBj*Vt3_@OF_!>QH?^&_LyfEN6*C%w75j-5< z&(!C7e#2y`ZR)%sJhw@k-FN5R+*7HA(BA7eQQPoEPVhmDi$75jLApvLMQgO0T`!4& zPw8}w`QvsNLRmu$qMh+d`^+~-GLl&lxYgbrce7QanE0}s#9x-60uNC!BeMAFQV1`$ zv()x!v$P30(1pX6vnH5rOefOmgK+xO)L*4(kO9J@-7U(`^z^YUQqU-4ubk9${(S@{ z&j-0|eon9PJpZ+QqDnMJIT|E!>`2_p+exxiSv*r4{`)8Q9k2g;hRT8^>TdM3$5s3a zi4^7K_72OIrHoxQe4DoTLat!p%R+C>)}@NU-+cQIIf;MY*5+6XjL56_&TKa0d!G4i zDqZE`5FMD?o7HrmXH0gwrCWAb72cMAIK8kij>fWIqDn>p^=_s>i?dF!_-?(t$mvKv zB1LEMG3u^*GfL}TfWCY8QOCy$U!b3QkZmH8vMa?5l;NvR9ev;j9IvOL89+IpupUQZ zjP0Kz87*82n7@%^+2x??%z}-E)6UO;nc$*EDd@{O!MUWJ)4bLa+P4vxz8!DCb2?la z=ut*TfAT!@o~<#1NFXVdP?O-du|hu>6ob-PqvMAWVzV$zkezs1(64&AjoDcK)*;2O z{;pb|wh&3@CAdpQQIb=zmid*E9d$_q$5Z;NLR;SIiF9U=W(3C8U!{6ENkRZVz?`R! 
zWuF%_ah?-c)>;I?AmkWqhQ|7Y`;8^`s7z`X(rs;WCaKs_l?V;CE8PFlS7Ci2sZP~o zz+1l*s+%Kz{a68o1mgj^1Yrjjv)B7U{dMQC+0-OCypT949z#?@5ZQW;RLE|6A5lba zeMGj5dd*vnOfs#U#xFNuWWFYif|IPITkJ%X|H z&|gunEml=s^KzgyIpCgV86d%9117|EW6(L}Gi2mQk@$McK#U?&iP1EJEgf$Xk%7EvO?zKC!fsX_S2l_EetzQAF`4l1EIh7(m?r} z?)r8kHW3>Yi5XNWgDd{m?vQ}Qpko~{SAyD-8~ld3u0+URdld?TI=wJ~UtRZqNViwQ z2a&DgYJ|3#Mxb&FbL%4}Z~#v*g7@jV8}}TX@cAqd{myp^9_|gEp)N?mDPVK)VUwFo zfnRJK$AV7WZRFk2gU7YsoYzvUbKT^R&+l}uc{{<26q&{uLE2C!15nBGZAgr}0~5xR z_(J6Vh4UxWGYHGEK-6_6;J+kZ7@R(JuZZQ+X>;p#(_N$wnBS&Cc_g`7cP;X;e7-*7 zPelnT8;0~&CbMWFZ!6I{pI5hZ(uUXtyCK0~XzJhu?8Qjsm=}Xb%}J_?r1nd_SI}%k zRV^a3^;{3$js?W4Ng$C;oH8c(DXy`wRYi(F+kMd19qKoAm$=3!WA>N$^ z7aYRflp-G}aC)ERHSrFvY?sx&^Yh*(q3~Q;Qk~!+G$Td?B3sj}&1A34qZCy=VSL_5 zuG@a_RCmOL{f4Q@*;s`#89hF)Q4Tm8%uLN+Z*lkx1yLO;3B?$G5BU5g^_vbzzl&OV zkqc%#!?{+YZD(b%)%jhB8N>xi0cV5~b3>O?;NC6Mo0y)3gpPg{Z*jCalKeB)s#zkt zKk^G?R!EHseEWf@gd~K*XO4lAnIm0@&X`Q8zojH6MPJK1%PRz3Nud=K=^o!fIt%Tf zWM|#zuv`<9ek7383SC!Pif^^bd04C3O-#k&VvJoQf6_EJDlE)3GZ?NEUD3Hsi)%Ym zt`cyQx~iYae!x0Hpeu&bf>d|IikzChzL~-Pj?_-~EcL$ddI6d@UvS8=INy%o5z6w= z^H)9VzTgyjy1|4fLn?9iwp~519TpjMgkE&WWt{()0?*SM69_r@qHpM=U0^bM8t+`i z1!|P-soCH)3>~kvmEfm6Vr;qi>i>9hAdZLi{n+@^z@R=D-v2Gn~emnlND%_HyOcql8k}@hGN;G?3DW#5)my`Q> zV2k%gyDl7z$nAs&v8eI;hRYIH{(hltQ>%oL+u~OYQTMOuqZX)LNzV|Jt2e1IFs zEJn`5y$`Md-ZrGrELcZs`tx+yd=o|chSG_*bPQ!dN)w zwd2iOS016`mV62#kMy&^OVdMPH~;+|sf8Z8;@b!n3eow%vYM#=0ry6J?6K>yO<7rs zjwS4?wU52NkBQUh*W068K3L2O@4h?7<*wyBmP?}h6*mc%)|5|n#$0r?60W9s4x*ph$_y$S&1m95>w@eS??5g|BNt<-8P%Tx)Lz#Y| zBEijQYvJb`<^KQGaNhA$|6d&Ec5yFqubq7*ql{45WNVli$zCCQq?CEDkiGY=l&`B? 
zWZb&tlFaONjqFttQg-~_*Z2GT&-HLW_wl&i?=xQK`8wp7Uk;$;vP3x}N}4K7pUV;9 zg{tOiHDX4qL0-b=cP^cKZQlMJwx#zzTz;fQ@YGG8=9v^qr+d+2|Bg$D@<3(Dr-5ye z(?SdEhb(LBlb_pF(mp!Q9cx>!kwZ#xh=^#)%dL(9S=>FR^QT%zKWcte<+%U~t_RR1 zQP_Bu1~d6lis)Qn@dxg%l;{H04>zg(UVr$*5#S!8*cmxR3@ouLvij<9ae7f@Z5GVe zB<0YL!SS;L({oUqu|imeOmPGER48u)0Bl{KK!e{tFFQMVy%LPXK(y~VfibMkO<4ILBBTlRR7AU>m`WFXx}2Wpee^+pL&( zprA=Alz7GRJNFH$uaKxWRWA!o7-W{~#h;SXRg+XrCTC`!YvFTqvLdH=4?`1{3y>QD z2(>eI((NtkMgf)Hx+a0U;^ctFp!F)b~X&k|}U~}etb@g5avVuD#I9ins zGC?UlxihooHxzox)6G7A{AVnlB}4FlFz`tRrU~1%;CD6ppks%lgIs__D~0eZXy>*d z>af9D?fMk051ef*pEx`?U?W|)Xa+Njx&NjpziENVs{ZeT@bvd(%4*51ain^g+%d&l zMq~7}u@>N%iD(8_p5-7#K0SJHYQmO{5TSYn!t!_8wScW=Ix2HaUCl6R{_un0$`?LI zr6&~7U`AJ~7YW8lSoZPJ7Avm)%9~6+Yo;5rv~}NpxWXx~0K7j`zOZ7U<}73Dc^FF% ziiw{{@stRl4{)HOp`7eursUhc*O^nG@x>^uIq9%se}BEP5&JH2S{w2_$^ZHLry6#& zkg7I;8g(B=2ZmU7LlRjS2h@4*=Q5Xx!sb^eDc`8+X!zNVbo7#v<+yo(>taO6N}uMO za4L@T>G0>fDEC9Z)@Y?~w=}+h)UVQpSV}Fb;yajjS88$gq_^C)uRY&xxdw{6fTtn1 z!;aV81SU{8F|2EEpatK6x6UXi#c{dF56X3V(KzAzI^tfK-FYN3 zCLChT1UNq?Ixha?>)|y6(|MQdQ*v}KN2_uH{mxVFU|4#7VEX|@YUScCRnlF6F2qt$ zmC_o+`6W19MXH@jOP5ZLx)7M zex37S7Tbg4>m*~I_Z7(aTd=pfwD&>V?a%0{+@GZHJv@&QZZth{<_MKcwwIoeX%)Y; zz{SKhSNDAFBL9n4$Rnxej+UqQnd5#b?yujpkxn-CWSY}aO5Y5O{+zpRwwE4A&>LLL zaJ%t~yHhFzi@=j^0bPSP30Mr8ib{%UfamTHLHo0%L9ni~4uS~|G~+LO-pgpn2Lqdp@ym9$=v9nBGeoY^g*Gnth# z^NO^in+csS?8*4^LQa75;!;b6&6j2;vp{vMiI&`B)p(h2{u`lAgv+ldI5gl=2ziaY@X6_aQX6)x>P6>31s8)6+gSyn%| za)MfsVHwnR*`Koj?+RG{JH8eBgnbLIMX+2k7xQSKLpwo6HeE9eWa6W=-~jCd!Cll{ z>{qqbKjgr@P39J&M|~UkJLiI)=4jdYeI9rjnV7y?`nGDfp!ve*m+3OQCCB}xcg(M= zxGmpHL4?(MI@>RPO>*@3R}1SOb-{^FZK%h5|66!E2;|IQ;KN(G^rP?7D)jP-_?fG;7z~0VO8I)G@Dj^2{Z$A%dc!hEKi1ACDF z7z6249n%|sSbruS`?E9zHr9_7##NGufT9zyZ>8uqgD4etXAv)XwgT^(A

a2)(wR zl~D@_FblY{a&SfjXvt2-^mueM;brUD8Lkd z;B&Pq2-mg3`V)_*D|b?uoiy|y;BW~u^EO*!R2x)`y)&-r-Fh_lxiu)ha>bg&+A}fadIPk#clqDU*QLl`aWOS@}VrN zm+B(@)=d4HC+tQfk?^Cuud-YI1wZK6xB(lS++@PJ&t~mlDM2T*2t|K0k4Cj4J4M>8X6HMOG;Oe!W^sNXXSu>hU(Xr0?fF z96Px{0|P6{bkcGpQDl!Z2ztp@@pq~oi4rY~HWFfk zP@@^)>Abe32a13Fta>LU9V?)Mp++0^lal&W#5 zyBP?#6MV#uo^1ZA?G%+Kx;5T7avilNQ1kIEUR2eb$ltGgHt%UX9R!mg;^2@j=9!NP zi^dcbe_$+)Iq$av!ADYFp=zR}_C`P}WZTUb5#@p4OsJwVh^z6oJ^Tv>H~lQo!K;P`xKgKRiEvvTG$ZIhI$f$7NLk)tgk zI$Q^mS;LUD-RqiG|LATV_hxIoHUoPs2bEgFOrP+{EmpOY1Ub4bT-o#vXploTdBL|I z9z`4^R5x?Hi83z#!aO|`D(oQ(Y<1z;l7D>QCz_8EqoMLYxca7FwQB3Vm-#;TOl@j{ z%?uQ1CTO<*8jGWWGS8eP^%Nat&^1t<<6)9gy{ra=Jt3TWBG!y7h%n{_e=zv3Q0|x7 z^B?-O_+o>06plDqW)N`!v@?f>3SM(4{wlD>fG;*f6~E{H=j#Oowncsu5F}?GxK@71`0?V!7(6VX6HsMr}3ydbPU{c@@xx1Q3x{Nkom> zkGx2wC?X2#fW1)_644<0EN6`l0#SA-T=C%jGeZ~??+RAXNx={yP;H*xT8&GaQWYU; zhKlCE3*HacRho_G%{{-ip5a}E5vcR-MD6;Nho}|9?_;5!rENMD6XukI&+>PIxGvAX zEOS->vV{bA6WatkH0#M2n2-N}y6MWXxGCC9<->;B>(gTIspzXD28VW`&bh{&^DK4A z?JxF$zNEeOIAw#LVc1`;SyM1A<73b{aj4A4gV=;pp6W*E@dK;)QVrsjr38g3^N57$w4v_p7m4SNzz7tsaj_#n_yGa-=(l z@UmhyRDQSn?z|;`A3$KB(o(g9@h=(S$rlB*Pv==8pZ~$C-=~1Du*^KlUS>?H%^@s!>Nmm~5uvV)#Vcj`&);=h^}`oy&}da#XX1aXl&q z&utRnu`cN(Gnh8&oT)vXWsq5+azMRZT;e{|A~B;oZ&Xc7pW!B?*XfcsjI9Lpj8(CMW}nU_vGGQOX8SGodu7pHldn0f zPB#fFxpBnR;yL^3;Y9S1l+10?hVwZ{j{HqE$y>Op0DVYKM4-M1b>)mpc;t#WI$ddi zEvx><#Ro$xTk74zLcG?&T9KY7^#lc)2I5y!YIW-o^7h)B{$(TIAJ>~%pgOk*|6=a5 z@FA75M0yc3Bp92C*j=&Z__0mDjB8=CvF})(BX6qVGqISk>g=6s)1ptzlL)y=n?Gf_ zsNsJye_#va4gR^VlJ9r8+n*a}wwK|z6TA^IEH(IX`YIcAT8{)ww!z3ikp&f}J&-wu zgnj(|%xyPGoV?x_?)$s=#QS%4qpFS`ujPGCvN`Od1F+Q@?CWdKU-6^cFvTU}Wf)>M zR8B6QqkhUPt06CV)v>W+VuJ5C!XO-Dw~L%3kA5g&lT0E;fnOh}j55$-@N?g$^F~}Q z>rx;2tGmy7REbqpzx`wMOVoSZupuK)T!%B_CCZha`$RlZf%za0h(?DRG0oT~M#}Sq zZzEO~AY2MU^!Oe_X5+Otv6lM`eCIwn5#1ReEq;k|Bfl}+9nZRG)!06&ak`5d-({75 zIy&m?cE_sTSrppI45N`^a2mC8;jH}cX`P}lXWEefg-~zn{$+1k51?Wc!aV#VopZMT z;o81)oS21~pgNSg$|LU9(66*MIpeE9k6MOC5Q5s;zDwf9& z6efL)CIt7RiH1-!zKEK=t9e&7P7Cq1CR!qQXQEk=D+a|&nl?4-M+>3i8#AwHxI$w# 
z-Uv~?sxq^dF6$7)d;dJ6r>*lT3ffFWRB~(uXcvV(D?JF4Id<97Vb^-W05#{ABd+`! zxmQYxq`EN(JLx>Z^JZRW9agTr5~}9)=p)7Gs(>f*K8!?NJr^2uO&P1}xt!Iqn;D7j z#qy$W$=j9}JuV!$1hg=_$YO;#8Y+F!&otPv4@_tuT>2^JdyO5(1Df9ukZEY$rf9F@ z?$qAqW%iy96W7@NAF4pG45mAQfdj<9ssDx?C8SY0u!t`HqUc&oq)T zyog;`fcSGY-Yq`!P#V4g)1?yAEN4QoW9JecFdVomaY}IJtWJ$tu{z{Ne|@ux?(7Q7i4q@_#26_wd>!8Cj*h$%f-H&OCYOmon>%Bt+iS&6%*o zp2DfT`{tgHSJidIAVrk@IEAHr?a)!5_7vP2saJyekz#115L6uP6=Q^-^>v}@LOHF3Wi^Uncc`UY2kj)#J^9zG!}aP?C$^vR9gt8 zWtb!-FI|75ZU?xv^;3C0%UOco>77HRjZQbl@br0{frul_*9oc3Mvc5O^m_CG`o-RA zGu#YIpvzA~x<0Zig5F`4ebM0c%tKu;aw`{O*Ua4{2qVB)3dJ&CbW*x^;3~ggrfMor zY4e6vXc7m}1?rxqU$Cq0vxJwZGA@b7(W+M=k-~AIiNzdXR;d;)%vx6*&pvL4L zR3oi$NBq+bIOHoVURtG>rpSwEI2lWE)oUpBzJ_vXVcv9~lZmO2s=f)e$4a6389RQ9 zeAOTQ-+(Jz?&lHDFGqpDj^R|u1_9zOLEzK&6!e)W^h1IMxf(mUlb$digc?_mD+mGP z-!urRssTOc44Iz*wWF&>IHvft%Ukl#pb;c5oABY`jR$25`H$c7tE%-yU9$M^QE%Xf a$GqJg_EI?bU$EPb;<}c;<{Nc8%>Mwdd6zu^ diff --git a/content/develop/manual/patterns/indexes/2idx_1.png b/content/develop/manual/patterns/indexes/2idx_1.png deleted file mode 100644 index 637b6a6a1db358d7c9ef6c54901f699b49ff8098..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11649 zcmeHN2{=`IyWiNf$yA|(unmQbkqp^}%oQPH<|zr=u+5UOL>Y?ALs3#@BGER>JS$|Z zWM`gdo4fYfIj8RTob!C=ez$Yaz4!Ax9&7LaTI>IRr{DX&zgLL5ni2&W0~rJYp-@ql z*MvX_p};R8j0E^5-{9R{2!t%pMovy$MNST`?(A^Y#?A@?Q4V<+b3p5|4P8U!dN(5< zD-HK!QQ1%z?#IDw$_F$^NsmWU6Fv^1pg&e!0B0kdx^w;zvC`8=?y*|AD#Rs!P0ADX64v^O)^W(Ug&^bCBa)^Phy$VZt z$B%MB=4fV-_;bRH)KpQKmyvrfnt~w-ZR_OL-q`K=J?-wHuUeFlNR}FnAuYATHJHfK zxK*bs3r+gr$(R!T@C3{@%9aqDe2AFyoL|tO)PqkTjJC9@#?sfx_vWYb4o-X5V&c*-8xhfo-NwAVGB#6M z6-lgyAkWeB)r+)$DpMa9ND){3dgA8WC%yFpVl(yLPwR=jc-yH@rUWz2ema@E z99TC^Y@_xmB|yHie)Gt1{c7(DWU<($@nA{SqTQv+da0YAumt>7v>xm%Brw8^s$+3* z|BDallEt^xh%lD2F_A|ZW8a#xJv)(5{Rwq1M(ARgYAZn;B5>JvF?jcN5pmbz-Qsa( z2(Rs-OyleVc7kiNZgM&n1sSf#AWhPc(#DT>f+m+mR%VWphz~#TBcbsl>y&9PWfe*# z&L<9UtV^jn3Jnm9WiGl;a%@}&K9JF# zQ&6XOfxftP)4yF#_fev{zzU2ZNb&J{=h0t?qs$nx$!%*}OnKr+y{r8f7{ut*{EI$H 
zdT<(&>DF+5Jh4G`gYhPWs>)%Elt$*hx=S=eGqu$%XT{C58jVw2tk>CJK(qs1$iGX! zqLG~Gok`I9%aky~-HbH)YjQ97J`gblO+32X$=G$a)9NdO%aJl7A1cHhW+mSSb;0w$ z5Z$A=eT%fd&Rj@vGw$$3+KfXeie-rKz4$uw0+(#c_lKwbDQe$Z^jWx@QkgU6+xHvK zP`eQZ*K&`MJGzOP9Ak{V-O|8Rm0V}^cDSD6v(!fw5A8SHBNQy*n9r!i;~PgdE^L%< z=x*2q(8%63m0LLONU9%X6l4*kQ^QmvXqLdG+ROUk5K+it9bcWMIbxAvk$aISL|&6q zKT7Am$CI`vn%y?t6y5MiVg1|Lm)uS$MJkLZk6(9Y+&r{Nbn9GKhL~mzw@q|%^p$4q zW>hmL>5HKI&kL!^p2TomcFwUZu?!j4Z#w(HLKDhu&uz_J(yRE~ z`T4S@kLHnXEt?}YCX*bKOx>JM9L`5Hj#<96GPEic@)T0C%55gs(9oiIA^cEo_&h=p zt;m#NW+QQ$jxR+=`K-cpMp#R8BY!MKOj|TxESl@Aj8a$eB(E2n&rvKnmKA$9sqolk z{wv6GL-df*YwpeCOW{jhOTp!)RhN9?q3UEu zY9BItxcx!EaqAdKoq&fc4|g8E=V8*G(o9g-(QwnH<6=>-cp;f_PS;5*M3-CV^%J2N z=e3JT3%RZOGZC+3M z2#3Ze4xA{WD{^n4h*1)de)(oJ>H3^SMcbRs8!4J6m|pT45#Bg6H;3?`*%D(hSKA{o4IC z=|L*7O!0iClT5~9*TtNfvW}UYT8&$5jcR?R_EzoOGsS1z){Gd6c6)xVqwcYUu}EF| zK6sx{9|k#vEI_&EROCwfnu z1dAhgAseFI;7}G-rs-wor9XDakWrHM(_u{-A?h+-7rC#qg7t#NdKbj_CB@#HykTBE zJ>`=X%-+hr#Lk@Cr62WrV&Y1Dafq!zZj@nzvHO|BN9YfGvMuop#u_!H+%Z?smJdTE ziEoQLl!gzP&s}?IdmEH-{Jsi$yMpJD^57sph16zg!qUxV&95fIPdy zzNIu0F+A8-!qUHs_A_wI>MYHFvM^2A9&R8VzoWI))7l=FJY3G}#`GaOUS#Rn^buQ!O~VpB9eK)k;55~Y)Iu?D?u_i~%bs$# zH0#$wp9c;Fx>JcWzmU$pQM#}Yo9ZdO&f+B9t@YeU-@A(PJm$>ByvfYs>>R{n7js=EL(UhB2zsYNOmxfh7_Ztbc8ZnTH9cc5zphN1u=#p= zyq&(dt)Er2ih0?teOi<8V8Ww}?2N^U(h2{V{FssD6wIK3y#b@I$X4-Iz=vB)cMFfY z8?hNTm!C$7Z!gIwKC+)VKlAYMn(OSP+}p&-lu+gE(n!6#QL;6(d3?MjYw})RY2MS( zm5Sn<>sz}nCv2`NR?!t=vLR%oVkb!a*55(YYaoxH8O(@J;xG;`_WIK`e*;!y#vck2FX}Ggw@e7`@wjPcQ00d_2`1(Yi^3g&c>g zSIa!FP8iS%%~(J_oWP(rv!77!jn;~mz}Dtyvq#8x6?_kHhY>faqTd%mAOy)aT6!*e zsuv|J9PIhcEFH|P_&w|$fzlQNk@S!NF72&c%-|mOc1V3qs>9_RoUPy@{HOT^*`&zeaJZziqt&k{S4=IsT zXC%S<|6%2~9lv{0|NAFTfB)onE8m`!6u=4aoj`bPf!_k^k|L87_*L~%WT(bIcS9iX zaTR%4Ef0d(VFDz(?gOZLoB5?r;;|j(8@y7nJ`abv1)%Dhibb053!jH7nVPyT=^kCS z(qMXYdUbDAde8HX2Q^&A4?;lrj&M(V7(GD>e)O*-h7uknBp`&ssNoB2)TJ=sFL0ax zw*OdO<*t|buKQYnk?#(A(4u5I`SZ?>UV%iezcQC6kIwqYrH19fY0L86R`Ffe z`B<`uD6enZ^igkoi8XnvxYKBAslfs6Y(%){Q!*UQHNvvD6;d;>+OUX!>z8`T5tHXE 
zdoyJczRCIKtBXu41%+s}lIaLxWW+#johalhq!AP2Z2AlDf0*@CmVtPqig+F zj)MnOk%9lVMU^p$LhbvL;Cq$bb5}|IZ3Dr_sXYo}WlLd0^B>S3YM(A9}ZG#UjR25 z%Yxb=X*g1SP@&NaBd^jUmJ|Il7rj|T?>tTlkB`(-O#9{ z%J=$+ymzM(CqKHwPSA?ro#X_ZdS4|nrhu3Klm%0Pz(Rl9Se*Bj5zkn4B7BGOQNlvj z-vVDKZP?u0b}TZd9Z_^>2BThVU@dOe3nS*6Q{-iwWpSpbcAom;$?Kq@9$f^2l(Eze z^!_Tq$$$O=;N&U_VPNnWY5@i~%9LFK+P_K{)<49>^f;CFCJq4YpR&;3vX5c@NB~V- zI!wM{OE6#onB<((&0VRXzn7c*;EMf2*EBaT&5rKv9+yO6mr{LqU9l%zm4URa>HqR| z*Cd*pVVGgh%#QM0SYK%h;7$uC9VMur$S-akMenYkOuY4G4OKF4fh}2|-BL=n7~C#e zEATNIT#ZO*SaY@SoBr%6`dEA$+tAw45&3S~eIsIT^(>S{RSS=$$pDs$I#b}0A%c*w zTImQa9;7(o)V*PbwRrQV`^hYH$=?E_u@^9_D|??HXx0rbIQ5Ow8mwS0a|LqQaHGgY z(1E1LVBc?pCa-CXFxV&=DJw0QcckY6b{umioTQACVl5rY6p;s?Sp!T~Y7z6G*QI}r zc>qWQ;;(=-FuLYivfhzo0U#4zM^ns`6BB^Z2A~|@Z z%tO?$>xwxVpcs+F&;Yy7^mvij0g>)qNe4xOAA~^%-U{Q1ZvIJG5X2fn0Y08;1GUIu*X)Cb_H`t5`wfnq#PLlwm`WqjyMu!>bZ zv9Es2%&(>MW`)ZYQf*yn(Z?SRk`NC8iA zBg_v?J|~vJA;MIGR$6nzIx5(Ue8_VM} zi-@zFv5K^klmpEi3f*b^okHy=)q;S5WdCiTe;erE5rjv@Ki^IL|Baye$~`Hph{f!y z(VN@l6^I5m>}*rV+HO3`4rmgkw{VaLv{&;iKt6J!81nX@N~u)a4be`hos@~(C0uQQ zy}2E1;)`89#pyza(|+dR1@^#HG2~w9p8SnxAk2yaiI*|FVfSN%^sdJYX)?a-g#xW@ z)lTcu^!&k{*1oYJT(0Uo4yTs<_{PN7WQ^1#Ar?%VM`@{HzjY))%F6F5i~+&gP)i2B z5fZq%C$tf=p{9xhUkAYSqQ@ezfqSvl7HIrVg+SYB{|%e3;|e`R?47nSsb$jYz4Zus z<}P=*E_loXuqT7v<*tiF3q9N!zC)Llf#$*J7Tp10RSD4x{zvDx^NNWF*`kD zEAigJJN5l&Id_*!>$Gk1Zn=@~wtLckqVQe8Wj>%_cc`oRPr3^n{mc0Qv;Lg*6^Gfg zh9u2^qDzxx$`0mSg7J%9r@(i5Xk6ZldkW^gS1W&<_sYM^d&b5$&V8p{Xh4F7Z8T^d zOTfN^-;#ST3 zmupP?L{D*Qm#?Pm=J*7+$lzL**dxmBk5@^nzbNLgf(1hU(frtIrlRO48MP(r zjU&0z?XPi7N=aT3+IQj@u6JRvD#mr0TVlQuUOL&xO5dk>=mSyr(By^pzV%M`{aEOK z3K#mj@deOAjW3Z)Js;dl$)ka0&QaXo#lS0-+7Q{xzy@}x-UNw^!>2B&T$ivbw|V;q zARbKNjU0`dwC#JhIUS^37HWr#{%gAzfEP7;HPTc0J#6#8{1LYQYt1r1c*lHemAP=G z$e*Or|HTe1k&!sw0tll9farMlp{xRDCQrajTRqOF7=0~M_QeX8o?HjJ$Y4?hC{ZuK*9AdZ0TM#>ZGx(nv2Kd_QmS z{?5aPJ^MMKxfEdh$c)t+lYC^`;9L(;o1DfzPx`!rXwfh5+S`ra#g7<3Z~#XPSsnUJ z$evoxpF>Ihz%~6Y?TI6yG}yrqIZCVis&AdNTH!!0A07)10VPCdA#2m7s_-9E+(&*6 
z!}q%^;{OHe?^mq@+^fQYEWFAWXaT;6Ec6UQyz%A@x<~kcatAnF_BUA+w5*E4M#qIH7!;E$=8Z=V{@EP6Q{$z2S6u+FANdh3?6%p z`TP#w=mrC=D3zJpVSHE*0YHN4Xvo4R2P%MDXRevyz@=xIvDW~(j!B8oRRUkk%mz>s jK?5rIh6QzSM_{>f>dmM7WV*nApFmU;)Z}x|nFjm^*eSv~ diff --git a/content/develop/manual/patterns/indexes/2idx_2.png b/content/develop/manual/patterns/indexes/2idx_2.png deleted file mode 100644 index 70dc6c381d0c70b01d5870af14e1d16671ed91ce..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 13973 zcmeHt2T+sknl`;7(nX{QQj{tYL?D0yQk1F`1?doxE=V5=aQ_n-@LjJO9q^d^@|R%>2h0XUO~1yIpsCqD+i*S(y$qQBY8@>g#En zQBY7(1OKTQ=zuR~4?5QXWmm~XJnk#{P1`$_sau{)DAogER;lC-ZTF;HhcL1#bcpH)3M7&9F3UA zRiD`zdMPxIQl3)yMpI@JEpPac<>AVgD{(I|6<6X?F+@LiD$+}KK}rf^*Rp#h6pz(K zZk<<=;F;6PKI+kqS=(Z$5%%bca`I#E)CzJlGh36+IB#`&K7;Ui_bu#cl|-6=)XX+4 zg>&NG^Amc`FjB_oD)QM=$*WZlR5*Srzj~nYWUe*6|Jbq6s1%{{Z-I#5v5%X6M@!+sce`|e#&O3u1;jhaXKKBoEV z1WQ*iib77>1!HRUT6w^+e8~8-Ffgt`UC#O zA14aeBR^wl-Hd*uM`$CPwmHX}Hv2XxR?FR5nJZAM9>&Nf^{^i#N?8aFkmnK5Kn8!!+h#u9s$CNiwJdfF!8oY|xo+3U?O6E|$L-BR9$lj5f zRH?%h1U{kqZgq~y^XqPZ02 z5xLs^SyxS8=7i=@_8pps9DbDjn(w;S2bmFSfx9!Am%90TRJ&bf5BqagQ^VPy4+V9@ zTTJ8)4pGOj-oMA#^x08Cemj}tB6l{sAL}{=BqrsvV~KzMfdLLIg7s6g)1XtJJ)0wc zvFDKO!odKl2cM2lviJlj+a2M5e!s0n0QK_owdV0A)+zPx`a$Nk$0t~Y;xJQwt72Q6 zTNk!!wk)^YBDl^zve#M>^I^Pl_u5^jyB3WCjq(nul7@XpN7$*ORzJ&pHgklo9$r1Z zN*$$bCUGUvA|dEa#~ZV6Zr@nH3C}^U+|M@-5Z8UI^Yi7;8#np4*|(|ho$JX~HfucY z_Vne`t6$B(`hAsP%)OgHb0I>iME|wTYyBUEpRM&H-XtBnbhE&@!a3^al`pDKoXlvB zdme`!ujo7f{^t92Gq@S&x65vvZgz9W<^;Y;yzw@OZ=H1h;9}!ar4X#3>r(iY#nkjN zYcAxO*0=%meBF70YzH@$Q#>;17J8~W*zDN0udTArS(7@R$~>==RMph&DW8)L5rOlP zSV%`nk6x4>xg>kltH!2o?ApiU+hS{RYdveUYpiQ*KarC1TsGVsTsX;7Qfim9rRJpy z&Du@W-UJ!?dN6uMEpl`|i4cP&saZrk+jzF~Y(PrDeBLb8#KJVdl1Ea=q&`>NQ=c9@Mh# zEGdP~^pk<;`7EofYfd(o%zJeqx@AV|B|K-_45Ws&KJj5w2x-PN{1Pde? 
zRT#bOymT$PFw!g9>+KR8nxFp6He%vzogmx_ZKGj?wAqWJ^ssk8L6om|*Kn2fg&Yre zoBpP*ReH2ne!RHTr~9GTPtT|BeI6SDDKn)jE}==cWQkrLUN5fSZr_TEiivusU#Oq= zCG-;fQk>hzIXEObC2d%|jHfKHjWtPEPUAx@`o)bUr}~cC@tw4LZ|`-#o^b3l>s9zx z+F05Z?K%xB|1^JJv3cZ6Na7|fwo8de$xR97vHL;sgJCh$LACT8OlKq7N$UPW-{Me! z?gHHcA1(o>G0LW#bEa6}gn+H`4dt5xc}MI{ZYHm`C$?7_H5;9KcmCaR7(a%!(^FQG zH}E;tb1zH2LE%A#L5$bDSBcjxuc@V(CG7a9n!KQ`V5l0w>Z|mDyd=dXx4DlEWuG1` za8H`gY&Ke>t!KyQsfj^^h0WkT8R8l5s<<)7rRyKA`I~O!9jZxwot$7*GgMcS<$J(a%-2Z`X3MvHtyXvE z`__9J^xgTpDW4yIzJKhtne(^cdKL`?IwAkbiC8OLaBpLn>+YJrH|f7dj{D zX6}pfzlt^GA3g2n@pbf6?J(T0gXc%mT^Z!`VGPf7*x2FHaz*1a-hLC^Y)cw_(-(d+ zuM;bVy;OHZ3Az*U|7@Fws$7K9IufM5EzLa z??97$;yStQJv1$vuZgxv2wyN7Smqr3MP~a zg$rjtUJBN_XV!#^ejhm&8OU}dWtmL=G4%l z5^pU~RA82WBA;N_g=epDw-${H@7va)OLDV_se|SGgX4vEi_QVI z^$vkkjfQDy81F0NH|jloYu>lj=B=JyM<1?{sZLw6nsA#dS#n#v))P8c_-R}UZ?kj_ z*;O4*Z1_rF#}+Aw!|QCYZuk*(1;x}u@XDmsjl>S)lg7;H`cSFh!j0Osz&bQPoiv^7 zfohh3I)8SjGx{!BzSXs z-$JJodsn*(SG!lg2Q~&mmIH5_Y?+XJw_9+G$S6#req!$>+6*rB{@WFVv2C%RZFzGs zMO8&_(Hr&UVR*u>zqp(0c@$46CZB?dQCXbs4!)nlq>&<1q&`COaGw6B91}{!r7wn^stkw3i?rBj8)>3loYmyr$5h8 zt`vB?x>g6f&RBCREI3h&h-2!u^WPlYLw`D5!GK%h&Yxh})d{CR9!vYhux_A?f`am; z+hr?%E5nN_PTrof4$j_=F0w(MK0s(oL7^6;0{rRe;_o0FVRWZ{(|7$t$3o3Hm-`__?PA)Jo zP&N=E>wOa@ck;}cGjj3@ataDEzzi9`U@w1%AQ>+|(fv(+@1yPF=XBG}$KTD{OPIW` zgQNE?f2fEE`Jli2+rOtvklSBR^78vd7C@jJc|`7{ti0S`_6C-!k-w^#xCOa*SZTX? 
zx_J2kXQ(TlR8#}!|EH0^p7@8ASN^#2)E`&=VdU3JH90Z?e;{a|Ti{oKUFu9~a(`jH zIumY&HJF0p_?Eu**~>wci{q4~^Gc-@-D!5=Vi4@tt2fJ%t_-$W?8ZO&rq=SIc;t}$ zl9u^A;<5B*LKOd$J83OET)ke6;h?bQ9STG#)!rqgn+Q(w4@#~2lvkxmm^2fP6Q`!X=tva?g-*pEXLJm{L-32Th* zAHck?%NRE<-GmXgrYGz;KOBJmI?UwdUFavtf)v%`M3v$#z!vxbrqK5+N_i zVztf~RvIP{lF@@j*ZUo#1q=BeUVh3Bs?<5ECx~;2GM7QcIS6Q!2)7G(Lgr&YrMjJ& zpMeKx1Jm?$_(1VX0+NzEHGEDC6k@_1XJIqb^B`ZCfb~p<6(%5GG=XVc6Loztg>cMm(g-^-r@0ZQ&6V8RK}3Lpzi07EdjUV6^})CCb>+MCxg;4xer zk6E}f-@^EN=7e zYy2T50CzHdyVkaeH_9BjZ{PKbE+y($gkj}J#jWjx)`=>4>sQM^M*UMv^>1{~Do66S zdCN)yD8p=&`xsON6gvPxOk6Vd3dmMEDT@Z(+5d!|Dn;gLX|vx`|CiA5H~0B-2xadX z@f+20WPl$d>vAEWlFD-dV$hsQpI*oJyEdKJVi@Og0ezGM=)-34w=6K_H~cuEHjw|h zlKwfR_>;C!Y+{3-eyGd3 z0<`%f-aFer)!&HS2rnvK(T#^GEm>&r6D>&S@I6wI!wzoL8?O8Szlvt1H+FSGM~e<^ zT=>x%CuNOndIrMmx-9O;Iy<~{?@H1*x@;4!j)SZ3p1Gi5_tACo-96a8S9pW!OF=+d zH@v?hcL`X&`|BGgrMno^>L3DSU#RIu38Rac+uPkGVKw%)jaTAC_R$fFh(81AMjpqb zpB0|w77+oBf0=zN*?IDr8XAp!w&NaQc^LbM3>TfEldy>O_InBYF-#@Hk(;iQEWG&5 zlN%YFxa&C2+i(&N?M^2+Jdu9#x)z8wwlb6LsJx~?+#m-xuj10@UJ!Ven1H33;h|xe zCODiWFl#)h%zKJR#O9G0rxs#$l$5ZZ``5dH=*>6mMs5lQ3nA)8Ju0VTy>x2>Lt#VI zuFdMvM3$-3bmWOaHuf80_-PH!50PdX@=(En@=DbF$hwpYF!(~pe!d+gnP^4d6D~fx zNu&)XZG?u8k8GCSO50Q#EGpZyCVHgVg>4hC?V9mU(EGEuD&kWJ<5+VG=n@awV*%ou zUsnpm9oFH$fJ@h^Z6*6*VGiX}d_zQ5^fO{l-!IfHVjsFlhZCH%gWRrb zXHXV}WW6~6Fn*rNL!6o~Rdi2UzIT}qE9iSeedPYng_ttWi@*_E9*7R-NrQJQS&>+n zaYhdqg(6yLl@bvxHC5w{vzclqkmHRdVqLTNhnLoSl;R9J)7jTc${gLvsnvRaB(k>_EF07l?}SW}d>5izhQpvPp%s-A_Xo)*X-;xeU8)qdWqV`S zv->3++U<0`q+lxfTJbw?r}41GXThWx&x&1Gg(`obxZo`^k0#ZqvaCCnRUke;8ek)!2O-sKT;Xv7jYFcj@rU6I3oxOEtUj~nt-`K!)`pqzVB;1k_xDg49T>`D4-4Ph#q{IDB@Sn)SW1SMW4YTu1ICGvV!9|sofWxpMN^OuE$Z3D zf%FBg0u1QVWIsU^93Y%--)w?gFS??`Hz|okVr7vKkY~6D6)lAh{CpT<8MnwFG)hJU zZp0JbgB3sDO30gwUwr|}$Nrd4YYJkQ)er8A&bc(*!o2Y_yQ^sxIwV4-%f>wOX9^+= zwLssUC0;qcxjgGBsHil06od>>#Cz3)affxMVYRGYr?PYMTgvc)Bz(niC443v-wdBg zj)^79D~u^-Nj9{#i$9QiZHM$4Jq#_K`a0lE@+zOxFx|*FTQMK+=r&zevOFhBo*Jw1%U3f< zS$GN!g>12OE=$B)#m+dK;JY3>NT1q6r%_TXq^9A!(0ma@DM9XJBRauFJtBr#??$?J 
zK0TRSQ+pLYWg=oGvCHQ6+ECHftAm^X0lAD9C8%Nd=IFnrtZr6kea=SiVRC68X`vTh zhLuc7-mhdacn6wQ50y{`i$5t)=!no6J!I&@csG*#-Q?2bR3f4s;h*OfzDFb~s(wnR z5g`s9h9ul>2{A3R=+K;^WhO_*eE%OL$uJ?K4Gby&rEtXOOLvkHe8+J8JITgnFpFG zO&m{7+J1KPF^1|rbKZsbhXVulOvnb8 zaYDYj|MnNLm{COBjv-4)zc)DI)a{8HKI-+6wTo;kcTnau2Mp=dzn`niyZAzRAD&F99*9%xvfgl4B`vR4&Ig0w zz;JMgX9!N|P@|q{Ct5rnd${PJyhM@706iebLWyEkzAC|{k0LkiH3n}VVkLi*E+)dL zc)ydzWC(mu0_`9UMq!;%po5i{D^6ii3z^>CF;UNQ)mYIWqDCu`mB@*uamKH(_FPjS zgwCu$xhj~X!DI;mlS|>qYL6baRuZcI_VSgk(2TB;`bBb>joPFEfLY4+BbqHa^uV_) zr7o37g?VpxOOM6ZG%k5B@njM+(^22uId4-rX!yh%y;}aLyEmW|AMkMyU58(abuu2q zS74xIq)~p3nq4YQgVA+zH&0~^hh~n=2S2s)D7QxL<*ybHS5wI*_s0N)=r#wWKG(>6 z&xu!RF34b>;M48HE8u1^k+;;^2=F-9y;ws0$P#Et;ioV?TcIiHGs~V`(dK(&W5m;7 z8eUIM!;6Mmu6L5Hb;|NNK62a?-Dn5hP1$`NxpPtV*1k||5%C6eIJz@80?VnF`+x9t zq}>E>Z84ek1QAe-eHV;VI08rA?jMe}+$Y^ekq((EXeE!r9FvH2ZNo|1EXrO!E2d=92Fo|w{gxV#5s zcmkF21-er7M!dSD3sHmPma8=>j7ql!+FPkYHb_nT<31zqbR!M5Lfy})!#1|dDu}aH zANDID%N#)?DTu)EDQM|l`qLvHJ9L+yt;C$HbC>9oBeS;F5&(&y%?3;>_41P~zR^Xc zc)`i%QyVWp#@8CyhrD{d%O5y%C$Ola4sXl7lao@*hZOY0FHBqeX25rvL3_m0 zp07;JP=`F$r&?m04p|oT^n3R6M+iiQ9H`k``sL*kH<0BS1?Umjs55qXZtA&ReSO%@ z))e-d??#5efQFm1Q`f+<@9DDPp)&YF3L88*z5?w5-OpftqoPWlQ$cnY%c(6JZ1Bbeb254LP77HLoGGIsv~{a`_j-7EnmPHnYB5n4P zX4Ql2jLwO5ncakn(B^6{zqg?qFzF&Le27sU8Nv{&*O=dRu{a+%!Ft=6V@03y+2+=p zp%}2ta)S|Yx^2DR+A!|9VGSd7DiF`5m(A+A@tex82O+*2D#N}m*veIx(GP^saEn=A z9oe?hg=AcF&qzm{z7QB@6fXl#D8JB2kzfPtRj^t|y>Wf2=!BhJMsXM^#<#Y2if^^t zGc5e{d%KJVmoze2K8}bv=gF9p8<9WwT5!jm9Jh``T*#g}C>=Elu=!wWT}9@gHy2h3 z>pj;tST|nXv96baoOZdbyE#xXoFu=yIj9jg@@5eP%?Av%GJML77(V(Y(dJG4QTr|Z zGM9YvZgKm&>k^+xl=-X#R|Lk`l88}DRA+%vpq?scCc%w zW~^BhxhGdywh(@~KEDhD7-qgVQ5! 
zQiQvdez|}E)1#7AP;3NGc}aEY3ptIz4~KVUyhj_y;v>J88ivae7+(XiNpTMmd$N2m z#aCujuH}QYch}&u=Vn3ya;d0pV%}+797rC*pcB)fZJ1C(D^Tn8o@xUtzKbyXk9PSP zFnHN!$AfLFWxpPGqWPP(z~RdZ@?N{!%VQVRUeCI6KAuprMpmwam~$}b8A|JmCQh@O zC0FQ}b#}#WHX3a`pz_s2_tJt;!6z@rc{K_*&cv$&?c`0idx?+^(!5*{5i7_@l- z40$@G$>!8|dTTh62oZ*ybe>-lz)BH@RF%LOV=|SBO^g~QF-JcT*3&xN;(nXFqvoUl z6&sK&w4SZ(C6M}d6GqErKnv8gm-d;1P(w>*2U#`-Nihw{(}d;Xs@)@x$ffBrs&@u- z6Ty$eHYTffA3Ypq4AQL=#HtboBtXP52CkVYba^89AX0avrN4-8wrIKh{f1z?L79vN zuzd`zAdpsv^Dn*o-ZH*(8#eArBEUMFGF%A3I4A+)bjITQ?m)T#o-|Ph($>Xuh;(IF z3r2TdU(>%B>(X`LLnE$cx)m9LI7IfYlgj%UdH#bQ;}-l{&NWrV@i5!#^dE453wT`( zS8WRq6(UC}1KFF2ZoMNe^RVXrADw;PPHyoNGs~8vQ`k+2Z@X8`;3ycvgz;O0HHr#6 zvOd1pv3!j8d>5P!gHP1WKfi0d-P;&Y?7k->WIDVC@50bldm>$(>ffgB^V5P1?4D&U z284!%l7b^>0;XXzBdUEBS{LvLT3A+%w9xVfm6IpvfJ=QMJ;zQ(=BV1)$>W18Y7Q@y_ABcQYWk=YghWHwH&>z}t)1OqwZ)Ib z8I%>kj1?oq1#xN(Hm`8O35)Rl6(bM$e*OqyvQgku7Tvgh5$YQZ`}U1kKT^&i>IYW2 zSBw!CY$4rjUVAM4Qv323=GfYTP{X!d1sXb&^le<<_JJWNj8W}_Nl!xLNGOt2x40Br zHy;5tpQ^8v3yKHy!jj|Ac>3;C7t2ODYvHLYj{B)Z5m&MS-T9QyA(`>g>GGk~I|=Hp zQ?tH+6k47YpqrlpH4*fb*Is9H{kwQ_EUC2ntCB04Z&ePr$9{(>BQw+-6;mHvu+>y% zvN%I#34o?j!hBVVrsqKIHvkIa28NaP`|)JK9p}jb?fv9UQVb}fD~?C(7tt$$^13yr z7$~dEr$FJo&!0I7xH%2O7?U2Im*LZW1Tq7rnf1z8{W{1ewVcuG0j~Z@E`GM?FrJ7z z1qGNoSx%Kz)20Gh842#@2k)f(66H1^e#k}2y+n8Ei`uB}GUSqmY9T;UHkmNNKw_X0 z4hnY$oPFgg+2v!M!h{@}au=zs*)kfoNk7^1deoHv71|$&i~Y2rxy=F#RMBKs>mzy! 
zW?u4D{q$#!2D#PF4AkV~`XwWyQ=NI^fW!dq3Xmf=l^P2-79y8bib}r%6)(FZ>&%sa z1?}t&&VKBg)SJP!r65XT1If^HN0Sk#Tt~U_90PfG2vGiJrHOr&5d*5#V%b+7gL1N$ z1{zkQLHiAurw1RiJWX~zyIbbR6?aUf-8YeDK!Sfb$eoj>*Je|Y zWIlBufKmt8w(1Qz((YujxmiuC1FFyrP@?x~x$*R@j$tXjw)4LxX{i#qb1^P`v}gv; zX^~aeIzr>bx4f@6B2;1Gk_tVIgN|f>oO7Hx>x=M-wYWQ~>0Sfa9`)OSD}myDW@)q$nRVcdSe8e(5ioVJjuGRhu~x)PNX3M8BJ4nGXR=`oA10XN-s{42j>( z^`)=50|v}y_WpraV05x)#v}G?n(I^p;#E$HUo&5e4~N>qu$AiMB<8tq$8Q+1yy61j zi`)VHs9l)2Zr7saI*~E2$JW_1q9o>*0qQ{Xl#W~Ri|n0*0yLo=2rulK z75Rzj>g3|3oy#+F*-LqbH`Af%#`a!Ok6%VZLd?DBW`$oiq(ifF@ej>dz#bA}z;sR6 zL>sV_zqV=r*CVJAng(slI^FR8s>o}a>;M6>Kdpi5)_I&UHid1I_WHuE81&Cj_Gjb9r`L0mAZ%mkqsRePtLamgxH zAty&4mp%xT7`n&~%*Ez+O>WQJf}A2f`b5G#&-l{gN9+gQH{AHZdnL_Y`ZpeHqq=DR zF|Q8(=5+(PL7y!%bZK)H=;fGe@t!8@qa0A>+UXmic|LfBcJ@vc)#?*6h{?*xKVyke zh$@fK5tdFanUHur2)4sPYy{ik+<6sj9?;vrg1`02)W4e%{nsW5nAcy~=&$Plqg&wL zH;5`gN)ZB+2}Jey(!yJ(2|*q3s*aYBt{V8uabCd2;`bXIduk=Qp8Yc#A>wn*0{&!>l-;Mo)|8DF92=-^+1pR;8*#9fq z0B^c@UtB)AAB~980MA?`ucDm85?C$rBjURaO#?K5B8~t#ivf}{JJ>DTJf+h;pA`Qw zk8b|>o&kXS-w$<6tbrfp7`u)AJV+Ao+;^IxZJ6b}j(@e6Pk-|s4RFD~->_9>0?)N% zmEZrucwL4uD8xsbzGDAXAj1o5lp-}IT_|h+m69>em(DvoEuE wK!TWVlux~%lY0TV)kjRr?0&e?OS>yH{lS|f?!;;>`AM$6j*)iZIs1tJ0LXL&9smFU diff --git a/content/develop/manual/patterns/indexes/index.md b/content/develop/manual/patterns/indexes/index.md deleted file mode 100644 index 1eea4a79fd..0000000000 --- a/content/develop/manual/patterns/indexes/index.md +++ /dev/null @@ -1,755 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'Building secondary indexes in Redis - - ' -linkTitle: Secondary indexing -title: Secondary indexing -weight: 1 ---- - -Redis is not exactly a key-value store, since values can be complex data structures. However it has an external key-value shell: at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers *primary key access*. 
However since Redis is a data structures server, its capabilities can be used for indexing, in order to create secondary indexes of different kinds, including composite (multi-column) indexes. - -This document explains how it is possible to create indexes in Redis using the following data structures: - -* Sorted sets to create secondary indexes by ID or other numerical fields. -* Sorted sets with lexicographical ranges for creating more advanced secondary indexes, composite indexes and graph traversal indexes. -* Sets for creating random indexes. -* Lists for creating simple iterable indexes and last N items indexes. - -Implementing and maintaining indexes with Redis is an advanced topic, so most -users that need to perform complex queries on data should understand if they -are better served by a relational store. However often, especially in caching -scenarios, there is the explicit need to store indexed data into Redis in order to speedup common queries which require some form of indexing in order to be executed. - -Simple numerical indexes with sorted sets -=== - -The simplest secondary index you can create with Redis is by using the -sorted set data type, which is a data structure representing a set of -elements ordered by a floating point number which is the *score* of -each element. Elements are ordered from the smallest to the highest score. - -Since the score is a double precision float, indexes you can build with -vanilla sorted sets are limited to things where the indexing field is a number -within a given range. - -The two commands to build these kind of indexes are [`ZADD`](/commands/zadd) and -[`ZRANGE`](/commands/zrange) with the `BYSCORE` argument to respectively add items and retrieve items within a -specified range. - -For instance, it is possible to index a set of person names by their -age by adding element to a sorted set. The element will be the name of the -person and the score will be the age. 
- - ZADD myindex 25 Manuel - ZADD myindex 18 Anna - ZADD myindex 35 Jon - ZADD myindex 67 Helen - -In order to retrieve all persons with an age between 20 and 40, the following -command can be used: - - ZRANGE myindex 20 40 BYSCORE - 1) "Manuel" - 2) "Jon" - -By using the **WITHSCORES** option of [`ZRANGE`](/commands/zrange) it is also possible -to obtain the scores associated with the returned elements. - -The [`ZCOUNT`](/commands/zcount) command can be used in order to retrieve the number of elements -within a given range, without actually fetching the elements, which is also -useful, especially given the fact the operation is executed in logarithmic -time regardless of the size of the range. - -Ranges can be inclusive or exclusive, please refer to the [`ZRANGE`](/commands/zrange) -command documentation for more information. - -**Note**: Using the [`ZRANGE`](/commands/zrange) with the `BYSCORE` and `REV` arguments, it is possible to query a range in -reversed order, which is often useful when data is indexed in a given -direction (ascending or descending) but we want to retrieve information -the other way around. - -Using objects IDs as associated values ---- - -In the above example we associated names to ages. However in general we -may want to index some field of an object which is stored elsewhere. -Instead of using the sorted set value directly to store the data associated -with the indexed field, it is possible to store just the ID of the object. - -For example I may have Redis hashes representing users. 
Each user is -represented by a single key, directly accessible by ID: - - HMSET user:1 id 1 username antirez ctime 1444809424 age 38 - HMSET user:2 id 2 username maria ctime 1444808132 age 42 - HMSET user:3 id 3 username jballard ctime 1443246218 age 33 - -If I want to create an index in order to query users by their age, I -could do: - - ZADD user.age.index 38 1 - ZADD user.age.index 42 2 - ZADD user.age.index 33 3 - -This time the value associated with the score in the sorted set is the -ID of the object. So once I query the index with [`ZRANGE`](/commands/zrange) with the `BYSCORE` argument, I'll -also have to retrieve the information I need with [`HGETALL`](/commands/hgetall) or similar -commands. The obvious advantage is that objects can change without touching -the index, as long as we don't change the indexed field. - -In the next examples we'll almost always use IDs as values associated with -the index, since this is usually the more sounding design, with a few -exceptions. - -Updating simple sorted set indexes ---- - -Often we index things which change over time. In the above -example, the age of the user changes every year. In such a case it would -make sense to use the birth date as index instead of the age itself, -but there are other cases where we simply want some field to change from -time to time, and the index to reflect this change. 
- -The [`ZADD`](/commands/zadd) command makes updating simple indexes a very trivial operation -since re-adding back an element with a different score and the same value -will simply update the score and move the element at the right position, -so if the user `antirez` turned 39 years old, in order to update the -data in the hash representing the user, and in the index as well, we need -to execute the following two commands: - - HSET user:1 age 39 - ZADD user.age.index 39 1 - -The operation may be wrapped in a [`MULTI`](/commands/multi)/[`EXEC`](/commands/exec) transaction in order to -make sure both fields are updated or none. - -Turning multi dimensional data into linear data ---- - -Indexes created with sorted sets are able to index only a single numerical -value. Because of this you may think it is impossible to index something -which has multiple dimensions using this kind of indexes, but actually this -is not always true. If you can efficiently represent something -multi-dimensional in a linear way, they it is often possible to use a simple -sorted set for indexing. - -For example the [Redis geo indexing API](/commands/geoadd) uses a sorted -set to index places by latitude and longitude using a technique called -[Geo hash](https://en.wikipedia.org/wiki/Geohash). The sorted set score -represents alternating bits of longitude and latitude, so that we map the -linear score of a sorted set to many small *squares* in the earth surface. -By doing an 8+1 style center plus neighborhoods search it is possible to -retrieve elements by radius. - -Limits of the score ---- - -Sorted set elements scores are double precision floats. It means that -they can represent different decimal or integer values with different -errors, because they use an exponential representation internally. -However what is interesting for indexing purposes is that the score is -always able to represent without any error numbers between -9007199254740992 -and 9007199254740992, which is `-/+ 2^53`. 
- -When representing much larger numbers, you need a different form of indexing -that is able to index numbers at any precision, called a lexicographical -index. - -Lexicographical indexes -=== - -Redis sorted sets have an interesting property. When elements are added -with the same score, they are sorted lexicographically, comparing the -strings as binary data with the `memcmp()` function. - -For people that don't know the C language nor the `memcmp` function, what -it means is that elements with the same score are sorted comparing the -raw values of their bytes, byte after byte. If the first byte is the same, -the second is checked and so forth. If the common prefix of two strings is -the same then the longer string is considered the greater of the two, -so "foobar" is greater than "foo". - -There are commands such as [`ZRANGE`](/commands/zrange) and [`ZLEXCOUNT`](/commands/zlexcount) that -are able to query and count ranges in a lexicographically fashion, assuming -they are used with sorted sets where all the elements have the same score. - -This Redis feature is basically equivalent to a `b-tree` data structure which -is often used in order to implement indexes with traditional databases. -As you can guess, because of this, it is possible to use this Redis data -structure in order to implement pretty fancy indexes. - -Before we dive into using lexicographical indexes, let's check how -sorted sets behave in this special mode of operation. Since we need to -add elements with the same score, we'll always use the special score of -zero. - - ZADD myindex 0 baaa - ZADD myindex 0 abbb - ZADD myindex 0 aaaa - ZADD myindex 0 bbbb - -Fetching all the elements from the sorted set immediately reveals that they -are ordered lexicographically. - - ZRANGE myindex 0 -1 - 1) "aaaa" - 2) "abbb" - 3) "baaa" - 4) "bbbb" - -Now we can use [`ZRANGE`](/commands/zrange) with the `BYLEX` argument in order to perform range queries. 
- - ZRANGE myindex [a (b BYLEX - 1) "aaaa" - 2) "abbb" - -Note that in the range queries we prefixed the `min` and `max` elements -identifying the range with the special characters `[` and `(`. -This prefixes are mandatory, and they specify if the elements -of the range are inclusive or exclusive. So the range `[a (b` means give me -all the elements lexicographically between `a` inclusive and `b` exclusive, -which are all the elements starting with `a`. - -There are also two more special characters indicating the infinitely negative -string and the infinitely positive string, which are `-` and `+`. - - ZRANGE myindex [b + BYLEX - 1) "baaa" - 2) "bbbb" - -That's it basically. Let's see how to use these features to build indexes. - -A first example: completion ---- - -An interesting application of indexing is completion. Completion is what -happens when you start typing your query into a search engine: the user -interface will anticipate what you are likely typing, providing common -queries that start with the same characters. - -A naive approach to completion is to just add every single query we -get from the user into the index. For example if the user searches `banana` -we'll just do: - - ZADD myindex 0 banana - -And so forth for each search query ever encountered. Then when we want to -complete the user input, we execute a range query using [`ZRANGE`](/commands/zrange) with the `BYLEX` argument. -Imagine the user is typing "bit" inside the search form, and we want to -offer possible search keywords starting for "bit". We send Redis a command -like that: - - ZRANGE myindex "[bit" "[bit\xff" BYLEX - -Basically we create a range using the string the user is typing right now -as start, and the same string plus a trailing byte set to 255, which is `\xff` in the example, as the end of the range. This way we get all the strings that start for the string the user is typing. 
- -Note that we don't want too many items returned, so we may use the **LIMIT** option in order to reduce the number of results. - -Adding frequency into the mix ---- - -The above approach is a bit naive, because all the user searches are the same -in this way. In a real system we want to complete strings according to their -frequency: very popular searches will be proposed with a higher probability -compared to search strings typed very rarely. - -In order to implement something which depends on the frequency, and at the -same time automatically adapts to future inputs, by purging searches that -are no longer popular, we can use a very simple *streaming algorithm*. - -To start, we modify our index in order to store not just the search term, -but also the frequency the term is associated with. So instead of just adding -`banana` we add `banana:1`, where 1 is the frequency. - - ZADD myindex 0 banana:1 - -We also need logic in order to increment the index if the search term -already exists in the index, so what we'll actually do is something like -that: - - ZRANGE myindex "[banana:" + BYLEX LIMIT 0 1 - 1) "banana:1" - -This will return the single entry of `banana` if it exists. Then we -can increment the associated frequency and send the following two -commands: - - ZREM myindex 0 banana:1 - ZADD myindex 0 banana:2 - -Note that because it is possible that there are concurrent updates, the -above three commands should be send via a [Lua script](/commands/eval) -instead, so that the Lua script will atomically get the old count and -re-add the item with incremented score. - -So the result will be that, every time a user searches for `banana` we'll -get our entry updated. - -There is more: our goal is to just have items searched very frequently. -So we need some form of purging. 
When we actually query the index -in order to complete the user input, we may see something like that: - - ZRANGE myindex "[banana:" + BYLEX LIMIT 0 10 - 1) "banana:123" - 2) "banaooo:1" - 3) "banned user:49" - 4) "banning:89" - -Apparently nobody searches for "banaooo", for example, but the query was -performed a single time, so we end presenting it to the user. - -This is what we can do. Out of the returned items, we pick a random one, -decrement its score by one, and re-add it with the new score. -However if the score reaches 0, we simply remove the item from the list. -You can use much more advanced systems, but the idea is that the index in -the long run will contain top searches, and if top searches will change over -the time it will adapt automatically. - -A refinement to this algorithm is to pick entries in the list according to -their weight: the higher the score, the less likely entries are picked -in order to decrement its score, or evict them. - -Normalizing strings for case and accents ---- - -In the completion examples we always used lowercase strings. However -reality is much more complex than that: languages have capitalized names, -accents, and so forth. - -One simple way do deal with this issues is to actually normalize the -string the user searches. Whatever the user searches for "Banana", -"BANANA" or "Ba'nana" we may always turn it into "banana". - -However sometimes we may like to present the user with the original -item typed, even if we normalize the string for indexing. In order to -do this, what we do is to change the format of the index so that instead -of just storing `term:frequency` we store `normalized:frequency:original` -like in the following example: - - ZADD myindex 0 banana:273:Banana - -Basically we add another field that we'll extract and use only for -visualization. Ranges will always be computed using the normalized strings -instead. This is a common trick which has multiple applications. 
- -Adding auxiliary information in the index ---- - -When using a sorted set in a direct way, we have two different attributes -for each object: the score, which we use as an index, and an associated -value. When using lexicographical indexes instead, the score is always -set to 0 and basically not used at all. We are left with a single string, -which is the element itself. - -Like we did in the previous completion examples, we are still able to -store associated data using separators. For example we used the colon in -order to add the frequency and the original word for completion. - -In general we can add any kind of associated value to our indexing key. -In order to use a lexicographical index to implement a simple key-value store -we just store the entry as `key:value`: - - ZADD myindex 0 mykey:myvalue - -And search for the key with: - - ZRANGE myindex [mykey: + BYLEX LIMIT 0 1 - 1) "mykey:myvalue" - -Then we extract the part after the colon to retrieve the value. -However a problem to solve in this case is collisions. The colon character -may be part of the key itself, so it must be chosen in order to never -collide with the key we add. - -Since lexicographical ranges in Redis are binary safe you can use any -byte or any sequence of bytes. However if you receive untrusted user -input, it is better to use some form of escaping in order to guarantee -that the separator will never happen to be part of the key. - -For example if you use two null bytes as separator `"\0\0"`, you may -want to always escape null bytes into two bytes sequences in your strings. - -Numerical padding ---- - -Lexicographical indexes may look like good only when the problem at hand -is to index strings. Actually it is very simple to use this kind of index -in order to perform indexing of arbitrary precision numbers. 
- -In the ASCII character set, digits appear in the order from 0 to 9, so -if we left-pad numbers with leading zeroes, the result is that comparing -them as strings will order them by their numerical value. - - ZADD myindex 0 00324823481:foo - ZADD myindex 0 12838349234:bar - ZADD myindex 0 00000000111:zap - - ZRANGE myindex 0 -1 - 1) "00000000111:zap" - 2) "00324823481:foo" - 3) "12838349234:bar" - -We effectively created an index using a numerical field which can be as -big as we want. This also works with floating point numbers of any precision -by making sure we left pad the numerical part with leading zeroes and the -decimal part with trailing zeroes like in the following list of numbers: - - 01000000000000.11000000000000 - 01000000000000.02200000000000 - 00000002121241.34893482930000 - 00999999999999.00000000000000 - -Using numbers in binary form ---- - -Storing numbers in decimal may use too much memory. An alternative approach -is just to store numbers, for example 128 bit integers, directly in their -binary form. However for this to work, you need to store the numbers in -*big endian format*, so that the most significant bytes are stored before -the least significant bytes. This way when Redis compares the strings with -`memcmp()`, it will effectively sort the numbers by their value. - -Keep in mind that data stored in binary format is less observable for -debugging, harder to parse and export. So it is definitely a trade off. - -Composite indexes -=== - -So far we explored ways to index single fields. However we all know that -SQL stores are able to create indexes using multiple fields. For example -I may index products in a very large store by room number and price. - -I need to run queries in order to retrieve all the products in a given -room having a given price range. What I can do is to index each product -in the following way: - - ZADD myindex 0 0056:0028.44:90 - ZADD myindex 0 0034:0011.00:832 - -Here the fields are `room:price:product_id`. 
I used just four digits padding -in the example for simplicity. The auxiliary data (the product ID) does not -need any padding. - -With an index like that, to get all the products in room 56 having a price -between 10 and 30 dollars is very easy. We can just run the following -command: - - ZRANGE myindex [0056:0010.00 [0056:0030.00 BYLEX - -The above is called a composed index. Its effectiveness depends on the -order of the fields and the queries I want to run. For example the above -index cannot be used efficiently in order to get all the products having -a specific price range regardless of the room number. However I can use -the primary key in order to run queries regardless of the price, like -*give me all the products in room 44*. - -Composite indexes are very powerful, and are used in traditional stores -in order to optimize complex queries. In Redis they could be useful both -to implement a very fast in-memory Redis index of something stored into -a traditional data store, or in order to directly index Redis data. - -Updating lexicographical indexes -=== - -The value of the index in a lexicographical index can get pretty fancy -and hard or slow to rebuild from what we store about the object. So one -approach to simplify the handling of the index, at the cost of using more -memory, is to also take alongside to the sorted set representing the index -a hash mapping the object ID to the current index value. - -So for example, when we index we also add to a hash: - - MULTI - ZADD myindex 0 0056:0028.44:90 - HSET index.content 90 0056:0028.44:90 - EXEC - -This is not always needed, but simplifies the operations of updating -the index. In order to remove the old information we indexed for the object -ID 90, regardless of the *current* fields values of the object, we just -have to retrieve the hash value by object ID and [`ZREM`](/commands/zrem) it in the sorted -set view. 
- -Representing and querying graphs using a hexastore -=== - -One cool thing about composite indexes is that they are handy in order -to represent graphs, using a data structure which is called -[Hexastore](http://www.vldb.org/pvldb/vol1/1453965.pdf). - -The hexastore provides a representation for relations between objects, -formed by a *subject*, a *predicate* and an *object*. -A simple relation between objects could be: - - antirez is-friend-of matteocollina - -In order to represent this relation I can store the following element -in my lexicographical index: - - ZADD myindex 0 spo:antirez:is-friend-of:matteocollina - -Note that I prefixed my item with the string **spo**. It means that -the item represents a subject,predicate,object relation. - -In can add 5 more entries for the same relation, but in a different order: - - ZADD myindex 0 sop:antirez:matteocollina:is-friend-of - ZADD myindex 0 ops:matteocollina:is-friend-of:antirez - ZADD myindex 0 osp:matteocollina:antirez:is-friend-of - ZADD myindex 0 pso:is-friend-of:antirez:matteocollina - ZADD myindex 0 pos:is-friend-of:matteocollina:antirez - -Now things start to be interesting, and I can query the graph in many -different ways. For example, who are all the people `antirez` -*is friend of*? - - ZRANGE myindex "[spo:antirez:is-friend-of:" "[spo:antirez:is-friend-of:\xff" BYLEX - 1) "spo:antirez:is-friend-of:matteocollina" - 2) "spo:antirez:is-friend-of:wonderwoman" - 3) "spo:antirez:is-friend-of:spiderman" - -Or, what are all the relationships `antirez` and `matteocollina` have where -the first is the subject and the second is the object? - - ZRANGE myindex "[sop:antirez:matteocollina:" "[sop:antirez:matteocollina:\xff" BYLEX - 1) "sop:antirez:matteocollina:is-friend-of" - 2) "sop:antirez:matteocollina:was-at-conference-with" - 3) "sop:antirez:matteocollina:talked-with" - -By combining different queries, I can ask fancy questions. 
For example:
*Who are all my friends that like beer, live in Barcelona, and whom matteocollina considers friends as well?*
To get this information I start with an `spo` query to find all the people
I'm friend with. Then for each result I get I perform an `spo` query
to check if they like beer, removing the ones for which I can't find
this relation. I do it again to filter by city. Finally I perform an `ops`
query to find, of the list I obtained, who is considered a friend by
matteocollina.
- -![Points in the space](2idx_0.png) - -In order to represent data that makes these kinds of queries fast to perform, -we start by padding our numbers with 0. So for example imagine we want to -add the point 10,25 (x,y) to our index. Given that the maximum range in the -example is 400 we can just pad to three digits, so we obtain: - - x = 010 - y = 025 - -Now what we do is to interleave the digits, taking the leftmost digit -in x, and the leftmost digit in y, and so forth, in order to create a single -number: - - 001205 - -This is our index, however in order to more easily reconstruct the original -representation, if we want (at the cost of space), we may also add the -original values as additional columns: - - 001205:10:25 - -Now, let's reason about this representation and why it is useful in the -context of range queries. For example let's take the center of our blue -box, which is at `x=75` and `y=200`. We can encode this number as we did -earlier by interleaving the digits, obtaining: - - 027050 - -What happens if we substitute the last two digits respectively with 00 and 99? -We obtain a range which is lexicographically continuous: - - 027000 to 027099 - -What this maps to is to a square representing all values where the `x` -variable is between 70 and 79, and the `y` variable is between 200 and 209. -To identify this specific area, we can write random points in that interval. - -![Small area](2idx_1.png) - -So the above lexicographic query allows us to easily query for points in -a specific square in the picture. However the square may be too small for -the box we are searching, so that too many queries are needed. -So we can do the same but instead of replacing the last two digits with 00 -and 99, we can do it for the last four digits, obtaining the following -range: - - 020000 029999 - -This time the range represents all the points where `x` is between 0 and 99 -and `y` is between 200 and 299. 
Drawing random points in this interval -shows us this larger area. - -![Large area](2idx_2.png) - -So now our area is too big for our query, and still our search box is -not completely included. We need more granularity, but we can easily obtain -it by representing our numbers in binary form. This time, when we replace -digits instead of getting squares which are ten times bigger, we get squares -which are just two times bigger. - -Our numbers in binary form, assuming we need just 9 bits for each variable -(in order to represent numbers up to 400 in value) would be: - - x = 75 -> 001001011 - y = 200 -> 011001000 - -So by interleaving digits, our representation in the index would be: - - 000111000011001010:75:200 - -Let's see what are our ranges as we substitute the last 2, 4, 6, 8, ... -bits with 0s ad 1s in the interleaved representation: - - 2 bits: x between 74 and 75, y between 200 and 201 (range=2) - 4 bits: x between 72 and 75, y between 200 and 203 (range=4) - 6 bits: x between 72 and 79, y between 200 and 207 (range=8) - 8 bits: x between 64 and 79, y between 192 and 207 (range=16) - -And so forth. Now we have definitely better granularity! -As you can see substituting N bits from the index gives us -search boxes of side `2^(N/2)`. - -So what we do is check the dimension where our search box is smaller, -and check the nearest power of two to this number. Our search box -was 50,100 to 100,300, so it has a width of 50 and a height of 200. -We take the smaller of the two, 50, and check the nearest power of two -which is 64. 64 is 2^6, so we would work with indexes obtained replacing -the latest 12 bits from the interleaved representation (so that we end -replacing just 6 bits of each variable). - -However single squares may not cover all our search, so we may need more. -What we do is to start with the left bottom corner of our search box, -which is 50,100, and find the first range by substituting the last 6 bits -in each number with 0. 
Then we do the same with the right top corner. - -With two trivial nested for loops where we increment only the significant -bits, we can find all the squares between these two. For each square we -convert the two numbers into our interleaved representation, and create -the range using the converted representation as our start, and the same -representation but with the latest 12 bits turned on as end range. - -For each square found we perform our query and get the elements inside, -removing the elements which are outside our search box. - -Turning this into code is simple. Here is a Ruby example: - - def spacequery(x0,y0,x1,y1,exp) - bits=exp*2 - x_start = x0/(2**exp) - x_end = x1/(2**exp) - y_start = y0/(2**exp) - y_end = y1/(2**exp) - (x_start..x_end).each{|x| - (y_start..y_end).each{|y| - x_range_start = x*(2**exp) - x_range_end = x_range_start | ((2**exp)-1) - y_range_start = y*(2**exp) - y_range_end = y_range_start | ((2**exp)-1) - puts "#{x},#{y} x from #{x_range_start} to #{x_range_end}, y from #{y_range_start} to #{y_range_end}" - - # Turn it into interleaved form for ZRANGE query. - # We assume we need 9 bits for each integer, so the final - # interleaved representation will be 18 bits. - xbin = x_range_start.to_s(2).rjust(9,'0') - ybin = y_range_start.to_s(2).rjust(9,'0') - s = xbin.split("").zip(ybin.split("")).flatten.compact.join("") - # Now that we have the start of the range, calculate the end - # by replacing the specified number of bits from 0 to 1. - e = s[0..-(bits+1)]+("1"*bits) - puts "ZRANGE myindex [#{s} [#{e} BYLEX" - } - } - end - - spacequery(50,100,100,300,6) - -While non immediately trivial this is a very useful indexing strategy that -in the future may be implemented in Redis in a native way. -For now, the good thing is that the complexity may be easily encapsulated -inside a library that can be used in order to perform indexing and queries. 
-One example of such library is [Redimension](https://github.com/antirez/redimension), a proof of concept Ruby library which indexes N-dimensional data inside Redis using the technique described here. - -Multi dimensional indexes with negative or floating point numbers -=== - -The simplest way to represent negative values is just to work with unsigned -integers and represent them using an offset, so that when you index, before -translating numbers in the indexed representation, you add the absolute value -of your smaller negative integer. - -For floating point numbers, the simplest approach is probably to convert them -to integers by multiplying the integer for a power of ten proportional to the -number of digits after the dot you want to retain. - -Non range indexes -=== - -So far we checked indexes which are useful to query by range or by single -item. However other Redis data structures such as Sets or Lists can be used -in order to build other kind of indexes. They are very commonly used but -maybe we don't always realize they are actually a form of indexing. - -For instance I can index object IDs into a Set data type in order to use -the *get random elements* operation via [`SRANDMEMBER`](/commands/srandmember) in order to retrieve -a set of random objects. Sets can also be used to check for existence when -all I need is to test if a given item exists or not or has a single boolean -property or not. - -Similarly lists can be used in order to index items into a fixed order. -I can add all my items into a Redis list and rotate the list with -[`RPOPLPUSH`](/commands/rpoplpush) using the same key name as source and destination. This is useful -when I want to process a given set of items again and again forever in the -same order. Think of an RSS feed system that needs to refresh the local copy -periodically. 
- -Another popular index often used with Redis is a **capped list**, where items -are added with [`LPUSH`](/commands/lpush) and trimmed with [`LTRIM`](/commands/ltrim), in order to create a view -with just the latest N items encountered, in the same order they were -seen. - -Index inconsistency -=== - -Keeping the index updated may be challenging, in the course of months -or years it is possible that inconsistencies are added because of software -bugs, network partitions or other events. - -Different strategies could be used. If the index data is outside Redis -*read repair* can be a solution, where data is fixed in a lazy way when -it is requested. When we index data which is stored in Redis itself -the [`SCAN`](/commands/scan) family of commands can be used in order to verify, update or -rebuild the index from scratch, incrementally. diff --git a/content/develop/manual/patterns/twitter-clone.md b/content/develop/manual/patterns/twitter-clone.md deleted file mode 100644 index b8d3fb823a..0000000000 --- a/content/develop/manual/patterns/twitter-clone.md +++ /dev/null @@ -1,460 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: Learn several Redis patterns by building a Twitter clone -linkTitle: Patterns example -title: Redis patterns example -weight: 20 ---- - -This article describes the design and implementation of a [very simple Twitter clone](https://github.com/antirez/retwis) written using PHP with Redis as the only database. The programming community has traditionally considered key-value stores as a special purpose database that couldn't be used as a drop-in replacement for a relational database for the development of web applications. This article will try to show that Redis data structures on top of a key-value layer are an effective data model to implement many kinds of applications. - -Note: the original version of this article was written in 2009 when Redis was -released. 
It was not exactly clear at that time that the Redis data model was -suitable to write entire applications. Now after 5 years there are many cases of -applications using Redis as their main store, so the goal of the article today -is to be a tutorial for Redis newcomers. You'll learn how to design a simple -data layout using Redis, and how to apply different data structures. - -Our Twitter clone, called [Retwis](https://github.com/antirez/retwis), is structurally simple, has very good performance, and can be distributed among any number of web and Redis servers with little efforts. [View the Retwis source code](https://github.com/antirez/retwis). - -I used PHP for the example because of its universal readability. The same (or better) results can be obtained using Ruby, Python, Erlang, and so on. -A few clones exist (however not all the clones use the same data layout as the -current version of this tutorial, so please, stick with the official PHP -implementation for the sake of following the article better). - -* [Retwis-RB](https://github.com/danlucraft/retwis-rb) is a port of Retwis to Ruby and Sinatra written by Daniel Lucraft. -* [Retwis-J](https://docs.spring.io/spring-data/data-keyvalue/examples/retwisj/current/) is a port of Retwis to Java, using the Spring Data Framework, written by [Costin Leau](http://twitter.com/costinl). Its source code can be found on [GitHub](https://github.com/SpringSource/spring-data-keyvalue-examples), and there is comprehensive documentation available at [springsource.org](http://j.mp/eo6z6I). - -What is a key-value store? ---- -The essence of a key-value store is the ability to store some data, called a _value_, inside a key. The value can be retrieved later only if we know the specific key it was stored in. There is no direct way to search for a key by value. In some sense, it is like a very large hash/dictionary, but it is persistent, i.e. when your application ends, the data doesn't go away. 
So, for example, I can use the command [`SET`](/commands/set) to store the value *bar* in the key *foo*: - - SET foo bar - -Redis stores data permanently, so if I later ask "_What is the value stored in key foo?_" Redis will reply with *bar*: - - GET foo => bar - -Other common operations provided by key-value stores are [`DEL`](/commands/del), to delete a given key and its associated value, SET-if-not-exists (called [`SETNX`](/commands/setnx) on Redis), to assign a value to a key only if the key does not already exist, and [`INCR`](/commands/incr), to atomically increment a number stored in a given key: - - SET foo 10 - INCR foo => 11 - INCR foo => 12 - INCR foo => 13 - -Atomic operations ---- - -There is something special about [`INCR`](/commands/incr). You may wonder why Redis provides such an operation if we can do it ourselves with a bit of code? After all, it is as simple as: - - x = GET foo - x = x + 1 - SET foo x - -The problem is that incrementing this way will work as long as there is only one client working with the key _foo_ at one time. See what happens if two clients are accessing this key at the same time: - - x = GET foo (yields 10) - y = GET foo (yields 10) - x = x + 1 (x is now 11) - y = y + 1 (y is now 11) - SET foo x (foo is now 11) - SET foo y (foo is now 11) - -Something is wrong! We incremented the value two times, but instead of going from 10 to 12, our key holds 11. This is because the increment done with `GET / increment / SET` *is not an atomic operation*. Instead the INCR provided by Redis, Memcached, ..., are atomic implementations, and the server will take care of protecting the key during the time needed to complete the increment in order to prevent simultaneous accesses. - -What makes Redis different from other key-value stores is that it provides other operations similar to INCR that can be used to model complex problems. 
This is why you can use Redis to write whole web applications without using another database like an SQL database, and without going crazy. - -Beyond key-value stores: lists ---- - -In this section we will see which Redis features we need to build our Twitter clone. The first thing to know is that Redis values can be more than strings. Redis supports Lists, Sets, Hashes, Sorted Sets, Bitmaps, and HyperLogLog types as values, and there are atomic operations to operate on them so we are safe even with multiple accesses to the same key. Let's start with Lists: - - LPUSH mylist a (now mylist holds 'a') - LPUSH mylist b (now mylist holds 'b','a') - LPUSH mylist c (now mylist holds 'c','b','a') - -[`LPUSH`](/commands/lpush) means _Left Push_, that is, add an element to the left (or to the head) of the list stored in _mylist_. If the key _mylist_ does not exist it is automatically created as an empty list before the PUSH operation. As you can imagine, there is also an [`RPUSH`](/commands/rpush) operation that adds the element to the right of the list (on the tail). This is very useful for our Twitter clone. User updates can be added to a list stored in `username:updates`, for instance. - -There are operations to get data from Lists, of course. For instance, LRANGE returns a range from the list, or the whole list. - - LRANGE mylist 0 1 => c,b - -LRANGE uses zero-based indexes - that is the first element is 0, the second 1, and so on. The command arguments are `LRANGE key first-index last-index`. The _last-index_ argument can be negative, with a special meaning: -1 is the last element of the list, -2 the penultimate, and so on. So, to get the whole list use: - - LRANGE mylist 0 -1 => c,b,a - -Other important operations are LLEN that returns the number of elements in the list, and LTRIM that is like LRANGE but instead of returning the specified range *trims* the list, so it is like _Get range from mylist, Set this range as new value_ but does so atomically. 
- -The Set data type ---- - -Currently we don't use the Set type in this tutorial, but since we use -Sorted Sets, which are kind of a more capable version of Sets, it is better -to start introducing Sets first (which are a very useful data structure -per se), and later Sorted Sets. - -There are more data types than just Lists. Redis also supports Sets, which are unsorted collections of elements. It is possible to add, remove, and test for existence of members, and perform the intersection between different Sets. Of course it is possible to get the elements of a Set. Some examples will make it more clear. Keep in mind that [`SADD`](/commands/sadd) is the _add to set_ operation, [`SREM`](/commands/srem) is the _remove from set_ operation, [`SISMEMBER`](/commands/sismember) is the _test if member_ operation, and [`SINTER`](/commands/sinter) is the _perform intersection_ operation. Other operations are [`SCARD`](/commands/scard) to get the cardinality (the number of elements) of a Set, and [`SMEMBERS`](/commands/smembers) to return all the members of a Set. - - SADD myset a - SADD myset b - SADD myset foo - SADD myset bar - SCARD myset => 4 - SMEMBERS myset => bar,a,foo,b - -Note that [`SMEMBERS`](/commands/smembers) does not return the elements in the same order we added them since Sets are *unsorted* collections of elements. When you want to store in order it is better to use Lists instead. Some more operations against Sets: - - SADD mynewset b - SADD mynewset foo - SADD mynewset hello - SINTER myset mynewset => foo,b - -[`SINTER`](/commands/sinter) can return the intersection between Sets but it is not limited to two Sets. You may ask for the intersection of 4,5, or 10000 Sets. Finally let's check how [`SISMEMBER`](/commands/sismember) works: - - SISMEMBER myset foo => 1 - SISMEMBER myset notamember => 0 - -The Sorted Set data type ---- - -Sorted Sets are similar to Sets: collection of elements. 
However in Sorted -Sets each element is associated with a floating point value, called the -*element score*. Because of the score, elements inside a Sorted Set are -ordered, since we can always compare two elements by score (and if the score -happens to be the same, we compare the two elements as strings). - -Like Sets in Sorted Sets it is not possible to add repeated elements, every -element is unique. However it is possible to update an element's score. - -Sorted Set commands are prefixed with `Z`. The following is an example -of Sorted Sets usage: - - ZADD zset 10 a - ZADD zset 5 b - ZADD zset 12.55 c - ZRANGE zset 0 -1 => b,a,c - -In the above example we added a few elements with [`ZADD`](/commands/zadd), and later retrieved -the elements with [`ZRANGE`](/commands/zrange). As you can see the elements are returned in order -according to their score. In order to check if a given element exists, and -also to retrieve its score if it exists, we use the [`ZSCORE`](/commands/zscore) command: - - ZSCORE zset a => 10 - ZSCORE zset non_existing_element => NULL - -Sorted Sets are a very powerful data structure, you can query elements by -score range, lexicographically, in reverse order, and so forth. -To know more [please check the Sorted Set sections in the official Redis commands documentation](https://redis.io/commands/#sorted_set). - -The Hash data type ---- - -This is the last data structure we use in our program, and is extremely easy -to grasp since there is an equivalent in almost every programming language out -there: Hashes. Redis Hashes are basically like Ruby or Python hashes, a -collection of fields associated with values: - - HMSET myuser name Salvatore surname Sanfilippo country Italy - HGET myuser surname => Sanfilippo - -[`HMSET`](/commands/hmset) can be used to set fields in the hash, that can be retrieved with -[`HGET`](/commands/hget) later. 
It is possible to check if a field exists with [`HEXISTS`](/commands/hexists), or -to increment a hash field with [`HINCRBY`](/commands/hincrby) and so forth. - -Hashes are the ideal data structure to represent *objects*. For example we -use Hashes in order to represent Users and Updates in our Twitter clone. - -Okay, we just exposed the basics of the Redis main data structures, -we are ready to start coding! - -Prerequisites ---- - -If you haven't downloaded the [Retwis source code](https://github.com/antirez/retwis) already please grab it now. It contains a few PHP files, and also a copy of [Predis](https://github.com/nrk/predis), the PHP client library we use in this example. - -Another thing you probably want is a working Redis server. Just get the source, build with `make`, run with `./redis-server`, and you're ready to go. No configuration is required at all in order to play with or run Retwis on your computer. - -Data layout ---- - -When working with a relational database, a database schema must be designed so that we'd know the tables, indexes, and so on that the database will contain. We don't have tables in Redis, so what do we need to design? We need to identify what keys are needed to represent our objects and what kind of values these keys need to hold. - -Let's start with Users. We need to represent users, of course, with their username, userid, password, the set of users following a given user, the set of users a given user follows, and so on. The first question is, how should we identify a user? Like in a relational DB, a good solution is to identify different users with different numbers, so we can associate a unique ID with every user. Every other reference to this user will be done by id. Creating unique IDs is very simple to do by using our atomic [`INCR`](/commands/incr) operation. 
When we create a new user we can do something like this, assuming the user is called "antirez": - - INCR next_user_id => 1000 - HMSET user:1000 username antirez password p1pp0 - -*Note: you should use a hashed password in a real application, for simplicity -we store the password in clear text.* - -We use the `next_user_id` key in order to always get a unique ID for every new user. Then we use this unique ID to name the key holding a Hash with user's data. *This is a common design pattern* with key-values stores! Keep it in mind. -Besides the fields already defined, we need some more stuff in order to fully define a User. For example, sometimes it can be useful to be able to get the user ID from the username, so every time we add a user, we also populate the `users` key, which is a Hash, with the username as field, and its ID as value. - - HSET users antirez 1000 - -This may appear strange at first, but remember that we are only able to access data in a direct way, without secondary indexes. It's not possible to tell Redis to return the key that holds a specific value. This is also *our strength*. This new paradigm is forcing us to organize data so that everything is accessible by _primary key_, speaking in relational DB terms. - -Followers, following, and updates ---- - -There is another central need in our system. A user might have users who follow them, which we'll call their followers. A user might follow other users, which we'll call a following. We have a perfect data structure for this. That is... Sets. -The uniqueness of Sets elements, and the fact we can test in constant time for -existence, are two interesting features. However what about also remembering -the time at which a given user started following another one? 
In an enhanced -version of our simple Twitter clone this may be useful, so instead of using -a simple Set, we use a Sorted Set, using the user ID of the following or follower -user as element, and the unix time at which the relation between the users -was created, as our score. - -So let's define our keys: - - followers:1000 => Sorted Set of uids of all the followers users - following:1000 => Sorted Set of uids of all the following users - -We can add new followers with: - - ZADD followers:1000 1401267618 1234 => Add user 1234 with time 1401267618 - -Another important thing we need is a place where we can add the updates to display in the user's home page. We'll need to access this data in chronological order later, from the most recent update to the oldest, so the perfect kind of data structure for this is a List. Basically every new update will be [`LPUSH`](/commands/lpush)ed in the user updates key, and thanks to [`LRANGE`](/commands/lrange), we can implement pagination and so on. Note that we use the words _updates_ and _posts_ interchangeably, since updates are actually "little posts" in some way. - - posts:1000 => a List of post ids - every new post is LPUSHed here. - -This list is basically the User timeline. We'll push the IDs of her/his own -posts, and, the IDs of all the posts of created by the following users. -Basically, we'll implement a write fanout. - -Authentication ---- - -OK, we have more or less everything about the user except for authentication. We'll handle authentication in a simple but robust way: we don't want to use PHP sessions, as our system must be ready to be distributed among different web servers easily, so we'll keep the whole state in our Redis database. All we need is a random **unguessable** string to set as the cookie of an authenticated user, and a key that will contain the user ID of the client holding the string. - -We need two things in order to make this thing work in a robust way. 
-First: the current authentication *secret* (the random unguessable string) -should be part of the User object, so when the user is created we also set -an `auth` field in its Hash: - - HSET user:1000 auth fea5e81ac8ca77622bed1c2132a021f9 - -Moreover, we need a way to map authentication secrets to user IDs, so -we also take an `auths` key, which has as value a Hash type mapping -authentication secrets to user IDs. - - HSET auths fea5e81ac8ca77622bed1c2132a021f9 1000 - -In order to authenticate a user we'll do these simple steps (see the `login.php` file in the Retwis source code): - - * Get the username and password via the login form. - * Check if the `username` field actually exists in the `users` Hash. - * If it exists we have the user id, (i.e. 1000). - * Check if user:1000 password matches, if not, return an error message. - * Ok authenticated! Set "fea5e81ac8ca77622bed1c2132a021f9" (the value of user:1000 `auth` field) as the "auth" cookie. - -This is the actual code: - - include("retwis.php"); - - # Form sanity checks - if (!gt("username") || !gt("password")) - goback("You need to enter both username and password to login."); - - # The form is ok, check if the username is available - $username = gt("username"); - $password = gt("password"); - $r = redisLink(); - $userid = $r->hget("users",$username); - if (!$userid) - goback("Wrong username or password"); - $realpassword = $r->hget("user:$userid","password"); - if ($realpassword != $password) - goback("Wrong username or password"); - - # Username / password OK, set the cookie and redirect to index.php - $authsecret = $r->hget("user:$userid","auth"); - setcookie("auth",$authsecret,time()+3600*24*365); - header("Location: index.php"); - -This happens every time a user logs in, but we also need a function `isLoggedIn` in order to check if a given user is already authenticated or not. These are the logical steps preformed by the `isLoggedIn` function: - - * Get the "auth" cookie from the user. 
If there is no cookie, the user is not logged in, of course. Let's call the value of the cookie ``. - * Check if `` field in the `auths` Hash exists, and what the value (the user ID) is (1000 in the example). - * In order for the system to be more robust, also verify that user:1000 auth field also matches. - * OK the user is authenticated, and we loaded a bit of information in the `$User` global variable. - -The code is simpler than the description, possibly: - - function isLoggedIn() { - global $User, $_COOKIE; - - if (isset($User)) return true; - - if (isset($_COOKIE['auth'])) { - $r = redisLink(); - $authcookie = $_COOKIE['auth']; - if ($userid = $r->hget("auths",$authcookie)) { - if ($r->hget("user:$userid","auth") != $authcookie) return false; - loadUserInfo($userid); - return true; - } - } - return false; - } - - function loadUserInfo($userid) { - global $User; - - $r = redisLink(); - $User['id'] = $userid; - $User['username'] = $r->hget("user:$userid","username"); - return true; - } - -Having `loadUserInfo` as a separate function is overkill for our application, but it's a good approach in a complex application. The only thing that's missing from all the authentication is the logout. What do we do on logout? That's simple, we'll just change the random string in user:1000 `auth` field, remove the old authentication secret from the `auths` Hash, and add the new one. - -*Important:* the logout procedure explains why we don't just authenticate the user after looking up the authentication secret in the `auths` Hash, but double check it against user:1000 `auth` field. The true authentication string is the latter, while the `auths` Hash is just an authentication field that may even be volatile, or, if there are bugs in the program or a script gets interrupted, we may even end with multiple entries in the `auths` key pointing to the same user ID. 
The logout code is the following (`logout.php`): - - include("retwis.php"); - - if (!isLoggedIn()) { - header("Location: index.php"); - exit; - } - - $r = redisLink(); - $newauthsecret = getrand(); - $userid = $User['id']; - $oldauthsecret = $r->hget("user:$userid","auth"); - - $r->hset("user:$userid","auth",$newauthsecret); - $r->hset("auths",$newauthsecret,$userid); - $r->hdel("auths",$oldauthsecret); - - header("Location: index.php"); - -That is just what we described and should be simple to understand. - -Updates ---- - -Updates, also known as posts, are even simpler. In order to create a new post in the database we do something like this: - - INCR next_post_id => 10343 - HMSET post:10343 user_id $owner_id time $time body "I'm having fun with Retwis" - -As you can see each post is just represented by a Hash with three fields. The ID of the user owning the post, the time at which the post was published, and finally, the body of the post, which is, the actual status message. - -After we create a post and we obtain the post ID, we need to LPUSH the ID in the timeline of every user that is following the author of the post, and of course in the list of posts of the author itself (everybody is virtually following herself/himself). This is the file `post.php` that shows how this is performed: - - include("retwis.php"); - - if (!isLoggedIn() || !gt("status")) { - header("Location:index.php"); - exit; - } - - $r = redisLink(); - $postid = $r->incr("next_post_id"); - $status = str_replace("\n"," ",gt("status")); - $r->hmset("post:$postid","user_id",$User['id'],"time",time(),"body",$status); - $followers = $r->zrange("followers:".$User['id'],0,-1); - $followers[] = $User['id']; /* Add the post to our own posts too */ - - foreach($followers as $fid) { - $r->lpush("posts:$fid",$postid); - } - # Push the post on the timeline, and trim the timeline to the - # newest 1000 elements. 
- $r->lpush("timeline",$postid); - $r->ltrim("timeline",0,1000); - - header("Location: index.php"); - -The core of the function is the `foreach` loop. We use [`ZRANGE`](/commands/zrange) to get all the followers of the current user, then the loop will [`LPUSH`](/commands/lpush) the push the post in every follower timeline List. - -Note that we also maintain a global timeline for all the posts, so that in the Retwis home page we can show everybody's updates easily. This requires just doing an [`LPUSH`](/commands/lpush) to the `timeline` List. Let's face it, aren't you starting to think it was a bit strange to have to sort things added in chronological order using `ORDER BY` with SQL? I think so. - -There is an interesting thing to notice in the code above: we used a new -command called [`LTRIM`](/commands/ltrim) after we perform the [`LPUSH`](/commands/lpush) operation in the global -timeline. This is used in order to trim the list to just 1000 elements. The -global timeline is actually only used in order to show a few posts in the -home page, there is no need to have the full history of all the posts. - -Basically [`LTRIM`](/commands/ltrim) + [`LPUSH`](/commands/lpush) is a way to create a *capped collection* in Redis. - -Paginating updates ---- - -Now it should be pretty clear how we can use [`LRANGE`](/commands/lrange) in order to get ranges of posts, and render these posts on the screen. The code is simple: - - function showPost($id) { - $r = redisLink(); - $post = $r->hgetall("post:$id"); - if (empty($post)) return false; - - $userid = $post['user_id']; - $username = $r->hget("user:$userid","username"); - $elapsed = strElapsed($post['time']); - $userlink = "".utf8entities($username).""; - - echo('

'.$userlink.' '.utf8entities($post['body'])."
"); - echo('posted '.$elapsed.' ago via web
'); - return true; - } - - function showUserPosts($userid,$start,$count) { - $r = redisLink(); - $key = ($userid == -1) ? "timeline" : "posts:$userid"; - $posts = $r->lrange($key,$start,$start+$count); - $c = 0; - foreach($posts as $p) { - if (showPost($p)) $c++; - if ($c == $count) break; - } - return count($posts) == $count+1; - } - -`showPost` will simply convert and print a Post in HTML while `showUserPosts` gets a range of posts and then passes them to `showPosts`. - -*Note: [`LRANGE`](/commands/lrange) is not very efficient if the list of posts start to be very -big, and we want to access elements which are in the middle of the list, since Redis Lists are backed by linked lists. If a system is designed for -deep pagination of million of items, it is better to resort to Sorted Sets -instead.* - -Following users ---- - -It is not hard, but we did not yet check how we create following / follower relationships. If user ID 1000 (antirez) wants to follow user ID 5000 (pippo), we need to create both a following and a follower relationship. We just need to [`ZADD`](/commands/zadd) calls: - - ZADD following:1000 5000 - ZADD followers:5000 1000 - -Note the same pattern again and again. In theory with a relational database, the list of following and followers would be contained in a single table with fields like `following_id` and `follower_id`. You can extract the followers or following of every user using an SQL query. With a key-value DB things are a bit different since we need to set both the `1000 is following 5000` and `5000 is followed by 1000` relations. This is the price to pay, but on the other hand accessing the data is simpler and extremely fast. Having these things as separate sets allows us to do interesting stuff. 
For example, using [`ZINTERSTORE`](/commands/zinterstore) we can have the intersection of `following` of two different users, so we may add a feature to our Twitter clone so that it is able to tell you very quickly when you visit somebody else's profile, "you and Alice have 34 followers in common", and things like that. - -You can find the code that sets or removes a following / follower relation in the `follow.php` file. - -Making it horizontally scalable ---- - -Gentle reader, if you read till this point you are already a hero. Thank you. Before talking about scaling horizontally it is worth checking performance on a single server. Retwis is *extremely fast*, without any kind of cache. On a very slow and loaded server, an Apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey ass slow... Imagine the results with more recent hardware. - -However you can't go with a single server forever, how do you scale a key-value -store? - -Retwis does not perform any multi-keys operation, so making it scalable is -simple: you may use client-side sharding, or something like a sharding proxy -like Twemproxy, or the upcoming Redis Cluster. - -To know more about those topics please read -[our documentation about sharding](/topics/partitioning). However, the point here -to stress is that in a key-value store, if you design with care, the data set -is split among **many independent small keys**. To distribute those keys -to multiple nodes is more straightforward and predictable compared to using -a semantically more complex database system. 
diff --git a/content/develop/manual/pipelining/index.md b/content/develop/manual/pipelining/index.md deleted file mode 100644 index 88343d0020..0000000000 --- a/content/develop/manual/pipelining/index.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: How to optimize round-trip times by batching Redis commands -linkTitle: Pipelining -title: Redis pipelining -weight: 2 ---- - -Redis pipelining is a technique for improving performance by issuing multiple commands at once without waiting for the response to each individual command. Pipelining is supported by most Redis clients. This document describes the problem that pipelining is designed to solve and how pipelining works in Redis. - -## Request/Response protocols and round-trip time (RTT) - -Redis is a TCP server using the client-server model and what is called a *Request/Response* protocol. - -This means that usually a request is accomplished with the following steps: - -* The client sends a query to the server, and reads from the socket, usually in a blocking way, for the server response. -* The server processes the command and sends the response back to the client. - -So for instance a four commands sequence is something like this: - - * *Client:* INCR X - * *Server:* 1 - * *Client:* INCR X - * *Server:* 2 - * *Client:* INCR X - * *Server:* 3 - * *Client:* INCR X - * *Server:* 4 - -Clients and Servers are connected via a network link. -Such a link can be very fast (a loopback interface) or very slow (a connection established over the Internet with many hops between the two hosts). -Whatever the network latency is, it takes time for the packets to travel from the client to the server, and back from the server to the client to carry the reply. - -This time is called RTT (Round Trip Time). 
-It's easy to see how this can affect performance when a client needs to perform many requests in a row (for instance adding many elements to the same list, or populating a database with many keys). -For instance if the RTT time is 250 milliseconds (in the case of a very slow link over the Internet), even if the server is able to process 100k requests per second, we'll be able to process at max four requests per second. - -If the interface used is a loopback interface, the RTT is much shorter, typically sub-millisecond, but even this will add up to a lot if you need to perform many writes in a row. - -Fortunately there is a way to improve this use case. - -## Redis Pipelining - -A Request/Response server can be implemented so that it is able to process new requests even if the client hasn't already read the old responses. -This way it is possible to send *multiple commands* to the server without waiting for the replies at all, and finally read the replies in a single step. - -This is called pipelining, and is a technique widely in use for many decades. -For instance many POP3 protocol implementations already support this feature, dramatically speeding up the process of downloading new emails from the server. - -Redis has supported pipelining since its early days, so whatever version you are running, you can use pipelining with Redis. -This is an example using the raw netcat utility: - -```bash -$ (printf "PING\r\nPING\r\nPING\r\n"; sleep 1) | nc localhost 6379 -+PONG -+PONG -+PONG -``` - -This time we don't pay the cost of RTT for every call, but just once for the three commands. 
- -To be explicit, with pipelining the order of operations of our very first example will be the following: - - * *Client:* INCR X - * *Client:* INCR X - * *Client:* INCR X - * *Client:* INCR X - * *Server:* 1 - * *Server:* 2 - * *Server:* 3 - * *Server:* 4 - -> **IMPORTANT NOTE**: While the client sends commands using pipelining, the server will be forced to queue the replies, using memory. So if you need to send a lot of commands with pipelining, it is better to send them as batches each containing a reasonable number, for instance 10k commands, read the replies, and then send another 10k commands again, and so forth. The speed will be nearly the same, but the additional memory used will be at most the amount needed to queue the replies for these 10k commands. - -## It's not just a matter of RTT - -Pipelining is not just a way to reduce the latency cost associated with the -round trip time, it actually greatly improves the number of operations -you can perform per second in a given Redis server. -This is because without using pipelining, serving each command is very cheap from -the point of view of accessing the data structures and producing the reply, -but it is very costly from the point of view of doing the socket I/O. This -involves calling the `read()` and `write()` syscall, that means going from user -land to kernel land. -The context switch is a huge speed penalty. - -When pipelining is used, many commands are usually read with a single `read()` -system call, and multiple replies are delivered with a single `write()` system -call. Consequently, the number of total queries performed per second -initially increases almost linearly with longer pipelines, and eventually -reaches 10 times the baseline obtained without pipelining, as shown in this figure. 
- -![Pipeline size and IOPs](pipeline_iops.png) - -## A real world code example - - -In the following benchmark we'll use the Redis Ruby client, supporting pipelining, to test the speed improvement due to pipelining: - -```ruby -require 'rubygems' -require 'redis' - -def bench(descr) - start = Time.now - yield - puts "#{descr} #{Time.now - start} seconds" -end - -def without_pipelining - r = Redis.new - 10_000.times do - r.ping - end -end - -def with_pipelining - r = Redis.new - r.pipelined do - 10_000.times do - r.ping - end - end -end - -bench('without pipelining') do - without_pipelining -end -bench('with pipelining') do - with_pipelining -end -``` - -Running the above simple script yields the following figures on my Mac OS X system, running over the loopback interface, where pipelining will provide the smallest improvement as the RTT is already pretty low: - -``` -without pipelining 1.185238 seconds -with pipelining 0.250783 seconds -``` -As you can see, using pipelining, we improved the transfer by a factor of five. - -## Pipelining vs Scripting - -Using [Redis scripting](/commands/eval), available since Redis 2.6, a number of use cases for pipelining can be addressed more efficiently using scripts that perform a lot of the work needed at the server side. -A big advantage of scripting is that it is able to both read and write data with minimal latency, making operations like *read, compute, write* very fast (pipelining can't help in this scenario since the client needs the reply of the read command before it can call the write command). - -Sometimes the application may also want to send [`EVAL`](/commands/eval) or [`EVALSHA`](/commands/evalsha) commands in a pipeline. -This is entirely possible and Redis explicitly supports it with the [SCRIPT LOAD](https://redis.io/commands/script-load) command (it guarantees that [`EVALSHA`](/commands/evalsha) can be called without the risk of failing). - -## Appendix: Why are busy loops slow even on the loopback interface? 
- -Even with all the background covered in this page, you may still wonder why -a Redis benchmark like the following (in pseudo code), is slow even when -executed in the loopback interface, when the server and the client are running -in the same physical machine: - -```sh -FOR-ONE-SECOND: - Redis.SET("foo","bar") -END -``` - -After all, if both the Redis process and the benchmark are running in the same -box, isn't it just copying messages in memory from one place to another without -any actual latency or networking involved? - -The reason is that processes in a system are not always running, actually it is -the kernel scheduler that lets the process run. -So, for instance, when the benchmark is allowed to run, it reads the reply from the Redis server (related to the last command executed), and writes a new command. -The command is now in the loopback interface buffer, but in order to be read by the server, the kernel should schedule the server process (currently blocked in a system call) -to run, and so forth. -So in practical terms the loopback interface still involves network-like latency, because of how the kernel scheduler works. - -Basically a busy loop benchmark is the silliest thing that can be done when -metering performances on a networked server. The wise thing is just avoiding -benchmarking in this way. 
diff --git a/content/develop/manual/pipelining/pipeline_iops.png b/content/develop/manual/pipelining/pipeline_iops.png deleted file mode 100644 index 6ab11079f2a4bc71d196b0e59839215ab40139e6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14577 zcmb_?c|4Tg+y9uTL46F_QW#22$Ui{JBn|9D={>+zSl?{i=0I@k4nzt44@`@U^tpu2-ZhywzF>^OZ& z=R5=gB|;!9?_gWOm4eGXZ4d}Yj*GUo(P?dMgwa(mCl_}|2;|i5gv4zZFSzj6;RtQJ zS$MSW^Mu69S%vI45%D;jqk zWwwpe)ad&*89Qgf4j!iNT0QToI!fU*L-CjG7i|#IF(tDX*WGe#_beL0}@Em1d>vi)<15 zR?iJXeH~QEY$ST2%^|K7>yeR=fZFRaBvLsTIl@zvUBY<+gO#C(<*s%P|K2oR&j^X~*2K zio@5m1XNQ6-d@ID*a@u^=q`<#&^i|p^hfaD^5tFe=LMXH^95!S_yvBKscJt867ej2 zom|??#h)ke{AhG22MAgy18n zukAI~!^Y-|tKS2KO80Rdfdv=%xSWyv-Jzps78djK{L8F@+Y*p>&fa8sG{t+`{rL8^ z%d%zLAPwfM*#d5pryx)6_s5;HdE+9oEn;;WBuRD8eTDF5@GW(UMfQTUqLD3hb;9RBiH7&Ph?y+(&})!D3Z-; zd^=mWC`0XB;JGcYd^((!_|Ix9e%pG*qEJ#zu<3YahRQR}R>v_RzrF9bgbQdyrJ*o& zMhbTX!|(36d4v7qr^|;G${z8Y;rpZhN9d2Hlc3^_+RM94J>E!k?c5!6ug>!mzb)70 z-T59p)|2o6o?9Qk4lnuys3Q05PP*ApD_ouW$?Egc&znB!v->JI26$&;VyhSCzAy8v zv4=@laUrjz??1((#D0Vo2~En!8l6%-c}47p*!G<-Z=cukcq)7-O3?7fNf|I#9(Ov= zVEn0l$`6!XByQ{W>K&9w-QSPc@jx^F#n%uAfqB=rQ5DgaXFc~K#c`QNQs%t6daM%n z-bSC-FVhP)SlM?xUg4qbX>W<*QxEl(^cGK7{LAy|$wkS(=C1pTB0~~GZmG$gj_Y{f zci%zZGf^g~0`Xn`=z$aQhwsnF>peIarzYlq!S$k$WRAqt{-%WH`l>SO)GTekCRM}N4y?=Z(M zXF)DO8;1mkREKAdf%RPt_^;@P7UH8)01g!}17|glqhrJ3*A&?bucg)K z-PnFZo1>feb5d(kkK9k0uNGIGzS^HKJ!g0b<@4;RG z>W&1BTpqbJa)|HAok|YF7p5a`>!n|AZ@>Nci;QD{g5*U$V-4dI*}50YjWat7JFj)V z?G)-r%i7tQ(_!Bc-PzYEsdP~(O!=+SUnLPY9;FNB`x$Wp|?QcG@+iQ4gmiT-c5mSh)$SGO&NCS6W6 zmmiVOvF!Ci9q~SVO68TJppt{&s-V;EBfF{&wJF?D=(X=~S?urmq55t4(cI3RE<5Gp z-Q)S=17C)`+|z!wJ=BR;G4P$NQc#?2w3pqPvqB%4v9Xb zzoX$)14jc|`h5b<7~8qS?QYS#CH{%%u5=F>_cr(Do;4RY+hp6&ngG8t{QTHRT_V@_ ztnUR=F+Dqa6sPu1VTwh)jr?toT~w>^z8Lsh?b6X0)p51jW2t^2!|szZews6D-)cWQ z#_K0b*LQwYZYkPUD{v`BGXeu%CR$1N2*dvB_GfPHATF{9+(I2xN&jy)= zc+CHu-5oNA1evXyq0pE<9ZtF1Z(uQKJ8axueDvZHE+JZ$$#UY$Xz!)ILd`{ z;mw?0G1Vs`buOMblY7;o$g6YKb)mEDwdk8~*6+|wO+_4TJG-sTb*<{G1CQ|R+$9t& 
zJ}=!DYxOnl?&TB4I%mBtRf$LQ%3^+8o;EGC@;kS9>aNw9bB$NtSWOP}6sX=ReNgI? z#+i4QaAZJ{?7ddUii`Xk`S?@orXsxS6{9xBISvzXe53yL~KbiU;4`dl&r{>CV+Z@Zj0Q zcgoIV+hRrkUX{3dql|RSJ#Y`1o3wYPZaUTJ7}w-lT3u6lf>(;o14>cE1@8&T_;fwY zlka@*UYEB%oo)4;^dwJj3*Ce7;62Uf$iHK1slU&(QKng%obT0Nl`PSq_s@*vw7`_8 z)Y1VD%?uw~0yfXl$C1#G+OYc|uXMnrF!e7QJ%L~5j7WAbKCaxttu+!?esag|y_aB~ zAfa2J+a6n`&VTjILTTq*@4*|{YuR3%Yk#p%BA&#UH}v-Iyd%jmF{E~`zvpYGWT-(| znrp7gD`Z>sl`qI02GIt%Rf+keZO#=fh3Q3V78rvC%i7@Hm5P(Ee6)vkE?Bmse1m*v z*z!21WQ%1-G0JO2t55&d&g&me;;^`#b2LXWNm1Ugn7#LN-Y4r_t~TA|->yBKp~BL~ z-sc6|l>79PRyz$cn;TRnJuNF-d>3kDUt63lk@ZvwcpR8YX<92;=;!ah7br8ML@*^3 zF3Qb#O!558dh7eUrbBpmghKCFn(oSni z2j?zV%m?-l?%pc1Rc^=oe*!9DVRH^qii@kU9muMGMk}6I$m}||D@#~exKj0xX4isi z=j)%{*Sgz>%P{e)7PY%mRYu8vYwz5|-7hca%uY3yUn9)Vyhh3pTQ$d#D`5v+TvzP} zBv%ZT({nTf(d?wdhg0YH2t|v@(K)2%;b=M32_ z25z@!OaGXV#Sg03RF{jUex_&7sM&jY$k{n~U3QcU^6&g(hG+a2bNo1QddKB=Ury!pvZcW6(l%TpJyX+ay& zTE7ayrOBZ#&j`IH2Ul;IFa#oV>9o%Ai$N??{n1ySmGZN-Hs_EjHqH0;`@ZB8a%Y7t zn&#H`@N32TtEUS8+9#TBQ)1)E3WFn15fBzAD{Q;uEbeE3gZ8FNYkLusp<;{?aKVd1 zjy;s+WvSi(7fQ~D?$4(^<_i4N0>IPsL1dWwO;S-cX|xb0nt-Q-M$RNE8LS?<7FKaZ zf2zUWV)%+AZ9X_cp}g9)C)T@CAhQs2H+kD?M`G;4!H!J(&0baTFINVXDxJaoP2aX^ zLh9=J_ujg<>+@lUp|9;I%ihSA#=MTqy>sdmC_)5B)IzMj@EKUTSJqa!o4T@%u>)-Q z--AUOmv#_cYSf?CK`$4Ko2tjyTM&YAfulK1Wy-mm){(XY#8nkVnNJxu zTe%q;#_qN`RZv;Qww3^dB%wtX4 ze$S+l^jF+{eE9{*rc5q2A;zi~b2NQ)5L#1Z0^OKUApym4KMy)@cK5-YTkhVeXMSsK zWapd|8ynlDZGIDmZd+*?KnY`EZ+s}OiKQWX2tEd6b*`CjBK9XKK~)u)PGro8y^h7* zcm6V7u~iL3LFt*~-nNAtdzZUy(3ZTBg0YJT zu6X5Q@497^H|0v|NqBiHdscGxLRh%&v)g^$0>=UeBT418O}ZPDf~z8ol(&nT8k%IP z!{1uWapNtrBK>Iy=w!uP`wzCuFWt6aoi!QFS&Cigo+d0}lm~ksnUvIaF6a0R=Ex&O zVu#TljV5(nt8PRY{*O!^=`!V8xdT zh3OKNz3X`uG(&J@hT_f2DQU_bWAA=+myarUDYQTOs?s0hU}1uH4tdcdO@rJJ6uXuk zD&)CKE2%~PQ5$?4TO{eax53^GqW{?`wWFYjh&02tRce%VJs!n};+=D=Dt9u3M;0;3 zWo4aA8xMm&GDG#Gm#yLrXi-Ec6h`DD{enGW=tQt}VnJpc^Ie)s1y^1`zl-iXsIint z-bqPlvmr(Ujz3wz&dtL(C&3WVmu*99z3{>Qbj;&YF&MQI(dOkMH zsx6m8LMfuF#6};B3WJ+(a`KT5W*&DSSWh 
zd0(&pW7#wa_BNaOG9h$k^hv^DcA~V@6F}`*w;ALc*)Sd>xpk7|iyH@3|jcLVZauUOcjn zNDr8d;^bpvW3Z9fxkcY=O^vA?4ov<+Oh_SkS^|MzHbH2iL;ku}C2cq&sFe}Y)1tlF zf7!4(oM7FgJM<968ZJiReHOp2wbgvkhVA76b73>;Nox_)178z4tvvT>#A@aa%oumZ zRBAG*Jw+MdjyQVMn&GV1`4u2-Q`ls$` z69_gNH21fF?YQ1Li&ptc_f6tB2J>P(3UX54brt(pf$i=HYvkGv!KU?K*q)&V)nX_!7~SBBCB^}Var*`ag?nMcI$Ug2!LnnZLH?}@L7x8m?Cbfj$r=) zIRDXiz+T{htz1a#f4LFaPG}9e^Xcck-WO5T$b+Nm8wPB(Q9XkZ-_8v2v#?P3o#Hp@ z12bU3OxkO(g&nrs@;&+Sy4IZ8kC-&Pcg=@{*|q z>Pw!id)P*3hQvVb#Ddz7AmCzy#%o*e*s;^x-vOq+nSH2_FDfGalGN`K3rT6()NxOX%76()bcS^}m0`WW0 zST0&(W4D>+gX3qGj`SHrwaCT~K8tN!-O9ggGd`eMrFAJxrseFETS@Pfo3HEci-^k4 zZMv%;ERbQxuH5&Cra7mVAvKxVN?zg1|FxHE&PT>eKl)8SnlI8vNB};+TRv`NJ$Ecn zSq|PCq?{c(8HRQ30#NQ{w-T=$T_Ot1jH)fqkN2b3H>Q+*69zFpmJVvGPM*LB>Rzt(maRQb1HMGKoJcC_D78k}{OGbTP z)D?UlSxqFVd!ec=0)?cmNzf*j~IL>F(ljcVjG#Hl$pm z)|rxkOSBSYJJ@)+&VAmhA~vRC%BwHFfjXz1aJ=*DWntpksDpaRl&f zq1pCEk?;l_GIRmM>avTfoQ~s`ILt&hyim9ecI4CldVPOM7$2;0=b=N#n#B&2_Yj@O z#!BkejG>jktRiqYUKAzjsdpSx`k)t1GjS7`I4PXocVZ1S?JKRIxN#X3Q4Mb3Se^yn z0^1QXu6&EW0NpwXgq8HizDa5<*4VI|MxvWMaQv-qpe``;+5|RT&4o0}p863oP0Usp`NcuRUnM z`TX_jHL}aeEi`JensYB(y^UMT4p)=c@LJ&?w2M+Mt{N~M-AlMe-aEOUrg$(Jw!~cl z!4fBS&q1L>7xDQqO+Q}5~XOz$*x z>QOXVl@D$~n;Xf9<9cn%Nt*M=o%J^1*47jon}@Pq{7i%`opzuqgKt6a)QaV}&l{hz zDo0#ax^AnyG^NAgh#@VYL0Us+ie>*H8oKBEH7g=s`1)PVH9F*J%OTbc=vyxawWgaT!R<(;l=w2r)}lh8K!$ z1-y&>8|^N@!%F~GXQ@Eea9ptSP1nASrUn$dNWebioe!e0)$0O1wCabo9#vr_X_G0vT6VLJ9?mQP!gKY)$8AbQUp&Phr#Yx9JLI9B z$-Y|%)#~+1Fj0%{Hx>3(TSracCQB``4y+6?VX0Q`jAxUj5(>HK*_pT9J&0x( zg#(>TUXiacuDjk$g{A>RM?gG|{N>nLmTkk*8ArG31N`!Fb&;g~27}RH3r4D{1wR<5 z8O}ya*1kPMch?uVOY0Oij2aoEdh+Vsyy>fCfr65X$SA3(s6!b84-b0X_)uwsgJz4- znQhMjBieU$=F;QO3R~mBSHhS-w;x1I5-ys0{T$CS@7gGAfqYwn=&mfg#tn2t(O3R~4bY^{WXKZzTXVdpHz$DDn*v%V zIxZXK0Hu*gBxXePs5Qspc+cUUZqJtPp^bWYV-$wUwDfi~Y3JVyrG_Q2#c>}IP#a{f zyetjoY~gh*K1~l@C-fFLhUPpZLdyo#)ozY8&BK=HTJo2bz|{Dz$AiC)aR*y0#9DQF zRq61bq9LmEyzOJVmb+8+qrD?`;JzCymDLy#KWPUfgz*LBC8Wm2hBz#{` zKvNV*Hu5yIaZf=X2;yvU(P9XW6F+DYB&%U-4S71QB`ucfq0Ga&Sv^o0+{IAHjV6_9 
z$M^^Tj)l18{td)uzq6zROc<;99sgFE=wEzf{8jXzxKh?N54sS;Qh24vEdB8dM@b2& z$hEnu@AIzRF|Jri#dL1Or{T^elL7UsK|Cl6Kt zO7na}q*cSe$+R;cghB-{6n5Jqjt(!|OTbW9eIK1f5!JJR1G5sv$LWyA(whkefj$C- zF-+*;8r78WvZm)>2|T9sqWB!+vG0I)-w%4?7>~6B+OE48Lbe^pADK>q7# zK)2%u?D$EP*fH#w0Xt>~J<^$W00Y|-e%xf*0Y&>vZXsb}P}WnEZl@5~DST1l!?5!W zsLL_w$!v0J?;l!G13LqR$dJMVp>3w%&>%Y^A7kQZ)rr&ZRcFjd0bI7`VEkm=N-fN# z-n?E7luD+T11MV{=<+Cay2b%pcThgyg!#Rmu&91yM^(V<{;DIHN9g{rK;LnKlAE^| zFpxt0q8+xUp^wLN3jc?5Ojf-q=?j#;yl2$PTtbV3JkF2R)G+X%*5;WjKbSm*n+mA% z;k$2j&_#qQN}N!fz5ut$RC}^9jsu+?n1%mHO}}ezVyuyJnKj&AYn3-tyX;gHL0l2Lhivv1G`Ss zAaiQ-|+S zSHaP0Q3p7C{t9s)BQr+g-e3EI^!9hqYG|rHVM75ngqndB?aoJx^xOb@gV>{K#&6aT zfRE6uhyDzX!KHZNc+L-gOhsso26^{?QDH-IJvYo?@*XgZtL7-9-i<&_0NWFMZ|Nul zEc*Ws2tig_-vd*QH-IUL|YDEO^)%W z5GHQ^$75K=Z~5jKDQ%_8Z*?u;R&B;jhSVVFy)J_c5&!!!m|`CE&K9%dqYP==5LBT= zx@JXGw*v>-`4o_Jk04k*5KRfwkIWPYnFINI5B@C2@Tb#BbOfQRo-k87r%+&bVd@#K? z%Nd40sEkBVujzM{GUZ1EI7HyD<{F(&Xp5y|ID;EhjrX-o8Ki<>UxVphpSVYV-vU(j zKabIPI!@ZuT(YWxZIr-2x!~(%NMeRq32a9y=e9;ub#fy$-jyH?m08@(l>X;2l;#f6 z_7HH8NzW+i86elQ8wE1VKsW{X|u>O{XBvPc-@q4)Uts`n?&68Op|efI2oGbn;{6Ay2)g z@Q+64LvaBMSI=)@ND^3#>ZH7OxH;auPk4&(;4xD$P@+(d1ITXPsb$67I=s=(L>UwXMv^yDyH%^US+HsHVMitg5Ee|Rj3R_ zK%RHk%;e|@gBtNo`6)v$et{H#LuX2A+%S`zYQXj}*%U{n zxCcps#(d{hM#v&inP6d=H{Q83eVhS05U3Tyd}!<XD}1e{}6+1r^RN|Klp4)R2tizk7&c>CY(ZW`SiWPeT#T?VJ4+5rT*(?W;2!7 zkN$U@U_>ET&y5TVF*=us>MgWAt?LPJ2dXOy5|D0gc7O>kad0I{-QUDO8W4B{{_NTl7hWEvzPh(yMq}+HM}^PP`TDtD6qRL1aw< zcJ#ShnDcEH2kqYZQ+-T%&j%G|l6V@^13?|22G8S)=?6NXYXq-(30M3Px zr^Gxq)otfdJvN&-gtCRR*bUmW2>*WJzMGL^eHJ_9?d6HGhVC1qjEh@9P`2=X<7Z}* z@wS%k<#q({dehMuXp6y#YhKnBX8aW*fJ-QD58A68@MQH9AQ3M&l~>geKz7K9LN;bX z+`yG5cQ!RwTVRdCc+%1j7VDZ~#txV-p%B<(c#s@2-Q}ju;1g;T@OzZ4aMSK&0n8=d zrJq3oYNmpgH&vi@o|yo5cJQVmHc!;`UhGIlR`}GV@^B9W4@NLpodiG`d7u!3CZXKI zev4_wjCin@1P~|L3NcK#EdVqzZgJpPC1_O(0mV$=bCx!JRD>Gp&I4~%u2nEU{b7NI z!`3w4H?C!qz8!K+7XSyT!C?hFo4{kvN39|!WZu6O-MIu`w9iQL*BJ_%>o%I(*zc@~yYP((s*2^D=|nU+_mIC)R0NX$)iStxB%bpiD_P`fyV#s;A!zl+V_GE?64P#-Z0V; 
zf%@MqOu?{+a10+BVr9+1}$-BdQmnwRRNa49eQtT9JBM zTDN<8)isqZ&qL(g!WMXLrp9$R{{o zUsbZ(IJ`-aQDkK7#Omix4z6gIjaz8fgh}SfH874of-ZuS6jcgo!v9EOAztLo+F}mE zx;)&z>15r9cU3>K{sb z^8~(E(z0F7$KkbWFI+6H*&pv1CUa}Fg7+vstjQ;5s%OkMjW6%Ja$QmJ(S(?(CsT(? zOq@L`_v`oBG#wf)P)6nrx>>Wk6(5@KEhs7;n;~abKQVTVj!mz`klL%<^;>TJLuB7e zD$Y=TqrBat4{G;VJ?W*--G$=8Z+qQi>%-Fp(AFiRd%ByQ%3og1^_UoJ&9@dAbVZij znQOLw|3dg?DrR?cXJ`|?GwP>x$iVAEx*Ek!yyzEseRdzw5zhra98QT@tqHnplahDA zh8)|3SL+izK(dJQbl3j98d_X;#i0+0xB`FI;*s~A z*3NwPF{|~hqlLc87{^eD--EgaSDSx8$&zX1AQrl52o%Whw{M&*j`T*$?j-N}FU{$WY?m-C&TYrDF#_G7Ghzjtgz_taAJ znLL#yUG@2+O?^REkH^|@EQ>Bnv}9fXdL;b0#gBBM*00no&z`RyEu=znH6`HA%zXT< z9mJKAMwgPZybg9R$j3Mcin7{CXiVZrzbJx_$g#Sd*`%V%2Ml=qDOvKt!`!bQ7 zYNzH4p7IDyJQ`c^wkdXma?(AR(oXL58OukGHhjuu0(?7Jw}_WfBu7*?LO)hHWBxqt zWo+XREYST>VaB-#_`(;|KZW_l^q0G!CZ~KHPBMf@el4Va@O}D(fljWL?X~|04#hBK diff --git a/content/develop/reference/clients.md b/content/develop/reference/clients.md index ecc1c4c51b..c9666e56ba 100644 --- a/content/develop/reference/clients.md +++ b/content/develop/reference/clients.md @@ -110,7 +110,7 @@ Different kind of clients have different default limits: * **Pub/Sub clients** have a default hard limit of 32 megabytes and a soft limit of 8 megabytes per 60 seconds. * **Replicas** have a default hard limit of 256 megabytes and a soft limit of 64 megabyte per 60 seconds. -It is possible to change the limit at runtime using the [`CONFIG SET`](/commands/config-set) command or in a permanent way using the Redis configuration file `redis.conf`. See the example `redis.conf` in the Redis distribution for more information about how to set the limit. +It is possible to change the limit at runtime using the [`CONFIG SET`]({{< relref "/commands/config-set" >}}) command or in a permanent way using the Redis configuration file `redis.conf`. See the example `redis.conf` in the Redis distribution for more information about how to set the limit. 
## Query Buffer Hard Limit @@ -132,7 +132,7 @@ The aggregation takes into account all the memory used by the client connections Note that replica and master connections aren't affected by the client eviction mechanism. Therefore, such connections are never evicted. -`maxmemory-clients` can be set permanently in the configuration file (`redis.conf`) or via the [`CONFIG SET`](/commands/config-set) command. +`maxmemory-clients` can be set permanently in the configuration file (`redis.conf`) or via the [`CONFIG SET`]({{< relref "/commands/config-set" >}}) command. This setting can either be 0 (meaning no limit), a size in bytes (possibly with `mb`/`gb` suffix), or a percentage of `maxmemory` by using the `%` suffix (e.g. setting it to `10%` would mean 10% of the `maxmemory` configuration). @@ -142,14 +142,14 @@ A value `5%`, for example, can be a good place to start. It is possible to flag a specific client connection to be excluded from the client eviction mechanism. This is useful for control path connections. -If, for example, you have an application that monitors the server via the [`INFO`](/commands/info) command and alerts you in case of a problem, you might want to make sure this connection isn't evicted. +If, for example, you have an application that monitors the server via the [`INFO`]({{< relref "/commands/info" >}}) command and alerts you in case of a problem, you might want to make sure this connection isn't evicted. You can do so using the following command (from the relevant client's connection): -[`CLIENT NO-EVICT`](/commands/client-no-evict) `on` +[`CLIENT NO-EVICT`]({{< relref "/commands/client-no-evict" >}}) `on` And you can revert that with: -[`CLIENT NO-EVICT`](/commands/client-no-evict) `off` +[`CLIENT NO-EVICT`]({{< relref "/commands/client-no-evict" >}}) `off` For more information and an example refer to the `maxmemory-clients` section in the default `redis.conf` file. 
@@ -176,9 +176,9 @@ Timeouts are not to be considered very precise: Redis avoids setting timer event ## The CLIENT Command -The Redis [`CLIENT`](/commands/client) command allows you to inspect the state of every connected client, to kill a specific client, and to name connections. It is a very powerful debugging tool if you use Redis at scale. +The Redis [`CLIENT`]({{< relref "/commands/client" >}}) command allows you to inspect the state of every connected client, to kill a specific client, and to name connections. It is a very powerful debugging tool if you use Redis at scale. -[`CLIENT LIST`](/commands/client-list) is used in order to obtain a list of connected clients and their state: +[`CLIENT LIST`]({{< relref "/commands/client-list" >}}) is used in order to obtain a list of connected clients and their state: ``` redis 127.0.0.1:6379> client list @@ -190,19 +190,19 @@ In the above example two clients are connected to the Redis server. Let's look a * **addr**: The client address, that is, the client IP and the remote port number it used to connect with the Redis server. * **fd**: The client socket file descriptor number. -* **name**: The client name as set by [`CLIENT SETNAME`](/commands/client-setname). +* **name**: The client name as set by [`CLIENT SETNAME`]({{< relref "/commands/client-setname" >}}). * **age**: The number of seconds the connection existed for. * **idle**: The number of seconds the connection is idle. * **flags**: The kind of client (N means normal client, check the [full list of flags](https://redis.io/commands/client-list)). * **omem**: The amount of memory used by the client for the output buffer. * **cmd**: The last executed command. -See the [[`CLIENT LIST`](/commands/client-list)](https://redis.io/commands/client-list) documentation for the full listing of fields and their purpose. 
+See the [[`CLIENT LIST`]({{< relref "/commands/client-list" >}})](https://redis.io/commands/client-list) documentation for the full listing of fields and their purpose. -Once you have the list of clients, you can close a client's connection using the [`CLIENT KILL`](/commands/client-kill) command, specifying the client address as its argument. +Once you have the list of clients, you can close a client's connection using the [`CLIENT KILL`]({{< relref "/commands/client-kill" >}}) command, specifying the client address as its argument. -The commands [`CLIENT SETNAME`](/commands/client-setname) and [`CLIENT GETNAME`](/commands/client-getname) can be used to set and get the connection name. Starting with Redis 4.0, the client name is shown in the -[`SLOWLOG`](/commands/slowlog) output, to help identify clients that create latency issues. +The commands [`CLIENT SETNAME`]({{< relref "/commands/client-setname" >}}) and [`CLIENT GETNAME`]({{< relref "/commands/client-getname" >}}) can be used to set and get the connection name. Starting with Redis 4.0, the client name is shown in the +[`SLOWLOG`]({{< relref "/commands/slowlog" >}}) output, to help identify clients that create latency issues. ## TCP keepalive diff --git a/content/develop/reference/command-arguments.md b/content/develop/reference/command-arguments.md index fdb8aba0d7..7f0f0a3531 100644 --- a/content/develop/reference/command-arguments.md +++ b/content/develop/reference/command-arguments.md @@ -15,7 +15,7 @@ title: Redis command arguments weight: 7 --- -The [`COMMAND DOCS`](/commands/command-docs) command returns documentation-focused information about available Redis commands. +The [`COMMAND DOCS`]({{< relref "/commands/command-docs" >}}) command returns documentation-focused information about available Redis commands. The map reply that the command returns includes the _arguments_ key. This key stores an array that describes the command's arguments. 
@@ -39,9 +39,9 @@ Every element in the _arguments_ array is a map with the following fields: - **pure-token:** an argument is a token, meaning a reserved keyword, which may or may not be provided. Not to be confused with free-text user input. - **oneof**: the argument is a container for nested arguments. - This type enables choice among several nested arguments (see the [`XADD`](/commands/xadd) example below). + This type enables choice among several nested arguments (see the [`XADD`]({{< relref "/commands/xadd" >}}) example below). - **block:** the argument is a container for nested arguments. - This type enables grouping arguments and applying a property (such as _optional_) to all (see the [`XADD`](/commands/xadd) example below). + This type enables grouping arguments and applying a property (such as _optional_) to all (see the [`XADD`]({{< relref "/commands/xadd" >}}) example below). * **key_spec_index:** this value is available for every argument of the _key_ type. It is a 0-based index of the specification in the command's [key specifications][tr] that corresponds to the argument. * **token**: a constant literal that precedes the argument (user input) itself. @@ -50,9 +50,9 @@ Every element in the _arguments_ array is a map with the following fields: * **deprecated_since:** the Redis version that deprecated the command (or for module commands, the module version). * **flags:** an array of argument flags. Possible flags are: - - **optional**: denotes that the argument is optional (for example, the _GET_ clause of the [`SET`](/commands/set) command). - - **multiple**: denotes that the argument may be repeated (such as the _key_ argument of [`DEL`](/commands/del)). - - **multiple-token:** denotes the possible repetition of the argument with its preceding token (see [`SORT`](/commands/sort)'s `GET pattern` clause). + - **optional**: denotes that the argument is optional (for example, the _GET_ clause of the [`SET`]({{< relref "/commands/set" >}}) command). 
+ - **multiple**: denotes that the argument may be repeated (such as the _key_ argument of [`DEL`]({{< relref "/commands/del" >}})). + - **multiple-token:** denotes the possible repetition of the argument with its preceding token (see [`SORT`]({{< relref "/commands/sort" >}})'s `GET pattern` clause). * **value:** the argument's value. For arguments types other than _oneof_ and _block_, this is a string that describes the value in the command's syntax. For the _oneof_ and _block_ types, this is an array of nested arguments, each being a map as described in this section. @@ -61,7 +61,7 @@ Every element in the _arguments_ array is a map with the following fields: ## Example -The trimming clause of [`XADD`](/commands/xadd), i.e., `[MAXLEN|MINID [=|~] threshold [LIMIT count]]`, is represented at the top-level as _block_-typed argument. +The trimming clause of [`XADD`]({{< relref "/commands/xadd" >}}), i.e., `[MAXLEN|MINID [=|~] threshold [LIMIT count]]`, is represented at the top-level as a _block_-typed argument. It consists of four nested arguments: @@ -72,7 +72,7 @@ It consists of four nested arguments: 3. **threshold:** this nested argument is a _string_. 4. **count:** this nested argument is an optional _integer_ with a _token_ (_LIMIT_). -Here's [`XADD`](/commands/xadd)'s arguments array: +Here's [`XADD`]({{< relref "/commands/xadd" >}})'s arguments array: ``` 1) 1) "name" diff --git a/content/develop/reference/command-tips.md b/content/develop/reference/command-tips.md index 1e3ec72673..4c42ead20d 100644 --- a/content/develop/reference/command-tips.md +++ b/content/develop/reference/command-tips.md @@ -19,7 +19,7 @@ Command tips are an array of strings. These provide Redis clients with additional information about the command. The information can instruct Redis Cluster clients as to how the command should be executed and its output processed in a clustered deployment. 
-Unlike the command's flags (see the 3rd element of [`COMMAND`](/commands/command)'s reply), which are strictly internal to the server's operation, tips don't serve any purpose other than being reported to clients. +Unlike the command's flags (see the 3rd element of [`COMMAND`]({{< relref "/commands/command" >}})'s reply), which are strictly internal to the server's operation, tips don't serve any purpose other than being reported to clients. Command tips are arbitrary strings. However, the following sections describe proposed tips and demonstrate the conventions they are likely to adhere to. @@ -28,14 +28,14 @@ However, the following sections describe proposed tips and demonstrate the conve This tip indicates that the command's output isn't deterministic. That means that calls to the command may yield different results with the same arguments and data. -That difference could be the result of the command's random nature (e.g., [`RANDOMKEY`](/commands/randomkey) and [`SPOP`](/commands/spop)); the call's timing (e.g., [`TTL`](/commands/ttl)); or generic differences that relate to the server's state (e.g., [`INFO`](/commands/info) and [`CLIENT LIST`](/commands/client-list)). +That difference could be the result of the command's random nature (e.g., [`RANDOMKEY`]({{< relref "/commands/randomkey" >}}) and [`SPOP`]({{< relref "/commands/spop" >}})); the call's timing (e.g., [`TTL`]({{< relref "/commands/ttl" >}})); or generic differences that relate to the server's state (e.g., [`INFO`]({{< relref "/commands/info" >}}) and [`CLIENT LIST`]({{< relref "/commands/client-list" >}})). **Note:** Prior to Redis 7.0, this tip was the _random_ command flag. ## nondeterministic_output_order -The existence of this tip indicates that the command's output is deterministic, but its ordering is random (e.g., [`HGETALL`](/commands/hgetall) and [`SMEMBERS`](/commands/smembers)). 
+The existence of this tip indicates that the command's output is deterministic, but its ordering is random (e.g., [`HGETALL`]({{< relref "/commands/hgetall" >}}) and [`SMEMBERS`]({{< relref "/commands/smembers" >}})). **Note:** Prior to Redis 7.0, this tip was the _sort_\__for_\__script_ flag. @@ -51,19 +51,19 @@ The default behavior a client should implement for commands without the _request In cases where the client should adopt a behavior different than the default, the _request_policy_ tip can be one of: - **all_nodes:** the client should execute the command on all nodes - masters and replicas alike. - An example is the [`CONFIG SET`](/commands/config-set) command. + An example is the [`CONFIG SET`]({{< relref "/commands/config-set" >}}) command. This tip is in-use by commands that don't accept key name arguments. The command operates atomically per shard. -* **all_shards:** the client should execute the command on all master shards (e.g., the [`DBSIZE`](/commands/dbsize) command). +* **all_shards:** the client should execute the command on all master shards (e.g., the [`DBSIZE`]({{< relref "/commands/dbsize" >}}) command). This tip is in-use by commands that don't accept key name arguments. The command operates atomically per shard. - **multi_shard:** the client should execute the command on several shards. The client should split the inputs according to the hash slots of its input key name arguments. For example, the command `DEL {foo} {foo}1 bar` should be split to `DEL {foo} {foo}1` and `DEL bar`. If the keys are hashed to more than a single slot, the command must be split even if all the slots are managed by the same shard. - Examples for such commands include [`MSET`](/commands/mset), [`MGET`](/commands/mget) and [`DEL`](/commands/del). - However, note that [`SUNIONSTORE`](/commands/sunionstore) isn't considered as _multi_shard_ because all of its keys must belong to the same hash slot. 
-- **special:** indicates a non-trivial form of the client's request policy, such as the [`SCAN`](/commands/scan) command. + Examples for such commands include [`MSET`]({{< relref "/commands/mset" >}}), [`MGET`]({{< relref "/commands/mget" >}}) and [`DEL`]({{< relref "/commands/del" >}}). + However, note that [`SUNIONSTORE`]({{< relref "/commands/sunionstore" >}}) isn't considered as _multi_shard_ because all of its keys must belong to the same hash slot. +- **special:** indicates a non-trivial form of the client's request policy, such as the [`SCAN`]({{< relref "/commands/scan" >}}) command. ## response_policy @@ -72,10 +72,10 @@ The default behavior for commands without a _request_policy_ tip only applies to The client's implementation for the default behavior should be as follows: 1. The command doesn't accept key name arguments: the client can aggregate all replies within a single nested data structure. -For example, the array replies we get from calling [`KEYS`](/commands/keys) against all shards. +For example, the array replies we get from calling [`KEYS`]({{< relref "/commands/keys" >}}) against all shards. These should be packed in a single in no particular order. 1. For commands that accept one or more key name arguments: the client needs to retain the same order of replies as the input key names. -For example, [`MGET`](/commands/mget)'s aggregated reply. +For example, [`MGET`]({{< relref "/commands/mget" >}})'s aggregated reply. The _response_policy_ tip is set for commands that reply with scalar data types, or when it's expected that clients implement a non-default aggregate. This tip can be one of: @@ -83,24 +83,24 @@ This tip can be one of: * **one_succeeded:** the clients should return success if at least one shard didn't reply with an error. The client should reply with the first non-error reply it obtains. If all shards return an error, the client can reply with any one of these. 
- For example, consider a [`SCRIPT KILL`](/commands/script-kill) command that's sent to all shards. - Although the script should be loaded in all of the cluster's shards, the [`SCRIPT KILL`](/commands/script-kill) will typically run only on one at a given time. + For example, consider a [`SCRIPT KILL`]({{< relref "/commands/script-kill" >}}) command that's sent to all shards. + Although the script should be loaded in all of the cluster's shards, the [`SCRIPT KILL`]({{< relref "/commands/script-kill" >}}) will typically run only on one at a given time. * **all_succeeded:** the client should return successfully only if there are no error replies. Even a single error reply should disqualify the aggregate and be returned. Otherwise, the client should return one of the non-error replies. - As an example, consider the [`CONFIG SET`](/commands/config-set), [`SCRIPT FLUSH`](/commands/script-flush) and [`SCRIPT LOAD`](/commands/script-load) commands. + As an example, consider the [`CONFIG SET`]({{< relref "/commands/config-set" >}}), [`SCRIPT FLUSH`]({{< relref "/commands/script-flush" >}}) and [`SCRIPT LOAD`]({{< relref "/commands/script-load" >}}) commands. * **agg_logical_and:** the client should return the result of a logical _AND_ operation on all replies (only applies to integer replies, usually from commands that return either _0_ or _1_). - Consider the [`SCRIPT EXISTS`](/commands/script-exists) command as an example. + Consider the [`SCRIPT EXISTS`]({{< relref "/commands/script-exists" >}}) command as an example. It returns an array of _0_'s and _1_'s that denote the existence of its given SHA1 sums in the script cache. The aggregated response should be _1_ only when all shards had reported that a given script SHA1 sum is in their respective cache. * **agg_logical_or:** the client should return the result of a logical _AND_ operation on all replies (only applies to integer replies, usually from commands that return either _0_ or _1_). 
* **agg_min:** the client should return the minimal value from the replies (only applies to numerical replies). - The aggregate reply from a cluster-wide [`WAIT`](/commands/wait) command, for example, should be the minimal value (number of synchronized replicas) from all shards. + The aggregate reply from a cluster-wide [`WAIT`]({{< relref "/commands/wait" >}}) command, for example, should be the minimal value (number of synchronized replicas) from all shards. * **agg_max:** the client should return the maximal value from the replies (only applies to numerical replies). * **agg_sum:** the client should return the sum of replies (only applies to numerical replies). - Example: [`DBSIZE`](/commands/dbsize). + Example: [`DBSIZE`]({{< relref "/commands/dbsize" >}}). * **special:** this type of tip indicates a non-trivial form of reply policy. - [`INFO`](/commands/info) is an excellent example of that. + [`INFO`]({{< relref "/commands/info" >}}) is an excellent example of that. ## Example diff --git a/content/develop/reference/eviction/index.md b/content/develop/reference/eviction/index.md index 22de39c968..beb17f269e 100644 --- a/content/develop/reference/eviction/index.md +++ b/content/develop/reference/eviction/index.md @@ -28,7 +28,7 @@ the exact LRU. The `maxmemory` configuration directive configures Redis to use a specified amount of memory for the data set. You can set the configuration directive using the `redis.conf` file, or later using -the [`CONFIG SET`](/commands/config-set) command at runtime. +the [`CONFIG SET`]({{< relref "/commands/config-set" >}}) command at runtime. 
For example, to configure a memory limit of 100 megabytes, you can use the following directive inside the `redis.conf` file: @@ -65,7 +65,7 @@ The policies **volatile-lru**, **volatile-lfu**, **volatile-random**, and **vola Picking the right eviction policy is important depending on the access pattern of your application, however you can reconfigure the policy at runtime while the application is running, and monitor the number of cache misses and hits -using the Redis [`INFO`](/commands/info) output to tune your setup. +using the Redis [`INFO`]({{< relref "/commands/info" >}}) output to tune your setup. In general as a rule of thumb: diff --git a/content/develop/reference/key-specs.md b/content/develop/reference/key-specs.md index 55115c4dae..49b89cb3f5 100644 --- a/content/develop/reference/key-specs.md +++ b/content/develop/reference/key-specs.md @@ -16,20 +16,20 @@ weight: 3 --- Many of the commands in Redis accept key names as input arguments. -The 9th element in the reply of [`COMMAND`](/commands/command) (and [`COMMAND INFO`](/commands/command-info)) is an array that consists of the command's key specifications. +The 9th element in the reply of [`COMMAND`]({{< relref "/commands/command" >}}) (and [`COMMAND INFO`]({{< relref "/commands/command-info" >}})) is an array that consists of the command's key specifications. A _key specification_ describes a rule for extracting the names of one or more keys from the arguments of a given command. Key specifications provide a robust and flexible mechanism, compared to the _first key_, _last key_ and _step_ scheme employed until Redis 7.0. Before introducing these specifications, Redis clients had no trivial programmatic means to extract key names for all commands. 
-Cluster-aware Redis clients had to have the keys' extraction logic hard-coded in the cases of commands such as [`EVAL`](/commands/eval) and [`ZUNIONSTORE`](/commands/zunionstore) that rely on a _numkeys_ argument or [`SORT`](/commands/sort) and its many clauses. -Alternatively, the [`COMMAND GETKEYS`](/commands/command-getkeys) can be used to achieve a similar extraction effect but at a higher latency. +Cluster-aware Redis clients had to have the keys' extraction logic hard-coded in the cases of commands such as [`EVAL`]({{< relref "/commands/eval" >}}) and [`ZUNIONSTORE`]({{< relref "/commands/zunionstore" >}}) that rely on a _numkeys_ argument or [`SORT`]({{< relref "/commands/sort" >}}) and its many clauses. +Alternatively, the [`COMMAND GETKEYS`]({{< relref "/commands/command-getkeys" >}}) can be used to achieve a similar extraction effect but at a higher latency. A Redis client isn't obligated to support key specifications. -It can continue using the legacy _first key_, _last key_ and _step_ scheme along with the [_movablekeys_ flag](/commands/command#flags) that remain unchanged. +It can continue using the legacy _first key_, _last key_ and _step_ scheme along with the [_movablekeys_ flag]({{< relref "/commands/command#flags" >}}) that remain unchanged. However, a Redis client that implements key specifications support can consolidate most of its keys' extraction logic. -Even if the client encounters an unfamiliar type of key specification, it can always revert to the [`COMMAND GETKEYS`](/commands/command-getkeys) command. +Even if the client encounters an unfamiliar type of key specification, it can always revert to the [`COMMAND GETKEYS`]({{< relref "/commands/command-getkeys" >}}) command. That said, most cluster-aware clients only require a single key name to perform correct command routing, so it is possible that although a command features one unfamiliar specification, its other specification may still be usable by the client. 
@@ -69,9 +69,9 @@ It is a map under the _spec_ with two keys: More examples of the _keyword_ search type include: -* [`SET`](/commands/set) has a `begin_search` specification of type _index_ with a value of _1_. -* [`XREAD`](/commands/xread) has a `begin_search` specification of type _keyword_ with the values _"STREAMS"_ and _1_ as _keyword_ and _startfrom_, respectively. -* [`MIGRATE`](/commands/migrate) has a _start_search_ specification of type _keyword_ with the values of _"KEYS"_ and _-2_. +* [`SET`]({{< relref "/commands/set" >}}) has a `begin_search` specification of type _index_ with a value of _1_. +* [`XREAD`]({{< relref "/commands/xread" >}}) has a `begin_search` specification of type _keyword_ with the values _"STREAMS"_ and _1_ as _keyword_ and _startfrom_, respectively. +* [`MIGRATE`]({{< relref "/commands/migrate" >}}) has a _start_search_ specification of type _keyword_ with the values of _"KEYS"_ and _-2_. ## find_keys @@ -105,10 +105,10 @@ The _keynum_ type of `find_keys` is a map under the _spec_ key with three keys: Examples: -* The [`SET`](/commands/set) command has a _range_ of _0_, _1_ and _0_. -* The [`MSET`](/commands/mset) command has a _range_ of _-1_, _2_ and _0_. -* The [`XREAD`](/commands/xread) command has a _range_ of _-1_, _1_ and _2_. -* The [`ZUNION`](/commands/zunion) command has a _start_search_ type _index_ with the value _1_, and `find_keys` of type _keynum_ with values of _0_, _1_ and _1_. +* The [`SET`]({{< relref "/commands/set" >}}) command has a _range_ of _0_, _1_ and _0_. +* The [`MSET`]({{< relref "/commands/mset" >}}) command has a _range_ of _-1_, _2_ and _0_. +* The [`XREAD`]({{< relref "/commands/xread" >}}) command has a _range_ of _-1_, _1_ and _2_. +* The [`ZUNION`]({{< relref "/commands/zunion" >}}) command has a _start_search_ type _index_ with the value _1_, and `find_keys` of type _keynum_ with values of _0_, _1_ and _1_. 
* The [`AI.DAGRUN`](https://oss.redislabs.com/redisai/master/commands/#aidagrun) command has a _start_search_ of type _keyword_ with values of _"LOAD"_ and _1_, and `find_keys` of type _keynum_ with values of _0_, _1_ and _1_. **Note:** @@ -177,35 +177,35 @@ Key specifications may have the following flags: ### incomplete Some commands feature exotic approaches when it comes to specifying their keys, which makes extraction difficult. -Consider, for example, what would happen with a call to [`MIGRATE`](/commands/migrate) that includes the literal string _"KEYS"_ as an argument to its _AUTH_ clause. +Consider, for example, what would happen with a call to [`MIGRATE`]({{< relref "/commands/migrate" >}}) that includes the literal string _"KEYS"_ as an argument to its _AUTH_ clause. Our key specifications would miss the mark, and extraction would begin at the wrong index. Thus, we recognize that key specifications are incomplete and may fail to extract all keys. However, we assure that even incomplete specifications never yield the wrong names of keys, providing that the command is syntactically correct. -In the case of [`MIGRATE`](/commands/migrate), the search begins at the end (_startfrom_ has the value of _-1_). +In the case of [`MIGRATE`]({{< relref "/commands/migrate" >}}), the search begins at the end (_startfrom_ has the value of _-1_). If and when we encounter a key named _"KEYS"_, we'll only extract the subset of the key name arguments after it. -That's why [`MIGRATE`](/commands/migrate) has the _incomplete_ flag in its key specification. +That's why [`MIGRATE`]({{< relref "/commands/migrate" >}}) has the _incomplete_ flag in its key specification. -Another case of incompleteness is the [`SORT`](/commands/sort) command. +Another case of incompleteness is the [`SORT`]({{< relref "/commands/sort" >}}) command. Here, the `begin_search` and `find_keys` are of type _unknown_. 
-The client should revert to calling the [`COMMAND GETKEYS`](/commands/command-getkeys) command to extract key names from the arguments, short of implementing it natively. -The difficulty arises, for example, because the string _"STORE"_ is both a keyword (token) and a valid literal argument for [`SORT`](/commands/sort). +The client should revert to calling the [`COMMAND GETKEYS`]({{< relref "/commands/command-getkeys" >}}) command to extract key names from the arguments, short of implementing it natively. +The difficulty arises, for example, because the string _"STORE"_ is both a keyword (token) and a valid literal argument for [`SORT`]({{< relref "/commands/sort" >}}). **Note:** -the only commands with _incomplete_ key specifications are [`SORT`](/commands/sort) and [`MIGRATE`](/commands/migrate). +the only commands with _incomplete_ key specifications are [`SORT`]({{< relref "/commands/sort" >}}) and [`MIGRATE`]({{< relref "/commands/migrate" >}}). We don't expect the addition of such commands in the future. ### variable_flags In some commands, the flags for the same key name argument can depend on other arguments. -For example, consider the [`SET`](/commands/set) command and its optional _GET_ argument. -Without the _GET_ argument, [`SET`](/commands/set) is write-only, but it becomes a read and write command with it. +For example, consider the [`SET`]({{< relref "/commands/set" >}}) command and its optional _GET_ argument. +Without the _GET_ argument, [`SET`]({{< relref "/commands/set" >}}) is write-only, but it becomes a read and write command with it. When this flag is present, it means that the key specification flags cover all possible options, but the effective flags depend on other arguments. 
## Examples -### [`SET`](/commands/set)'s key specifications +### [`SET`]({{< relref "/commands/set" >}})'s key specifications ``` 1) 1) "flags" @@ -230,7 +230,7 @@ When this flag is present, it means that the key specification flags cover all p 6) (integer) 0 ``` -### [`ZUNION`](/commands/zunion)'s key specifications +### [`ZUNION`]({{< relref "/commands/zunion" >}})'s key specifications ``` 1) 1) "flags" diff --git a/content/develop/reference/modules/_index.md b/content/develop/reference/modules/_index.md index dc830dc641..afa0583938 100644 --- a/content/develop/reference/modules/_index.md +++ b/content/develop/reference/modules/_index.md @@ -29,7 +29,7 @@ modules, rapidly implementing new Redis commands with features similar to what can be done inside the core itself. Redis modules are dynamic libraries that can be loaded into Redis at -startup, or using the [`MODULE LOAD`](/commands/module-load) command. Redis exports a C API, in the +startup, or using the [`MODULE LOAD`]({{< relref "/commands/module-load" >}}) command. Redis exports a C API, in the form of a single C header file called `redismodule.h`. Modules are meant to be written in C, however it will be possible to use C++ or other languages that have C binding functionalities. @@ -65,7 +65,7 @@ following command: Note that `mymodule` above is not the filename without the `.so` suffix, but instead, the name the module used to register itself into the Redis core. -The name can be obtained using [`MODULE LIST`](/commands/module-list). However it is good practice +The name can be obtained using [`MODULE LIST`]({{< relref "/commands/module-list" >}}). However it is good practice that the filename of the dynamic library is the same as the name the module uses to register itself into the Redis core. 
@@ -121,7 +121,7 @@ The following is the function prototype: int module_version, int api_version); The `Init` function announces the Redis core that the module has a given -name, its version (that is reported by [`MODULE LIST`](/commands/module-list)), and that is willing +name, its version (that is reported by [`MODULE LIST`]({{< relref "/commands/module-list" >}})), and that is willing to use a specific version of the API. If the API version is wrong, the name is already taken, or there are other @@ -163,7 +163,7 @@ Zooming into the example command implementation, we can find another call: int RedisModule_ReplyWithLongLong(RedisModuleCtx *ctx, long long integer); This function returns an integer to the client that invoked the command, -exactly like other Redis commands do, like for example [`INCR`](/commands/incr) or [`SCARD`](/commands/scard). +exactly like other Redis commands do, like for example [`INCR`]({{< relref "/commands/incr" >}}) or [`SCARD`]({{< relref "/commands/scard" >}}). ## Module cleanup @@ -207,7 +207,7 @@ Using the macro or just comparing with NULL is a matter of personal preference. # Passing configuration parameters to Redis modules -When the module is loaded with the [`MODULE LOAD`](/commands/module-load) command, or using the +When the module is loaded with the [`MODULE LOAD`]({{< relref "/commands/module-load" >}}) command, or using the `loadmodule` directive in the `redis.conf` file, the user is able to pass configuration parameters to the module by adding arguments after the module file name: @@ -311,7 +311,7 @@ kind of strings: null-terminated C strings, RedisModuleString objects as received from the `argv` parameter in the command implementation, binary safe C buffers with a pointer and a length, and so forth. 
-For example if I want to call [`INCRBY`](/commands/incrby) using a first argument (the key) +For example if I want to call [`INCRBY`]({{< relref "/commands/incrby" >}}) using a first argument (the key) a string received in the argument vector `argv`, which is an array of RedisModuleString object pointers, and a C string representing the number "10" as second argument (the increment), I'll use the following @@ -669,7 +669,7 @@ replaced with the new value. If the key has an expire, and the special value `REDISMODULE_NO_EXPIRE` is used as a new expire, the expire is removed, similarly to the Redis -[`PERSIST`](/commands/persist) command. In case the key was already persistent, no operation is +[`PERSIST`]({{< relref "/commands/persist" >}}) command. In case the key was already persistent, no operation is performed. ## Obtaining the length of values @@ -685,12 +685,12 @@ If the key does not exist, 0 is returned by the function: ## String type API -Setting a new string value, like the Redis [`SET`](/commands/set) command does, is performed +Setting a new string value, like the Redis [`SET`]({{< relref "/commands/set" >}}) command does, is performed using: int RedisModule_StringSet(RedisModuleKey *key, RedisModuleString *str); -The function works exactly like the Redis [`SET`](/commands/set) command itself, that is, if +The function works exactly like the Redis [`SET`]({{< relref "/commands/set" >}}) command itself, that is, if there is a prior value (of any type) it will be deleted. 
Accessing existing string values is performed using DMA (direct memory @@ -868,7 +868,7 @@ specific functions, that are exact replacements for `malloc`, `free`, They work exactly like their `libc` equivalent calls, however they use the same allocator Redis uses, and the memory allocated using these -functions is reported by the [`INFO`](/commands/info) command in the memory section, is +functions is reported by the [`INFO`]({{< relref "/commands/info" >}}) command in the memory section, is accounted when enforcing the `maxmemory` policy, and in general is a first citizen of the Redis executable. On the contrary, the method allocated inside modules with libc `malloc()` is transparent to Redis. diff --git a/content/develop/reference/modules/modules-api-ref.md b/content/develop/reference/modules/modules-api-ref.md index 8abd6454b6..b80464a3c9 100644 --- a/content/develop/reference/modules/modules-api-ref.md +++ b/content/develop/reference/modules/modules-api-ref.md @@ -374,7 +374,7 @@ Redis keys. See [https://redis.io/commands/command](https://redis.io/commands/co * `keystep`: Step between first and last key indexes. 0 for commands with no keys. -This information is used by ACL, Cluster and the [`COMMAND`](/commands/command) command. +This information is used by ACL, Cluster and the [`COMMAND`]({{< relref "/commands/command" >}}) command. NOTE: The scheme described above serves a limited purpose and can only be used to find keys that exist at constant indices. @@ -473,7 +473,7 @@ outside of this function, an error is returned. Set additional command information. 
-Affects the output of [`COMMAND`](/commands/command), [`COMMAND INFO`](/commands/command-info) and [`COMMAND DOCS`](/commands/command-docs), Cluster, +Affects the output of [`COMMAND`]({{< relref "/commands/command" >}}), [`COMMAND INFO`]({{< relref "/commands/command-info" >}}) and [`COMMAND DOCS`]({{< relref "/commands/command-docs" >}}), Cluster, ACL and is used to filter commands with the wrong number of arguments before the call reaches the module code. @@ -726,8 +726,8 @@ All fields except `version` are optional. Explanation of the fields: `key_specs` above. If the argument is not a key, you may specify -1. * `token`: The token preceding the argument (optional). Example: the - argument `seconds` in [`SET`](/commands/set) has a token `EX`. If the argument consists - of only a token (for example `NX` in [`SET`](/commands/set)) the type should be + argument `seconds` in [`SET`]({{< relref "/commands/set" >}}) has a token `EX`. If the argument consists + of only a token (for example `NX` in [`SET`]({{< relref "/commands/set" >}})) the type should be `REDISMODULE_ARG_TYPE_PURE_TOKEN` and `value` should be NULL. * `summary`: A short description of the argument (optional). @@ -738,7 +738,7 @@ All fields except `version` are optional. Explanation of the fields: * `value`: The display-value of the argument. This string is what should be displayed when creating the command syntax from the output of - [`COMMAND`](/commands/command). If `token` is not NULL, it should also be displayed. + [`COMMAND`]({{< relref "/commands/command" >}}). If `token` is not NULL, it should also be displayed. Explanation of `RedisModuleCommandArgType`: @@ -750,14 +750,14 @@ All fields except `version` are optional. Explanation of the fields: * `REDISMODULE_ARG_TYPE_UNIX_TIME`: Integer, but Unix timestamp. * `REDISMODULE_ARG_TYPE_PURE_TOKEN`: Argument doesn't have a placeholder. It's just a token without a value. Example: the `KEEPTTL` option of the - [`SET`](/commands/set) command. 
+ [`SET`]({{< relref "/commands/set" >}}) command. * `REDISMODULE_ARG_TYPE_ONEOF`: Used when the user can choose only one of a few sub-arguments. Requires `subargs`. Example: the `NX` and `XX` - options of [`SET`](/commands/set). + options of [`SET`]({{< relref "/commands/set" >}}). * `REDISMODULE_ARG_TYPE_BLOCK`: Used when one wants to group together several sub-arguments, usually to apply something on all of them, like making the entire group "optional". Requires `subargs`. Example: the - `LIMIT offset count` parameters in [`ZRANGE`](/commands/zrange). + `LIMIT offset count` parameters in [`ZRANGE`]({{< relref "/commands/zrange" >}}). Explanation of the command argument flags: @@ -4516,7 +4516,7 @@ The callbacks are called in the following contexts: reply_callback: called after a successful RedisModule_UnblockClient() call in order to reply to the client and unblock it. - timeout_callback: called when the timeout is reached or if [`CLIENT UNBLOCK`](/commands/client-unblock) + timeout_callback: called when the timeout is reached or if [`CLIENT UNBLOCK`]({{< relref "/commands/client-unblock" >}}) is invoked, in order to send an error to the client. free_privdata: called in order to free the private data that is passed @@ -4535,9 +4535,9 @@ In these cases, a call to [`RedisModule_BlockClient()`](#RedisModule_BlockClient client, but instead produce a specific error reply. A module that registers a `timeout_callback` function can also be unblocked -using the [`CLIENT UNBLOCK`](/commands/client-unblock) command, which will trigger the timeout callback. +using the [`CLIENT UNBLOCK`]({{< relref "/commands/client-unblock" >}}) command, which will trigger the timeout callback. 
If a callback function is not registered, then the blocked client will be -treated as if it is not in a blocked state and [`CLIENT UNBLOCK`](/commands/client-unblock) will return +treated as if it is not in a blocked state and [`CLIENT UNBLOCK`]({{< relref "/commands/client-unblock" >}}) will return a zero value. Measuring background time: By default the time spent in the blocked command @@ -6393,10 +6393,10 @@ begins processing the command, any change will affect the way the command is processed. For example, a module can override Redis commands this way: 1. Register a `MODULE.SET` command which implements an extended version of - the Redis [`SET`](/commands/set) command. -2. Register a command filter which detects invocation of [`SET`](/commands/set) on a specific + the Redis [`SET`]({{< relref "/commands/set" >}}) command. +2. Register a command filter which detects invocation of [`SET`]({{< relref "/commands/set" >}}) on a specific pattern of keys. Once detected, the filter will replace the first - argument from [`SET`](/commands/set) to `MODULE.SET`. + argument from [`SET`]({{< relref "/commands/set" >}}) to `MODULE.SET`. 3. When filter execution is complete, Redis considers the new command name and therefore executes the module's own command. @@ -7124,16 +7124,16 @@ subevent is not supported and non-zero otherwise. **Available since:** 7.0.0 Create a string config that Redis users can interact with via the Redis config file, -[`CONFIG SET`](/commands/config-set), [`CONFIG GET`](/commands/config-get), and [`CONFIG REWRITE`](/commands/config-rewrite) commands. +[`CONFIG SET`]({{< relref "/commands/config-set" >}}), [`CONFIG GET`]({{< relref "/commands/config-get" >}}), and [`CONFIG REWRITE`]({{< relref "/commands/config-rewrite" >}}) commands. The actual config value is owned by the module, and the `getfn`, `setfn` and optional `applyfn` callbacks that are provided to Redis in order to access or manipulate the value. 
The `getfn` callback retrieves the value from the module, while the `setfn` callback provides a value to be stored into the module config. -The optional `applyfn` callback is called after a [`CONFIG SET`](/commands/config-set) command modified one or +The optional `applyfn` callback is called after a [`CONFIG SET`]({{< relref "/commands/config-set" >}}) command modified one or more configs using the `setfn` callback and can be used to atomically apply a config after several configs were changed together. -If there are multiple configs with `applyfn` callbacks set by a single [`CONFIG SET`](/commands/config-set) +If there are multiple configs with `applyfn` callbacks set by a single [`CONFIG SET`]({{< relref "/commands/config-set" >}}) command, they will be deduplicated if their `applyfn` function and `privdata` pointers are identical, and the callback will only be run once. Both the `setfn` and `applyfn` can return an error if the provided value is invalid or @@ -7158,7 +7158,7 @@ The name must only contain alphanumeric characters or dashes. The supported flag * `REDISMODULE_CONFIG_DEFAULT`: The default flags for a config. This creates a config that can be modified after startup. * `REDISMODULE_CONFIG_IMMUTABLE`: This config can only be provided loading time. * `REDISMODULE_CONFIG_SENSITIVE`: The value stored in this config is redacted from all logging. -* `REDISMODULE_CONFIG_HIDDEN`: The name is hidden from [`CONFIG GET`](/commands/config-get) with pattern matching. +* `REDISMODULE_CONFIG_HIDDEN`: The name is hidden from [`CONFIG GET`]({{< relref "/commands/config-get" >}}) with pattern matching. * `REDISMODULE_CONFIG_PROTECTED`: This config will be only be modifiable based off the value of enable-protected-configs. * `REDISMODULE_CONFIG_DENY_LOADING`: This config is not modifiable while the server is loading data. * `REDISMODULE_CONFIG_MEMORY`: For numeric configs, this config will convert data unit notations into their byte equivalent. 
@@ -7215,7 +7215,7 @@ errno is set: **Available since:** 7.0.0 Create a bool config that server clients can interact with via the -[`CONFIG SET`](/commands/config-set), [`CONFIG GET`](/commands/config-get), and [`CONFIG REWRITE`](/commands/config-rewrite) commands. See +[`CONFIG SET`]({{< relref "/commands/config-set" >}}), [`CONFIG GET`]({{< relref "/commands/config-get" >}}), and [`CONFIG REWRITE`]({{< relref "/commands/config-rewrite" >}}) commands. See [`RedisModule_RegisterStringConfig`](#RedisModule_RegisterStringConfig) for detailed information about configs. @@ -7238,7 +7238,7 @@ Create a bool config that server clients can interact with via the Create an enum config that server clients can interact with via the -[`CONFIG SET`](/commands/config-set), [`CONFIG GET`](/commands/config-get), and [`CONFIG REWRITE`](/commands/config-rewrite) commands. +[`CONFIG SET`]({{< relref "/commands/config-set" >}}), [`CONFIG GET`]({{< relref "/commands/config-get" >}}), and [`CONFIG REWRITE`]({{< relref "/commands/config-rewrite" >}}) commands. Enum configs are a set of string tokens to corresponding integer values, where the string value is exposed to Redis clients but the value passed Redis and the module is the integer value. These values are defined in `enum_values`, an array @@ -7285,7 +7285,7 @@ See [`RedisModule_RegisterStringConfig`](#RedisModule_RegisterStringConfig) for Create an integer config that server clients can interact with via the -[`CONFIG SET`](/commands/config-set), [`CONFIG GET`](/commands/config-get), and [`CONFIG REWRITE`](/commands/config-rewrite) commands. See +[`CONFIG SET`]({{< relref "/commands/config-set" >}}), [`CONFIG GET`]({{< relref "/commands/config-get" >}}), and [`CONFIG REWRITE`]({{< relref "/commands/config-rewrite" >}}) commands. See [`RedisModule_RegisterStringConfig`](#RedisModule_RegisterStringConfig) for detailed information about configs. 
@@ -7299,7 +7299,7 @@ Create an integer config that server clients can interact with via the Applies all pending configurations on the module load. This should be called after all of the configurations have been registered for the module inside of `RedisModule_OnLoad`. This will return `REDISMODULE_ERR` if it is called outside `RedisModule_OnLoad`. -This API needs to be called when configurations are provided in either [`MODULE LOADEX`](/commands/module-loadex) +This API needs to be called when configurations are provided in either [`MODULE LOADEX`]({{< relref "/commands/module-loadex" >}}) or provided as startup arguments. @@ -7566,7 +7566,7 @@ If `old_value` is non-NULL, the old value is returned by reference. For a specified command, parse its arguments and return an array that contains the indexes of all key name arguments. This function is -essentially a more efficient way to do [`COMMAND GETKEYS`](/commands/command-getkeys). +essentially a more efficient way to do [`COMMAND GETKEYS`]({{< relref "/commands/command-getkeys" >}}). The `out_flags` argument is optional, and can be set to NULL. When provided it is filled with `REDISMODULE_CMD_KEY_` flags in matching diff --git a/content/develop/reference/modules/modules-blocking-ops.md b/content/develop/reference/modules/modules-blocking-ops.md index ec03880f6f..c5c72727d0 100644 --- a/content/develop/reference/modules/modules-blocking-ops.md +++ b/content/develop/reference/modules/modules-blocking-ops.md @@ -18,13 +18,13 @@ weight: 1 --- Redis has a few blocking commands among the built-in set of commands. -One of the most used is [`BLPOP`](/commands/blpop) (or the symmetric [`BRPOP`](/commands/brpop)) which blocks +One of the most used is [`BLPOP`]({{< relref "/commands/blpop" >}}) (or the symmetric [`BRPOP`]({{< relref "/commands/brpop" >}})) which blocks waiting for elements arriving in a list. 
The interesting fact about blocking commands is that they do not block the whole server, but just the client calling them. Usually the reason to block is that we expect some external event to happen: this can be -some change in the Redis data structures like in the [`BLPOP`](/commands/blpop) case, a +some change in the Redis data structures like in the [`BLPOP`]({{< relref "/commands/blpop" >}}) case, a long computation happening in a thread, to receive some data from the network, and so forth. diff --git a/content/develop/reference/modules/modules-native-types.md b/content/develop/reference/modules/modules-native-types.md index 0ccf2a6a43..dc7511b059 100644 --- a/content/develop/reference/modules/modules-native-types.md +++ b/content/develop/reference/modules/modules-native-types.md @@ -32,7 +32,7 @@ We call the ability of Redis modules to implement new data structures that feel like native Redis ones **native types support**. This document describes the API exported by the Redis modules system in order to create new data structures and handle the serialization in RDB files, the rewriting process -in AOF, the type reporting via the [`TYPE`](/commands/type) command, and so forth. +in AOF, the type reporting via the [`TYPE`]({{< relref "/commands/type" >}}) command, and so forth. Overview of native types --- @@ -103,7 +103,7 @@ finds no matching module, the integer is converted back to a name in order to provide some clue to the user about what module is missing in order to load the data. -The type name is also used as a reply for the [`TYPE`](/commands/type) command when called +The type name is also used as a reply for the [`TYPE`]({{< relref "/commands/type" >}}) command when called with a key holding the registered type. The `encver` argument is the encoding version used by the module to store data @@ -131,8 +131,8 @@ registration function: `rdb_load`, `rdb_save`, `aof_rewrite`, `digest` and * `rdb_save` is called when saving data to the RDB file. 
* `aof_rewrite` is called when the AOF is being rewritten, and the module needs to tell Redis what is the sequence of commands to recreate the content of a given key. * `digest` is called when `DEBUG DIGEST` is executed and a key holding this module type is found. Currently this is not yet implemented so the function ca be left empty. -* `mem_usage` is called when the [`MEMORY`](/commands/memory) command asks for the total memory consumed by a specific key, and is used in order to get the amount of bytes used by the module value. -* `free` is called when a key with the module native type is deleted via [`DEL`](/commands/del) or in any other mean, in order to let the module reclaim the memory associated with such a value. +* `mem_usage` is called when the [`MEMORY`]({{< relref "/commands/memory" >}}) command asks for the total memory consumed by a specific key, and is used in order to get the amount of bytes used by the module value. +* `free` is called when a key with the module native type is deleted via [`DEL`]({{< relref "/commands/del" >}}) or in any other mean, in order to let the module reclaim the memory associated with such a value. Ok, but *why* modules types require a 9 characters name? --- diff --git a/content/develop/reference/protocol-spec.md b/content/develop/reference/protocol-spec.md index 8b46ad27bf..ef2a67d3a3 100644 --- a/content/develop/reference/protocol-spec.md +++ b/content/develop/reference/protocol-spec.md @@ -48,7 +48,7 @@ In Redis 2.0, the protocol's next version, a.k.a RESP2, became the standard comm [RESP3](https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md) is a superset of RESP2 that mainly aims to make a client author's life a little bit easier. Redis 6.0 introduced experimental opt-in support of RESP3's features (excluding streaming strings and streaming aggregates). 
-In addition, the introduction of the [`HELLO`](/commands/hello) command allows clients to handshake and upgrade the connection's protocol version (see [Client handshake](#client-handshake)). +In addition, the introduction of the [`HELLO`]({{< relref "/commands/hello" >}}) command allows clients to handshake and upgrade the connection's protocol version (see [Client handshake](#client-handshake)). Up to and including Redis 7, both RESP2 and RESP3 clients can invoke all core commands. However, commands may return differently typed replies for different protocol versions. @@ -71,10 +71,10 @@ This is the simplest model possible; however, there are some exceptions: Pipelining enables clients to send multiple commands at once and wait for replies later. * When a RESP2 connection subscribes to a [Pub/Sub]({{< relref "/develop/interact/pubsub" >}}) channel, the protocol changes semantics and becomes a *push* protocol. The client no longer requires sending commands because the server will automatically send new messages to the client (for the channels the client is subscribed to) as soon as they are received. -* The [`MONITOR`](/commands/monitor) command. - Invoking the [`MONITOR`](/commands/monitor) command switches the connection to an ad-hoc push mode. +* The [`MONITOR`]({{< relref "/commands/monitor" >}}) command. + Invoking the [`MONITOR`]({{< relref "/commands/monitor" >}}) command switches the connection to an ad-hoc push mode. The protocol of this mode is not specified but is obvious to parse. -* [Protected mode]({{< relref "/operate/oss_and_stack/management/security/" >}}). +* [Protected mode]({{< baseurl >}}/operate/oss_and_stack/management/security/#protected-mode). Connections opened from a non-loopback address to a Redis while in protected mode are denied and terminated by the server. Before terminating the connection, Redis unconditionally sends a `-DENIED` reply, regardless of whether the client writes to the socket. * The [RESP3 Push type](#resp3-pushes). 
@@ -197,15 +197,15 @@ RESP encodes integers in the following way: For example, `:0\r\n` and `:1000\r\n` are integer replies (of zero and one thousand, respectively). -Many Redis commands return RESP integers, including [`INCR`](/commands/incr), [`LLEN`](/commands/llen), and [`LASTSAVE`](/commands/lastsave). +Many Redis commands return RESP integers, including [`INCR`]({{< relref "/commands/incr" >}}), [`LLEN`]({{< relref "/commands/llen" >}}), and [`LASTSAVE`]({{< relref "/commands/lastsave" >}}). An integer, by itself, has no special meaning other than in the context of the command that returned it. -For example, it is an incremental number for [`INCR`](/commands/incr), a UNIX timestamp for [`LASTSAVE`](/commands/lastsave), and so forth. +For example, it is an incremental number for [`INCR`]({{< relref "/commands/incr" >}}), a UNIX timestamp for [`LASTSAVE`]({{< relref "/commands/lastsave" >}}), and so forth. However, the returned integer is guaranteed to be in the range of a signed 64-bit integer. In some cases, integers can represent true and false Boolean values. -For instance, [`SISMEMBER`](/commands/sismember) returns 1 for true and 0 for false. +For instance, [`SISMEMBER`]({{< relref "/commands/sismember" >}}) returns 1 for true and 0 for false. -Other commands, including [`SADD`](/commands/sadd), [`SREM`](/commands/srem), and [`SETNX`](/commands/setnx), return 1 when the data changes and 0 otherwise. +Other commands, including [`SADD`]({{< relref "/commands/sadd" >}}), [`SREM`]({{< relref "/commands/srem" >}}), and [`SETNX`]({{< relref "/commands/setnx" >}}), return 1 when the data changes and 0 otherwise. @@ -238,7 +238,7 @@ Whereas RESP3 has a dedicated data type for [null values](#nulls), RESP2 has no Instead, due to historical reasons, the representation of null values in RESP2 is via predetermined forms of the [bulk strings](#bulk-strings) and [arrays](#arrays) types. The null bulk string represents a non-existing value. 
-The [`GET`](/commands/get) command returns the Null Bulk String when the target key doesn't exist. +The [`GET`]({{< relref "/commands/get" >}}) command returns the Null Bulk String when the target key doesn't exist. It is encoded as a bulk string with the length of negative one (-1), like so: @@ -252,7 +252,7 @@ For example, a Ruby library should return `nil` while a C library should return ### Arrays Clients send commands to the Redis server as RESP arrays. Similarly, some Redis commands that return collections of elements use arrays as their replies. -An example is the [`LRANGE`](/commands/lrange) command that returns elements of a list. +An example is the [`LRANGE`]({{< relref "/commands/lrange" >}}) command that returns elements of a list. RESP Arrays' encoding uses the following format: @@ -322,18 +322,18 @@ The two are the same. Whereas RESP3 has a dedicated data type for [null values](#nulls), RESP2 has no such type. Instead, due to historical reasons, the representation of null values in RESP2 is via predetermined forms of the [Bulk Strings](#bulk-strings) and [arrays](#arrays) types. Null arrays exist as an alternative way of representing a null value. -For instance, when the [`BLPOP`](/commands/blpop) command times out, it returns a null array. +For instance, when the [`BLPOP`]({{< relref "/commands/blpop" >}}) command times out, it returns a null array. The encoding of a null array is that of an array with the length of -1, i.e.: *-1\r\n When Redis replies with a null array, the client should return a null object rather than an empty array. -This is necessary to distinguish between an empty list and a different condition (for instance, the timeout condition of the [`BLPOP`](/commands/blpop) command). +This is necessary to distinguish between an empty list and a different condition (for instance, the timeout condition of the [`BLPOP`]({{< relref "/commands/blpop" >}}) command). 
#### Null elements in arrays Single elements of an array may be [null bulk string](#null-bulk-strings). -This is used in Redis replies to signal that these elements are missing and not empty strings. This can happen, for example, with the [`SORT`](/commands/sort) command when used with the `GET pattern` option +This is used in Redis replies to signal that these elements are missing and not empty strings. This can happen, for example, with the [`SORT`]({{< relref "/commands/sort" >}}) command when used with the `GET pattern` option if the specified key is missing. Here's an example of an array reply containing a null element: @@ -483,9 +483,9 @@ Example: Some client libraries may ignore the difference between this type and the string type and return a native string in both cases. However, interactive clients, such as command line interfaces (e.g., [`redis-cli`]({{< relref "/develop/connect/cli" >}})), can use this type and know that their output should be presented to the human user as is and without quoting the string. -For example, the Redis command [`INFO`](/commands/info) outputs a report that includes newlines. +For example, the Redis command [`INFO`]({{< relref "/commands/info" >}}) outputs a report that includes newlines. When using RESP3, `redis-cli` displays it correctly because it is sent as a Verbatim String reply (with its three bytes being "txt"). -When using RESP2, however, the `redis-cli` is hard-coded to look for the [`INFO`](/commands/info) command to ensure its correct display to the user. +When using RESP2, however, the `redis-cli` is hard-coded to look for the [`INFO`]({{< relref "/commands/info" >}}) command to ensure its correct display to the user. @@ -569,14 +569,14 @@ It also means that pushed data may appear before or after a command's reply, as Clients should react to pushes by invoking a callback that implements their handling of the pushed data. 
## Client handshake -New RESP connections should begin the session by calling the [`HELLO`](/commands/hello) command. +New RESP connections should begin the session by calling the [`HELLO`]({{< relref "/commands/hello" >}}) command. This practice accomplishes two things: 1. It allows servers to be backward compatible with RESP2 versions. This is needed in Redis to make the transition to version 3 of the protocol gentler. -2. The [`HELLO`](/commands/hello) command returns information about the server and the protocol that the client can use for different goals. +2. The [`HELLO`]({{< relref "/commands/hello" >}}) command returns information about the server and the protocol that the client can use for different goals. -The [`HELLO`](/commands/hello) command has the following high-level syntax: +The [`HELLO`]({{< relref "/commands/hello" >}}) command has the following high-level syntax: HELLO [optional-arguments] @@ -596,14 +596,14 @@ Similarly, the client can easily detect a server that is only able to speak RESP The client can then proceed and use RESP2 to communicate with the server. -Note that even if the protocol's version is supported, the [`HELLO`](/commands/hello) command may return an error, perform no action and remain in RESP2 mode. +Note that even if the protocol's version is supported, the [`HELLO`]({{< relref "/commands/hello" >}}) command may return an error, perform no action and remain in RESP2 mode. For example, when used with invalid authentication credentials in the command's optional `AUTH` clause: Client: HELLO 3 AUTH default mypassword Server: -ERR invalid password (the connection remains in RESP2 mode) -A successful reply to the [`HELLO`](/commands/hello) command is a map reply. +A successful reply to the [`HELLO`]({{< relref "/commands/hello" >}}) command is a map reply. The information in the reply is partly server-dependent, but certain fields are mandatory for all the RESP3 implementations: * **server**: "redis" (or other software name). 
* **version**: the server's version. diff --git a/content/develop/reference/sentinel-clients.md b/content/develop/reference/sentinel-clients.md index ec006ca001..3e39797d60 100644 --- a/content/develop/reference/sentinel-clients.md +++ b/content/develop/reference/sentinel-clients.md @@ -75,10 +75,10 @@ Step 3: call the ROLE command in the target instance --- Once the client discovered the address of the master instance, it should -attempt a connection with the master, and call the [`ROLE`](/commands/role) command in order +attempt a connection with the master, and call the [`ROLE`]({{< relref "/commands/role" >}}) command in order to verify the role of the instance is actually a master. -If the [`ROLE`](/commands/role) commands is not available (it was introduced in Redis 2.8.12), a client may resort to the `INFO replication` command parsing the `role:` field of the output. +If the [`ROLE`]({{< relref "/commands/role" >}}) commands is not available (it was introduced in Redis 2.8.12), a client may resort to the `INFO replication` command parsing the `role:` field of the output. If the instance is not a master as expected, the client should wait a short amount of time (a few hundreds of milliseconds) and should try again starting from Step 1. @@ -103,7 +103,7 @@ command to the instance in order to make sure all the clients are disconnected from the reconfigured instance. This will force clients to resolve the master address again. -If the client will contact a Sentinel with yet not updated information, the verification of the Redis instance role via the [`ROLE`](/commands/role) command will fail, allowing the client to detect that the contacted Sentinel provided stale information, and will try again. 
+If the client will contact a Sentinel with yet not updated information, the verification of the Redis instance role via the [`ROLE`]({{< relref "/commands/role" >}}) command will fail, allowing the client to detect that the contacted Sentinel provided stale information, and will try again. Note: it is possible that a stale master returns online at the same time a client contacts a stale Sentinel instance, so the client may connect with a stale master, and yet the ROLE output will match. However when the master is back again Sentinel will try to demote it to replica, triggering a new disconnection. The same reasoning applies to connecting to stale replicas that will get reconfigured to replicate with a different master. @@ -120,7 +120,7 @@ The clients should call instead: In order to retrieve a list of replica instances. -Symmetrically the client should verify with the [`ROLE`](/commands/role) command that the +Symmetrically the client should verify with the [`ROLE`]({{< relref "/commands/role" >}}) command that the instance is actually a replica, in order to avoid scaling read queries with the master. diff --git a/content/develop/use/client-side-caching.md b/content/develop/use/client-side-caching.md index 1e0e472971..0d2b34fd31 100644 --- a/content/develop/use/client-side-caching.md +++ b/content/develop/use/client-side-caching.md @@ -84,7 +84,7 @@ listening clients. This can be made to work but is tricky and costly from the point of view of the bandwidth used, because often such patterns involve sending the invalidation messages to every client in the application, even if certain clients may not have any copy of the invalidated data. Moreover -every application query altering the data requires to use the [`PUBLISH`](/commands/publish) +every application query altering the data requires to use the [`PUBLISH`]({{< relref "/commands/publish" >}}) command, costing the database more CPU time to process this command. 
Regardless of what schema is used, there is a simple fact: many very large @@ -134,7 +134,7 @@ Using the new version of the Redis protocol, RESP3, supported by Redis 6, it is Here's an example of a complete session using the Redis protocol in the old RESP2 mode involving the following steps: enabling tracking redirecting to another connection, asking for a key, and getting an invalidation message once the key gets modified. -To start, the client opens a first connection that will be used for invalidations, requests the connection ID, and subscribes via Pub/Sub to the special channel that is used to get invalidation messages when in RESP2 modes (remember that RESP2 is the usual Redis protocol, and not the more advanced protocol that you can use, optionally, with Redis 6 using the [`HELLO`](/commands/hello) command): +To start, the client opens a first connection that will be used for invalidations, requests the connection ID, and subscribes via Pub/Sub to the special channel that is used to get invalidation messages when in RESP2 modes (remember that RESP2 is the usual Redis protocol, and not the more advanced protocol that you can use, optionally, with Redis 6 using the [`HELLO`]({{< relref "/commands/hello" >}}) command): ``` (Connection 1 -- used for invalidations) @@ -189,14 +189,14 @@ The client will check if there are cached keys in this caching slot, and will ev Note that the third element of the Pub/Sub message is not a single key but is a Redis array with just a single element. Since we send an array, if there are groups of keys to invalidate, we can do that in a single message. -In case of a flush ([`FLUSHALL`](/commands/flushall) or [`FLUSHDB`](/commands/flushdb)), a `null` message will be sent. +In case of a flush ([`FLUSHALL`]({{< relref "/commands/flushall" >}}) or [`FLUSHDB`]({{< relref "/commands/flushdb" >}})), a `null` message will be sent. 
A very important thing to understand about client-side caching used with RESP2 and a Pub/Sub connection in order to read the invalidation messages, is that using Pub/Sub is entirely a trick **in order to reuse old client implementations**, but actually the message is not really sent to a channel and received by all the clients subscribed to it. Only the connection we -specified in the `REDIRECT` argument of the [`CLIENT`](/commands/client) command will actually +specified in the `REDIRECT` argument of the [`CLIENT`]({{< relref "/commands/client" >}}) command will actually receive the Pub/Sub message, making the feature a lot more scalable. When RESP3 is used instead, invalidation messages are sent (either in the @@ -247,7 +247,7 @@ In this mode, by default, keys mentioned in read queries *are not supposed to be "bar" The `CACHING` command affects the command executed immediately after it, -however in case the next command is [`MULTI`](/commands/multi), all the commands in the +however in case the next command is [`MULTI`]({{< relref "/commands/multi" >}}), all the commands in the transaction will be tracked. Similarly in case of Lua scripts, all the commands executed by the script will be tracked. @@ -326,7 +326,7 @@ future what is good to cache. In general: * We don't want to cache many keys that change continuously. * We don't want to cache many keys that are requested very rarely. -* We want to cache keys that are requested often and change at a reasonable rate. For an example of key not changing at a reasonable rate, think of a global counter that is continuously [`INCR`](/commands/incr)emented. +* We want to cache keys that are requested often and change at a reasonable rate. For an example of key not changing at a reasonable rate, think of a global counter that is continuously [`INCR`]({{< relref "/commands/incr" >}})emented. 
However simpler clients may just evict data using some random sampling just remembering the last time a given cached value was served, trying to evict diff --git a/content/develop/use/keyspace-notifications.md b/content/develop/use/keyspace-notifications.md index 2577c62dea..bbf696b48f 100644 --- a/content/develop/use/keyspace-notifications.md +++ b/content/develop/use/keyspace-notifications.md @@ -33,10 +33,10 @@ disconnected are lost. ### Type of events Keyspace notifications are implemented by sending two distinct types of events -for every operation affecting the Redis data space. For instance a [`DEL`](/commands/del) +for every operation affecting the Redis data space. For instance a [`DEL`]({{< relref "/commands/del" >}}) operation targeting the key named `mykey` in database `0` will trigger the delivering of two messages, exactly equivalent to the following two -[`PUBLISH`](/commands/publish) commands: +[`PUBLISH`]({{< relref "/commands/publish" >}}) commands: PUBLISH __keyspace@0__:mykey del PUBLISH __keyevent@0__:del mykey @@ -97,60 +97,60 @@ You can use the string `KEA` to enable most types of events. Different commands generate different kind of events according to the following list. -* [`DEL`](/commands/del) generates a `del` event for every deleted key. -* [`RENAME`](/commands/rename) generates two events, a `rename_from` event for the source key, and a `rename_to` event for the destination key. -* [`MOVE`](/commands/move) generates two events, a `move_from` event for the source key, and a `move_to` event for the destination key. -* [`COPY`](/commands/copy) generates a `copy_to` event. -* [`MIGRATE`](/commands/migrate) generates a `del` event if the source key is removed. -* [`RESTORE`](/commands/restore) generates a `restore` event for the key. 
-* [`EXPIRE`](/commands/expire) and all its variants ([`PEXPIRE`](/commands/pexpire), [`EXPIREAT`](/commands/expireat), [`PEXPIREAT`](/commands/pexpireat)) generate an `expire` event when called with a positive timeout (or a future timestamp). Note that when these commands are called with a negative timeout value or timestamp in the past, the key is deleted and only a `del` event is generated instead. -* [`SORT`](/commands/sort) generates a `sortstore` event when `STORE` is used to set a new key. If the resulting list is empty, and the `STORE` option is used, and there was already an existing key with that name, the result is that the key is deleted, so a `del` event is generated in this condition. -* [`SET`](/commands/set) and all its variants ([`SETEX`](/commands/setex), [`SETNX`](/commands/setnx),[`GETSET`](/commands/getset)) generate `set` events. However [`SETEX`](/commands/setex) will also generate an `expire` events. -* [`MSET`](/commands/mset) generates a separate `set` event for every key. -* [`SETRANGE`](/commands/setrange) generates a `setrange` event. -* [`INCR`](/commands/incr), [`DECR`](/commands/decr), [`INCRBY`](/commands/incrby), [`DECRBY`](/commands/decrby) commands all generate `incrby` events. -* [`INCRBYFLOAT`](/commands/incrbyfloat) generates an `incrbyfloat` events. -* [`APPEND`](/commands/append) generates an `append` event. -* [`LPUSH`](/commands/lpush) and [`LPUSHX`](/commands/lpushx) generates a single `lpush` event, even in the variadic case. -* [`RPUSH`](/commands/rpush) and [`RPUSHX`](/commands/rpushx) generates a single `rpush` event, even in the variadic case. -* [`RPOP`](/commands/rpop) generates an `rpop` event. Additionally a `del` event is generated if the key is removed because the last element from the list was popped. -* [`LPOP`](/commands/lpop) generates an `lpop` event. Additionally a `del` event is generated if the key is removed because the last element from the list was popped. 
-* [`LINSERT`](/commands/linsert) generates an `linsert` event. -* [`LSET`](/commands/lset) generates an `lset` event. -* [`LREM`](/commands/lrem) generates an `lrem` event, and additionally a `del` event if the resulting list is empty and the key is removed. -* [`LTRIM`](/commands/ltrim) generates an `ltrim` event, and additionally a `del` event if the resulting list is empty and the key is removed. -* [`RPOPLPUSH`](/commands/rpoplpush) and [`BRPOPLPUSH`](/commands/brpoplpush) generate an `rpop` event and an `lpush` event. In both cases the order is guaranteed (the `lpush` event will always be delivered after the `rpop` event). Additionally a `del` event will be generated if the resulting list is zero length and the key is removed. -* [`LMOVE`](/commands/lmove) and [`BLMOVE`](/commands/blmove) generate an `lpop`/`rpop` event (depending on the wherefrom argument) and an `lpush`/`rpush` event (depending on the whereto argument). In both cases the order is guaranteed (the `lpush`/`rpush` event will always be delivered after the `lpop`/`rpop` event). Additionally a `del` event will be generated if the resulting list is zero length and the key is removed. -* [`HSET`](/commands/hset), [`HSETNX`](/commands/hsetnx) and [`HMSET`](/commands/hmset) all generate a single `hset` event. -* [`HINCRBY`](/commands/hincrby) generates an `hincrby` event. -* [`HINCRBYFLOAT`](/commands/hincrbyfloat) generates an `hincrbyfloat` event. -* [`HDEL`](/commands/hdel) generates a single `hdel` event, and an additional `del` event if the resulting hash is empty and the key is removed. -* [`SADD`](/commands/sadd) generates a single `sadd` event, even in the variadic case. -* [`SREM`](/commands/srem) generates a single `srem` event, and an additional `del` event if the resulting set is empty and the key is removed. -* [`SMOVE`](/commands/smove) generates an `srem` event for the source key, and an `sadd` event for the destination key. 
-* [`SPOP`](/commands/spop) generates an `spop` event, and an additional `del` event if the resulting set is empty and the key is removed. -* [`SINTERSTORE`](/commands/sinterstore), [`SUNIONSTORE`](/commands/sunionstore), [`SDIFFSTORE`](/commands/sdiffstore) generate `sinterstore`, `sunionstore`, `sdiffstore` events respectively. In the special case the resulting set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. +* [`DEL`]({{< relref "/commands/del" >}}) generates a `del` event for every deleted key. +* [`RENAME`]({{< relref "/commands/rename" >}}) generates two events, a `rename_from` event for the source key, and a `rename_to` event for the destination key. +* [`MOVE`]({{< relref "/commands/move" >}}) generates two events, a `move_from` event for the source key, and a `move_to` event for the destination key. +* [`COPY`]({{< relref "/commands/copy" >}}) generates a `copy_to` event. +* [`MIGRATE`]({{< relref "/commands/migrate" >}}) generates a `del` event if the source key is removed. +* [`RESTORE`]({{< relref "/commands/restore" >}}) generates a `restore` event for the key. +* [`EXPIRE`]({{< relref "/commands/expire" >}}) and all its variants ([`PEXPIRE`]({{< relref "/commands/pexpire" >}}), [`EXPIREAT`]({{< relref "/commands/expireat" >}}), [`PEXPIREAT`]({{< relref "/commands/pexpireat" >}})) generate an `expire` event when called with a positive timeout (or a future timestamp). Note that when these commands are called with a negative timeout value or timestamp in the past, the key is deleted and only a `del` event is generated instead. +* [`SORT`]({{< relref "/commands/sort" >}}) generates a `sortstore` event when `STORE` is used to set a new key. If the resulting list is empty, and the `STORE` option is used, and there was already an existing key with that name, the result is that the key is deleted, so a `del` event is generated in this condition. 
+* [`SET`]({{< relref "/commands/set" >}}) and all its variants ([`SETEX`]({{< relref "/commands/setex" >}}), [`SETNX`]({{< relref "/commands/setnx" >}}),[`GETSET`]({{< relref "/commands/getset" >}})) generate `set` events. However [`SETEX`]({{< relref "/commands/setex" >}}) will also generate an `expire` event.
+* [`MSET`]({{< relref "/commands/mset" >}}) generates a separate `set` event for every key.
+* [`SETRANGE`]({{< relref "/commands/setrange" >}}) generates a `setrange` event.
+* [`INCR`]({{< relref "/commands/incr" >}}), [`DECR`]({{< relref "/commands/decr" >}}), [`INCRBY`]({{< relref "/commands/incrby" >}}), [`DECRBY`]({{< relref "/commands/decrby" >}}) commands all generate `incrby` events.
+* [`INCRBYFLOAT`]({{< relref "/commands/incrbyfloat" >}}) generates an `incrbyfloat` event.
+* [`APPEND`]({{< relref "/commands/append" >}}) generates an `append` event.
+* [`LPUSH`]({{< relref "/commands/lpush" >}}) and [`LPUSHX`]({{< relref "/commands/lpushx" >}}) generate a single `lpush` event, even in the variadic case.
+* [`RPUSH`]({{< relref "/commands/rpush" >}}) and [`RPUSHX`]({{< relref "/commands/rpushx" >}}) generate a single `rpush` event, even in the variadic case.
+* [`RPOP`]({{< relref "/commands/rpop" >}}) generates an `rpop` event. Additionally a `del` event is generated if the key is removed because the last element from the list was popped.
+* [`LPOP`]({{< relref "/commands/lpop" >}}) generates an `lpop` event. Additionally a `del` event is generated if the key is removed because the last element from the list was popped.
+* [`LINSERT`]({{< relref "/commands/linsert" >}}) generates an `linsert` event.
+* [`LSET`]({{< relref "/commands/lset" >}}) generates an `lset` event.
+* [`LREM`]({{< relref "/commands/lrem" >}}) generates an `lrem` event, and additionally a `del` event if the resulting list is empty and the key is removed.
+* [`LTRIM`]({{< relref "/commands/ltrim" >}}) generates an `ltrim` event, and additionally a `del` event if the resulting list is empty and the key is removed. +* [`RPOPLPUSH`]({{< relref "/commands/rpoplpush" >}}) and [`BRPOPLPUSH`]({{< relref "/commands/brpoplpush" >}}) generate an `rpop` event and an `lpush` event. In both cases the order is guaranteed (the `lpush` event will always be delivered after the `rpop` event). Additionally a `del` event will be generated if the resulting list is zero length and the key is removed. +* [`LMOVE`]({{< relref "/commands/lmove" >}}) and [`BLMOVE`]({{< relref "/commands/blmove" >}}) generate an `lpop`/`rpop` event (depending on the wherefrom argument) and an `lpush`/`rpush` event (depending on the whereto argument). In both cases the order is guaranteed (the `lpush`/`rpush` event will always be delivered after the `lpop`/`rpop` event). Additionally a `del` event will be generated if the resulting list is zero length and the key is removed. +* [`HSET`]({{< relref "/commands/hset" >}}), [`HSETNX`]({{< relref "/commands/hsetnx" >}}) and [`HMSET`]({{< relref "/commands/hmset" >}}) all generate a single `hset` event. +* [`HINCRBY`]({{< relref "/commands/hincrby" >}}) generates an `hincrby` event. +* [`HINCRBYFLOAT`]({{< relref "/commands/hincrbyfloat" >}}) generates an `hincrbyfloat` event. +* [`HDEL`]({{< relref "/commands/hdel" >}}) generates a single `hdel` event, and an additional `del` event if the resulting hash is empty and the key is removed. +* [`SADD`]({{< relref "/commands/sadd" >}}) generates a single `sadd` event, even in the variadic case. +* [`SREM`]({{< relref "/commands/srem" >}}) generates a single `srem` event, and an additional `del` event if the resulting set is empty and the key is removed. +* [`SMOVE`]({{< relref "/commands/smove" >}}) generates an `srem` event for the source key, and an `sadd` event for the destination key. 
+* [`SPOP`]({{< relref "/commands/spop" >}}) generates an `spop` event, and an additional `del` event if the resulting set is empty and the key is removed. +* [`SINTERSTORE`]({{< relref "/commands/sinterstore" >}}), [`SUNIONSTORE`]({{< relref "/commands/sunionstore" >}}), [`SDIFFSTORE`]({{< relref "/commands/sdiffstore" >}}) generate `sinterstore`, `sunionstore`, `sdiffstore` events respectively. In the special case the resulting set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. * `ZINCR` generates a `zincr` event. -* [`ZADD`](/commands/zadd) generates a single `zadd` event even when multiple elements are added. -* [`ZREM`](/commands/zrem) generates a single `zrem` event even when multiple elements are deleted. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. +* [`ZADD`]({{< relref "/commands/zadd" >}}) generates a single `zadd` event even when multiple elements are added. +* [`ZREM`]({{< relref "/commands/zrem" >}}) generates a single `zrem` event even when multiple elements are deleted. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. * `ZREMBYSCORE` generates a single `zrembyscore` event. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. * `ZREMBYRANK` generates a single `zrembyrank` event. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. -* [`ZDIFFSTORE`](/commands/zdiffstore), [`ZINTERSTORE`](/commands/zinterstore) and [`ZUNIONSTORE`](/commands/zunionstore) respectively generate `zdiffstore`, `zinterstore` and `zunionstore` events. In the special case the resulting sorted set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. 
-* [`XADD`](/commands/xadd) generates an `xadd` event, possibly followed an `xtrim` event when used with the `MAXLEN` subcommand.
-* [`XDEL`](/commands/xdel) generates a single `xdel` event even when multiple entries are deleted.
-* [`XGROUP CREATE`](/commands/xgroup-create) generates an `xgroup-create` event.
-* [`XGROUP CREATECONSUMER`](/commands/xgroup-createconsumer) generates an `xgroup-createconsumer` event.
-* [`XGROUP DELCONSUMER`](/commands/xgroup-delconsumer) generates an `xgroup-delconsumer` event.
-* [`XGROUP DESTROY`](/commands/xgroup-destroy) generates an `xgroup-destroy` event.
-* [`XGROUP SETID`](/commands/xgroup-setid) generates an `xgroup-setid` event.
-* [`XSETID`](/commands/xsetid) generates an `xsetid` event.
-* [`XTRIM`](/commands/xtrim) generates an `xtrim` event.
-* [`PERSIST`](/commands/persist) generates a `persist` event if the expiry time associated with key has been successfully deleted.
+* [`ZDIFFSTORE`]({{< relref "/commands/zdiffstore" >}}), [`ZINTERSTORE`]({{< relref "/commands/zinterstore" >}}) and [`ZUNIONSTORE`]({{< relref "/commands/zunionstore" >}}) respectively generate `zdiffstore`, `zinterstore` and `zunionstore` events. In the special case the resulting sorted set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed.
+* [`XADD`]({{< relref "/commands/xadd" >}}) generates an `xadd` event, possibly followed by an `xtrim` event when used with the `MAXLEN` subcommand.
+* [`XDEL`]({{< relref "/commands/xdel" >}}) generates a single `xdel` event even when multiple entries are deleted.
+* [`XGROUP CREATE`]({{< relref "/commands/xgroup-create" >}}) generates an `xgroup-create` event.
+* [`XGROUP CREATECONSUMER`]({{< relref "/commands/xgroup-createconsumer" >}}) generates an `xgroup-createconsumer` event.
+* [`XGROUP DELCONSUMER`]({{< relref "/commands/xgroup-delconsumer" >}}) generates an `xgroup-delconsumer` event.
+* [`XGROUP DESTROY`]({{< relref "/commands/xgroup-destroy" >}}) generates an `xgroup-destroy` event. +* [`XGROUP SETID`]({{< relref "/commands/xgroup-setid" >}}) generates an `xgroup-setid` event. +* [`XSETID`]({{< relref "/commands/xsetid" >}}) generates an `xsetid` event. +* [`XTRIM`]({{< relref "/commands/xtrim" >}}) generates an `xtrim` event. +* [`PERSIST`]({{< relref "/commands/persist" >}}) generates a `persist` event if the expiry time associated with key has been successfully deleted. * Every time a key with a time to live associated is removed from the data set because it expired, an `expired` event is generated. * Every time a key is evicted from the data set in order to free memory as a result of the `maxmemory` policy, an `evicted` event is generated. * Every time a new key is added to the data set, a `new` event is generated. -**IMPORTANT** all the commands generate events only if the target key is really modified. For instance an [`SREM`](/commands/srem) deleting a non-existing element from a Set will not actually change the value of the key, so no event will be generated. +**IMPORTANT** all the commands generate events only if the target key is really modified. For instance an [`SREM`]({{< relref "/commands/srem" >}}) deleting a non-existing element from a Set will not actually change the value of the key, so no event will be generated. If in doubt about how events are generated for a given command, the simplest thing to do is to watch yourself: diff --git a/content/develop/use/keyspace.md b/content/develop/use/keyspace.md index 56dfce68ac..07257eda67 100644 --- a/content/develop/use/keyspace.md +++ b/content/develop/use/keyspace.md @@ -46,8 +46,8 @@ There are commands that are not defined on particular types, but are useful in order to interact with the space of keys, and thus, can be used with keys of any type. 
-For example the [`EXISTS`](/commands/exists) command returns 1 or 0 to signal if a given key -exists or not in the database, while the [`DEL`](/commands/del) command deletes a key +For example the [`EXISTS`]({{< relref "/commands/exists" >}}) command returns 1 or 0 to signal if a given key +exists or not in the database, while the [`DEL`]({{< relref "/commands/del" >}}) command deletes a key and associated value, whatever the value is. > set mykey hello @@ -59,12 +59,12 @@ and associated value, whatever the value is. > exists mykey (integer) 0 -From the examples you can also see how [`DEL`](/commands/del) itself returns 1 or 0 depending on whether +From the examples you can also see how [`DEL`]({{< relref "/commands/del" >}}) itself returns 1 or 0 depending on whether the key was removed (it existed) or not (there was no such key with that name). There are many key space related commands, but the above two are the -essential ones together with the [`TYPE`](/commands/type) command, which returns the kind +essential ones together with the [`TYPE`]({{< relref "/commands/type" >}}) command, which returns the kind of value stored at the specified key: > set mykey x @@ -86,7 +86,7 @@ A few important notes about key expiration: * However the expire time resolution is always 1 millisecond. * Information about expires are replicated and persisted on disk, the time virtually passes when your Redis server remains stopped (this means that Redis saves the date at which a key will expire). -Use the [`EXPIRE`](/commands/expire) command to set a key's expiration: +Use the [`EXPIRE`]({{< relref "/commands/expire" >}}) command to set a key's expiration: > set key some-value OK @@ -97,13 +97,13 @@ Use the [`EXPIRE`](/commands/expire) command to set a key's expiration: > get key (after some time) (nil) -The key vanished between the two [`GET`](/commands/get) calls, since the second call was -delayed more than 5 seconds. 
In the example above we used [`EXPIRE`](/commands/expire) in +The key vanished between the two [`GET`]({{< relref "/commands/get" >}}) calls, since the second call was +delayed more than 5 seconds. In the example above we used [`EXPIRE`]({{< relref "/commands/expire" >}}) in order to set the expire (it can also be used in order to set a different -expire to a key already having one, like [`PERSIST`](/commands/persist) can be used in order +expire to a key already having one, like [`PERSIST`]({{< relref "/commands/persist" >}}) can be used in order to remove the expire and make the key persistent forever). However we can also create keys with expires using other Redis commands. For example -using [`SET`](/commands/set) options: +using [`SET`]({{< relref "/commands/set" >}}) options: > set key 100 ex 10 OK @@ -111,35 +111,35 @@ using [`SET`](/commands/set) options: (integer) 9 The example above sets a key with the string value `100`, having an expire -of ten seconds. Later the [`TTL`](/commands/ttl) command is called in order to check the +of ten seconds. Later the [`TTL`]({{< relref "/commands/ttl" >}}) command is called in order to check the remaining time to live for the key. -In order to set and check expires in milliseconds, check the [`PEXPIRE`](/commands/pexpire) and -the [`PTTL`](/commands/pttl) commands, and the full list of [`SET`](/commands/set) options. +In order to set and check expires in milliseconds, check the [`PEXPIRE`]({{< relref "/commands/pexpire" >}}) and +the [`PTTL`]({{< relref "/commands/pttl" >}}) commands, and the full list of [`SET`]({{< relref "/commands/set" >}}) options. ## Navigating the keyspace ### Scan -To incrementally iterate over the keys in a Redis database in an efficient manner, you can use the [`SCAN`](/commands/scan) command. +To incrementally iterate over the keys in a Redis database in an efficient manner, you can use the [`SCAN`]({{< relref "/commands/scan" >}}) command. 
-Since [`SCAN`](/commands/scan) allows for incremental iteration, returning only a small number of elements per call, it can be used in production without the downside of commands like [`KEYS`](/commands/keys) or [`SMEMBERS`](/commands/smembers) that may block the server for a long time (even several seconds) when called against big collections of keys or elements.
+Since [`SCAN`]({{< relref "/commands/scan" >}}) allows for incremental iteration, returning only a small number of elements per call, it can be used in production without the downside of commands like [`KEYS`]({{< relref "/commands/keys" >}}) or [`SMEMBERS`]({{< relref "/commands/smembers" >}}) that may block the server for a long time (even several seconds) when called against big collections of keys or elements.

-However while blocking commands like [`SMEMBERS`](/commands/smembers) are able to provide all the elements that are part of a Set in a given moment.
-The [`SCAN`](/commands/scan) family of commands only offer limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process.
+However, blocking commands like [`SMEMBERS`]({{< relref "/commands/smembers" >}}) are able to provide all the elements that are part of a Set in a given moment.
+The [`SCAN`]({{< relref "/commands/scan" >}}) family of commands only offers limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process.

### Keys

-Another way to iterate over the keyspace is to use the [`KEYS`](/commands/keys) command, but this approach should be used with care, since [`KEYS`](/commands/keys) will block the Redis server until all keys are returned.
+Another way to iterate over the keyspace is to use the [`KEYS`]({{< relref "/commands/keys" >}}) command, but this approach should be used with care, since [`KEYS`]({{< relref "/commands/keys" >}}) will block the Redis server until all keys are returned. -**Warning**: consider [`KEYS`](/commands/keys) as a command that should only be used in production +**Warning**: consider [`KEYS`]({{< relref "/commands/keys" >}}) as a command that should only be used in production environments with extreme care. -[`KEYS`](/commands/keys) may ruin performance when it is executed against large databases. +[`KEYS`]({{< relref "/commands/keys" >}}) may ruin performance when it is executed against large databases. This command is intended for debugging and special operations, such as changing your keyspace layout. -Don't use [`KEYS`](/commands/keys) in your regular application code. +Don't use [`KEYS`]({{< relref "/commands/keys" >}}) in your regular application code. If you're looking for a way to find keys in a subset of your keyspace, consider -using [`SCAN`](/commands/scan) or [sets][tdts]. +using [`SCAN`]({{< relref "/commands/scan" >}}) or [sets][tdts]. 
[tdts]: /topics/data-types#sets diff --git a/content/develop/use/manual/_index.md b/content/develop/use/manual/_index.md deleted file mode 100644 index 33b9f80dbd..0000000000 --- a/content/develop/use/manual/_index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: A developer's guide to Redis -linkTitle: Use Redis -title: Use Redis -weight: 50 ---- diff --git a/content/develop/use/manual/client-side-caching.md b/content/develop/use/manual/client-side-caching.md deleted file mode 100644 index 1e0e472971..0000000000 --- a/content/develop/use/manual/client-side-caching.md +++ /dev/null @@ -1,344 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'Server-assisted, client-side caching in Redis - - ' -linkTitle: Client-side caching -title: Client-side caching in Redis -weight: 2 ---- - -Client-side caching is a technique used to create high performance services. -It exploits the memory available on application servers, servers that are -usually distinct computers compared to the database nodes, to store some subset -of the database information directly in the application side. 
- -Normally when data is required, the application servers ask the database about -such information, like in the following diagram: - - - +-------------+ +----------+ - | | ------- GET user:1234 -------> | | - | Application | | Database | - | | <---- username = Alice ------- | | - +-------------+ +----------+ - -When client-side caching is used, the application will store the reply of -popular queries directly inside the application memory, so that it can -reuse such replies later, without contacting the database again: - - +-------------+ +----------+ - | | | | - | Application | ( No chat needed ) | Database | - | | | | - +-------------+ +----------+ - | Local cache | - | | - | user:1234 = | - | username | - | Alice | - +-------------+ - -While the application memory used for the local cache may not be very big, -the time needed in order to access the local computer memory is orders of -magnitude smaller compared to accessing a networked service like a database. -Since often the same small percentage of data are accessed frequently, -this pattern can greatly reduce the latency for the application to get data -and, at the same time, the load in the database side. - -Moreover there are many datasets where items change very infrequently. -For instance, most user posts in a social network are either immutable or -rarely edited by the user. Adding to this the fact that usually a small -percentage of the posts are very popular, either because a small set of users -have a lot of followers and/or because recent posts have a lot more -visibility, it is clear why such a pattern can be very useful. - -Usually the two key advantages of client-side caching are: - -1. Data is available with a very small latency. -2. The database system receives less queries, allowing it to serve the same dataset with a smaller number of nodes. - -## There are two hard problems in computer science... 
- -A problem with the above pattern is how to invalidate the information that -the application is holding, in order to avoid presenting stale data to the -user. For example after the application above locally cached the information -for user:1234, Alice may update her username to Flora. Yet the application -may continue to serve the old username for user:1234. - -Sometimes, depending on the exact application we are modeling, this isn't a -big deal, so the client will just use a fixed maximum "time to live" for the -cached information. Once a given amount of time has elapsed, the information -will no longer be considered valid. More complex patterns, when using Redis, -leverage the Pub/Sub system in order to send invalidation messages to -listening clients. This can be made to work but is tricky and costly from -the point of view of the bandwidth used, because often such patterns involve -sending the invalidation messages to every client in the application, even -if certain clients may not have any copy of the invalidated data. Moreover -every application query altering the data requires to use the [`PUBLISH`](/commands/publish) -command, costing the database more CPU time to process this command. - -Regardless of what schema is used, there is a simple fact: many very large -applications implement some form of client-side caching, because it is the -next logical step to having a fast store or a fast cache server. For this -reason Redis 6 implements direct support for client-side caching, in order -to make this pattern much simpler to implement, more accessible, reliable, -and efficient. - -## The Redis implementation of client-side caching - -The Redis client-side caching support is called _Tracking_, and has two modes: - -* In the default mode, the server remembers what keys a given client accessed, and sends invalidation messages when the same keys are modified. 
This costs memory in the server side, but sends invalidation messages only for the set of keys that the client might have in memory. -* In the _broadcasting_ mode, the server does not attempt to remember what keys a given client accessed, so this mode costs no memory at all in the server side. Instead clients subscribe to key prefixes such as `object:` or `user:`, and receive a notification message every time a key matching a subscribed prefix is touched. - -To recap, for now let's forget for a moment about the broadcasting mode, to -focus on the first mode. We'll describe broadcasting in more detail later. - -1. Clients can enable tracking if they want. Connections start without tracking enabled. -2. When tracking is enabled, the server remembers what keys each client requested during the connection lifetime (by sending read commands about such keys). -3. When a key is modified by some client, or is evicted because it has an associated expire time, or evicted because of a _maxmemory_ policy, all the clients with tracking enabled that may have the key cached, are notified with an _invalidation message_. -4. When clients receive invalidation messages, they are required to remove the corresponding keys, in order to avoid serving stale data. - -This is an example of the protocol: - -* Client 1 `->` Server: CLIENT TRACKING ON -* Client 1 `->` Server: GET foo -* (The server remembers that Client 1 may have the key "foo" cached) -* (Client 1 may remember the value of "foo" inside its local memory) -* Client 2 `->` Server: SET foo SomeOtherValue -* Server `->` Client 1: INVALIDATE "foo" - -This looks great superficially, but if you imagine 10k connected clients all -asking for millions of keys over long living connection, the server ends up -storing too much information. 
For this reason Redis uses two key ideas in -order to limit the amount of memory used server-side and the CPU cost of -handling the data structures implementing the feature: - -* The server remembers the list of clients that may have cached a given key in a single global table. This table is called the **Invalidation Table**. The invalidation table can contain a maximum number of entries. If a new key is inserted, the server may evict an older entry by pretending that such key was modified (even if it was not), and sending an invalidation message to the clients. Doing so, it can reclaim the memory used for this key, even if this will force the clients having a local copy of the key to evict it. -* Inside the invalidation table we don't really need to store pointers to clients' structures, that would force a garbage collection procedure when the client disconnects: instead what we do is just store client IDs (each Redis client has a unique numerical ID). If a client disconnects, the information will be incrementally garbage collected as caching slots are invalidated. -* There is a single keys namespace, not divided by database numbers. So if a client is caching the key `foo` in database 2, and some other client changes the value of the key `foo` in database 3, an invalidation message will still be sent. This way we can ignore database numbers reducing both the memory usage and the implementation complexity. - -## Two connections mode - -Using the new version of the Redis protocol, RESP3, supported by Redis 6, it is possible to run the data queries and receive the invalidation messages in the same connection. However many client implementations may prefer to implement client-side caching using two separated connections: one for data, and one for invalidation messages. For this reason when a client enables tracking, it can specify to redirect the invalidation messages to another connection by specifying the "client ID" of a different connection. 
Many data connections can redirect invalidation messages to the same connection, this is useful for clients implementing connection pooling. The two connections model is the only one that is also supported for RESP2 (which lacks the ability to multiplex different kind of information in the same connection). - -Here's an example of a complete session using the Redis protocol in the old RESP2 mode involving the following steps: enabling tracking redirecting to another connection, asking for a key, and getting an invalidation message once the key gets modified. - -To start, the client opens a first connection that will be used for invalidations, requests the connection ID, and subscribes via Pub/Sub to the special channel that is used to get invalidation messages when in RESP2 modes (remember that RESP2 is the usual Redis protocol, and not the more advanced protocol that you can use, optionally, with Redis 6 using the [`HELLO`](/commands/hello) command): - -``` -(Connection 1 -- used for invalidations) -CLIENT ID -:4 -SUBSCRIBE __redis__:invalidate -*3 -$9 -subscribe -$20 -__redis__:invalidate -:1 -``` - -Now we can enable tracking from the data connection: - -``` -(Connection 2 -- data connection) -CLIENT TRACKING on REDIRECT 4 -+OK - -GET foo -$3 -bar -``` - -The client may decide to cache `"foo" => "bar"` in the local memory. - -A different client will now modify the value of the "foo" key: - -``` -(Some other unrelated connection) -SET foo bar -+OK -``` - -As a result, the invalidations connection will receive a message that invalidates the specified key. - -``` -(Connection 1 -- used for invalidations) -*3 -$7 -message -$20 -__redis__:invalidate -*1 -$3 -foo -``` -The client will check if there are cached keys in this caching slot, and will evict the information that is no longer valid. - -Note that the third element of the Pub/Sub message is not a single key but -is a Redis array with just a single element. 
Since we send an array, if there -are groups of keys to invalidate, we can do that in a single message. -In case of a flush ([`FLUSHALL`](/commands/flushall) or [`FLUSHDB`](/commands/flushdb)), a `null` message will be sent. - -A very important thing to understand about client-side caching used with -RESP2 and a Pub/Sub connection in order to read the invalidation messages, -is that using Pub/Sub is entirely a trick **in order to reuse old client -implementations**, but actually the message is not really sent to a channel -and received by all the clients subscribed to it. Only the connection we -specified in the `REDIRECT` argument of the [`CLIENT`](/commands/client) command will actually -receive the Pub/Sub message, making the feature a lot more scalable. - -When RESP3 is used instead, invalidation messages are sent (either in the -same connection, or in the secondary connection when redirection is used) -as `push` messages (read the RESP3 specification for more information). - -## What tracking tracks - -As you can see clients do not need, by default, to tell the server what keys -they are caching. Every key that is mentioned in the context of a read-only -command is tracked by the server, because it *could be cached*. - -This has the obvious advantage of not requiring the client to tell the server -what it is caching. Moreover in many clients implementations, this is what -you want, because a good solution could be to just cache everything that is not -already cached, using a first-in first-out approach: we may want to cache a -fixed number of objects, every new data we retrieve, we could cache it, -discarding the oldest cached object. More advanced implementations may instead -drop the least used object or alike. - -Note that anyway if there is write traffic on the server, caching slots -will get invalidated during the course of the time. In general when the -server assumes that what we get we also cache, we are making a tradeoff: - -1. 
It is more efficient when the client tends to cache many things with a policy that welcomes new objects. -2. The server will be forced to retain more data about the client keys. -3. The client will receive useless invalidation messages about objects it did not cache. - -So there is an alternative described in the next section. - -## Opt-in caching - -Clients implementations may want to cache only selected keys, and communicate -explicitly to the server what they'll cache and what they will not. This will -require more bandwidth when caching new objects, but at the same time reduces -the amount of data that the server has to remember and the amount of -invalidation messages received by the client. - -In order to do this, tracking must be enabled using the OPTIN option: - - CLIENT TRACKING on REDIRECT 1234 OPTIN - -In this mode, by default, keys mentioned in read queries *are not supposed to be cached*, instead when a client wants to cache something, it must send a special command immediately before the actual command to retrieve the data: - - CLIENT CACHING YES - +OK - GET foo - "bar" - -The `CACHING` command affects the command executed immediately after it, -however in case the next command is [`MULTI`](/commands/multi), all the commands in the -transaction will be tracked. Similarly in case of Lua scripts, all the -commands executed by the script will be tracked. - -## Broadcasting mode - -So far we described the first client-side caching model that Redis implements. -There is another one, called broadcasting, that sees the problem from the -point of view of a different tradeoff, does not consume any memory on the -server side, but instead sends more invalidation messages to clients. -In this mode we have the following main behaviors: - -* Clients enable client-side caching using the `BCAST` option, specifying one or more prefixes using the `PREFIX` option. For instance: `CLIENT TRACKING on REDIRECT 10 BCAST PREFIX object: PREFIX user:`. 
If no prefix is specified at all, the prefix is assumed to be the empty string, so the client will receive invalidation messages for every key that gets modified. Instead if one or more prefixes are used, only keys matching one of the specified prefixes will be sent in the invalidation messages. -* The server does not store anything in the invalidation table. Instead it uses a different **Prefixes Table**, where each prefix is associated to a list of clients. -* No two prefixes can track overlapping parts of the keyspace. For instance, having the prefix "foo" and "foob" would not be allowed, since they would both trigger an invalidation for the key "foobar". However, just using the prefix "foo" is sufficient. -* Every time a key matching any of the prefixes is modified, all the clients subscribed to that prefix, will receive the invalidation message. -* The server will consume CPU proportional to the number of registered prefixes. If you have just a few, it is hard to see any difference. With a big number of prefixes the CPU cost can become quite large. -* In this mode the server can perform the optimization of creating a single reply for all the clients subscribed to a given prefix, and send the same reply to all. This helps to lower the CPU usage. - -## The NOLOOP option - -By default client-side tracking will send invalidation messages to the -client that modified the key. Sometimes clients want this, since they -implement very basic logic that does not involve automatically caching -writes locally. However, more advanced clients may want to cache even the -writes they are doing in the local in-memory table. In such case receiving -an invalidation message immediately after the write is a problem, since it -will force the client to evict the value it just cached. - -In this case it is possible to use the `NOLOOP` option: it works both -in normal and broadcasting mode. 
Using this option, clients are able to -tell the server they don't want to receive invalidation messages for keys -that they modified. - -## Avoiding race conditions - -When implementing client-side caching redirecting the invalidation messages -to a different connection, you should be aware that there is a possible -race condition. See the following example interaction, where we'll call -the data connection "D" and the invalidation connection "I": - - [D] client -> server: GET foo - [I] server -> client: Invalidate foo (somebody else touched it) - [D] server -> client: "bar" (the reply of "GET foo") - -As you can see, because the reply to the GET was slower to reach the -client, we received the invalidation message before the actual data that -is already no longer valid. So we'll keep serving a stale version of the -foo key. To avoid this problem, it is a good idea to populate the cache -when we send the command with a placeholder: - - Client cache: set the local copy of "foo" to "caching-in-progress" - [D] client-> server: GET foo. - [I] server -> client: Invalidate foo (somebody else touched it) - Client cache: delete "foo" from the local cache. - [D] server -> client: "bar" (the reply of "GET foo") - Client cache: don't set "bar" since the entry for "foo" is missing. - -Such a race condition is not possible when using a single connection for both -data and invalidation messages, since the order of the messages is always known -in that case. - -## What to do when losing connection with the server - -Similarly, if we lost the connection with the socket we use in order to -get the invalidation messages, we may end with stale data. In order to avoid -this problem, we need to do the following things: - -1. Make sure that if the connection is lost, the local cache is flushed. -2. Both when using RESP2 with Pub/Sub, or RESP3, ping the invalidation channel periodically (you can send PING commands even when the connection is in Pub/Sub mode!). 
If the connection looks broken and we are not able to receive ping backs, after a maximum amount of time, close the connection and flush the cache. - -## What to cache - -Clients may want to run internal statistics about the number of times -a given cached key was actually served in a request, to understand in the -future what is good to cache. In general: - -* We don't want to cache many keys that change continuously. -* We don't want to cache many keys that are requested very rarely. -* We want to cache keys that are requested often and change at a reasonable rate. For an example of key not changing at a reasonable rate, think of a global counter that is continuously [`INCR`](/commands/incr)emented. - -However simpler clients may just evict data using some random sampling just -remembering the last time a given cached value was served, trying to evict -keys that were not served recently. - -## Other hints for implementing client libraries - -* Handling TTLs: make sure you also request the key TTL and set the TTL in the local cache if you want to support caching keys with a TTL. -* Putting a max TTL on every key is a good idea, even if it has no TTL. This protects against bugs or connection issues that would make the client have old data in the local copy. -* Limiting the amount of memory used by clients is absolutely needed. There must be a way to evict old keys when new ones are added. - -## Limiting the amount of memory used by Redis - -Be sure to configure a suitable value for the maximum number of keys remembered by Redis or alternatively use the BCAST mode that consumes no memory at all on the Redis side. Note that the memory consumed by Redis when BCAST is not used, is proportional both to the number of keys tracked and the number of clients requesting such keys. 
- diff --git a/content/develop/use/manual/keyspace-notifications.md b/content/develop/use/manual/keyspace-notifications.md deleted file mode 100644 index 2577c62dea..0000000000 --- a/content/develop/use/manual/keyspace-notifications.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'Monitor changes to Redis keys and values in real time - - ' -linkTitle: Keyspace notifications -title: Redis keyspace notifications -weight: 4 ---- - -Keyspace notifications allow clients to subscribe to Pub/Sub channels in order -to receive events affecting the Redis data set in some way. - -Examples of events that can be received are: - -* All the commands affecting a given key. -* All the keys receiving an LPUSH operation. -* All the keys expiring in the database 0. - -Note: Redis Pub/Sub is *fire and forget* that is, if your Pub/Sub client disconnects, -and reconnects later, all the events delivered during the time the client was -disconnected are lost. - -### Type of events - -Keyspace notifications are implemented by sending two distinct types of events -for every operation affecting the Redis data space. For instance a [`DEL`](/commands/del) -operation targeting the key named `mykey` in database `0` will trigger -the delivering of two messages, exactly equivalent to the following two -[`PUBLISH`](/commands/publish) commands: - - PUBLISH __keyspace@0__:mykey del - PUBLISH __keyevent@0__:del mykey - -The first channel listens to all the events targeting -the key `mykey` and the other channel listens only to `del` operation -events on the key `mykey` - -The first kind of event, with `keyspace` prefix in the channel is called -a **Key-space notification**, while the second, with the `keyevent` prefix, -is called a **Key-event notification**. 
- -In the previous example a `del` event was generated for the key `mykey` resulting -in two messages: - -* The Key-space channel receives as message the name of the event. -* The Key-event channel receives as message the name of the key. - -It is possible to enable only one kind of notification in order to deliver -just the subset of events we are interested in. - -### Configuration - -By default keyspace event notifications are disabled because while not -very sensible the feature uses some CPU power. Notifications are enabled -using the `notify-keyspace-events` of redis.conf or via the **CONFIG SET**. - -Setting the parameter to the empty string disables notifications. -In order to enable the feature a non-empty string is used, composed of multiple -characters, where every character has a special meaning according to the -following table: - - K Keyspace events, published with __keyspace@__ prefix. - E Keyevent events, published with __keyevent@__ prefix. - g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... - $ String commands - l List commands - s Set commands - h Hash commands - z Sorted set commands - t Stream commands - d Module key type events - x Expired events (events generated every time a key expires) - e Evicted events (events generated when a key is evicted for maxmemory) - m Key miss events (events generated when a key that doesn't exist is accessed) - n New key events (Note: not included in the 'A' class) - A Alias for "g$lshztxed", so that the "AKE" string means all the events except "m" and "n". - -At least `K` or `E` should be present in the string, otherwise no event -will be delivered regardless of the rest of the string. - -For instance to enable just Key-space events for lists, the configuration -parameter must be set to `Kl`, and so forth. - -You can use the string `KEA` to enable most types of events. 
- -### Events generated by different commands - -Different commands generate different kind of events according to the following list. - -* [`DEL`](/commands/del) generates a `del` event for every deleted key. -* [`RENAME`](/commands/rename) generates two events, a `rename_from` event for the source key, and a `rename_to` event for the destination key. -* [`MOVE`](/commands/move) generates two events, a `move_from` event for the source key, and a `move_to` event for the destination key. -* [`COPY`](/commands/copy) generates a `copy_to` event. -* [`MIGRATE`](/commands/migrate) generates a `del` event if the source key is removed. -* [`RESTORE`](/commands/restore) generates a `restore` event for the key. -* [`EXPIRE`](/commands/expire) and all its variants ([`PEXPIRE`](/commands/pexpire), [`EXPIREAT`](/commands/expireat), [`PEXPIREAT`](/commands/pexpireat)) generate an `expire` event when called with a positive timeout (or a future timestamp). Note that when these commands are called with a negative timeout value or timestamp in the past, the key is deleted and only a `del` event is generated instead. -* [`SORT`](/commands/sort) generates a `sortstore` event when `STORE` is used to set a new key. If the resulting list is empty, and the `STORE` option is used, and there was already an existing key with that name, the result is that the key is deleted, so a `del` event is generated in this condition. -* [`SET`](/commands/set) and all its variants ([`SETEX`](/commands/setex), [`SETNX`](/commands/setnx),[`GETSET`](/commands/getset)) generate `set` events. However [`SETEX`](/commands/setex) will also generate an `expire` events. -* [`MSET`](/commands/mset) generates a separate `set` event for every key. -* [`SETRANGE`](/commands/setrange) generates a `setrange` event. -* [`INCR`](/commands/incr), [`DECR`](/commands/decr), [`INCRBY`](/commands/incrby), [`DECRBY`](/commands/decrby) commands all generate `incrby` events. 
-* [`INCRBYFLOAT`](/commands/incrbyfloat) generates an `incrbyfloat` event.
-* [`APPEND`](/commands/append) generates an `append` event.
-* [`LPUSH`](/commands/lpush) and [`LPUSHX`](/commands/lpushx) generate a single `lpush` event, even in the variadic case.
-* [`RPUSH`](/commands/rpush) and [`RPUSHX`](/commands/rpushx) generate a single `rpush` event, even in the variadic case.
-* [`HSET`](/commands/hset), [`HSETNX`](/commands/hsetnx) and [`HMSET`](/commands/hmset) all generate a single `hset` event. -* [`HINCRBY`](/commands/hincrby) generates an `hincrby` event. -* [`HINCRBYFLOAT`](/commands/hincrbyfloat) generates an `hincrbyfloat` event. -* [`HDEL`](/commands/hdel) generates a single `hdel` event, and an additional `del` event if the resulting hash is empty and the key is removed. -* [`SADD`](/commands/sadd) generates a single `sadd` event, even in the variadic case. -* [`SREM`](/commands/srem) generates a single `srem` event, and an additional `del` event if the resulting set is empty and the key is removed. -* [`SMOVE`](/commands/smove) generates an `srem` event for the source key, and an `sadd` event for the destination key. -* [`SPOP`](/commands/spop) generates an `spop` event, and an additional `del` event if the resulting set is empty and the key is removed. -* [`SINTERSTORE`](/commands/sinterstore), [`SUNIONSTORE`](/commands/sunionstore), [`SDIFFSTORE`](/commands/sdiffstore) generate `sinterstore`, `sunionstore`, `sdiffstore` events respectively. In the special case the resulting set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. -* `ZINCR` generates a `zincr` event. -* [`ZADD`](/commands/zadd) generates a single `zadd` event even when multiple elements are added. -* [`ZREM`](/commands/zrem) generates a single `zrem` event even when multiple elements are deleted. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. -* `ZREMBYSCORE` generates a single `zrembyscore` event. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. -* `ZREMBYRANK` generates a single `zrembyrank` event. When the resulting sorted set is empty and the key is generated, an additional `del` event is generated. 
-* [`ZDIFFSTORE`](/commands/zdiffstore), [`ZINTERSTORE`](/commands/zinterstore) and [`ZUNIONSTORE`](/commands/zunionstore) respectively generate `zdiffstore`, `zinterstore` and `zunionstore` events. In the special case the resulting sorted set is empty, and the key where the result is stored already exists, a `del` event is generated since the key is removed. -* [`XADD`](/commands/xadd) generates an `xadd` event, possibly followed an `xtrim` event when used with the `MAXLEN` subcommand. -* [`XDEL`](/commands/xdel) generates a single `xdel` event even when multiple entries are deleted. -* [`XGROUP CREATE`](/commands/xgroup-create) generates an `xgroup-create` event. -* [`XGROUP CREATECONSUMER`](/commands/xgroup-createconsumer) generates an `xgroup-createconsumer` event. -* [`XGROUP DELCONSUMER`](/commands/xgroup-delconsumer) generates an `xgroup-delconsumer` event. -* [`XGROUP DESTROY`](/commands/xgroup-destroy) generates an `xgroup-destroy` event. -* [`XGROUP SETID`](/commands/xgroup-setid) generates an `xgroup-setid` event. -* [`XSETID`](/commands/xsetid) generates an `xsetid` event. -* [`XTRIM`](/commands/xtrim) generates an `xtrim` event. -* [`PERSIST`](/commands/persist) generates a `persist` event if the expiry time associated with key has been successfully deleted. -* Every time a key with a time to live associated is removed from the data set because it expired, an `expired` event is generated. -* Every time a key is evicted from the data set in order to free memory as a result of the `maxmemory` policy, an `evicted` event is generated. -* Every time a new key is added to the data set, a `new` event is generated. - -**IMPORTANT** all the commands generate events only if the target key is really modified. For instance an [`SREM`](/commands/srem) deleting a non-existing element from a Set will not actually change the value of the key, so no event will be generated. 
-If in doubt about how events are generated for a given command, the simplest
-thing to do is to watch it yourself:
-* `>= 7.0`: Event type `new` added - diff --git a/content/develop/use/manual/keyspace.md b/content/develop/use/manual/keyspace.md deleted file mode 100644 index 56dfce68ac..0000000000 --- a/content/develop/use/manual/keyspace.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'Managing keys in Redis: Key expiration, scanning, altering and querying - the key space - - ' -linkTitle: Keyspace -title: Keyspace -weight: 1 ---- - -Redis keys are binary safe; this means that you can use any binary sequence as a -key, from a string like "foo" to the content of a JPEG file. -The empty string is also a valid key. - -A few other rules about keys: - -* Very long keys are not a good idea. For instance a key of 1024 bytes is a bad - idea not only memory-wise, but also because the lookup of the key in the - dataset may require several costly key-comparisons. Even when the task at hand - is to match the existence of a large value, hashing it (for example - with SHA1) is a better idea, especially from the perspective of memory - and bandwidth. -* Very short keys are often not a good idea. There is little point in writing - "u1000flw" as a key if you can instead write "user:1000:followers". The latter - is more readable and the added space is minor compared to the space used by - the key object itself and the value object. While short keys will obviously - consume a bit less memory, your job is to find the right balance. -* Try to stick with a schema. For instance "object-type:id" is a good - idea, as in "user:1000". Dots or dashes are often used for multi-word - fields, as in "comment:4321:reply.to" or "comment:4321:reply-to". -* The maximum allowed key size is 512 MB. - -## Altering and querying the key space - -There are commands that are not defined on particular types, but are useful -in order to interact with the space of keys, and thus, can be used with -keys of any type. 
- -For example the [`EXISTS`](/commands/exists) command returns 1 or 0 to signal if a given key -exists or not in the database, while the [`DEL`](/commands/del) command deletes a key -and associated value, whatever the value is. - - > set mykey hello - OK - > exists mykey - (integer) 1 - > del mykey - (integer) 1 - > exists mykey - (integer) 0 - -From the examples you can also see how [`DEL`](/commands/del) itself returns 1 or 0 depending on whether -the key was removed (it existed) or not (there was no such key with that -name). - -There are many key space related commands, but the above two are the -essential ones together with the [`TYPE`](/commands/type) command, which returns the kind -of value stored at the specified key: - - > set mykey x - OK - > type mykey - string - > del mykey - (integer) 1 - > type mykey - none - -## Key expiration - -Before moving on, we should look at an important Redis feature that works regardless of the type of value you're storing: key expiration. Key expiration lets you set a timeout for a key, also known as a "time to live", or "TTL". When the time to live elapses, the key is automatically destroyed. - -A few important notes about key expiration: - -* They can be set both using seconds or milliseconds precision. -* However the expire time resolution is always 1 millisecond. -* Information about expires are replicated and persisted on disk, the time virtually passes when your Redis server remains stopped (this means that Redis saves the date at which a key will expire). - -Use the [`EXPIRE`](/commands/expire) command to set a key's expiration: - - > set key some-value - OK - > expire key 5 - (integer) 1 - > get key (immediately) - "some-value" - > get key (after some time) - (nil) - -The key vanished between the two [`GET`](/commands/get) calls, since the second call was -delayed more than 5 seconds. 
In the example above we used [`EXPIRE`](/commands/expire) in -order to set the expire (it can also be used in order to set a different -expire to a key already having one, like [`PERSIST`](/commands/persist) can be used in order -to remove the expire and make the key persistent forever). However we -can also create keys with expires using other Redis commands. For example -using [`SET`](/commands/set) options: - - > set key 100 ex 10 - OK - > ttl key - (integer) 9 - -The example above sets a key with the string value `100`, having an expire -of ten seconds. Later the [`TTL`](/commands/ttl) command is called in order to check the -remaining time to live for the key. - -In order to set and check expires in milliseconds, check the [`PEXPIRE`](/commands/pexpire) and -the [`PTTL`](/commands/pttl) commands, and the full list of [`SET`](/commands/set) options. - -## Navigating the keyspace - -### Scan -To incrementally iterate over the keys in a Redis database in an efficient manner, you can use the [`SCAN`](/commands/scan) command. - -Since [`SCAN`](/commands/scan) allows for incremental iteration, returning only a small number of elements per call, it can be used in production without the downside of commands like [`KEYS`](/commands/keys) or [`SMEMBERS`](/commands/smembers) that may block the server for a long time (even several seconds) when called against big collections of keys or elements. - -However while blocking commands like [`SMEMBERS`](/commands/smembers) are able to provide all the elements that are part of a Set in a given moment. -The [`SCAN`](/commands/scan) family of commands only offer limited guarantees about the returned elements since the collection that we incrementally iterate can change during the iteration process. - -### Keys - -Another way to iterate over the keyspace is to use the [`KEYS`](/commands/keys) command, but this approach should be used with care, since [`KEYS`](/commands/keys) will block the Redis server until all keys are returned. 
- -**Warning**: consider [`KEYS`](/commands/keys) as a command that should only be used in production -environments with extreme care. - -[`KEYS`](/commands/keys) may ruin performance when it is executed against large databases. -This command is intended for debugging and special operations, such as changing -your keyspace layout. -Don't use [`KEYS`](/commands/keys) in your regular application code. -If you're looking for a way to find keys in a subset of your keyspace, consider -using [`SCAN`](/commands/scan) or [sets][tdts]. - -[tdts]: /topics/data-types#sets - -Supported glob-style patterns: - -* `h?llo` matches `hello`, `hallo` and `hxllo` -* `h*llo` matches `hllo` and `heeeello` -* `h[ae]llo` matches `hello` and `hallo,` but not `hillo` -* `h[^e]llo` matches `hallo`, `hbllo`, ... but not `hello` -* `h[a-b]llo` matches `hallo` and `hbllo` - -Use `\` to escape special characters if you want to match them verbatim. diff --git a/content/develop/use/manual/patterns/_index.md b/content/develop/use/manual/patterns/_index.md deleted file mode 100644 index 714a33b1c3..0000000000 --- a/content/develop/use/manual/patterns/_index.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: Novel patterns for working with Redis data structures -linkTitle: Patterns -title: Redis programming patterns -weight: 6 ---- - -The following documents describe some novel development patterns you can use with Redis. 
diff --git a/content/develop/use/manual/patterns/bulk-loading.md b/content/develop/use/manual/patterns/bulk-loading.md deleted file mode 100644 index 3ce70b0d98..0000000000 --- a/content/develop/use/manual/patterns/bulk-loading.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'Writing data in bulk using the Redis protocol - - ' -linkTitle: Bulk loading -title: Bulk loading -weight: 1 ---- - -Bulk loading is the process of loading Redis with a large amount of pre-existing data. Ideally, you want to perform this operation quickly and efficiently. This document describes some strategies for bulk loading data in Redis. - -## Bulk loading using the Redis protocol - -Using a normal Redis client to perform bulk loading is not a good idea -for a few reasons: the naive approach of sending one command after the other -is slow because you have to pay for the round trip time for every command. -It is possible to use pipelining, but for bulk loading of many records -you need to write new commands while you read replies at the same time to -make sure you are inserting as fast as possible. - -Only a small percentage of clients support non-blocking I/O, and not all the -clients are able to parse the replies in an efficient way in order to maximize -throughput. For all of these reasons the preferred way to mass import data into -Redis is to generate a text file containing the Redis protocol, in raw format, -in order to call the commands needed to insert the required data. - -For instance if I need to generate a large data set where there are billions -of keys in the form: `keyN -> ValueN' I will create a file containing the -following commands in the Redis protocol format: - - SET Key0 Value0 - SET Key1 Value1 - ... - SET KeyN ValueN - -Once this file is created, the remaining action is to feed it to Redis -as fast as possible. 
In the past the way to do this was to use the -`netcat` with the following command: - - (cat data.txt; sleep 10) | nc localhost 6379 > /dev/null - -However this is not a very reliable way to perform mass import because netcat -does not really know when all the data was transferred and can't check for -errors. In 2.6 or later versions of Redis the `redis-cli` utility -supports a new mode called **pipe mode** that was designed in order to perform -bulk loading. - -Using the pipe mode the command to run looks like the following: - - cat data.txt | redis-cli --pipe - -That will produce an output similar to this: - - All data transferred. Waiting for the last reply... - Last reply received from server. - errors: 0, replies: 1000000 - -The redis-cli utility will also make sure to only redirect errors received -from the Redis instance to the standard output. - -### Generating Redis Protocol - -The Redis protocol is extremely simple to generate and parse, and is -[Documented here](/topics/protocol). However in order to generate protocol for -the goal of bulk loading you don't need to understand every detail of the -protocol, but just that every command is represented in the following way: - - * - $ - - - ... - - -Where `` means "\r" (or ASCII character 13) and `` means "\n" (or ASCII character 10). - -For instance the command **SET key value** is represented by the following protocol: - - *3 - $3 - SET - $3 - key - $5 - value - -Or represented as a quoted string: - - "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n" - -The file you need to generate for bulk loading is just composed of commands -represented in the above way, one after the other. 
- -The following Ruby function generates valid protocol: - - def gen_redis_proto(*cmd) - proto = "" - proto << "*"+cmd.length.to_s+"\r\n" - cmd.each{|arg| - proto << "$"+arg.to_s.bytesize.to_s+"\r\n" - proto << arg.to_s+"\r\n" - } - proto - end - - puts gen_redis_proto("SET","mykey","Hello World!").inspect - -Using the above function it is possible to easily generate the key value pairs -in the above example, with this program: - - (0...1000).each{|n| - STDOUT.write(gen_redis_proto("SET","Key#{n}","Value#{n}")) - } - -We can run the program directly in pipe to redis-cli in order to perform our -first mass import session. - - $ ruby proto.rb | redis-cli --pipe - All data transferred. Waiting for the last reply... - Last reply received from server. - errors: 0, replies: 1000 - -### How the pipe mode works under the hood - -The magic needed inside the pipe mode of redis-cli is to be as fast as netcat -and still be able to understand when the last reply was sent by the server -at the same time. - -This is obtained in the following way: - -+ redis-cli --pipe tries to send data as fast as possible to the server. -+ At the same time it reads data when available, trying to parse it. -+ Once there is no more data to read from stdin, it sends a special **ECHO** -command with a random 20 byte string: we are sure this is the latest command -sent, and we are sure we can match the reply checking if we receive the same -20 bytes as a bulk reply. -+ Once this special final command is sent, the code receiving replies starts -to match replies with these 20 bytes. When the matching reply is reached it -can exit with success. - -Using this trick we don't need to parse the protocol we send to the server -in order to understand how many commands we are sending, but just the replies. - -However while parsing the replies we take a counter of all the replies parsed -so that at the end we are able to tell the user the amount of commands -transferred to the server by the mass insert session. 
diff --git a/content/develop/use/manual/patterns/distributed-locks.md b/content/develop/use/manual/patterns/distributed-locks.md deleted file mode 100644 index 55526d4fc2..0000000000 --- a/content/develop/use/manual/patterns/distributed-locks.md +++ /dev/null @@ -1,242 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'A distributed lock pattern with Redis - - ' -linkTitle: Distributed locks -title: Distributed Locks with Redis -weight: 1 ---- -Distributed locks are a very useful primitive in many environments where -different processes must operate with shared resources in a mutually -exclusive way. - -There are a number of libraries and blog posts describing how to implement -a DLM (Distributed Lock Manager) with Redis, but every library uses a different -approach, and many use a simple approach with lower guarantees compared to -what can be achieved with slightly more complex designs. - -This page describes a more canonical algorithm to implement -distributed locks with Redis. We propose an algorithm, called **Redlock**, -which implements a DLM which we believe to be safer than the vanilla single -instance approach. We hope that the community will analyze it, provide -feedback, and use it as a starting point for the implementations or more -complex or alternative designs. - -## Implementations - -Before describing the algorithm, here are a few links to implementations -already available that can be used for reference. - -* [Redlock-rb](https://github.com/antirez/redlock-rb) (Ruby implementation). There is also a [fork of Redlock-rb](https://github.com/leandromoreira/redlock-rb) that adds a gem for easy distribution. -* [Redlock-py](https://github.com/SPSCommerce/redlock-py) (Python implementation). -* [Pottery](https://github.com/brainix/pottery#redlock) (Python implementation). -* [Aioredlock](https://github.com/joanvila/aioredlock) (Asyncio Python implementation). 
-* [Redlock-php](https://github.com/ronnylt/redlock-php) (PHP implementation). -* [PHPRedisMutex](https://github.com/malkusch/lock#phpredismutex) (further PHP implementation). -* [cheprasov/php-redis-lock](https://github.com/cheprasov/php-redis-lock) (PHP library for locks). -* [rtckit/react-redlock](https://github.com/rtckit/reactphp-redlock) (Async PHP implementation). -* [Redsync](https://github.com/go-redsync/redsync) (Go implementation). -* [Redisson](https://github.com/mrniko/redisson) (Java implementation). -* [Redis::DistLock](https://github.com/sbertrang/redis-distlock) (Perl implementation). -* [Redlock-cpp](https://github.com/jacket-code/redlock-cpp) (C++ implementation). -* [Redis-plus-plus](https://github.com/sewenew/redis-plus-plus/#redlock) (C++ implementation). -* [Redlock-cs](https://github.com/kidfashion/redlock-cs) (C#/.NET implementation). -* [RedLock.net](https://github.com/samcook/RedLock.net) (C#/.NET implementation). Includes async and lock extension support. -* [ScarletLock](https://github.com/psibernetic/scarletlock) (C# .NET implementation with configurable datastore). -* [Redlock4Net](https://github.com/LiZhenNet/Redlock4Net) (C# .NET implementation). -* [node-redlock](https://github.com/mike-marcacci/node-redlock) (NodeJS implementation). Includes support for lock extension. -* [Deno DLM](https://github.com/oslabs-beta/Deno-Redlock) (Deno implementation) -* [Rslock](https://github.com/hexcowboy/rslock) (Rust implementation). Includes async and lock extension support. - -## Safety and Liveness Guarantees - -We are going to model our design with just three properties that, from our point of view, are the minimum guarantees needed to use distributed locks in an effective way. - -1. Safety property: Mutual exclusion. At any given moment, only one client can hold a lock. -2. Liveness property A: Deadlock free. Eventually it is always possible to acquire a lock, even if the client that locked a resource crashes or gets partitioned. -3. 
Liveness property B: Fault tolerance. As long as the majority of Redis nodes are up, clients are able to acquire and release locks. - -## Why Failover-based Implementations Are Not Enough - -To understand what we want to improve, let’s analyze the current state of affairs with most Redis-based distributed lock libraries. - -The simplest way to use Redis to lock a resource is to create a key in an instance. The key is usually created with a limited time to live, using the Redis expires feature, so that eventually it will get released (property 2 in our list). When the client needs to release the resource, it deletes the key. - -Superficially this works well, but there is a problem: this is a single point of failure in our architecture. What happens if the Redis master goes down? -Well, let’s add a replica! And use it if the master is unavailable. This is unfortunately not viable. By doing so we can’t implement our safety property of mutual exclusion, because Redis replication is asynchronous. - -There is a race condition with this model: - -1. Client A acquires the lock in the master. -2. The master crashes before the write to the key is transmitted to the replica. -3. The replica gets promoted to master. -4. Client B acquires the lock to the same resource A already holds a lock for. **SAFETY VIOLATION!** - -Sometimes it is perfectly fine that, under special circumstances, for example during a failure, multiple clients can hold the lock at the same time. -If this is the case, you can use your replication based solution. Otherwise we suggest to implement the solution described in this document. 
- -## Correct Implementation with a Single Instance - -Before trying to overcome the limitation of the single instance setup described above, let’s check how to do it correctly in this simple case, since this is actually a viable solution in applications where a race condition from time to time is acceptable, and because locking into a single instance is the foundation we’ll use for the distributed algorithm described here. - -To acquire the lock, the way to go is the following: - - SET resource_name my_random_value NX PX 30000 - -The command will set the key only if it does not already exist (`NX` option), with an expire of 30000 milliseconds (`PX` option). -The key is set to a value “my\_random\_value”. This value must be unique across all clients and all lock requests. - -Basically the random value is used in order to release the lock in a safe way, with a script that tells Redis: remove the key only if it exists and the value stored at the key is exactly the one I expect to be. This is accomplished by the following Lua script: - - if redis.call("get",KEYS[1]) == ARGV[1] then - return redis.call("del",KEYS[1]) - else - return 0 - end - -This is important in order to avoid removing a lock that was created by another client. For example a client may acquire the lock, get blocked performing some operation for longer than the lock validity time (the time at which the key will expire), and later remove the lock, that was already acquired by some other client. -Using just [`DEL`](/commands/del) is not safe as a client may remove another client's lock. With the above script instead every lock is “signed” with a random string, so the lock will be removed only if it is still the one that was set by the client trying to remove it. - -What should this random string be? We assume it’s 20 bytes from `/dev/urandom`, but you can find cheaper ways to make it unique enough for your tasks. 
-For example a safe pick is to seed RC4 with `/dev/urandom`, and generate a pseudo random stream from that. -A simpler solution is to use a UNIX timestamp with microsecond precision, concatenating the timestamp with a client ID. It is not as safe, but probably sufficient for most environments. - -The "lock validity time" is the time we use as the key's time to live. It is both the auto release time, and the time the client has in order to perform the operation required before another client may be able to acquire the lock again, without technically violating the mutual exclusion guarantee, which is only limited to a given window of time from the moment the lock is acquired. - -So now we have a good way to acquire and release the lock. With this system, reasoning about a non-distributed system composed of a single, always available, instance, is safe. Let’s extend the concept to a distributed system where we don’t have such guarantees. - -## The Redlock Algorithm - -In the distributed version of the algorithm we assume we have N Redis masters. Those nodes are totally independent, so we don’t use replication or any other implicit coordination system. We already described how to acquire and release the lock safely in a single instance. We take for granted that the algorithm will use this method to acquire and release the lock in a single instance. In our examples we set N=5, which is a reasonable value, so we need to run 5 Redis masters on different computers or virtual machines in order to ensure that they’ll fail in a mostly independent way. - -In order to acquire the lock, the client performs the following operations: - -1. It gets the current time in milliseconds. -2. It tries to acquire the lock in all the N instances sequentially, using the same key name and random value in all the instances. During step 2, when setting the lock in each instance, the client uses a timeout which is small compared to the total lock auto-release time in order to acquire it. 
For example if the auto-release time is 10 seconds, the timeout could be in the ~ 5-50 milliseconds range. This prevents the client from remaining blocked for a long time trying to talk with a Redis node which is down: if an instance is not available, we should try to talk with the next instance ASAP. -3. The client computes how much time elapsed in order to acquire the lock, by subtracting from the current time the timestamp obtained in step 1. If and only if the client was able to acquire the lock in the majority of the instances (at least 3), and the total time elapsed to acquire the lock is less than lock validity time, the lock is considered to be acquired. -4. If the lock was acquired, its validity time is considered to be the initial validity time minus the time elapsed, as computed in step 3. -5. If the client failed to acquire the lock for some reason (either it was not able to lock N/2+1 instances or the validity time is negative), it will try to unlock all the instances (even the instances it believed it was not able to lock). - -### Is the Algorithm Asynchronous? - -The algorithm relies on the assumption that while there is no synchronized clock across the processes, the local time in every process updates at approximately at the same rate, with a small margin of error compared to the auto-release time of the lock. This assumption closely resembles a real-world computer: every computer has a local clock and we can usually rely on different computers to have a clock drift which is small. - -At this point we need to better specify our mutual exclusion rule: it is guaranteed only as long as the client holding the lock terminates its work within the lock validity time (as obtained in step 3), minus some time (just a few milliseconds in order to compensate for clock drift between processes). 
- -This paper contains more information about similar systems requiring a bound *clock drift*: [Leases: an efficient fault-tolerant mechanism for distributed file cache consistency](http://dl.acm.org/citation.cfm?id=74870). - -### Retry on Failure - -When a client is unable to acquire the lock, it should try again after a random delay in order to try to desynchronize multiple clients trying to acquire the lock for the same resource at the same time (this may result in a split brain condition where nobody wins). Also the faster a client tries to acquire the lock in the majority of Redis instances, the smaller the window for a split brain condition (and the need for a retry), so ideally the client should try to send the [`SET`](/commands/set) commands to the N instances at the same time using multiplexing. - -It is worth stressing how important it is for clients that fail to acquire the majority of locks, to release the (partially) acquired locks ASAP, so that there is no need to wait for key expiry in order for the lock to be acquired again (however if a network partition happens and the client is no longer able to communicate with the Redis instances, there is an availability penalty to pay as it waits for key expiration). - -### Releasing the Lock - -Releasing the lock is simple, and can be performed whether or not the client believes it was able to successfully lock a given instance. - -### Safety Arguments - -Is the algorithm safe? Let's examine what happens in different scenarios. - -To start let’s assume that a client is able to acquire the lock in the majority of instances. All the instances will contain a key with the same time to live. However, the key was set at different times, so the keys will also expire at different times. 
But if the first key was set at worst at time T1 (the time we sample before contacting the first server) and the last key was set at worst at time T2 (the time we obtained the reply from the last server), we are sure that the first key to expire in the set will exist for at least `MIN_VALIDITY=TTL-(T2-T1)-CLOCK_DRIFT`. All the other keys will expire later, so we are sure that the keys will be simultaneously set for at least this time. - -During the time that the majority of keys are set, another client will not be able to acquire the lock, since N/2+1 SET NX operations can’t succeed if N/2+1 keys already exist. So if a lock was acquired, it is not possible to re-acquire it at the same time (violating the mutual exclusion property). - -However we want to also make sure that multiple clients trying to acquire the lock at the same time can’t simultaneously succeed. - -If a client locked the majority of instances using a time near, or greater, than the lock maximum validity time (the TTL we use for SET basically), it will consider the lock invalid and will unlock the instances, so we only need to consider the case where a client was able to lock the majority of instances in a time which is less than the validity time. In this case for the argument already expressed above, for `MIN_VALIDITY` no client should be able to re-acquire the lock. So multiple clients will be able to lock N/2+1 instances at the same time (with "time" being the end of Step 2) only when the time to lock the majority was greater than the TTL time, making the lock invalid. - -### Liveness Arguments - -The system liveness is based on three main features: - -1. The auto release of the lock (since keys expire): eventually keys are available again to be locked. -2. The fact that clients, usually, will cooperate removing the locks when the lock was not acquired, or when the lock was acquired and the work terminated, making it likely that we don’t have to wait for keys to expire to re-acquire the lock. 
-3. The fact that when a client needs to retry a lock, it waits a time which is comparably greater than the time needed to acquire the majority of locks, in order to probabilistically make split brain conditions during resource contention unlikely. - -However, we pay an availability penalty equal to [`TTL`](/commands/ttl) time on network partitions, so if there are continuous partitions, we can pay this penalty indefinitely. -This happens every time a client acquires a lock and gets partitioned away before being able to remove the lock. - -Basically if there are infinite continuous network partitions, the system may become not available for an infinite amount of time. - -### Performance, Crash Recovery and fsync - -Many users using Redis as a lock server need high performance in terms of both latency to acquire and release a lock, and number of acquire / release operations that it is possible to perform per second. In order to meet this requirement, the strategy to talk with the N Redis servers to reduce latency is definitely multiplexing (putting the socket in non-blocking mode, send all the commands, and read all the commands later, assuming that the RTT between the client and each instance is similar). - -However there is another consideration around persistence if we want to target a crash-recovery system model. - -Basically to see the problem here, let’s assume we configure Redis without persistence at all. A client acquires the lock in 3 of 5 instances. One of the instances where the client was able to acquire the lock is restarted, at this point there are again 3 instances that we can lock for the same resource, and another client can lock it again, violating the safety property of exclusivity of lock. - -If we enable AOF persistence, things will improve quite a bit. For example we can upgrade a server by sending it a [`SHUTDOWN`](/commands/shutdown) command and restarting it. 
Because Redis expires are semantically implemented so that time still elapses when the server is off, all our requirements are fine. -However everything is fine as long as it is a clean shutdown. What about a power outage? If Redis is configured, as by default, to fsync on disk every second, it is possible that after a restart our key is missing. In theory, if we want to guarantee the lock safety in the face of any kind of instance restart, we need to enable `fsync=always` in the persistence settings. This will affect performance due to the additional sync overhead. - -However things are better than they look like at a first glance. Basically, -the algorithm safety is retained as long as when an instance restarts after a -crash, it no longer participates to any **currently active** lock. This means that the -set of currently active locks when the instance restarts were all obtained -by locking instances other than the one which is rejoining the system. - -To guarantee this we just need to make an instance, after a crash, unavailable -for at least a bit more than the max [`TTL`](/commands/ttl) we use. This is the time needed -for all the keys about the locks that existed when the instance crashed to -become invalid and be automatically released. - -Using *delayed restarts* it is basically possible to achieve safety even -without any kind of Redis persistence available, however note that this may -translate into an availability penalty. For example if a majority of instances -crash, the system will become globally unavailable for [`TTL`](/commands/ttl) (here globally means -that no resource at all will be lockable during this time). - -### Making the algorithm more reliable: Extending the lock - -If the work performed by clients consists of small steps, it is possible to -use smaller lock validity times by default, and extend the algorithm implementing -a lock extension mechanism. 
Basically the client, if in the middle of the -computation while the lock validity is approaching a low value, may extend the -lock by sending a Lua script to all the instances that extends the TTL of the key -if the key exists and its value is still the random value the client assigned -when the lock was acquired. - -The client should only consider the lock re-acquired if it was able to extend -the lock into the majority of instances, and within the validity time -(basically the algorithm to use is very similar to the one used when acquiring -the lock). - -However this does not technically change the algorithm, so the maximum number -of lock reacquisition attempts should be limited, otherwise one of the liveness -properties is violated. - -### Disclaimer about consistency - -Please consider thoroughly reviewing the [Analysis of Redlock](#analysis-of-redlock) section at the end of this page. -Martin Kleppmann's article and antirez's answer to it are very relevant. -If you are concerned about consistency and correctness, you should pay attention to the following topics: - -1. You should implement fencing tokens. - This is especially important for processes that can take significant time and applies to any distributed locking system. - Extending locks' lifetime is also an option, but don't assume that a lock is retained as long as the process that had acquired it is alive. -2. Redis does not use a monotonic clock for the TTL expiration mechanism. - That means that a wall-clock shift may result in a lock being acquired by more than one process. - Even though the problem can be mitigated by preventing admins from manually setting the server's time and setting up NTP properly, there's still a chance of this issue occurring in real life and compromising consistency. - -## Want to help? - -If you are into distributed systems, it would be great to have your opinion / analysis. Also reference implementations in other languages could be great. - -Thanks in advance! 
- -## Analysis of Redlock ---- - -1. Martin Kleppmann [analyzed Redlock here](http://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html). A counterpoint to this analysis can be [found here](http://antirez.com/news/101). diff --git a/content/develop/use/manual/patterns/indexes/2idx_0.png b/content/develop/use/manual/patterns/indexes/2idx_0.png deleted file mode 100644 index 8cf870885fb73615e20407d248e51860fd1aedcc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 23007 zcmZU)1yCGMv^~r&3t0%Vc(AazySoOrKyZQ+Ah^3M4iN}W&_GCnyW8Tff#9wQ?hfCQ z{NDS&s*ftFW~QgRXL|bHd(OFcqtsR9FwsfSk&uuu735_!kdS~N#6L0^6>-J)2`U>B z5_+kPw6wZ{v@}%R#qo`eoh1^Id{ko6lUJHHggvbn5opC8OOKzeG^UKY59U zMwwNGy$JSmlmRMNC=1FcUX}@SceisKPmPH4dsYjOI{@ z#E3lX)BBjQRipRu^VW^4E$}V_=Q#jL?M-D^IZ~_`mAkAEJ>hTZZ4I=FA?H#zk4$$X=r?-QYos(Jz?JS^=GbR^v7U*6V(FOOQ7ovo6%DiMX(tghe4ee zEBgn6I(WN9pmSirs-mOwgLP6$uI6iGLV@tDZvExmI(RIK>K00=;qpW%QfU|O@M?ql zI(w#|>^5CM_t%N88G+p{-@GmqAC_SVQ)UF|-YQecNl51oijC@OX0S|a*A?DE*V)7= z(owZd@6#Iik)2v=msr5+9e@o^;6+V=3I=|Ilchjm%87&-f;XzjTNaW@u|%ZF-6mAu z={~ouy1q~1QvRSa2uKkPISD+9xUa8789RDcz5X1D#TMs>QBgTHz*^EnTHCz*n>!g& zpEy!o?@DCo?-SnBT_RM$g}5M8{2=sEiQzg*t}K)?l#jignQ$UdFh?1olLVPOP%0MS z0w;{cMsIT{!%l#8EHnH7e1{W{Vz316Ng$z1)H89#h~04ARO;U;+8^0jd7cC%yH;%ZoTj#H4i$u*ca;= z>u+61uW+uA!=%Q(323x4+9YHo==W>&yY|zg6@-2QDFri^E94mFD6E!r8Yl#RO`_3s zDYmGwh+5a{6OJ?205RG#S~1p4$mYA`pJ@1N;Elhs!Lu>`P4k<4oc^n$VodL{MU|zY zWgV9{mz-ruKgP?KuP_U^6QvgvMP-|1$-kM}2=NfIW@^g|zu5WqVW7X4Eg3UuD1kM( znL$`WZmjw@iw~7Q(H+Je<=wmV3NlSLeTPQF=K0t4j90YBACJe5QI0W>vDaG}IPeV# zaPiL>c$h^rWtjgkmuL*Ci+uG`akfLVkJ`f>jti!>N)piyPCQM#P5i}7uJuRbv%2<6 z4;?}V3iXx(k#ABu&aa|$7`5xaauq0QRqJfz6BZh2`wzuhau7$o*N}Z;Zm6O#B(3t* zL#EikUjMb6sJy67v*cYW>dOvAYS~=f+}GxYnp!{QxaBHUPs#};2NaoAZCka!|4OL& z=n;9&xA$fbhuoVyNwr2bV_5TbbV-Oqv_sy$zi3frqEYaoWb<=>-6cbD)mFoYj{rLp zQ#iMP^=Ko0<%ADopv}g(m~;i@PmYDMVW-hZhjse|+X=fsP|Fd6-pEe`+PP1H9y+a|MusOPEDJkt0b)S9KcMHV;8S#T1tPrZ{9N0v~Zgi 
zmKQdfvuHM<@sn%3qP=1y`pt$_b;qA@-tO5xpZGJBoe@4lJ{vwOyZb8MDwQ%(Q{f6J zs~4xy=FH){6MNGha=WOz#OI&R#pkdEeh8M4Gm#q!ycKXEFC;VOJWDwmj32C3?N*ig zF8iI)igXKe*q)7n$TJz3?4UzD1)buW+H&~gQ10OFu(rRszq2qW!tvbbxv$8zZa)iF zVG{4Y&F}iQ%8qxt1j|~RXYB?{2HOjNkZ-)Mch9^VG;C=DEi2g%EwA>^{5QX^C9huW zFzqt6YHbu6U8h}cU3#LbBmY842d7}Tq0bXs(#Z46<4-(iAtu8yBo!f8#nr&)f;6zW zNpJ6Qba5EzDhaTO2sAPInN{!n@h^;^9;7~|eqJ)B7hk`*sozx{Wy@X?Z`fnx$%l(a zjO$Hx%siL;x-T=*?1h%h2iJ7L8$rjqkMm~x)>W_FUY-^{YfQ;W`J~%8-CUmQjO9%0 zY%XGDM11hNN$z&#G9R=Q`X@BCGqy9F#!tgy+`9!s9JchSC@wv9o|yx({uEWiPUAf9 z{nO@9t$g)ApC^)dNX$GMWEyfPWI}$Q|K9kW#TCpw{6^90n7gs!w6eykx!+?ztmS!g zW@U@09IKVm@G|OVI5p{)4=+jQ_-*X^=XjcC{9T6#SCc|nTQ^9y2sZ-eNh+#&)dU?~ z7e}%8#V0nD)}Nl-)XX>t-%?-8EygU&4b@Og-!}&tI2Df8m3=+j!5aQ(Af9^r>iXy4 za7xBPBZ~+5?1zP+r8}pXj?B+gzjJnTzBG&ZrbQEXp>$DP6wi59T6p)Z+;!gN+#%;J z=CMBE!QK@+tY#_Mm8{qFmJZYCI*-l|nGf;A7JObHUgTGIc$l2!Eq+1aEI$4!|FxbA zzAw++_JDa?k5G&Od1A0rGJPmLExm>kLHhJ__P3&MN1Jtwp>g(Xyu;X_XM4qpyjQSgSTm^5AWzw?kcNKRNznr`8sg;BqH7}JH)L!O^syg0=?b-j^$PAlefA#ite8b74QsrEEeL6q6M zfx_SKOY2JWmQGu$11_%b-RNxI$ifLLwu+F@(FEvFgDxhK)Z3A=sak>= zI1fH;frf6b1%VGyTmGl%EJ#1xK_1GP`vbS?^ z74j0L`g?~E;`s40I~DZrEpE1=RJtnaP-#aOODHcJ4;u%S7&;UR6>+h6BcvfC`(Jm& znJAUDo12pmJG-Z+Cz~fXo1=>rJEx$aAUg*aI~Nx#;tp0VJd$&p0xc zuI4T_PHr}i4$#MOP0bwL-9)LV9w++WzkknZ>1Ff(W^!=-@3jyw$o|;E&dJ8X{=Z`* zx{5qr6;ikHvb58cv9Y&wa7D}^#>>ek^7sD#U(5fS@&EMH`@cPT|8LL#)AC+}>>h=$SM84v>wN-mFNiPwH~(p+Kv zyhI*&e|M2}7&9f#j#UQ5dz@iVZu#M79eyFh$34pzlFSsd`g6J)N`E4+EUO2YjK^a} zrxuiKlgmc?DbDl|Eg@{SX$DshcLzPE@3uS_Y6OLRy&My6i0|(XS_5eu zv2@v%8cxlVyardFc;`J=GmY&_K|eS!DhOUKIpUgQ$3h*r4Di4ww4(A0M*@l9M&h45 zA38I^LxDvwn1oOOw-iEw4B~vG(nB)}EmS_b(MaA}BJ=p5u218Xe;3A$UV_sR3xEP6 zCdT|m2dCk3_i}as0=Kl9&?n`1d) z5HwZ_ia)Di_r2<3dUN=D;~oix%k10h14_JTT=aOR^*MYHH=QL5nOva4;xoqcF0h?< zRaR=(dV7ocuxMWe1<(`ZaLnLm$#c8L%MTz*#yD4qDRA7k>ktMfP;smoQi^3{L73+> zr#Grp#*)Xv*CbGzq*V5pLHq<36Pd9I$h2=Jh=ztIZznk1+BJ}-Z=u- z$N1yJYEl9RBnkx1WU3*`hGB@UgZNTjbP!+tA6IdJu%IpVQd`onKMmzQuHqn{j~$r} zcQVis+(~rFq8x#O(Z5 
znk#-^h(Df0j1+bVR9!nHKA5?W-jSA%c+LokH8$R^YxJb42z-el#~x$`C4*!ae#k>B zZF#SGNXtz^JSoevx9Tte|L*|?;;Nqpv$*jfz9_T|XPL(22T!ch(~8dm2}k+e4qb6Y zhMf(%jnE%2U;f(WCX_rc=A~qJJJfSoeeNt$+ux_{Bl}*GGHuUTnB}Wiw1ZTzVw~y< zL=|B!Z-6T_L3jY}8Y4_2{%*WdrCZxlh#{5fniB|>c%IR!$=IIh>N>AS5?5paX#*~! z3yDL4R@s3Z64GPU(pibI%`DQnkRv3dr1fgI7) z%VzCT7rZ1xJfi{MrhmHu6CkJ^Y4_V)c6Y4nP8-~5a4&@f>;i}Ot&4oY-%0-nMS}eS zNdeRwz@sF^4G~c7mt+Kexj5CwUy7?gXf1yDc(H*~-{Ge#{t=)SnU&@>xE#U4Y-H$T zj*$uUJNm@=;{nE`>u%yM^oC4Rfc|+;!0>?{r2yUvHwcw(^kldFy^3q`D{6CXWn^9e z_O0l2Hh?cW1IPFkGw9R#ZZHza2d7S#c-s=)hY|^PgjOcS5rA=o1T@D5qYP=j%>Cqj z91zT1OHWj|$KEDBC4kLm?f0x}IFcP7gd?!vIhon}_F{<~ikbqR=#MB~qT(0&RWSm} z&plMUFI~6oNf0Xh1{J`|ze=SkRY-2pp@ejQ+Td+VP3wAXK^dzUe0d5iLi0jdj; zYpI-=$NMh|yy?nykZ?FFY}P#A?IG``5v2ob3S_3nnX&n;T_Y^WW7K-1$x@b4HS?AU z=N+!Umsv0?L;x?vc{j^{-Ss&@OA>-`-+wk#&5Hf3ZSBny)y62rY;GaMg|D@*@Wr)% zQkKh`T1M6P>R;(^P~3r);41`43QMI1hXOHmrsC25HgcPLSP4O?11>g9e>f&XIL;hAQu*T=58-$kBgdI&4-V|DO-Y|kCf)K zRG@YX8oY5Twlz8slmi=frVX}eXVaA&j>q!iVgdARwFXAG13D;URV2D4J>*)+i`+Um znL(}2TjZc)grWRs4^6PbFQy!Y!97;1*{eO(WBPf3JH-$V)9;q(#Vz1nyyt>vUE!FJ zAh;LEjTvM}O3q%PiveH1_;}g&KFA{ah zU3>pr{P8BZ6+eEOWzNoQTDLv0=lF1>YS}RzP3L(Z6WwYw!De9RhBtl{EEQI zIyE>OiMins?!?LkQL)5O6c^(5vV&0-oRWqMH*qpzWF$(w2bMnB^=N~?;?@aQ4hx1O zV&+-ys-jkaWb7aOy>5CZet7o<1~ybZ0_lY=dC8WMFWHX~K8#Dfv+9l?#*}T|2p9f{ zcSP{OR=;#wQ9rw44A|_%BKj|nGAh57S#}8G)rcVhi{OH-qE~olI`7sJDj!74=@u3G z+Gs;)?0EF!kOpFv!pXl2!?lOs~B=YaBsul$gsrdtS9)p;S%n^+>iV*Vefy@{j*0sN`5=*{|UoPqOL|IW6h4^ zl$$-DobCd)=S2-Ep3lZeTOUe`^TQQK6g39t(iuFu?hDX^b)W2pNN@rpLR8DeeuIJW zc(Ik@4(X>j)_OCSYaQtn%nAHKu9{!IXPZ6+X`oHC0GNxweFf?TAF4u-K!$kP&|WmP zJq3ZDn#oz*cau~fTOENevJDUOhuap<1{SE~B}Xwuqbb#6;Y?PC&#?)g+cTx;yH#TW z(1X);OS#atI4Y`!R#JcdGNZ15ILR+pD?+;A80_6HwG1wrru?T2i1)8C6lFV7|aQ6dlROn#;VQl1iq zm66agYuv~#)={c+i-lc#yo59*4O^+MOSQSgPd@_!z;#q*#sO}YlWi%ut|n1pSae6{ zX+iITm*h*PhI1-_>^^Di|NG7G3(FRvL zI|mZR4n&b8*YgWJEpcKlm zcgL5a`<5Lu@!kQ=i^)3XN!;3v!P*P}76tYqc^6`m(F%P=14&6$Fga_Bc{nM_E`B~Y zm=D|Z0e0La!4`RGC?6vR!k`6kBSp262!g2@7p387FsD@J3k{tpvGip)23R5+06Pj8 
zkjT7hQb{S*(N*>J>f=r~PaG?MTKF#n%0=Akt#X&6w}k4b?u%7dZ{}!;O*^Tvh#BN*F})ar#1Fp{+1Gu!D;v^Ty~Xg{0|BM7KnN(4rIPN~pTUw*2P6KE zPho?)S-~QsjTMyX3*kg41K1G>52nddANUYK0s|!UTzZoo%vTp!L$mTlj5PFP)Gzn+ zl0Tk0+ur>bq)fg<5g9H$A0E6-d8&=Ztnji?+fRrQ%4b^Yrx*Ic3S7%=PN__Pp8e6r4;S# z*2c#niZT~gOmE;>$ATaS)9RCiIDQ;1TFx^-taWGf=JP~`kcP;yg@1>LBtmcZ=n0r;N$agYS1xaXOrm9uGYs6 z#Qd2T)jjT{+JWi6HFDTnzemG%7&MVy2sWweOsC9Y^@EnMGVHj2RsG?EDUaA!)nEZ8 zyQI1ZlTxY<7|R6#-O?e`*p!;uEjEk~^md(P?TgCpn@N|!tkyt#BZn&%{=*p(Du21` zee|{r?V1~E2FleN{prSWFw23RoLwXTBOdO+r>%$UcoYMZ45$`*=kIsHU>)Yb5910*Z4uk)>=T?LAeU94wVxRByzL)B0 z5dBOz#J4YEUA3RjA}pIl%_juT3**Q?<{5D0GnIO;5^Xw;@gUNg>33)+TysXd%+ksT ze*3*cKECTs;gsdxtZ7aln1^zC7iqN9Dgi?}NC~wg6kVEPrnp59$GRlE06~}m9%BMOr zTW4UYuiScPP*d~fO^e}G+aLX7+Z|giN!9U|^NKd9AOhf1)=!CTtXWDAiR&fB->Vi* zpCFugHa=ieQn*j~4T8hY6_Ye}wLX;GbhSL(-w1mgmIdenUG7pG%`r&ZbHd}XJ<>_u zlxGo|ge`|(ML+3IeEKDg$K)gj#Dlk!WBs|F9K8b)aTr7OGD0*tz5p7jg)hgw5zg~} z2+xOoIdta214f}DgcN1Wsl$?MxdU?ieZ)2B z+t3)xf8m1SWse|M`!vE)1c0@pulBYh${Mc=nZP9^6_|)cfmeYrx`wMTLyHi44 zU0#pBG+Ow$xn+GWD@P=ZAk&m+b|h!=(tZ@zE*E`~~9Z&4QUncoV?P4h7Gea#9TMvb|MARL8w{~#Gy zy@f9)5!=4lKIgdN)>l6`8lgKn`78~^#FHP7{fW>rIt^`=TZ%cjpgreb!zcg6r+sD8 z+At)Hc&f`oUN?O2eD|iCvi^uSh!$@*zs+mmSG9pfe#OVqJ!?DWKxCkf z4&w$KpD0^=7{TlEFG)l101VDa4$j4b`paEygza}!9;`yh5 zf3Oj!_>idx(9oA=Nxdy3_}uzcV60P;`sX|Vzk$Z2uTTRkHp`f1L+oq5+5}znjUhA0 z_;+tOJCM?wUU8ZIVH$xF2GyXRd9DFc3ePnlG^V*PIf3{LUFzdwFeGpHps|DEcUI)5 zu7LtWYlNUzksY*io>Y3gu(D#;V*-9JV^U+@vA|g-rF=^S zqW^wjIW-d;aO^r8L-y<@+@sU^=UXYVPby1xQa2VL#2pypeHvJve`$b(O7;q8ECqi< zB)YhR8^98#QgKZa!<2f8qjH375Tx&arbEV=lrW@sPPy-I6Fco|A8=A0O(aR zh8aQEm|MM#;$OvDq|f+wz+2bTmCO5r(zW|F-$E<|4JMe~eGy|u%=jYW^6S~_yfrsW z-AP#t?;CQlxAy22j6&`G5~D}-ZrNUW+u(&?=W8!}ggrY)Oz#A`ti8l7lpWV@Im4eU z6$eCo`@3xfo^3^t33jhlU9II7KrgLV&O$KgO3zJ+K-$z!OduUsia<&Z5?DhrMtdkU zzTMg)eO7oqzE!H6PpM5-RhXPO1qA?J$Nf<&wOj-#W2xSf$`gGQN>lQ{G$|0>P>Fl= zwx7hB`3frvBUFI2;i0irp`UMi-1yH>>CV3& zbtS^tnwH#gep^c3?PHZi0sLa}?Djf|B1IPn>jQI$D-({!3-_1aYe3TLj&S}4JVMH{ 
zL+Qv8s-gx9r^#6Rr;wEO;TcM5+Fkm3z8NZ17xMJPGQz4cPJEa>iags*=k7P?UuvgK z4-G?0PCHjlsX5f;{I!0TIkkd`()I*P%nxExcDve}!BTsEgPBEALUnR+P>NaDqFIqF z-0V4Ac>HFHNbk#~;!IU-|k9M*F9PiFB5 z8CvX;lD+u9h~4(&Z@!|XM~=nn(KG+?YI3Z?;C{tOPJ5?)g9BPB)d-Le)Cn9r$~e$T zZ?E}N)Sj20ZPl1BnB>d;r6bU6%Jk=yftKXqWWi>5ZRI@WYdkK0>1-%Wt97WXU_S0bPJ`d$X{1|Iw*fW^6H(4d6=Msk4I#)KrjMdYFvP#JbJ?&et zI(TzqNRy?G+-~XF2KcT9mrT+8y8Lw2@WUgdUg|Tx zCI}ug>Fpe!b71#SeiXlbwnAn{BbPD6WaiDMFaeu!q#S5zrru~O#t=h3E9ytnmjW@k zp~JqCg1_RV)Itxr>L+SK6!}?CY*1ih;E7Bn*|ksR>@4iR&sf1VAu~HS@OjKYVluvO zcuwv*P7S2@1{$%~F=H5naaJlhy((cU96GOXnO+wxLXR_wT2iUtUNVFBaui~?mcKHSShu;F%e=F!;js8MxjM6b@tAmB}+>ztO zx5-*YYZ22_ftqgeb4p7~$#m`h6vv8VrU>qiUGu0;FZ}jm&+Gr{O)QS~`Budy9{vl3 z*+nPk{@QlPv+(w1DiaYlCd=`oTHQuyo2KMlKjG}^eNiz4>$@t}601t)+iLlkrG?T~ z-uCLiF_=3sMj&2x*%ax!yCO5VNZN#5HOnx``|@CkzR%;P>zfD3YwOi-QbeY3FxUbY zjF;k$t7H>zLpMZOMgx=VCP8V7n~C}vgGfZdV*(gZjHZ+=wPa>wX$h~3#!3|hr64Jh z5+UU>k{mBvmeK#ILC{2mxXL6rD`|ocjOKnfb!N0f`{cvJyW%SASST`#28h{>iz2CE zVGa7HgrvNK8=}Bgm90w-S1>{NAd+-Fh9U_Na0--CTUzfnEu~oHnIeR ze{waCJq7)uhCd^r+H@rVrK4efa(jmMD8$plo0@aC@4B}_qSkl$Xwcb7Vbt7e*wdf^v9RzuX$g8%KCL)HC|h= zq}`%(9w~JHg%hC zuk&_LW9}@P-N*hv2T>4Dp*GqdQS*zh@wWBh{zrgrwBnUB`cZxq#axV)wQ=J?KLXC> zlIt&Ny6HXw_w#-G1n-Ig+iAt&X7;`&x7OOO9GmeYtnf#V@}s<_yx()|i|#C;c(A|k zy+~C&U!$KjnYZ4Cw9YtP)6_PO_XoRe?xshUuszzfPw>9`2$G=_xYdwgs)VgIMm-rq3+;pHJcoVwGC!C& z_43V*etFTPX8UxJqlD4d_D53>gq#S)aEz_P!J^B;@H07NW2cpl`+`izM}=mW*rsZ^66-wQ?Nhqty(Vp%yIvvet^OM!t}BRE74#X?2Lk}>rkJ{4b*yV;mu zTK^2gjIj>}L?mm8@l+$edZ9bnZRxjM{=$%$Cz@i5`u?^3g=@}b=;SCpSFpmS>f`Eu z{uu8MOE0`Tu8;3bmHqpkp7t86r0p|%T98SSwIYWlE%o zWT<7ADj5yi+ffMcNSOeD$VDdlxe}5lwBb#TKs+psB{)%;Ls0vKE&dyZln`u}(lnjn zyxr9}NyfNSWSBNck`u7sHtbJ(f{}Q8Y{gaF-xsf8EscUG401s%@7V8uy@`t z>+q(s9ZtAJf`q{I06?6ncbFKA$j?gSTf$X!%ZDMEt74F?Rr%MR+oFq$d#>b$#@&g` z;F_%F3PnpsR9yQik9cjN0b z>xS~7#Eoaj2nkd%$lGe7_k&IC+px#{^+J*__&?oG?1fyN#nsuGEcO&;w6P_r5a(fH1f}dZHmHG|?7!{N3jdZpkfN`CrwV9^mKuo*n@ zS;zKl?p-{8bE=;nkK zxpSqE1Tkusg{5a5PKM02UVRqgC3w16u&2OOD=Q+=^SM{HM$J2(=7>CqZvRQDS&peo 
zkIqkzc+)Tmw1RqFFbg{(POt$hGhTA5sU&9MgEYfVex^3Bz8MV&Ucm#sh3J~5Pcm8a zc^ogN=X}wlq(gz_XTH)^UxK_II#v#31Xn4YR$eYJ_Z2ny&O{j@vU<=S&AuxeB42b? zW>COtf?eMNbdnI*7i|LufLUGy30m$u6(X`;FPuB6mBju-1X(%Kek4ONmCcMrMJ?aA z%MkYcy|XFu1t3n^I7Z1k1Jlr9V6s^lhov6@_1cX+JjQA7T z+{}hjcv=M1AjbA0+TBwP+Ob%W<|qabz38cGS$zf~q#wPvjd+LLbEgVwhpAvWWGFL~ z@ZksJhjf*fjIY>UW!>u0&NGSYQL3AMmY2th&c~BSTIe5DDJ7hWYeq*8Z36T(j+57EX1b)46nTZQt#vNEDHOF(=hjG^4En9Ow z1WT|iv31TMOyrU#Hn0z>jJnb_GldY(BjBIl(Q6^~%PD}11D(MSz9hS2xYOOqeg;x= zGR!^bu%uVE_34#t!>a~zw)XyvS4r_pwmurIP6j!M;j#rOUd(FQao4*U;7TSNn_Y z)S?81E@995?xu}xGb5Y)SZ~ydPAeZ)n?11JCslu&62#;2q}>%;fU z72g}9J5YJoSbkbP2oCh-)sckD7(;h{w@+?hwgX^BAVLA8fb5OS zBW!+x01FTW0P4<=j5e|=_YQas$EZIy`F)gVm*ngJO3{ggSkdc`5 z^L+p)Yu~5{GXKdGQGbU7lLR-hc`5=#BZsI{uk`%|1L7@d)-Y={AkR#Sp$+pD=70DF zYCK-+=FK)r9j{V@WT}>&SZ)}>WbMR^wPgfA&<;OuwVS>U{3;%hM zJd|v`{hi_U3j6`0n*o38Dz!O3WfcZDuv{O?oP5H0zlG@+S#5Ok9CvNSIdcu%$-|{0O^30BWPi+Gb6Ofnh7|Fw0RE5 z0tHG(!*#GF(kWv#UmK_-BQUs4BlbZq+gz+dio>YEgcEd&2aeb8iDE_Xt+apH!S3N; z73pj0TArp7kH?TcPy|uVWPh>nbU<@u!#(GreE;l0OaaSer_FAWj|RGgd-YSDzQ6$- z+Wuyamb3nonX4u6wr&i?#8Ld%)zmCeMJ-@J(j!z-IIdVLSmC<@s5gzqZsx;sW;jQf z!EX6b5#(!^X`ePVJ`gwMlN~|Y)r}I=GUvvAsBsL8P-sNE>uWq5yhEyd5uo0_6E~dN!QuCv;kvBTC&5qUdu}?qZSJF z4$#CVY-RC!%tx450O$N#jM*4=pCn_TBj*Vt3_@OF_!>QH?^&_LyfEN6*C%w75j-5< z&(!C7e#2y`ZR)%sJhw@k-FN5R+*7HA(BA7eQQPoEPVhmDi$75jLApvLMQgO0T`!4& zPw8}w`QvsNLRmu$qMh+d`^+~-GLl&lxYgbrce7QanE0}s#9x-60uNC!BeMAFQV1`$ zv()x!v$P30(1pX6vnH5rOefOmgK+xO)L*4(kO9J@-7U(`^z^YUQqU-4ubk9${(S@{ z&j-0|eon9PJpZ+QqDnMJIT|E!>`2_p+exxiSv*r4{`)8Q9k2g;hRT8^>TdM3$5s3a zi4^7K_72OIrHoxQe4DoTLat!p%R+C>)}@NU-+cQIIf;MY*5+6XjL56_&TKa0d!G4i zDqZE`5FMD?o7HrmXH0gwrCWAb72cMAIK8kij>fWIqDn>p^=_s>i?dF!_-?(t$mvKv zB1LEMG3u^*GfL}TfWCY8QOCy$U!b3QkZmH8vMa?5l;NvR9ev;j9IvOL89+IpupUQZ zjP0Kz87*82n7@%^+2x??%z}-E)6UO;nc$*EDd@{O!MUWJ)4bLa+P4vxz8!DCb2?la z=ut*TfAT!@o~<#1NFXVdP?O-du|hu>6ob-PqvMAWVzV$zkezs1(64&AjoDcK)*;2O z{;pb|wh&3@CAdpQQIb=zmid*E9d$_q$5Z;NLR;SIiF9U=W(3C8U!{6ENkRZVz?`R! 
zWuF%_ah?-c)>;I?AmkWqhQ|7Y`;8^`s7z`X(rs;WCaKs_l?V;CE8PFlS7Ci2sZP~o zz+1l*s+%Kz{a68o1mgj^1Yrjjv)B7U{dMQC+0-OCypT949z#?@5ZQW;RLE|6A5lba zeMGj5dd*vnOfs#U#xFNuWWFYif|IPITkJ%X|H z&|gunEml=s^KzgyIpCgV86d%9117|EW6(L}Gi2mQk@$McK#U?&iP1EJEgf$Xk%7EvO?zKC!fsX_S2l_EetzQAF`4l1EIh7(m?r} z?)r8kHW3>Yi5XNWgDd{m?vQ}Qpko~{SAyD-8~ld3u0+URdld?TI=wJ~UtRZqNViwQ z2a&DgYJ|3#Mxb&FbL%4}Z~#v*g7@jV8}}TX@cAqd{myp^9_|gEp)N?mDPVK)VUwFo zfnRJK$AV7WZRFk2gU7YsoYzvUbKT^R&+l}uc{{<26q&{uLE2C!15nBGZAgr}0~5xR z_(J6Vh4UxWGYHGEK-6_6;J+kZ7@R(JuZZQ+X>;p#(_N$wnBS&Cc_g`7cP;X;e7-*7 zPelnT8;0~&CbMWFZ!6I{pI5hZ(uUXtyCK0~XzJhu?8Qjsm=}Xb%}J_?r1nd_SI}%k zRV^a3^;{3$js?W4Ng$C;oH8c(DXy`wRYi(F+kMd19qKoAm$=3!WA>N$^ z7aYRflp-G}aC)ERHSrFvY?sx&^Yh*(q3~Q;Qk~!+G$Td?B3sj}&1A34qZCy=VSL_5 zuG@a_RCmOL{f4Q@*;s`#89hF)Q4Tm8%uLN+Z*lkx1yLO;3B?$G5BU5g^_vbzzl&OV zkqc%#!?{+YZD(b%)%jhB8N>xi0cV5~b3>O?;NC6Mo0y)3gpPg{Z*jCalKeB)s#zkt zKk^G?R!EHseEWf@gd~K*XO4lAnIm0@&X`Q8zojH6MPJK1%PRz3Nud=K=^o!fIt%Tf zWM|#zuv`<9ek7383SC!Pif^^bd04C3O-#k&VvJoQf6_EJDlE)3GZ?NEUD3Hsi)%Ym zt`cyQx~iYae!x0Hpeu&bf>d|IikzChzL~-Pj?_-~EcL$ddI6d@UvS8=INy%o5z6w= z^H)9VzTgyjy1|4fLn?9iwp~519TpjMgkE&WWt{()0?*SM69_r@qHpM=U0^bM8t+`i z1!|P-soCH)3>~kvmEfm6Vr;qi>i>9hAdZLi{n+@^z@R=D-v2Gn~emnlND%_HyOcql8k}@hGN;G?3DW#5)my`Q> zV2k%gyDl7z$nAs&v8eI;hRYIH{(hltQ>%oL+u~OYQTMOuqZX)LNzV|Jt2e1IFs zEJn`5y$`Md-ZrGrELcZs`tx+yd=o|chSG_*bPQ!dN)w zwd2iOS016`mV62#kMy&^OVdMPH~;+|sf8Z8;@b!n3eow%vYM#=0ry6J?6K>yO<7rs zjwS4?wU52NkBQUh*W068K3L2O@4h?7<*wyBmP?}h6*mc%)|5|n#$0r?60W9s4x*ph$_y$S&1m95>w@eS??5g|BNt<-8P%Tx)Lz#Y| zBEijQYvJb`<^KQGaNhA$|6d&Ec5yFqubq7*ql{45WNVli$zCCQq?CEDkiGY=l&`B? 
zWZb&tlFaONjqFttQg-~_*Z2GT&-HLW_wl&i?=xQK`8wp7Uk;$;vP3x}N}4K7pUV;9 zg{tOiHDX4qL0-b=cP^cKZQlMJwx#zzTz;fQ@YGG8=9v^qr+d+2|Bg$D@<3(Dr-5ye z(?SdEhb(LBlb_pF(mp!Q9cx>!kwZ#xh=^#)%dL(9S=>FR^QT%zKWcte<+%U~t_RR1 zQP_Bu1~d6lis)Qn@dxg%l;{H04>zg(UVr$*5#S!8*cmxR3@ouLvij<9ae7f@Z5GVe zB<0YL!SS;L({oUqu|imeOmPGER48u)0Bl{KK!e{tFFQMVy%LPXK(y~VfibMkO<4ILBBTlRR7AU>m`WFXx}2Wpee^+pL&( zprA=Alz7GRJNFH$uaKxWRWA!o7-W{~#h;SXRg+XrCTC`!YvFTqvLdH=4?`1{3y>QD z2(>eI((NtkMgf)Hx+a0U;^ctFp!F)b~X&k|}U~}etb@g5avVuD#I9ins zGC?UlxihooHxzox)6G7A{AVnlB}4FlFz`tRrU~1%;CD6ppks%lgIs__D~0eZXy>*d z>af9D?fMk051ef*pEx`?U?W|)Xa+Njx&NjpziENVs{ZeT@bvd(%4*51ain^g+%d&l zMq~7}u@>N%iD(8_p5-7#K0SJHYQmO{5TSYn!t!_8wScW=Ix2HaUCl6R{_un0$`?LI zr6&~7U`AJ~7YW8lSoZPJ7Avm)%9~6+Yo;5rv~}NpxWXx~0K7j`zOZ7U<}73Dc^FF% ziiw{{@stRl4{)HOp`7eursUhc*O^nG@x>^uIq9%se}BEP5&JH2S{w2_$^ZHLry6#& zkg7I;8g(B=2ZmU7LlRjS2h@4*=Q5Xx!sb^eDc`8+X!zNVbo7#v<+yo(>taO6N}uMO za4L@T>G0>fDEC9Z)@Y?~w=}+h)UVQpSV}Fb;yajjS88$gq_^C)uRY&xxdw{6fTtn1 z!;aV81SU{8F|2EEpatK6x6UXi#c{dF56X3V(KzAzI^tfK-FYN3 zCLChT1UNq?Ixha?>)|y6(|MQdQ*v}KN2_uH{mxVFU|4#7VEX|@YUScCRnlF6F2qt$ zmC_o+`6W19MXH@jOP5ZLx)7M zex37S7Tbg4>m*~I_Z7(aTd=pfwD&>V?a%0{+@GZHJv@&QZZth{<_MKcwwIoeX%)Y; zz{SKhSNDAFBL9n4$Rnxej+UqQnd5#b?yujpkxn-CWSY}aO5Y5O{+zpRwwE4A&>LLL zaJ%t~yHhFzi@=j^0bPSP30Mr8ib{%UfamTHLHo0%L9ni~4uS~|G~+LO-pgpn2Lqdp@ym9$=v9nBGeoY^g*Gnth# z^NO^in+csS?8*4^LQa75;!;b6&6j2;vp{vMiI&`B)p(h2{u`lAgv+ldI5gl=2ziaY@X6_aQX6)x>P6>31s8)6+gSyn%| za)MfsVHwnR*`Koj?+RG{JH8eBgnbLIMX+2k7xQSKLpwo6HeE9eWa6W=-~jCd!Cll{ z>{qqbKjgr@P39J&M|~UkJLiI)=4jdYeI9rjnV7y?`nGDfp!ve*m+3OQCCB}xcg(M= zxGmpHL4?(MI@>RPO>*@3R}1SOb-{^FZK%h5|66!E2;|IQ;KN(G^rP?7D)jP-_?fG;7z~0VO8I)G@Dj^2{Z$A%dc!hEKi1ACDF z7z6249n%|sSbruS`?E9zHr9_7##NGufT9zyZ>8uqgD4etXAv)XwgT^(A

a2)(wR zl~D@_FblY{a&SfjXvt2-^mueM;brUD8Lkd z;B&Pq2-mg3`V)_*D|b?uoiy|y;BW~u^EO*!R2x)`y)&-r-Fh_lxiu)ha>bg&+A}fadIPk#clqDU*QLl`aWOS@}VrN zm+B(@)=d4HC+tQfk?^Cuud-YI1wZK6xB(lS++@PJ&t~mlDM2T*2t|K0k4Cj4J4M>8X6HMOG;Oe!W^sNXXSu>hU(Xr0?fF z96Px{0|P6{bkcGpQDl!Z2ztp@@pq~oi4rY~HWFfk zP@@^)>Abe32a13Fta>LU9V?)Mp++0^lal&W#5 zyBP?#6MV#uo^1ZA?G%+Kx;5T7avilNQ1kIEUR2eb$ltGgHt%UX9R!mg;^2@j=9!NP zi^dcbe_$+)Iq$av!ADYFp=zR}_C`P}WZTUb5#@p4OsJwVh^z6oJ^Tv>H~lQo!K;P`xKgKRiEvvTG$ZIhI$f$7NLk)tgk zI$Q^mS;LUD-RqiG|LATV_hxIoHUoPs2bEgFOrP+{EmpOY1Ub4bT-o#vXploTdBL|I z9z`4^R5x?Hi83z#!aO|`D(oQ(Y<1z;l7D>QCz_8EqoMLYxca7FwQB3Vm-#;TOl@j{ z%?uQ1CTO<*8jGWWGS8eP^%Nat&^1t<<6)9gy{ra=Jt3TWBG!y7h%n{_e=zv3Q0|x7 z^B?-O_+o>06plDqW)N`!v@?f>3SM(4{wlD>fG;*f6~E{H=j#Oowncsu5F}?GxK@71`0?V!7(6VX6HsMr}3ydbPU{c@@xx1Q3x{Nkom> zkGx2wC?X2#fW1)_644<0EN6`l0#SA-T=C%jGeZ~??+RAXNx={yP;H*xT8&GaQWYU; zhKlCE3*HacRho_G%{{-ip5a}E5vcR-MD6;Nho}|9?_;5!rENMD6XukI&+>PIxGvAX zEOS->vV{bA6WatkH0#M2n2-N}y6MWXxGCC9<->;B>(gTIspzXD28VW`&bh{&^DK4A z?JxF$zNEeOIAw#LVc1`;SyM1A<73b{aj4A4gV=;pp6W*E@dK;)QVrsjr38g3^N57$w4v_p7m4SNzz7tsaj_#n_yGa-=(l z@UmhyRDQSn?z|;`A3$KB(o(g9@h=(S$rlB*Pv==8pZ~$C-=~1Du*^KlUS>?H%^@s!>Nmm~5uvV)#Vcj`&);=h^}`oy&}da#XX1aXl&q z&utRnu`cN(Gnh8&oT)vXWsq5+azMRZT;e{|A~B;oZ&Xc7pW!B?*XfcsjI9Lpj8(CMW}nU_vGGQOX8SGodu7pHldn0f zPB#fFxpBnR;yL^3;Y9S1l+10?hVwZ{j{HqE$y>Op0DVYKM4-M1b>)mpc;t#WI$ddi zEvx><#Ro$xTk74zLcG?&T9KY7^#lc)2I5y!YIW-o^7h)B{$(TIAJ>~%pgOk*|6=a5 z@FA75M0yc3Bp92C*j=&Z__0mDjB8=CvF})(BX6qVGqISk>g=6s)1ptzlL)y=n?Gf_ zsNsJye_#va4gR^VlJ9r8+n*a}wwK|z6TA^IEH(IX`YIcAT8{)ww!z3ikp&f}J&-wu zgnj(|%xyPGoV?x_?)$s=#QS%4qpFS`ujPGCvN`Od1F+Q@?CWdKU-6^cFvTU}Wf)>M zR8B6QqkhUPt06CV)v>W+VuJ5C!XO-Dw~L%3kA5g&lT0E;fnOh}j55$-@N?g$^F~}Q z>rx;2tGmy7REbqpzx`wMOVoSZupuK)T!%B_CCZha`$RlZf%za0h(?DRG0oT~M#}Sq zZzEO~AY2MU^!Oe_X5+Otv6lM`eCIwn5#1ReEq;k|Bfl}+9nZRG)!06&ak`5d-({75 zIy&m?cE_sTSrppI45N`^a2mC8;jH}cX`P}lXWEefg-~zn{$+1k51?Wc!aV#VopZMT z;o81)oS21~pgNSg$|LU9(66*MIpeE9k6MOC5Q5s;zDwf9& z6efL)CIt7RiH1-!zKEK=t9e&7P7Cq1CR!qQXQEk=D+a|&nl?4-M+>3i8#AwHxI$w# 
z-Uv~?sxq^dF6$7)d;dJ6r>*lT3ffFWRB~(uXcvV(D?JF4Id<97Vb^-W05#{ABd+`! zxmQYxq`EN(JLx>Z^JZRW9agTr5~}9)=p)7Gs(>f*K8!?NJr^2uO&P1}xt!Iqn;D7j z#qy$W$=j9}JuV!$1hg=_$YO;#8Y+F!&otPv4@_tuT>2^JdyO5(1Df9ukZEY$rf9F@ z?$qAqW%iy96W7@NAF4pG45mAQfdj<9ssDx?C8SY0u!t`HqUc&oq)T zyog;`fcSGY-Yq`!P#V4g)1?yAEN4QoW9JecFdVomaY}IJtWJ$tu{z{Ne|@ux?(7Q7i4q@_#26_wd>!8Cj*h$%f-H&OCYOmon>%Bt+iS&6%*o zp2DfT`{tgHSJidIAVrk@IEAHr?a)!5_7vP2saJyekz#115L6uP6=Q^-^>v}@LOHF3Wi^Uncc`UY2kj)#J^9zG!}aP?C$^vR9gt8 zWtb!-FI|75ZU?xv^;3C0%UOco>77HRjZQbl@br0{frul_*9oc3Mvc5O^m_CG`o-RA zGu#YIpvzA~x<0Zig5F`4ebM0c%tKu;aw`{O*Ua4{2qVB)3dJ&CbW*x^;3~ggrfMor zY4e6vXc7m}1?rxqU$Cq0vxJwZGA@b7(W+M=k-~AIiNzdXR;d;)%vx6*&pvL4L zR3oi$NBq+bIOHoVURtG>rpSwEI2lWE)oUpBzJ_vXVcv9~lZmO2s=f)e$4a6389RQ9 zeAOTQ-+(Jz?&lHDFGqpDj^R|u1_9zOLEzK&6!e)W^h1IMxf(mUlb$digc?_mD+mGP z-!urRssTOc44Iz*wWF&>IHvft%Ukl#pb;c5oABY`jR$25`H$c7tE%-yU9$M^QE%Xf a$GqJg_EI?bU$EPb;<}c;<{Nc8%>Mwdd6zu^ diff --git a/content/develop/use/manual/patterns/indexes/2idx_1.png b/content/develop/use/manual/patterns/indexes/2idx_1.png deleted file mode 100644 index 637b6a6a1db358d7c9ef6c54901f699b49ff8098..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11649 zcmeHN2{=`IyWiNf$yA|(unmQbkqp^}%oQPH<|zr=u+5UOL>Y?ALs3#@BGER>JS$|Z zWM`gdo4fYfIj8RTob!C=ez$Yaz4!Ax9&7LaTI>IRr{DX&zgLL5ni2&W0~rJYp-@ql z*MvX_p};R8j0E^5-{9R{2!t%pMovy$MNST`?(A^Y#?A@?Q4V<+b3p5|4P8U!dN(5< zD-HK!QQ1%z?#IDw$_F$^NsmWU6Fv^1pg&e!0B0kdx^w;zvC`8=?y*|AD#Rs!P0ADX64v^O)^W(Ug&^bCBa)^Phy$VZt z$B%MB=4fV-_;bRH)KpQKmyvrfnt~w-ZR_OL-q`K=J?-wHuUeFlNR}FnAuYATHJHfK zxK*bs3r+gr$(R!T@C3{@%9aqDe2AFyoL|tO)PqkTjJC9@#?sfx_vWYb4o-X5V&c*-8xhfo-NwAVGB#6M z6-lgyAkWeB)r+)$DpMa9ND){3dgA8WC%yFpVl(yLPwR=jc-yH@rUWz2ema@E z99TC^Y@_xmB|yHie)Gt1{c7(DWU<($@nA{SqTQv+da0YAumt>7v>xm%Brw8^s$+3* z|BDallEt^xh%lD2F_A|ZW8a#xJv)(5{Rwq1M(ARgYAZn;B5>JvF?jcN5pmbz-Qsa( z2(Rs-OyleVc7kiNZgM&n1sSf#AWhPc(#DT>f+m+mR%VWphz~#TBcbsl>y&9PWfe*# z&L<9UtV^jn3Jnm9WiGl;a%@}&K9JF# zQ&6XOfxftP)4yF#_fev{zzU2ZNb&J{=h0t?qs$nx$!%*}OnKr+y{r8f7{ut*{EI$H 
zdT<(&>DF+5Jh4G`gYhPWs>)%Elt$*hx=S=eGqu$%XT{C58jVw2tk>CJK(qs1$iGX! zqLG~Gok`I9%aky~-HbH)YjQ97J`gblO+32X$=G$a)9NdO%aJl7A1cHhW+mSSb;0w$ z5Z$A=eT%fd&Rj@vGw$$3+KfXeie-rKz4$uw0+(#c_lKwbDQe$Z^jWx@QkgU6+xHvK zP`eQZ*K&`MJGzOP9Ak{V-O|8Rm0V}^cDSD6v(!fw5A8SHBNQy*n9r!i;~PgdE^L%< z=x*2q(8%63m0LLONU9%X6l4*kQ^QmvXqLdG+ROUk5K+it9bcWMIbxAvk$aISL|&6q zKT7Am$CI`vn%y?t6y5MiVg1|Lm)uS$MJkLZk6(9Y+&r{Nbn9GKhL~mzw@q|%^p$4q zW>hmL>5HKI&kL!^p2TomcFwUZu?!j4Z#w(HLKDhu&uz_J(yRE~ z`T4S@kLHnXEt?}YCX*bKOx>JM9L`5Hj#<96GPEic@)T0C%55gs(9oiIA^cEo_&h=p zt;m#NW+QQ$jxR+=`K-cpMp#R8BY!MKOj|TxESl@Aj8a$eB(E2n&rvKnmKA$9sqolk z{wv6GL-df*YwpeCOW{jhOTp!)RhN9?q3UEu zY9BItxcx!EaqAdKoq&fc4|g8E=V8*G(o9g-(QwnH<6=>-cp;f_PS;5*M3-CV^%J2N z=e3JT3%RZOGZC+3M z2#3Ze4xA{WD{^n4h*1)de)(oJ>H3^SMcbRs8!4J6m|pT45#Bg6H;3?`*%D(hSKA{o4IC z=|L*7O!0iClT5~9*TtNfvW}UYT8&$5jcR?R_EzoOGsS1z){Gd6c6)xVqwcYUu}EF| zK6sx{9|k#vEI_&EROCwfnu z1dAhgAseFI;7}G-rs-wor9XDakWrHM(_u{-A?h+-7rC#qg7t#NdKbj_CB@#HykTBE zJ>`=X%-+hr#Lk@Cr62WrV&Y1Dafq!zZj@nzvHO|BN9YfGvMuop#u_!H+%Z?smJdTE ziEoQLl!gzP&s}?IdmEH-{Jsi$yMpJD^57sph16zg!qUxV&95fIPdy zzNIu0F+A8-!qUHs_A_wI>MYHFvM^2A9&R8VzoWI))7l=FJY3G}#`GaOUS#Rn^buQ!O~VpB9eK)k;55~Y)Iu?D?u_i~%bs$# zH0#$wp9c;Fx>JcWzmU$pQM#}Yo9ZdO&f+B9t@YeU-@A(PJm$>ByvfYs>>R{n7js=EL(UhB2zsYNOmxfh7_Ztbc8ZnTH9cc5zphN1u=#p= zyq&(dt)Er2ih0?teOi<8V8Ww}?2N^U(h2{V{FssD6wIK3y#b@I$X4-Iz=vB)cMFfY z8?hNTm!C$7Z!gIwKC+)VKlAYMn(OSP+}p&-lu+gE(n!6#QL;6(d3?MjYw})RY2MS( zm5Sn<>sz}nCv2`NR?!t=vLR%oVkb!a*55(YYaoxH8O(@J;xG;`_WIK`e*;!y#vck2FX}Ggw@e7`@wjPcQ00d_2`1(Yi^3g&c>g zSIa!FP8iS%%~(J_oWP(rv!77!jn;~mz}Dtyvq#8x6?_kHhY>faqTd%mAOy)aT6!*e zsuv|J9PIhcEFH|P_&w|$fzlQNk@S!NF72&c%-|mOc1V3qs>9_RoUPy@{HOT^*`&zeaJZziqt&k{S4=IsT zXC%S<|6%2~9lv{0|NAFTfB)onE8m`!6u=4aoj`bPf!_k^k|L87_*L~%WT(bIcS9iX zaTR%4Ef0d(VFDz(?gOZLoB5?r;;|j(8@y7nJ`abv1)%Dhibb053!jH7nVPyT=^kCS z(qMXYdUbDAde8HX2Q^&A4?;lrj&M(V7(GD>e)O*-h7uknBp`&ssNoB2)TJ=sFL0ax zw*OdO<*t|buKQYnk?#(A(4u5I`SZ?>UV%iezcQC6kIwqYrH19fY0L86R`Ffe z`B<`uD6enZ^igkoi8XnvxYKBAslfs6Y(%){Q!*UQHNvvD6;d;>+OUX!>z8`T5tHXE 
zdoyJczRCIKtBXu41%+s}lIaLxWW+#johalhq!AP2Z2AlDf0*@CmVtPqig+F zj)MnOk%9lVMU^p$LhbvL;Cq$bb5}|IZ3Dr_sXYo}WlLd0^B>S3YM(A9}ZG#UjR25 z%Yxb=X*g1SP@&NaBd^jUmJ|Il7rj|T?>tTlkB`(-O#9{ z%J=$+ymzM(CqKHwPSA?ro#X_ZdS4|nrhu3Klm%0Pz(Rl9Se*Bj5zkn4B7BGOQNlvj z-vVDKZP?u0b}TZd9Z_^>2BThVU@dOe3nS*6Q{-iwWpSpbcAom;$?Kq@9$f^2l(Eze z^!_Tq$$$O=;N&U_VPNnWY5@i~%9LFK+P_K{)<49>^f;CFCJq4YpR&;3vX5c@NB~V- zI!wM{OE6#onB<((&0VRXzn7c*;EMf2*EBaT&5rKv9+yO6mr{LqU9l%zm4URa>HqR| z*Cd*pVVGgh%#QM0SYK%h;7$uC9VMur$S-akMenYkOuY4G4OKF4fh}2|-BL=n7~C#e zEATNIT#ZO*SaY@SoBr%6`dEA$+tAw45&3S~eIsIT^(>S{RSS=$$pDs$I#b}0A%c*w zTImQa9;7(o)V*PbwRrQV`^hYH$=?E_u@^9_D|??HXx0rbIQ5Ow8mwS0a|LqQaHGgY z(1E1LVBc?pCa-CXFxV&=DJw0QcckY6b{umioTQACVl5rY6p;s?Sp!T~Y7z6G*QI}r zc>qWQ;;(=-FuLYivfhzo0U#4zM^ns`6BB^Z2A~|@Z z%tO?$>xwxVpcs+F&;Yy7^mvij0g>)qNe4xOAA~^%-U{Q1ZvIJG5X2fn0Y08;1GUIu*X)Cb_H`t5`wfnq#PLlwm`WqjyMu!>bZ zv9Es2%&(>MW`)ZYQf*yn(Z?SRk`NC8iA zBg_v?J|~vJA;MIGR$6nzIx5(Ue8_VM} zi-@zFv5K^klmpEi3f*b^okHy=)q;S5WdCiTe;erE5rjv@Ki^IL|Baye$~`Hph{f!y z(VN@l6^I5m>}*rV+HO3`4rmgkw{VaLv{&;iKt6J!81nX@N~u)a4be`hos@~(C0uQQ zy}2E1;)`89#pyza(|+dR1@^#HG2~w9p8SnxAk2yaiI*|FVfSN%^sdJYX)?a-g#xW@ z)lTcu^!&k{*1oYJT(0Uo4yTs<_{PN7WQ^1#Ar?%VM`@{HzjY))%F6F5i~+&gP)i2B z5fZq%C$tf=p{9xhUkAYSqQ@ezfqSvl7HIrVg+SYB{|%e3;|e`R?47nSsb$jYz4Zus z<}P=*E_loXuqT7v<*tiF3q9N!zC)Llf#$*J7Tp10RSD4x{zvDx^NNWF*`kD zEAigJJN5l&Id_*!>$Gk1Zn=@~wtLckqVQe8Wj>%_cc`oRPr3^n{mc0Qv;Lg*6^Gfg zh9u2^qDzxx$`0mSg7J%9r@(i5Xk6ZldkW^gS1W&<_sYM^d&b5$&V8p{Xh4F7Z8T^d zOTfN^-;#ST3 zmupP?L{D*Qm#?Pm=J*7+$lzL**dxmBk5@^nzbNLgf(1hU(frtIrlRO48MP(r zjU&0z?XPi7N=aT3+IQj@u6JRvD#mr0TVlQuUOL&xO5dk>=mSyr(By^pzV%M`{aEOK z3K#mj@deOAjW3Z)Js;dl$)ka0&QaXo#lS0-+7Q{xzy@}x-UNw^!>2B&T$ivbw|V;q zARbKNjU0`dwC#JhIUS^37HWr#{%gAzfEP7;HPTc0J#6#8{1LYQYt1r1c*lHemAP=G z$e*Or|HTe1k&!sw0tll9farMlp{xRDCQrajTRqOF7=0~M_QeX8o?HjJ$Y4?hC{ZuK*9AdZ0TM#>ZGx(nv2Kd_QmS z{?5aPJ^MMKxfEdh$c)t+lYC^`;9L(;o1DfzPx`!rXwfh5+S`ra#g7<3Z~#XPSsnUJ z$evoxpF>Ihz%~6Y?TI6yG}yrqIZCVis&AdNTH!!0A07)10VPCdA#2m7s_-9E+(&*6 
z!}q%^;{OHe?^mq@+^fQYEWFAWXaT;6Ec6UQyz%A@x<~kcatAnF_BUA+w5*E4M#qIH7!;E$=8Z=V{@EP6Q{$z2S6u+FANdh3?6%p z`TP#w=mrC=D3zJpVSHE*0YHN4Xvo4R2P%MDXRevyz@=xIvDW~(j!B8oRRUkk%mz>s jK?5rIh6QzSM_{>f>dmM7WV*nApFmU;)Z}x|nFjm^*eSv~ diff --git a/content/develop/use/manual/patterns/indexes/2idx_2.png b/content/develop/use/manual/patterns/indexes/2idx_2.png deleted file mode 100644 index 70dc6c381d0c70b01d5870af14e1d16671ed91ce..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 13973 zcmeHt2T+sknl`;7(nX{QQj{tYL?D0yQk1F`1?doxE=V5=aQ_n-@LjJO9q^d^@|R%>2h0XUO~1yIpsCqD+i*S(y$qQBY8@>g#En zQBY7(1OKTQ=zuR~4?5QXWmm~XJnk#{P1`$_sau{)DAogER;lC-ZTF;HhcL1#bcpH)3M7&9F3UA zRiD`zdMPxIQl3)yMpI@JEpPac<>AVgD{(I|6<6X?F+@LiD$+}KK}rf^*Rp#h6pz(K zZk<<=;F;6PKI+kqS=(Z$5%%bca`I#E)CzJlGh36+IB#`&K7;Ui_bu#cl|-6=)XX+4 zg>&NG^Amc`FjB_oD)QM=$*WZlR5*Srzj~nYWUe*6|Jbq6s1%{{Z-I#5v5%X6M@!+sce`|e#&O3u1;jhaXKKBoEV z1WQ*iib77>1!HRUT6w^+e8~8-Ffgt`UC#O zA14aeBR^wl-Hd*uM`$CPwmHX}Hv2XxR?FR5nJZAM9>&Nf^{^i#N?8aFkmnK5Kn8!!+h#u9s$CNiwJdfF!8oY|xo+3U?O6E|$L-BR9$lj5f zRH?%h1U{kqZgq~y^XqPZ02 z5xLs^SyxS8=7i=@_8pps9DbDjn(w;S2bmFSfx9!Am%90TRJ&bf5BqagQ^VPy4+V9@ zTTJ8)4pGOj-oMA#^x08Cemj}tB6l{sAL}{=BqrsvV~KzMfdLLIg7s6g)1XtJJ)0wc zvFDKO!odKl2cM2lviJlj+a2M5e!s0n0QK_owdV0A)+zPx`a$Nk$0t~Y;xJQwt72Q6 zTNk!!wk)^YBDl^zve#M>^I^Pl_u5^jyB3WCjq(nul7@XpN7$*ORzJ&pHgklo9$r1Z zN*$$bCUGUvA|dEa#~ZV6Zr@nH3C}^U+|M@-5Z8UI^Yi7;8#np4*|(|ho$JX~HfucY z_Vne`t6$B(`hAsP%)OgHb0I>iME|wTYyBUEpRM&H-XtBnbhE&@!a3^al`pDKoXlvB zdme`!ujo7f{^t92Gq@S&x65vvZgz9W<^;Y;yzw@OZ=H1h;9}!ar4X#3>r(iY#nkjN zYcAxO*0=%meBF70YzH@$Q#>;17J8~W*zDN0udTArS(7@R$~>==RMph&DW8)L5rOlP zSV%`nk6x4>xg>kltH!2o?ApiU+hS{RYdveUYpiQ*KarC1TsGVsTsX;7Qfim9rRJpy z&Du@W-UJ!?dN6uMEpl`|i4cP&saZrk+jzF~Y(PrDeBLb8#KJVdl1Ea=q&`>NQ=c9@Mh# zEGdP~^pk<;`7EofYfd(o%zJeqx@AV|B|K-_45Ws&KJj5w2x-PN{1Pde? 
zRT#bOymT$PFw!g9>+KR8nxFp6He%vzogmx_ZKGj?wAqWJ^ssk8L6om|*Kn2fg&Yre zoBpP*ReH2ne!RHTr~9GTPtT|BeI6SDDKn)jE}==cWQkrLUN5fSZr_TEiivusU#Oq= zCG-;fQk>hzIXEObC2d%|jHfKHjWtPEPUAx@`o)bUr}~cC@tw4LZ|`-#o^b3l>s9zx z+F05Z?K%xB|1^JJv3cZ6Na7|fwo8de$xR97vHL;sgJCh$LACT8OlKq7N$UPW-{Me! z?gHHcA1(o>G0LW#bEa6}gn+H`4dt5xc}MI{ZYHm`C$?7_H5;9KcmCaR7(a%!(^FQG zH}E;tb1zH2LE%A#L5$bDSBcjxuc@V(CG7a9n!KQ`V5l0w>Z|mDyd=dXx4DlEWuG1` za8H`gY&Ke>t!KyQsfj^^h0WkT8R8l5s<<)7rRyKA`I~O!9jZxwot$7*GgMcS<$J(a%-2Z`X3MvHtyXvE z`__9J^xgTpDW4yIzJKhtne(^cdKL`?IwAkbiC8OLaBpLn>+YJrH|f7dj{D zX6}pfzlt^GA3g2n@pbf6?J(T0gXc%mT^Z!`VGPf7*x2FHaz*1a-hLC^Y)cw_(-(d+ zuM;bVy;OHZ3Az*U|7@Fws$7K9IufM5EzLa z??97$;yStQJv1$vuZgxv2wyN7Smqr3MP~a zg$rjtUJBN_XV!#^ejhm&8OU}dWtmL=G4%l z5^pU~RA82WBA;N_g=epDw-${H@7va)OLDV_se|SGgX4vEi_QVI z^$vkkjfQDy81F0NH|jloYu>lj=B=JyM<1?{sZLw6nsA#dS#n#v))P8c_-R}UZ?kj_ z*;O4*Z1_rF#}+Aw!|QCYZuk*(1;x}u@XDmsjl>S)lg7;H`cSFh!j0Osz&bQPoiv^7 zfohh3I)8SjGx{!BzSXs z-$JJodsn*(SG!lg2Q~&mmIH5_Y?+XJw_9+G$S6#req!$>+6*rB{@WFVv2C%RZFzGs zMO8&_(Hr&UVR*u>zqp(0c@$46CZB?dQCXbs4!)nlq>&<1q&`COaGw6B91}{!r7wn^stkw3i?rBj8)>3loYmyr$5h8 zt`vB?x>g6f&RBCREI3h&h-2!u^WPlYLw`D5!GK%h&Yxh})d{CR9!vYhux_A?f`am; z+hr?%E5nN_PTrof4$j_=F0w(MK0s(oL7^6;0{rRe;_o0FVRWZ{(|7$t$3o3Hm-`__?PA)Jo zP&N=E>wOa@ck;}cGjj3@ataDEzzi9`U@w1%AQ>+|(fv(+@1yPF=XBG}$KTD{OPIW` zgQNE?f2fEE`Jli2+rOtvklSBR^78vd7C@jJc|`7{ti0S`_6C-!k-w^#xCOa*SZTX? 
zx_J2kXQ(TlR8#}!|EH0^p7@8ASN^#2)E`&=VdU3JH90Z?e;{a|Ti{oKUFu9~a(`jH zIumY&HJF0p_?Eu**~>wci{q4~^Gc-@-D!5=Vi4@tt2fJ%t_-$W?8ZO&rq=SIc;t}$ zl9u^A;<5B*LKOd$J83OET)ke6;h?bQ9STG#)!rqgn+Q(w4@#~2lvkxmm^2fP6Q`!X=tva?g-*pEXLJm{L-32Th* zAHck?%NRE<-GmXgrYGz;KOBJmI?UwdUFavtf)v%`M3v$#z!vxbrqK5+N_i zVztf~RvIP{lF@@j*ZUo#1q=BeUVh3Bs?<5ECx~;2GM7QcIS6Q!2)7G(Lgr&YrMjJ& zpMeKx1Jm?$_(1VX0+NzEHGEDC6k@_1XJIqb^B`ZCfb~p<6(%5GG=XVc6Loztg>cMm(g-^-r@0ZQ&6V8RK}3Lpzi07EdjUV6^})CCb>+MCxg;4xer zk6E}f-@^EN=7e zYy2T50CzHdyVkaeH_9BjZ{PKbE+y($gkj}J#jWjx)`=>4>sQM^M*UMv^>1{~Do66S zdCN)yD8p=&`xsON6gvPxOk6Vd3dmMEDT@Z(+5d!|Dn;gLX|vx`|CiA5H~0B-2xadX z@f+20WPl$d>vAEWlFD-dV$hsQpI*oJyEdKJVi@Og0ezGM=)-34w=6K_H~cuEHjw|h zlKwfR_>;C!Y+{3-eyGd3 z0<`%f-aFer)!&HS2rnvK(T#^GEm>&r6D>&S@I6wI!wzoL8?O8Szlvt1H+FSGM~e<^ zT=>x%CuNOndIrMmx-9O;Iy<~{?@H1*x@;4!j)SZ3p1Gi5_tACo-96a8S9pW!OF=+d zH@v?hcL`X&`|BGgrMno^>L3DSU#RIu38Rac+uPkGVKw%)jaTAC_R$fFh(81AMjpqb zpB0|w77+oBf0=zN*?IDr8XAp!w&NaQc^LbM3>TfEldy>O_InBYF-#@Hk(;iQEWG&5 zlN%YFxa&C2+i(&N?M^2+Jdu9#x)z8wwlb6LsJx~?+#m-xuj10@UJ!Ven1H33;h|xe zCODiWFl#)h%zKJR#O9G0rxs#$l$5ZZ``5dH=*>6mMs5lQ3nA)8Ju0VTy>x2>Lt#VI zuFdMvM3$-3bmWOaHuf80_-PH!50PdX@=(En@=DbF$hwpYF!(~pe!d+gnP^4d6D~fx zNu&)XZG?u8k8GCSO50Q#EGpZyCVHgVg>4hC?V9mU(EGEuD&kWJ<5+VG=n@awV*%ou zUsnpm9oFH$fJ@h^Z6*6*VGiX}d_zQ5^fO{l-!IfHVjsFlhZCH%gWRrb zXHXV}WW6~6Fn*rNL!6o~Rdi2UzIT}qE9iSeedPYng_ttWi@*_E9*7R-NrQJQS&>+n zaYhdqg(6yLl@bvxHC5w{vzclqkmHRdVqLTNhnLoSl;R9J)7jTc${gLvsnvRaB(k>_EF07l?}SW}d>5izhQpvPp%s-A_Xo)*X-;xeU8)qdWqV`S zv->3++U<0`q+lxfTJbw?r}41GXThWx&x&1Gg(`obxZo`^k0#ZqvaCCnRUke;8ek)!2O-sKT;Xv7jYFcj@rU6I3oxOEtUj~nt-`K!)`pqzVB;1k_xDg49T>`D4-4Ph#q{IDB@Sn)SW1SMW4YTu1ICGvV!9|sofWxpMN^OuE$Z3D zf%FBg0u1QVWIsU^93Y%--)w?gFS??`Hz|okVr7vKkY~6D6)lAh{CpT<8MnwFG)hJU zZp0JbgB3sDO30gwUwr|}$Nrd4YYJkQ)er8A&bc(*!o2Y_yQ^sxIwV4-%f>wOX9^+= zwLssUC0;qcxjgGBsHil06od>>#Cz3)affxMVYRGYr?PYMTgvc)Bz(niC443v-wdBg zj)^79D~u^-Nj9{#i$9QiZHM$4Jq#_K`a0lE@+zOxFx|*FTQMK+=r&zevOFhBo*Jw1%U3f< zS$GN!g>12OE=$B)#m+dK;JY3>NT1q6r%_TXq^9A!(0ma@DM9XJBRauFJtBr#??$?J 
zK0TRSQ+pLYWg=oGvCHQ6+ECHftAm^X0lAD9C8%Nd=IFnrtZr6kea=SiVRC68X`vTh zhLuc7-mhdacn6wQ50y{`i$5t)=!no6J!I&@csG*#-Q?2bR3f4s;h*OfzDFb~s(wnR z5g`s9h9ul>2{A3R=+K;^WhO_*eE%OL$uJ?K4Gby&rEtXOOLvkHe8+J8JITgnFpFG zO&m{7+J1KPF^1|rbKZsbhXVulOvnb8 zaYDYj|MnNLm{COBjv-4)zc)DI)a{8HKI-+6wTo;kcTnau2Mp=dzn`niyZAzRAD&F99*9%xvfgl4B`vR4&Ig0w zz;JMgX9!N|P@|q{Ct5rnd${PJyhM@706iebLWyEkzAC|{k0LkiH3n}VVkLi*E+)dL zc)ydzWC(mu0_`9UMq!;%po5i{D^6ii3z^>CF;UNQ)mYIWqDCu`mB@*uamKH(_FPjS zgwCu$xhj~X!DI;mlS|>qYL6baRuZcI_VSgk(2TB;`bBb>joPFEfLY4+BbqHa^uV_) zr7o37g?VpxOOM6ZG%k5B@njM+(^22uId4-rX!yh%y;}aLyEmW|AMkMyU58(abuu2q zS74xIq)~p3nq4YQgVA+zH&0~^hh~n=2S2s)D7QxL<*ybHS5wI*_s0N)=r#wWKG(>6 z&xu!RF34b>;M48HE8u1^k+;;^2=F-9y;ws0$P#Et;ioV?TcIiHGs~V`(dK(&W5m;7 z8eUIM!;6Mmu6L5Hb;|NNK62a?-Dn5hP1$`NxpPtV*1k||5%C6eIJz@80?VnF`+x9t zq}>E>Z84ek1QAe-eHV;VI08rA?jMe}+$Y^ekq((EXeE!r9FvH2ZNo|1EXrO!E2d=92Fo|w{gxV#5s zcmkF21-er7M!dSD3sHmPma8=>j7ql!+FPkYHb_nT<31zqbR!M5Lfy})!#1|dDu}aH zANDID%N#)?DTu)EDQM|l`qLvHJ9L+yt;C$HbC>9oBeS;F5&(&y%?3;>_41P~zR^Xc zc)`i%QyVWp#@8CyhrD{d%O5y%C$Ola4sXl7lao@*hZOY0FHBqeX25rvL3_m0 zp07;JP=`F$r&?m04p|oT^n3R6M+iiQ9H`k``sL*kH<0BS1?Umjs55qXZtA&ReSO%@ z))e-d??#5efQFm1Q`f+<@9DDPp)&YF3L88*z5?w5-OpftqoPWlQ$cnY%c(6JZ1Bbeb254LP77HLoGGIsv~{a`_j-7EnmPHnYB5n4P zX4Ql2jLwO5ncakn(B^6{zqg?qFzF&Le27sU8Nv{&*O=dRu{a+%!Ft=6V@03y+2+=p zp%}2ta)S|Yx^2DR+A!|9VGSd7DiF`5m(A+A@tex82O+*2D#N}m*veIx(GP^saEn=A z9oe?hg=AcF&qzm{z7QB@6fXl#D8JB2kzfPtRj^t|y>Wf2=!BhJMsXM^#<#Y2if^^t zGc5e{d%KJVmoze2K8}bv=gF9p8<9WwT5!jm9Jh``T*#g}C>=Elu=!wWT}9@gHy2h3 z>pj;tST|nXv96baoOZdbyE#xXoFu=yIj9jg@@5eP%?Av%GJML77(V(Y(dJG4QTr|Z zGM9YvZgKm&>k^+xl=-X#R|Lk`l88}DRA+%vpq?scCc%w zW~^BhxhGdywh(@~KEDhD7-qgVQ5! 
zQiQvdez|}E)1#7AP;3NGc}aEY3ptIz4~KVUyhj_y;v>J88ivae7+(XiNpTMmd$N2m z#aCujuH}QYch}&u=Vn3ya;d0pV%}+797rC*pcB)fZJ1C(D^Tn8o@xUtzKbyXk9PSP zFnHN!$AfLFWxpPGqWPP(z~RdZ@?N{!%VQVRUeCI6KAuprMpmwam~$}b8A|JmCQh@O zC0FQ}b#}#WHX3a`pz_s2_tJt;!6z@rc{K_*&cv$&?c`0idx?+^(!5*{5i7_@l- z40$@G$>!8|dTTh62oZ*ybe>-lz)BH@RF%LOV=|SBO^g~QF-JcT*3&xN;(nXFqvoUl z6&sK&w4SZ(C6M}d6GqErKnv8gm-d;1P(w>*2U#`-Nihw{(}d;Xs@)@x$ffBrs&@u- z6Ty$eHYTffA3Ypq4AQL=#HtboBtXP52CkVYba^89AX0avrN4-8wrIKh{f1z?L79vN zuzd`zAdpsv^Dn*o-ZH*(8#eArBEUMFGF%A3I4A+)bjITQ?m)T#o-|Ph($>Xuh;(IF z3r2TdU(>%B>(X`LLnE$cx)m9LI7IfYlgj%UdH#bQ;}-l{&NWrV@i5!#^dE453wT`( zS8WRq6(UC}1KFF2ZoMNe^RVXrADw;PPHyoNGs~8vQ`k+2Z@X8`;3ycvgz;O0HHr#6 zvOd1pv3!j8d>5P!gHP1WKfi0d-P;&Y?7k->WIDVC@50bldm>$(>ffgB^V5P1?4D&U z284!%l7b^>0;XXzBdUEBS{LvLT3A+%w9xVfm6IpvfJ=QMJ;zQ(=BV1)$>W18Y7Q@y_ABcQYWk=YghWHwH&>z}t)1OqwZ)Ib z8I%>kj1?oq1#xN(Hm`8O35)Rl6(bM$e*OqyvQgku7Tvgh5$YQZ`}U1kKT^&i>IYW2 zSBw!CY$4rjUVAM4Qv323=GfYTP{X!d1sXb&^le<<_JJWNj8W}_Nl!xLNGOt2x40Br zHy;5tpQ^8v3yKHy!jj|Ac>3;C7t2ODYvHLYj{B)Z5m&MS-T9QyA(`>g>GGk~I|=Hp zQ?tH+6k47YpqrlpH4*fb*Is9H{kwQ_EUC2ntCB04Z&ePr$9{(>BQw+-6;mHvu+>y% zvN%I#34o?j!hBVVrsqKIHvkIa28NaP`|)JK9p}jb?fv9UQVb}fD~?C(7tt$$^13yr z7$~dEr$FJo&!0I7xH%2O7?U2Im*LZW1Tq7rnf1z8{W{1ewVcuG0j~Z@E`GM?FrJ7z z1qGNoSx%Kz)20Gh842#@2k)f(66H1^e#k}2y+n8Ei`uB}GUSqmY9T;UHkmNNKw_X0 z4hnY$oPFgg+2v!M!h{@}au=zs*)kfoNk7^1deoHv71|$&i~Y2rxy=F#RMBKs>mzy! 
zW?u4D{q$#!2D#PF4AkV~`XwWyQ=NI^fW!dq3Xmf=l^P2-79y8bib}r%6)(FZ>&%sa z1?}t&&VKBg)SJP!r65XT1If^HN0Sk#Tt~U_90PfG2vGiJrHOr&5d*5#V%b+7gL1N$ z1{zkQLHiAurw1RiJWX~zyIbbR6?aUf-8YeDK!Sfb$eoj>*Je|Y zWIlBufKmt8w(1Qz((YujxmiuC1FFyrP@?x~x$*R@j$tXjw)4LxX{i#qb1^P`v}gv; zX^~aeIzr>bx4f@6B2;1Gk_tVIgN|f>oO7Hx>x=M-wYWQ~>0Sfa9`)OSD}myDW@)q$nRVcdSe8e(5ioVJjuGRhu~x)PNX3M8BJ4nGXR=`oA10XN-s{42j>( z^`)=50|v}y_WpraV05x)#v}G?n(I^p;#E$HUo&5e4~N>qu$AiMB<8tq$8Q+1yy61j zi`)VHs9l)2Zr7saI*~E2$JW_1q9o>*0qQ{Xl#W~Ri|n0*0yLo=2rulK z75Rzj>g3|3oy#+F*-LqbH`Af%#`a!Ok6%VZLd?DBW`$oiq(ifF@ej>dz#bA}z;sR6 zL>sV_zqV=r*CVJAng(slI^FR8s>o}a>;M6>Kdpi5)_I&UHid1I_WHuE81&Cj_Gjb9r`L0mAZ%mkqsRePtLamgxH zAty&4mp%xT7`n&~%*Ez+O>WQJf}A2f`b5G#&-l{gN9+gQH{AHZdnL_Y`ZpeHqq=DR zF|Q8(=5+(PL7y!%bZK)H=;fGe@t!8@qa0A>+UXmic|LfBcJ@vc)#?*6h{?*xKVyke zh$@fK5tdFanUHur2)4sPYy{ik+<6sj9?;vrg1`02)W4e%{nsW5nAcy~=&$Plqg&wL zH;5`gN)ZB+2}Jey(!yJ(2|*q3s*aYBt{V8uabCd2;`bXIduk=Qp8Yc#A>wn*0{&!>l-;Mo)|8DF92=-^+1pR;8*#9fq z0B^c@UtB)AAB~980MA?`ucDm85?C$rBjURaO#?K5B8~t#ivf}{JJ>DTJf+h;pA`Qw zk8b|>o&kXS-w$<6tbrfp7`u)AJV+Ao+;^IxZJ6b}j(@e6Pk-|s4RFD~->_9>0?)N% zmEZrucwL4uD8xsbzGDAXAj1o5lp-}IT_|h+m69>em(DvoEuE wK!TWVlux~%lY0TV)kjRr?0&e?OS>yH{lS|f?!;;>`AM$6j*)iZIs1tJ0LXL&9smFU diff --git a/content/develop/use/manual/patterns/indexes/index.md b/content/develop/use/manual/patterns/indexes/index.md deleted file mode 100644 index 1eea4a79fd..0000000000 --- a/content/develop/use/manual/patterns/indexes/index.md +++ /dev/null @@ -1,755 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: 'Building secondary indexes in Redis - - ' -linkTitle: Secondary indexing -title: Secondary indexing -weight: 1 ---- - -Redis is not exactly a key-value store, since values can be complex data structures. However it has an external key-value shell: at API level data is addressed by the key name. It is fair to say that, natively, Redis only offers *primary key access*. 
However since Redis is a data structures server, its capabilities can be used for indexing, in order to create secondary indexes of different kinds, including composite (multi-column) indexes. - -This document explains how it is possible to create indexes in Redis using the following data structures: - -* Sorted sets to create secondary indexes by ID or other numerical fields. -* Sorted sets with lexicographical ranges for creating more advanced secondary indexes, composite indexes and graph traversal indexes. -* Sets for creating random indexes. -* Lists for creating simple iterable indexes and last N items indexes. - -Implementing and maintaining indexes with Redis is an advanced topic, so most -users that need to perform complex queries on data should understand if they -are better served by a relational store. However often, especially in caching -scenarios, there is the explicit need to store indexed data into Redis in order to speedup common queries which require some form of indexing in order to be executed. - -Simple numerical indexes with sorted sets -=== - -The simplest secondary index you can create with Redis is by using the -sorted set data type, which is a data structure representing a set of -elements ordered by a floating point number which is the *score* of -each element. Elements are ordered from the smallest to the highest score. - -Since the score is a double precision float, indexes you can build with -vanilla sorted sets are limited to things where the indexing field is a number -within a given range. - -The two commands to build these kind of indexes are [`ZADD`](/commands/zadd) and -[`ZRANGE`](/commands/zrange) with the `BYSCORE` argument to respectively add items and retrieve items within a -specified range. - -For instance, it is possible to index a set of person names by their -age by adding element to a sorted set. The element will be the name of the -person and the score will be the age. 
- - ZADD myindex 25 Manuel - ZADD myindex 18 Anna - ZADD myindex 35 Jon - ZADD myindex 67 Helen - -In order to retrieve all persons with an age between 20 and 40, the following -command can be used: - - ZRANGE myindex 20 40 BYSCORE - 1) "Manuel" - 2) "Jon" - -By using the **WITHSCORES** option of [`ZRANGE`](/commands/zrange) it is also possible -to obtain the scores associated with the returned elements. - -The [`ZCOUNT`](/commands/zcount) command can be used in order to retrieve the number of elements -within a given range, without actually fetching the elements, which is also -useful, especially given the fact the operation is executed in logarithmic -time regardless of the size of the range. - -Ranges can be inclusive or exclusive, please refer to the [`ZRANGE`](/commands/zrange) -command documentation for more information. - -**Note**: Using the [`ZRANGE`](/commands/zrange) with the `BYSCORE` and `REV` arguments, it is possible to query a range in -reversed order, which is often useful when data is indexed in a given -direction (ascending or descending) but we want to retrieve information -the other way around. - -Using objects IDs as associated values ---- - -In the above example we associated names to ages. However in general we -may want to index some field of an object which is stored elsewhere. -Instead of using the sorted set value directly to store the data associated -with the indexed field, it is possible to store just the ID of the object. - -For example I may have Redis hashes representing users. 
Each user is -represented by a single key, directly accessible by ID: - - HMSET user:1 id 1 username antirez ctime 1444809424 age 38 - HMSET user:2 id 2 username maria ctime 1444808132 age 42 - HMSET user:3 id 3 username jballard ctime 1443246218 age 33 - -If I want to create an index in order to query users by their age, I -could do: - - ZADD user.age.index 38 1 - ZADD user.age.index 42 2 - ZADD user.age.index 33 3 - -This time the value associated with the score in the sorted set is the -ID of the object. So once I query the index with [`ZRANGE`](/commands/zrange) with the `BYSCORE` argument, I'll -also have to retrieve the information I need with [`HGETALL`](/commands/hgetall) or similar -commands. The obvious advantage is that objects can change without touching -the index, as long as we don't change the indexed field. - -In the next examples we'll almost always use IDs as values associated with -the index, since this is usually the more sounding design, with a few -exceptions. - -Updating simple sorted set indexes ---- - -Often we index things which change over time. In the above -example, the age of the user changes every year. In such a case it would -make sense to use the birth date as index instead of the age itself, -but there are other cases where we simply want some field to change from -time to time, and the index to reflect this change. 
- -The [`ZADD`](/commands/zadd) command makes updating simple indexes a very trivial operation -since re-adding back an element with a different score and the same value -will simply update the score and move the element at the right position, -so if the user `antirez` turned 39 years old, in order to update the -data in the hash representing the user, and in the index as well, we need -to execute the following two commands: - - HSET user:1 age 39 - ZADD user.age.index 39 1 - -The operation may be wrapped in a [`MULTI`](/commands/multi)/[`EXEC`](/commands/exec) transaction in order to -make sure both fields are updated or none. - -Turning multi dimensional data into linear data ---- - -Indexes created with sorted sets are able to index only a single numerical -value. Because of this you may think it is impossible to index something -which has multiple dimensions using this kind of indexes, but actually this -is not always true. If you can efficiently represent something -multi-dimensional in a linear way, they it is often possible to use a simple -sorted set for indexing. - -For example the [Redis geo indexing API](/commands/geoadd) uses a sorted -set to index places by latitude and longitude using a technique called -[Geo hash](https://en.wikipedia.org/wiki/Geohash). The sorted set score -represents alternating bits of longitude and latitude, so that we map the -linear score of a sorted set to many small *squares* in the earth surface. -By doing an 8+1 style center plus neighborhoods search it is possible to -retrieve elements by radius. - -Limits of the score ---- - -Sorted set elements scores are double precision floats. It means that -they can represent different decimal or integer values with different -errors, because they use an exponential representation internally. -However what is interesting for indexing purposes is that the score is -always able to represent without any error numbers between -9007199254740992 -and 9007199254740992, which is `-/+ 2^53`. 
- -When representing much larger numbers, you need a different form of indexing -that is able to index numbers at any precision, called a lexicographical -index. - -Lexicographical indexes -=== - -Redis sorted sets have an interesting property. When elements are added -with the same score, they are sorted lexicographically, comparing the -strings as binary data with the `memcmp()` function. - -For people that don't know the C language nor the `memcmp` function, what -it means is that elements with the same score are sorted comparing the -raw values of their bytes, byte after byte. If the first byte is the same, -the second is checked and so forth. If the common prefix of two strings is -the same then the longer string is considered the greater of the two, -so "foobar" is greater than "foo". - -There are commands such as [`ZRANGE`](/commands/zrange) and [`ZLEXCOUNT`](/commands/zlexcount) that -are able to query and count ranges in a lexicographically fashion, assuming -they are used with sorted sets where all the elements have the same score. - -This Redis feature is basically equivalent to a `b-tree` data structure which -is often used in order to implement indexes with traditional databases. -As you can guess, because of this, it is possible to use this Redis data -structure in order to implement pretty fancy indexes. - -Before we dive into using lexicographical indexes, let's check how -sorted sets behave in this special mode of operation. Since we need to -add elements with the same score, we'll always use the special score of -zero. - - ZADD myindex 0 baaa - ZADD myindex 0 abbb - ZADD myindex 0 aaaa - ZADD myindex 0 bbbb - -Fetching all the elements from the sorted set immediately reveals that they -are ordered lexicographically. - - ZRANGE myindex 0 -1 - 1) "aaaa" - 2) "abbb" - 3) "baaa" - 4) "bbbb" - -Now we can use [`ZRANGE`](/commands/zrange) with the `BYLEX` argument in order to perform range queries. 
- - ZRANGE myindex [a (b BYLEX - 1) "aaaa" - 2) "abbb" - -Note that in the range queries we prefixed the `min` and `max` elements -identifying the range with the special characters `[` and `(`. -This prefixes are mandatory, and they specify if the elements -of the range are inclusive or exclusive. So the range `[a (b` means give me -all the elements lexicographically between `a` inclusive and `b` exclusive, -which are all the elements starting with `a`. - -There are also two more special characters indicating the infinitely negative -string and the infinitely positive string, which are `-` and `+`. - - ZRANGE myindex [b + BYLEX - 1) "baaa" - 2) "bbbb" - -That's it basically. Let's see how to use these features to build indexes. - -A first example: completion ---- - -An interesting application of indexing is completion. Completion is what -happens when you start typing your query into a search engine: the user -interface will anticipate what you are likely typing, providing common -queries that start with the same characters. - -A naive approach to completion is to just add every single query we -get from the user into the index. For example if the user searches `banana` -we'll just do: - - ZADD myindex 0 banana - -And so forth for each search query ever encountered. Then when we want to -complete the user input, we execute a range query using [`ZRANGE`](/commands/zrange) with the `BYLEX` argument. -Imagine the user is typing "bit" inside the search form, and we want to -offer possible search keywords starting for "bit". We send Redis a command -like that: - - ZRANGE myindex "[bit" "[bit\xff" BYLEX - -Basically we create a range using the string the user is typing right now -as start, and the same string plus a trailing byte set to 255, which is `\xff` in the example, as the end of the range. This way we get all the strings that start for the string the user is typing. 
- -Note that we don't want too many items returned, so we may use the **LIMIT** option in order to reduce the number of results. - -Adding frequency into the mix ---- - -The above approach is a bit naive, because all the user searches are the same -in this way. In a real system we want to complete strings according to their -frequency: very popular searches will be proposed with a higher probability -compared to search strings typed very rarely. - -In order to implement something which depends on the frequency, and at the -same time automatically adapts to future inputs, by purging searches that -are no longer popular, we can use a very simple *streaming algorithm*. - -To start, we modify our index in order to store not just the search term, -but also the frequency the term is associated with. So instead of just adding -`banana` we add `banana:1`, where 1 is the frequency. - - ZADD myindex 0 banana:1 - -We also need logic in order to increment the index if the search term -already exists in the index, so what we'll actually do is something like -that: - - ZRANGE myindex "[banana:" + BYLEX LIMIT 0 1 - 1) "banana:1" - -This will return the single entry of `banana` if it exists. Then we -can increment the associated frequency and send the following two -commands: - - ZREM myindex 0 banana:1 - ZADD myindex 0 banana:2 - -Note that because it is possible that there are concurrent updates, the -above three commands should be send via a [Lua script](/commands/eval) -instead, so that the Lua script will atomically get the old count and -re-add the item with incremented score. - -So the result will be that, every time a user searches for `banana` we'll -get our entry updated. - -There is more: our goal is to just have items searched very frequently. -So we need some form of purging. 
When we actually query the index -in order to complete the user input, we may see something like that: - - ZRANGE myindex "[banana:" + BYLEX LIMIT 0 10 - 1) "banana:123" - 2) "banaooo:1" - 3) "banned user:49" - 4) "banning:89" - -Apparently nobody searches for "banaooo", for example, but the query was -performed a single time, so we end presenting it to the user. - -This is what we can do. Out of the returned items, we pick a random one, -decrement its score by one, and re-add it with the new score. -However if the score reaches 0, we simply remove the item from the list. -You can use much more advanced systems, but the idea is that the index in -the long run will contain top searches, and if top searches will change over -the time it will adapt automatically. - -A refinement to this algorithm is to pick entries in the list according to -their weight: the higher the score, the less likely entries are picked -in order to decrement its score, or evict them. - -Normalizing strings for case and accents ---- - -In the completion examples we always used lowercase strings. However -reality is much more complex than that: languages have capitalized names, -accents, and so forth. - -One simple way do deal with this issues is to actually normalize the -string the user searches. Whatever the user searches for "Banana", -"BANANA" or "Ba'nana" we may always turn it into "banana". - -However sometimes we may like to present the user with the original -item typed, even if we normalize the string for indexing. In order to -do this, what we do is to change the format of the index so that instead -of just storing `term:frequency` we store `normalized:frequency:original` -like in the following example: - - ZADD myindex 0 banana:273:Banana - -Basically we add another field that we'll extract and use only for -visualization. Ranges will always be computed using the normalized strings -instead. This is a common trick which has multiple applications. 
- -Adding auxiliary information in the index ---- - -When using a sorted set in a direct way, we have two different attributes -for each object: the score, which we use as an index, and an associated -value. When using lexicographical indexes instead, the score is always -set to 0 and basically not used at all. We are left with a single string, -which is the element itself. - -Like we did in the previous completion examples, we are still able to -store associated data using separators. For example we used the colon in -order to add the frequency and the original word for completion. - -In general we can add any kind of associated value to our indexing key. -In order to use a lexicographical index to implement a simple key-value store -we just store the entry as `key:value`: - - ZADD myindex 0 mykey:myvalue - -And search for the key with: - - ZRANGE myindex [mykey: + BYLEX LIMIT 0 1 - 1) "mykey:myvalue" - -Then we extract the part after the colon to retrieve the value. -However a problem to solve in this case is collisions. The colon character -may be part of the key itself, so it must be chosen in order to never -collide with the key we add. - -Since lexicographical ranges in Redis are binary safe you can use any -byte or any sequence of bytes. However if you receive untrusted user -input, it is better to use some form of escaping in order to guarantee -that the separator will never happen to be part of the key. - -For example if you use two null bytes as separator `"\0\0"`, you may -want to always escape null bytes into two bytes sequences in your strings. - -Numerical padding ---- - -Lexicographical indexes may look like good only when the problem at hand -is to index strings. Actually it is very simple to use this kind of index -in order to perform indexing of arbitrary precision numbers. 
- -In the ASCII character set, digits appear in the order from 0 to 9, so -if we left-pad numbers with leading zeroes, the result is that comparing -them as strings will order them by their numerical value. - - ZADD myindex 0 00324823481:foo - ZADD myindex 0 12838349234:bar - ZADD myindex 0 00000000111:zap - - ZRANGE myindex 0 -1 - 1) "00000000111:zap" - 2) "00324823481:foo" - 3) "12838349234:bar" - -We effectively created an index using a numerical field which can be as -big as we want. This also works with floating point numbers of any precision -by making sure we left pad the numerical part with leading zeroes and the -decimal part with trailing zeroes like in the following list of numbers: - - 01000000000000.11000000000000 - 01000000000000.02200000000000 - 00000002121241.34893482930000 - 00999999999999.00000000000000 - -Using numbers in binary form ---- - -Storing numbers in decimal may use too much memory. An alternative approach -is just to store numbers, for example 128 bit integers, directly in their -binary form. However for this to work, you need to store the numbers in -*big endian format*, so that the most significant bytes are stored before -the least significant bytes. This way when Redis compares the strings with -`memcmp()`, it will effectively sort the numbers by their value. - -Keep in mind that data stored in binary format is less observable for -debugging, harder to parse and export. So it is definitely a trade off. - -Composite indexes -=== - -So far we explored ways to index single fields. However we all know that -SQL stores are able to create indexes using multiple fields. For example -I may index products in a very large store by room number and price. - -I need to run queries in order to retrieve all the products in a given -room having a given price range. What I can do is to index each product -in the following way: - - ZADD myindex 0 0056:0028.44:90 - ZADD myindex 0 0034:0011.00:832 - -Here the fields are `room:price:product_id`. 
I used just four digits padding -in the example for simplicity. The auxiliary data (the product ID) does not -need any padding. - -With an index like that, to get all the products in room 56 having a price -between 10 and 30 dollars is very easy. We can just run the following -command: - - ZRANGE myindex [0056:0010.00 [0056:0030.00 BYLEX - -The above is called a composed index. Its effectiveness depends on the -order of the fields and the queries I want to run. For example the above -index cannot be used efficiently in order to get all the products having -a specific price range regardless of the room number. However I can use -the primary key in order to run queries regardless of the price, like -*give me all the products in room 44*. - -Composite indexes are very powerful, and are used in traditional stores -in order to optimize complex queries. In Redis they could be useful both -to implement a very fast in-memory Redis index of something stored into -a traditional data store, or in order to directly index Redis data. - -Updating lexicographical indexes -=== - -The value of the index in a lexicographical index can get pretty fancy -and hard or slow to rebuild from what we store about the object. So one -approach to simplify the handling of the index, at the cost of using more -memory, is to also take alongside to the sorted set representing the index -a hash mapping the object ID to the current index value. - -So for example, when we index we also add to a hash: - - MULTI - ZADD myindex 0 0056:0028.44:90 - HSET index.content 90 0056:0028.44:90 - EXEC - -This is not always needed, but simplifies the operations of updating -the index. In order to remove the old information we indexed for the object -ID 90, regardless of the *current* fields values of the object, we just -have to retrieve the hash value by object ID and [`ZREM`](/commands/zrem) it in the sorted -set view. 
- -Representing and querying graphs using a hexastore -=== - -One cool thing about composite indexes is that they are handy in order -to represent graphs, using a data structure which is called -[Hexastore](http://www.vldb.org/pvldb/vol1/1453965.pdf). - -The hexastore provides a representation for relations between objects, -formed by a *subject*, a *predicate* and an *object*. -A simple relation between objects could be: - - antirez is-friend-of matteocollina - -In order to represent this relation I can store the following element -in my lexicographical index: - - ZADD myindex 0 spo:antirez:is-friend-of:matteocollina - -Note that I prefixed my item with the string **spo**. It means that -the item represents a subject,predicate,object relation. - -In can add 5 more entries for the same relation, but in a different order: - - ZADD myindex 0 sop:antirez:matteocollina:is-friend-of - ZADD myindex 0 ops:matteocollina:is-friend-of:antirez - ZADD myindex 0 osp:matteocollina:antirez:is-friend-of - ZADD myindex 0 pso:is-friend-of:antirez:matteocollina - ZADD myindex 0 pos:is-friend-of:matteocollina:antirez - -Now things start to be interesting, and I can query the graph in many -different ways. For example, who are all the people `antirez` -*is friend of*? - - ZRANGE myindex "[spo:antirez:is-friend-of:" "[spo:antirez:is-friend-of:\xff" BYLEX - 1) "spo:antirez:is-friend-of:matteocollina" - 2) "spo:antirez:is-friend-of:wonderwoman" - 3) "spo:antirez:is-friend-of:spiderman" - -Or, what are all the relationships `antirez` and `matteocollina` have where -the first is the subject and the second is the object? - - ZRANGE myindex "[sop:antirez:matteocollina:" "[sop:antirez:matteocollina:\xff" BYLEX - 1) "sop:antirez:matteocollina:is-friend-of" - 2) "sop:antirez:matteocollina:was-at-conference-with" - 3) "sop:antirez:matteocollina:talked-with" - -By combining different queries, I can ask fancy questions. 
For example: -*Who are all my friends that, like beer, live in Barcelona, and matteocollina consider friends as well?* -To get this information I start with an `spo` query to find all the people -I'm friend with. Then for each result I get I perform an `spo` query -to check if they like beer, removing the ones for which I can't find -this relation. I do it again to filter by city. Finally I perform an `ops` -query to find, of the list I obtained, who is considered friend by -matteocollina. - -Make sure to check [Matteo Collina's slides about Levelgraph](http://nodejsconfit.levelgraph.io/) in order to better understand these ideas. - -Multi dimensional indexes -=== - -A more complex type of index is an index that allows you to perform queries -where two or more variables are queried at the same time for specific -ranges. For example I may have a data set representing persons age and -salary, and I want to retrieve all the people between 50 and 55 years old -having a salary between 70000 and 85000. - -This query may be performed with a multi column index, but this requires -us to select the first variable and then scan the second, which means we -may do a lot more work than needed. It is possible to perform these kinds of -queries involving multiple variables using different data structures. -For example, multi-dimensional trees such as *k-d trees* or *r-trees* are -sometimes used. Here we'll describe a different way to index data into -multiple dimensions, using a representation trick that allows us to perform -the query in a very efficient way using Redis lexicographical ranges. - -Let's say we have points in the space, which represent our data samples, where `x` and `y` are our coordinates. The max value of both variables is 400. - -In the next figure, the blue box represents our query. We want all the points where `x` is between 50 and 100, and where `y` is between 100 and 300. 
- -![Points in the space](2idx_0.png) - -In order to represent data that makes these kinds of queries fast to perform, -we start by padding our numbers with 0. So for example imagine we want to -add the point 10,25 (x,y) to our index. Given that the maximum range in the -example is 400 we can just pad to three digits, so we obtain: - - x = 010 - y = 025 - -Now what we do is to interleave the digits, taking the leftmost digit -in x, and the leftmost digit in y, and so forth, in order to create a single -number: - - 001205 - -This is our index, however in order to more easily reconstruct the original -representation, if we want (at the cost of space), we may also add the -original values as additional columns: - - 001205:10:25 - -Now, let's reason about this representation and why it is useful in the -context of range queries. For example let's take the center of our blue -box, which is at `x=75` and `y=200`. We can encode this number as we did -earlier by interleaving the digits, obtaining: - - 027050 - -What happens if we substitute the last two digits respectively with 00 and 99? -We obtain a range which is lexicographically continuous: - - 027000 to 027099 - -What this maps to is to a square representing all values where the `x` -variable is between 70 and 79, and the `y` variable is between 200 and 209. -To identify this specific area, we can write random points in that interval. - -![Small area](2idx_1.png) - -So the above lexicographic query allows us to easily query for points in -a specific square in the picture. However the square may be too small for -the box we are searching, so that too many queries are needed. -So we can do the same but instead of replacing the last two digits with 00 -and 99, we can do it for the last four digits, obtaining the following -range: - - 020000 029999 - -This time the range represents all the points where `x` is between 0 and 99 -and `y` is between 200 and 299. 
Drawing random points in this interval -shows us this larger area. - -![Large area](2idx_2.png) - -So now our area is too big for our query, and still our search box is -not completely included. We need more granularity, but we can easily obtain -it by representing our numbers in binary form. This time, when we replace -digits instead of getting squares which are ten times bigger, we get squares -which are just two times bigger. - -Our numbers in binary form, assuming we need just 9 bits for each variable -(in order to represent numbers up to 400 in value) would be: - - x = 75 -> 001001011 - y = 200 -> 011001000 - -So by interleaving digits, our representation in the index would be: - - 000111000011001010:75:200 - -Let's see what are our ranges as we substitute the last 2, 4, 6, 8, ... -bits with 0s ad 1s in the interleaved representation: - - 2 bits: x between 74 and 75, y between 200 and 201 (range=2) - 4 bits: x between 72 and 75, y between 200 and 203 (range=4) - 6 bits: x between 72 and 79, y between 200 and 207 (range=8) - 8 bits: x between 64 and 79, y between 192 and 207 (range=16) - -And so forth. Now we have definitely better granularity! -As you can see substituting N bits from the index gives us -search boxes of side `2^(N/2)`. - -So what we do is check the dimension where our search box is smaller, -and check the nearest power of two to this number. Our search box -was 50,100 to 100,300, so it has a width of 50 and a height of 200. -We take the smaller of the two, 50, and check the nearest power of two -which is 64. 64 is 2^6, so we would work with indexes obtained replacing -the latest 12 bits from the interleaved representation (so that we end -replacing just 6 bits of each variable). - -However single squares may not cover all our search, so we may need more. -What we do is to start with the left bottom corner of our search box, -which is 50,100, and find the first range by substituting the last 6 bits -in each number with 0. 
Then we do the same with the right top corner. - -With two trivial nested for loops where we increment only the significant -bits, we can find all the squares between these two. For each square we -convert the two numbers into our interleaved representation, and create -the range using the converted representation as our start, and the same -representation but with the latest 12 bits turned on as end range. - -For each square found we perform our query and get the elements inside, -removing the elements which are outside our search box. - -Turning this into code is simple. Here is a Ruby example: - - def spacequery(x0,y0,x1,y1,exp) - bits=exp*2 - x_start = x0/(2**exp) - x_end = x1/(2**exp) - y_start = y0/(2**exp) - y_end = y1/(2**exp) - (x_start..x_end).each{|x| - (y_start..y_end).each{|y| - x_range_start = x*(2**exp) - x_range_end = x_range_start | ((2**exp)-1) - y_range_start = y*(2**exp) - y_range_end = y_range_start | ((2**exp)-1) - puts "#{x},#{y} x from #{x_range_start} to #{x_range_end}, y from #{y_range_start} to #{y_range_end}" - - # Turn it into interleaved form for ZRANGE query. - # We assume we need 9 bits for each integer, so the final - # interleaved representation will be 18 bits. - xbin = x_range_start.to_s(2).rjust(9,'0') - ybin = y_range_start.to_s(2).rjust(9,'0') - s = xbin.split("").zip(ybin.split("")).flatten.compact.join("") - # Now that we have the start of the range, calculate the end - # by replacing the specified number of bits from 0 to 1. - e = s[0..-(bits+1)]+("1"*bits) - puts "ZRANGE myindex [#{s} [#{e} BYLEX" - } - } - end - - spacequery(50,100,100,300,6) - -While non immediately trivial this is a very useful indexing strategy that -in the future may be implemented in Redis in a native way. -For now, the good thing is that the complexity may be easily encapsulated -inside a library that can be used in order to perform indexing and queries. 
-One example of such library is [Redimension](https://github.com/antirez/redimension), a proof of concept Ruby library which indexes N-dimensional data inside Redis using the technique described here. - -Multi dimensional indexes with negative or floating point numbers -=== - -The simplest way to represent negative values is just to work with unsigned -integers and represent them using an offset, so that when you index, before -translating numbers in the indexed representation, you add the absolute value -of your smaller negative integer. - -For floating point numbers, the simplest approach is probably to convert them -to integers by multiplying the integer for a power of ten proportional to the -number of digits after the dot you want to retain. - -Non range indexes -=== - -So far we checked indexes which are useful to query by range or by single -item. However other Redis data structures such as Sets or Lists can be used -in order to build other kind of indexes. They are very commonly used but -maybe we don't always realize they are actually a form of indexing. - -For instance I can index object IDs into a Set data type in order to use -the *get random elements* operation via [`SRANDMEMBER`](/commands/srandmember) in order to retrieve -a set of random objects. Sets can also be used to check for existence when -all I need is to test if a given item exists or not or has a single boolean -property or not. - -Similarly lists can be used in order to index items into a fixed order. -I can add all my items into a Redis list and rotate the list with -[`RPOPLPUSH`](/commands/rpoplpush) using the same key name as source and destination. This is useful -when I want to process a given set of items again and again forever in the -same order. Think of an RSS feed system that needs to refresh the local copy -periodically. 
- -Another popular index often used with Redis is a **capped list**, where items -are added with [`LPUSH`](/commands/lpush) and trimmed with [`LTRIM`](/commands/ltrim), in order to create a view -with just the latest N items encountered, in the same order they were -seen. - -Index inconsistency -=== - -Keeping the index updated may be challenging, in the course of months -or years it is possible that inconsistencies are added because of software -bugs, network partitions or other events. - -Different strategies could be used. If the index data is outside Redis -*read repair* can be a solution, where data is fixed in a lazy way when -it is requested. When we index data which is stored in Redis itself -the [`SCAN`](/commands/scan) family of commands can be used in order to verify, update or -rebuild the index from scratch, incrementally. diff --git a/content/develop/use/manual/patterns/twitter-clone.md b/content/develop/use/manual/patterns/twitter-clone.md deleted file mode 100644 index b8d3fb823a..0000000000 --- a/content/develop/use/manual/patterns/twitter-clone.md +++ /dev/null @@ -1,460 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: Learn several Redis patterns by building a Twitter clone -linkTitle: Patterns example -title: Redis patterns example -weight: 20 ---- - -This article describes the design and implementation of a [very simple Twitter clone](https://github.com/antirez/retwis) written using PHP with Redis as the only database. The programming community has traditionally considered key-value stores as a special purpose database that couldn't be used as a drop-in replacement for a relational database for the development of web applications. This article will try to show that Redis data structures on top of a key-value layer are an effective data model to implement many kinds of applications. - -Note: the original version of this article was written in 2009 when Redis was -released. 
It was not exactly clear at that time that the Redis data model was -suitable to write entire applications. Now after 5 years there are many cases of -applications using Redis as their main store, so the goal of the article today -is to be a tutorial for Redis newcomers. You'll learn how to design a simple -data layout using Redis, and how to apply different data structures. - -Our Twitter clone, called [Retwis](https://github.com/antirez/retwis), is structurally simple, has very good performance, and can be distributed among any number of web and Redis servers with little efforts. [View the Retwis source code](https://github.com/antirez/retwis). - -I used PHP for the example because of its universal readability. The same (or better) results can be obtained using Ruby, Python, Erlang, and so on. -A few clones exist (however not all the clones use the same data layout as the -current version of this tutorial, so please, stick with the official PHP -implementation for the sake of following the article better). - -* [Retwis-RB](https://github.com/danlucraft/retwis-rb) is a port of Retwis to Ruby and Sinatra written by Daniel Lucraft. -* [Retwis-J](https://docs.spring.io/spring-data/data-keyvalue/examples/retwisj/current/) is a port of Retwis to Java, using the Spring Data Framework, written by [Costin Leau](http://twitter.com/costinl). Its source code can be found on [GitHub](https://github.com/SpringSource/spring-data-keyvalue-examples), and there is comprehensive documentation available at [springsource.org](http://j.mp/eo6z6I). - -What is a key-value store? ---- -The essence of a key-value store is the ability to store some data, called a _value_, inside a key. The value can be retrieved later only if we know the specific key it was stored in. There is no direct way to search for a key by value. In some sense, it is like a very large hash/dictionary, but it is persistent, i.e. when your application ends, the data doesn't go away. 
So, for example, I can use the command [`SET`](/commands/set) to store the value *bar* in the key *foo*: - - SET foo bar - -Redis stores data permanently, so if I later ask "_What is the value stored in key foo?_" Redis will reply with *bar*: - - GET foo => bar - -Other common operations provided by key-value stores are [`DEL`](/commands/del), to delete a given key and its associated value, SET-if-not-exists (called [`SETNX`](/commands/setnx) on Redis), to assign a value to a key only if the key does not already exist, and [`INCR`](/commands/incr), to atomically increment a number stored in a given key: - - SET foo 10 - INCR foo => 11 - INCR foo => 12 - INCR foo => 13 - -Atomic operations ---- - -There is something special about [`INCR`](/commands/incr). You may wonder why Redis provides such an operation if we can do it ourselves with a bit of code? After all, it is as simple as: - - x = GET foo - x = x + 1 - SET foo x - -The problem is that incrementing this way will work as long as there is only one client working with the key _foo_ at one time. See what happens if two clients are accessing this key at the same time: - - x = GET foo (yields 10) - y = GET foo (yields 10) - x = x + 1 (x is now 11) - y = y + 1 (y is now 11) - SET foo x (foo is now 11) - SET foo y (foo is now 11) - -Something is wrong! We incremented the value two times, but instead of going from 10 to 12, our key holds 11. This is because the increment done with `GET / increment / SET` *is not an atomic operation*. Instead the INCR provided by Redis, Memcached, ..., are atomic implementations, and the server will take care of protecting the key during the time needed to complete the increment in order to prevent simultaneous accesses. - -What makes Redis different from other key-value stores is that it provides other operations similar to INCR that can be used to model complex problems. 
This is why you can use Redis to write whole web applications without using another database like an SQL database, and without going crazy. - -Beyond key-value stores: lists ---- - -In this section we will see which Redis features we need to build our Twitter clone. The first thing to know is that Redis values can be more than strings. Redis supports Lists, Sets, Hashes, Sorted Sets, Bitmaps, and HyperLogLog types as values, and there are atomic operations to operate on them so we are safe even with multiple accesses to the same key. Let's start with Lists: - - LPUSH mylist a (now mylist holds 'a') - LPUSH mylist b (now mylist holds 'b','a') - LPUSH mylist c (now mylist holds 'c','b','a') - -[`LPUSH`](/commands/lpush) means _Left Push_, that is, add an element to the left (or to the head) of the list stored in _mylist_. If the key _mylist_ does not exist it is automatically created as an empty list before the PUSH operation. As you can imagine, there is also an [`RPUSH`](/commands/rpush) operation that adds the element to the right of the list (on the tail). This is very useful for our Twitter clone. User updates can be added to a list stored in `username:updates`, for instance. - -There are operations to get data from Lists, of course. For instance, LRANGE returns a range from the list, or the whole list. - - LRANGE mylist 0 1 => c,b - -LRANGE uses zero-based indexes - that is the first element is 0, the second 1, and so on. The command arguments are `LRANGE key first-index last-index`. The _last-index_ argument can be negative, with a special meaning: -1 is the last element of the list, -2 the penultimate, and so on. So, to get the whole list use: - - LRANGE mylist 0 -1 => c,b,a - -Other important operations are LLEN that returns the number of elements in the list, and LTRIM that is like LRANGE but instead of returning the specified range *trims* the list, so it is like _Get range from mylist, Set this range as new value_ but does so atomically. 
- -The Set data type ---- - -Currently we don't use the Set type in this tutorial, but since we use -Sorted Sets, which are kind of a more capable version of Sets, it is better -to start introducing Sets first (which are a very useful data structure -per se), and later Sorted Sets. - -There are more data types than just Lists. Redis also supports Sets, which are unsorted collections of elements. It is possible to add, remove, and test for existence of members, and perform the intersection between different Sets. Of course it is possible to get the elements of a Set. Some examples will make it more clear. Keep in mind that [`SADD`](/commands/sadd) is the _add to set_ operation, [`SREM`](/commands/srem) is the _remove from set_ operation, [`SISMEMBER`](/commands/sismember) is the _test if member_ operation, and [`SINTER`](/commands/sinter) is the _perform intersection_ operation. Other operations are [`SCARD`](/commands/scard) to get the cardinality (the number of elements) of a Set, and [`SMEMBERS`](/commands/smembers) to return all the members of a Set. - - SADD myset a - SADD myset b - SADD myset foo - SADD myset bar - SCARD myset => 4 - SMEMBERS myset => bar,a,foo,b - -Note that [`SMEMBERS`](/commands/smembers) does not return the elements in the same order we added them since Sets are *unsorted* collections of elements. When you want to store in order it is better to use Lists instead. Some more operations against Sets: - - SADD mynewset b - SADD mynewset foo - SADD mynewset hello - SINTER myset mynewset => foo,b - -[`SINTER`](/commands/sinter) can return the intersection between Sets but it is not limited to two Sets. You may ask for the intersection of 4,5, or 10000 Sets. Finally let's check how [`SISMEMBER`](/commands/sismember) works: - - SISMEMBER myset foo => 1 - SISMEMBER myset notamember => 0 - -The Sorted Set data type ---- - -Sorted Sets are similar to Sets: collection of elements. 
However in Sorted -Sets each element is associated with a floating point value, called the -*element score*. Because of the score, elements inside a Sorted Set are -ordered, since we can always compare two elements by score (and if the score -happens to be the same, we compare the two elements as strings). - -Like Sets in Sorted Sets it is not possible to add repeated elements, every -element is unique. However it is possible to update an element's score. - -Sorted Set commands are prefixed with `Z`. The following is an example -of Sorted Sets usage: - - ZADD zset 10 a - ZADD zset 5 b - ZADD zset 12.55 c - ZRANGE zset 0 -1 => b,a,c - -In the above example we added a few elements with [`ZADD`](/commands/zadd), and later retrieved -the elements with [`ZRANGE`](/commands/zrange). As you can see the elements are returned in order -according to their score. In order to check if a given element exists, and -also to retrieve its score if it exists, we use the [`ZSCORE`](/commands/zscore) command: - - ZSCORE zset a => 10 - ZSCORE zset non_existing_element => NULL - -Sorted Sets are a very powerful data structure, you can query elements by -score range, lexicographically, in reverse order, and so forth. -To know more [please check the Sorted Set sections in the official Redis commands documentation](https://redis.io/commands/#sorted_set). - -The Hash data type ---- - -This is the last data structure we use in our program, and is extremely easy -to grasp since there is an equivalent in almost every programming language out -there: Hashes. Redis Hashes are basically like Ruby or Python hashes, a -collection of fields associated with values: - - HMSET myuser name Salvatore surname Sanfilippo country Italy - HGET myuser surname => Sanfilippo - -[`HMSET`](/commands/hmset) can be used to set fields in the hash, that can be retrieved with -[`HGET`](/commands/hget) later. 
It is possible to check if a field exists with [`HEXISTS`](/commands/hexists), or -to increment a hash field with [`HINCRBY`](/commands/hincrby) and so forth. - -Hashes are the ideal data structure to represent *objects*. For example we -use Hashes in order to represent Users and Updates in our Twitter clone. - -Okay, we just exposed the basics of the Redis main data structures, -we are ready to start coding! - -Prerequisites ---- - -If you haven't downloaded the [Retwis source code](https://github.com/antirez/retwis) already please grab it now. It contains a few PHP files, and also a copy of [Predis](https://github.com/nrk/predis), the PHP client library we use in this example. - -Another thing you probably want is a working Redis server. Just get the source, build with `make`, run with `./redis-server`, and you're ready to go. No configuration is required at all in order to play with or run Retwis on your computer. - -Data layout ---- - -When working with a relational database, a database schema must be designed so that we'd know the tables, indexes, and so on that the database will contain. We don't have tables in Redis, so what do we need to design? We need to identify what keys are needed to represent our objects and what kind of values these keys need to hold. - -Let's start with Users. We need to represent users, of course, with their username, userid, password, the set of users following a given user, the set of users a given user follows, and so on. The first question is, how should we identify a user? Like in a relational DB, a good solution is to identify different users with different numbers, so we can associate a unique ID with every user. Every other reference to this user will be done by id. Creating unique IDs is very simple to do by using our atomic [`INCR`](/commands/incr) operation. 
When we create a new user we can do something like this, assuming the user is called "antirez": - - INCR next_user_id => 1000 - HMSET user:1000 username antirez password p1pp0 - -*Note: you should use a hashed password in a real application, for simplicity -we store the password in clear text.* - -We use the `next_user_id` key in order to always get a unique ID for every new user. Then we use this unique ID to name the key holding a Hash with user's data. *This is a common design pattern* with key-value stores! Keep it in mind. -Besides the fields already defined, we need some more stuff in order to fully define a User. For example, sometimes it can be useful to be able to get the user ID from the username, so every time we add a user, we also populate the `users` key, which is a Hash, with the username as field, and its ID as value. - - HSET users antirez 1000 - -This may appear strange at first, but remember that we are only able to access data in a direct way, without secondary indexes. It's not possible to tell Redis to return the key that holds a specific value. This is also *our strength*. This new paradigm is forcing us to organize data so that everything is accessible by _primary key_, speaking in relational DB terms. - -Followers, following, and updates ---- - -There is another central need in our system. A user might have users who follow them, which we'll call their followers. A user might follow other users, which we'll call a following. We have a perfect data structure for this. That is... Sets. -The uniqueness of Sets elements, and the fact we can test in constant time for -existence, are two interesting features. However what about also remembering -the time at which a given user started following another one?
In an enhanced -version of our simple Twitter clone this may be useful, so instead of using -a simple Set, we use a Sorted Set, using the user ID of the following or follower -user as element, and the unix time at which the relation between the users -was created, as our score. - -So let's define our keys: - - followers:1000 => Sorted Set of uids of all the followers users - following:1000 => Sorted Set of uids of all the following users - -We can add new followers with: - - ZADD followers:1000 1401267618 1234 => Add user 1234 with time 1401267618 - -Another important thing we need is a place where we can add the updates to display in the user's home page. We'll need to access this data in chronological order later, from the most recent update to the oldest, so the perfect kind of data structure for this is a List. Basically every new update will be [`LPUSH`](/commands/lpush)ed in the user updates key, and thanks to [`LRANGE`](/commands/lrange), we can implement pagination and so on. Note that we use the words _updates_ and _posts_ interchangeably, since updates are actually "little posts" in some way. - - posts:1000 => a List of post ids - every new post is LPUSHed here. - -This list is basically the User timeline. We'll push the IDs of her/his own -posts, and the IDs of all the posts created by the following users. -Basically, we'll implement a write fanout. - -Authentication ---- - -OK, we have more or less everything about the user except for authentication. We'll handle authentication in a simple but robust way: we don't want to use PHP sessions, as our system must be ready to be distributed among different web servers easily, so we'll keep the whole state in our Redis database. All we need is a random **unguessable** string to set as the cookie of an authenticated user, and a key that will contain the user ID of the client holding the string. - -We need two things in order to make this thing work in a robust way.
-First: the current authentication *secret* (the random unguessable string) -should be part of the User object, so when the user is created we also set -an `auth` field in its Hash: - - HSET user:1000 auth fea5e81ac8ca77622bed1c2132a021f9 - -Moreover, we need a way to map authentication secrets to user IDs, so -we also take an `auths` key, which has as value a Hash type mapping -authentication secrets to user IDs. - - HSET auths fea5e81ac8ca77622bed1c2132a021f9 1000 - -In order to authenticate a user we'll do these simple steps (see the `login.php` file in the Retwis source code): - - * Get the username and password via the login form. - * Check if the `username` field actually exists in the `users` Hash. - * If it exists we have the user id, (i.e. 1000). - * Check if user:1000 password matches, if not, return an error message. - * Ok authenticated! Set "fea5e81ac8ca77622bed1c2132a021f9" (the value of user:1000 `auth` field) as the "auth" cookie. - -This is the actual code: - - include("retwis.php"); - - # Form sanity checks - if (!gt("username") || !gt("password")) - goback("You need to enter both username and password to login."); - - # The form is ok, check if the username is available - $username = gt("username"); - $password = gt("password"); - $r = redisLink(); - $userid = $r->hget("users",$username); - if (!$userid) - goback("Wrong username or password"); - $realpassword = $r->hget("user:$userid","password"); - if ($realpassword != $password) - goback("Wrong username or password"); - - # Username / password OK, set the cookie and redirect to index.php - $authsecret = $r->hget("user:$userid","auth"); - setcookie("auth",$authsecret,time()+3600*24*365); - header("Location: index.php"); - -This happens every time a user logs in, but we also need a function `isLoggedIn` in order to check if a given user is already authenticated or not. These are the logical steps performed by the `isLoggedIn` function: - - * Get the "auth" cookie from the user.
If there is no cookie, the user is not logged in, of course. Let's call the value of the cookie `<authcookie>`. - * Check if the `<authcookie>` field in the `auths` Hash exists, and what the value (the user ID) is (1000 in the example). - * In order for the system to be more robust, also verify that user:1000 auth field also matches. - * OK the user is authenticated, and we loaded a bit of information in the `$User` global variable. - -The code is simpler than the description, possibly: - - function isLoggedIn() { - global $User, $_COOKIE; - - if (isset($User)) return true; - - if (isset($_COOKIE['auth'])) { - $r = redisLink(); - $authcookie = $_COOKIE['auth']; - if ($userid = $r->hget("auths",$authcookie)) { - if ($r->hget("user:$userid","auth") != $authcookie) return false; - loadUserInfo($userid); - return true; - } - } - return false; - } - - function loadUserInfo($userid) { - global $User; - - $r = redisLink(); - $User['id'] = $userid; - $User['username'] = $r->hget("user:$userid","username"); - return true; - } - -Having `loadUserInfo` as a separate function is overkill for our application, but it's a good approach in a complex application. The only thing that's missing from all the authentication is the logout. What do we do on logout? That's simple, we'll just change the random string in user:1000 `auth` field, remove the old authentication secret from the `auths` Hash, and add the new one. - -*Important:* the logout procedure explains why we don't just authenticate the user after looking up the authentication secret in the `auths` Hash, but double check it against user:1000 `auth` field. The true authentication string is the latter, while the `auths` Hash is just an authentication field that may even be volatile, or, if there are bugs in the program or a script gets interrupted, we may even end with multiple entries in the `auths` key pointing to the same user ID.
The logout code is the following (`logout.php`): - - include("retwis.php"); - - if (!isLoggedIn()) { - header("Location: index.php"); - exit; - } - - $r = redisLink(); - $newauthsecret = getrand(); - $userid = $User['id']; - $oldauthsecret = $r->hget("user:$userid","auth"); - - $r->hset("user:$userid","auth",$newauthsecret); - $r->hset("auths",$newauthsecret,$userid); - $r->hdel("auths",$oldauthsecret); - - header("Location: index.php"); - -That is just what we described and should be simple to understand. - -Updates ---- - -Updates, also known as posts, are even simpler. In order to create a new post in the database we do something like this: - - INCR next_post_id => 10343 - HMSET post:10343 user_id $owner_id time $time body "I'm having fun with Retwis" - -As you can see each post is just represented by a Hash with three fields. The ID of the user owning the post, the time at which the post was published, and finally, the body of the post, which is, the actual status message. - -After we create a post and we obtain the post ID, we need to LPUSH the ID in the timeline of every user that is following the author of the post, and of course in the list of posts of the author itself (everybody is virtually following herself/himself). This is the file `post.php` that shows how this is performed: - - include("retwis.php"); - - if (!isLoggedIn() || !gt("status")) { - header("Location:index.php"); - exit; - } - - $r = redisLink(); - $postid = $r->incr("next_post_id"); - $status = str_replace("\n"," ",gt("status")); - $r->hmset("post:$postid","user_id",$User['id'],"time",time(),"body",$status); - $followers = $r->zrange("followers:".$User['id'],0,-1); - $followers[] = $User['id']; /* Add the post to our own posts too */ - - foreach($followers as $fid) { - $r->lpush("posts:$fid",$postid); - } - # Push the post on the timeline, and trim the timeline to the - # newest 1000 elements. 
 - $r->lpush("timeline",$postid); - $r->ltrim("timeline",0,1000); - - header("Location: index.php"); - -The core of the function is the `foreach` loop. We use [`ZRANGE`](/commands/zrange) to get all the followers of the current user, then the loop will [`LPUSH`](/commands/lpush) the post in every follower timeline List. - -Note that we also maintain a global timeline for all the posts, so that in the Retwis home page we can show everybody's updates easily. This requires just doing an [`LPUSH`](/commands/lpush) to the `timeline` List. Let's face it, aren't you starting to think it was a bit strange to have to sort things added in chronological order using `ORDER BY` with SQL? I think so. - -There is an interesting thing to notice in the code above: we used a new -command called [`LTRIM`](/commands/ltrim) after we perform the [`LPUSH`](/commands/lpush) operation in the global -timeline. This is used in order to trim the list to just 1000 elements. The -global timeline is actually only used in order to show a few posts in the -home page, there is no need to have the full history of all the posts. - -Basically [`LTRIM`](/commands/ltrim) + [`LPUSH`](/commands/lpush) is a way to create a *capped collection* in Redis. - -Paginating updates ---- - -Now it should be pretty clear how we can use [`LRANGE`](/commands/lrange) in order to get ranges of posts, and render these posts on the screen. The code is simple: - - function showPost($id) { - $r = redisLink(); - $post = $r->hgetall("post:$id"); - if (empty($post)) return false; - - $userid = $post['user_id']; - $username = $r->hget("user:$userid","username"); - $elapsed = strElapsed($post['time']); - $userlink = "".utf8entities($username).""; - - echo('

'.$userlink.' '.utf8entities($post['body'])."
"); - echo('posted '.$elapsed.' ago via web
 '); - return true; - } - - function showUserPosts($userid,$start,$count) { - $r = redisLink(); - $key = ($userid == -1) ? "timeline" : "posts:$userid"; - $posts = $r->lrange($key,$start,$start+$count); - $c = 0; - foreach($posts as $p) { - if (showPost($p)) $c++; - if ($c == $count) break; - } - return count($posts) == $count+1; - } - -`showPost` will simply convert and print a Post in HTML while `showUserPosts` gets a range of posts and then passes them to `showPost`. - -*Note: [`LRANGE`](/commands/lrange) is not very efficient if the list of posts starts to be very -big, and we want to access elements which are in the middle of the list, since Redis Lists are backed by linked lists. If a system is designed for -deep pagination of millions of items, it is better to resort to Sorted Sets -instead.* - -Following users ---- - -It is not hard, but we did not yet check how we create following / follower relationships. If user ID 1000 (antirez) wants to follow user ID 5000 (pippo), we need to create both a following and a follower relationship. We just need two [`ZADD`](/commands/zadd) calls: - - ZADD following:1000 5000 - ZADD followers:5000 1000 - -Note the same pattern again and again. In theory with a relational database, the list of following and followers would be contained in a single table with fields like `following_id` and `follower_id`. You can extract the followers or following of every user using an SQL query. With a key-value DB things are a bit different since we need to set both the `1000 is following 5000` and `5000 is followed by 1000` relations. This is the price to pay, but on the other hand accessing the data is simpler and extremely fast. Having these things as separate sets allows us to do interesting stuff.
For example, using [`ZINTERSTORE`](/commands/zinterstore) we can have the intersection of `following` of two different users, so we may add a feature to our Twitter clone so that it is able to tell you very quickly when you visit somebody else's profile, "you and Alice have 34 followers in common", and things like that. - -You can find the code that sets or removes a following / follower relation in the `follow.php` file. - -Making it horizontally scalable ---- - -Gentle reader, if you read till this point you are already a hero. Thank you. Before talking about scaling horizontally it is worth checking performance on a single server. Retwis is *extremely fast*, without any kind of cache. On a very slow and loaded server, an Apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey ass slow... Imagine the results with more recent hardware. - -However you can't go with a single server forever, how do you scale a key-value -store? - -Retwis does not perform any multi-keys operation, so making it scalable is -simple: you may use client-side sharding, or something like a sharding proxy -like Twemproxy, or the upcoming Redis Cluster. - -To know more about those topics please read -[our documentation about sharding](/topics/partitioning). However, the point here -to stress is that in a key-value store, if you design with care, the data set -is split among **many independent small keys**. To distribute those keys -to multiple nodes is more straightforward and predictable compared to using -a semantically more complex database system. 
diff --git a/content/develop/use/manual/pipelining/index.md b/content/develop/use/manual/pipelining/index.md deleted file mode 100644 index 88343d0020..0000000000 --- a/content/develop/use/manual/pipelining/index.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -categories: -- docs -- develop -- stack -- oss -- rs -- rc -- oss -- kubernetes -- clients -description: How to optimize round-trip times by batching Redis commands -linkTitle: Pipelining -title: Redis pipelining -weight: 2 ---- - -Redis pipelining is a technique for improving performance by issuing multiple commands at once without waiting for the response to each individual command. Pipelining is supported by most Redis clients. This document describes the problem that pipelining is designed to solve and how pipelining works in Redis. - -## Request/Response protocols and round-trip time (RTT) - -Redis is a TCP server using the client-server model and what is called a *Request/Response* protocol. - -This means that usually a request is accomplished with the following steps: - -* The client sends a query to the server, and reads from the socket, usually in a blocking way, for the server response. -* The server processes the command and sends the response back to the client. - -So for instance a four commands sequence is something like this: - - * *Client:* INCR X - * *Server:* 1 - * *Client:* INCR X - * *Server:* 2 - * *Client:* INCR X - * *Server:* 3 - * *Client:* INCR X - * *Server:* 4 - -Clients and Servers are connected via a network link. -Such a link can be very fast (a loopback interface) or very slow (a connection established over the Internet with many hops between the two hosts). -Whatever the network latency is, it takes time for the packets to travel from the client to the server, and back from the server to the client to carry the reply. - -This time is called RTT (Round Trip Time). 
-It's easy to see how this can affect performance when a client needs to perform many requests in a row (for instance adding many elements to the same list, or populating a database with many keys). -For instance if the RTT time is 250 milliseconds (in the case of a very slow link over the Internet), even if the server is able to process 100k requests per second, we'll be able to process at max four requests per second. - -If the interface used is a loopback interface, the RTT is much shorter, typically sub-millisecond, but even this will add up to a lot if you need to perform many writes in a row. - -Fortunately there is a way to improve this use case. - -## Redis Pipelining - -A Request/Response server can be implemented so that it is able to process new requests even if the client hasn't already read the old responses. -This way it is possible to send *multiple commands* to the server without waiting for the replies at all, and finally read the replies in a single step. - -This is called pipelining, and is a technique widely in use for many decades. -For instance many POP3 protocol implementations already support this feature, dramatically speeding up the process of downloading new emails from the server. - -Redis has supported pipelining since its early days, so whatever version you are running, you can use pipelining with Redis. -This is an example using the raw netcat utility: - -```bash -$ (printf "PING\r\nPING\r\nPING\r\n"; sleep 1) | nc localhost 6379 -+PONG -+PONG -+PONG -``` - -This time we don't pay the cost of RTT for every call, but just once for the three commands. 
- -To be explicit, with pipelining the order of operations of our very first example will be the following: - - * *Client:* INCR X - * *Client:* INCR X - * *Client:* INCR X - * *Client:* INCR X - * *Server:* 1 - * *Server:* 2 - * *Server:* 3 - * *Server:* 4 - -> **IMPORTANT NOTE**: While the client sends commands using pipelining, the server will be forced to queue the replies, using memory. So if you need to send a lot of commands with pipelining, it is better to send them as batches each containing a reasonable number, for instance 10k commands, read the replies, and then send another 10k commands again, and so forth. The speed will be nearly the same, but the additional memory used will be at most the amount needed to queue the replies for these 10k commands. - -## It's not just a matter of RTT - -Pipelining is not just a way to reduce the latency cost associated with the -round trip time, it actually greatly improves the number of operations -you can perform per second in a given Redis server. -This is because without using pipelining, serving each command is very cheap from -the point of view of accessing the data structures and producing the reply, -but it is very costly from the point of view of doing the socket I/O. This -involves calling the `read()` and `write()` syscall, that means going from user -land to kernel land. -The context switch is a huge speed penalty. - -When pipelining is used, many commands are usually read with a single `read()` -system call, and multiple replies are delivered with a single `write()` system -call. Consequently, the number of total queries performed per second -initially increases almost linearly with longer pipelines, and eventually -reaches 10 times the baseline obtained without pipelining, as shown in this figure. 
- -![Pipeline size and IOPs](pipeline_iops.png) - -## A real world code example - - -In the following benchmark we'll use the Redis Ruby client, supporting pipelining, to test the speed improvement due to pipelining: - -```ruby -require 'rubygems' -require 'redis' - -def bench(descr) - start = Time.now - yield - puts "#{descr} #{Time.now - start} seconds" -end - -def without_pipelining - r = Redis.new - 10_000.times do - r.ping - end -end - -def with_pipelining - r = Redis.new - r.pipelined do - 10_000.times do - r.ping - end - end -end - -bench('without pipelining') do - without_pipelining -end -bench('with pipelining') do - with_pipelining -end -``` - -Running the above simple script yields the following figures on my Mac OS X system, running over the loopback interface, where pipelining will provide the smallest improvement as the RTT is already pretty low: - -``` -without pipelining 1.185238 seconds -with pipelining 0.250783 seconds -``` -As you can see, using pipelining, we improved the transfer by a factor of five. - -## Pipelining vs Scripting - -Using [Redis scripting](/commands/eval), available since Redis 2.6, a number of use cases for pipelining can be addressed more efficiently using scripts that perform a lot of the work needed at the server side. -A big advantage of scripting is that it is able to both read and write data with minimal latency, making operations like *read, compute, write* very fast (pipelining can't help in this scenario since the client needs the reply of the read command before it can call the write command). - -Sometimes the application may also want to send [`EVAL`](/commands/eval) or [`EVALSHA`](/commands/evalsha) commands in a pipeline. -This is entirely possible and Redis explicitly supports it with the [SCRIPT LOAD](https://redis.io/commands/script-load) command (it guarantees that [`EVALSHA`](/commands/evalsha) can be called without the risk of failing). - -## Appendix: Why are busy loops slow even on the loopback interface? 
- -Even with all the background covered in this page, you may still wonder why -a Redis benchmark like the following (in pseudo code), is slow even when -executed in the loopback interface, when the server and the client are running -in the same physical machine: - -```sh -FOR-ONE-SECOND: - Redis.SET("foo","bar") -END -``` - -After all, if both the Redis process and the benchmark are running in the same -box, isn't it just copying messages in memory from one place to another without -any actual latency or networking involved? - -The reason is that processes in a system are not always running, actually it is -the kernel scheduler that lets the process run. -So, for instance, when the benchmark is allowed to run, it reads the reply from the Redis server (related to the last command executed), and writes a new command. -The command is now in the loopback interface buffer, but in order to be read by the server, the kernel should schedule the server process (currently blocked in a system call) -to run, and so forth. -So in practical terms the loopback interface still involves network-like latency, because of how the kernel scheduler works. - -Basically a busy loop benchmark is the silliest thing that can be done when -metering performances on a networked server. The wise thing is just avoiding -benchmarking in this way. 
diff --git a/content/develop/use/manual/pipelining/pipeline_iops.png b/content/develop/use/manual/pipelining/pipeline_iops.png deleted file mode 100644 index 6ab11079f2a4bc71d196b0e59839215ab40139e6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14577 zcmb_?c|4Tg+y9uTL46F_QW#22$Ui{JBn|9D={>+zSl?{i=0I@k4nzt44@`@U^tpu2-ZhywzF>^OZ& z=R5=gB|;!9?_gWOm4eGXZ4d}Yj*GUo(P?dMgwa(mCl_}|2;|i5gv4zZFSzj6;RtQJ zS$MSW^Mu69S%vI45%D;jqk zWwwpe)ad&*89Qgf4j!iNT0QToI!fU*L-CjG7i|#IF(tDX*WGe#_beL0}@Em1d>vi)<15 zR?iJXeH~QEY$ST2%^|K7>yeR=fZFRaBvLsTIl@zvUBY<+gO#C(<*s%P|K2oR&j^X~*2K zio@5m1XNQ6-d@ID*a@u^=q`<#&^i|p^hfaD^5tFe=LMXH^95!S_yvBKscJt867ej2 zom|??#h)ke{AhG22MAgy18n zukAI~!^Y-|tKS2KO80Rdfdv=%xSWyv-Jzps78djK{L8F@+Y*p>&fa8sG{t+`{rL8^ z%d%zLAPwfM*#d5pryx)6_s5;HdE+9oEn;;WBuRD8eTDF5@GW(UMfQTUqLD3hb;9RBiH7&Ph?y+(&})!D3Z-; zd^=mWC`0XB;JGcYd^((!_|Ix9e%pG*qEJ#zu<3YahRQR}R>v_RzrF9bgbQdyrJ*o& zMhbTX!|(36d4v7qr^|;G${z8Y;rpZhN9d2Hlc3^_+RM94J>E!k?c5!6ug>!mzb)70 z-T59p)|2o6o?9Qk4lnuys3Q05PP*ApD_ouW$?Egc&znB!v->JI26$&;VyhSCzAy8v zv4=@laUrjz??1((#D0Vo2~En!8l6%-c}47p*!G<-Z=cukcq)7-O3?7fNf|I#9(Ov= zVEn0l$`6!XByQ{W>K&9w-QSPc@jx^F#n%uAfqB=rQ5DgaXFc~K#c`QNQs%t6daM%n z-bSC-FVhP)SlM?xUg4qbX>W<*QxEl(^cGK7{LAy|$wkS(=C1pTB0~~GZmG$gj_Y{f zci%zZGf^g~0`Xn`=z$aQhwsnF>peIarzYlq!S$k$WRAqt{-%WH`l>SO)GTekCRM}N4y?=Z(M zXF)DO8;1mkREKAdf%RPt_^;@P7UH8)01g!}17|glqhrJ3*A&?bucg)K z-PnFZo1>feb5d(kkK9k0uNGIGzS^HKJ!g0b<@4;RG z>W&1BTpqbJa)|HAok|YF7p5a`>!n|AZ@>Nci;QD{g5*U$V-4dI*}50YjWat7JFj)V z?G)-r%i7tQ(_!Bc-PzYEsdP~(O!=+SUnLPY9;FNB`x$Wp|?QcG@+iQ4gmiT-c5mSh)$SGO&NCS6W6 zmmiVOvF!Ci9q~SVO68TJppt{&s-V;EBfF{&wJF?D=(X=~S?urmq55t4(cI3RE<5Gp z-Q)S=17C)`+|z!wJ=BR;G4P$NQc#?2w3pqPvqB%4v9Xb zzoX$)14jc|`h5b<7~8qS?QYS#CH{%%u5=F>_cr(Do;4RY+hp6&ngG8t{QTHRT_V@_ ztnUR=F+Dqa6sPu1VTwh)jr?toT~w>^z8Lsh?b6X0)p51jW2t^2!|szZews6D-)cWQ z#_K0b*LQwYZYkPUD{v`BGXeu%CR$1N2*dvB_GfPHATF{9+(I2xN&jy)= zc+CHu-5oNA1evXyq0pE<9ZtF1Z(uQKJ8axueDvZHE+JZ$$#UY$Xz!)ILd`{ 
z;mw?0G1Vs`buOMblY7;o$g6YKb)mEDwdk8~*6+|wO+_4TJG-sTb*<{G1CQ|R+$9t& zJ}=!DYxOnl?&TB4I%mBtRf$LQ%3^+8o;EGC@;kS9>aNw9bB$NtSWOP}6sX=ReNgI? z#+i4QaAZJ{?7ddUii`Xk`S?@orXsxS6{9xBISvzXe53yL~KbiU;4`dl&r{>CV+Z@Zj0Q zcgoIV+hRrkUX{3dql|RSJ#Y`1o3wYPZaUTJ7}w-lT3u6lf>(;o14>cE1@8&T_;fwY zlka@*UYEB%oo)4;^dwJj3*Ce7;62Uf$iHK1slU&(QKng%obT0Nl`PSq_s@*vw7`_8 z)Y1VD%?uw~0yfXl$C1#G+OYc|uXMnrF!e7QJ%L~5j7WAbKCaxttu+!?esag|y_aB~ zAfa2J+a6n`&VTjILTTq*@4*|{YuR3%Yk#p%BA&#UH}v-Iyd%jmF{E~`zvpYGWT-(| znrp7gD`Z>sl`qI02GIt%Rf+keZO#=fh3Q3V78rvC%i7@Hm5P(Ee6)vkE?Bmse1m*v z*z!21WQ%1-G0JO2t55&d&g&me;;^`#b2LXWNm1Ugn7#LN-Y4r_t~TA|->yBKp~BL~ z-sc6|l>79PRyz$cn;TRnJuNF-d>3kDUt63lk@ZvwcpR8YX<92;=;!ah7br8ML@*^3 zF3Qb#O!558dh7eUrbBpmghKCFn(oSni z2j?zV%m?-l?%pc1Rc^=oe*!9DVRH^qii@kU9muMGMk}6I$m}||D@#~exKj0xX4isi z=j)%{*Sgz>%P{e)7PY%mRYu8vYwz5|-7hca%uY3yUn9)Vyhh3pTQ$d#D`5v+TvzP} zBv%ZT({nTf(d?wdhg0YH2t|v@(K)2%;b=M32_ z25z@!OaGXV#Sg03RF{jUex_&7sM&jY$k{n~U3QcU^6&g(hG+a2bNo1QddKB=Ury!pvZcW6(l%TpJyX+ay& zTE7ayrOBZ#&j`IH2Ul;IFa#oV>9o%Ai$N??{n1ySmGZN-Hs_EjHqH0;`@ZB8a%Y7t zn&#H`@N32TtEUS8+9#TBQ)1)E3WFn15fBzAD{Q;uEbeE3gZ8FNYkLusp<;{?aKVd1 zjy;s+WvSi(7fQ~D?$4(^<_i4N0>IPsL1dWwO;S-cX|xb0nt-Q-M$RNE8LS?<7FKaZ zf2zUWV)%+AZ9X_cp}g9)C)T@CAhQs2H+kD?M`G;4!H!J(&0baTFINVXDxJaoP2aX^ zLh9=J_ujg<>+@lUp|9;I%ihSA#=MTqy>sdmC_)5B)IzMj@EKUTSJqa!o4T@%u>)-Q z--AUOmv#_cYSf?CK`$4Ko2tjyTM&YAfulK1Wy-mm){(XY#8nkVnNJxu zTe%q;#_qN`RZv;Qww3^dB%wtX4 ze$S+l^jF+{eE9{*rc5q2A;zi~b2NQ)5L#1Z0^OKUApym4KMy)@cK5-YTkhVeXMSsK zWapd|8ynlDZGIDmZd+*?KnY`EZ+s}OiKQWX2tEd6b*`CjBK9XKK~)u)PGro8y^h7* zcm6V7u~iL3LFt*~-nNAtdzZUy(3ZTBg0YJT zu6X5Q@497^H|0v|NqBiHdscGxLRh%&v)g^$0>=UeBT418O}ZPDf~z8ol(&nT8k%IP z!{1uWapNtrBK>Iy=w!uP`wzCuFWt6aoi!QFS&Cigo+d0}lm~ksnUvIaF6a0R=Ex&O zVu#TljV5(nt8PRY{*O!^=`!V8xdT zh3OKNz3X`uG(&J@hT_f2DQU_bWAA=+myarUDYQTOs?s0hU}1uH4tdcdO@rJJ6uXuk zD&)CKE2%~PQ5$?4TO{eax53^GqW{?`wWFYjh&02tRce%VJs!n};+=D=Dt9u3M;0;3 zWo4aA8xMm&GDG#Gm#yLrXi-Ec6h`DD{enGW=tQt}VnJpc^Ie)s1y^1`zl-iXsIint z-bqPlvmr(Ujz3wz&dtL(C&3WVmu*99z3{>Qbj;&YF&MQI(dOkMH 
zsx6m8LMfuF#6};B3WJ+(a`KT5W*&DSSWh zd0(&pW7#wa_BNaOG9h$k^hv^DcA~V@6F}`*w;ALc*)Sd>xpk7|iyH@3|jcLVZauUOcjn zNDr8d;^bpvW3Z9fxkcY=O^vA?4ov<+Oh_SkS^|MzHbH2iL;ku}C2cq&sFe}Y)1tlF zf7!4(oM7FgJM<968ZJiReHOp2wbgvkhVA76b73>;Nox_)178z4tvvT>#A@aa%oumZ zRBAG*Jw+MdjyQVMn&GV1`4u2-Q`ls$` z69_gNH21fF?YQ1Li&ptc_f6tB2J>P(3UX54brt(pf$i=HYvkGv!KU?K*q)&V)nX_!7~SBBCB^}Var*`ag?nMcI$Ug2!LnnZLH?}@L7x8m?Cbfj$r=) zIRDXiz+T{htz1a#f4LFaPG}9e^Xcck-WO5T$b+Nm8wPB(Q9XkZ-_8v2v#?P3o#Hp@ z12bU3OxkO(g&nrs@;&+Sy4IZ8kC-&Pcg=@{*|q z>Pw!id)P*3hQvVb#Ddz7AmCzy#%o*e*s;^x-vOq+nSH2_FDfGalGN`K3rT6()NxOX%76()bcS^}m0`WW0 zST0&(W4D>+gX3qGj`SHrwaCT~K8tN!-O9ggGd`eMrFAJxrseFETS@Pfo3HEci-^k4 zZMv%;ERbQxuH5&Cra7mVAvKxVN?zg1|FxHE&PT>eKl)8SnlI8vNB};+TRv`NJ$Ecn zSq|PCq?{c(8HRQ30#NQ{w-T=$T_Ot1jH)fqkN2b3H>Q+*69zFpmJVvGPM*LB>Rzt(maRQb1HMGKoJcC_D78k}{OGbTP z)D?UlSxqFVd!ec=0)?cmNzf*j~IL>F(ljcVjG#Hl$pm z)|rxkOSBSYJJ@)+&VAmhA~vRC%BwHFfjXz1aJ=*DWntpksDpaRl&f zq1pCEk?;l_GIRmM>avTfoQ~s`ILt&hyim9ecI4CldVPOM7$2;0=b=N#n#B&2_Yj@O z#!BkejG>jktRiqYUKAzjsdpSx`k)t1GjS7`I4PXocVZ1S?JKRIxN#X3Q4Mb3Se^yn z0^1QXu6&EW0NpwXgq8HizDa5<*4VI|MxvWMaQv-qpe``;+5|RT&4o0}p863oP0Usp`NcuRUnM z`TX_jHL}aeEi`JensYB(y^UMT4p)=c@LJ&?w2M+Mt{N~M-AlMe-aEOUrg$(Jw!~cl z!4fBS&q1L>7xDQqO+Q}5~XOz$*x z>QOXVl@D$~n;Xf9<9cn%Nt*M=o%J^1*47jon}@Pq{7i%`opzuqgKt6a)QaV}&l{hz zDo0#ax^AnyG^NAgh#@VYL0Us+ie>*H8oKBEH7g=s`1)PVH9F*J%OTbc=vyxawWgaT!R<(;l=w2r)}lh8K!$ z1-y&>8|^N@!%F~GXQ@Eea9ptSP1nASrUn$dNWebioe!e0)$0O1wCabo9#vr_X_G0vT6VLJ9?mQP!gKY)$8AbQUp&Phr#Yx9JLI9B z$-Y|%)#~+1Fj0%{Hx>3(TSracCQB``4y+6?VX0Q`jAxUj5(>HK*_pT9J&0x( zg#(>TUXiacuDjk$g{A>RM?gG|{N>nLmTkk*8ArG31N`!Fb&;g~27}RH3r4D{1wR<5 z8O}ya*1kPMch?uVOY0Oij2aoEdh+Vsyy>fCfr65X$SA3(s6!b84-b0X_)uwsgJz4- znQhMjBieU$=F;QO3R~mBSHhS-w;x1I5-ys0{T$CS@7gGAfqYwn=&mfg#tn2t(O3R~4bY^{WXKZzTXVdpHz$DDn*v%V zIxZXK0Hu*gBxXePs5Qspc+cUUZqJtPp^bWYV-$wUwDfi~Y3JVyrG_Q2#c>}IP#a{f zyetjoY~gh*K1~l@C-fFLhUPpZLdyo#)ozY8&BK=HTJo2bz|{Dz$AiC)aR*y0#9DQF zRq61bq9LmEyzOJVmb+8+qrD?`;JzCymDLy#KWPUfgz*LBC8Wm2hBz#{` 
zKvNV*Hu5yIaZf=X2;yvU(P9XW6F+DYB&%U-4S71QB`ucfq0Ga&Sv^o0+{IAHjV6_9 z$M^^Tj)l18{td)uzq6zROc<;99sgFE=wEzf{8jXzxKh?N54sS;Qh24vEdB8dM@b2& z$hEnu@AIzRF|Jri#dL1Or{T^elL7UsK|Cl6Kt zO7na}q*cSe$+R;cghB-{6n5Jqjt(!|OTbW9eIK1f5!JJR1G5sv$LWyA(whkefj$C- zF-+*;8r78WvZm)>2|T9sqWB!+vG0I)-w%4?7>~6B+OE48Lbe^pADK>q7# zK)2%u?D$EP*fH#w0Xt>~J<^$W00Y|-e%xf*0Y&>vZXsb}P}WnEZl@5~DST1l!?5!W zsLL_w$!v0J?;l!G13LqR$dJMVp>3w%&>%Y^A7kQZ)rr&ZRcFjd0bI7`VEkm=N-fN# z-n?E7luD+T11MV{=<+Cay2b%pcThgyg!#Rmu&91yM^(V<{;DIHN9g{rK;LnKlAE^| zFpxt0q8+xUp^wLN3jc?5Ojf-q=?j#;yl2$PTtbV3JkF2R)G+X%*5;WjKbSm*n+mA% z;k$2j&_#qQN}N!fz5ut$RC}^9jsu+?n1%mHO}}ezVyuyJnKj&AYn3-tyX;gHL0l2Lhivv1G`Ss zAaiQ-|+S zSHaP0Q3p7C{t9s)BQr+g-e3EI^!9hqYG|rHVM75ngqndB?aoJx^xOb@gV>{K#&6aT zfRE6uhyDzX!KHZNc+L-gOhsso26^{?QDH-IJvYo?@*XgZtL7-9-i<&_0NWFMZ|Nul zEc*Ws2tig_-vd*QH-IUL|YDEO^)%W z5GHQ^$75K=Z~5jKDQ%_8Z*?u;R&B;jhSVVFy)J_c5&!!!m|`CE&K9%dqYP==5LBT= zx@JXGw*v>-`4o_Jk04k*5KRfwkIWPYnFINI5B@C2@Tb#BbOfQRo-k87r%+&bVd@#K? z%Nd40sEkBVujzM{GUZ1EI7HyD<{F(&Xp5y|ID;EhjrX-o8Ki<>UxVphpSVYV-vU(j zKabIPI!@ZuT(YWxZIr-2x!~(%NMeRq32a9y=e9;ub#fy$-jyH?m08@(l>X;2l;#f6 z_7HH8NzW+i86elQ8wE1VKsW{X|u>O{XBvPc-@q4)Uts`n?&68Op|efI2oGbn;{6Ay2)g z@Q+64LvaBMSI=)@ND^3#>ZH7OxH;auPk4&(;4xD$P@+(d1ITXPsb$67I=s=(L>UwXMv^yDyH%^US+HsHVMitg5Ee|Rj3R_ zK%RHk%;e|@gBtNo`6)v$et{H#LuX2A+%S`zYQXj}*%U{n zxCcps#(d{hM#v&inP6d=H{Q83eVhS05U3Tyd}!<XD}1e{}6+1r^RN|Klp4)R2tizk7&c>CY(ZW`SiWPeT#T?VJ4+5rT*(?W;2!7 zkN$U@U_>ET&y5TVF*=us>MgWAt?LPJ2dXOy5|D0gc7O>kad0I{-QUDO8W4B{{_NTl7hWEvzPh(yMq}+HM}^PP`TDtD6qRL1aw< zcJ#ShnDcEH2kqYZQ+-T%&j%G|l6V@^13?|22G8S)=?6NXYXq-(30M3Px zr^Gxq)otfdJvN&-gtCRR*bUmW2>*WJzMGL^eHJ_9?d6HGhVC1qjEh@9P`2=X<7Z}* z@wS%k<#q({dehMuXp6y#YhKnBX8aW*fJ-QD58A68@MQH9AQ3M&l~>geKz7K9LN;bX z+`yG5cQ!RwTVRdCc+%1j7VDZ~#txV-p%B<(c#s@2-Q}ju;1g;T@OzZ4aMSK&0n8=d zrJq3oYNmpgH&vi@o|yo5cJQVmHc!;`UhGIlR`}GV@^B9W4@NLpodiG`d7u!3CZXKI zev4_wjCin@1P~|L3NcK#EdVqzZgJpPC1_O(0mV$=bCx!JRD>Gp&I4~%u2nEU{b7NI 
z!`3w4H?C!qz8!K+7XSyT!C?hFo4{kvN39|!WZu6O-MIu`w9iQL*BJ_%>o%I(*zc@~yYP((s*2^D=|nU+_mIC)R0NX$)iStxB%bpiD_P`fyV#s;A!zl+V_GE?64P#-Z0V; zf%@MqOu?{+a10+BVr9+1}$-BdQmnwRRNa49eQtT9JBM zTDN<8)isqZ&qL(g!WMXLrp9$R{{o zUsbZ(IJ`-aQDkK7#Omix4z6gIjaz8fgh}SfH874of-ZuS6jcgo!v9EOAztLo+F}mE zx;)&z>15r9cU3>K{sb z^8~(E(z0F7$KkbWFI+6H*&pv1CUa}Fg7+vstjQ;5s%OkMjW6%Ja$QmJ(S(?(CsT(? zOq@L`_v`oBG#wf)P)6nrx>>Wk6(5@KEhs7;n;~abKQVTVj!mz`klL%<^;>TJLuB7e zD$Y=TqrBat4{G;VJ?W*--G$=8Z+qQi>%-Fp(AFiRd%ByQ%3og1^_UoJ&9@dAbVZij znQOLw|3dg?DrR?cXJ`|?GwP>x$iVAEx*Ek!yyzEseRdzw5zhra98QT@tqHnplahDA zh8)|3SL+izK(dJQbl3j98d_X;#i0+0xB`FI;*s~A z*3NwPF{|~hqlLc87{^eD--EgaSDSx8$&zX1AQrl52o%Whw{M&*j`T*$?j-N}FU{$WY?m-C&TYrDF#_G7Ghzjtgz_taAJ znLL#yUG@2+O?^REkH^|@EQ>Bnv}9fXdL;b0#gBBM*00no&z`RyEu=znH6`HA%zXT< z9mJKAMwgPZybg9R$j3Mcin7{CXiVZrzbJx_$g#Sd*`%V%2Ml=qDOvKt!`!bQ7 zYNzH4p7IDyJQ`c^wkdXma?(AR(oXL58OukGHhjuu0(?7Jw}_WfBu7*?LO)hHWBxqt zWo+XREYST>VaB-#_`(;|KZW_l^q0G!CZ~KHPBMf@el4Va@O}D(fljWL?X~|04#hBK diff --git a/content/develop/use/patterns/distributed-locks.md b/content/develop/use/patterns/distributed-locks.md index 55526d4fc2..2bbccf4464 100644 --- a/content/develop/use/patterns/distributed-locks.md +++ b/content/develop/use/patterns/distributed-locks.md @@ -105,7 +105,7 @@ Basically the random value is used in order to release the lock in a safe way, w end This is important in order to avoid removing a lock that was created by another client. For example a client may acquire the lock, get blocked performing some operation for longer than the lock validity time (the time at which the key will expire), and later remove the lock, that was already acquired by some other client. -Using just [`DEL`](/commands/del) is not safe as a client may remove another client's lock. With the above script instead every lock is “signed” with a random string, so the lock will be removed only if it is still the one that was set by the client trying to remove it. 
+Using just [`DEL`]({{< relref "/commands/del" >}}) is not safe as a client may remove another client's lock. With the above script instead every lock is “signed” with a random string, so the lock will be removed only if it is still the one that was set by the client trying to remove it. What should this random string be? We assume it’s 20 bytes from `/dev/urandom`, but you can find cheaper ways to make it unique enough for your tasks. For example a safe pick is to seed RC4 with `/dev/urandom`, and generate a pseudo random stream from that. @@ -137,7 +137,7 @@ This paper contains more information about similar systems requiring a bound *cl ### Retry on Failure -When a client is unable to acquire the lock, it should try again after a random delay in order to try to desynchronize multiple clients trying to acquire the lock for the same resource at the same time (this may result in a split brain condition where nobody wins). Also the faster a client tries to acquire the lock in the majority of Redis instances, the smaller the window for a split brain condition (and the need for a retry), so ideally the client should try to send the [`SET`](/commands/set) commands to the N instances at the same time using multiplexing. +When a client is unable to acquire the lock, it should try again after a random delay in order to try to desynchronize multiple clients trying to acquire the lock for the same resource at the same time (this may result in a split brain condition where nobody wins). Also the faster a client tries to acquire the lock in the majority of Redis instances, the smaller the window for a split brain condition (and the need for a retry), so ideally the client should try to send the [`SET`]({{< relref "/commands/set" >}}) commands to the N instances at the same time using multiplexing. 
It is worth stressing how important it is for clients that fail to acquire the majority of locks, to release the (partially) acquired locks ASAP, so that there is no need to wait for key expiry in order for the lock to be acquired again (however if a network partition happens and the client is no longer able to communicate with the Redis instances, there is an availability penalty to pay as it waits for key expiration). @@ -165,7 +165,7 @@ The system liveness is based on three main features: 2. The fact that clients, usually, will cooperate removing the locks when the lock was not acquired, or when the lock was acquired and the work terminated, making it likely that we don’t have to wait for keys to expire to re-acquire the lock. 3. The fact that when a client needs to retry a lock, it waits a time which is comparably greater than the time needed to acquire the majority of locks, in order to probabilistically make split brain conditions during resource contention unlikely. -However, we pay an availability penalty equal to [`TTL`](/commands/ttl) time on network partitions, so if there are continuous partitions, we can pay this penalty indefinitely. +However, we pay an availability penalty equal to [`TTL`]({{< relref "/commands/ttl" >}}) time on network partitions, so if there are continuous partitions, we can pay this penalty indefinitely. This happens every time a client acquires a lock and gets partitioned away before being able to remove the lock. Basically if there are infinite continuous network partitions, the system may become not available for an infinite amount of time. @@ -178,7 +178,7 @@ However there is another consideration around persistence if we want to target a Basically to see the problem here, let’s assume we configure Redis without persistence at all. A client acquires the lock in 3 of 5 instances. 
One of the instances where the client was able to acquire the lock is restarted, at this point there are again 3 instances that we can lock for the same resource, and another client can lock it again, violating the safety property of exclusivity of lock. -If we enable AOF persistence, things will improve quite a bit. For example we can upgrade a server by sending it a [`SHUTDOWN`](/commands/shutdown) command and restarting it. Because Redis expires are semantically implemented so that time still elapses when the server is off, all our requirements are fine. +If we enable AOF persistence, things will improve quite a bit. For example we can upgrade a server by sending it a [`SHUTDOWN`]({{< relref "/commands/shutdown" >}}) command and restarting it. Because Redis expires are semantically implemented so that time still elapses when the server is off, all our requirements are fine. However everything is fine as long as it is a clean shutdown. What about a power outage? If Redis is configured, as by default, to fsync on disk every second, it is possible that after a restart our key is missing. In theory, if we want to guarantee the lock safety in the face of any kind of instance restart, we need to enable `fsync=always` in the persistence settings. This will affect performance due to the additional sync overhead. However things are better than they look like at a first glance. Basically, @@ -188,14 +188,14 @@ set of currently active locks when the instance restarts were all obtained by locking instances other than the one which is rejoining the system. To guarantee this we just need to make an instance, after a crash, unavailable -for at least a bit more than the max [`TTL`](/commands/ttl) we use. This is the time needed +for at least a bit more than the max [`TTL`]({{< relref "/commands/ttl" >}}) we use. This is the time needed for all the keys about the locks that existed when the instance crashed to become invalid and be automatically released. 
Using *delayed restarts* it is basically possible to achieve safety even without any kind of Redis persistence available, however note that this may translate into an availability penalty. For example if a majority of instances -crash, the system will become globally unavailable for [`TTL`](/commands/ttl) (here globally means +crash, the system will become globally unavailable for [`TTL`]({{< relref "/commands/ttl" >}}) (here globally means that no resource at all will be lockable during this time). ### Making the algorithm more reliable: Extending the lock diff --git a/content/develop/use/patterns/indexes/index.md b/content/develop/use/patterns/indexes/index.md index 1eea4a79fd..194ab621cf 100644 --- a/content/develop/use/patterns/indexes/index.md +++ b/content/develop/use/patterns/indexes/index.md @@ -43,8 +43,8 @@ Since the score is a double precision float, indexes you can build with vanilla sorted sets are limited to things where the indexing field is a number within a given range. -The two commands to build these kind of indexes are [`ZADD`](/commands/zadd) and -[`ZRANGE`](/commands/zrange) with the `BYSCORE` argument to respectively add items and retrieve items within a +The two commands to build these kind of indexes are [`ZADD`]({{< relref "/commands/zadd" >}}) and +[`ZRANGE`]({{< relref "/commands/zrange" >}}) with the `BYSCORE` argument to respectively add items and retrieve items within a specified range. For instance, it is possible to index a set of person names by their @@ -63,18 +63,18 @@ command can be used: 1) "Manuel" 2) "Jon" -By using the **WITHSCORES** option of [`ZRANGE`](/commands/zrange) it is also possible +By using the **WITHSCORES** option of [`ZRANGE`]({{< relref "/commands/zrange" >}}) it is also possible to obtain the scores associated with the returned elements. 
-The [`ZCOUNT`](/commands/zcount) command can be used in order to retrieve the number of elements +The [`ZCOUNT`]({{< relref "/commands/zcount" >}}) command can be used in order to retrieve the number of elements within a given range, without actually fetching the elements, which is also useful, especially given the fact the operation is executed in logarithmic time regardless of the size of the range. -Ranges can be inclusive or exclusive, please refer to the [`ZRANGE`](/commands/zrange) +Ranges can be inclusive or exclusive, please refer to the [`ZRANGE`]({{< relref "/commands/zrange" >}}) command documentation for more information. -**Note**: Using the [`ZRANGE`](/commands/zrange) with the `BYSCORE` and `REV` arguments, it is possible to query a range in +**Note**: Using the [`ZRANGE`]({{< relref "/commands/zrange" >}}) with the `BYSCORE` and `REV` arguments, it is possible to query a range in reversed order, which is often useful when data is indexed in a given direction (ascending or descending) but we want to retrieve information the other way around. @@ -102,8 +102,8 @@ could do: ZADD user.age.index 33 3 This time the value associated with the score in the sorted set is the -ID of the object. So once I query the index with [`ZRANGE`](/commands/zrange) with the `BYSCORE` argument, I'll -also have to retrieve the information I need with [`HGETALL`](/commands/hgetall) or similar +ID of the object. So once I query the index with [`ZRANGE`]({{< relref "/commands/zrange" >}}) with the `BYSCORE` argument, I'll +also have to retrieve the information I need with [`HGETALL`]({{< relref "/commands/hgetall" >}}) or similar commands. The obvious advantage is that objects can change without touching the index, as long as we don't change the indexed field. @@ -120,7 +120,7 @@ make sense to use the birth date as index instead of the age itself, but there are other cases where we simply want some field to change from time to time, and the index to reflect this change. 
-The [`ZADD`](/commands/zadd) command makes updating simple indexes a very trivial operation +The [`ZADD`]({{< relref "/commands/zadd" >}}) command makes updating simple indexes a very trivial operation since re-adding back an element with a different score and the same value will simply update the score and move the element at the right position, so if the user `antirez` turned 39 years old, in order to update the @@ -130,7 +130,7 @@ to execute the following two commands: HSET user:1 age 39 ZADD user.age.index 39 1 -The operation may be wrapped in a [`MULTI`](/commands/multi)/[`EXEC`](/commands/exec) transaction in order to +The operation may be wrapped in a [`MULTI`]({{< relref "/commands/multi" >}})/[`EXEC`]({{< relref "/commands/exec" >}}) transaction in order to make sure both fields are updated or none. Turning multi dimensional data into linear data @@ -143,7 +143,7 @@ is not always true. If you can efficiently represent something multi-dimensional in a linear way, they it is often possible to use a simple sorted set for indexing. -For example the [Redis geo indexing API](/commands/geoadd) uses a sorted +For example the [Redis geo indexing API]({{< relref "/commands/geoadd" >}}) uses a sorted set to index places by latitude and longitude using a technique called [Geo hash](https://en.wikipedia.org/wiki/Geohash). The sorted set score represents alternating bits of longitude and latitude, so that we map the @@ -179,7 +179,7 @@ the second is checked and so forth. If the common prefix of two strings is the same then the longer string is considered the greater of the two, so "foobar" is greater than "foo". 
-There are commands such as [`ZRANGE`](/commands/zrange) and [`ZLEXCOUNT`](/commands/zlexcount) that +There are commands such as [`ZRANGE`]({{< relref "/commands/zrange" >}}) and [`ZLEXCOUNT`]({{< relref "/commands/zlexcount" >}}) that are able to query and count ranges in a lexicographically fashion, assuming they are used with sorted sets where all the elements have the same score. @@ -207,7 +207,7 @@ are ordered lexicographically. 3) "baaa" 4) "bbbb" -Now we can use [`ZRANGE`](/commands/zrange) with the `BYLEX` argument in order to perform range queries. +Now we can use [`ZRANGE`]({{< relref "/commands/zrange" >}}) with the `BYLEX` argument in order to perform range queries. ZRANGE myindex [a (b BYLEX 1) "aaaa" @@ -244,7 +244,7 @@ we'll just do: ZADD myindex 0 banana And so forth for each search query ever encountered. Then when we want to -complete the user input, we execute a range query using [`ZRANGE`](/commands/zrange) with the `BYLEX` argument. +complete the user input, we execute a range query using [`ZRANGE`]({{< relref "/commands/zrange" >}}) with the `BYLEX` argument. Imagine the user is typing "bit" inside the search form, and we want to offer possible search keywords starting for "bit". We send Redis a command like that: @@ -289,7 +289,7 @@ commands: ZADD myindex 0 banana:2 Note that because it is possible that there are concurrent updates, the -above three commands should be send via a [Lua script](/commands/eval) +above three commands should be send via a [Lua script]({{< relref "/commands/eval" >}}) instead, so that the Lua script will atomically get the old count and re-add the item with incremented score. @@ -478,7 +478,7 @@ So for example, when we index we also add to a hash: This is not always needed, but simplifies the operations of updating the index. 
In order to remove the old information we indexed for the object ID 90, regardless of the *current* fields values of the object, we just -have to retrieve the hash value by object ID and [`ZREM`](/commands/zrem) it in the sorted +have to retrieve the hash value by object ID and [`ZREM`]({{< relref "/commands/zrem" >}}) it in the sorted set view. Representing and querying graphs using a hexastore @@ -724,20 +724,20 @@ in order to build other kind of indexes. They are very commonly used but maybe we don't always realize they are actually a form of indexing. For instance I can index object IDs into a Set data type in order to use -the *get random elements* operation via [`SRANDMEMBER`](/commands/srandmember) in order to retrieve +the *get random elements* operation via [`SRANDMEMBER`]({{< relref "/commands/srandmember" >}}) in order to retrieve a set of random objects. Sets can also be used to check for existence when all I need is to test if a given item exists or not or has a single boolean property or not. Similarly lists can be used in order to index items into a fixed order. I can add all my items into a Redis list and rotate the list with -[`RPOPLPUSH`](/commands/rpoplpush) using the same key name as source and destination. This is useful +[`RPOPLPUSH`]({{< relref "/commands/rpoplpush" >}}) using the same key name as source and destination. This is useful when I want to process a given set of items again and again forever in the same order. Think of an RSS feed system that needs to refresh the local copy periodically. Another popular index often used with Redis is a **capped list**, where items -are added with [`LPUSH`](/commands/lpush) and trimmed with [`LTRIM`](/commands/ltrim), in order to create a view +are added with [`LPUSH`]({{< relref "/commands/lpush" >}}) and trimmed with [`LTRIM`]({{< relref "/commands/ltrim" >}}), in order to create a view with just the latest N items encountered, in the same order they were seen. 
@@ -751,5 +751,5 @@ bugs, network partitions or other events. Different strategies could be used. If the index data is outside Redis *read repair* can be a solution, where data is fixed in a lazy way when it is requested. When we index data which is stored in Redis itself -the [`SCAN`](/commands/scan) family of commands can be used in order to verify, update or +the [`SCAN`]({{< relref "/commands/scan" >}}) family of commands can be used in order to verify, update or rebuild the index from scratch, incrementally. diff --git a/content/develop/use/patterns/twitter-clone.md b/content/develop/use/patterns/twitter-clone.md index b8d3fb823a..ad062e7c76 100644 --- a/content/develop/use/patterns/twitter-clone.md +++ b/content/develop/use/patterns/twitter-clone.md @@ -36,7 +36,7 @@ implementation for the sake of following the article better). What is a key-value store? --- -The essence of a key-value store is the ability to store some data, called a _value_, inside a key. The value can be retrieved later only if we know the specific key it was stored in. There is no direct way to search for a key by value. In some sense, it is like a very large hash/dictionary, but it is persistent, i.e. when your application ends, the data doesn't go away. So, for example, I can use the command [`SET`](/commands/set) to store the value *bar* in the key *foo*: +The essence of a key-value store is the ability to store some data, called a _value_, inside a key. The value can be retrieved later only if we know the specific key it was stored in. There is no direct way to search for a key by value. In some sense, it is like a very large hash/dictionary, but it is persistent, i.e. when your application ends, the data doesn't go away. 
So, for example, I can use the command [`SET`]({{< relref "/commands/set" >}}) to store the value *bar* in the key *foo*: SET foo bar @@ -44,7 +44,7 @@ Redis stores data permanently, so if I later ask "_What is the value stored in k GET foo => bar -Other common operations provided by key-value stores are [`DEL`](/commands/del), to delete a given key and its associated value, SET-if-not-exists (called [`SETNX`](/commands/setnx) on Redis), to assign a value to a key only if the key does not already exist, and [`INCR`](/commands/incr), to atomically increment a number stored in a given key: +Other common operations provided by key-value stores are [`DEL`]({{< relref "/commands/del" >}}), to delete a given key and its associated value, SET-if-not-exists (called [`SETNX`]({{< relref "/commands/setnx" >}}) on Redis), to assign a value to a key only if the key does not already exist, and [`INCR`]({{< relref "/commands/incr" >}}), to atomically increment a number stored in a given key: SET foo 10 INCR foo => 11 @@ -54,7 +54,7 @@ Other common operations provided by key-value stores are [`DEL`](/commands/del), Atomic operations --- -There is something special about [`INCR`](/commands/incr). You may wonder why Redis provides such an operation if we can do it ourselves with a bit of code? After all, it is as simple as: +There is something special about [`INCR`]({{< relref "/commands/incr" >}}). You may wonder why Redis provides such an operation if we can do it ourselves with a bit of code? After all, it is as simple as: x = GET foo x = x + 1 @@ -82,7 +82,7 @@ In this section we will see which Redis features we need to build our Twitter cl LPUSH mylist b (now mylist holds 'b','a') LPUSH mylist c (now mylist holds 'c','b','a') -[`LPUSH`](/commands/lpush) means _Left Push_, that is, add an element to the left (or to the head) of the list stored in _mylist_. If the key _mylist_ does not exist it is automatically created as an empty list before the PUSH operation. 
As you can imagine, there is also an [`RPUSH`](/commands/rpush) operation that adds the element to the right of the list (on the tail). This is very useful for our Twitter clone. User updates can be added to a list stored in `username:updates`, for instance. +[`LPUSH`]({{< relref "/commands/lpush" >}}) means _Left Push_, that is, add an element to the left (or to the head) of the list stored in _mylist_. If the key _mylist_ does not exist it is automatically created as an empty list before the PUSH operation. As you can imagine, there is also an [`RPUSH`]({{< relref "/commands/rpush" >}}) operation that adds the element to the right of the list (on the tail). This is very useful for our Twitter clone. User updates can be added to a list stored in `username:updates`, for instance. There are operations to get data from Lists, of course. For instance, LRANGE returns a range from the list, or the whole list. @@ -102,7 +102,7 @@ Sorted Sets, which are kind of a more capable version of Sets, it is better to start introducing Sets first (which are a very useful data structure per se), and later Sorted Sets. -There are more data types than just Lists. Redis also supports Sets, which are unsorted collections of elements. It is possible to add, remove, and test for existence of members, and perform the intersection between different Sets. Of course it is possible to get the elements of a Set. Some examples will make it more clear. Keep in mind that [`SADD`](/commands/sadd) is the _add to set_ operation, [`SREM`](/commands/srem) is the _remove from set_ operation, [`SISMEMBER`](/commands/sismember) is the _test if member_ operation, and [`SINTER`](/commands/sinter) is the _perform intersection_ operation. Other operations are [`SCARD`](/commands/scard) to get the cardinality (the number of elements) of a Set, and [`SMEMBERS`](/commands/smembers) to return all the members of a Set. +There are more data types than just Lists. 
Redis also supports Sets, which are unsorted collections of elements. It is possible to add, remove, and test for existence of members, and perform the intersection between different Sets. Of course it is possible to get the elements of a Set. Some examples will make it more clear. Keep in mind that [`SADD`]({{< relref "/commands/sadd" >}}) is the _add to set_ operation, [`SREM`]({{< relref "/commands/srem" >}}) is the _remove from set_ operation, [`SISMEMBER`]({{< relref "/commands/sismember" >}}) is the _test if member_ operation, and [`SINTER`]({{< relref "/commands/sinter" >}}) is the _perform intersection_ operation. Other operations are [`SCARD`]({{< relref "/commands/scard" >}}) to get the cardinality (the number of elements) of a Set, and [`SMEMBERS`]({{< relref "/commands/smembers" >}}) to return all the members of a Set. SADD myset a SADD myset b @@ -111,14 +111,14 @@ There are more data types than just Lists. Redis also supports Sets, which are u SCARD myset => 4 SMEMBERS myset => bar,a,foo,b -Note that [`SMEMBERS`](/commands/smembers) does not return the elements in the same order we added them since Sets are *unsorted* collections of elements. When you want to store in order it is better to use Lists instead. Some more operations against Sets: +Note that [`SMEMBERS`]({{< relref "/commands/smembers" >}}) does not return the elements in the same order we added them since Sets are *unsorted* collections of elements. When you want to store in order it is better to use Lists instead. Some more operations against Sets: SADD mynewset b SADD mynewset foo SADD mynewset hello SINTER myset mynewset => foo,b -[`SINTER`](/commands/sinter) can return the intersection between Sets but it is not limited to two Sets. You may ask for the intersection of 4,5, or 10000 Sets. Finally let's check how [`SISMEMBER`](/commands/sismember) works: +[`SINTER`]({{< relref "/commands/sinter" >}}) can return the intersection between Sets but it is not limited to two Sets. 
You may ask for the intersection of 4,5, or 10000 Sets. Finally let's check how [`SISMEMBER`]({{< relref "/commands/sismember" >}}) works: SISMEMBER myset foo => 1 SISMEMBER myset notamember => 0 @@ -143,10 +143,10 @@ of Sorted Sets usage: ZADD zset 12.55 c ZRANGE zset 0 -1 => b,a,c -In the above example we added a few elements with [`ZADD`](/commands/zadd), and later retrieved -the elements with [`ZRANGE`](/commands/zrange). As you can see the elements are returned in order +In the above example we added a few elements with [`ZADD`]({{< relref "/commands/zadd" >}}), and later retrieved +the elements with [`ZRANGE`]({{< relref "/commands/zrange" >}}). As you can see the elements are returned in order according to their score. In order to check if a given element exists, and -also to retrieve its score if it exists, we use the [`ZSCORE`](/commands/zscore) command: +also to retrieve its score if it exists, we use the [`ZSCORE`]({{< relref "/commands/zscore" >}}) command: ZSCORE zset a => 10 ZSCORE zset non_existing_element => NULL @@ -166,9 +166,9 @@ collection of fields associated with values: HMSET myuser name Salvatore surname Sanfilippo country Italy HGET myuser surname => Sanfilippo -[`HMSET`](/commands/hmset) can be used to set fields in the hash, that can be retrieved with -[`HGET`](/commands/hget) later. It is possible to check if a field exists with [`HEXISTS`](/commands/hexists), or -to increment a hash field with [`HINCRBY`](/commands/hincrby) and so forth. +[`HMSET`]({{< relref "/commands/hmset" >}}) can be used to set fields in the hash, that can be retrieved with +[`HGET`]({{< relref "/commands/hget" >}}) later. It is possible to check if a field exists with [`HEXISTS`]({{< relref "/commands/hexists" >}}), or +to increment a hash field with [`HINCRBY`]({{< relref "/commands/hincrby" >}}) and so forth. Hashes are the ideal data structure to represent *objects*. For example we use Hashes in order to represent Users and Updates in our Twitter clone. 
@@ -188,7 +188,7 @@ Data layout When working with a relational database, a database schema must be designed so that we'd know the tables, indexes, and so on that the database will contain. We don't have tables in Redis, so what do we need to design? We need to identify what keys are needed to represent our objects and what kind of values these keys need to hold. -Let's start with Users. We need to represent users, of course, with their username, userid, password, the set of users following a given user, the set of users a given user follows, and so on. The first question is, how should we identify a user? Like in a relational DB, a good solution is to identify different users with different numbers, so we can associate a unique ID with every user. Every other reference to this user will be done by id. Creating unique IDs is very simple to do by using our atomic [`INCR`](/commands/incr) operation. When we create a new user we can do something like this, assuming the user is called "antirez": +Let's start with Users. We need to represent users, of course, with their username, userid, password, the set of users following a given user, the set of users a given user follows, and so on. The first question is, how should we identify a user? Like in a relational DB, a good solution is to identify different users with different numbers, so we can associate a unique ID with every user. Every other reference to this user will be done by id. Creating unique IDs is very simple to do by using our atomic [`INCR`]({{< relref "/commands/incr" >}}) operation. When we create a new user we can do something like this, assuming the user is called "antirez": INCR next_user_id => 1000 HMSET user:1000 username antirez password p1pp0 @@ -224,7 +224,7 @@ We can add new followers with: ZADD followers:1000 1401267618 1234 => Add user 1234 with time 1401267618 -Another important thing we need is a place where we can add the updates to display in the user's home page. 
We'll need to access this data in chronological order later, from the most recent update to the oldest, so the perfect kind of data structure for this is a List. Basically every new update will be [`LPUSH`](/commands/lpush)ed in the user updates key, and thanks to [`LRANGE`](/commands/lrange), we can implement pagination and so on. Note that we use the words _updates_ and _posts_ interchangeably, since updates are actually "little posts" in some way. +Another important thing we need is a place where we can add the updates to display in the user's home page. We'll need to access this data in chronological order later, from the most recent update to the oldest, so the perfect kind of data structure for this is a List. Basically every new update will be [`LPUSH`]({{< relref "/commands/lpush" >}})ed in the user updates key, and thanks to [`LRANGE`]({{< relref "/commands/lrange" >}}), we can implement pagination and so on. Note that we use the words _updates_ and _posts_ interchangeably, since updates are actually "little posts" in some way. posts:1000 => a List of post ids - every new post is LPUSHed here. @@ -377,22 +377,22 @@ After we create a post and we obtain the post ID, we need to LPUSH the ID in the header("Location: index.php"); -The core of the function is the `foreach` loop. We use [`ZRANGE`](/commands/zrange) to get all the followers of the current user, then the loop will [`LPUSH`](/commands/lpush) the push the post in every follower timeline List. +The core of the function is the `foreach` loop. We use [`ZRANGE`]({{< relref "/commands/zrange" >}}) to get all the followers of the current user, then the loop will [`LPUSH`]({{< relref "/commands/lpush" >}}) the post in every follower timeline List. -Note that we also maintain a global timeline for all the posts, so that in the Retwis home page we can show everybody's updates easily. This requires just doing an [`LPUSH`](/commands/lpush) to the `timeline` List.
Let's face it, aren't you starting to think it was a bit strange to have to sort things added in chronological order using `ORDER BY` with SQL? I think so. +Note that we also maintain a global timeline for all the posts, so that in the Retwis home page we can show everybody's updates easily. This requires just doing an [`LPUSH`]({{< relref "/commands/lpush" >}}) to the `timeline` List. Let's face it, aren't you starting to think it was a bit strange to have to sort things added in chronological order using `ORDER BY` with SQL? I think so. There is an interesting thing to notice in the code above: we used a new -command called [`LTRIM`](/commands/ltrim) after we perform the [`LPUSH`](/commands/lpush) operation in the global +command called [`LTRIM`]({{< relref "/commands/ltrim" >}}) after we perform the [`LPUSH`]({{< relref "/commands/lpush" >}}) operation in the global timeline. This is used in order to trim the list to just 1000 elements. The global timeline is actually only used in order to show a few posts in the home page, there is no need to have the full history of all the posts. -Basically [`LTRIM`](/commands/ltrim) + [`LPUSH`](/commands/lpush) is a way to create a *capped collection* in Redis. +Basically [`LTRIM`]({{< relref "/commands/ltrim" >}}) + [`LPUSH`]({{< relref "/commands/lpush" >}}) is a way to create a *capped collection* in Redis. Paginating updates --- -Now it should be pretty clear how we can use [`LRANGE`](/commands/lrange) in order to get ranges of posts, and render these posts on the screen. The code is simple: +Now it should be pretty clear how we can use [`LRANGE`]({{< relref "/commands/lrange" >}}) in order to get ranges of posts, and render these posts on the screen. 
The code is simple: function showPost($id) { $r = redisLink(); @@ -423,7 +423,7 @@ Now it should be pretty clear how we can use [`LRANGE`](/commands/lrange) in ord `showPost` will simply convert and print a Post in HTML while `showUserPosts` gets a range of posts and then passes them to `showPosts`. -*Note: [`LRANGE`](/commands/lrange) is not very efficient if the list of posts start to be very +*Note: [`LRANGE`]({{< relref "/commands/lrange" >}}) is not very efficient if the list of posts start to be very big, and we want to access elements which are in the middle of the list, since Redis Lists are backed by linked lists. If a system is designed for deep pagination of million of items, it is better to resort to Sorted Sets instead.* @@ -431,12 +431,12 @@ instead.* Following users --- -It is not hard, but we did not yet check how we create following / follower relationships. If user ID 1000 (antirez) wants to follow user ID 5000 (pippo), we need to create both a following and a follower relationship. We just need to [`ZADD`](/commands/zadd) calls: +It is not hard, but we did not yet check how we create following / follower relationships. If user ID 1000 (antirez) wants to follow user ID 5000 (pippo), we need to create both a following and a follower relationship. We just need to [`ZADD`]({{< relref "/commands/zadd" >}}) calls: ZADD following:1000 5000 ZADD followers:5000 1000 -Note the same pattern again and again. In theory with a relational database, the list of following and followers would be contained in a single table with fields like `following_id` and `follower_id`. You can extract the followers or following of every user using an SQL query. With a key-value DB things are a bit different since we need to set both the `1000 is following 5000` and `5000 is followed by 1000` relations. This is the price to pay, but on the other hand accessing the data is simpler and extremely fast. Having these things as separate sets allows us to do interesting stuff. 
For example, using [`ZINTERSTORE`](/commands/zinterstore) we can have the intersection of `following` of two different users, so we may add a feature to our Twitter clone so that it is able to tell you very quickly when you visit somebody else's profile, "you and Alice have 34 followers in common", and things like that. +Note the same pattern again and again. In theory with a relational database, the list of following and followers would be contained in a single table with fields like `following_id` and `follower_id`. You can extract the followers or following of every user using an SQL query. With a key-value DB things are a bit different since we need to set both the `1000 is following 5000` and `5000 is followed by 1000` relations. This is the price to pay, but on the other hand accessing the data is simpler and extremely fast. Having these things as separate sets allows us to do interesting stuff. For example, using [`ZINTERSTORE`]({{< relref "/commands/zinterstore" >}}) we can have the intersection of `following` of two different users, so we may add a feature to our Twitter clone so that it is able to tell you very quickly when you visit somebody else's profile, "you and Alice have 34 followers in common", and things like that. You can find the code that sets or removes a following / follower relation in the `follow.php` file. diff --git a/content/develop/use/pipelining/index.md b/content/develop/use/pipelining/index.md index 88343d0020..bf1a721e91 100644 --- a/content/develop/use/pipelining/index.md +++ b/content/develop/use/pipelining/index.md @@ -151,11 +151,11 @@ As you can see, using pipelining, we improved the transfer by a factor of five. ## Pipelining vs Scripting -Using [Redis scripting](/commands/eval), available since Redis 2.6, a number of use cases for pipelining can be addressed more efficiently using scripts that perform a lot of the work needed at the server side. 
+Using [Redis scripting]({{< relref "/commands/eval" >}}), available since Redis 2.6, a number of use cases for pipelining can be addressed more efficiently using scripts that perform a lot of the work needed at the server side. A big advantage of scripting is that it is able to both read and write data with minimal latency, making operations like *read, compute, write* very fast (pipelining can't help in this scenario since the client needs the reply of the read command before it can call the write command). -Sometimes the application may also want to send [`EVAL`](/commands/eval) or [`EVALSHA`](/commands/evalsha) commands in a pipeline. -This is entirely possible and Redis explicitly supports it with the [SCRIPT LOAD](https://redis.io/commands/script-load) command (it guarantees that [`EVALSHA`](/commands/evalsha) can be called without the risk of failing). +Sometimes the application may also want to send [`EVAL`]({{< relref "/commands/eval" >}}) or [`EVALSHA`]({{< relref "/commands/evalsha" >}}) commands in a pipeline. +This is entirely possible and Redis explicitly supports it with the [SCRIPT LOAD](https://redis.io/commands/script-load) command (it guarantees that [`EVALSHA`]({{< relref "/commands/evalsha" >}}) can be called without the risk of failing). ## Appendix: Why are busy loops slow even on the loopback interface? 
From 415d1f6d028f5a15bc17b451194c83caf9085fbb Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Wed, 24 Jan 2024 15:22:35 +0100 Subject: [PATCH 12/15] Replaced redis.io links with internal links --- build/migrate.py | 23 ++++++++++++++++++- build/migrate/corrected_refs.csv | 17 ++++++++++++++ content/commands/set/index.md | 2 +- content/commands/setnx/index.md | 2 +- content/develop/connect/clients/dotnet.md | 2 +- content/develop/connect/clients/nodejs.md | 2 +- .../connect/clients/om-clients/stack-node.md | 4 ++-- .../clients/om-clients/stack-python.md | 2 +- .../clients/om-clients/stack-spring.md | 4 ++-- content/develop/connect/insight/_index.md | 2 +- .../tutorials/insight-stream-consumer.md | 2 +- content/develop/data-types/bitmaps.md | 2 +- content/develop/data-types/geospatial.md | 2 +- content/develop/data-types/hashes.md | 4 ++-- content/develop/data-types/json/_index.md | 2 +- content/develop/data-types/lists.md | 2 +- .../data-types/probabilistic/hyperloglogs.md | 2 +- .../data-types/probabilistic/t-digest.md | 4 ++-- content/develop/data-types/sets.md | 2 +- content/develop/data-types/sorted-sets.md | 2 +- content/develop/data-types/streams.md | 2 +- .../data-types/timeseries/quickstart.md | 2 +- .../develop/get-started/vector-database.md | 12 +++++----- .../triggers-and-functions/Quick_Start_CLI.md | 2 +- .../triggers-and-functions/Quick_Start_RI.md | 4 ++-- .../concepts/Sync_Async.md | 2 +- .../concepts/triggers/KeySpace_Triggers.md | 2 +- .../concepts/triggers/Stream_Triggers.md | 4 ++-- .../administration/overview.md | 2 +- .../advanced-concepts/dialects.md | 6 ++--- content/develop/reference/clients.md | 4 ++-- .../reference/modules/modules-api-ref.md | 20 ++++++++-------- content/develop/use/patterns/twitter-clone.md | 2 +- content/develop/use/pipelining/index.md | 2 +- 34 files changed, 94 insertions(+), 56 deletions(-) diff --git a/build/migrate.py b/build/migrate.py index 2435239316..5132c5532f 100755 --- a/build/migrate.py +++ 
b/build/migrate.py @@ -129,6 +129,18 @@ def _replace_link(match, new_prefix): return match.group(1) + '{{< relref "' + new_prefix + match.group(3) + '" >}}' + match.group(4) +''' +Helps to substitute the prefix https://redis.io with e.g. / within a link +''' +def fq_link_to_page_link_in_file(file_path, old_prefix, new_prefix): + with open(file_path, 'r', encoding='utf-8') as file: + file_content = file.read() + link_pattern = re.compile(r'(\[.*?\]\()(' + re.escape(old_prefix) + r')(.*?)' + r'(\))') + updated_content = re.sub(link_pattern, r'\1' + new_prefix + r'\3' + r'\4', file_content) + + with open(file_path, 'w', encoding='utf-8') as file: + file.write(updated_content) + ''' Replace the link within the file ''' @@ -314,9 +326,10 @@ def migrate_commands(): for f in markdown_files: add_categories(f, 'categories', ['docs', 'develop', 'stack', 'oss', 'rs', 'rc', 'oss', 'kubernetes', 'clients']) remove_prop_from_file(f, "aliases") + replace_links_in_file(f, '/docs', '/develop') replace_links_in_file(f, '/commands', '/commands') - + replace_links_in_file(f, 'https://redis.io/', '/') ''' Migrate the developer documentation ''' @@ -348,9 +361,17 @@ def migrate_developer_docs(): for f in markdown_files: print("Replacing links in {}".format(f)) + + fq_link_to_page_link_in_file(f, 'https://redis.io/', '/') + + # Map /docs to /develop replace_links_in_file(f, '/docs', '/develop') + # Ensures that the URL-s are rewritten in relrefs replace_links_in_file(f, '/commands', '/commands') + + + remove_prop_from_file(f, "aliases") add_categories(f, 'categories', ['docs', 'develop', 'stack', 'oss', 'rs', 'rc', 'oss', 'kubernetes', 'clients']) diff --git a/build/migrate/corrected_refs.csv b/build/migrate/corrected_refs.csv index 04533f7d07..d13e0547ba 100644 --- a/build/migrate/corrected_refs.csv +++ b/build/migrate/corrected_refs.csv @@ -20,6 +20,7 @@ broken_ref;fixed_ref;comment /develop/data-types/lists/;/develop/data-types/lists; 
/develop/data-types/sets/;/develop/data-types/sets; /develop/data-types/streams/;/develop/data-types/streams; +/develop/manual/data-types/streams/#consumer-groups;/develop/data-types/streams; /develop/data-types/streams-tutorial;/develop/data-types/streams; /develop/data-types/strings/;/develop/data-types/strings; /develop/get-started/data-store/;/develop/get-started/data-store; @@ -28,6 +29,12 @@ broken_ref;fixed_ref;comment /develop/get-started/vector-database/;/develop/get-started/vector-database; /develop/getting-started/;/operate/oss_and_stack/install/; /develop/getting-started/install-stack/;/operate/oss_and_stack/install/install-stack/; +/develop/stack/get-started/install/;/operate/oss_and_stack/install/install-stack/; +/develop/ui/insight/;/develop/connect/insight/; +/develop/manual/keyspace-notifications/;/develop/use/keyspace-notifications; +/develop/manual/keyspace-notifications/#events-generated-by-different-commands/?utm_source=redis\&utm_medium=app\&utm_campaign=redisinsight_triggers_and_functions_guide;/develop/use/keyspace-notifications; +/develop/manual/keyspace-notifications/#events-generated-by-different-commands;/develop/use/keyspace-notifications; +/commands/blpop/;/commands/blpop; /develop/getting-started/install-stack/docker;/operate/oss_and_stack/install/install-stack/docker; /develop/install/install-stack;/operate/oss_and_stack/install/install-stack/; /develop/install/install-stack/;/operate/oss_and_stack/install/install-stack/; @@ -65,6 +72,9 @@ broken_ref;fixed_ref;comment /develop/interact/search-and-query/basic-constructs/schema-definition/;/develop/interact/search-and-query/basic-constructs/schema-definition; /develop/interact/search-and-query/img/polygons.png;/develop/interact/search-and-query/img/polygons.png;Markdown image reference, RelRefs don’t work with images. 
Markdown syntax ![Name](Ref) /develop/interact/search-and-query/query/combined/;/develop/interact/search-and-query/query/combined; +/develop/interact/search-and-query/query/vector-search/;/develop/interact/search-and-query/query/vector-search; +/develop/interact/search-and-query/query/geo-spatial/;/develop/interact/search-and-query/query/geo-spatial; +/develop/manual/pipelining/;/develop/use/pipelining; /develop/interact/search-and-query/quickstart/;/develop/get-started/document-database; /develop/interact/search-and-query/search/aggregations/;/develop/interact/search-and-query/advanced-concepts/aggregations; /develop/interact/search-and-query/search/aggregations/#cursor-api;/develop/interact/search-and-query/advanced-concepts/aggregations; @@ -78,6 +88,7 @@ broken_ref;fixed_ref;comment /develop/manual/client-side-caching/;/develop/use/client-side-caching; /develop/manual/config/;/operate/oss_and_stack/management/config; /develop/manual/data-types/streams;/develop/data-types/streams; +/develop/manual/data-types/streams/;/develop/data-types/streams; /develop/manual/eviction/;/develop/reference/eviction; /develop/manual/keyspace/;/develop/use/keyspace; /develop/manual/programmability/;/develop/interact/programmability/; @@ -103,6 +114,10 @@ broken_ref;fixed_ref;comment /develop/stack/search/indexing_json;/develop/interact/search-and-query/indexing/; /develop/stack/search/indexing_json/;/develop/interact/search-and-query/indexing/; /develop/stack/search/indexing_json/#index-json-arrays-as-vector;/develop/interact/search-and-query/indexing/; +/develop/stack/search/indexing_json/#index-json-arrays-as-text;/develop/interact/search-and-query/indexing/; +/develop/stack/search/indexing_json/#index-json-arrays-as-tag;/develop/interact/search-and-query/indexing/; +/develop/stack/search/indexing_json/#index-json-arrays-as-numeric;/develop/interact/search-and-query/indexing/; 
+/develop/stack/search/indexing_json/#index-json-arrays-as-geo;/develop/interact/search-and-query/indexing/; /develop/stack/search/reference/highlight;/develop/interact/search-and-query/advanced-concepts/highlight; /develop/stack/search/reference/query_syntax;/develop/interact/search-and-query/advanced-concepts/query_syntax; /develop/stack/search/reference/query_syntax/;/develop/interact/search-and-query/advanced-concepts/query_syntax; @@ -110,6 +125,7 @@ broken_ref;fixed_ref;comment /develop/stack/search/reference/vectors/;/develop/interact/search-and-query/advanced-concepts/vectors; /develop/stack/search/reference/vectors/#runtime-attributes;/develop/interact/search-and-query/advanced-concepts/vectors; /develop/stack/timeseries/;/develop/data-types/timeseries/; +/commands/blpop/;/commands/blpop; #-- Command docs;; /commands/module-load/;/commands/module-load; /commands/auth/;/commands/auth; @@ -144,6 +160,7 @@ broken_ref;fixed_ref;comment /develop/stack/timeseries/configuration/#chunk_size_bytes;/develop/data-types/timeseries/configuration; /develop/stack/timeseries;/develop/data-types/timeseries/; /develop/manual/;/develop/use/; +/topics/distlock;/develop/use/patterns/distributed-locks; diff --git a/content/commands/set/index.md b/content/commands/set/index.md index ca3890f626..b5f2e7e25e 100644 --- a/content/commands/set/index.md +++ b/content/commands/set/index.md @@ -148,7 +148,7 @@ SET anotherkey "will expire in a minute" EX 60 ## Patterns -**Note:** The following pattern is discouraged in favor of [the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit more complex to implement, but offers better guarantees and is fault tolerant. +**Note:** The following pattern is discouraged in favor of [the Redlock algorithm]({{< relref "/develop/use/patterns/distributed-locks" >}}) which is only a bit more complex to implement, but offers better guarantees and is fault tolerant. 
The command `SET resource-name anystring NX EX max-lock-time` is a simple way to implement a locking system with Redis. diff --git a/content/commands/setnx/index.md b/content/commands/setnx/index.md index ecd754a182..53fe154751 100644 --- a/content/commands/setnx/index.md +++ b/content/commands/setnx/index.md @@ -72,7 +72,7 @@ GET mykey **Please note that:** -1. The following pattern is discouraged in favor of [the Redlock algorithm](https://redis.io/topics/distlock) which is only a bit more complex to implement, but offers better guarantees and is fault tolerant. +1. The following pattern is discouraged in favor of [the Redlock algorithm]({{< relref "/develop/use/patterns/distributed-locks" >}}) which is only a bit more complex to implement, but offers better guarantees and is fault tolerant. 2. We document the old pattern anyway because certain existing implementations link to this page as a reference. Moreover it is an interesting example of how Redis commands can be used in order to mount programming primitives. 3. Anyway even assuming a single-instance locking primitive, starting with 2.6.12 it is possible to create a much simpler locking primitive, equivalent to the one discussed here, using the [`SET`]({{< relref "/commands/set" >}}) command to acquire the lock, and a simple Lua script to release the lock. The pattern is documented in the [`SET`]({{< relref "/commands/set" >}}) command page. diff --git a/content/develop/connect/clients/dotnet.md b/content/develop/connect/clients/dotnet.md index d060095016..ee0cdd9465 100644 --- a/content/develop/connect/clients/dotnet.md +++ b/content/develop/connect/clients/dotnet.md @@ -20,7 +20,7 @@ Install Redis and the Redis client, then connect your .NET application to a Redi ## NRedisStack [NRedisStack](https://github.com/redis/NRedisStack) is a .NET client for Redis. -`NredisStack` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. 
See [Getting started]({{< relref "/operate/oss_and_stack/install/" >}}) for Redis installation instructions. +`NredisStack` requires a running Redis or [Redis Stack]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}) server. See [Getting started]({{< relref "/operate/oss_and_stack/install/" >}}) for Redis installation instructions. ### Install diff --git a/content/develop/connect/clients/nodejs.md b/content/develop/connect/clients/nodejs.md index 6937d43c1d..3116332255 100644 --- a/content/develop/connect/clients/nodejs.md +++ b/content/develop/connect/clients/nodejs.md @@ -20,7 +20,7 @@ Install Redis and the Redis client, then connect your Node.js application to a R ## node-redis [node-redis](https://github.com/redis/node-redis) is a modern, high-performance Redis client for Node.js. -`node-redis` requires a running Redis or [Redis Stack](https://redis.io/docs/getting-started/install-stack/) server. See [Getting started]({{< relref "/operate/oss_and_stack/install/" >}}) for Redis installation instructions. +`node-redis` requires a running Redis or [Redis Stack]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}) server. See [Getting started]({{< relref "/operate/oss_and_stack/install/" >}}) for Redis installation instructions. ### Install diff --git a/content/develop/connect/clients/om-clients/stack-node.md b/content/develop/connect/clients/om-clients/stack-node.md index a1cc29f99f..9bd5e7c60f 100644 --- a/content/develop/connect/clients/om-clients/stack-node.md +++ b/content/develop/connect/clients/om-clients/stack-node.md @@ -758,7 +758,7 @@ app.use('/persons', searchRouter) And that's that. But this just isn't enough to satisfy. It doesn't show you anything new, except maybe the usage of a `date` field. And, it's not really location *tracking*. It just shows where these people last were, no history. So let's add some!. -To add some history, we're going to use a [Redis Stream](https://redis.io/topics/streams-intro). 
Streams are a big topic but don't worry if you’re not familiar with them, you can think of them as being sort of like a log file stored in a Redis key where each entry represents an event. In our case, the event would be the person moving about or checking in or whatever. +To add some history, we're going to use a [Redis Stream](/topics/streams-intro). Streams are a big topic but don't worry if you’re not familiar with them, you can think of them as being sort of like a log file stored in a Redis key where each entry represents an event. In our case, the event would be the person moving about or checking in or whatever. But there's a problem. Redis OM doesn’t support Streams even though Redis Stack does. So how do we take advantage of them in our application? By using [Node Redis](https://github.com/redis/node-redis). Node Redis is a low-level Redis client for Node.js that gives you access to all the Redis commands and data types. Internally, Redis OM is creating and using a Node Redis connection. You can use that connection too. Or rather, Redis OM can be *told* to use the connection you are using. Let me show you how. @@ -795,7 +795,7 @@ And that's it. Redis OM is now using the `connection` you created. Note that we ## Storing location history with Streams -To add an event to a Stream we need to use the [XADD](https://redis.io/commands/xadd) command. Node Redis exposes that as `.xAdd()`. So, we need to add a call to `.xAdd()` in our route. Modify `location-router.js` to import our `connection`: +To add an event to a Stream we need to use the [XADD]({{< relref "/commands/xadd" >}}) command. Node Redis exposes that as `.xAdd()`. So, we need to add a call to `.xAdd()` in our route. 
Modify `location-router.js` to import our `connection`: {{< highlight javascript >}} import { connection } from '../om/client.js' diff --git a/content/develop/connect/clients/om-clients/stack-python.md b/content/develop/connect/clients/om-clients/stack-python.md index 8d03c20f45..38c1bbb743 100644 --- a/content/develop/connect/clients/om-clients/stack-python.md +++ b/content/develop/connect/clients/om-clients/stack-python.md @@ -58,7 +58,7 @@ To run this application you'll need: * [git](https://git-scm.com/download) - to clone the repo to your machine. * [Python 3.9 or higher](https://www.python.org/downloads/). -* A [Redis Stack](https://redis.io) database, or Redis with the [Search and Query](https://redis.io/docs/stack/search/) and [JSON](https://redis.io/docs/stack/json/) features installed. We've provided a `docker-compose.yml` for this. You can also [sign up for a free 30Mb database with Redis Cloud](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users) - be sure to check the Redis Stack option when creating your cloud database. +* A [Redis Stack](https://redis.io) database, or Redis with the [Search and Query]({{< relref "/develop/interact/search-and-query/" >}}) and [JSON]({{< relref "/develop/data-types/json/" >}}) features installed. We've provided a `docker-compose.yml` for this. You can also [sign up for a free 30Mb database with Redis Cloud](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users) - be sure to check the Redis Stack option when creating your cloud database. * [curl](https://curl.se/), or [Postman](https://www.postman.com/) - to send HTTP requests to the application. We'll provide examples using curl in this document. * Optional: [RedisInsight](https://redis.com/redis-enterprise/redis-insight/), a free data visualization and database management tool for Redis. 
When downloading RedisInsight, be sure to select version 2.x or use the version that comes with Redis Stack. diff --git a/content/develop/connect/clients/om-clients/stack-spring.md b/content/develop/connect/clients/om-clients/stack-spring.md index 174127329d..16386c4273 100644 --- a/content/develop/connect/clients/om-clients/stack-spring.md +++ b/content/develop/connect/clients/om-clients/stack-spring.md @@ -23,8 +23,8 @@ Redis OM Spring provides a robust repository and custom object-mapping abstracti ## What you’ll need: -* Redis Stack: See [https://redis.io/docs/stack/get-started/install/](https://redis.io/docs/stack/get-started/install/) -* RedisInsight: See [https://redis.io/docs/ui/insight](https://redis.io/docs/ui/insight/) +* Redis Stack: See [https://redis.io/docs/stack/get-started/install/]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}) +* RedisInsight: See [https://redis.io/docs/ui/insight]({{< relref "/develop/connect/insight/" >}}) * Your favorite browser * Java 11 or greater diff --git a/content/develop/connect/insight/_index.md b/content/develop/connect/insight/_index.md index e311a1b42f..49f5966322 100644 --- a/content/develop/connect/insight/_index.md +++ b/content/develop/connect/insight/_index.md @@ -69,7 +69,7 @@ Advanced command line interface with intelligent command auto-complete and compl * Built-in guides: you can conveniently discover Redis and Redis Stack features using the built-in guides. * Command auto-complete support for all features in Redis and Redis Stack. * Visualizations of your indexes, queries, and aggregations. -* Visualizations of your [time series](https://redis.io/docs/stack/timeseries/) data. +* Visualizations of your [time series]({{< relref "/develop/data-types/timeseries/" >}}) data. 
diff --git a/content/develop/connect/insight/tutorials/insight-stream-consumer.md b/content/develop/connect/insight/tutorials/insight-stream-consumer.md index 5e354a6ee6..e984c4a7d7 100644 --- a/content/develop/connect/insight/tutorials/insight-stream-consumer.md +++ b/content/develop/connect/insight/tutorials/insight-stream-consumer.md @@ -84,7 +84,7 @@ This example shows how to bring an existing stream into RedisInsight and work wi 1. Install [RedisInsight](https://redis.com/redis-enterprise/redis-insight/?_ga=2.48624486.1318387955.1655817244-1963545967.1655260674#insight-form). 2. Download and install [Node.js](https://nodejs.org/en/download/) (LTS version). -3. Install [Redis](https://redis.io/download/). In Docker, check that Redis is running locally on the default port 6379 (with no password set). +3. Install [Redis](/download/). In Docker, check that Redis is running locally on the default port 6379 (with no password set). 4. Clone the [code repository](https://github.com/redis-developer/introducing-redis-talk) for this example. See the [README](https://github.com/redis-developer/introducing-redis-talk/tree/main/streams) for more information about this example and installation tips. 5. On your command-line, navigate to the folder containing the code repository and install the Node.js package manager (npm). diff --git a/content/develop/data-types/bitmaps.md b/content/develop/data-types/bitmaps.md index e80a203f51..fedf4a84b5 100644 --- a/content/develop/data-types/bitmaps.md +++ b/content/develop/data-types/bitmaps.md @@ -33,7 +33,7 @@ Some examples of bitmap use cases include: * [`SETBIT`]({{< relref "/commands/setbit" >}}) sets a bit at the provided offset to 0 or 1. * [`GETBIT`]({{< relref "/commands/getbit" >}}) returns the value of a bit at a given offset. -See the [complete list of bitmap commands](https://redis.io/commands/?group=bitmap). +See the [complete list of bitmap commands]({{< relref "/commands/?group=bitmap" >}}). 
## Example diff --git a/content/develop/data-types/geospatial.md b/content/develop/data-types/geospatial.md index 79e6e79e81..b21f59458d 100644 --- a/content/develop/data-types/geospatial.md +++ b/content/develop/data-types/geospatial.md @@ -25,7 +25,7 @@ This data structure is useful for finding nearby points within a given radius or * [`GEOADD`]({{< relref "/commands/geoadd" >}}) adds a location to a given geospatial index (note that longitude comes before latitude with this command). * [`GEOSEARCH`]({{< relref "/commands/geosearch" >}}) returns locations with a given radius or a bounding box. -See the [complete list of geospatial index commands](https://redis.io/commands/?group=geo). +See the [complete list of geospatial index commands]({{< relref "/commands/?group=geo" >}}). ## Examples diff --git a/content/develop/data-types/hashes.md b/content/develop/data-types/hashes.md index 8897be2bea..c2418cf621 100644 --- a/content/develop/data-types/hashes.md +++ b/content/develop/data-types/hashes.md @@ -63,7 +63,7 @@ as well, like [`HINCRBY`]({{< relref "/commands/hincrby" >}}): (integer) 4972 {{< /clients-example >}} -You can find the [full list of hash commands in the documentation](https://redis.io/commands#hash). +You can find the [full list of hash commands in the documentation]({{< relref "/commands#hash" >}}). It is worth noting that small hashes (i.e., a few elements with small values) are encoded in special way in memory that make them very memory efficient. @@ -75,7 +75,7 @@ encoded in special way in memory that make them very memory efficient. * [`HMGET`]({{< relref "/commands/hmget" >}}) returns the values at one or more given fields. * [`HINCRBY`]({{< relref "/commands/hincrby" >}}) increments the value at a given field by the integer provided. -See the [complete list of hash commands](https://redis.io/commands/?group=hash). +See the [complete list of hash commands]({{< relref "/commands/?group=hash" >}}). 
## Examples diff --git a/content/develop/data-types/json/_index.md b/content/develop/data-types/json/_index.md index e32b268154..01158a54be 100644 --- a/content/develop/data-types/json/_index.md +++ b/content/develop/data-types/json/_index.md @@ -19,7 +19,7 @@ weight: 11 [![Discord](https://img.shields.io/discord/697882427875393627?style=flat-square)](https://discord.gg/QUkjSsk) [![Github](https://img.shields.io/static/v1?label=&message=repository&color=5961FF&logo=github)](https://github.com/RedisJSON/RedisJSON/) -The JSON capability of Redis Stack provides JavaScript Object Notation (JSON) support for Redis. It lets you store, update, and retrieve JSON values in a Redis database, similar to any other Redis data type. Redis JSON also works seamlessly with [Search and Query](https://redis.io/docs/stack/search/) to let you [index and query JSON documents](https://redis.io/docs/stack/search/indexing_json). +The JSON capability of Redis Stack provides JavaScript Object Notation (JSON) support for Redis. It lets you store, update, and retrieve JSON values in a Redis database, similar to any other Redis data type. Redis JSON also works seamlessly with [Search and Query]({{< relref "/develop/interact/search-and-query/" >}}) to let you [index and query JSON documents]({{< relref "/develop/interact/search-and-query/indexing/" >}}). ## Primary features diff --git a/content/develop/data-types/lists.md b/content/develop/data-types/lists.md index f8c0112f73..e0f1425d16 100644 --- a/content/develop/data-types/lists.md +++ b/content/develop/data-types/lists.md @@ -41,7 +41,7 @@ For example: * [`BLMOVE`]({{< relref "/commands/blmove" >}}) atomically moves elements from a source list to a target list. If the source list is empty, the command will block until a new element becomes available. -See the [complete series of list commands](https://redis.io/commands/?group=list). +See the [complete series of list commands]({{< relref "/commands/?group=list" >}}). 
## Examples diff --git a/content/develop/data-types/probabilistic/hyperloglogs.md b/content/develop/data-types/probabilistic/hyperloglogs.md index 94d68c2dcc..6f30a915a0 100644 --- a/content/develop/data-types/probabilistic/hyperloglogs.md +++ b/content/develop/data-types/probabilistic/hyperloglogs.md @@ -92,7 +92,7 @@ One HyperLogLog is created per page (video/song) per period, and every IP/identi * [`PFCOUNT`]({{< relref "/commands/pfcount" >}}) returns an estimate of the number of items in the set. * [`PFMERGE`]({{< relref "/commands/pfmerge" >}}) combines two or more HyperLogLogs into one. -See the [complete list of HyperLogLog commands](https://redis.io/commands/?group=hyperloglog). +See the [complete list of HyperLogLog commands]({{< relref "/commands/?group=hyperloglog" >}}). ## Performance diff --git a/content/develop/data-types/probabilistic/t-digest.md b/content/develop/data-types/probabilistic/t-digest.md index 19b45a3abd..81feb085b1 100644 --- a/content/develop/data-types/probabilistic/t-digest.md +++ b/content/develop/data-types/probabilistic/t-digest.md @@ -84,7 +84,7 @@ OK {{< /clients-example >}} -You can repeat calling [TDIGEST.ADD](https://redis.io/commands/tdigest.add/) whenever new observations are available +You can repeat calling [TDIGEST.ADD]({{< baseurl >}}/commands/tdigest.add/) whenever new observations are available #### Estimating fractions or ranks by values @@ -109,7 +109,7 @@ OK {{< /clients-example >}} -And lastly, `TDIGEST.REVRANK key value...` is similar to [TDIGEST.RANK](https://redis.io/commands/tdigest.rank/), but returns, for each input value, an estimation of the number of (observations larger than a given value + half the observations equal to the given value). 
+And lastly, `TDIGEST.REVRANK key value...` is similar to [TDIGEST.RANK]({{< baseurl >}}/commands/tdigest.rank/), but returns, for each input value, an estimation of the number of (observations larger than a given value + half the observations equal to the given value). #### Estimating values by fractions or ranks diff --git a/content/develop/data-types/sets.md b/content/develop/data-types/sets.md index b595affeed..508025caaa 100644 --- a/content/develop/data-types/sets.md +++ b/content/develop/data-types/sets.md @@ -32,7 +32,7 @@ You can use Redis sets to efficiently: * [`SINTER`]({{< relref "/commands/sinter" >}}) returns the set of members that two or more sets have in common (i.e., the intersection). * [`SCARD`]({{< relref "/commands/scard" >}}) returns the size (a.k.a. cardinality) of a set. -See the [complete list of set commands](https://redis.io/commands/?group=set). +See the [complete list of set commands]({{< relref "/commands/?group=set" >}}). ## Examples diff --git a/content/develop/data-types/sorted-sets.md b/content/develop/data-types/sorted-sets.md index 84f564b396..3a6f371ff2 100644 --- a/content/develop/data-types/sorted-sets.md +++ b/content/develop/data-types/sorted-sets.md @@ -237,7 +237,7 @@ You'll see that [`ZADD`]({{< relref "/commands/zadd" >}}) returns 0 when the mem * [`ZRANK`]({{< relref "/commands/zrank" >}}) returns the rank of the provided member, assuming the sorted is in ascending order. * [`ZREVRANK`]({{< relref "/commands/zrevrank" >}}) returns the rank of the provided member, assuming the sorted set is in descending order. -See the [complete list of sorted set commands](https://redis.io/commands/?group=sorted-set). +See the [complete list of sorted set commands]({{< relref "/commands/?group=sorted-set" >}}). 
## Performance diff --git a/content/develop/data-types/streams.md b/content/develop/data-types/streams.md index 096203ba71..ca6055d04c 100644 --- a/content/develop/data-types/streams.md +++ b/content/develop/data-types/streams.md @@ -36,7 +36,7 @@ Redis streams support several trimming strategies (to prevent streams from growi * [`XRANGE`]({{< relref "/commands/xrange" >}}) returns a range of entries between two supplied entry IDs. * [`XLEN`]({{< relref "/commands/xlen" >}}) returns the length of a stream. -See the [complete list of stream commands](https://redis.io/commands/?group=stream). +See the [complete list of stream commands]({{< relref "/commands/?group=stream" >}}). ## Examples diff --git a/content/develop/data-types/timeseries/quickstart.md b/content/develop/data-types/timeseries/quickstart.md index 006cebbbf8..5ebe06959e 100644 --- a/content/develop/data-types/timeseries/quickstart.md +++ b/content/develop/data-types/timeseries/quickstart.md @@ -93,7 +93,7 @@ Binary artifacts are placed under the ```bin``` directory. In your redis-server run: `loadmodule bin/redistimeseries.so` -For more information about modules, go to the [redis official documentation](https://redis.io/topics/modules-intro). +For more information about modules, go to the [redis official documentation](/topics/modules-intro). ## Give it a try with `redis-cli` diff --git a/content/develop/get-started/vector-database.md b/content/develop/get-started/vector-database.md index cd53d3cda6..fc3f34e629 100644 --- a/content/develop/get-started/vector-database.md +++ b/content/develop/get-started/vector-database.md @@ -102,7 +102,7 @@ The following code allows you to look at the structure of one of our bike JSON d {{< clients-example search_vss dump_data />}} ### 2. 
Store the demo data in your database -Then, you iterate over the `bikes` array to store the data as [JSON](https://redis.io/docs/stack/json/) documents in the database by using the [JSON.SET](https://redis.io/commands/json.set/) command. The below code uses a [pipeline](https://redis.io/docs/manual/pipelining/) to minimize the round-trip times: +Then, you iterate over the `bikes` array to store the data as [JSON]({{< relref "/develop/data-types/json/" >}}) documents in the database by using the [JSON.SET]({{< baseurl >}}/commands/json.set/) command. The below code uses a [pipeline]({{< relref "/develop/use/pipelining" >}}) to minimize the round-trip times: {{< clients-example search_vss load_data />}} @@ -125,11 +125,11 @@ In the next step, you must iterate over all the Redis keys with the prefix `bike {{< clients-example search_vss get_keys />}} -Use the keys as a parameter to the [JSON.MGET](https://redis.io/commands/json.mget/) command, along with the JSONPath expression `$.description` to collect the descriptions in a list. Then, pass the list to the `encode` method to get a list of vectorized embeddings: +Use the keys as a parameter to the [JSON.MGET]({{< baseurl >}}/commands/json.mget/) command, along with the JSONPath expression `$.description` to collect the descriptions in a list. Then, pass the list to the `encode` method to get a list of vectorized embeddings: {{< clients-example search_vss generate_embeddings />}} -You now need to add the vectorized descriptions to the JSON documents in Redis using the [JSON.SET](https://redis.io/commands/json.set/) command. The following command inserts a new field in each of the documents under the JSONPath `$.description_embeddings`. Once again, you'll do this using a pipeline: +You now need to add the vectorized descriptions to the JSON documents in Redis using the [JSON.SET]({{< baseurl >}}/commands/json.set/) command. 
The following command inserts a new field in each of the documents under the JSONPath `$.description_embeddings`. Once again, you'll do this using a pipeline: {{< clients-example search_vss load_embeddings />}} @@ -148,7 +148,7 @@ In the example above, the array was shortened considerably for the sake of reada ### 1. Create an index with a vector field -You must create an index to query based on vector metadata or perform vector searches. Use the [FT.CREATE](https://redis.io/commands/ft.create/) command: +You must create an index to query based on vector metadata or perform vector searches. Use the [FT.CREATE]({{< baseurl >}}/commands/ft.create/) command: {{< clients-example search_vss create_index >}} FT.CREATE idx:bikes_vss ON JSON @@ -174,7 +174,7 @@ You can find further details about all these options in the [vector reference do ### 2. Check the state of the index -As soon as you execute the [FT.CREATE](https://redis.io/commands/ft.create/) command, the indexing process runs in the background. In a short time, all JSON documents should be indexed and ready to be queried. To validate that, you can use the [FT.INFO](https://redis.io/commands/ft.info/) command, which provides details and statistics about the index. Of particular interest are the number of documents successfully indexed and the number of failures: +As soon as you execute the [FT.CREATE]({{< baseurl >}}/commands/ft.create/) command, the indexing process runs in the background. In a short time, all JSON documents should be indexed and ready to be queried. To validate that, you can use the [FT.INFO]({{< baseurl >}}/commands/ft.info/) command, which provides details and statistics about the index. Of particular interest are the number of documents successfully indexed and the number of failures: {{< clients-example search_vss validate_index >}} FT.INFO idx:bikes_vss @@ -246,5 +246,5 @@ From the description, this bike is an excellent match for younger children, and ## Next steps 1. 
You can learn more about the query options, such as pre-filters and radius queries, by reading the [vector reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/vectors" >}}). -2. The complete [search and query documentation](https://redis.io/docs/interact/search-and-query/) might be interesting for you. +2. The complete [search and query documentation]({{< relref "/develop/interact/search-and-query/" >}}) might be interesting for you. 3. If you want to follow the code examples more interactively, then you can use the [Jupyter notebook](https://github.com/RedisVentures/redis-vss-getting-started/blob/main/vector_similarity_with_redis.ipynb) that inspired this quick start guide. diff --git a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md index 3e561ec87f..d073a9e49c 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md +++ b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_CLI.md @@ -98,7 +98,7 @@ redis-cli -x TFUNCTION LOAD REPLACE < ./main.js Functions within Redis can respond to events using keyspace triggers. While the majority of these events are initiated by command invocations, they also include events that occur when a key expires or is removed from the database. -For the full list of supported events, please refer to the [Redis keyspace notifications page](https://redis.io/docs/manual/keyspace-notifications/#events-generated-by-different-commands/?utm_source=redis\&utm_medium=app\&utm_campaign=redisinsight_triggers_and_functions_guide). +For the full list of supported events, please refer to the [Redis keyspace notifications page]({{< baseurl >}}/develop/use/keyspace-notifications#events-generated-by-different-commands/?utm_source=redis\&utm_medium=app\&utm_campaign=redisinsight_triggers_and_functions_guide). 
The following code creates a new keyspace trigger that adds a new field to a new or updated hash with the latest update time. diff --git a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md index 018d978922..afd81c8e1b 100644 --- a/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md +++ b/content/develop/interact/programmability/triggers-and-functions/Quick_Start_RI.md @@ -19,7 +19,7 @@ weight: 1 Make sure that you have [Redis Stack installed]({{< relref "/operate/oss_and_stack/install/install-stack/" >}}) and running. Alternatively, you can create a [free Redis Cloud account](https://redis.com/try-free/?utm_source=redisio&utm_medium=referral&utm_campaign=2023-09-try_free&utm_content=cu-redis_cloud_users). -If you haven't already installed RedisInsight, you can download the latest version [here](https://redis.com/redis-enterprise/redis-insight/?_ga=2.232184223.127667221.1704724457-86137583.1685485233&_gl=1*1gygred*_ga*ODYxMzc1ODMuMTY4NTQ4NTIzMw..*_ga_8BKGRQKRPV*MTcwNDkyMzExMC40MDEuMS4xNzA0OTI3MjQ2LjUyLjAuMA..*_gcl_au*MTQzODY1OTU4OS4xNzAxMTg0MzY0). If this is your first time using RedisInsight, you may wish to read through the [RedisInsight guide](https://redis.io/docs/connect/insight/) before continuing with this guide. +If you haven't already installed RedisInsight, you can download the latest version [here](https://redis.com/redis-enterprise/redis-insight/?_ga=2.232184223.127667221.1704724457-86137583.1685485233&_gl=1*1gygred*_ga*ODYxMzc1ODMuMTY4NTQ4NTIzMw..*_ga_8BKGRQKRPV*MTcwNDkyMzExMC40MDEuMS4xNzA0OTI3MjQ2LjUyLjAuMA..*_gcl_au*MTQzODY1OTU4OS4xNzAxMTg0MzY0). If this is your first time using RedisInsight, you may wish to read through the [RedisInsight guide]({{< relref "/develop/connect/insight/" >}}) before continuing with this guide. 
## Connect to Redis Stack @@ -59,7 +59,7 @@ Click on the **+ Add Library** button as before and, instead of adding the code Functions within Redis can respond to events using keyspace triggers. While the majority of these events are initiated by command invocations, they also include events that occur when a key expires or is removed from the database. -For the full list of supported events, please refer to the [Redis keyspace notifications page](https://redis.io/docs/manual/keyspace-notifications/#events-generated-by-different-commands/?utm_source=redis\&utm_medium=app\&utm_campaign=redisinsight_triggers_and_functions_guide). +For the full list of supported events, please refer to the [Redis keyspace notifications page]({{< baseurl >}}/develop/use/keyspace-notifications#events-generated-by-different-commands/?utm_source=redis\&utm_medium=app\&utm_campaign=redisinsight_triggers_and_functions_guide). The following code creates a new keyspace trigger that adds a new field to a new or updated hash with the latest update time. diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md b/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md index ec1fe65b72..c5df5e579f 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/Sync_Async.md @@ -198,7 +198,7 @@ redis.registerAsyncFunction('test', function(client, expected_name){ # Call blocking commands -Redis has a few commands that blocks the client and executed asynchronously when some condition holds (commands like [blpop](https://redis.io/commands/blpop/)). In general, such commands are not suppose to be called inside a script and calling them will result in running their none blocking logic. For example, [blpop](https://redis.io/commands/blpop/) will basically runs lpop and return empty result if the list it empty. 
+Redis has a few commands that block the client and execute asynchronously when some condition holds (commands like [blpop]({{< relref "/commands/blpop" >}})). In general, such commands are not supposed to be called inside a script, and calling them will result in running their non-blocking logic. For example, [blpop]({{< relref "/commands/blpop" >}}) will basically run lpop and return an empty result if the list is empty. RedisGears allows running blocking commands using `client.callAsync` API. `client.callAsync` will execute the blocking command and return a promise object which will be resolved when the command invocation finished (notice that `client.callAsync` allow calling any command and not just blocking but it will always return a promise object that will be resolve later, so **using it for regular commands is less efficient**). diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md index 8b301d0705..08fbfef9ff 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/KeySpace_Triggers.md @@ -22,7 +22,7 @@ Keyspace triggers allow you to register a function that will be executed wheneve 1. Expired: This event is fired when a key expires from the database. 2. Evicted: This event is fired when a key is evicted from the database. -For a complete list of supported events, please refer to the [Redis keyspace notifications page](https://redis.io/docs/manual/keyspace-notifications/#events-generated-by-different-commands). +For a complete list of supported events, please refer to the [Redis keyspace notifications page]({{< baseurl >}}/develop/use/keyspace-notifications#events-generated-by-different-commands).
To register a keyspace trigger, you need to use the `redis.registerKeySpaceTrigger` API when loading your library. The following example demonstrates how to register a database trigger that adds a "last updated" field whenever a hash key is modified: diff --git a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md index c9cb053d00..f56c95fce8 100644 --- a/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md +++ b/content/develop/interact/programmability/triggers-and-functions/concepts/triggers/Stream_Triggers.md @@ -17,11 +17,11 @@ title: Stream triggers weight: 2 --- -Redis Stack's triggers and functions feature comes with a full stream API to process data from [Redis streams](https://redis.io/docs/manual/data-types/streams/). Unlike RedisGears v1 that provided a micro batching API, the new triggers and functions feature provides a **real streaming** API, which means that the data will be processed as soon as it enters the stream. +Redis Stack's triggers and functions feature comes with a full stream API to process data from [Redis streams]({{< relref "/develop/data-types/streams" >}}). Unlike RedisGears v1 that provided a micro batching API, the new triggers and functions feature provides a **real streaming** API, which means that the data will be processed as soon as it enters the stream. ## Register a stream consumer -Triggers and functions provide an API that allows to register a stream trigger. Do not get confused with [Redis streams consumer groups](https://redis.io/docs/manual/data-types/streams/#consumer-groups), triggers and functions uses the Redis Module API to efficiently read the stream and manage its consumers. This approach gives a much better performance as there is no need to invoke any Redis commands in order to read from the stream. 
Lets see a simple example: +Triggers and functions provide an API that allows you to register a stream trigger. Do not get confused with [Redis streams consumer groups]({{< baseurl >}}/develop/data-types/streams#consumer-groups); triggers and functions use the Redis Module API to efficiently read the stream and manage its consumers. This approach gives much better performance as there is no need to invoke any Redis commands in order to read from the stream. Let's see a simple example: ```js #!js api_version=1.0 name=myFirstLibrary diff --git a/content/develop/interact/search-and-query/administration/overview.md b/content/develop/interact/search-and-query/administration/overview.md index d69d94755e..5e46a754e2 100644 --- a/content/develop/interact/search-and-query/administration/overview.md +++ b/content/develop/interact/search-and-query/administration/overview.md @@ -301,7 +301,7 @@ Redis Stack's auto-completer supports Unicode, allowing for fuzzy matches in non ### The Redis module API -RediSearch is implemented using the [Redis module API](https://redis.io/topics/modules-intro) and is loaded into Redis as an extension module at start-up. +RediSearch is implemented using the [Redis module API](/topics/modules-intro) and is loaded into Redis as an extension module at start-up. Redis modules make it possible to extend Redis's core functionality, implementing new Redis commands, data structures, and capabilities with similar performance to native core Redis itself. Redis modules are dynamic libraries that can be loaded into Redis at start-up or loaded at run-time using the [`MODULE LOAD`]({{< relref "/commands/module-load" >}}) command. Redis exports a C API, in the form of a single C header file called `redismodule.h`.
diff --git a/content/develop/interact/search-and-query/advanced-concepts/dialects.md b/content/develop/interact/search-and-query/advanced-concepts/dialects.md index 96b331bb67..c1975c980f 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/dialects.md +++ b/content/develop/interact/search-and-query/advanced-concepts/dialects.md @@ -28,7 +28,7 @@ This dialect is also the default dialect. See below for information about changi ## `DIALECT 2` Dialect version 2 was introduced in the [2.4](https://github.com/RediSearch/RediSearch/releases/tag/v2.4.3) release to address query parser inconsistencies found in previous versions of Redis Stack. Dialect version 1 remains the default dialect. To use dialect version 2, append `DIALECT 2` to your query command. -Support for vector search also was introduced in the 2.4 release and requires `DIALECT 2`. See [here](https://redis.io/docs/interact/search-and-query/query/vector-search/) for more details. +Support for vector search also was introduced in the 2.4 release and requires `DIALECT 2`. See [here]({{< relref "/develop/interact/search-and-query/query/vector-search" >}}) for more details. `FT.SEARCH ... DIALECT 2` It was determined that under certain conditions some query parsing rules did not behave as originally intended. @@ -80,7 +80,7 @@ With `DIALECT 2` you can use un-escaped spaces in tag queries, even with stopwor ## `DIALECT 3` -Dialect version 3 was introduced in the [2.6](https://github.com/RediSearch/RediSearch/releases/tag/v2.6.3) release. 
This version introduced support for multi-value indexing and querying of attributes for any attribute type ( [TEXT](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-text), [TAG](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-tag), [NUMERIC](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-numeric), [GEO](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-geo) and [VECTOR](https://redis.io/docs/stack/search/indexing_json/#index-json-arrays-as-vector)) defined by a [JSONPath](https://redis.io/docs/stack/json/path/) leading to an array or multiple scalar values. Support for [GEOSHAPE](https://redis.io/docs/interact/search-and-query/query/geo-spatial/) queries was also introduced in this dialect. +Dialect version 3 was introduced in the [2.6](https://github.com/RediSearch/RediSearch/releases/tag/v2.6.3) release. This version introduced support for multi-value indexing and querying of attributes for any attribute type ( [TEXT]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-text), [TAG]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-tag), [NUMERIC]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-numeric), [GEO]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-geo) and [VECTOR]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-vector)) defined by a [JSONPath]({{< relref "/develop/data-types/json/path" >}}) leading to an array or multiple scalar values. Support for [GEOSHAPE]({{< relref "/develop/interact/search-and-query/query/geo-spatial" >}}) queries was also introduced in this dialect. The primary difference between dialects version 2 and version 3 is that JSON is returned rather than scalars for multi-value attributes. 
Apart from specifying `DIALECT 3` at the end of a [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. To use dialect version 3, append `DIALECT 3` to your query command. @@ -158,7 +158,7 @@ Dialect version 4 will improve performance in four different scenarios: 1. **No optimization** - If there is a sort by score or by a non-numeric field, there is no other option but to retrieve all results and compare their values to the search parameters. ## Use [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli) to compare dialects -The [[`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli)](https://redis.io/commands/ft.explaincli/) is a powerful tool that provides a window into the inner workings of your queries. It's like a roadmap that details your query's journey from start to finish. +The [[`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli)](/commands/ft.explaincli/) is a powerful tool that provides a window into the inner workings of your queries. It's like a roadmap that details your query's journey from start to finish. When you run [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli), it returns an array representing the execution plan of a complex query. This plan is a step-by-step guide of how Redis interprets your query and how it plans to fetch results. It's a behind-the-scenes look at the process, giving you insights into how the search engine works. diff --git a/content/develop/reference/clients.md b/content/develop/reference/clients.md index c9666e56ba..543abee173 100644 --- a/content/develop/reference/clients.md +++ b/content/develop/reference/clients.md @@ -193,11 +193,11 @@ In the above example two clients are connected to the Redis server. Let's look a * **name**: The client name as set by [`CLIENT SETNAME`]({{< relref "/commands/client-setname" >}}). * **age**: The number of seconds the connection existed for. 
* **idle**: The number of seconds the connection is idle. -* **flags**: The kind of client (N means normal client, check the [full list of flags](https://redis.io/commands/client-list)). +* **flags**: The kind of client (N means normal client, check the [full list of flags]({{< relref "/commands/client-list" >}})). * **omem**: The amount of memory used by the client for the output buffer. * **cmd**: The last executed command. -See the [[`CLIENT LIST`]({{< relref "/commands/client-list" >}})](https://redis.io/commands/client-list) documentation for the full listing of fields and their purpose. +See the [`CLIENT LIST`]({{< relref "/commands/client-list" >}}) documentation for the full listing of fields and their purpose. Once you have the list of clients, you can close a client's connection using the [`CLIENT KILL`]({{< relref "/commands/client-kill" >}}) command, specifying the client address as its argument. diff --git a/content/develop/reference/modules/modules-api-ref.md b/content/develop/reference/modules/modules-api-ref.md index b80464a3c9..d4407ece25 100644 --- a/content/develop/reference/modules/modules-api-ref.md +++ b/content/develop/reference/modules/modules-api-ref.md @@ -169,7 +169,7 @@ The function returns NULL if `bytes` is 0. These functions are used to implement custom Redis commands. -For examples, see [https://redis.io/topics/modules-intro](https://redis.io/topics/modules-intro). +For examples, see [https://redis.io/topics/modules-intro](/topics/modules-intro). @@ -362,7 +362,7 @@ example "write deny-oom". The set of flags are: the arguments that are channels. The last three parameters specify which arguments of the new command are -Redis keys. See [https://redis.io/commands/command](https://redis.io/commands/command) for more information. +Redis keys. See [https://redis.io/commands/command]({{< relref "/commands/command" >}}) for more information. * `firstkey`: One-based index of the first argument that's a key.
Position 0 is always the command name itself. @@ -516,7 +516,7 @@ All fields except `version` are optional. Explanation of the fields: both strings set to NULL. - `tips`: A string of space-separated tips regarding this command, meant for - clients and proxies. See [https://redis.io/topics/command-tips](https://redis.io/topics/command-tips). + clients and proxies. See [https://redis.io/topics/command-tips](/topics/command-tips). - `arity`: Number of arguments, including the command name itself. A positive number specifies an exact number of arguments and a negative number @@ -3090,7 +3090,7 @@ The returned `RedisModuleString` objects should be released with ## Key API for Stream type -For an introduction to streams, see [https://redis.io/topics/streams-intro](https://redis.io/topics/streams-intro). +For an introduction to streams, see [https://redis.io/topics/streams-intro](/topics/streams-intro). The type `RedisModuleStreamID`, which is used in stream functions, is a struct with two 64-bit fields and is defined as @@ -3746,7 +3746,7 @@ Example code fragment: // Do something with myval. } -This API is documented here: [https://redis.io/topics/modules-intro](https://redis.io/topics/modules-intro) +This API is documented here: [https://redis.io/topics/modules-intro](/topics/modules-intro) @@ -3783,7 +3783,7 @@ AOF rewrite, and so forth). In this section we define this API. Register a new data type exported by the module. The parameters are the following. Please for in depth documentation check the modules API -documentation, especially [https://redis.io/topics/modules-native-types](https://redis.io/topics/modules-native-types). +documentation, especially [https://redis.io/topics/modules-native-types](/topics/modules-native-types). * **name**: A 9 characters data type name that MUST be unique in the Redis Modules ecosystem. Be creative... and there will be no collisions. Use @@ -4432,7 +4432,7 @@ latency-monitor-threshold. 
## Blocking clients from modules For a guide about blocking commands in modules, see -[https://redis.io/topics/modules-blocking-ops](https://redis.io/topics/modules-blocking-ops). +[https://redis.io/topics/modules-blocking-ops](/topics/modules-blocking-ops). @@ -4986,7 +4986,7 @@ that the notification code will be executed in the middle on Redis logic runs is dangerous and discouraged. In order to react to key space events with write actions, please refer to [`RedisModule_AddPostNotificationJob`](#RedisModule_AddPostNotificationJob). -See [https://redis.io/topics/notifications](https://redis.io/topics/notifications) for more information. +See [https://redis.io/topics/notifications](/topics/notifications) for more information. @@ -5560,7 +5560,7 @@ If the user is able to access the pubsub channel then `REDISMODULE_OK` is return Adds a new entry in the ACL log. Returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` on error. -For more information about ACL log, please refer to [https://redis.io/commands/acl-log](https://redis.io/commands/acl-log) +For more information about ACL log, please refer to [https://redis.io/commands/acl-log]({{< relref "/commands/acl-log" >}}) @@ -5576,7 +5576,7 @@ For more information about ACL log, please refer to [https://redis.io/commands/a Adds a new entry in the ACL log with the `username` `RedisModuleString` provided. Returns `REDISMODULE_OK` on success and `REDISMODULE_ERR` on error. 
-For more information about ACL log, please refer to [https://redis.io/commands/acl-log](https://redis.io/commands/acl-log) +For more information about ACL log, please refer to [https://redis.io/commands/acl-log]({{< relref "/commands/acl-log" >}}) diff --git a/content/develop/use/patterns/twitter-clone.md b/content/develop/use/patterns/twitter-clone.md index ad062e7c76..56cd2992d2 100644 --- a/content/develop/use/patterns/twitter-clone.md +++ b/content/develop/use/patterns/twitter-clone.md @@ -153,7 +153,7 @@ also to retrieve its score if it exists, we use the [`ZSCORE`]({{< relref "/comm Sorted Sets are a very powerful data structure, you can query elements by score range, lexicographically, in reverse order, and so forth. -To know more [please check the Sorted Set sections in the official Redis commands documentation](https://redis.io/commands/#sorted_set). +To know more [please check the Sorted Set sections in the official Redis commands documentation]({{< relref "/commands/#sorted_set" >}}). The Hash data type --- diff --git a/content/develop/use/pipelining/index.md b/content/develop/use/pipelining/index.md index bf1a721e91..e774ee5bf7 100644 --- a/content/develop/use/pipelining/index.md +++ b/content/develop/use/pipelining/index.md @@ -155,7 +155,7 @@ Using [Redis scripting]({{< relref "/commands/eval" >}}), available since Redis A big advantage of scripting is that it is able to both read and write data with minimal latency, making operations like *read, compute, write* very fast (pipelining can't help in this scenario since the client needs the reply of the read command before it can call the write command). Sometimes the application may also want to send [`EVAL`]({{< relref "/commands/eval" >}}) or [`EVALSHA`]({{< relref "/commands/evalsha" >}}) commands in a pipeline. 
-This is entirely possible and Redis explicitly supports it with the [SCRIPT LOAD](https://redis.io/commands/script-load) command (it guarantees that [`EVALSHA`]({{< relref "/commands/evalsha" >}}) can be called without the risk of failing). +This is entirely possible and Redis explicitly supports it with the [SCRIPT LOAD]({{< relref "/commands/script-load" >}}) command (it guarantees that [`EVALSHA`]({{< relref "/commands/evalsha" >}}) can be called without the risk of failing). ## Appendix: Why are busy loops slow even on the loopback interface? From 812ad19fc2a42b1cbcce37c1e75e9882109abb31 Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Wed, 24 Jan 2024 16:50:01 +0100 Subject: [PATCH 13/15] Improved the baseurl shortcode --- .gitignore | 1 + layouts/shortcodes/baseurl.html | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index f9b176eecc..4b371b200d 100644 --- a/.gitignore +++ b/.gitignore @@ -2,5 +2,6 @@ public/ resources/ node_modules/ content/tmp/ +examples package-lock.json .hugo_build.lock \ No newline at end of file diff --git a/layouts/shortcodes/baseurl.html b/layouts/shortcodes/baseurl.html index b9d49ea11c..3cbf14917c 100644 --- a/layouts/shortcodes/baseurl.html +++ b/layouts/shortcodes/baseurl.html @@ -1 +1,7 @@ -{{ .Site.BaseURL }} \ No newline at end of file +{{ $base := .Site.BaseURL }} +{{ $parsed := urls.Parse $base }} +{{ $path := $parsed.Path }} + +{{ if not (eq $path "/") }} +{{ $path }} +{{ end }} \ No newline at end of file From e454fa43df26ba8c0f12aa43e4d97ea2a7e72f80 Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Wed, 24 Jan 2024 17:31:25 +0100 Subject: [PATCH 14/15] Trimming the template output --- layouts/shortcodes/baseurl.html | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/layouts/shortcodes/baseurl.html b/layouts/shortcodes/baseurl.html index 3cbf14917c..d989f64d4e 100644 --- a/layouts/shortcodes/baseurl.html +++ b/layouts/shortcodes/baseurl.html 
@@ -1,7 +1 @@ -{{ $base := .Site.BaseURL }} -{{ $parsed := urls.Parse $base }} -{{ $path := $parsed.Path }} - -{{ if not (eq $path "/") }} -{{ $path }} -{{ end }} \ No newline at end of file +{{ $base := .Site.BaseURL }}{{ $parsed := urls.Parse $base }}{{ $path := $parsed.Path }}{{ if not (eq $path "/") }}{{- $path -}}{{ end }} \ No newline at end of file From ee27b8144bb8a93d197c6573c06deb7558fb7a0c Mon Sep 17 00:00:00 2001 From: dmaier-redislabs Date: Wed, 24 Jan 2024 23:22:17 +0100 Subject: [PATCH 15/15] Minor change to fix an issue with trailing slashes on command pages --- build/migrate.py | 9 +++++-- content/commands/bf.add/index.md | 4 ++-- content/commands/bf.exists/index.md | 2 +- content/commands/bf.insert/index.md | 6 ++--- content/commands/bf.loadchunk/index.md | 10 ++++---- content/commands/bf.madd/index.md | 6 ++--- content/commands/bf.mexists/index.md | 2 +- content/commands/bf.scandump/index.md | 2 +- content/commands/cf.add/index.md | 2 +- content/commands/cf.addnx/index.md | 6 ++--- content/commands/cf.count/index.md | 4 ++-- content/commands/cf.exists/index.md | 4 ++-- content/commands/cf.insert/index.md | 4 ++-- content/commands/cf.insertnx/index.md | 8 +++---- content/commands/cf.loadchunk/index.md | 10 ++++---- content/commands/cf.mexists/index.md | 4 ++-- content/commands/cf.scandump/index.md | 2 +- content/commands/ft.aggregate/index.md | 10 ++++---- content/commands/ft.aliasadd/index.md | 2 +- content/commands/ft.aliasdel/index.md | 2 +- content/commands/ft.aliasupdate/index.md | 2 +- content/commands/ft.alter/index.md | 4 ++-- content/commands/ft.config-get/index.md | 2 +- content/commands/ft.config-help/index.md | 2 +- content/commands/ft.config-set/index.md | 2 +- content/commands/ft.create/index.md | 6 ++--- content/commands/ft.cursor-del/index.md | 2 +- content/commands/ft.cursor-read/index.md | 4 ++-- content/commands/ft.dictadd/index.md | 2 +- content/commands/ft.dictdel/index.md | 2 +- content/commands/ft.dictdump/index.md | 2 +- 
content/commands/ft.dropindex/index.md | 8 +++---- content/commands/ft.explain/index.md | 6 ++--- content/commands/ft.explaincli/index.md | 6 ++--- content/commands/ft.info/index.md | 6 ++--- content/commands/ft.profile/index.md | 12 +++++----- content/commands/ft.search/index.md | 14 +++++------ content/commands/ft.spellcheck/index.md | 6 ++--- content/commands/ft.sugadd/index.md | 4 ++-- content/commands/ft.sugdel/index.md | 2 +- content/commands/ft.sugget/index.md | 2 +- content/commands/ft.suglen/index.md | 2 +- content/commands/ft.syndump/index.md | 2 +- content/commands/ft.synupdate/index.md | 2 +- content/commands/ft.tagvals/index.md | 4 ++-- content/commands/json.arrappend/index.md | 2 +- content/commands/json.arrindex/index.md | 2 +- content/commands/json.arrinsert/index.md | 2 +- content/commands/json.arrlen/index.md | 2 +- content/commands/json.arrpop/index.md | 2 +- content/commands/json.arrtrim/index.md | 2 +- content/commands/json.clear/index.md | 2 +- content/commands/json.debug-help/index.md | 4 ++-- content/commands/json.debug-memory/index.md | 2 +- content/commands/json.del/index.md | 2 +- content/commands/json.forget/index.md | 2 +- content/commands/json.get/index.md | 2 +- content/commands/json.merge/index.md | 2 +- content/commands/json.mget/index.md | 2 +- content/commands/json.mset/index.md | 2 +- content/commands/json.numincrby/index.md | 2 +- content/commands/json.nummultby/index.md | 2 +- content/commands/json.objkeys/index.md | 2 +- content/commands/json.objlen/index.md | 2 +- content/commands/json.resp/index.md | 2 +- content/commands/json.set/index.md | 2 +- content/commands/json.strappend/index.md | 2 +- content/commands/json.strlen/index.md | 2 +- content/commands/json.toggle/index.md | 2 +- content/commands/json.type/index.md | 2 +- content/commands/ts.add/index.md | 16 ++++++------- content/commands/ts.alter/index.md | 10 ++++---- content/commands/ts.create/index.md | 12 +++++----- content/commands/ts.createrule/index.md | 4 ++-- 
content/commands/ts.decrby/index.md | 12 +++++----- content/commands/ts.del/index.md | 2 +- content/commands/ts.deleterule/index.md | 2 +- content/commands/ts.get/index.md | 2 +- content/commands/ts.incrby/index.md | 12 +++++----- content/commands/ts.info/index.md | 8 +++---- content/commands/ts.madd/index.md | 4 ++-- content/commands/ts.mget/index.md | 2 +- content/commands/ts.mrange/index.md | 2 +- content/commands/ts.mrevrange/index.md | 2 +- content/commands/ts.queryindex/index.md | 2 +- content/commands/ts.range/index.md | 2 +- content/commands/ts.revrange/index.md | 2 +- content/develop/connect/clients/dotnet.md | 2 +- .../clients/om-clients/stack-spring.md | 4 ++-- content/develop/connect/clients/python.md | 4 ++-- content/develop/data-types/json/_index.md | 10 ++++---- content/develop/data-types/json/path.md | 6 ++--- content/develop/data-types/json/ram.md | 2 +- .../probabilistic/count-min-sketch.md | 2 +- .../data-types/probabilistic/cuckoo-filter.md | 2 +- .../data-types/probabilistic/t-digest.md | 10 ++++---- .../develop/data-types/probabilistic/top-k.md | 10 ++++---- .../data-types/timeseries/configuration.md | 12 +++++----- .../data-types/timeseries/quickstart.md | 12 +++++----- .../develop/get-started/document-database.md | 6 ++--- .../develop/get-started/vector-database.md | 12 +++++----- .../search-and-query/administration/design.md | 2 +- .../administration/overview.md | 4 ++-- .../advanced-concepts/aggregations.md | 14 +++++------ .../advanced-concepts/dialects.md | 20 ++++++++-------- .../advanced-concepts/query_syntax.md | 6 ++--- .../advanced-concepts/scoring.md | 2 +- .../advanced-concepts/sorting.md | 2 +- .../advanced-concepts/spellcheck.md | 4 ++-- .../advanced-concepts/stopwords.md | 4 ++-- .../advanced-concepts/vectors.md | 8 +++---- .../configuration-parameters.md | 14 +++++------ .../field-and-type-options.md | 6 ++--- .../basic-constructs/schema-definition.md | 4 ++-- .../search-and-query/deprecated/payloads.md | 2 +- 
.../search-and-query/indexing/_index.md | 24 +++++++++---------- .../interact/search-and-query/query/_index.md | 6 ++--- .../search-and-query/query/aggregation.md | 6 ++--- .../search-and-query/query/combined.md | 2 +- .../search-and-query/query/geo-spatial.md | 2 +- .../interact/search-and-query/query/range.md | 4 ++-- .../search-and-query/query/vector-search.md | 6 ++--- 122 files changed, 297 insertions(+), 292 deletions(-) diff --git a/build/migrate.py b/build/migrate.py index 5132c5532f..a24eda8d18 100755 --- a/build/migrate.py +++ b/build/migrate.py @@ -124,10 +124,15 @@ def _load_csv_file(file_path): def _replace_link(match, new_prefix): # Relrefs don't like dots in the link if '.' in match.group(3): - return match.group(1) + '{{< baseurl >}}' + new_prefix + match.group(3) + match.group(4) + result = match.group(1) + '{{< baseurl >}}' + new_prefix + match.group(3) + match.group(4) + + # Some command pages have a . in them which causes issues + if new_prefix == "/commands": + result = match.group(1) + '{{< baseurl >}}' + new_prefix + match.group(3) + "/" + match.group(4) else: - return match.group(1) + '{{< relref "' + new_prefix + match.group(3) + '" >}}' + match.group(4) + result = match.group(1) + '{{< relref "' + new_prefix + match.group(3) + '" >}}' + match.group(4) + return result ''' Helps to substitute the prefix https://redis.io with e.g. / within a link diff --git a/content/commands/bf.add/index.md b/content/commands/bf.add/index.md index 20afeda7c8..35917fbf31 100644 --- a/content/commands/bf.add/index.md +++ b/content/commands/bf.add/index.md @@ -29,7 +29,7 @@ title: BF.ADD --- Adds an item to a Bloom filter. -This command is similar to [`BF.MADD`]({{< baseurl >}}/commands/bf.madd), except that only one item can be added. +This command is similar to [`BF.MADD`]({{< baseurl >}}/commands/bf.madd/), except that only one item can be added. 
## Required arguments @@ -37,7 +37,7 @@ This command is similar to [`BF.MADD`]({{< baseurl >}}/commands/bf.madd), except is key name for a Bloom filter to add the item to. -If `key` does not exist - a new Bloom filter is created with default error rate, capacity, and expansion (see [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve)). +If `key` does not exist - a new Bloom filter is created with default error rate, capacity, and expansion (see [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve/)).
item diff --git a/content/commands/bf.exists/index.md b/content/commands/bf.exists/index.md index f7aba4a607..0dda1a8a69 100644 --- a/content/commands/bf.exists/index.md +++ b/content/commands/bf.exists/index.md @@ -29,7 +29,7 @@ title: BF.EXISTS --- Determines whether a given item was added to a Bloom filter. -This command is similar to [`BF.MEXISTS`]({{< baseurl >}}/commands/bf.mexists), except that only one item can be checked. +This command is similar to [`BF.MEXISTS`]({{< baseurl >}}/commands/bf.mexists/), except that only one item can be checked. ## Required arguments diff --git a/content/commands/bf.insert/index.md b/content/commands/bf.insert/index.md index 18edbce2fe..a781f2ebcd 100644 --- a/content/commands/bf.insert/index.md +++ b/content/commands/bf.insert/index.md @@ -58,7 +58,7 @@ title: BF.INSERT --- Creates a new Bloom filter if the `key` does not exist using the specified error rate, capacity, and expansion, then adds all specified items to the Bloom Filter. -This command is similar to [`BF.MADD`]({{< baseurl >}}/commands/bf.madd), except that the error rate, capacity, and expansion can be specified. It is a sugarcoated combination of [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve) and [`BF.MADD`]({{< baseurl >}}/commands/bf.madd). +This command is similar to [`BF.MADD`]({{< baseurl >}}/commands/bf.madd/), except that the error rate, capacity, and expansion can be specified. It is a sugarcoated combination of [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve/) and [`BF.MADD`]({{< baseurl >}}/commands/bf.madd/). ## Required arguments @@ -89,14 +89,14 @@ It is an error to specify `NOCREATE` together with either `CAPACITY` or `ERROR`. Specifies the desired `capacity` for the filter to be created. This parameter is ignored if the filter already exists. If the filter is automatically created and this parameter is absent, then the module-level `capacity` is used. 
-See [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve) for more information about the impact of this value. +See [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve/) for more information about the impact of this value.
ERROR error Specifies the `error` ratio of the newly created filter if it does not yet exist. If the filter is automatically created and `error` is not specified then the module-level error rate is used. -See [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve) for more information about the format of this value. +See [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve/) for more information about the format of this value.
NONSCALING diff --git a/content/commands/bf.loadchunk/index.md b/content/commands/bf.loadchunk/index.md index 31140cc191..105b684e45 100644 --- a/content/commands/bf.loadchunk/index.md +++ b/content/commands/bf.loadchunk/index.md @@ -29,9 +29,9 @@ syntax_fmt: BF.LOADCHUNK key iterator data syntax_str: iterator data title: BF.LOADCHUNK --- -Restores a Bloom filter previously saved using [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump). +Restores a Bloom filter previously saved using [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump/). -See the [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump) command for example usage. +See the [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump/) command for example usage. Notes @@ -49,12 +49,12 @@ is key name for a Bloom filter to restore.
iterator -Iterator value associated with `data` (returned by [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump)) +Iterator value associated with `data` (returned by [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump/))
data -Current data chunk (returned by [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump)) +Current data chunk (returned by [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump/))
## Return value @@ -66,4 +66,4 @@ Returns one of these replies: ## Examples -See [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump) for an example. +See [`BF.SCANDUMP`]({{< baseurl >}}/commands/bf.scandump/) for an example. diff --git a/content/commands/bf.madd/index.md b/content/commands/bf.madd/index.md index 603731454d..242d504f1d 100644 --- a/content/commands/bf.madd/index.md +++ b/content/commands/bf.madd/index.md @@ -33,9 +33,9 @@ title: BF.MADD --- Adds one or more items to a Bloom filter. -This command is similar to [`BF.ADD`]({{< baseurl >}}/commands/bf.add), except that you can add more than one item. +This command is similar to [`BF.ADD`]({{< baseurl >}}/commands/bf.add/), except that you can add more than one item. -This command is similar to [`BF.INSERT`]({{< baseurl >}}/commands/bf.insert), except that the error rate, capacity, and expansion cannot be specified. +This command is similar to [`BF.INSERT`]({{< baseurl >}}/commands/bf.insert/), except that the error rate, capacity, and expansion cannot be specified. ## Required arguments @@ -43,7 +43,7 @@ This command is similar to [`BF.INSERT`]({{< baseurl >}}/commands/bf.insert), ex is key name for a Bloom filter to add the items to. -If `key` does not exist - a new Bloom filter is created with default error rate, capacity, and expansion (see [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve)). +If `key` does not exist - a new Bloom filter is created with default error rate, capacity, and expansion (see [`BF.RESERVE`]({{< baseurl >}}/commands/bf.reserve/)).
item... diff --git a/content/commands/bf.mexists/index.md b/content/commands/bf.mexists/index.md index 6f2dd54eb9..34ba5597cc 100644 --- a/content/commands/bf.mexists/index.md +++ b/content/commands/bf.mexists/index.md @@ -31,7 +31,7 @@ title: BF.MEXISTS --- Determines whether one or more items were added to a Bloom filter. -This command is similar to [`BF.EXISTS`]({{< baseurl >}}/commands/bf.exists), except that more than one item can be checked. +This command is similar to [`BF.EXISTS`]({{< baseurl >}}/commands/bf.exists/), except that more than one item can be checked. ## Required arguments diff --git a/content/commands/bf.scandump/index.md b/content/commands/bf.scandump/index.md index 57b7047419..86d48656a5 100644 --- a/content/commands/bf.scandump/index.md +++ b/content/commands/bf.scandump/index.md @@ -55,7 +55,7 @@ Returns one of these replies: The Iterator is passed as input to the next invocation of `BF.SCANDUMP`. If _Iterator_ is 0, then it means iteration has completed. - The iterator-data pair should also be passed to [`BF.LOADCHUNK`]({{< baseurl >}}/commands/bf.loadchunk) when restoring the filter. + The iterator-data pair should also be passed to [`BF.LOADCHUNK`]({{< baseurl >}}/commands/bf.loadchunk/) when restoring the filter. - [] on error (invalid arguments, key not found, wrong key type, etc.) diff --git a/content/commands/cf.add/index.md b/content/commands/cf.add/index.md index 754aa39cdb..69c1a38843 100644 --- a/content/commands/cf.add/index.md +++ b/content/commands/cf.add/index.md @@ -30,7 +30,7 @@ title: CF.ADD Adds an item to the cuckoo filter. Cuckoo filters can contain the same item multiple times, and consider each addition as separate. -Use [`CF.ADDNX`]({{< baseurl >}}/commands/cf.addnx) to add an item only if it does not exist. +Use [`CF.ADDNX`]({{< baseurl >}}/commands/cf.addnx/) to add an item only if it does not exist. 
## Required arguments diff --git a/content/commands/cf.addnx/index.md b/content/commands/cf.addnx/index.md index ca0ff3531d..6c2264016a 100644 --- a/content/commands/cf.addnx/index.md +++ b/content/commands/cf.addnx/index.md @@ -29,12 +29,12 @@ title: CF.ADDNX --- Adds an item to a cuckoo filter if the item does not exist. -This command is similar to the combination of [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists) and [`CF.ADD`]({{< baseurl >}}/commands/cf.add). It does not add an item into the filter if its fingerprint already exists. +This command is similar to the combination of [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists/) and [`CF.ADD`]({{< baseurl >}}/commands/cf.add/). It does not add an item into the filter if its fingerprint already exists. Notes: -- This command is slower than [`CF.ADD`]({{< baseurl >}}/commands/cf.add) because it first checks whether the item exists. -- Since [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists) can result in false positive, `CF.ADDNX` may not add an item because it is supposedly already exist, which may be wrong. +- This command is slower than [`CF.ADD`]({{< baseurl >}}/commands/cf.add/) because it first checks whether the item exists. +- Since [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists/) can result in false positive, `CF.ADDNX` may not add an item because it is supposedly already exist, which may be wrong. diff --git a/content/commands/cf.count/index.md b/content/commands/cf.count/index.md index 0b318902ab..b9a4e73257 100644 --- a/content/commands/cf.count/index.md +++ b/content/commands/cf.count/index.md @@ -29,7 +29,7 @@ title: CF.COUNT --- Returns an estimation of the number of times a given item was added to a cuckoo filter. -If you just want to check that a given item was added to a cuckoo filter, use [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists). +If you just want to check that a given item was added to a cuckoo filter, use [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists/). 
## Required arguments @@ -48,7 +48,7 @@ is an item to check. Returns one of these replies: -- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), where a positive value is an estimation of the number of times `item` was added to the filter. An overestimation is possible, but not an underestimation. `0` means that `key` does not exist or that `item` had not been added to the filter. See note in [`CF.DEL`]({{< baseurl >}}/commands/cf.del). +- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), where a positive value is an estimation of the number of times `item` was added to the filter. An overestimation is possible, but not an underestimation. `0` means that `key` does not exist or that `item` had not been added to the filter. See note in [`CF.DEL`]({{< baseurl >}}/commands/cf.del/). - [] on error (invalid arguments, wrong key type, etc.) ## Examples diff --git a/content/commands/cf.exists/index.md b/content/commands/cf.exists/index.md index 0c567a8a0f..802a5af3b5 100644 --- a/content/commands/cf.exists/index.md +++ b/content/commands/cf.exists/index.md @@ -29,7 +29,7 @@ title: CF.EXISTS --- Determines whether a given item was added to a cuckoo filter. -This command is similar to [`CF.MEXISTS`]({{< baseurl >}}/commands/cf.mexists), except that only one item can be checked. +This command is similar to [`CF.MEXISTS`]({{< baseurl >}}/commands/cf.mexists/), except that only one item can be checked. ## Required arguments @@ -48,7 +48,7 @@ is an item to check. Returns one of these replies: -- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), where `1` means that, with high probability, `item` had already been added to the filter, and `0` means that `key` does not exist or that `item` had not been added to the filter. See note in [`CF.DEL`]({{< baseurl >}}/commands/cf.del). 
+- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}), where `1` means that, with high probability, `item` had already been added to the filter, and `0` means that `key` does not exist or that `item` had not been added to the filter. See note in [`CF.DEL`]({{< baseurl >}}/commands/cf.del/). - [] on error (invalid arguments, wrong key type, and so on) ## Examples diff --git a/content/commands/cf.insert/index.md b/content/commands/cf.insert/index.md index 9afa071d50..e9295256b6 100644 --- a/content/commands/cf.insert/index.md +++ b/content/commands/cf.insert/index.md @@ -44,7 +44,7 @@ title: CF.INSERT --- Adds one or more items to a cuckoo filter, allowing the filter to be created with a custom capacity if it does not exist yet. -This command is similar to [`CF.ADD`]({{< baseurl >}}/commands/cf.add), except that more than one item can be added and capacity can be specified. +This command is similar to [`CF.ADD`]({{< baseurl >}}/commands/cf.add/), except that more than one item can be added and capacity can be specified. ## Required arguments @@ -70,7 +70,7 @@ If the filter already exists, then this parameter is ignored. If the filter does not exist yet and this parameter is *not* specified, then the filter is created with the module-level default capacity which is 1024. -See [`CF.RESERVE`]({{< baseurl >}}/commands/cf.reserve) for more information on cuckoo filter capacities. +See [`CF.RESERVE`]({{< baseurl >}}/commands/cf.reserve/) for more information on cuckoo filter capacities.
NOCREATE diff --git a/content/commands/cf.insertnx/index.md b/content/commands/cf.insertnx/index.md index 8a3de4b547..3d8d9e6f0a 100644 --- a/content/commands/cf.insertnx/index.md +++ b/content/commands/cf.insertnx/index.md @@ -44,12 +44,12 @@ title: CF.INSERTNX --- Adds one or more items to a cuckoo filter if they did not exist previously, allowing the filter to be created with a custom capacity if it does not exist yet. -This command is similar to [`CF.ADDNX`]({{< baseurl >}}/commands/cf.addnx), except that more than one item can be added and capacity can be specified. +This command is similar to [`CF.ADDNX`]({{< baseurl >}}/commands/cf.addnx/), except that more than one item can be added and capacity can be specified. Notes: -- This command is slower than [`CF.INSERT`]({{< baseurl >}}/commands/cf.insert) because it first checks whether each item exists. -- Since [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists) can result in false positive, `CF.INSERTNX` may not add an item because it is supposedly already exist, which may be wrong. +- This command is slower than [`CF.INSERT`]({{< baseurl >}}/commands/cf.insert/) because it first checks whether each item exists. +- Since [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists/) can result in false positive, `CF.INSERTNX` may not add an item because it is supposedly already exist, which may be wrong. @@ -77,7 +77,7 @@ If the filter already exists, then this parameter is ignored. If the filter does not exist yet and this parameter is *not* specified, then the filter is created with the module-level default capacity which is 1024. -See [`CF.RESERVE`]({{< baseurl >}}/commands/cf.reserve) for more information on cuckoo filter capacities. +See [`CF.RESERVE`]({{< baseurl >}}/commands/cf.reserve/) for more information on cuckoo filter capacities.
NOCREATE diff --git a/content/commands/cf.loadchunk/index.md b/content/commands/cf.loadchunk/index.md index 95659ff2c6..d9084ab579 100644 --- a/content/commands/cf.loadchunk/index.md +++ b/content/commands/cf.loadchunk/index.md @@ -29,9 +29,9 @@ syntax_fmt: CF.LOADCHUNK key iterator data syntax_str: iterator data title: CF.LOADCHUNK --- -Restores a cuckoo filter previously saved using [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump). +Restores a cuckoo filter previously saved using [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump/). -See the [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump) command for example usage. +See the [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump/) command for example usage. Notes @@ -49,12 +49,12 @@ is key name for a cuckoo filter to restore.
iterator -Iterator value associated with `data` (returned by [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump)) +Iterator value associated with `data` (returned by [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump/))
data -Current data chunk (returned by [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump)) +Current data chunk (returned by [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump/))
## Return value @@ -66,4 +66,4 @@ Returns one of these replies: ## Examples -See [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump) for an example. +See [`CF.SCANDUMP`]({{< baseurl >}}/commands/cf.scandump/) for an example. diff --git a/content/commands/cf.mexists/index.md b/content/commands/cf.mexists/index.md index 073ef2d762..8a3920c682 100644 --- a/content/commands/cf.mexists/index.md +++ b/content/commands/cf.mexists/index.md @@ -31,7 +31,7 @@ title: CF.MEXISTS --- Determines whether one or more items were added to a cuckoo filter. -This command is similar to [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists), except that more than one item can be checked. +This command is similar to [`CF.EXISTS`]({{< baseurl >}}/commands/cf.exists/), except that more than one item can be checked. ## Required arguments @@ -50,7 +50,7 @@ One or more items to check. Returns one of these replies: -- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - where "1" means that, with high probability, `item` was already added to the filter, and "0" means that `key` does not exist or that `item` had not added to the filter. See note in [`CF.DEL`]({{< baseurl >}}/commands/cf.del). +- [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}) - where "1" means that, with high probability, `item` was already added to the filter, and "0" means that `key` does not exist or that `item` had not added to the filter. See note in [`CF.DEL`]({{< baseurl >}}/commands/cf.del/). - [] on error (invalid arguments, wrong key type, etc.) 
## Examples diff --git a/content/commands/cf.scandump/index.md b/content/commands/cf.scandump/index.md index 6eb97e498c..d53f24d372 100644 --- a/content/commands/cf.scandump/index.md +++ b/content/commands/cf.scandump/index.md @@ -55,7 +55,7 @@ Returns one of these replies: The Iterator is passed as input to the next invocation of `CF.SCANDUMP`. If _Iterator_ is 0, then it means iteration has completed. - The iterator-data pair should also be passed to [`CF.LOADCHUNK`]({{< baseurl >}}/commands/cf.loadchunk) when restoring the filter. + The iterator-data pair should also be passed to [`CF.LOADCHUNK`]({{< baseurl >}}/commands/cf.loadchunk/) when restoring the filter. - [] on error (invalid arguments, key not found, wrong key type, etc.) diff --git a/content/commands/ft.aggregate/index.md b/content/commands/ft.aggregate/index.md index f58205d69c..3ad2dba054 100644 --- a/content/commands/ft.aggregate/index.md +++ b/content/commands/ft.aggregate/index.md @@ -202,7 +202,7 @@ Run a search query on an index, and perform aggregate transformations on the res
index -is index name against which the query is executed. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +is index name against which the query is executed. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/).
@@ -314,7 +314,7 @@ You can reference parameters in the `query` by a `$`, followed by the parameter
DIALECT {dialect_version} -selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) command. +selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) command.
## Return @@ -324,8 +324,8 @@ The [integer reply]({{< baseurl >}}/develop/reference/protocol-spec#resp-integer ### Return multiple values -See [Return multiple values]({{< baseurl >}}/commands/ft.search#return-multiple-values) in [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) -The `DIALECT` can be specified as a parameter in the FT.AGGREGATE command. If it is not specified, the `DEFAULT_DIALECT` is used, which can be set using [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) or by passing it as an argument to the `redisearch` module when it is loaded. +See [Return multiple values]({{< baseurl >}}/commands/ft.search/#return-multiple-values) in [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) +The `DIALECT` can be specified as a parameter in the FT.AGGREGATE command. If it is not specified, the `DEFAULT_DIALECT` is used, which can be set using [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) or by passing it as an argument to the `redisearch` module when it is loaded. For example, with the following document and index: @@ -454,7 +454,7 @@ Next, count GitHub events by user (actor), to produce the most active users. 
## See also -[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) +[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) ## Related topics diff --git a/content/commands/ft.aliasadd/index.md b/content/commands/ft.aliasadd/index.md index bcaf121c52..4a70fb5adb 100644 --- a/content/commands/ft.aliasadd/index.md +++ b/content/commands/ft.aliasadd/index.md @@ -74,7 +74,7 @@ Attempting to add the same alias returns a message that the alias already exists ## See also -[`FT.ALIASDEL`]({{< baseurl >}}/commands/ft.aliasdel) | [`FT.ALIASUPDATE`]({{< baseurl >}}/commands/ft.aliasupdate) +[`FT.ALIASDEL`]({{< baseurl >}}/commands/ft.aliasdel/) | [`FT.ALIASUPDATE`]({{< baseurl >}}/commands/ft.aliasupdate/) ## Related topics diff --git a/content/commands/ft.aliasdel/index.md b/content/commands/ft.aliasdel/index.md index 105146394b..bc24c5e871 100644 --- a/content/commands/ft.aliasdel/index.md +++ b/content/commands/ft.aliasdel/index.md @@ -60,7 +60,7 @@ OK ## See also -[`FT.ALIASADD`]({{< baseurl >}}/commands/ft.aliasadd) | [`FT.ALIASUPDATE`]({{< baseurl >}}/commands/ft.aliasupdate) +[`FT.ALIASADD`]({{< baseurl >}}/commands/ft.aliasadd/) | [`FT.ALIASUPDATE`]({{< baseurl >}}/commands/ft.aliasupdate/) ## Related topics diff --git a/content/commands/ft.aliasupdate/index.md b/content/commands/ft.aliasupdate/index.md index 41807e6128..7b747fc18f 100644 --- a/content/commands/ft.aliasupdate/index.md +++ b/content/commands/ft.aliasupdate/index.md @@ -62,7 +62,7 @@ OK ## See also -[`FT.ALIASADD`]({{< baseurl >}}/commands/ft.aliasadd) | [`FT.ALIASDEL`]({{< baseurl >}}/commands/ft.aliasdel) +[`FT.ALIASADD`]({{< baseurl >}}/commands/ft.aliasadd/) | [`FT.ALIASDEL`]({{< baseurl >}}/commands/ft.aliasdel/) ## Related topics diff --git a/content/commands/ft.alter/index.md b/content/commands/ft.alter/index.md index dfdb63736a..25932abc69 100644 --- a/content/commands/ft.alter/index.md 
+++ b/content/commands/ft.alter/index.md @@ -67,7 +67,7 @@ if set, does not scan and index. after the SCHEMA keyword, declares which fields to add: - `attribute` is attribute to add. -- `options` are attribute options. Refer to [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) for more information. +- `options` are attribute options. Refer to [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) for more information. Note: @@ -96,7 +96,7 @@ OK ## See also -[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) ## Related topics diff --git a/content/commands/ft.config-get/index.md b/content/commands/ft.config-get/index.md index 25a72d1d86..9bc78d03f7 100644 --- a/content/commands/ft.config-get/index.md +++ b/content/commands/ft.config-get/index.md @@ -133,7 +133,7 @@ FT.CONFIG GET returns an array reply of the configuration name and value. ## See also -[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) | [`FT.CONFIG HELP`]({{< baseurl >}}/commands/ft.config-help) +[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) | [`FT.CONFIG HELP`]({{< baseurl >}}/commands/ft.config-help/) ## Related topics diff --git a/content/commands/ft.config-help/index.md b/content/commands/ft.config-help/index.md index 849740e1d2..365ea04598 100644 --- a/content/commands/ft.config-help/index.md +++ b/content/commands/ft.config-help/index.md @@ -62,7 +62,7 @@ FT.CONFIG HELP returns an array reply of the configuration name and value. 
## See also -[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) | [`FT.CONFIG GET`]({{< baseurl >}}/commands/ft.config-get) +[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) | [`FT.CONFIG GET`]({{< baseurl >}}/commands/ft.config-get/) ## Related topics diff --git a/content/commands/ft.config-set/index.md b/content/commands/ft.config-set/index.md index b2d6770bcc..c7ffe34c01 100644 --- a/content/commands/ft.config-set/index.md +++ b/content/commands/ft.config-set/index.md @@ -74,7 +74,7 @@ OK ## See also -[`FT.CONFIG GET`]({{< baseurl >}}/commands/ft.config-get) | [`FT.CONFIG HELP`]({{< baseurl >}}/commands/ft.config-help) +[`FT.CONFIG GET`]({{< baseurl >}}/commands/ft.config-get/) | [`FT.CONFIG HELP`]({{< baseurl >}}/commands/ft.config-help/) ## Related topics diff --git a/content/commands/ft.create/index.md b/content/commands/ft.create/index.md index ea106f4173..fd2aa795a7 100644 --- a/content/commands/ft.create/index.md +++ b/content/commands/ft.create/index.md @@ -318,7 +318,7 @@ is document attribute that you use as a binary safe payload string to the docume
MAXTEXTFIELDS -forces RediSearch to encode indexes as if there were more than 32 text attributes, which allows you to add additional attributes (beyond 32) using [`FT.ALTER`]({{< baseurl >}}/commands/ft.alter). For efficiency, RediSearch encodes indexes differently if they are created with less than 32 text attributes. +forces RediSearch to encode indexes as if there were more than 32 text attributes, which allows you to add additional attributes (beyond 32) using [`FT.ALTER`]({{< baseurl >}}/commands/ft.alter/). For efficiency, RediSearch encodes indexes differently if they are created with less than 32 text attributes.
@@ -335,7 +335,7 @@ creates a lightweight temporary index that expires after a specified period of i {{% alert title="Warning" color="warning" %}} When temporary indexes expire, they drop all the records associated with them. -[`FT.DROPINDEX`]({{< baseurl >}}/commands/ft.dropindex) was introduced with a default of not deleting docs and a `DD` flag that enforced deletion. +[`FT.DROPINDEX`]({{< baseurl >}}/commands/ft.dropindex/) was introduced with a default of not deleting docs and a `DD` flag that enforced deletion. However, for temporary indexes, documents are deleted along with the index. Historically, RediSearch used an FT.ADD command, which made a connection between the document and the index. Then, FT.DROP, also a hystoric command, deleted documents by default. In version 2.x, RediSearch indexes hashes and JSONs, and the dependency between the index and documents no longer exists. @@ -453,7 +453,7 @@ Index a JSON document using a JSON Path expression. ## See also -[`FT.ALTER`]({{< baseurl >}}/commands/ft.alter) | [`FT.DROPINDEX`]({{< baseurl >}}/commands/ft.dropindex) +[`FT.ALTER`]({{< baseurl >}}/commands/ft.alter/) | [`FT.DROPINDEX`]({{< baseurl >}}/commands/ft.dropindex/) ## Related topics diff --git a/content/commands/ft.cursor-del/index.md b/content/commands/ft.cursor-del/index.md index fa0d661b72..39a0c66000 100644 --- a/content/commands/ft.cursor-del/index.md +++ b/content/commands/ft.cursor-del/index.md @@ -73,7 +73,7 @@ Check that the cursor is deleted. ## See also -[`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read) +[`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read/) ## Related topics diff --git a/content/commands/ft.cursor-read/index.md b/content/commands/ft.cursor-read/index.md index 824ebb9eb2..d7e1894ae8 100644 --- a/content/commands/ft.cursor-read/index.md +++ b/content/commands/ft.cursor-read/index.md @@ -58,7 +58,7 @@ is id of the cursor.
[COUNT read_size] -is number of results to read. This parameter overrides `COUNT` specified in [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate). +is number of results to read. This parameter overrides `COUNT` specified in [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/).
## Return @@ -77,7 +77,7 @@ FT.CURSOR READ returns an array reply where each row is an array reply and repre ## See also -[`FT.CURSOR DEL`]({{< baseurl >}}/commands/ft.cursor-del) | [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) +[`FT.CURSOR DEL`]({{< baseurl >}}/commands/ft.cursor-del/) | [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/) ## Related topics diff --git a/content/commands/ft.dictadd/index.md b/content/commands/ft.dictadd/index.md index b15cfc877c..ac376b8718 100644 --- a/content/commands/ft.dictadd/index.md +++ b/content/commands/ft.dictadd/index.md @@ -67,7 +67,7 @@ FT.DICTADD returns an integer reply, the number of new terms that were added. ## See also -[`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel) | [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump) +[`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel/) | [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump/) ## Related topics diff --git a/content/commands/ft.dictdel/index.md b/content/commands/ft.dictdel/index.md index 2656f46b0e..e28f502c2a 100644 --- a/content/commands/ft.dictdel/index.md +++ b/content/commands/ft.dictdel/index.md @@ -67,7 +67,7 @@ FT.DICTDEL returns an integer reply, the number of new terms that were deleted. ## See also -[`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd) | [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump) +[`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd/) | [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump/) ## Related topics diff --git a/content/commands/ft.dictdump/index.md b/content/commands/ft.dictdump/index.md index de27f47bd4..2d6f379990 100644 --- a/content/commands/ft.dictdump/index.md +++ b/content/commands/ft.dictdump/index.md @@ -60,7 +60,7 @@ FT.DICTDUMP returns an array, where each element is term (string). 
## See also -[`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd) | [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel) +[`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd/) | [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel/) ## Related topics diff --git a/content/commands/ft.dropindex/index.md b/content/commands/ft.dropindex/index.md index a935fa629a..afdceff99c 100644 --- a/content/commands/ft.dropindex/index.md +++ b/content/commands/ft.dropindex/index.md @@ -44,7 +44,7 @@ Delete an index
index -is full-text index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +is full-text index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/).
## Optional arguments @@ -55,9 +55,9 @@ is full-text index name. You must first create the index using [`FT.CREATE`]({{< drop operation that, if set, deletes the actual document hashes. By default, FT.DROPINDEX does not delete the documents associated with the index. Adding the `DD` option deletes the documents as well. -If an index creation is still running ([`FT.CREATE`]({{< baseurl >}}/commands/ft.create) is running asynchronously), only the document hashes that have already been indexed are deleted. +If an index creation is still running ([`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) is running asynchronously), only the document hashes that have already been indexed are deleted. The document hashes left to be indexed remain in the database. -To check the completion of the indexing, use [`FT.INFO`]({{< baseurl >}}/commands/ft.info). +To check the completion of the indexing, use [`FT.INFO`]({{< baseurl >}}/commands/ft.info/).
@@ -78,7 +78,7 @@ OK ## See also -[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) | [`FT.INFO`]({{< baseurl >}}/commands/ft.info) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) | [`FT.INFO`]({{< baseurl >}}/commands/ft.info/) ## Related topics diff --git a/content/commands/ft.explain/index.md b/content/commands/ft.explain/index.md index 349fb2a355..82894da42d 100644 --- a/content/commands/ft.explain/index.md +++ b/content/commands/ft.explain/index.md @@ -43,7 +43,7 @@ Return the execution plan for a complex query
index -is index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +is index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/).
@@ -57,7 +57,7 @@ is query string, as if sent to FT.SEARCH`.
DIALECT {dialect_version} -is dialect version under which to execute the query. If not specified, the query executes under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) command. +is dialect version under which to execute the query. If not specified, the query executes under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) command.
{{% alert title="Notes" color="warning" %}} @@ -101,7 +101,7 @@ INTERSECT { ## See also -[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) | [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) | [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) ## Related topics diff --git a/content/commands/ft.explaincli/index.md b/content/commands/ft.explaincli/index.md index eb7baf79a6..bce9eaf58f 100644 --- a/content/commands/ft.explaincli/index.md +++ b/content/commands/ft.explaincli/index.md @@ -43,7 +43,7 @@ Return the execution plan for a complex query but formatted for easier reading w
index -is index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +is index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/).
@@ -57,7 +57,7 @@ is query string, as if sent to FT.SEARCH`.
DIALECT {dialect_version} -is dialect version under which to execute the query. If not specified, the query executes under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) command. +is dialect version under which to execute the query. If not specified, the query executes under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) command. {{% alert title="Note" color="warning" %}} @@ -114,7 +114,7 @@ $ redis-cli ## See also -[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) | [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) | [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) ## Related topics diff --git a/content/commands/ft.info/index.md b/content/commands/ft.info/index.md index 4244f8a449..b76fc942a6 100644 --- a/content/commands/ft.info/index.md +++ b/content/commands/ft.info/index.md @@ -38,7 +38,7 @@ Return information and statistics on the index
index -is full-text index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +is full-text index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/).
## Return @@ -47,7 +47,7 @@ FT.INFO returns an array reply with pairs of keys and values. Returned values include: -- `index_definition`: reflection of [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) command parameters. +- `index_definition`: reflection of [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) command parameters. - `fields`: index schema - field names, types, and attributes. - Number of documents. - Number of distinct terms. @@ -168,7 +168,7 @@ Optional statistics include: ## See also -[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) | [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) ## Related topics diff --git a/content/commands/ft.profile/index.md b/content/commands/ft.profile/index.md index 8fa5b1167e..613b90175f 100644 --- a/content/commands/ft.profile/index.md +++ b/content/commands/ft.profile/index.md @@ -49,7 +49,7 @@ syntax_str: [LIMITED] QUERY query title: FT.PROFILE --- -Apply [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) or [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) command to collect performance details +Apply [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) or [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/) command to collect performance details [Examples](#examples) @@ -58,13 +58,13 @@ Apply [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) or [`FT.AGGREGATE`]({{<
index -is index name, created using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +is index name, created using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/).
SEARCH | AGGREGATE -is difference between [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate). +is difference between [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/).
@@ -76,14 +76,14 @@ removes details of `reader` iterator.
QUERY {query} -is query string, sent to [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search). +is query string, sent to [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/).
Note: To reduce the size of the output, use `NOCONTENT` or `LIMIT 0 0` to reduce the reply results or `LIMITED` to not reply with details of `reader iterators` inside built-in unions such as `fuzzy` or `prefix`. ## Return -`FT.PROFILE` returns an array reply, with the first array reply identical to the reply of [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) and a second array reply with information of time in milliseconds (ms) used to create the query and time and count of calls of iterators and result-processors. +`FT.PROFILE` returns an array reply, with the first array reply identical to the reply of [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/) and a second array reply with information of time in milliseconds (ms) used to create the query and time and count of calls of iterators and result-processors. Return value has an array with two elements: @@ -172,7 +172,7 @@ Return value has an array with two elements: ## See also -[`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) | [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) +[`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) | [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/) ## Related topics diff --git a/content/commands/ft.search/index.md b/content/commands/ft.search/index.md index 197b11a2f9..cf74355456 100644 --- a/content/commands/ft.search/index.md +++ b/content/commands/ft.search/index.md @@ -313,7 +313,7 @@ Search the index with a textual query, returning either documents or just ids
index -is index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +is index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/).
@@ -345,7 +345,7 @@ also returns the relative internal score of each document. This can be used to m
WITHPAYLOADS -retrieves optional document payloads. See [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). The payloads follow the document id and, if `WITHSCORES` is set, the scores. +retrieves optional document payloads. See [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/). The payloads follow the document id and, if `WITHSCORES` is set, the scores.
@@ -357,7 +357,7 @@ returns the value of the sorting key, right after the id and score and/or payloa
FILTER numeric_attribute min max -limits results to those having numeric values ranging between `min` and `max`, if numeric_attribute is defined as a numeric attribute in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +limits results to those having numeric values ranging between `min` and `max`, if numeric_attribute is defined as a numeric attribute in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/). `min` and `max` follow [`ZRANGE`]({{< relref "/commands/zrange" >}}) syntax, and can be `-inf`, `+inf`, and use `(` for exclusive ranges. Multiple numeric filters for different attributes are supported in one query.
@@ -420,7 +420,7 @@ requires the terms in the document to have the same order as the terms in the qu use a stemmer for the supplied language during search for query expansion. If querying documents in Chinese, set to `chinese` to properly tokenize the query terms. Defaults to English. If an unsupported language is sent, the command returns an error. - See [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) for the list of languages. + See [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) for the list of languages.
@@ -486,7 +486,7 @@ You can reference parameters in the `query` by a `$`, followed by the parameter
DIALECT {dialect_version} -selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) command. +selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) command.
## Return @@ -511,7 +511,7 @@ In order to maintain backward compatibility, the default behavior with RediSearc To return all the values, use `DIALECT` 3 (or greater, when available). -The `DIALECT` can be specified as a parameter in the FT.SEARCH command. If it is not specified, the `DEFAULT_DIALECT` is used, which can be set using [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) or by passing it as an argument to the `redisearch` module when it is loaded. +The `DIALECT` can be specified as a parameter in the FT.SEARCH command. If it is not specified, the `DEFAULT_DIALECT` is used, which can be set using [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) or by passing it as an argument to the `redisearch` module when it is loaded. For example, with the following document and index: @@ -818,7 +818,7 @@ Query with `CONTAINS` operator: ## See also -[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) | [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) | [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/) ## Related topics diff --git a/content/commands/ft.spellcheck/index.md b/content/commands/ft.spellcheck/index.md index a873b89cc8..b61554497e 100644 --- a/content/commands/ft.spellcheck/index.md +++ b/content/commands/ft.spellcheck/index.md @@ -88,7 +88,7 @@ See [Spellchecking]({{< relref "/develop/interact/search-and-query/advanced-conc
TERMS -specifies an inclusion (`INCLUDE`) or exclusion (`EXCLUDE`) of a custom dictionary named `{dict}`. Refer to [`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd), [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel) and [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump) about managing custom dictionaries. +specifies an inclusion (`INCLUDE`) or exclusion (`EXCLUDE`) of a custom dictionary named `{dict}`. Refer to [`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd/), [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel/) and [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump/) about managing custom dictionaries.
@@ -100,7 +100,7 @@ is maximum Levenshtein distance for spelling suggestions (default: 1, max: 4).
DIALECT {dialect_version} -selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) command. +selects the dialect version under which to execute the query. If not specified, the query will execute under the default dialect version set during module initial loading or via [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) command.
## Return @@ -128,7 +128,7 @@ The score is calculated by dividing the number of documents in which the suggest ## See also -[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) | [`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd) | [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel) | [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump) +[`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) | [`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd/) | [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel/) | [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump/) ## Related topics diff --git a/content/commands/ft.sugadd/index.md b/content/commands/ft.sugadd/index.md index 19f01581d0..aafc0b6664 100644 --- a/content/commands/ft.sugadd/index.md +++ b/content/commands/ft.sugadd/index.md @@ -82,7 +82,7 @@ increments the existing entry of the suggestion by the given score, instead of r
PAYLOAD {payload} -saves an extra payload with the suggestion, that can be fetched by adding the `WITHPAYLOADS` argument to [`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget). +saves an extra payload with the suggestion, that can be fetched by adding the `WITHPAYLOADS` argument to [`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget/).
## Return @@ -102,7 +102,7 @@ FT.SUGADD returns an integer reply, which is the current size of the suggestion ## See also -[`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget) | [`FT.SUGDEL`]({{< baseurl >}}/commands/ft.sugdel) | [`FT.SUGLEN`]({{< baseurl >}}/commands/ft.suglen) +[`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget/) | [`FT.SUGDEL`]({{< baseurl >}}/commands/ft.sugdel/) | [`FT.SUGLEN`]({{< baseurl >}}/commands/ft.suglen/) ## Related topics diff --git a/content/commands/ft.sugdel/index.md b/content/commands/ft.sugdel/index.md index 1b9cdc9d87..fc11d32410 100644 --- a/content/commands/ft.sugdel/index.md +++ b/content/commands/ft.sugdel/index.md @@ -68,7 +68,7 @@ FT.SUGDEL returns an integer reply, 1 if the string was found and deleted, 0 oth ## See also -[`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget) | [`FT.SUGADD`]({{< baseurl >}}/commands/ft.sugadd) | [`FT.SUGLEN`]({{< baseurl >}}/commands/ft.suglen) +[`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget/) | [`FT.SUGADD`]({{< baseurl >}}/commands/ft.sugadd/) | [`FT.SUGLEN`]({{< baseurl >}}/commands/ft.suglen/) ## Related topics diff --git a/content/commands/ft.sugget/index.md b/content/commands/ft.sugget/index.md index d544e9ef25..2d565b8e49 100644 --- a/content/commands/ft.sugget/index.md +++ b/content/commands/ft.sugget/index.md @@ -118,7 +118,7 @@ FT.SUGGET returns an array reply, which is a list of the top suggestions matchin ## See also -[`FT.SUGADD`]({{< baseurl >}}/commands/ft.sugadd) | [`FT.SUGDEL`]({{< baseurl >}}/commands/ft.sugdel) | [`FT.SUGLEN`]({{< baseurl >}}/commands/ft.suglen) +[`FT.SUGADD`]({{< baseurl >}}/commands/ft.sugadd/) | [`FT.SUGDEL`]({{< baseurl >}}/commands/ft.sugdel/) | [`FT.SUGLEN`]({{< baseurl >}}/commands/ft.suglen/) ## Related topics diff --git a/content/commands/ft.suglen/index.md b/content/commands/ft.suglen/index.md index b999b8953c..c160b55b03 100644 --- a/content/commands/ft.suglen/index.md +++ b/content/commands/ft.suglen/index.md @@ -58,7 +58,7 @@ FT.SUGLEN returns an 
integer reply, which is the current size of the suggestion ## See also -[`FT.SUGADD`]({{< baseurl >}}/commands/ft.sugadd) | [`FT.SUGDEL`]({{< baseurl >}}/commands/ft.sugdel) | [`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget) +[`FT.SUGADD`]({{< baseurl >}}/commands/ft.sugadd/) | [`FT.SUGDEL`]({{< baseurl >}}/commands/ft.sugdel/) | [`FT.SUGGET`]({{< baseurl >}}/commands/ft.sugget/) ## Related topics diff --git a/content/commands/ft.syndump/index.md b/content/commands/ft.syndump/index.md index 738713c58c..b4402f8474 100644 --- a/content/commands/ft.syndump/index.md +++ b/content/commands/ft.syndump/index.md @@ -66,7 +66,7 @@ FT.SYNDUMP returns an array reply, with a pair of `term` and an array of synonym ## See also -[`FT.SYNUPDATE`]({{< baseurl >}}/commands/ft.synupdate) +[`FT.SYNUPDATE`]({{< baseurl >}}/commands/ft.synupdate/) ## Related topics diff --git a/content/commands/ft.synupdate/index.md b/content/commands/ft.synupdate/index.md index 0b597a86ee..624f640cc0 100644 --- a/content/commands/ft.synupdate/index.md +++ b/content/commands/ft.synupdate/index.md @@ -86,7 +86,7 @@ OK ## See also -[`FT.SYNDUMP`]({{< baseurl >}}/commands/ft.syndump) +[`FT.SYNDUMP`]({{< baseurl >}}/commands/ft.syndump/) ## Related topics diff --git a/content/commands/ft.tagvals/index.md b/content/commands/ft.tagvals/index.md index 4d49fbb41b..a9d43af530 100644 --- a/content/commands/ft.tagvals/index.md +++ b/content/commands/ft.tagvals/index.md @@ -40,7 +40,7 @@ Return a distinct set of values indexed in a Tag field
index -is full-text index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +is full-text index name. You must first create the index using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/).
@@ -74,7 +74,7 @@ FT.TAGVALS returns an array reply of all distinct tags in the tag index. ## See also -[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) +[`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) ## Related topics diff --git a/content/commands/json.arrappend/index.md b/content/commands/json.arrappend/index.md index aa5e393e1c..a0e891363f 100644 --- a/content/commands/json.arrappend/index.md +++ b/content/commands/json.arrappend/index.md @@ -96,7 +96,7 @@ redis> JSON.GET item:1 ## See also -[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex/) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.arrindex/index.md b/content/commands/json.arrindex/index.md index 8456fd42e4..244cdb7aa9 100644 --- a/content/commands/json.arrindex/index.md +++ b/content/commands/json.arrindex/index.md @@ -147,7 +147,7 @@ redis> JSON.ARRINDEX item:1 $..colors '"silver"' ## See also -[`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +[`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend/) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.arrinsert/index.md b/content/commands/json.arrinsert/index.md index d9c708b28a..e2e207f0a7 100644 --- a/content/commands/json.arrinsert/index.md +++ b/content/commands/json.arrinsert/index.md @@ -122,7 +122,7 @@ redis> JSON.GET item:1 $.colors ## See also -[`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend) | [`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) +[`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend/) | [`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex/) ## Related topics diff --git a/content/commands/json.arrlen/index.md b/content/commands/json.arrlen/index.md index 
4d21d0ef1d..ea9b3ffee9 100644 --- a/content/commands/json.arrlen/index.md +++ b/content/commands/json.arrlen/index.md @@ -95,7 +95,7 @@ redis> JSON.GET item:2 '$..max_level' ## See also -[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex/) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.arrpop/index.md b/content/commands/json.arrpop/index.md index b10206ed54..738f3fb045 100644 --- a/content/commands/json.arrpop/index.md +++ b/content/commands/json.arrpop/index.md @@ -116,7 +116,7 @@ redis> JSON.GET key $.[1].max_level ## See also -[`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend) | [`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) +[`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend/) | [`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex/) ## Related topics diff --git a/content/commands/json.arrtrim/index.md b/content/commands/json.arrtrim/index.md index 6c7c747c89..ab7777d8e4 100644 --- a/content/commands/json.arrtrim/index.md +++ b/content/commands/json.arrtrim/index.md @@ -123,7 +123,7 @@ redis> JSON.GET key $.[1].max_level ## See also -[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex/) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.clear/index.md b/content/commands/json.clear/index.md index dcd28c1b58..38cbb2a0b3 100644 --- a/content/commands/json.clear/index.md +++ b/content/commands/json.clear/index.md @@ -89,7 +89,7 @@ redis> JSON.GET doc $ ## See also -[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex/) | 
[`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.debug-help/index.md b/content/commands/json.debug-help/index.md index ecd5a0e0c2..5db6516dc2 100644 --- a/content/commands/json.debug-help/index.md +++ b/content/commands/json.debug-help/index.md @@ -22,7 +22,7 @@ syntax_fmt: JSON.DEBUG HELP syntax_str: '' title: JSON.DEBUG HELP --- -Return helpful information about the [`JSON.DEBUG`]({{< baseurl >}}/commands/json.debug) command +Return helpful information about the [`JSON.DEBUG`]({{< baseurl >}}/commands/json.debug/) command ## Return @@ -30,7 +30,7 @@ JSON.DEBUG HELP returns an array with helpful messages. ## See also -[`JSON.DEBUG`]({{< baseurl >}}/commands/json.debug) +[`JSON.DEBUG`]({{< baseurl >}}/commands/json.debug/) ## Related topics diff --git a/content/commands/json.debug-memory/index.md b/content/commands/json.debug-memory/index.md index 4d933a6f7f..74aba7dddd 100644 --- a/content/commands/json.debug-memory/index.md +++ b/content/commands/json.debug-memory/index.md @@ -75,7 +75,7 @@ redis> JSON.DEBUG MEMORY item:2 ## See also -[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen) +[`JSON.SET`]({{< baseurl >}}/commands/json.set/) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen/) ## Related topics diff --git a/content/commands/json.del/index.md b/content/commands/json.del/index.md index 770574bb67..3ac3cf7222 100644 --- a/content/commands/json.del/index.md +++ b/content/commands/json.del/index.md @@ -88,7 +88,7 @@ redis> JSON.GET doc $ ## See also -[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen) +[`JSON.SET`]({{< baseurl >}}/commands/json.set/) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen/) ## Related topics diff --git a/content/commands/json.forget/index.md b/content/commands/json.forget/index.md index 9359457efb..75627144ec 100644 --- 
a/content/commands/json.forget/index.md +++ b/content/commands/json.forget/index.md @@ -30,4 +30,4 @@ syntax_fmt: JSON.FORGET key [path] syntax_str: '[path]' title: JSON.FORGET --- -See [`JSON.DEL`]({{< baseurl >}}/commands/json.del). \ No newline at end of file +See [`JSON.DEL`]({{< baseurl >}}/commands/json.del/). \ No newline at end of file diff --git a/content/commands/json.get/index.md b/content/commands/json.get/index.md index 2c5f82d0db..77e65161d5 100644 --- a/content/commands/json.get/index.md +++ b/content/commands/json.get/index.md @@ -137,7 +137,7 @@ redis> JSON.GET doc ..a $..b ## See also -[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget) +[`JSON.SET`]({{< baseurl >}}/commands/json.set/) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget/) ## Related topics diff --git a/content/commands/json.merge/index.md b/content/commands/json.merge/index.md index ef6de1b01d..d3b7eb8805 100644 --- a/content/commands/json.merge/index.md +++ b/content/commands/json.merge/index.md @@ -147,7 +147,7 @@ redis> JSON.GET doc ## See also -[`JSON.GET`]({{< baseurl >}}/commands/json.get) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget) | [`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.MSET`]({{< baseurl >}}/commands/json.mset) +[`JSON.GET`]({{< baseurl >}}/commands/json.get/) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget/) | [`JSON.SET`]({{< baseurl >}}/commands/json.set/) | [`JSON.MSET`]({{< baseurl >}}/commands/json.mset/) ## Related topics diff --git a/content/commands/json.mget/index.md b/content/commands/json.mget/index.md index 990b258ece..49df548d22 100644 --- a/content/commands/json.mget/index.md +++ b/content/commands/json.mget/index.md @@ -79,7 +79,7 @@ redis> JSON.MGET doc1 doc2 $..a ## See also -[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.GET`]({{< baseurl >}}/commands/json.get) +[`JSON.SET`]({{< baseurl >}}/commands/json.set/) | [`JSON.GET`]({{< baseurl >}}/commands/json.get/) ## 
Related topics diff --git a/content/commands/json.mset/index.md b/content/commands/json.mset/index.md index ce25baf1ec..f561e1e92c 100644 --- a/content/commands/json.mset/index.md +++ b/content/commands/json.mset/index.md @@ -91,7 +91,7 @@ redis> JSON.GET doc3 ## See also -[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget) | [`JSON.GET`]({{< baseurl >}}/commands/json.get) +[`JSON.SET`]({{< baseurl >}}/commands/json.set/) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget/) | [`JSON.GET`]({{< baseurl >}}/commands/json.get/) ## Related topics diff --git a/content/commands/json.numincrby/index.md b/content/commands/json.numincrby/index.md index 09a0c714cf..b177fe791c 100644 --- a/content/commands/json.numincrby/index.md +++ b/content/commands/json.numincrby/index.md @@ -86,7 +86,7 @@ redis> JSON.NUMINCRBY doc $..a 2 ## See also -[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex/) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.nummultby/index.md b/content/commands/json.nummultby/index.md index ca0b885bd4..36967ba7c5 100644 --- a/content/commands/json.nummultby/index.md +++ b/content/commands/json.nummultby/index.md @@ -72,7 +72,7 @@ redis> JSON.NUMMULTBY doc $..a 2 ## See also -[`JSON.NUMINCRBY`]({{< baseurl >}}/commands/json.numincrby) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +[`JSON.NUMINCRBY`]({{< baseurl >}}/commands/json.numincrby/) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.objkeys/index.md b/content/commands/json.objkeys/index.md index 3f83def2b2..3aa3c8f2a3 100644 --- a/content/commands/json.objkeys/index.md +++ b/content/commands/json.objkeys/index.md @@ -67,7 +67,7 @@ redis> JSON.OBJKEYS doc $..a ## See also 
-[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex/) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.objlen/index.md b/content/commands/json.objlen/index.md index 726b8145e4..e4506a5c78 100644 --- a/content/commands/json.objlen/index.md +++ b/content/commands/json.objlen/index.md @@ -65,7 +65,7 @@ redis> JSON.OBJLEN doc $..a ## See also -[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +[`JSON.ARRINDEX`]({{< baseurl >}}/commands/json.arrindex/) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.resp/index.md b/content/commands/json.resp/index.md index 2dd9719e88..f9ec57ed7e 100644 --- a/content/commands/json.resp/index.md +++ b/content/commands/json.resp/index.md @@ -107,7 +107,7 @@ redis> JSON.RESP item:2 ## See also -[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen) +[`JSON.SET`]({{< baseurl >}}/commands/json.set/) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen/) ## Related topics diff --git a/content/commands/json.set/index.md b/content/commands/json.set/index.md index fa7f4c6a96..d78d4c5d0f 100644 --- a/content/commands/json.set/index.md +++ b/content/commands/json.set/index.md @@ -125,7 +125,7 @@ redis> JSON.GET doc ## See also -[`JSON.GET`]({{< baseurl >}}/commands/json.get) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget) +[`JSON.GET`]({{< baseurl >}}/commands/json.get/) | [`JSON.MGET`]({{< baseurl >}}/commands/json.mget/) ## Related topics diff --git a/content/commands/json.strappend/index.md b/content/commands/json.strappend/index.md index 2625622112..e170431cec 100644 --- a/content/commands/json.strappend/index.md +++ b/content/commands/json.strappend/index.md @@ 
-78,7 +78,7 @@ redis> JSON.GET doc $ ## See also -`JSON.ARRAPEND` | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +`JSON.ARRAPEND` | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.strlen/index.md b/content/commands/json.strlen/index.md index 1f7f6d0be0..01c7a30abd 100644 --- a/content/commands/json.strlen/index.md +++ b/content/commands/json.strlen/index.md @@ -65,7 +65,7 @@ redis> JSON.STRLEN doc $..a ## See also -[`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert) +[`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen/) | [`JSON.ARRINSERT`]({{< baseurl >}}/commands/json.arrinsert/) ## Related topics diff --git a/content/commands/json.toggle/index.md b/content/commands/json.toggle/index.md index 594cf616f2..8b37b78571 100644 --- a/content/commands/json.toggle/index.md +++ b/content/commands/json.toggle/index.md @@ -95,7 +95,7 @@ redis> JSON.GET doc $ ## See also -[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.GET`]({{< baseurl >}}/commands/json.get) +[`JSON.SET`]({{< baseurl >}}/commands/json.set/) | [`JSON.GET`]({{< baseurl >}}/commands/json.get/) ## Related topics diff --git a/content/commands/json.type/index.md b/content/commands/json.type/index.md index 6fdad08ac5..aeaa843639 100644 --- a/content/commands/json.type/index.md +++ b/content/commands/json.type/index.md @@ -69,7 +69,7 @@ redis> JSON.TYPE doc $..dummy ## See also -[`JSON.SET`]({{< baseurl >}}/commands/json.set) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen) +[`JSON.SET`]({{< baseurl >}}/commands/json.set/) | [`JSON.ARRLEN`]({{< baseurl >}}/commands/json.arrlen/) ## Related topics diff --git a/content/commands/ts.add/index.md b/content/commands/ts.add/index.md index 11b490f9e9..9beeb7c3ea 100644 --- a/content/commands/ts.add/index.md +++ b/content/commands/ts.add/index.md @@ -121,36 +121,36 @@ is (double) numeric data value of the sample. 
The double number should follow [R - If all the original samples for an affected aggregated time bucket are available, the compacted value is recalculated based on the reported sample and the original samples. - If only a part of the original samples for an affected aggregated time bucket is available due to trimming caused in accordance with the time series RETENTION policy, the compacted value is recalculated based on the reported sample and the available original samples. - If the original samples for an affected aggregated time bucket are not available due to trimming caused in accordance with the time series RETENTION policy, the compacted value bucket is not updated. -- Explicitly adding samples to a compacted time series (using `TS.ADD`, [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. +- Explicitly adding samples to a compacted time series (using `TS.ADD`, [`TS.MADD`]({{< baseurl >}}/commands/ts.madd/), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby/), or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby/)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. ## Optional arguments -The following arguments are optional because they can be set by [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +The following arguments are optional because they can be set by [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
RETENTION retentionPeriod is maximum retention period, compared to the maximum existing timestamp, in milliseconds. -Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
ENCODING enc specifies the series sample's encoding format. -Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
CHUNK_SIZE size is memory size, in bytes, allocated for each data chunk. -Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
ON_DUPLICATE policy is overwrite key and database configuration for [DUPLICATE_POLICY]({{< baseurl >}}/develop/data-types/timeseries/configuration#duplicate_policy), the policy for handling samples with identical timestamps. -This override is effective only for this single command and does not set the time series duplication policy (which can be set with [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter)). +This override is effective only for this single command and does not set the time series duplication policy (which can be set with [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter/)). `policy` can be one of the following values: - `BLOCK`: ignore any newly reported value and reply with an error @@ -167,7 +167,7 @@ This argument has no effect when a new time series is created by this command. is set of label-value pairs that represent metadata labels of the time series. -Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
Notes: @@ -211,7 +211,7 @@ Add a sample to the time series, setting the sample's timestamp to the current U ## See also -[`TS.CREATE`]({{< baseurl >}}/commands/ts.create) +[`TS.CREATE`]({{< baseurl >}}/commands/ts.create/) ## Related topics diff --git a/content/commands/ts.alter/index.md b/content/commands/ts.alter/index.md index 58672c4ef9..f753e63f15 100644 --- a/content/commands/ts.alter/index.md +++ b/content/commands/ts.alter/index.md @@ -90,24 +90,24 @@ is key name for the time series.
RETENTION retentionPeriod -is maximum retention period, compared to the maximum existing timestamp, in milliseconds. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +is maximum retention period, compared to the maximum existing timestamp, in milliseconds. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
CHUNK_SIZE size -is the initial allocation size, in bytes, for the data part of each new chunk. Actual chunks may consume more memory. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). Changing this value does not affect existing chunks. +is the initial allocation size, in bytes, for the data part of each new chunk. Actual chunks may consume more memory. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/). Changing this value does not affect existing chunks.
DUPLICATE_POLICY policy -is policy for handling multiple samples with identical timestamps. See `DUPLICATE_POLICY` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +is policy for handling multiple samples with identical timestamps. See `DUPLICATE_POLICY` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
LABELS [{label value}...] is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. -If `LABELS` is specified, the given label list is applied. Labels that are not present in the given list are removed implicitly. Specifying `LABELS` with no label-value pairs removes all existing labels. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +If `LABELS` is specified, the given label list is applied. Labels that are not present in the given list are removed implicitly. Specifying `LABELS` with no label-value pairs removes all existing labels. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
## Return value @@ -138,7 +138,7 @@ OK ## See also -[`TS.CREATE`]({{< baseurl >}}/commands/ts.create) +[`TS.CREATE`]({{< baseurl >}}/commands/ts.create/) ## Related topics diff --git a/content/commands/ts.create/index.md b/content/commands/ts.create/index.md index fb35aba866..98cb8cb3c3 100644 --- a/content/commands/ts.create/index.md +++ b/content/commands/ts.create/index.md @@ -98,14 +98,14 @@ is key name for the time series. Notes: - If a key already exists, you get a Redis error reply, `TSDB: key already exists`. You can check for the existence of a key with the [`EXISTS`]({{< relref "/commands/exists" >}}) command. -- Other commands that also create a new time series when called with a key that does not exist are [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), and [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby). +- Other commands that also create a new time series when called with a key that does not exist are [`TS.ADD`]({{< baseurl >}}/commands/ts.add/), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby/), and [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby/). ## Optional arguments
RETENTION retentionPeriod -is maximum age for samples compared to the highest reported timestamp, in milliseconds. Samples are expired based solely on the difference between their timestamp and the timestamps passed to subsequent [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), and [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby) calls with this key. +is maximum age for samples compared to the highest reported timestamp, in milliseconds. Samples are expired based solely on the difference between their timestamp and the timestamps passed to subsequent [`TS.ADD`]({{< baseurl >}}/commands/ts.add/), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd/), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby/), and [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby/) calls with this key. When set to 0, samples never expire. When not specified, the option is set to the global [RETENTION_POLICY]({{< baseurl >}}/develop/data-types/timeseries/configuration#retention_policy) configuration of the database, which by default is 0.
@@ -123,7 +123,7 @@ When not specified, the option is set to `COMPRESSED`.
CHUNK_SIZE size -is initial allocation size, in bytes, for the data part of each new chunk. Actual chunks may consume more memory. Changing chunkSize (using [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter)) does not affect existing chunks. +is initial allocation size, in bytes, for the data part of each new chunk. Actual chunks may consume more memory. Changing chunkSize (using [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter/)) does not affect existing chunks. Must be a multiple of 8 in the range [48 .. 1048576]. When not specified, it is set to the global [CHUNK_SIZE_BYTES]({{< baseurl >}}/develop/data-types/timeseries/configuration#chunk_size_bytes) configuration of the database, which by default is 4096 (a single memory page). @@ -140,7 +140,7 @@ The data in each key is stored in chunks. Each chunk contains header and data fo
DUPLICATE_POLICY policy -is policy for handling insertion ([`TS.ADD`]({{< baseurl >}}/commands/ts.add) and [`TS.MADD`]({{< baseurl >}}/commands/ts.madd)) of multiple samples with identical timestamps, with one of the following values: +is policy for handling insertion ([`TS.ADD`]({{< baseurl >}}/commands/ts.add/) and [`TS.MADD`]({{< baseurl >}}/commands/ts.madd/)) of multiple samples with identical timestamps, with one of the following values: - `BLOCK`: ignore any newly reported value and reply with an error - `FIRST`: ignore any newly reported value - `LAST`: override with the newly reported value @@ -155,7 +155,7 @@ is policy for handling insertion ([`TS.ADD`]({{< baseurl >}}/commands/ts.add) an is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. -The [`TS.MGET`]({{< baseurl >}}/commands/ts.mget), [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange), and [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) commands operate on multiple time series based on their labels. The [`TS.QUERYINDEX`]({{< baseurl >}}/commands/ts.queryindex) command returns all time series keys matching a given filter based on their labels. +The [`TS.MGET`]({{< baseurl >}}/commands/ts.mget/), [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange/), and [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange/) commands operate on multiple time series based on their labels. The [`TS.QUERYINDEX`]({{< baseurl >}}/commands/ts.queryindex/) command returns all time series keys matching a given filter based on their labels.
## Return value @@ -177,7 +177,7 @@ OK ## See also -[`TS.ADD`]({{< baseurl >}}/commands/ts.add) | [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby) | [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby) | [`TS.MGET`]({{< baseurl >}}/commands/ts.mget) | [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) | [`TS.QUERYINDEX`]({{< baseurl >}}/commands/ts.queryindex) +[`TS.ADD`]({{< baseurl >}}/commands/ts.add/) | [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby/) | [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby/) | [`TS.MGET`]({{< baseurl >}}/commands/ts.mget/) | [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange/) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange/) | [`TS.QUERYINDEX`]({{< baseurl >}}/commands/ts.queryindex/) ## Related topics diff --git a/content/commands/ts.createrule/index.md b/content/commands/ts.createrule/index.md index 5b6c1d9780..cfc15a5151 100644 --- a/content/commands/ts.createrule/index.md +++ b/content/commands/ts.createrule/index.md @@ -126,7 +126,7 @@ aggregates results into time buckets. - Only new samples that are added into the source series after the creation of the rule will be aggregated. - Calling `TS.CREATERULE` with a nonempty `destKey` may result in inconsistencies between the raw and the compacted data. -- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. 
+- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add/), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd/), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby/), or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby/)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. - If no samples are added to the source time series during a bucket period. no _compacted sample_ is added to the destination time series. - The timestamp of a compacted sample added to the destination time series is set to the start timestamp the appropriate compaction bucket. For example, for a 10-minute compaction bucket with no alignment, the compacted samples timestamps are `x:00`, `x:10`, `x:20`, and so on. - Deleting `destKey` will cause the compaction rule to be deleted as well. @@ -181,7 +181,7 @@ Now, also create a compacted time series named _dailyDiffTemp_. This time series ## See also -[`TS.DELETERULE`]({{< baseurl >}}/commands/ts.deleterule) +[`TS.DELETERULE`]({{< baseurl >}}/commands/ts.deleterule/) ## Related topics diff --git a/content/commands/ts.decrby/index.md b/content/commands/ts.decrby/index.md index d210e9b682..615bb27f39 100644 --- a/content/commands/ts.decrby/index.md +++ b/content/commands/ts.decrby/index.md @@ -81,7 +81,7 @@ is numeric value of the subtrahend (double). Notes - When specified key does not exist, a new time series is created. - You can use this command as a counter or gauge that automatically gets history as a time series. -- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), or `TS.DECRBY`) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. 
+- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add/), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd/), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby/), or `TS.DECRBY`) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. ## Optional arguments @@ -101,22 +101,22 @@ When not specified, the timestamp is set to the Unix time of the server's clock.
RETENTION retentionPeriod -is maximum retention period, compared to the maximum existing timestamp, in milliseconds. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +is maximum retention period, compared to the maximum existing timestamp, in milliseconds. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
UNCOMPRESSED -changes data storage from compressed (default) to uncompressed. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +changes data storage from compressed (default) to uncompressed. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
CHUNK_SIZE size -is memory size, in bytes, allocated for each data chunk. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +is memory size, in bytes, allocated for each data chunk. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
LABELS [{label value}...] -is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
Notes @@ -136,7 +136,7 @@ Returns one of these replies: ## See also -[`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby) | [`TS.CREATE`]({{< baseurl >}}/commands/ts.create) +[`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby/) | [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/) ## Related topics diff --git a/content/commands/ts.del/index.md b/content/commands/ts.del/index.md index df7db06947..4f8e02a39e 100644 --- a/content/commands/ts.del/index.md +++ b/content/commands/ts.del/index.md @@ -105,7 +105,7 @@ Delete the range of data points for temperature in Tel Aviv. ## See also -[`TS.ADD`]({{< baseurl >}}/commands/ts.add) +[`TS.ADD`]({{< baseurl >}}/commands/ts.add/) ## Related topics diff --git a/content/commands/ts.deleterule/index.md b/content/commands/ts.deleterule/index.md index 9eab5396b7..d9160ee7d0 100644 --- a/content/commands/ts.deleterule/index.md +++ b/content/commands/ts.deleterule/index.md @@ -56,7 +56,7 @@ Returns one of these replies: ## See also -[`TS.CREATERULE`]({{< baseurl >}}/commands/ts.createrule) +[`TS.CREATERULE`]({{< baseurl >}}/commands/ts.createrule/) ## Related topics diff --git a/content/commands/ts.get/index.md b/content/commands/ts.get/index.md index 8d0054ef23..120ce91845 100644 --- a/content/commands/ts.get/index.md +++ b/content/commands/ts.get/index.md @@ -134,7 +134,7 @@ Get the latest maximum daily temperature (the temperature with the highest times ## See also -[`TS.MGET`]({{< baseurl >}}/commands/ts.mget) +[`TS.MGET`]({{< baseurl >}}/commands/ts.mget/) ## Related topics diff --git a/content/commands/ts.incrby/index.md b/content/commands/ts.incrby/index.md index c7f8dd79d7..5198228005 100644 --- a/content/commands/ts.incrby/index.md +++ b/content/commands/ts.incrby/index.md @@ -81,7 +81,7 @@ is numeric value of the addend (double). Notes - When specified key does not exist, a new time series is created. - You can use this command as a counter or gauge that automatically gets history as a time series. 
-- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), `TS.INCRBY`, or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. +- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add/), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd/), `TS.INCRBY`, or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby/)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. ## Optional arguments @@ -101,23 +101,23 @@ When not specified, the timestamp is set to the Unix time of the server's clock.
RETENTION retentionPeriod -is maximum retention period, compared to the maximum existing timestamp, in milliseconds. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +is maximum retention period, compared to the maximum existing timestamp, in milliseconds. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `RETENTION` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
UNCOMPRESSED -changes data storage from compressed (default) to uncompressed. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +changes data storage from compressed (default) to uncompressed. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `ENCODING` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
CHUNK_SIZE size -is memory size, in bytes, allocated for each data chunk. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +is memory size, in bytes, allocated for each data chunk. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `CHUNK_SIZE` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
LABELS [{label value}...] -is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). +is set of label-value pairs that represent metadata labels of the key and serve as a secondary index. Use it only if you are creating a new time series. It is ignored if you are adding samples to an existing time series. See `LABELS` in [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/).
Notes @@ -171,7 +171,7 @@ The timestamp is filled automatically. ## See also -[`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby) | [`TS.CREATE`]({{< baseurl >}}/commands/ts.create) +[`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby/) | [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/) ## Related topics diff --git a/content/commands/ts.info/index.md b/content/commands/ts.info/index.md index 1395a70cab..bbdd8cbd96 100644 --- a/content/commands/ts.info/index.md +++ b/content/commands/ts.info/index.md @@ -61,12 +61,12 @@ is an optional flag to get a more detailed information about the chunks. | `lastTimestamp` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Last timestamp present in this time series (Unix timestamp in milliseconds) | `retentionTime` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
The retention period, in milliseconds, for this time series | `chunkCount` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
Number of chunks used for this time series -| `chunkSize` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
The initial allocation size, in bytes, for the data part of each new chunk.
Actual chunks may consume more memory. Changing the chunk size (using [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter)) does not affect existing chunks. +| `chunkSize` | [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}})
The initial allocation size, in bytes, for the data part of each new chunk.
Actual chunks may consume more memory. Changing the chunk size (using [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter/)) does not affect existing chunks. | `chunkType` | [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}})
The chunks type: `compressed` or `uncompressed` | `duplicatePolicy` | [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}) or [Nil reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})
The [duplicate policy]({{< baseurl >}}/develop/data-types/timeseries/configuration#duplicate_policy) of this time series | `labels` | [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) or [Nil reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})
Metadata labels of this time series
Each element is a 2-elements [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) of ([Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}), [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})) representing (label, value) -| `sourceKey` | [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}) or [Nil reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})
Key name for source time series in case the current series is a target of a [compaction rule]({{< baseurl >}}/commands/ts.createrule/) -| `rules` | [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}})
[Compaction rules]({{< baseurl >}}/commands/ts.createrule/) defined in this time series
Each rule is an [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with 4 elements:
- [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}): The compaction key
- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}): The bucket duration
- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}): The aggregator
- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}): The alignment (since RedisTimeSeries v1.8) +| `sourceKey` | [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}) or [Nil reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}})
Key name for source time series in case the current series is a target of a [compaction rule]({{< baseurl >}}/commands/ts.createrule/) +| `rules` | [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}})
[Compaction rules]({{< baseurl >}}/commands/ts.createrule/) defined in this time series
Each rule is an [Array reply]({{< relref "/develop/reference/protocol-spec#arrays" >}}) with 4 elements:
- [Bulk string reply]({{< relref "/develop/reference/protocol-spec#bulk-strings" >}}): The compaction key
- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}): The bucket duration
- [Simple string reply]({{< relref "/develop/reference/protocol-spec#simple-strings" >}}): The aggregator
- [Integer reply]({{< relref "/develop/reference/protocol-spec#integers" >}}): The alignment (since RedisTimeSeries v1.8) When [`DEBUG`]({{< relref "/commands/debug" >}}) is specified, the response also contains: @@ -176,7 +176,7 @@ Query the time series using DEBUG to get more information about the chunks. ## See also -[`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.QUERYINDEX`]({{< baseurl >}}/commands/ts.queryindex) | [`TS.GET`]({{< baseurl >}}/commands/ts.get) +[`TS.RANGE`]({{< baseurl >}}/commands/ts.range/) | [`TS.QUERYINDEX`]({{< baseurl >}}/commands/ts.queryindex/) | [`TS.GET`]({{< baseurl >}}/commands/ts.get/) ## Related topics diff --git a/content/commands/ts.madd/index.md b/content/commands/ts.madd/index.md index d24c51f9d9..2317932554 100644 --- a/content/commands/ts.madd/index.md +++ b/content/commands/ts.madd/index.md @@ -66,7 +66,7 @@ is numeric data value of the sample (double). The double number should follow Notes: - If `timestamp` is older than the retention period compared to the maximum existing timestamp, the sample is discarded and an error is returned. -- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add), `TS.MADD`, [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples. +- Explicitly adding samples to a compacted time series (using [`TS.ADD`]({{< baseurl >}}/commands/ts.add/), `TS.MADD`, [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby/), or [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby/)) may result in inconsistencies between the raw and the compacted data. The compaction process may override such samples.
## Return value @@ -105,7 +105,7 @@ OK ## See also -[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange) +[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange/) | [`TS.RANGE`]({{< baseurl >}}/commands/ts.range/) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange/) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange/) ## Related topics diff --git a/content/commands/ts.mget/index.md b/content/commands/ts.mget/index.md index 954f715690..1c51dea672 100644 --- a/content/commands/ts.mget/index.md +++ b/content/commands/ts.mget/index.md @@ -197,7 +197,7 @@ To get only the `location` label for each last sample, use `SELECTED_LABELS`. ## See also -[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange) +[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange/) | [`TS.RANGE`]({{< baseurl >}}/commands/ts.range/) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange/) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange/) ## Related topics diff --git a/content/commands/ts.mrange/index.md b/content/commands/ts.mrange/index.md index e61bb76b11..ad829e503f 100644 --- a/content/commands/ts.mrange/index.md +++ b/content/commands/ts.mrange/index.md @@ -593,7 +593,7 @@ Query all time series with the metric label equal to `cpu`, but only return the ## See also -[`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange) +[`TS.RANGE`]({{< baseurl >}}/commands/ts.range/) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange/) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange/) ## Related topics diff --git a/content/commands/ts.mrevrange/index.md 
b/content/commands/ts.mrevrange/index.md index 37ebba8cbc..491b416447 100644 --- a/content/commands/ts.mrevrange/index.md +++ b/content/commands/ts.mrevrange/index.md @@ -588,7 +588,7 @@ Query all time series with the metric label equal to `cpu`, but only return the ## See also -[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange) +[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange/) | [`TS.RANGE`]({{< baseurl >}}/commands/ts.range/) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange/) ## Related topics diff --git a/content/commands/ts.queryindex/index.md b/content/commands/ts.queryindex/index.md index 814a462688..c543bc49ad 100644 --- a/content/commands/ts.queryindex/index.md +++ b/content/commands/ts.queryindex/index.md @@ -115,7 +115,7 @@ To retrieve the keys of all time series representing sensors that measure temper ## See also -[`TS.CREATE`]({{< baseurl >}}/commands/ts.create) | [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) | [`TS.MGET`]({{< baseurl >}}/commands/ts.mget) +[`TS.CREATE`]({{< baseurl >}}/commands/ts.create/) | [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange/) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange/) | [`TS.MGET`]({{< baseurl >}}/commands/ts.mget/) ## Related topics diff --git a/content/commands/ts.range/index.md b/content/commands/ts.range/index.md index 05f8384d56..16caf308d7 100644 --- a/content/commands/ts.range/index.md +++ b/content/commands/ts.range/index.md @@ -388,7 +388,7 @@ Similarly, when the end timestamp for the range query is explicitly stated, you ## See also -[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) +[`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange/) | [`TS.REVRANGE`]({{< baseurl >}}/commands/ts.revrange/) | 
[`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange/) ## Related topics diff --git a/content/commands/ts.revrange/index.md b/content/commands/ts.revrange/index.md index 4b296544e6..bfc3cb19a5 100644 --- a/content/commands/ts.revrange/index.md +++ b/content/commands/ts.revrange/index.md @@ -391,7 +391,7 @@ Similarly, when the end timestamp for the range query is explicitly stated, you ## See also -[`TS.RANGE`]({{< baseurl >}}/commands/ts.range) | [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange) +[`TS.RANGE`]({{< baseurl >}}/commands/ts.range/) | [`TS.MRANGE`]({{< baseurl >}}/commands/ts.mrange/) | [`TS.MREVRANGE`]({{< baseurl >}}/commands/ts.mrevrange/) ## Related topics diff --git a/content/develop/connect/clients/dotnet.md b/content/develop/connect/clients/dotnet.md index ee0cdd9465..a36425cc2a 100644 --- a/content/develop/connect/clients/dotnet.md +++ b/content/develop/connect/clients/dotnet.md @@ -234,7 +234,7 @@ ft.Create( schema); ``` -Use [`JSON.SET`]({{< baseurl >}}/commands/json.set) to set each user value at the specified path. +Use [`JSON.SET`]({{< baseurl >}}/commands/json.set/) to set each user value at the specified path. ```csharp json.Set("user:1", "$", user1); diff --git a/content/develop/connect/clients/om-clients/stack-spring.md b/content/develop/connect/clients/om-clients/stack-spring.md index 16386c4273..20e37f376a 100644 --- a/content/develop/connect/clients/om-clients/stack-spring.md +++ b/content/develop/connect/clients/om-clients/stack-spring.md @@ -343,7 +343,7 @@ Several Redis commands were executed on application startup. Let’s break them ### Index Creation -The first one is a call to [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), which happens after Redis OM Spring scanned the `@Document` annotations. As you can see, since it encountered the annotation on `Person`, it creates the `PersonIdx` index. 
+The first one is a call to [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/), which happens after Redis OM Spring scanned the `@Document` annotations. As you can see, since it encountered the annotation on `Person`, it creates the `PersonIdx` index. {{< highlight bash >}} "FT.CREATE" @@ -391,7 +391,7 @@ Let's break it down: * The first call uses the generated ULID to check if the id is in the set of primary keys (if it is, it’ll be removed) * The second call checks if JSON document exists (if it is, it’ll be removed) -* The third call uses the [`JSON.SET`]({{< baseurl >}}/commands/json.set) command to save the JSON payload +* The third call uses the [`JSON.SET`]({{< baseurl >}}/commands/json.set/) command to save the JSON payload * The last call adds the primary key of the saved document to the set of primary keys Now that we’ve seen the repository in action via the `.save` method, we know that the trip from Java to Redis work. Now let’s add some more data to make the interactions more interesting: diff --git a/content/develop/connect/clients/python.md b/content/develop/connect/clients/python.md index 92cdf0d22b..f80985e121 100644 --- a/content/develop/connect/clients/python.md +++ b/content/develop/connect/clients/python.md @@ -183,7 +183,7 @@ rs.create_index( # b'OK' ``` -Use [`JSON.SET`]({{< baseurl >}}/commands/json.set) to set each user value at the specified path. +Use [`JSON.SET`]({{< baseurl >}}/commands/json.set/) to set each user value at the specified path. ```python r.json().set("user:1", Path.root_path(), user1) @@ -209,7 +209,7 @@ rs.search( # [Document {'id': 'user:1', 'payload': None, 'city': 'London'}, Document {'id': 'user:3', 'payload': None, 'city': 'Tel Aviv'}] ``` -Aggregate your results using [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate). +Aggregate your results using [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/). 
```python req = aggregations.AggregateRequest("*").group_by('@city', reducers.count().alias('count')) diff --git a/content/develop/data-types/json/_index.md b/content/develop/data-types/json/_index.md index 01158a54be..586e26063a 100644 --- a/content/develop/data-types/json/_index.md +++ b/content/develop/data-types/json/_index.md @@ -36,7 +36,7 @@ To learn how to use JSON, it's best to start with the Redis CLI. The following e First, start [`redis-cli`](http://redis.io/topics/rediscli) in interactive mode. -The first JSON command to try is [`JSON.SET`]({{< baseurl >}}/commands/json.set), which sets a Redis key with a JSON value. [`JSON.SET`]({{< baseurl >}}/commands/json.set) accepts all JSON value types. This example creates a JSON string: +The first JSON command to try is [`JSON.SET`]({{< baseurl >}}/commands/json.set/), which sets a Redis key with a JSON value. [`JSON.SET`]({{< baseurl >}}/commands/json.set/) accepts all JSON value types. This example creates a JSON string: ```sh > JSON.SET animal $ '"dog"' @@ -49,7 +49,7 @@ The first JSON command to try is [`JSON.SET`]({{< baseurl >}}/commands/json.set) Note how the commands include the dollar sign character `$`. This is the [path]({{< relref "/develop/data-types/json/path" >}}) to the value in the JSON document (in this case it just means the root). -Here are a few more string operations. [`JSON.STRLEN`]({{< baseurl >}}/commands/json.strlen) tells you the length of the string, and you can append another string to it with [`JSON.STRAPPEND`]({{< baseurl >}}/commands/json.strappend). +Here are a few more string operations. [`JSON.STRLEN`]({{< baseurl >}}/commands/json.strlen/) tells you the length of the string, and you can append another string to it with [`JSON.STRAPPEND`]({{< baseurl >}}/commands/json.strappend/). ```sh > JSON.STRLEN animal $ @@ -60,7 +60,7 @@ Here are a few more string operations. 
[`JSON.STRLEN`]({{< baseurl >}}/commands/ "[\"dog (Canis familiaris)\"]" ``` -Numbers can be [incremented]({{< baseurl >}}/commands/json.numincrby) and [multiplied]({{< baseurl >}}/commands/json.nummultby): +Numbers can be [incremented]({{< baseurl >}}/commands/json.numincrby/) and [multiplied]({{< baseurl >}}/commands/json.nummultby/): ``` > JSON.SET num $ 0 @@ -90,7 +90,7 @@ OK "[[true,{\"answer\":42}]]" ``` -The [`JSON.DEL`]({{< baseurl >}}/commands/json.del) command deletes any JSON value you specify with the `path` parameter. +The [`JSON.DEL`]({{< baseurl >}}/commands/json.del/) command deletes any JSON value you specify with the `path` parameter. You can manipulate arrays with a dedicated subset of JSON commands: @@ -128,7 +128,7 @@ OK 3) "loggedOut" ``` -To return a JSON response in a more human-readable format, run `redis-cli` in raw output mode and include formatting keywords such as `INDENT`, `NEWLINE`, and `SPACE` with the [`JSON.GET`]({{< baseurl >}}/commands/json.get) command: +To return a JSON response in a more human-readable format, run `redis-cli` in raw output mode and include formatting keywords such as `INDENT`, `NEWLINE`, and `SPACE` with the [`JSON.GET`]({{< baseurl >}}/commands/json.get/) command: ```sh $ redis-cli --raw diff --git a/content/develop/data-types/json/path.md b/content/develop/data-types/json/path.md index fb32361a1b..3c0feb8582 100644 --- a/content/develop/data-types/json/path.md +++ b/content/develop/data-types/json/path.md @@ -133,7 +133,7 @@ JSON.SET store $ '{"inventory":{"headphones":[{"id":12345,"name":"Noise-cancelli #### Access JSON examples -The following examples use the [`JSON.GET`]({{< baseurl >}}/commands/json.get) command to retrieve data from various paths in the JSON document. +The following examples use the [`JSON.GET`]({{< baseurl >}}/commands/json.get/) command to retrieve data from various paths in the JSON document. 
You can use the wildcard operator `*` to return a list of all items in the inventory: @@ -225,7 +225,7 @@ Now we can match against the value of `regex_pat` instead of a hard-coded regula You can also use JSONPath queries when you want to update specific sections of a JSON document. -For example, you can pass a JSONPath to the [`JSON.SET`]({{< baseurl >}}/commands/json.set) command to update a specific field. This example changes the price of the first item in the headphones list: +For example, you can pass a JSONPath to the [`JSON.SET`]({{< baseurl >}}/commands/json.set/) command to update a specific field. This example changes the price of the first item in the headphones list: ```sh 127.0.0.1:6379> JSON.GET store $..headphones[0].price @@ -245,7 +245,7 @@ You can use filter expressions to update only JSON elements that match certain c "[\"Noise-cancelling Bluetooth headphones\",\"Wireless earbuds\"]" ``` -JSONPath queries also work with other JSON commands that accept a path as an argument. For example, you can add a new color option for a set of headphones with [`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend): +JSONPath queries also work with other JSON commands that accept a path as an argument. For example, you can add a new color option for a set of headphones with [`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend/): ```sh 127.0.0.1:6379> JSON.GET store $..headphones[0].colors diff --git a/content/develop/data-types/json/ram.md b/content/develop/data-types/json/ram.md index df61870734..b71ff59030 100644 --- a/content/develop/data-types/json/ram.md +++ b/content/develop/data-types/json/ram.md @@ -24,7 +24,7 @@ RAM. Redis JSON stores JSON values as binary data after deserializing them. This representation is often more expensive, size-wize, than the serialized form. 
The JSON data type uses at least 24 bytes (on 64-bit architectures) for every value, as can be seen by sampling an empty string with the -[`JSON.DEBUG MEMORY`]({{< baseurl >}}/commands/json.debug-memory) command: +[`JSON.DEBUG MEMORY`]({{< baseurl >}}/commands/json.debug-memory/) command: ``` 127.0.0.1:6379> JSON.SET emptystring . '""' diff --git a/content/develop/data-types/probabilistic/count-min-sketch.md b/content/develop/data-types/probabilistic/count-min-sketch.md index 6627571147..af16a1c647 100644 --- a/content/develop/data-types/probabilistic/count-min-sketch.md +++ b/content/develop/data-types/probabilistic/count-min-sketch.md @@ -102,7 +102,7 @@ or error = threshold/total_count ``` -where `total_count` is the sum of the count of all elements that can be obtained from the `count` key of the result of the [`CMS.INFO`]({{< baseurl >}}/commands/cms.info) command and is of course dynamic - it changes with every new increment in the sketch. At creation time you can approximate the `total_count` ratio as a product of the average count you'll be expecting in the sketch and the average number of elements. +where `total_count` is the sum of the count of all elements that can be obtained from the `count` key of the result of the [`CMS.INFO`]({{< baseurl >}}/commands/cms.info/) command and is of course dynamic - it changes with every new increment in the sketch. At creation time you can approximate the `total_count` ratio as a product of the average count you'll be expecting in the sketch and the average number of elements. Since the threshold is a function of the total count in the filter it's very important to note that it will grow as the count grows, but knowing the total count we can always dynamically calculate the threshold. If a result is below it - it can be discarded. 
diff --git a/content/develop/data-types/probabilistic/cuckoo-filter.md b/content/develop/data-types/probabilistic/cuckoo-filter.md index 3c036b364e..bb71c3d6aa 100644 --- a/content/develop/data-types/probabilistic/cuckoo-filter.md +++ b/content/develop/data-types/probabilistic/cuckoo-filter.md @@ -49,7 +49,7 @@ Note> In addition to these two cases, Cuckoo filters serve very well all the Blo ## Examples -> You'll learn how to create an empty cuckoo filter with an initial capacity for 1,000 items, add items, check their existence, and remove them. Even though the [`CF.ADD`]({{< baseurl >}}/commands/cf.add) command can create a new filter if one isn't present, it might not be optimally sized for your needs. It's better to use the [`CF.RESERVE`]({{< baseurl >}}/commands/cf.reserve) command to set up a filter with your preferred capacity. +> You'll learn how to create an empty cuckoo filter with an initial capacity for 1,000 items, add items, check their existence, and remove them. Even though the [`CF.ADD`]({{< baseurl >}}/commands/cf.add/) command can create a new filter if one isn't present, it might not be optimally sized for your needs. It's better to use the [`CF.RESERVE`]({{< baseurl >}}/commands/cf.reserve/) command to set up a filter with your preferred capacity. {{< clients-example cuckoo_tutorial cuckoo >}} > CF.RESERVE bikes:models 1000000 diff --git a/content/develop/data-types/probabilistic/t-digest.md b/content/develop/data-types/probabilistic/t-digest.md index 81feb085b1..8c297d132c 100644 --- a/content/develop/data-types/probabilistic/t-digest.md +++ b/content/develop/data-types/probabilistic/t-digest.md @@ -72,7 +72,7 @@ You measure the IP packets transferred over your network each second and try to ## Examples -In the following example, you'll create a t-digest with a compression of 100 and add items to it. The `COMPRESSION` argument is used to specify the tradeoff between accuracy and memory consumption. The default value is 100. 
Higher values mean more accuracy. Note: unlike some of the other probabilistic data structures, the [`TDIGEST.ADD`]({{< baseurl >}}/commands/tdigest.add) command will not create a new structure if the key does not exist. +In the following example, you'll create a t-digest with a compression of 100 and add items to it. The `COMPRESSION` argument is used to specify the tradeoff between accuracy and memory consumption. The default value is 100. Higher values mean more accuracy. Note: unlike some of the other probabilistic data structures, the [`TDIGEST.ADD`]({{< baseurl >}}/commands/tdigest.add/) command will not create a new structure if the key does not exist. {{< clients-example tdigest_tutorial tdig_start >}} > TDIGEST.CREATE bikes:sales COMPRESSION 100 @@ -84,13 +84,13 @@ OK {{< /clients-example >}} -You can repeat calling [TDIGEST.ADD]({{< baseurl >}}/commands/tdigest.add/) whenever new observations are available +You can repeat calling [TDIGEST.ADD]({{< baseurl >}}/commands/tdigest.add/) whenever new observations are available #### Estimating fractions or ranks by values Another helpful feature in t-digest is CDF (definition of rank) which gives us the fraction of observations smaller or equal to a certain value. This command is very useful to answer questions like "*What's the percentage of observations with a value lower or equal to X*". ->More precisely, [`TDIGEST.CDF`]({{< baseurl >}}/commands/tdigest.cdf) will return the estimated fraction of observations in the sketch that are smaller than X plus half the number of observations that are equal to X. We can also use the [`TDIGEST.RANK`]({{< baseurl >}}/commands/tdigest.rank) command, which is very similar. Instead of returning a fraction, it returns the ----estimated---- rank of a value. The [`TDIGEST.RANK`]({{< baseurl >}}/commands/tdigest.rank) command is also variadic, meaning you can use a single command to retrieve estimations for one or more values. 
+>More precisely, [`TDIGEST.CDF`]({{< baseurl >}}/commands/tdigest.cdf/) will return the estimated fraction of observations in the sketch that are smaller than X plus half the number of observations that are equal to X. We can also use the [`TDIGEST.RANK`]({{< baseurl >}}/commands/tdigest.rank/) command, which is very similar. Instead of returning a fraction, it returns the ----estimated---- rank of a value. The [`TDIGEST.RANK`]({{< baseurl >}}/commands/tdigest.rank/) command is also variadic, meaning you can use a single command to retrieve estimations for one or more values. Here's an example. Given a set of biker's ages, you can ask a question like "What's the percentage of bike racers that are younger than 50 years?" @@ -109,7 +109,7 @@ OK {{< /clients-example >}} -And lastly, `TDIGEST.REVRANK key value...` is similar to [TDIGEST.RANK]({{< baseurl >}}/commands/tdigest.rank/), but returns, for each input value, an estimation of the number of (observations larger than a given value + half the observations equal to the given value). +And lastly, `TDIGEST.REVRANK key value...` is similar to [TDIGEST.RANK]({{< baseurl >}}/commands/tdigest.rank/), but returns, for each input value, an estimation of the number of (observations larger than a given value + half the observations equal to the given value). #### Estimating values by fractions or ranks @@ -143,7 +143,7 @@ If `destKey` is an existing sketch, its values are merged with the values of the #### Retrieving sketch information -Use [`TDIGEST.MIN`]({{< baseurl >}}/commands/tdigest.min) and [`TDIGEST.MAX`]({{< baseurl >}}/commands/tdigest.max) to retrieve the minimal and maximal values in the sketch, respectively. +Use [`TDIGEST.MIN`]({{< baseurl >}}/commands/tdigest.min/) and [`TDIGEST.MAX`]({{< baseurl >}}/commands/tdigest.max/) to retrieve the minimal and maximal values in the sketch, respectively. 
{{< clients-example tdigest_tutorial tdig_min >}} > TDIGEST.MIN racer_ages diff --git a/content/develop/data-types/probabilistic/top-k.md b/content/develop/data-types/probabilistic/top-k.md index 1a7fd90e09..0833ebc795 100644 --- a/content/develop/data-types/probabilistic/top-k.md +++ b/content/develop/data-types/probabilistic/top-k.md @@ -37,25 +37,25 @@ This application answers these questions: Data flow is the incoming social media posts from which you parse out the different hashtags. -The [`TOPK.LIST`]({{< baseurl >}}/commands/topk.list) command has a time complexity of `O(K)` so if `K` is small, there is no need to keep a separate set or sorted set of all the hashtags. You can query directly from the Top K itself. +The [`TOPK.LIST`]({{< baseurl >}}/commands/topk.list/) command has a time complexity of `O(K)` so if `K` is small, there is no need to keep a separate set or sorted set of all the hashtags. You can query directly from the Top K itself. ## Example This example will show you how to track key words used "bike" when shopping online; e.g., "bike store" and "bike handlebars". Proceed as follows. ​ -* Use [`TOPK.RESERVE`]({{< baseurl >}}/commands/topk.reserve) to initialize a top K sketch with specific parameters. Note: the `width`, `depth`, and `decay_constant` parameters can be omitted, as they will be set to the default values 7, 8, and 0.9, respectively, if not present. +* Use [`TOPK.RESERVE`]({{< baseurl >}}/commands/topk.reserve/) to initialize a top K sketch with specific parameters. Note: the `width`, `depth`, and `decay_constant` parameters can be omitted, as they will be set to the default values 7, 8, and 0.9, respectively, if not present. ​ ``` > TOPK.RESERVE key k width depth decay_constant ``` - * Use [`TOPK.ADD`]({{< baseurl >}}/commands/topk.add) to add items to the sketch. As you can see, multiple items can be added at the same time. 
If an item is returned when adding additional items, it means that item was demoted out of the min heap of the top items, below it will mean the returned item is no longer in the top 5, otherwise `nil` is returned. This allows dynamic heavy-hitter detection of items being entered or expelled from top K list. + * Use [`TOPK.ADD`]({{< baseurl >}}/commands/topk.add/) to add items to the sketch. As you can see, multiple items can be added at the same time. If an item is returned when adding additional items, it means that item was demoted out of the min heap of the top items, below it will mean the returned item is no longer in the top 5, otherwise `nil` is returned. This allows dynamic heavy-hitter detection of items being entered or expelled from top K list. ​ In the example below, "pedals" displaces "handlebars", which is returned after "pedals" is added. Also note that the addition of both "store" and "seat" a second time don't return anything, as they're already in the top K. - * Use [`TOPK.LIST`]({{< baseurl >}}/commands/topk.list) to list the items entered thus far. + * Use [`TOPK.LIST`]({{< baseurl >}}/commands/topk.list/) to list the items entered thus far. ​ - * Use [`TOPK.QUERY`]({{< baseurl >}}/commands/topk.query) to see if an item is on the top K list. Just like [`TOPK.ADD`]({{< baseurl >}}/commands/topk.add) multiple items can be queried at the same time. + * Use [`TOPK.QUERY`]({{< baseurl >}}/commands/topk.query/) to see if an item is on the top K list. Just like [`TOPK.ADD`]({{< baseurl >}}/commands/topk.add/) multiple items can be queried at the same time. 
{{< clients-example topk_tutorial topk >}} > TOPK.RESERVE bikes:keywords 5 2000 7 0.925 OK diff --git a/content/develop/data-types/timeseries/configuration.md b/content/develop/data-types/timeseries/configuration.md index fd5c4241eb..57fb2f1548 100644 --- a/content/develop/data-types/timeseries/configuration.md +++ b/content/develop/data-types/timeseries/configuration.md @@ -70,9 +70,9 @@ $ redis-server --loadmodule ./redistimeseries.so NUM_THREADS 3 ### COMPACTION_POLICY -Default compaction rules for newly created key with [`TS.ADD`]({{< baseurl >}}/commands/ts.add). +Default compaction rules for newly created key with [`TS.ADD`]({{< baseurl >}}/commands/ts.add/). -Note that `COMPACTION_POLICY` has no effect on keys created with [`TS.CREATE`]({{< baseurl >}}/commands/ts.create). To understand the motivation for this behavior, consider the following scenario: Suppose a `COMPACTION_POLICY` is defined, but then one wants to manually create an additional compaction rule (using [`TS.CREATERULE`]({{< baseurl >}}/commands/ts.createrule)) which requires first creating an empty destination key (using [`TS.CREATE`]({{< baseurl >}}/commands/ts.create)). But now there is a problem: due to the `COMPACTION_POLICY`, automatic compactions would be undesirably created for that destination key. +Note that `COMPACTION_POLICY` has no effect on keys created with [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/). To understand the motivation for this behavior, consider the following scenario: Suppose a `COMPACTION_POLICY` is defined, but then one wants to manually create an additional compaction rule (using [`TS.CREATERULE`]({{< baseurl >}}/commands/ts.createrule/)) which requires first creating an empty destination key (using [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/)). But now there is a problem: due to the `COMPACTION_POLICY`, automatic compactions would be undesirably created for that destination key. 
Each rule is separated by a semicolon (`;`), the rule consists of multiple fields that are separated by a colon (`:`): @@ -152,7 +152,7 @@ $ redis-server --loadmodule ./redistimeseries.so COMPACTION_POLICY max:1m:1h;min Default retention period, in milliseconds, for newly created keys. -Retention period is the maximum age of samples compared to highest reported timestamp, per key. Samples are expired based solely on the difference between their timestamp and the timestamps passed to subsequent [`TS.ADD`]({{< baseurl >}}/commands/ts.add), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby), and [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby) calls. +Retention period is the maximum age of samples compared to highest reported timestamp, per key. Samples are expired based solely on the difference between their timestamp and the timestamps passed to subsequent [`TS.ADD`]({{< baseurl >}}/commands/ts.add/), [`TS.MADD`]({{< baseurl >}}/commands/ts.madd/), [`TS.INCRBY`]({{< baseurl >}}/commands/ts.incrby/), and [`TS.DECRBY`]({{< baseurl >}}/commands/ts.decrby/) calls. The value `0` means no expiration. 
@@ -172,7 +172,7 @@ $ redis-server --loadmodule ./redistimeseries.so RETENTION_POLICY 25920000000 ### DUPLICATE_POLICY -Is policy for handling insertion ([`TS.ADD`]({{< baseurl >}}/commands/ts.add) and [`TS.MADD`]({{< baseurl >}}/commands/ts.madd)) of multiple samples with identical timestamps, with one of the following values: +Is policy for handling insertion ([`TS.ADD`]({{< baseurl >}}/commands/ts.add/) and [`TS.MADD`]({{< baseurl >}}/commands/ts.madd/)) of multiple samples with identical timestamps, with one of the following values: | policy | description | | ---------- | ---------------------------------------------------------------- | @@ -186,8 +186,8 @@ Is policy for handling insertion ([`TS.ADD`]({{< baseurl >}}/commands/ts.add) an #### Precedence order Since the duplication policy can be provided at different levels, the actual precedence of the used policy will be: -1. [`TS.ADD`]({{< baseurl >}}/commands/ts.add)'s `ON_DUPLICATE_policy` optional argument -2. Key-level policy (as set with [`TS.CREATE`]({{< baseurl >}}/commands/ts.create)'s and [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter)'s `DUPLICATE_POLICY` optional argument) +1. [`TS.ADD`]({{< baseurl >}}/commands/ts.add/)'s `ON_DUPLICATE_policy` optional argument +2. Key-level policy (as set with [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/)'s and [`TS.ALTER`]({{< baseurl >}}/commands/ts.alter/)'s `DUPLICATE_POLICY` optional argument) 3. The `DUPLICATE_POLICY` module configuration parameter 4. 
The default policy diff --git a/content/develop/data-types/timeseries/quickstart.md b/content/develop/data-types/timeseries/quickstart.md index 5ebe06959e..5772c875c5 100644 --- a/content/develop/data-types/timeseries/quickstart.md +++ b/content/develop/data-types/timeseries/quickstart.md @@ -107,7 +107,7 @@ OK ## Creating a timeseries -A new timeseries can be created with the [`TS.CREATE`]({{< baseurl >}}/commands/ts.create) command; for example, to create a timeseries named `sensor1` run the following: +A new timeseries can be created with the [`TS.CREATE`]({{< baseurl >}}/commands/ts.create/) command; for example, to create a timeseries named `sensor1` run the following: ``` TS.CREATE sensor1 @@ -122,7 +122,7 @@ This will create a timeseries called `sensor1` and trim it to values of up to on ## Adding data points -For adding new data points to a timeseries we use the [`TS.ADD`]({{< baseurl >}}/commands/ts.add) command: +For adding new data points to a timeseries we use the [`TS.ADD`]({{< baseurl >}}/commands/ts.add/) command: ``` TS.ADD key timestamp value @@ -141,14 +141,14 @@ To **add a datapoint with the current timestamp** you can use a `*` instead of a TS.ADD sensor1 * 26 ``` -You can **append data points to multiple timeseries** at the same time with the [`TS.MADD`]({{< baseurl >}}/commands/ts.madd) command: +You can **append data points to multiple timeseries** at the same time with the [`TS.MADD`]({{< baseurl >}}/commands/ts.madd/) command: ``` TS.MADD key timestamp value [key timestamp value ...] 
``` ## Deleting data points -Data points between two timestamps (inclusive) can be deleted with the [`TS.DEL`]({{< baseurl >}}/commands/ts.del) command: +Data points between two timestamps (inclusive) can be deleted with the [`TS.DEL`]({{< baseurl >}}/commands/ts.del/) command: ``` TS.DEL key fromTimestamp toTimestamp ``` @@ -175,7 +175,7 @@ TS.CREATE sensor1 LABELS region east ## Compaction -Another useful feature of Redis Time Series is compacting data by creating a rule for compaction ([`TS.CREATERULE`]({{< baseurl >}}/commands/ts.createrule)). For example, if you have collected more than one billion data points in a day, you could aggregate the data by every minute in order to downsample it, thereby reducing the dataset size to 24 * 60 = 1,440 data points. You can choose one of the many available aggregation types in order to aggregate multiple data points from a certain minute into a single one. The currently supported aggregation types are: `avg, sum, min, max, range, count, first, last, std.p, std.s, var.p, var.s and twa`. +Another useful feature of Redis Time Series is compacting data by creating a rule for compaction ([`TS.CREATERULE`]({{< baseurl >}}/commands/ts.createrule/)). For example, if you have collected more than one billion data points in a day, you could aggregate the data by every minute in order to downsample it, thereby reducing the dataset size to 24 * 60 = 1,440 data points. You can choose one of the many available aggregation types in order to aggregate multiple data points from a certain minute into a single one. The currently supported aggregation types are: `avg, sum, min, max, range, count, first, last, std.p, std.s, var.p, var.s and twa`. It's important to point out that there is no data rewriting on the original timeseries; the compaction happens in a new series, while the original one stays the same. 
In order to prevent the original timeseries from growing indefinitely, you can use the retention option, which will trim it down to a certain period of time. @@ -207,7 +207,7 @@ TS.MRANGE - + FILTER area_id=32 This query will show data from all sensors (timeseries) that have a label of `area_id` with a value of `32`. The results will be grouped by timeseries. -Or we can also use the [`TS.MGET`]({{< baseurl >}}/commands/ts.mget) command to get the last sample that matches the specific filter: +Or we can also use the [`TS.MGET`]({{< baseurl >}}/commands/ts.mget/) command to get the last sample that matches the specific filter: ``` TS.MGET FILTER area_id=32 diff --git a/content/develop/get-started/document-database.md b/content/develop/get-started/document-database.md index 16cd3f284f..c4f4490c62 100644 --- a/content/develop/get-started/document-database.md +++ b/content/develop/get-started/document-database.md @@ -67,7 +67,7 @@ You can copy and paste the connection details from the Redis Cloud database conf As explained in the [in-memory data store]({{< relref "/develop/get-started/data-store" >}}) quick start guide, Redis allows you to access an item directly via its key. You also learned how to scan the keyspace. Whereby you can use other data structures (e.g., hashes and sorted sets) as secondary indexes, your application would need to maintain those indexes manually. Redis Stack turns Redis into a document database by allowing you to declare which fields are auto-indexed. Redis Stack currently supports secondary index creation on the [hashes]({{< relref "/develop/data-types/hashes" >}}) and [JSON]({{< relref "/develop/data-types/json" >}}) documents. -The following example shows an [FT.CREATE]({{< baseurl >}}/commands/ft.create/) command that creates an index with some text fields, a numeric field (price), and a tag field (condition). The text fields have a weight of 1.0, meaning they have the same relevancy in the context of full-text searches. 
The field names follow the [JSONPath]({{< relref "/develop/data-types/json/path" >}}) notion. Each such index field maps to a property within the JSON document. +The following example shows an [FT.CREATE]({{< baseurl >}}/commands/ft.create/) command that creates an index with some text fields, a numeric field (price), and a tag field (condition). The text fields have a weight of 1.0, meaning they have the same relevancy in the context of full-text searches. The field names follow the [JSONPath]({{< relref "/develop/data-types/json/path" >}}) notion. Each such index field maps to a property within the JSON document. {{< clients-example search_quickstart create_index >}} @@ -79,7 +79,7 @@ Any pre-existing JSON documents with a key prefix `bicycle:` are automatically a ## Add JSON documents -The example below shows you how to use the [JSON.SET]({{< baseurl >}}/commands/json.set/) command to create new JSON documents: +The example below shows you how to use the [JSON.SET]({{< baseurl >}}/commands/json.set/) command to create new JSON documents: {{< clients-example search_quickstart add_documents "" 2 >}} > JSON.SET "bicycle:0" "." "{\"brand\": \"Velorim\", \"model\": \"Jigger\", \"price\": 270, \"description\": \"Small and powerful, the Jigger is the best ride for the smallest of tikes! This is the tiniest kids\\u2019 pedal bike on the market available without a coaster brake, the Jigger is the vehicle of choice for the rare tenacious little rider raring to go.\", \"condition\": \"new\"}" @@ -108,7 +108,7 @@ OK ### Wildcard query -You can retrieve all indexed documents using the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command. Note the `LIMIT` clause below, which allows result pagination. +You can retrieve all indexed documents using the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command. Note the `LIMIT` clause below, which allows result pagination. 
{{< clients-example search_quickstart wildcard_query "" 10 >}} > FT.SEARCH "idx:bicycle" "*" LIMIT 0 10 diff --git a/content/develop/get-started/vector-database.md b/content/develop/get-started/vector-database.md index fc3f34e629..87443760b8 100644 --- a/content/develop/get-started/vector-database.md +++ b/content/develop/get-started/vector-database.md @@ -102,7 +102,7 @@ The following code allows you to look at the structure of one of our bike JSON d {{< clients-example search_vss dump_data />}} ### 2. Store the demo data in your database -Then, you iterate over the `bikes` array to store the data as [JSON]({{< relref "/develop/data-types/json/" >}}) documents in the database by using the [JSON.SET]({{< baseurl >}}/commands/json.set/) command. The below code uses a [pipeline]({{< relref "/develop/use/pipelining" >}}) to minimize the round-trip times: +Then, you iterate over the `bikes` array to store the data as [JSON]({{< relref "/develop/data-types/json/" >}}) documents in the database by using the [JSON.SET]({{< baseurl >}}/commands/json.set/) command. The below code uses a [pipeline]({{< relref "/develop/use/pipelining" >}}) to minimize the round-trip times: {{< clients-example search_vss load_data />}} @@ -125,11 +125,11 @@ In the next step, you must iterate over all the Redis keys with the prefix `bike {{< clients-example search_vss get_keys />}} -Use the keys as a parameter to the [JSON.MGET]({{< baseurl >}}/commands/json.mget/) command, along with the JSONPath expression `$.description` to collect the descriptions in a list. Then, pass the list to the `encode` method to get a list of vectorized embeddings: +Use the keys as a parameter to the [JSON.MGET]({{< baseurl >}}/commands/json.mget/) command, along with the JSONPath expression `$.description` to collect the descriptions in a list. 
Then, pass the list to the `encode` method to get a list of vectorized embeddings: {{< clients-example search_vss generate_embeddings />}} -You now need to add the vectorized descriptions to the JSON documents in Redis using the [JSON.SET]({{< baseurl >}}/commands/json.set/) command. The following command inserts a new field in each of the documents under the JSONPath `$.description_embeddings`. Once again, you'll do this using a pipeline: +You now need to add the vectorized descriptions to the JSON documents in Redis using the [JSON.SET]({{< baseurl >}}/commands/json.set/) command. The following command inserts a new field in each of the documents under the JSONPath `$.description_embeddings`. Once again, you'll do this using a pipeline: {{< clients-example search_vss load_embeddings />}} @@ -148,7 +148,7 @@ In the example above, the array was shortened considerably for the sake of reada ### 1. Create an index with a vector field -You must create an index to query based on vector metadata or perform vector searches. Use the [FT.CREATE]({{< baseurl >}}/commands/ft.create/) command: +You must create an index to query based on vector metadata or perform vector searches. Use the [FT.CREATE]({{< baseurl >}}/commands/ft.create/) command: {{< clients-example search_vss create_index >}} FT.CREATE idx:bikes_vss ON JSON @@ -174,7 +174,7 @@ You can find further details about all these options in the [vector reference do ### 2. Check the state of the index -As soon as you execute the [FT.CREATE]({{< baseurl >}}/commands/ft.create/) command, the indexing process runs in the background. In a short time, all JSON documents should be indexed and ready to be queried. To validate that, you can use the [FT.INFO]({{< baseurl >}}/commands/ft.info/) command, which provides details and statistics about the index. 
Of particular interest are the number of documents successfully indexed and the number of failures: +As soon as you execute the [FT.CREATE]({{< baseurl >}}/commands/ft.create/) command, the indexing process runs in the background. In a short time, all JSON documents should be indexed and ready to be queried. To validate that, you can use the [FT.INFO]({{< baseurl >}}/commands/ft.info/) command, which provides details and statistics about the index. Of particular interest are the number of documents successfully indexed and the number of failures: {{< clients-example search_vss validate_index >}} FT.INFO idx:bikes_vss @@ -213,7 +213,7 @@ query = ( ``` {{% alert title="Note" color="warning" %}} -To utilize a vector query with the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command, you must specify DIALECT 2 or greater. +To utilize a vector query with the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) command, you must specify DIALECT 2 or greater. {{% /alert %}} You must pass the vectorized query as `$query_vector` as a byte array. The following code shows an example of creating a Python NumPy array from a vectorized query prompt (`encoded_query`) as a single precision floating point array and converting it into a compact, byte-level representation that can be passed as a parameter to the query: diff --git a/content/develop/interact/search-and-query/administration/design.md b/content/develop/interact/search-and-query/administration/design.md index 27ea98feeb..839badaea2 100644 --- a/content/develop/interact/search-and-query/administration/design.md +++ b/content/develop/interact/search-and-query/administration/design.md @@ -75,7 +75,7 @@ When searching, priority queue of the top N results requested is maintained, whi ## Index ppecs and field weights -When creating an "index" using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), the user specifies the fields to be indexed and their respective weights. 
This can be used to give some document fields, like a title, more weight in ranking results. +When creating an "index" using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/), the user specifies the fields to be indexed and their respective weights. This can be used to give some document fields, like a title, more weight in ranking results. For example: diff --git a/content/develop/interact/search-and-query/administration/overview.md b/content/develop/interact/search-and-query/administration/overview.md index 5e46a754e2..72451e7e4c 100644 --- a/content/develop/interact/search-and-query/administration/overview.md +++ b/content/develop/interact/search-and-query/administration/overview.md @@ -163,7 +163,7 @@ The auto-complete engine (see below for a fuller description) utilizes a compact ## Query language -Simple syntax is supported for complex queries that can be combined together to express complex filtering and matching rules. The query is a text string in the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) request that is parsed using a complex query processor. +Simple syntax is supported for complex queries that can be combined together to express complex filtering and matching rules. The query is a text string in the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) request that is parsed using a complex query processor. * Multi-word phrases are lists of tokens, e.g., `foo bar baz`, and imply intersection (logical AND) of the terms. * Exact phrases are wrapped in quotes, e.g `"hello world"`. @@ -258,7 +258,7 @@ These are the pre-bundled scoring functions available in Redis Stack: It is possible to bypass the scoring function mechanism and order search results by the value of different document properties (fields) directly, even if the sorting field is not used by the query. For example, you can search for first name and sort by the last name. 
-When creating the index with [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), you can declare `TEXT`, `TAG`, `NUMERIC`, and `GEO` properties as `SORTABLE`. When a property is sortable, you can later decide to order the results by its values with relatively low latency. When a property is not sortable, it can still be sorted by its values, but may increase latency. For example, the following schema: +When creating the index with [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/), you can declare `TEXT`, `TAG`, `NUMERIC`, and `GEO` properties as `SORTABLE`. When a property is sortable, you can later decide to order the results by its values with relatively low latency. When a property is not sortable, it can still be sorted by its values, but may increase latency. For example, the following schema: ``` FT.CREATE users SCHEMA first_name TEXT last_name TEXT SORTABLE age NUMERIC SORTABLE diff --git a/content/develop/interact/search-and-query/advanced-concepts/aggregations.md b/content/develop/interact/search-and-query/advanced-concepts/aggregations.md index 6c1d7acb63..51e2b52804 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/aggregations.md +++ b/content/develop/interact/search-and-query/advanced-concepts/aggregations.md @@ -481,12 +481,12 @@ FT.CURSOR READ {idx} {cid} [COUNT {read size}] FT.CURSOR DEL {idx} {cid} ``` -You can use cursors with [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate), with the `WITHCURSOR` keyword. Cursors allow you to +You can use cursors with [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/), with the `WITHCURSOR` keyword. Cursors allow you to consume only part of the response, allowing you to fetch additional results as needed. This is much quicker than using `LIMIT` with offset, since the query is executed only once, and its state is stored on the server. -To use cursors, specify the `WITHCURSOR` keyword in [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate). 
For example: +To use cursors, specify the `WITHCURSOR` keyword in [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/). For example: ``` FT.AGGREGATE idx * WITHCURSOR @@ -494,16 +494,16 @@ FT.AGGREGATE idx * WITHCURSOR This will return a response of an array with two elements. The first element is the actual (partial) result, and the second is the cursor ID. The cursor ID -can then be fed to [`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read) repeatedly until the cursor ID is 0, in +can then be fed to [`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read/) repeatedly until the cursor ID is 0, in which case all results have been returned. -To read from an existing cursor, use [`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read). For example: +To read from an existing cursor, use [`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read/). For example: ``` FT.CURSOR READ idx 342459320 ``` -Assuming `342459320` is the cursor ID returned from the [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) request, here is an example in pseudo-code: +Assuming `342459320` is the cursor ID returned from the [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/) request, here is an example in pseudo-code: ``` response, cursor = FT.AGGREGATE "idx" "redis" "WITHCURSOR"; @@ -523,8 +523,8 @@ Note that even if the cursor is 0, a partial result may still be returned. #### Read size You can control how many rows are read for each cursor fetch by using the -`COUNT` parameter. This parameter can be specified both in [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) -(immediately after `WITHCURSOR`) or in [`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read). +`COUNT` parameter. This parameter can be specified both in [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/) +(immediately after `WITHCURSOR`) or in [`FT.CURSOR READ`]({{< baseurl >}}/commands/ft.cursor-read/). 
The following example will read 10 rows at a time: ``` diff --git a/content/develop/interact/search-and-query/advanced-concepts/dialects.md b/content/develop/interact/search-and-query/advanced-concepts/dialects.md index c1975c980f..ae17e7ba4c 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/dialects.md +++ b/content/develop/interact/search-and-query/advanced-concepts/dialects.md @@ -17,7 +17,7 @@ title: Query dialects weight: 5 --- -Redis Stack currently supports four query dialects for use with the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search), [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate), and other search and query commands. +Redis Stack currently supports four query dialects for use with the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/), [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/), and other search and query commands. Dialects provide for enhancing the query API incrementally, introducing innovative behaviors and new features that support new use cases in a way that does not break the API for existing applications.``` ## `DIALECT 1` @@ -82,7 +82,7 @@ With `DIALECT 2` you can use un-escaped spaces in tag queries, even with stopwor Dialect version 3 was introduced in the [2.6](https://github.com/RediSearch/RediSearch/releases/tag/v2.6.3) release. 
This version introduced support for multi-value indexing and querying of attributes for any attribute type ( [TEXT]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-text), [TAG]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-tag), [NUMERIC]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-numeric), [GEO]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-geo) and [VECTOR]({{< baseurl >}}/develop/interact/search-and-query/indexing/#index-json-arrays-as-vector)) defined by a [JSONPath]({{< relref "/develop/data-types/json/path" >}}) leading to an array or multiple scalar values. Support for [GEOSHAPE]({{< relref "/develop/interact/search-and-query/query/geo-spatial" >}}) queries was also introduced in this dialect. -The primary difference between dialects version 2 and version 3 is that JSON is returned rather than scalars for multi-value attributes. Apart from specifying `DIALECT 3` at the end of a [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. To use dialect version 3, append `DIALECT 3` to your query command. +The primary difference between dialects version 2 and version 3 is that JSON is returned rather than scalars for multi-value attributes. Apart from specifying `DIALECT 3` at the end of a [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. To use dialect version 3, append `DIALECT 3` to your query command. `FT.SEARCH ... DIALECT 3` @@ -146,7 +146,7 @@ DIALECT 3 is required for shape-based (`POINT` or `POLYGON`) geospatial queries. ## `DIALECT 4` -Dialect version 4 was introduced in the [2.8](https://github.com/RediSearch/RediSearch/releases/tag/v2.8.4) release. 
It introduces performance optimizations for sorting operations on [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate). Apart from specifying `DIALECT 4` at the end of a [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. To use dialect version 4, append `DIALECT 4` to your query command. +Dialect version 4 was introduced in the [2.8](https://github.com/RediSearch/RediSearch/releases/tag/v2.8.4) release. It introduces performance optimizations for sorting operations on [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/). Apart from specifying `DIALECT 4` at the end of a [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) command, there are no other syntactic changes. Dialect version 1 remains the default dialect. To use dialect version 4, append `DIALECT 4` to your query command. `FT.SEARCH ... DIALECT 4` @@ -156,17 +156,17 @@ Dialect version 4 will improve performance in four different scenarios: 1. **Partial range** - applied when there is a `SORTBY` on a numeric field, either with no filter or with a filter by the same numeric field. Such queries will iterate on a range large enough to satisfy the `LIMIT` of requested results. 1. **Hybrid** - applied when there is a `SORTBY` on a numeric field in addition to another non-numeric filter. It could be the case that some results will get filtered, leaving too small a range to satisfy any specified `LIMIT`. In such cases, the iterator then is re-wound and additional iterations occur to collect result up to the requested `LIMIT`. 1. **No optimization** - If there is a sort by score or by a non-numeric field, there is no other option but to retrieve all results and compare their values to the search parameters. 
-## Use [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli) to compare dialects +## Use [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli/) to compare dialects -The [[`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli)](/commands/ft.explaincli/) is a powerful tool that provides a window into the inner workings of your queries. It's like a roadmap that details your query's journey from start to finish. +The [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli/) is a powerful tool that provides a window into the inner workings of your queries. It's like a roadmap that details your query's journey from start to finish. -When you run [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli), it returns an array representing the execution plan of a complex query. This plan is a step-by-step guide of how Redis interprets your query and how it plans to fetch results. It's a behind-the-scenes look at the process, giving you insights into how the search engine works. +When you run [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli/), it returns an array representing the execution plan of a complex query. This plan is a step-by-step guide of how Redis interprets your query and how it plans to fetch results. It's a behind-the-scenes look at the process, giving you insights into how the search engine works. -The [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli) accepts a `DIALECT` argument, allowing you to execute the query using different dialect versions, allowing you to compare the resulting query plans. +The [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli/) accepts a `DIALECT` argument, allowing you to execute the query using different dialect versions, allowing you to compare the resulting query plans. -To use [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli), you need to provide an index and a query predicate. 
The index is the name of the index you created using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), and the query predicate is the same as if you were sending it to [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) or [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate). +To use [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli/), you need to provide an index and a query predicate. The index is the name of the index you created using [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/), and the query predicate is the same as if you were sending it to [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) or [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/). -Here's an example of how to use [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli) to understand differences in dialect versions 1 and 2. +Here's an example of how to use [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli/) to understand differences in dialect versions 1 and 2. Negation of the intersection between tokens `hello` and `world`: @@ -210,7 +210,7 @@ FT.EXPLAINCLI idx:dialects "-(hello world)" DIALECT 2 ``` {{% alert title=Note %}} -[`FT.EXPLAIN`]({{< baseurl >}}/commands/ft.explain) doesn't execute the query. It only explains the plan. It's a way to understand how your query is interpreted by the query engine, which can be invaluable when you're trying to optimize your searches. +[`FT.EXPLAIN`]({{< baseurl >}}/commands/ft.explain/) doesn't execute the query. It only explains the plan. It's a way to understand how your query is interpreted by the query engine, which can be invaluable when you're trying to optimize your searches. 
{{% /alert %}} ## Change the default dialect diff --git a/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md b/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md index 5516319370..fb5d91f66b 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md +++ b/content/develop/interact/search-and-query/advanced-concepts/query_syntax.md @@ -160,7 +160,7 @@ There is a new schema field type called `GEOSHAPE`, which can be specified as ei - `FLAT` for Cartesian X Y coordinates - `SPHERICAL` for geographic longitude and latitude coordinates. This is the default coordinate system. -Finally, there's new [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) syntax that allows you to query for polygons that either contain or are within a given geoshape. +Finally, there's new [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) syntax that allows you to query for polygons that either contain or are within a given geoshape. `@field:[{WITHIN|CONTAINS} $geometry] PARAMS 2 geometry {geometry}` @@ -178,7 +178,7 @@ Next, create the data structures that represent the geometries in the picture. HSET shape:1 t "this is my house" g "POLYGON((2 2, 2 8, 6 11, 10 8, 10 2, 2 2))" HSET shape:2 t "this is a square in my house" g "POLYGON((4 4, 4 6, 6 6, 6 4, 4 4))" ``` -Finally, use [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) to query the geometries. Note the use of `DIALECT 3`, which is required. Here are a few examples. +Finally, use [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) to query the geometries. Note the use of `DIALECT 3`, which is required. Here are a few examples. Search for a polygon that contains a specified point: @@ -239,7 +239,7 @@ Note that both the house and box shapes were returned. GEOSHAPE does not support JSON multi-value or SORTABLE options. 
{{< /alert >}} -For more examples, see the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) and [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command pages. +For more examples, see the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) and [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) command pages. ## Vector search diff --git a/content/develop/interact/search-and-query/advanced-concepts/scoring.md b/content/develop/interact/search-and-query/advanced-concepts/scoring.md index ff33c9a2f4..59cc810319 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/scoring.md +++ b/content/develop/interact/search-and-query/advanced-concepts/scoring.md @@ -23,7 +23,7 @@ Redis Stack comes with a few very basic scoring functions to evaluate document r If you prefer a custom scoring function, it is possible to add more functions using the [extension API]({{< relref "/develop/interact/search-and-query/administration/extensions" >}}). -The following is a list of the pre-bundled scoring functions available in Redis Stack and a short explanation about how they work. Each function is mentioned by registered name, which can be passed as a `SCORER` argument in [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search). +The following is a list of the pre-bundled scoring functions available in Redis Stack and a short explanation about how they work. Each function is mentioned by registered name, which can be passed as a `SCORER` argument in [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/). 
## TFIDF (default) diff --git a/content/develop/interact/search-and-query/advanced-concepts/sorting.md b/content/develop/interact/search-and-query/advanced-concepts/sorting.md index 8dabf01bd3..6d870bf737 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/sorting.md +++ b/content/develop/interact/search-and-query/advanced-concepts/sorting.md @@ -21,7 +21,7 @@ As of RediSearch 0.15, you can bypass the scoring function mechanism and order s ## Declaring sortable fields -When creating an index with [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), you can declare `TEXT`, `TAG`, `NUMERIC`, and `GEO` attributes as `SORTABLE`. When an attribute is sortable, you can order the results by its values with relatively low latency. When an attribute is not sortable, it can still be sorted by its values, but with increased latency. For example, in the following schema: +When creating an index with [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/), you can declare `TEXT`, `TAG`, `NUMERIC`, and `GEO` attributes as `SORTABLE`. When an attribute is sortable, you can order the results by its values with relatively low latency. When an attribute is not sortable, it can still be sorted by its values, but with increased latency. For example, in the following schema: ``` FT.CREATE users SCHEMA first_name TEXT last_name TEXT SORTABLE age NUMERIC SORTABLE diff --git a/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md b/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md index ce202a2691..8dee67d59c 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md +++ b/content/develop/interact/search-and-query/advanced-concepts/spellcheck.md @@ -27,11 +27,11 @@ In such cases, and as of v1.4, RediSearch can be used for generating alternative The alternatives for a misspelled term are generated from the corpus of already-indexed terms and, optionally, one or more custom dictionaries. 
Alternatives become spelling suggestions based on their respective Levenshtein distances from the misspelled term. Each spelling suggestion is given a normalized score based on its occurrences in the index. -To obtain the spelling corrections for a query, refer to the documentation of the [`FT.SPELLCHECK`]({{< baseurl >}}/commands/ft.spellcheck) command. +To obtain the spelling corrections for a query, refer to the documentation of the [`FT.SPELLCHECK`]({{< baseurl >}}/commands/ft.spellcheck/) command. ## Custom dictionaries -A dictionary is a set of terms. Dictionaries can be added with terms, have terms deleted from them, and have their entire contents dumped using the [`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd), [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel) and [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump) commands, respectively. +A dictionary is a set of terms. Dictionaries can be added with terms, have terms deleted from them, and have their entire contents dumped using the [`FT.DICTADD`]({{< baseurl >}}/commands/ft.dictadd/), [`FT.DICTDEL`]({{< baseurl >}}/commands/ft.dictdel/) and [`FT.DICTDUMP`]({{< baseurl >}}/commands/ft.dictdump/) commands, respectively. Dictionaries can be used to modify the behavior of spelling corrections by including or excluding their contents from potential spelling correction suggestions. 
diff --git a/content/develop/interact/search-and-query/advanced-concepts/stopwords.md b/content/develop/interact/search-and-query/advanced-concepts/stopwords.md index 6060347d94..8e7a50f45f 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/stopwords.md +++ b/content/develop/interact/search-and-query/advanced-concepts/stopwords.md @@ -35,7 +35,7 @@ The following words are treated as stop words by default: ## Overriding the default stop word list -Stop words for an index can be defined (or disabled completely) on index creation using the `STOPWORDS` argument with the [[`FT.CREATE`]({{< baseurl >}}/commands/ft.create) command. +Stop words for an index can be defined (or disabled completely) on index creation using the `STOPWORDS` argument with the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) command. The format is `STOPWORDS {number} {stopword} ...` where number is the number of stop words given. The `STOPWORDS` argument must come before the `SCHEMA` argument. For example: @@ -45,7 +45,7 @@ FT.CREATE myIndex STOPWORDS 3 foo bar baz SCHEMA title TEXT body TEXT ## Disable the use of stop words -Disabling stop words completely can be done by passing `STOPWORDS 0` to [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +Disabling stop words completely can be done by passing `STOPWORDS 0` to [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/). ## Avoiding stop word detection in search queries diff --git a/content/develop/interact/search-and-query/advanced-concepts/vectors.md b/content/develop/interact/search-and-query/advanced-concepts/vectors.md index 27e30f76a5..4d0f5aa354 100644 --- a/content/develop/interact/search-and-query/advanced-concepts/vectors.md +++ b/content/develop/interact/search-and-query/advanced-concepts/vectors.md @@ -16,7 +16,7 @@ title: Vectors weight: 14 --- -*Vector fields* allow you to use vector similarity queries in the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command. 
+*Vector fields* allow you to use vector similarity queries in the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) command. *Vector similarity* enables you to load, index, and query vectors stored as fields in Redis hashes or in JSON documents (via integration with the [JSON module]({{< relref "/develop/data-types/json/" >}})) Vector similarity provides these functionalities: @@ -190,7 +190,7 @@ JSON.SET 1 $ '{"foo":{"vec":[1,2,3,4]}, "bar":{"vec":[5,6,7,8]}}' ## Querying vector fields -You can use vector similarity queries in the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) query command. To use a vector similarity query, you must specify the option `DIALECT 2` or greater in the command itself, or set the `DEFAULT_DIALECT` option to `2` or greater, by either using the command [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) or when loading the `redisearch` module and passing it the argument `DEFAULT_DIALECT 2`. +You can use vector similarity queries in the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) query command. To use a vector similarity query, you must specify the option `DIALECT 2` or greater in the command itself, or set the `DEFAULT_DIALECT` option to `2` or greater, by either using the command [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) or when loading the `redisearch` module and passing it the argument `DEFAULT_DIALECT 2`. There are two types of vector queries: *KNN* and *range*: @@ -213,7 +213,7 @@ The `` part inside the square brackets needs to be in t KNN ( | $) @ $ [ |$] [...]] [ AS | $] ``` -Every `*_attribute` parameter should refer to an attribute in the [`PARAMS`]({{< baseurl >}}/commands/ft.search) section. +Every `*_attribute` parameter should refer to an attribute in the [`PARAMS`]({{< baseurl >}}/commands/ft.search/) section. * ` | $` - Number of requested results ("K"). 
@@ -312,7 +312,7 @@ Now, sort the results by their distance from the query vector: ``` FT.SEARCH idx "*=>[KNN 10 @vec $BLOB]" PARAMS 2 BLOB "\x12\xa9\xf5\x6c" SORTBY __vec_score DIALECT 2 ``` -Return the top 10 similar documents, use *query params* (see "params" section in [FT.SEARCH command]({{< baseurl >}}/commands/ft.search/)) for specifying `K` and `EF_RUNTIME` parameter, and set `EF_RUNTIME` value to 150 (assuming `vec` is an HNSW index): +Return the top 10 similar documents, use *query params* (see "params" section in [FT.SEARCH command]({{< baseurl >}}/commands/ft.search/)) for specifying `K` and `EF_RUNTIME` parameter, and set `EF_RUNTIME` value to 150 (assuming `vec` is an HNSW index): ``` FT.SEARCH idx "*=>[KNN $K @vec $BLOB EF_RUNTIME $EF]" PARAMS 6 BLOB "\x12\xa9\xf5\x6c" K 10 EF 150 DIALECT 2 ``` diff --git a/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md b/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md index b530414adc..2ec384f9a6 100644 --- a/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md +++ b/content/develop/interact/search-and-query/basic-constructs/configuration-parameters.md @@ -58,7 +58,7 @@ FT.CONFIG GET OPT1 FT.CONFIG GET * ``` -Values set using [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set) are not persisted after server restart. +Values set using [`FT.CONFIG SET`]({{< baseurl >}}/commands/ft.config-set/) are not persisted after server restart. ## RediSearch configuration parameters @@ -248,7 +248,7 @@ $ redis-server --loadmodule ./redisearch.so MAXDOCTABLESIZE 3000000 ### MAXSEARCHRESULTS -The maximum number of results to be returned by the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command if LIMIT is used. +The maximum number of results to be returned by the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) command if LIMIT is used. Setting value to `-1` will remove the limit. 
#### Default @@ -265,7 +265,7 @@ $ redis-server --loadmodule ./redisearch.so MAXSEARCHRESULTS 3000000 ### MAXAGGREGATERESULTS -The maximum number of results to be returned by the [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) command if LIMIT is used. +The maximum number of results to be returned by the [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/) command if LIMIT is used. Setting value to `-1` will remove the limit. #### Default @@ -479,11 +479,11 @@ $ redis-server --loadmodule ./redisearch.so GC_POLICY FORK FORK_GC_CLEAN_THRESHO ### UPGRADE_INDEX -This configuration is a special configuration option introduced to upgrade indices from v1.x RediSearch versions, otherwise known as legacy indices. This configuration option needs to be given for each legacy index, followed by the index name and all valid options for the index description (also referred to as the `ON` arguments for following hashes) as described on [ft.create api]({{< baseurl >}}/commands/ft.create). +This configuration is a special configuration option introduced to upgrade indices from v1.x RediSearch versions, otherwise known as legacy indices. This configuration option needs to be given for each legacy index, followed by the index name and all valid options for the index description (also referred to as the `ON` arguments for following hashes) as described on [ft.create api]({{< baseurl >}}/commands/ft.create/). #### Default -There is no default for index name, and the other arguments have the same defaults as with the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) API. +There is no default for index name, and the other arguments have the same defaults as with the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) API. 
#### Example @@ -525,7 +525,7 @@ $ redis-server --loadmodule ./redisearch.so OSS_GLOBAL_PASSWORD password ### DEFAULT_DIALECT -The default DIALECT to be used by [`FT.CREATE`]({{< baseurl >}}/commands/ft.create), [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate), [`FT.EXPLAIN`]({{< baseurl >}}/commands/ft.explain), [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli), and [`FT.SPELLCHECK`]({{< baseurl >}}/commands/ft.spellcheck). +The default DIALECT to be used by [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/), [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/), [`FT.EXPLAIN`]({{< baseurl >}}/commands/ft.explain/), [`FT.EXPLAINCLI`]({{< baseurl >}}/commands/ft.explaincli/), and [`FT.SPELLCHECK`]({{< baseurl >}}/commands/ft.spellcheck/). #### Default @@ -540,7 +540,7 @@ $ redis-server --loadmodule ./redisearch.so DEFAULT_DIALECT 2 {{% alert title="Notes" color="info" %}} * Vector search, added in v2.4.3, requires `DIALECT 2` or greater. -* Returning multiple values from [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) requires `DIALECT 3` or greater. +* Returning multiple values from [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) and [`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/) requires `DIALECT 3` or greater. {{% /alert %}} diff --git a/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md b/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md index 50318944cd..d5437e1e07 100644 --- a/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md +++ b/content/develop/interact/search-and-query/basic-constructs/field-and-type-options.md @@ -24,7 +24,7 @@ Redis Stack provides various field types that allow you to store and search diff Number fields are used to store non-textual, countable values. They can hold integer or floating-point values. 
Number fields are sortable, meaning you can perform range-based queries and retrieve documents based on specific numeric conditions. For example, you can search for documents with a price between a certain range or retrieve documents with a specific rating value. -You can add number fields to a schema in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) using this syntax: +You can add number fields to a schema in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) using this syntax: ``` FT.CREATE ... SCHEMA ... {field_name} NUMBER [SORTABLE] [NOINDEX] @@ -57,7 +57,7 @@ You can also use the following query syntax to perform more complex numeric quer Geo fields are used to store geographical coordinates such as longitude and latitude. They enable geospatial radius queries, which allow you to implement location-based search functionality in your applications such as finding nearby restaurants, stores, or any other points of interest. -You can add geo fields to the schema in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) using this syntax: +You can add geo fields to the schema in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) using this syntax: ``` FT.CREATE ... SCHEMA ... {field_name} GEO [SORTABLE] [NOINDEX] @@ -77,7 +77,7 @@ FT.SEARCH cities "@coords:[2.34 48.86 1000 km]" Vector fields are floating-point vectors that are typically generated by external machine learning models. These vectors represent unstructured data such as text, images, or other complex features. Redis Stack allows you to search for similar vectors using vector search algorithms like cosine similarity, Euclidean distance, and inner product. This enables you to build advanced search applications, recommendation systems, or content similarity analysis. -You can add vector fields to the schema in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) using this syntax: +You can add vector fields to the schema in [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) using this syntax: ``` FT.CREATE ... 
SCHEMA ... {field_name} VECTOR {algorithm} {count} [{attribute_name} {attribute_value} ...] diff --git a/content/develop/interact/search-and-query/basic-constructs/schema-definition.md b/content/develop/interact/search-and-query/basic-constructs/schema-definition.md index de4464062d..4813e6afe1 100644 --- a/content/develop/interact/search-and-query/basic-constructs/schema-definition.md +++ b/content/develop/interact/search-and-query/basic-constructs/schema-definition.md @@ -34,7 +34,7 @@ SCHEMA In this example, a schema is defined for an index named `idx` that will index all hash documents whose keyname starts with `blog:post:`. The schema includes the fields `title`, `content`, `author`, `created_date`, and `views`. The `TEXT` type indicates that the `title` and `content` fields are text-based, the `TAG` type is used for the `author` field, and the `NUMERIC` type is used for the `created_date` and `views` fields. Additionally, a weight of 5.0 is assigned to the `title` field to give it more relevance in search results, and `created_date` is marked as `SORTABLE` to enable sorting based on this field. -You can learn more about the available field types and options on the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) page. +You can learn more about the available field types and options on the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) page. ## More schema definition examples @@ -116,4 +116,4 @@ SCHEMA ``` -You can learn more about the available field types and options on the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) page. \ No newline at end of file +You can learn more about the available field types and options on the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) page. 
\ No newline at end of file diff --git a/content/develop/interact/search-and-query/deprecated/payloads.md b/content/develop/interact/search-and-query/deprecated/payloads.md index 1e6678ec0d..a25eb87da7 100644 --- a/content/develop/interact/search-and-query/deprecated/payloads.md +++ b/content/develop/interact/search-and-query/deprecated/payloads.md @@ -63,7 +63,7 @@ If no payload was set to the document, it is simply NULL. If it is not, you can When searching, it is possible to request the document payloads from the engine. -This is done by adding the keyword `WITHPAYLOADS` to [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search). +This is done by adding the keyword `WITHPAYLOADS` to [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/). If `WITHPAYLOADS` is set, the payloads follow the document id in the returned result. If `WITHSCORES` is set as well, the payloads follow the scores. e.g.: diff --git a/content/develop/interact/search-and-query/indexing/_index.md b/content/develop/interact/search-and-query/indexing/_index.md index 2dd7848c9b..bef287ae8f 100644 --- a/content/develop/interact/search-and-query/indexing/_index.md +++ b/content/develop/interact/search-and-query/indexing/_index.md @@ -28,14 +28,14 @@ Before you can index and search JSON documents, you need a database with either: ## Create index with JSON schema -When you create an index with the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create) command, include the `ON JSON` keyword to index any existing and future JSON documents stored in the database. +When you create an index with the [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/) command, include the `ON JSON` keyword to index any existing and future JSON documents stored in the database. To define the `SCHEMA`, you can provide [JSONPath]({{< relref "/develop/data-types/json/path" >}}) expressions. The result of each JSONPath expression is indexed and associated with a logical name called an `attribute` (previously known as a `field`). 
You can use these attributes in queries. {{% alert title="Note" color="info" %}} -Note: `attribute` is optional for [`FT.CREATE`]({{< baseurl >}}/commands/ft.create). +Note: `attribute` is optional for [`FT.CREATE`]({{< baseurl >}}/commands/ft.create/). {{% /alert %}} Use the following syntax to create a JSON index: @@ -56,7 +56,7 @@ See [Index limitations](#index-limitations) for more details about JSON index `S After you create an index, Redis Stack automatically indexes any existing, modified, or newly created JSON documents stored in the database. For existing documents, indexing runs asynchronously in the background, so it can take some time before the document is available. Modified and newly created documents are indexed synchronously, so the document will be available by the time the add or modify command finishes. -You can use any JSON write command, such as [`JSON.SET`]({{< baseurl >}}/commands/json.set) and [`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend), to create or modify JSON documents. +You can use any JSON write command, such as [`JSON.SET`]({{< baseurl >}}/commands/json.set/) and [`JSON.ARRAPPEND`]({{< baseurl >}}/commands/json.arrappend/), to create or modify JSON documents. The following examples use these JSON documents to represent individual inventory items. 
@@ -100,7 +100,7 @@ Item 2 JSON document: } ``` -Use [`JSON.SET`]({{< baseurl >}}/commands/json.set) to store these documents in the database: +Use [`JSON.SET`]({{< baseurl >}}/commands/json.set/) to store these documents in the database: ```sql 127.0.0.1:6379> JSON.SET item:1 $ '{"name":"Noise-cancelling Bluetooth headphones","description":"Wireless Bluetooth headphones with noise-cancelling technology","connection":{"wireless":true,"type":"Bluetooth"},"price":99.98,"stock":25,"colors":["black","silver"],"embedding":[0.87,-0.15,0.55,0.03]}' @@ -109,12 +109,12 @@ Use [`JSON.SET`]({{< baseurl >}}/commands/json.set) to store these documents in "OK" ``` -Because indexing is synchronous in this case, the documents will be available on the index as soon as the [`JSON.SET`]({{< baseurl >}}/commands/json.set) command returns. +Because indexing is synchronous in this case, the documents will be available on the index as soon as the [`JSON.SET`]({{< baseurl >}}/commands/json.set/) command returns. Any subsequent queries that match the indexed content will return the document. ## Search the index -To search the index for JSON documents, use the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command. +To search the index for JSON documents, use the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) command. You can search any attribute defined in the `SCHEMA`. For example, use this query to search for items with the word "earbuds" in the name: @@ -170,7 +170,7 @@ And lastly, search for the Bluetooth headphones that are most similar to an imag For more information about search queries, see [Search query syntax]({{< relref "/develop/interact/search-and-query/advanced-concepts/query_syntax" >}}). {{% alert title="Note" color="info" %}} -[`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) queries require `attribute` modifiers. Don't use JSONPath expressions in queries because the query parser doesn't fully support them. 
+[`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) queries require `attribute` modifiers. Don't use JSONPath expressions in queries because the query parser doesn't fully support them. {{% /alert %}} ## Index JSON arrays as TAG @@ -223,7 +223,7 @@ Now you can do full text search for light colored headphones: ``` ### Limitations -- When a JSONPath may lead to multiple values and not only to a single array, e.g., when a JSONPath contains wildcards, etc., specifying `SLOP` or `INORDER` in [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) will return an error, since the order of the values matching the JSONPath is not well defined, leading to potentially inconsistent results. +- When a JSONPath may lead to multiple values and not only to a single array, e.g., when a JSONPath contains wildcards, etc., specifying `SLOP` or `INORDER` in [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) will return an error, since the order of the values matching the JSONPath is not well defined, leading to potentially inconsistent results. For example, using a JSONPath such as `$..b[*]` on a JSON value such as ```json @@ -252,7 +252,7 @@ Now you can do full text search for light colored headphones: ### Handling phrases in different array slots: -When indexing, a predefined delta is used to increase positional offsets between array slots for multiple text values. This delta controls the level of separation between phrases in different array slots (related to the `SLOP` parameter of [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search)). +When indexing, a predefined delta is used to increase positional offsets between array slots for multiple text values. This delta controls the level of separation between phrases in different array slots (related to the `SLOP` parameter of [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/)). This predefined value is set by the configuration parameter `MULTI_TEXT_SLOP` (at module load-time). The default value is 100. 
## Index JSON arrays as NUMERIC @@ -502,7 +502,7 @@ You can also search for items with a Bluetooth connection type: ## Field projection -[`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) returns the entire JSON document by default. If you want to limit the returned search results to specific attributes, you can use field projection. +[`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) returns the entire JSON document by default. If you want to limit the returned search results to specific attributes, you can use field projection. ### Return specific attributes @@ -579,7 +579,7 @@ This query returns the field as the alias `"stock"` instead of the JSONPath expr You can [highlight]({{< relref "/develop/interact/search-and-query/advanced-concepts/highlight" >}}) relevant search terms in any indexed `TEXT` attribute. -For [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search), you have to explicitly set which attributes you want highlighted after the `RETURN` and `HIGHLIGHT` parameters. +For [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/), you have to explicitly set which attributes you want highlighted after the `RETURN` and `HIGHLIGHT` parameters. Use the optional `TAGS` keyword to specify the strings that will surround (or highlight) the matching search terms. @@ -630,7 +630,7 @@ This example uses aggregation to calculate a 10% price discount for each item an ``` {{% alert title="Note" color="info" %}} -[`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate) queries require `attribute` modifiers. Don't use JSONPath expressions in queries, except with the `LOAD` option, because the query parser doesn't fully support them. +[`FT.AGGREGATE`]({{< baseurl >}}/commands/ft.aggregate/) queries require `attribute` modifiers. Don't use JSONPath expressions in queries, except with the `LOAD` option, because the query parser doesn't fully support them. 
{{% /alert %}} ## Index limitations diff --git a/content/develop/interact/search-and-query/query/_index.md b/content/develop/interact/search-and-query/query/_index.md index 2e984bc123..e03223030c 100644 --- a/content/develop/interact/search-and-query/query/_index.md +++ b/content/develop/interact/search-and-query/query/_index.md @@ -16,7 +16,7 @@ title: Query data weight: 5 --- -Redis Stack distinguishes between the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) and [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) query commands. You should use [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) if you want to perform selections and projections only. If you also need to apply mapping functions, group, or aggregate data, use the [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) command. +Redis Stack distinguishes between the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) and [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) query commands. You should use [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) if you want to perform selections and projections only. If you also need to apply mapping functions, group, or aggregate data, use the [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) command. * **Selection**: A selection allows you to return all documents that fulfill specific criteria. * **Projection**: Projections are used to return specific fields of the result set. You can also map/project to calculated field values. 
@@ -31,7 +31,7 @@ Here is a short SQL comparison using the [bicycle dataset](./data/bicycles.txt):
 | Calculated projection| `SELECT id, price-price*0.1 AS discounted FROM bicycles`| `FT.AGGREGATE idx:bicycle "*" LOAD 2 __key price APPLY "@price-@price*0.1" AS discounted`|
 | Aggregation | `SELECT condition, AVG(price) AS avg_price FROM bicycles GROUP BY condition` | `FT.AGGREGATE idx:bicycle "*" GROUPBY 1 @condition REDUCE AVG 1 @price AS avg_price` |
 
-The following articles provide an overview of how to query data with the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command:
+The following articles provide an overview of how to query data with the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command:
 
 * [Exact match queries]({{< relref "/develop/interact/search-and-query/query/exact-match" >}})
 * [Range queries]({{< relref "/develop/interact/search-and-query/query/range" >}})
@@ -40,6 +40,6 @@ The following articles provide an overview of how to query data with the [FT.SEA
 * [Vector search]({{< relref "/develop/interact/search-and-query/query/vector-search" >}})
 * [Combined queries]({{< relref "/develop/interact/search-and-query/query/combined" >}})
 
-You can find further details about aggregation queries with [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) in the following article:
+You can find further details about aggregation queries with [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) in the following article:
 
 * [Aggregation queries]({{< relref "/develop/interact/search-and-query/query/aggregation" >}})
\ No newline at end of file
diff --git a/content/develop/interact/search-and-query/query/aggregation.md b/content/develop/interact/search-and-query/query/aggregation.md
index 1e2efc15de..90fa50c36f 100644
--- a/content/develop/interact/search-and-query/query/aggregation.md
+++ b/content/develop/interact/search-and-query/query/aggregation.md
@@ -21,7 +21,7 @@ An aggregation query allows you to perform the following actions:
 
 - Group data
based on field values.
 - Apply aggregation functions on the grouped data.
 
-This article explains the basic usage of the [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) command. For further details, see the [command specification]({{< baseurl >}}/commands/ft.aggregate/) and the [aggregations reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}}).
+This article explains the basic usage of the [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/) command. For further details, see the [command specification]({{< baseurl >}}/commands/ft.aggregate/) and the [aggregations reference documentation]({{< relref "/develop/interact/search-and-query/advanced-concepts/aggregations" >}}).
 
 The examples in this article use a schema with the following fields:
 
@@ -40,7 +40,7 @@ FT.AGGREGATE index "query_expr" LOAD n "field_1" .. "field_n" APPLY "function_ex
 
 Here is a more detailed explanation of the query syntax:
 
-1. **Query expression**: you can use the same query expressions as you would use with the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command. You can substitute `query_expr` with any of the expressions explained in the articles of this [query topic]({{< relref "/develop/interact/search-and-query/query/" >}}). Vector search queries are an exception. You can't combine a vector search with an aggregation query.
+1. **Query expression**: you can use the same query expressions as you would use with the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) command. You can substitute `query_expr` with any of the expressions explained in the articles of this [query topic]({{< relref "/develop/interact/search-and-query/query/" >}}). Vector search queries are an exception. You can't combine a vector search with an aggregation query.
 2. **Loaded fields**: if field values weren't already loaded into the aggregation pipeline, you can force their presence via the `LOAD` clause.
This clause takes the number of fields (`n`), followed by the field names (`"field_1" .. "field_n"`).
 3. **Mapping function**: this mapping function operates on the field values. A specific field is referenced as `@field_name` within the function expression. The result is returned as `result_field`.
@@ -113,7 +113,7 @@ FT.AGGREGATE idx:bicycle "*" LOAD 1 price APPLY "@price<1000" AS price_category
 ```
 
 {{% alert title="Note" color="warning" %}}
-You can also create more complex aggregation pipelines with [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/). Applying multiple reduction functions under one `GROUPBY` clause is possible. In addition, you can also chain groupings and mix in additional mapping steps (e.g., `GROUPBY ... REDUCE ... APPLY ... GROUPBY ... REDUCE`)
+You can also create more complex aggregation pipelines with [FT.AGGREGATE]({{< baseurl >}}/commands/ft.aggregate/). Applying multiple reduction functions under one `GROUPBY` clause is possible. In addition, you can also chain groupings and mix in additional mapping steps (e.g., `GROUPBY ... REDUCE ... APPLY ... GROUPBY ... REDUCE`)
 {{% /alert %}}
 
diff --git a/content/develop/interact/search-and-query/query/combined.md b/content/develop/interact/search-and-query/query/combined.md
index 075c6ad0cb..87629f2f21 100644
--- a/content/develop/interact/search-and-query/query/combined.md
+++ b/content/develop/interact/search-and-query/query/combined.md
@@ -121,7 +121,7 @@ FT.SEARCH idx:bicycle "@price:[500 1000] -@condition:{new}"
 
 ## Numeric filter
 
-The [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command allows you to combine any query expression with a numeric filter.
+The [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command allows you to combine any query expression with a numeric filter.
```
 FT.SEARCH index "expr" FILTER numeric_field start end
diff --git a/content/develop/interact/search-and-query/query/geo-spatial.md b/content/develop/interact/search-and-query/query/geo-spatial.md
index 13539a1901..ef7d45de4a 100644
--- a/content/develop/interact/search-and-query/query/geo-spatial.md
+++ b/content/develop/interact/search-and-query/query/geo-spatial.md
@@ -31,7 +31,7 @@ Redis Stack version 7.2.0 or higher is required to use the `GEOSHAPE` field type
 
 ## Radius
 
-You can construct a radius query by passing the center coordinates (longitude, latitude), the radius, and the distance unit to the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command.
+You can construct a radius query by passing the center coordinates (longitude, latitude), the radius, and the distance unit to the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command.
 
 ```
 FT.SEARCH index "@geo_field:[lon lat radius unit]"
diff --git a/content/develop/interact/search-and-query/query/range.md b/content/develop/interact/search-and-query/query/range.md
index 0e6e672938..7f3782a2d2 100644
--- a/content/develop/interact/search-and-query/query/range.md
+++ b/content/develop/interact/search-and-query/query/range.md
@@ -37,13 +37,13 @@ The values `-inf`, `inf`, and `+inf` are valid values that allow you to define o
 
 An open-range query can lead to a large result set.
 
-By default, [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) returns only the first ten results. The `LIMIT` argument helps you to scroll through the result set. The `SORTBY` argument ensures that the documents in the result set are returned in the specified order.
+By default, [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) returns only the first ten results. The `LIMIT` argument helps you to scroll through the result set. The `SORTBY` argument ensures that the documents in the result set are returned in the specified order.
```
 FT.SEARCH index "@field:[start end]" SORTBY field LIMIT page_start page_end
 ```
 
-You can find further details about using the `LIMIT` and `SORTBY` in the [[`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) command reference](/commands/ft.search/).
+You can find further details about using the `LIMIT` and `SORTBY` in the [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) command reference.
 
 ## Examples
 
diff --git a/content/develop/interact/search-and-query/query/vector-search.md b/content/develop/interact/search-and-query/query/vector-search.md
index 344c18b09b..aa8f1056e8 100644
--- a/content/develop/interact/search-and-query/query/vector-search.md
+++ b/content/develop/interact/search-and-query/query/vector-search.md
@@ -28,7 +28,7 @@ The examples in this article use a schema with the following fields:
 
 ## K-neareast neighbours (KNN)
 
-The Redis command [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) takes the index name, the query string, and additional query parameters as arguments. You need to pass the number of nearest neighbors, the vector field name, and the vector's binary representation in the following way:
+The Redis command [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) takes the index name, the query string, and additional query parameters as arguments. You need to pass the number of nearest neighbors, the vector field name, and the vector's binary representation in the following way:
 
 ```
 FT.SEARCH index "(*)=>[KNN num_neighbours @field $vector]" PARAMS 2 vector "binary_data" DIALECT 2
@@ -42,7 +42,7 @@ Here is a more detailed explanation of this query:
 4. **Vector binary data**: You need to use the `PARAMS` argument to substitute `$vector` with the binary representation of the vector. The value `2` indicates that `PARAMS` is followed by two arguments, the parameter name `vector` and the parameter value.
 5. **Dialect**: The vector search feature has been available since version two of the query dialect.
-You can read more about the `PARAMS` argument in the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command reference.
+You can read more about the `PARAMS` argument in the [FT.SEARCH]({{< baseurl >}}/commands/ft.search/) command reference.
 
 The following example shows you how to query for three bikes based on their description embeddings, and by using the field alias `vector`. The result is returned in ascending order based on the distance. You can see that the query only returns the fields `__vector_score` and `description`. The field `__vector_score` is present by default. Because you can have multiple vector fields in your schema, the vector score field name depends on the name of the vector field. If you change the field name `@vector` to `@foo`, the score field name changes to `__foo_score`.
 
@@ -88,7 +88,7 @@ Here is a more detailed explanation of this query:
 
 {{% alert title="Note" color="warning" %}}
-By default, [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search) returns only the first ten results. The [range query article]({{< relref "/develop/interact/search-and-query/query/range" >}}) explains to you how to scroll through the result set.
+By default, [`FT.SEARCH`]({{< baseurl >}}/commands/ft.search/) returns only the first ten results. The [range query article]({{< relref "/develop/interact/search-and-query/query/range" >}}) explains to you how to scroll through the result set.
 {{% /alert %}}
 
 The example below shows a radius query that returns the description and the distance within a radius of `0.5`. The result is sorted by the distance.