diff --git a/.circleci/config.yml b/.circleci/config.yml index 524aca56..f920307a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -38,7 +38,7 @@ commands: steps: - run: name: Run mock tests - command: cargo test --lib --features mocks --no-default-features + command: cargo test --lib --features "mocks i-keys" --no-default-features test_default_features: steps: - checkout @@ -111,6 +111,18 @@ commands: name: Run tests with unix socket features command: source tests/environ && tests/runners/unix-socket.sh - save-cargo-deps-cache + test_valkey: + steps: + - checkout + - restore-cargo-deps-cache + - run: + name: Run tests against local valkey deployments + command: | + source tests/environ + USE_VALKEY=1 tests/scripts/install_redis_centralized.sh + USE_VALKEY=1 tests/scripts/install_redis_clustered.sh + tests/runners/default-features.sh + - save-cargo-deps-cache jobs: test-default-nil-types-7_2: @@ -196,18 +208,37 @@ jobs: REDIS_VERSION: 7.2.4 steps: - test_sentinel + test-valkey-7_2: + machine: + image: ubuntu-2204:2022.10.2 + docker_layer_caching: true + resource_class: medium + environment: + REDIS_VERSION: 7.2.4 + steps: + - test_valkey test-misc: docker: - - image: cimg/rust:1.72.1 + - image: cimg/rust:1.77.1 environment: CARGO_NET_GIT_FETCH_WITH_CLI: true steps: - checkout - build_docs - test_mocks + check-all-interface-features: + docker: + - image: cimg/rust:1.77.1 + environment: + CARGO_NET_GIT_FETCH_WITH_CLI: true + steps: + - checkout + - run: + name: Check all features + command: tests/scripts/check_features.sh clippy-lint: docker: - - image: cimg/rust:1.75.0 + - image: cimg/rust:1.77.1 environment: CARGO_NET_GIT_FETCH_WITH_CLI: true steps: @@ -231,4 +262,5 @@ workflows: - test-misc - test-cluster-tls-features-7_2 - test-cluster-rustls-features-7_2 - - clippy-lint \ No newline at end of file + - clippy-lint + - check-all-interface-features \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md 
b/.github/ISSUE_TEMPLATE/bug_report.md index 0a86afdd..7900d421 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -7,20 +7,19 @@ assignees: aembke --- +Fred version - X Redis version - X Platform - linux|mac|windows -Using Docker and/or Kubernetes - yes|no Deployment type - cluster|sentinel|centralized **Describe the bug** **To Reproduce** Steps to reproduce the behavior: + 1. X 2. X **Logs** -(Set `RUST_LOG=fred=trace` and run with `--features debug-ids`) +(If possible set `RUST_LOG=fred=trace` and run with `--features debug-ids`) -**Additional context** -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..79630d7c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,14 @@ +--- +name: Feature Request +about: Discuss a feature request +title: "[Feature]" +labels: enhancement +assignees: aembke + +--- + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.gitignore b/.gitignore index a49f1a58..4d26f224 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,5 @@ redis_centralized.conf redis_server.pid tests/users.acl tests/docker/overrides/* -!tests/docker/overrides/.gitkeep \ No newline at end of file +!tests/docker/overrides/.gitkeep +process.yml \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 1472156c..fe0e0216 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,28 @@ +## 9.0.0 + +This version should reduce compilation times for most use cases. + +* **RPITIT / AFIT** +* Set MSRV to 1.75 +* Upgrade `rustls` to 0.23 +* Upgrade `redis-protocol` to 5.0.0 +* Split public interfaces with new feature flags. +* Add `ClusterDiscoveryPolicy` configuration options. 
+* Add `SORT` and `SORT_RO` +* Add `cluster_hash` policy to `Options` +* Change tracing span names to + follow [OpenTelemetry naming conventions](https://opentelemetry.io/docs/specs/semconv/general/attribute-naming/). + +### Upgrading from 8.x + +* Callers that use certain managed services or Kubernetes-based deployment models should review the + new `ClusterDiscoveryPolicy`. +* Double-check the new feature flags. The `codec` feature was also moved + to [redis-protocol](https://github.com/aembke/redis-protocol.rs). +* Rustls - Check the new [aws-lc-rs](https://aws.github.io/aws-lc-rs/requirements/index.html) requirements or switch + back to `rustls/ring`. +* Note the new [tracing span names](src/trace/README.md). + ## 8.0.6 * Add `TransactionInterface` to `RedisPool` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b6eb2362..84f4d41d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,30 +1,44 @@ Contributing =========== -This document gives some background on how the library is structured and how to contribute. It focuses primarily on how to add new commands. See the [design doc](docs/README.md) for more background on how the library is designed. +This document gives some background on how the library is structured and how to contribute. It focuses primarily on how +to add new commands. See the [design doc](docs/README.md) for more info. # General * Use 2 spaces instead of tabs. * Run rustfmt and clippy before submitting any changes. -* Clean up any compiler warnings. -* Use the `async` syntax rather than `impl Future` where possible. * Please use [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summary). # File Structure The code has the following structure: -* The [commands](src/commands) folder contains the public interface and private implementation for each of the Redis commands, organized by category. This is roughly the same categorization used by the [public docs](https://redis.io/commands/). 
Each of these public command category interfaces are exposed as a trait with default implementations for each command. -* The [clients](src/clients) folder contains public client structs that implement and/or override the traits from [the command category traits folder](src/commands/impls). The [interfaces](src/interfaces.rs) file contains the shared traits required by most of the command category traits, such as `ClientLike`. -* The [monitor](src/monitor) folder contains the implementation of the `MONITOR` command and the parser for the response stream. -* The [protocol](src/protocol) folder contains the implementation of the base `Connection` struct and the logic for splitting a connection to interact with reader and writer halves in separate tasks. The [TLS interface](src/protocol/tls.rs) is also implemented here. -* The [router](src/router) folder contains the logic that implements the sentinel and cluster interfaces. Clients interact with this struct via a message passing interface. The interface exposed by the `Router` attempts to hide all the complexity associated with sentinel or clustered deployments. -* The [trace](src/trace) folder contains the tracing implementation. Span IDs are manually tracked on each command as they move across tasks. -* The [types](src/types) folder contains the type definitions used by the public interface. The Redis interface is relatively loosely typed but Rust can support more strongly typed interfaces. The types in this module aim to support an optional but more strongly typed interface for callers. -* The [modules](src/modules) folder contains smaller helper interfaces such as a lazy [Backchannel](src/modules/backchannel.rs) connection interface and the [response type conversion logic](src/modules/response.rs). - -## Examples +* The [commands](src/commands) folder contains the public interface and private implementation for each of the Redis + commands, organized by category. 
This is roughly the same categorization used by + the [public docs](https://redis.io/commands/). Each of these public command category interfaces are exposed as a trait + with default implementations for each command. +* The [clients](src/clients) folder contains public client structs that implement and/or override the traits + from [the command category traits folder](src/commands/impls). The [interfaces](src/interfaces.rs) file contains the + shared traits required by most of the command category traits, such as `ClientLike`. +* The [monitor](src/monitor) folder contains the implementation of the `MONITOR` command and the parser for the response + stream. +* The [protocol](src/protocol) folder contains the implementation of the base `Connection` struct and the logic for + splitting a connection to interact with reader and writer halves in separate tasks. + The [TLS interface](src/protocol/tls.rs) is also implemented here. +* The [router](src/router) folder contains the logic that implements the sentinel and cluster interfaces. Clients + interact with this struct via a message passing interface. The interface exposed by the `Router` attempts to hide all + the complexity associated with sentinel or clustered deployments. +* The [trace](src/trace) folder contains the tracing implementation. Span IDs are manually tracked on each command as + they move across tasks. +* The [types](src/types) folder contains the type definitions used by the public interface. The Redis interface is + relatively loosely typed but Rust can support more strongly typed interfaces. The types in this module aim to support + an optional but more strongly typed interface for callers. +* The [modules](src/modules) folder contains smaller helper interfaces such as a + lazy [Backchannel](src/modules/backchannel.rs) connection interface and + the [response type conversion logic](src/modules/response.rs). 
+ +## Examples ## Add A New Command @@ -40,9 +54,8 @@ pub enum RedisCommandKind { } impl RedisCommandKind { - // .. - + pub fn to_str_debug(&self) -> &str { match *self { // .. @@ -50,9 +63,9 @@ impl RedisCommandKind { // .. } } - + // .. - + pub fn cmd_str(&self) -> Str { match *self { // .. @@ -60,7 +73,7 @@ impl RedisCommandKind { // .. } } - + // .. } ``` @@ -76,7 +89,7 @@ pub async fn mget(client: &C, keys: MultipleKeys) -> Result(client: &C, keys: MultipleKeys) -> Result { - utils::check_empty_keys(&keys)?; + utils::check_empty_keys(&keys)?; args_values_cmd(client, keys.into_values()).await } ``` -3. Create the public function in the [src/commands/interfaces/keys.rs](src/commands/interfaces/keys.rs) file. +3. Create the public function in the [src/commands/interfaces/keys.rs](src/commands/interfaces/keys.rs) file. ```rust // ... -#[async_trait] pub trait KeysInterface: ClientLike { - + // ... /// Returns the values of all specified keys. For every key that does not hold a string value or does not exist, the special value nil is returned. /// /// - async fn mget(&self, keys: K) -> RedisResult - where - R: FromRedis, - K: Into + Send, + fn mget(&self, keys: K) -> impl Future> + Send + where + R: FromRedis, + K: Into + Send, { - into!(keys); - commands::keys::mget(self, keys).await?.convert() + async move { + into!(keys); + commands::keys::mget(self, keys).await?.convert() + } } // ... } @@ -133,7 +147,8 @@ impl KeysInterface for Transaction {} # Adding Tests -Integration tests are in the [tests/integration](tests/integration) folder organized by category. See the tests [README](tests/README.md) for more information. +Integration tests are in the [tests/integration](tests/integration) folder organized by category. See the +tests [README](tests/README.md) for more information. 
Using `MGET` as an example again: @@ -141,10 +156,6 @@ Using `MGET` as an example again: ```rust pub async fn should_mget_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "a{1}"); - check_null!(client, "b{1}"); - check_null!(client, "c{1}"); - let expected: Vec<(&str, RedisValue)> = vec![("a{1}", 1.into()), ("b{1}", 2.into()), ("c{1}", 3.into())]; for (key, value) in expected.iter() { let _: () = client.set(key, value.clone(), None, None, false).await?; @@ -160,7 +171,6 @@ pub async fn should_mget_values(client: RedisClient, _: RedisConfig) -> Result<( ```rust mod keys { - // .. centralized_test!(keys, should_mget_values); } @@ -171,7 +181,6 @@ mod keys { ```rust mod keys { - // .. cluster_test!(keys, should_mget_values); } diff --git a/Cargo.toml b/Cargo.toml index e6201d80..c7724b33 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fred" -version = "8.0.6" +version = "9.0.0" authors = ["Alec Embke "] edition = "2021" description = "An async Redis client built on Tokio." 
@@ -10,43 +10,22 @@ homepage = "https://github.com/aembke/fred.rs" keywords = ["redis", "async", "cluster", "sentinel"] categories = ["asynchronous", "database", "web-programming"] license = "MIT" +rust-version = "1.75" +exclude = ["tests", ".circleci", "bin", ".github", "docs"] [package.metadata.docs.rs] -features = [ - "serde-json", - "subscriber-client", - "mocks", - "metrics", - "dns", - "enable-rustls", - "enable-native-tls", - "full-tracing", - "partial-tracing", - "blocking-encoding", - "custom-reconnect-errors", - "monitor", - "sentinel-client", - "sentinel-auth", - "replicas", - "client-tracking", - "default-nil-types", - "codec", - "redis-json", - "sha-1", - "transactions", - "time-series" -] +all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -arc-swap = "1.6" +arc-swap = "1.7" tokio = { version = "1.34", features = ["net", "sync", "rt", "rt-multi-thread", "macros"] } tokio-util = { version = "0.7", features = ["codec"] } -bytes = "1.5" +bytes = "1.6" bytes-utils = "0.1" futures = { version = "0.3", features = ["std"] } parking_lot = "0.12" -redis-protocol = { version = "4.1", features = ["decode-mut"] } +redis-protocol = { version = "5.0.1", features = ["resp2", "resp3", "bytes"] } log = "0.4" float-cmp = "0.9" url = "2.4" @@ -57,24 +36,23 @@ semver = "1.0" socket2 = "0.5" urlencoding = "2.1" crossbeam-queue = "0.3" -async-trait = { version = "0.1" } -rustls = { version = "0.22.1", optional = true } +rustls = { version = "0.23", optional = true } native-tls = { version = "0.2", optional = true } tokio-native-tls = { version = "0.3", optional = true } tracing = { version = "0.1", optional = true } tracing-futures = { version = "0.2", optional = true } nom = { version = "7.1", optional = true } serde_json = { version = "1", optional = true } -tokio-rustls = { version = "0.25.0", optional = true } -webpki = { package = "rustls-webpki", version = "0.102.0", features = ["alloc", "std"], optional = true } -rustls-native-certs = { version = 
"0.7.0", optional = true } +tokio-rustls = { version = "0.26", optional = true } +rustls-native-certs = { version = "0.7", optional = true } trust-dns-resolver = { version = "0.23", optional = true } +async-trait = { version = "0.1" } [dev-dependencies] base64 = "0.22.0" subprocess = "0.2" pretty_env_logger = "0.5" -bollard = "0.15" +bollard = "0.16" serde = "1.0" tokio-stream = { version = "0.1", features = ["sync"] } axum = { version = "0.7", features = ["macros"] } @@ -84,6 +62,14 @@ doc = true name = "fred" test = true +[[example]] +name = "misc" +required-features = ["i-all"] + +[[example]] +name = "scan" +required-features = ["i-all"] + [[example]] name = "monitor" required-features = ["monitor"] @@ -102,7 +88,11 @@ required-features = ["serde-json"] [[example]] name = "redis_json" -required-features = ["redis-json"] +required-features = ["i-redis-json"] + +[[example]] +name = "replicas" +required-features = ["i-std", "i-cluster", "replicas"] [[example]] name = "dns" @@ -110,29 +100,29 @@ required-features = ["dns"] [[example]] name = "client_tracking" -required-features = ["client-tracking"] +required-features = ["i-tracking", "i-std"] [[example]] name = "lua" -required-features = ["sha-1"] +required-features = ["sha-1", "i-scripts"] [[example]] name = "events" -required-features = ["tokio-stream/sync"] +required-features = ["tokio-stream/sync", "i-std"] [[example]] name = "transactions" -required-features = ["transactions"] +required-features = ["transactions", "i-std"] [features] -default = ["transactions"] +default = ["transactions", "i-std"] transactions = [] serde-json = ["serde_json"] -subscriber-client = [] +subscriber-client = ["i-pubsub"] metrics = [] mocks = [] dns = ["trust-dns-resolver", "trust-dns-resolver/tokio"] -enable-rustls = ["rustls", "tokio-rustls", "rustls-native-certs", "webpki"] +enable-rustls = ["rustls", "tokio-rustls", "rustls-native-certs"] enable-native-tls = ["native-tls", "tokio-native-tls"] vendored-openssl = 
["enable-native-tls", "native-tls/vendored"] full-tracing = ["partial-tracing"] @@ -143,14 +133,45 @@ monitor = ["nom"] sentinel-client = [] sentinel-auth = [] replicas = [] -client-tracking = [] default-nil-types = [] -codec = [] unix-sockets = [] -# Redis Stack Features -redis-stack = ["redis-json", "time-series"] -redis-json = ["serde-json"] -time-series = [] +# Standard Redis Interfaces +i-all = [ + "i-acl", "i-client", "i-cluster", + "i-config", "i-geo", "i-hashes", + "i-hyperloglog", "i-keys", "i-lists", + "i-scripts", "i-memory", "i-pubsub", + "i-server", "i-streams", "i-tracking", + "i-sorted-sets", "i-slowlog", "i-sets" +] +i-std = [ + "i-hashes", "i-keys", + "i-lists", "i-sets", + "i-streams", "i-pubsub", + "i-sorted-sets", "i-server" +] +i-acl = [] +i-client = [] +i-cluster = [] +i-config = [] +i-geo = ["i-sorted-sets"] +i-hashes = [] +i-hyperloglog = [] +i-keys = [] +i-lists = [] +i-scripts = [] +i-memory = [] +i-pubsub = [] +i-server = [] +i-sets = [] +i-slowlog = [] +i-sorted-sets = [] +i-streams = [] +i-tracking = ["i-client", "i-pubsub"] +# Redis Stack Interfaces +i-redis-stack = ["i-redis-json", "i-time-series"] +i-redis-json = ["serde-json"] +i-time-series = [] # Debugging Features debug-ids = [] network-logs = [] diff --git a/LICENSE-APACHE b/LICENSE-APACHE index 4d8cd551..408f6d20 100644 --- a/LICENSE-APACHE +++ b/LICENSE-APACHE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2023 Alec Embke + Copyright 2024 Alec Embke Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/LICENSE-MIT b/LICENSE-MIT index c80fa4b0..f3ac9bbe 100644 --- a/LICENSE-MIT +++ b/LICENSE-MIT @@ -1,4 +1,4 @@ -Copyright 2023 Alec Embke +Copyright 2024 Alec Embke Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/README.md b/README.md index 524eb768..7932e01e 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ use fred::prelude::*; #[tokio::main] async fn main() -> Result<(), RedisError> { - let client = RedisClient::default(); + let client = RedisClient::default(); client.init().await?; // convert responses to many common Rust types @@ -44,48 +44,83 @@ See the [examples](https://github.com/aembke/fred.rs/tree/main/examples) for mor * Clustered, centralized, and sentinel Redis deployments. * TLS via `native-tls` or `rustls`. * Unix sockets. -* Optional reconnection logic with multiple backoff policies. +* [Efficient automatic pipelining](bin/benchmark) +* [Zero-copy frame parsing](https://github.com/aembke/redis-protocol.rs) +* Optional reconnection features. * Publish-Subscribe and keyspace events interfaces. * A round-robin client pooling interface. -* Lua [scripts](https://redis.io/docs/interact/programmability/eval-intro/) or [functions](https://redis.io/docs/interact/programmability/functions-intro/). -* Streaming results from the `MONITOR` command. +* Lua [scripts](https://redis.io/docs/interact/programmability/eval-intro/) + or [functions](https://redis.io/docs/interact/programmability/functions-intro/). +* Streaming results from the `MONITOR` command. * Custom commands. * Streaming interfaces for scanning functions. 
* [Transactions](https://redis.io/docs/interact/transactions/) * [Pipelining](https://redis.io/topics/pipelining) * [Client Tracking](https://redis.io/docs/manual/client-side-caching/) -* An optional [RedisJSON](https://github.com/RedisJSON/RedisJSON) interface. +* A [RedisJSON](https://github.com/RedisJSON/RedisJSON) interface. * A round-robin cluster replica routing interface. -* An optional pubsub subscriber client that will automatically manage channel subscriptions. +* A subscriber client that will automatically manage channel subscriptions. * [Tracing](https://github.com/tokio-rs/tracing) -## Build Features - -| Name | Default | Description | -|-------------------------|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------| -| transactions | x | Enable a [Transaction](https://redis.io/docs/interact/transactions/) interface. | -| enable-native-tls | | Enable TLS support via [native-tls](https://crates.io/crates/native-tls). | -| enable-rustls | | Enable TLS support via [rustls](https://crates.io/crates/rustls). | -| vendored-openssl | | Enable the `native-tls/vendored` feature. | -| metrics | | Enable the metrics interface to track overall latency, network latency, and request/response sizes. | -| full-tracing | | Enable full [tracing](./src/trace/README.md) support. This can emit a lot of data. | -| partial-tracing | | Enable partial [tracing](./src/trace/README.md) support, only emitting traces for top level commands and network latency. | -| blocking-encoding | | Use a blocking task for encoding or decoding frames. This can be useful for clients that send or receive large payloads, but requires a multi-thread Tokio runtime. | -| network-logs | | Enable TRACE level logging statements that will print out all data sent to or received from the server. These are the only logging statements that can ever contain potentially sensitive user data. 
| -| custom-reconnect-errors | | Enable an interface for callers to customize the types of errors that should automatically trigger reconnection logic. | -| monitor | | Enable an interface for running the `MONITOR` command. | -| sentinel-client | | Enable an interface for communicating directly with Sentinel nodes. This is not necessary to use normal Redis clients behind a sentinel layer. | -| sentinel-auth | | Enable an interface for using different authentication credentials to sentinel nodes. | -| subscriber-client | | Enable a subscriber client interface that manages channel subscription state for callers. | -| serde-json | | Enable an interface to automatically convert Redis types to JSON via `serde-json`. | -| mocks | | Enable a mocking layer interface that can be used to intercept and process commands in tests. | -| dns | | Enable an interface that allows callers to override the DNS lookup logic. | -| replicas | | Enable an interface that routes commands to replica nodes. | -| client-tracking | | Enable a [client tracking](https://redis.io/docs/manual/client-side-caching/) interface. | -| default-nil-types | | Enable a looser parsing interface for `nil` values. | -| redis-json | | Enable an interface for [RedisJSON](https://github.com/RedisJSON/RedisJSON). | -| codec | | Enable a lower level framed codec interface for use with [tokio-util](https://docs.rs/tokio-util/latest/tokio_util/codec/index.html). | -| sha-1 | | Enable an interface for hashing Lua scripts. | -| unix-sockets | | Enable Unix socket support. | -| time-series | | Enable an interface for [Redis Timeseries](https://redis.io/docs/data-types/timeseries/). 
| +## Build Features +| Name | Default | Description | +|---------------------------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `transactions` | x | Enable a [Transaction](https://redis.io/docs/interact/transactions/) interface. | +| `enable-native-tls` | | Enable TLS support via [native-tls](https://crates.io/crates/native-tls). | +| `enable-rustls` | | Enable TLS support via [rustls](https://crates.io/crates/rustls). | +| `vendored-openssl` | | Enable the `native-tls/vendored` feature. | +| `metrics` | | Enable the metrics interface to track overall latency, network latency, and request/response sizes. | +| `full-tracing` | | Enable full [tracing](./src/trace/README.md) support. This can emit a lot of data. | +| `partial-tracing` | | Enable partial [tracing](./src/trace/README.md) support, only emitting traces for top level commands and network latency. | +| `blocking-encoding` | | Use a blocking task for encoding or decoding frames. This can be useful for clients that send or receive large payloads, but requires a multi-thread Tokio runtime. | +| `custom-reconnect-errors` | | Enable an interface for callers to customize the types of errors that should automatically trigger reconnection logic. | +| `monitor` | | Enable an interface for running the `MONITOR` command. | +| `sentinel-client` | | Enable an interface for communicating directly with Sentinel nodes. This is not necessary to use normal Redis clients behind a sentinel layer. | +| `sentinel-auth` | | Enable an interface for using different authentication credentials to sentinel nodes. | +| `subscriber-client` | | Enable a subscriber client interface that manages channel subscription state for callers. | +| `serde-json` | | Enable an interface to automatically convert Redis types to JSON via `serde-json`. 
| +| `mocks` | | Enable a mocking layer interface that can be used to intercept and process commands in tests. | +| `dns` | | Enable an interface that allows callers to override the DNS lookup logic. | +| `replicas` | | Enable an interface that routes commands to replica nodes. | +| `default-nil-types` | | Enable a looser parsing interface for `nil` values. | +| `sha-1` | | Enable an interface for hashing Lua scripts. | +| `unix-sockets` | | Enable Unix socket support. | + +## Interface Features + +The Redis interface has many functions and compile times can add up quickly. Interface features begin with `i-` and +control which public interfaces are built. + +| Name | Default | Description | +|-----------------|---------|---------------------------------------------------------------------------------------------------------------| +| `i-all` | | Enable the interfaces included with a basic Redis installation. This does not include `redis-stack` features. | +| `i-std` | x | Enable the common data structure interfaces (lists, sets, streams, keys, etc). | +| `i-acl` | | Enable the ACL command interface. | +| `i-client` | | Enable the CLIENT command interface. | +| `i-cluster` | | Enable the CLUSTER command interface. | +| `i-config` | | Enable the CONFIG command interface. | +| `i-geo` | | Enable the GEO command interface. | +| `i-hashes` | | Enable the hashes (HGET, etc) command interface. | +| `i-hyperloglog` | | Enable the hyperloglog command interface. | +| `i-keys` | | Enable the main keys (GET, SET, etc) command interface. | +| `i-lists` | | Enable the lists (LPUSH, etc) command interface. | +| `i-scripts` | | Enable the scripting command interfaces. | +| `i-memory` | | Enable the MEMORY command interfaces. | +| `i-pubsub` | | Enable the publish-subscribe command interfaces. | +| `i-server` | | Enable the server control (SHUTDOWN, BGSAVE, etc) interfaces. | +| `i-sets` | | Enable the sets (SADD, etc) interface. 
| +| `i-sorted-sets` | | Enable the sorted sets (ZADD, etc) interface. | +| `i-slowlog` | | Enable the SLOWLOG interface. | +| `i-streams` | | Enable the streams (XADD, etc) interface. | +| `i-tracking` | | Enable a [client tracking](https://redis.io/docs/manual/client-side-caching/) interface. | +| `i-time-series` | | Enable a [Redis Timeseries](https://redis.io/docs/data-types/timeseries/). interface. | +| `i-redis-json` | | Enable a [RedisJSON](https://github.com/RedisJSON/RedisJSON) interface. | +| `i-redis-stack` | | Enable the [Redis Stack](https://github.com/redis-stack) interfaces (`i-redis-json`, `i-time-series`, etc). | + +## Debugging Features + +| Name | Default | Description | +|----------------|---------|-----------------------------------------------------------------| +| `debug-ids` | | Enable a global counter used to differentiate commands in logs. | +| `network-logs` | | Enable additional TRACE logs for all frames on all sockets. | diff --git a/bin/benchmark/README.md b/bin/benchmark/README.md index f551cedf..1298a65d 100644 --- a/bin/benchmark/README.md +++ b/bin/benchmark/README.md @@ -1,36 +1,41 @@ Fred Benchmark -============= +============== -Redis includes a [benchmarking tool](https://redis.io/docs/management/optimization/benchmarks/) that can be used to measure the throughput of a client/connection pool. This module attempts to reproduce the same process with Tokio and Fred. +Redis includes a [benchmarking tool](https://redis.io/docs/management/optimization/benchmarks/) that can be used to +measure the throughput of a client/connection pool. This module attempts to reproduce the same process with Tokio and +Fred. -The general strategy involves using an atomic global counter and spawning `-c` Tokio tasks that fight over `-P` clients in order to send `-n` total `INCR` commands to the server as quickly as possible. 
+The general strategy involves using an atomic global counter and spawning `-c` Tokio tasks that share `-P` clients +in order to send `-n` total `INCR` commands to the server as quickly as possible. -Each of the `-c` Tokio tasks use a different random key so commands are uniformly distributed across a cluster or replica set. +Each of the `-c` Tokio tasks use a different random key so commands are uniformly distributed across a cluster or +replica set. -This strategy also has the benefit of being somewhat representative of an Axum or Actix web server use case where requests run in separate Tokio tasks but share a common client pool. +This strategy also has the benefit of being somewhat representative of an Axum or Actix web server use case where +requests run in separate Tokio tasks but share a common client pool. ## Tuning -`fred` supports several additional features or performance tuning options that can affect these results. For example: +There are several additional features or performance tuning options that can affect these results. For example: -* Tracing. Simply enabling the FF cut throughput by ~20% in my tests. -* Pipelining. The `auto_pipeline` feature can dramatically improve throughput in scenarios like this where a client or pool is shared among many Tokio tasks. +* Tracing. Enabling the FF cut throughput by ~20% in my tests. +* Pipelining. The `auto_pipeline` feature can dramatically improve throughput in scenarios like this where a client or + pool is shared among many Tokio tasks. * Clustering * Backpressure settings * Network latency * Log levels, often indirectly for the same reason as `tracing` (contention on a pipe, file handle, or socket). * The size of the client connection pool. -* And much more... Callers should take care to consider each of these when deciding on argv values. -This module also includes an optional `assert-expected` feature flag that adds an `assert!` call after each `INCR` command to ensure the response is actually correct. 
+This module also includes an optional `assert-expected` feature flag that adds an `assert!` call after each `INCR` +command to ensure the response is actually correct. -## Tracing +## Tracing -**This part frequently breaks since I rarely use tracing while benchmarking.** - -This also shows how to configure the client with tracing enabled against a local Jaeger instance. A [docker compose](../../tests/docker/compose/jaeger.yml) file is included that will run a local Jaeger instance. +This also shows how to configure the client with tracing enabled against a local Jaeger instance. +A [docker compose](../../tests/docker/compose/jaeger.yml) file is included that will run a local Jaeger instance. ``` docker-compose -f /path/to/fred/tests/docker/compose/jaeger.yml up @@ -38,7 +43,8 @@ docker-compose -f /path/to/fred/tests/docker/compose/jaeger.yml up Then navigate to . -By default, this module does not compile any tracing features, but there are 3 flags that can toggle how tracing is configured. +By default, this module does not compile any tracing features, but there are 3 flags that can toggle how tracing is +configured. * `partial-tracing` - Enables `fred/partial-tracing` and emits traces to the local jaeger instance. * `full-tracing` - Enables `fred/full-tracing` and emits traces to the local jaeger instance. @@ -46,25 +52,24 @@ By default, this module does not compile any tracing features, but there are 3 f ## Docker -Linux+Docker is the best supported option via the `./run.sh` script. The `Cargo.toml` provided here has a comment/toggle around the lines that need to change if callers want to use a remote server. +Linux+Docker is the best supported option via the `./run.sh` script. The `Cargo.toml` provided here has a comment/toggle +around the lines that need to change if callers want to use a remote server. Callers may have to also change `run.sh` to enable additional features in docker. 
-**I would not even bother trying to run this on OS X, especially on Apple Silicon, with Docker at the moment.** It will be very slow compared to Linux or any other deployment model that avoids an Apple FS virtualization layer. All the docker tooling assumes a local docker engine and makes frequent use of `VOLUME`s. - -## Usage +## Usage ``` USAGE: fred_benchmark [FLAGS] [OPTIONS] [SUBCOMMAND] FLAGS: - --cluster Whether or not to assume a clustered deployment. + --cluster Whether to assume a clustered deployment. --help Prints help information -q, --quiet Only print the final req/sec measurement. - --replicas Whether or not to use `GET` with replica nodes instead of `INCR` with primary nodes. + --replicas Whether to use `GET` with replica nodes instead of `INCR` with primary nodes. -t, --tls Enable TLS via whichever build flag is provided. - -t, --tracing Whether or not to enable tracing via a local Jeager instance. See tests/docker-compose.yml to + -t, --tracing Whether to enable tracing via a local Jeager instance. See tests/docker-compose.yml to start up a local Jaeger instance. -V, --version Prints version information @@ -97,21 +102,21 @@ All the examples below use the following parameters: With `auto_pipeline` **disabled**: ``` -foo@bar:/path/to/fred.rs/bin/benchmark$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar no-pipeline +$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar no-pipeline Performed 10000000 operations in: 31.496934107s. Throughput: 317500 req/sec ``` With `auto_pipeline` **enabled**: ``` -foo@bar:/path/to/fred.rs/bin/benchmark$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar pipeline +$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar pipeline Performed 10000000 operations in: 4.125544401s. 
Throughput: 2424242 req/sec ``` With `auto_pipeline` **enabled** and using `GET` with replica nodes instead of `INCR` with primary nodes: ``` -foo@bar:/path/to/fred.rs/bin/benchmark$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar --replicas pipeline +$ ./run.sh --cluster -c 10000 -n 10000000 -P 15 -h redis-cluster-1 -p 30001 -a bar --replicas pipeline Performed 10000000 operations in: 3.356416674s. Throughput: 2979737 req/sec ``` @@ -122,11 +127,14 @@ Maybe Relevant Specs: ## `redis-rs` Comparison -The `USE_REDIS_RS` environment variable can be toggled to [switch the benchmark logic](./src/_redis.rs) to use `redis-rs` instead of `fred`. There's also an `info` level log line that can confirm this at runtime. +The `USE_REDIS_RS` environment variable can be toggled to [switch the benchmark logic](./src/_redis.rs) to +use `redis-rs` instead of `fred`. There's also an `info` level log line that can confirm this at runtime. -The `redis-rs` variant uses the same general strategy, but with [bb8-redis](https://crates.io/crates/bb8-redis) (specifically `Pool`) instead of `fred::clients::RedisPool`. All the other more structural components in the benchmark logic are the same. +The `redis-rs` variant uses the same general strategy, but with [bb8-redis](https://crates.io/crates/bb8-redis) ( +specifically `Pool`) instead of `fred::clients::RedisPool`. All the other more +structural components in the benchmark logic are the same. -Please reach out if you think this tooling or strategy is not representative of a real-world Tokio-based use case. +Please reach out if you think this tooling or strategy is not representative of a real-world Tokio-based use case. ### Examples diff --git a/bin/benchmark/cli.yml b/bin/benchmark/cli.yml index 7e8a56c1..61bbfb57 100644 --- a/bin/benchmark/cli.yml +++ b/bin/benchmark/cli.yml @@ -6,15 +6,15 @@ args: - tracing: short: t long: tracing - help: Whether or not to enable tracing via a local Jeager instance. 
See tests/docker-compose.yml to start up a local Jaeger instance. + help: Whether to enable tracing via a local Jeager instance. See tests/docker-compose.yml to start up a local Jaeger instance. takes_value: false - cluster: long: cluster - help: Whether or not to assume a clustered deployment. + help: Whether to assume a clustered deployment. takes_value: false - replicas: long: replicas - help: Whether or not to use `GET` with replica nodes instead of `INCR` with primary nodes. + help: Whether to use `GET` with replica nodes instead of `INCR` with primary nodes. takes_value: false - quiet: short: q diff --git a/bin/benchmark/src/_redis.rs b/bin/benchmark/src/_redis.rs index 12733cab..da9dc797 100644 --- a/bin/benchmark/src/_redis.rs +++ b/bin/benchmark/src/_redis.rs @@ -2,8 +2,7 @@ use crate::{utils, Argv}; use bb8_redis::{ bb8::{self, Pool, PooledConnection}, redis::{cmd, AsyncCommands, ErrorKind as RedisErrorKind, RedisError}, - RedisConnectionManager, - RedisMultiplexedConnectionManager, + RedisConnectionManager, RedisMultiplexedConnectionManager, }; use futures::TryStreamExt; use indicatif::ProgressBar; @@ -85,8 +84,8 @@ async fn init(argv: &Arc) -> Pool { .expect("Failed to create client pool"); // try to warm up the pool first - let mut warmup_ft = Vec::with_capacity(argv.pool); - for _ in 0 .. argv.pool + 1 { + let mut warmup_ft = Vec::with_capacity(argv.pool + 1); + for _ in 0..argv.pool + 1 { warmup_ft.push(async { incr_key(&pool, "foo").await }); } futures::future::join_all(warmup_ft).await; @@ -95,46 +94,6 @@ async fn init(argv: &Arc) -> Pool { pool } -// ### Background -// -// First, I'd recommend reading this: https://redis.io/docs/manual/pipelining. It's important to understand what RTT is, -// why pipelining minimizes its impact in general, and why it's often the only thing that really matters for the -// overall throughput of an IO-bound application with dependencies like Redis. 
-// -// These applications often share an important characteristic: -// -// End-user requests run concurrently, often in parallel in separate Tokio tasks, but need to share a small pool of -// Redis connections via some kind of dependency injection interface. These request tasks rarely have any kind of -// synchronization requirements (there's usually no reason one user's request should have to wait for another to -// finish), so ideally we could efficiently interleave their Redis commands on the wire in a way that can take -// advantage of this. -// -// For example, -// -// 1. Task A writes command 1 to server. -// 2. Task B writes command 2 to server. -// 3. Task A reads command response 1 from server. -// 4. Task B reads command response 2 from server. -// -// reduces the impact of RTT much more effectively than -// -// 1. Task A writes command 1 to server. -// 2. Task A reads command response 1 from server. -// 3. Task B writes command 2 to server. -// 4. Task B reads command response 2 from server. -// -// and the effect becomes even more pronounced as concurrency (the number of tasks) increases, at least until other -// bottlenecks kick in. You'll often see me describe this as "pipelining across tasks", whereas the `redis::Pipeline` -// and `fred::clients::Pipeline` interfaces control pipelining __within__ a task. -// -// This benchmarking tool is built specifically to represent this class of high concurrency use cases and to measure -// the impact of this particular pipelining optimization (`auto_pipeline: true` in `fred`), so it seemed interesting -// to adapt it to compare the two libraries. If this pipelining strategy is really that effective then we should see -// `fred` perform roughly the same as `redis-rs` when `auto_pipeline: false`, but it should outperform when -// `auto_pipeline: true`. -// -// If your use case is not structured this way or your stack does not use Tokio concurrency features then these -// results are likely less relevant. 
pub async fn run(argv: Arc, counter: Arc, bar: Option) -> Duration { info!("Running with redis-rs"); @@ -146,7 +105,7 @@ pub async fn run(argv: Arc, counter: Arc, bar: Option Result { - // ... -} -``` - -One of the most important design decisions involves whether to use `&mut MyClient` or `&MyClient`. In a roundabout way this ends up determining whether it's possible, or at least practical, to implement the pipelining optimization described above. However, this does not mean that `&MyClient` is always the best choice. - -If we want to expose a thin or generic transport layer such as `MyClient` then `T` will almost certainly end up being based on `AsyncRead + AsyncWrite` or `Stream + Sink`, both of which use `&mut self` on their relevant send/poll interfaces. This means my `call` function will probably* have to use `&mut MyClient`. If callers want to use my client in a context with `Send` restrictions (across Tokio tasks for example) then somebody will have to use an async lock somewhere. Maybe the client hides this, or perhaps an external pool library hides this, but there's an async lock somewhere that holds a lock guard across an `await` point until the server responds. This ultimately conflicts with implementing the optimization above since no other task can interact with that `&mut MyClient` instance while the lock guard is being held. - -However, there are many use cases where a thin and generic transport layer is more important than the particular pipelining optimization described above. In my case I only needed TCP, TCP+TLS, and maybe unix sockets, so the generic aspect of the interface was less important to me. In the end I decided to focus on an interface and design that worked well with the pipelining optimization above, which incidentally lead to `&MyClient` instead of `&mut MyClient`. The drawback of this decision is that callers cannot access or implement their own transport layers. 
+With this model we're not reducing network latency or RTT, but by rarely waiting for the server to respond we +can pack many more requests on the wire and dramatically increase throughput in high concurrency scenarios. -However, under the hood all of these networking types use `&mut self` at some point, so we still have to reconcile the mutability constraint somehow, and we know that it can't involve holding an async lock guard across an `await` point that waits for the server to respond. At its core the primary challenge with this strategy is that it requires not only separating write and read operations on the socket, but also requires operating on each half of the socket concurrently according to RESP's frame ordering rules. +However, there are some interesting tradeoffs with the optimization described above, at least in Rust. At its core the +primary challenge with this strategy is that it requires not only separating write and read operations on the socket, +but it also requires operating on each half of the socket concurrently according to RESP's frame ordering rules. ### Message Passing & Queues -Another mental model that tends to work well with pipelined protocols is to think of the client as a series of queues. For example: +Another mental model that tends to work well with pipelined protocols is to think of the client as a series of queues. +For example: 1. An end user task interacts with the client by putting a message in an in-memory queue. -2. Some time later the client pops this message off the queue, serializes it, and sends it to the server over a TCP connection, which also effectively acts as a queue in this context. We also put the original request into another in-memory queue associated with the chosen connection. -3. Some time later we pop a frame off the TCP connection. 
Since the server always* responds to commands in order we know that the request at the front of the in-memory queue associated with this connection must be associated with the frame we just received. -4. We pop the request off the in-memory queue mentioned above and use this to respond to the caller in the original end user task. - -This kind of approach also tends to work well in high concurrency scenarios since "thread safe" shared queues can be implemented without locks (just atomics). There are several options for this, including several interfaces within Tokio. - -If we think of the client as a series of routing adapters between a set of queues like this then it becomes much easier to reason about how the pipelining above should be implemented. All we really need to do is implement some policy checks in step 2 that determine whether we should wait for step 4 to finish before processing the next command in step 2. In most cases we want to write commands as quickly as possible, but in some cases maybe the client should wait (like `AUTH`, `HELLO`, blocking commands, etc). It may not be immediately obvious, but Tokio offers several easy ways to coordinate tasks like this. - -This is why the library uses a significantly more complicated message passing implementation. The `auto_pipeline` flag controls this optimization and the benchmarking results show a dramatic improvement when enabled. +2. Some time later the client pops this message off the queue, serializes it, and sends it to the server over a TCP + connection, which also effectively acts as a queue in this context. We also put the original request into another + in-memory queue associated with the chosen connection. +3. Some time later we pop a frame off the TCP connection. Since the server always* responds to commands in order we know + that the request at the front of the in-memory queue associated with this connection must be associated with the + frame we just received. +4. 
We pop the request off the in-memory queue mentioned above and use this to respond to the caller in the original end + user task. + +This kind of approach also tends to work well in high concurrency scenarios since shared queues can be +implemented without locks (just atomics). There are several options for this, including several interfaces within Tokio. + +If we think of the client as a set of queues like this then it becomes much easier to reason about how the pipelining +above should be implemented. All we really need to do is implement some policy checks in step 2 that determine whether +we should wait for step 4 to finish before processing the next command in step 2. In most cases we want to write +commands as quickly as possible, but in some cases maybe the client should wait (like `AUTH`, `HELLO`, blocking +commands, etc). Fortunately Tokio offers several ways to coordinate tasks like this. + +This is why the library uses a message passing implementation. The `auto_pipeline` flag controls this optimization and +the benchmarking results show a dramatic improvement when enabled. ## Technical Overview -As mentioned above, for the most part the library uses message passing and dependency injection patterns. The primary concern is to support highly concurrent use cases, therefore we want to minimize contention on shared resources. There are several ways to do this, but the one used here is to utilize something like the actor model. Thankfully Tokio provides all the underlying interfaces one would need to use basic actor model patterns without any additional frameworks or libraries. +As mentioned above, for the most part the library uses message passing and dependency injection patterns. The primary +concern is to support highly concurrent use cases, therefore we want to minimize contention on shared resources. There +are several ways to do this, but the one used here is to utilize something like the actor model. 
Thankfully Tokio +provides all the underlying interfaces one would need to use basic actor model patterns without any additional +frameworks or libraries. -If you're not familiar with message passing in Rust I would strongly recommend reading [the Tokio docs](https://docs.rs/tokio/latest/tokio/sync/index.html#message-passing) first. +If you're not familiar with message passing in Rust I would strongly recommend +reading [the Tokio docs](https://docs.rs/tokio/latest/tokio/sync/index.html#message-passing) first. -Here's a top-down way to visualize the communication patterns between Tokio tasks within `fred` in the context of an Axum app. This diagram assumes we're targeting the use case described above. Sorry for this. +Here's a top-down way to visualize the communication patterns between Tokio tasks within `fred` in the context of an +Axum app. This diagram assumes we're targeting the use case described above. Sorry for this. * Blue boxes are Tokio tasks. * Green arrows use a shared [MPSC channel](https://docs.rs/tokio/latest/tokio/sync/mpsc/fn.unbounded_channel.html). -* Brown arrows use [oneshot channels](https://docs.rs/tokio/latest/tokio/sync/oneshot/index.html). Callers include their [oneshot sender](https://docs.rs/tokio/latest/tokio/sync/oneshot/struct.Sender.html) half in any messages they send via the green arrows. +* Brown arrows use [oneshot channels](https://docs.rs/tokio/latest/tokio/sync/oneshot/index.html). Callers include + their [oneshot sender](https://docs.rs/tokio/latest/tokio/sync/oneshot/struct.Sender.html) half in any messages they + send via the green arrows. ![Bad Design Doc](./design.png) -The shared state in this diagram is an `Arc` that's shared between the Axum request tasks. Each of these tasks can write to the channel without acquiring a lock, minimizing contention that could slow down the application layer. +The shared state in this diagram is an `Arc` that's shared between the Axum request tasks. 
Each of +these tasks can write to the channel without acquiring a lock, minimizing contention that could slow down the +application layer. -At a high level all the public client types are thin wrappers around an `Arc`. A `RedisPool` is really a `Arc>>` with an additional atomic increment-mod-length trick in the mix. Cloning anything `ClientLike` usually just clones one of these `Arc`s. +At a high level all the public client types are thin wrappers around an `Arc`. A `RedisPool` is really +a `Arc>>` with an additional atomic increment-mod-length trick in the mix. Cloning +anything `ClientLike` usually just clones one of these `Arc`s. -Generally speaking the router task sits in a `recv` loop. +Generally speaking the router task sits in a `recv` loop. ```rust async fn example(connections: &mut HashMap, rx: UnboundedReceiver) -> Result<(), RedisError> { while let Some(command) = rx.recv().await { send_to_server(connections, command).await?; } - + Ok(()) } ``` -Commands are processed in series, but the `auto_pipeline` flag controls whether the `send_to_server` function waits on the server to respond or not. When commands can be pipelined this way the loop can process requests as quickly as they can be written to a socket. This model also creates a pleasant developer experience where we can pretty much ignore many synchronization issues, and as a result it's much easier to reason about how features like reconnection should work. It's also easy to implement socket flushing optimizations with this model. +Commands are processed in series, but the `auto_pipeline` flag controls whether the `send_to_server` function waits on +the server to respond or not. When commands can be pipelined this way the loop can process requests as quickly as they +can be written to a socket. This model also creates a pleasant developer experience where we can pretty much ignore many +synchronization issues, and as a result it's easier to reason about how features like reconnection should work. 
It's +also relatively easy to implement socket flushing optimizations with this model. However, this has some drawbacks: -* Once a command is in the `UnboundedSender` channel it's difficult to inspect or remove. There's no practical way to get any kind of random access into this. -* It can be difficult to preempt commands with this model. For example, forcing a reconnection should take precedence over a blocking command. This is more difficult to implement with this model. -* Callers should at least be aware of this channel so that they're aware of how server failure modes can lead to increased memory usage. This makes it perhaps surprisingly important to properly tune reconnection or backpressure settings, especially if memory is in short supply. -* Some things that would ideally be synchronous must instead be asynchronous. For example, I've often wanted a synchronous interface to inspect active connections. -As of 8.x there's a new `max_command_buffer_len` field that can be used as a circuit breaker to trigger backpressure if this buffer grows too large. +* Once a command is in the `UnboundedSender` channel it's difficult to inspect or remove. There's no practical way to + get any kind of random access into this. +* It can be difficult to preempt commands with this model. For example, forcing a reconnection should take precedence + over a blocking command. This is more difficult to implement with this model. +* Callers should at least be aware of this channel so that they're aware of how server failure modes can lead to + increased memory usage. This makes it perhaps surprisingly important to properly tune reconnection or backpressure + settings, especially if memory is in short supply. +* Some things that would ideally be synchronous must instead be asynchronous. For example, I've often wanted a + synchronous interface to inspect active connections. 
+ +As of 8.x there's a new `max_command_buffer_len` field that can be used as a circuit breaker to trigger backpressure if +this buffer grows too large. Similarly, the reader tasks also use a `recv` loop: @@ -137,9 +177,91 @@ async fn example(state: &mut State, stream: SplitTcpStream) -> Result<(), // responding to oneshot channels only uses atomics and is not async, so this loop is quick command.respond_to_caller(frame); } - + Ok(()) } ``` -In order for the reader task to respond to the caller in the Axum task we need a mechanism for the caller's oneshot sender half to move between the router task and the reader task that receives the response. An [`Arc`](https://docs.rs/crossbeam-queue/latest/crossbeam_queue/struct.SegQueue.html) is shared between the router and each reader task to support this. +In order for the reader task to respond to the caller in the Axum task we need a mechanism for the caller's oneshot +sender half to move between the router task and the reader task that receives the response. +An [`Arc`](https://docs.rs/crossbeam-queue/latest/crossbeam_queue/struct.SegQueue.html) is shared between the +router and each reader task to support this. + +## Connections + +This section describes how the connection layer works. + +Most connections are wrapped with a `Framed` [codec](https://docs.rs/tokio-util/latest/tokio_util/codec/index.html). +The [redis_protocol](https://github.com/aembke/redis-protocol.rs) crate includes a general +purpose [codec interface](https://docs.rs/redis-protocol/latest/redis_protocol/codec/index.html) similar to the one used +here, but without the `metrics` feature flag additions. + +### Handshake + +After a connection is established the client does the following: + +1. Authenticate, if necessary + 1. If RESP3 - Send `HELLO 3 AUTH ` + 2. If RESP2 - Send `AUTH ` + 3. If no auth - Send `PING` +2. If not clustered - `SELECT` the database from the associated `RedisConfig` +3. If `auto_client_setname` then send `CLIENT SETNAME ` +4. 
Read and cache connection ID via `CLIENT ID`. +5. Read the server version via `INFO server`. +6. If clustered and not `disable_cluster_health_check` then check cluster state via `CLUSTER INFO` +7. If connecting to a replica - send `READONLY` + +### Backchannel + +There are several features that require or benefit from having some kind of backchannel connection to the server(s), +since the main connection used by callers could be blocked, unresponsive, or otherwise unusable. However, with clustered +deployments it would generally be wasteful to keep twice the number of connections open to each cluster +node just for these scenarios. The library tries to balance these concerns by using a single backchannel connection and +lazily moving it around the cluster as needed. There are probably some use cases where this strategy causes problems, so +it might be worth adding a new strategy via a new config option in the future. + +The backchannel is used for the following scenarios: + +* Sending `CLUSTER SLOTS` or `CLUSTER SHARDS` +* Sending `CLIENT UNBLOCK` +* Sending `ROLE` to check or discover replicas +* Sending `FUNCTION KILL` or `FUNCTION STATS` + +Unlike connections managed by the `Router`, the backchannel connection does not use a split socket interface with a +dedicated reader task. Instead the transport is stored in an `Arc` without automatic pipelining features, +which means it may close or become unresponsive without triggering an error until the next write attempt. + +### Clustering + +Cluster nodes are discovered via the function `sync_cluster` with the following process: + +1. Send `CLUSTER SLOTS` on the backchannel. + 1. If the old backchannel connection is unusable, or an error occurs, then try connecting to a new node as specified + by the `ClusterDiscoveryPolicy`. If multiple hosts are provided they are tried in series. + 2. Send `CLUSTER SLOTS` on this new connection, then cache the new connection as the new backchannel. +2. 
Parse the `CLUSTER SLOTS` response into a `ClusterRouting` struct. +3. Compare the new `ClusterRouting::unique_primary_nodes` output with the old connection map stored on the `Router` to + determine which connections should be added or removed. +4. Drop the connections that no longer point to primary cluster nodes. +5. Connect to the newly discovered primary cluster nodes concurrently. +6. Split each connection and spawn reader tasks for each of the stream halves. +7. Store the new writer halves on the `Router` connection map. + +The client will use this process whenever a connection closes unexpectedly or a `MOVED` error is received. In many cases +this is wrapped in a loop and delayed based on the client's `ReconnectPolicy`. + +The initial cluster sync operation after a `MOVED` redirection may be delayed by `cluster_cache_update_delay`. This can +be useful with large keys that often take several milliseconds to move. + +When the `Router` task receives a command it uses the command's `ClusterHash` policy +with [redis_keyslot](https://docs.rs/redis-protocol/latest/redis_protocol/fn.redis_keyslot.html) to determine the +correct cluster hash slot. This is then used with the cached `ClusterRouting::get_server` interface to map the command +to a cluster node `RedisSink`. + +### Pipelining + +WIP + +### Transactions + +WIP \ No newline at end of file diff --git a/docs/design.md b/docs/design.md deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/performance.md b/docs/performance.md deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/protocol.md b/docs/protocol.md deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/README.md b/examples/README.md index 05bef862..71d836fe 100644 --- a/examples/README.md +++ b/examples/README.md @@ -4,23 +4,27 @@ Examples * [Basic](./basic.rs) - Basic client usage. * [Axum](./axum.rs) - Use a client pool with [Axum](https://crates.io/crates/axum). 
* [TLS](./tls.rs) - Setting up a client that uses TLS. -* [Publish-Subscribe](./pubsub.rs) - Use multiple clients together with the pubsub interface in a way that survives network interruptions. +* [Publish-Subscribe](./pubsub.rs) - Use multiple clients together with the pubsub interface in a way that survives + network interruptions. * [Blocking](./blocking.rs) - Use multiple clients with the blocking list interface. * [Transactions](./transactions.rs) - Use the MULTI/EXEC interface on a client. * [Pipeline](./pipeline.rs) - Use the manual pipeline interface. -* [Streams](./streams.rs) - Use `XADD` and `XREAD` to communicate between tasks. +* [Streams](./streams.rs) - Use `XADD` and `XREAD` to communicate between tasks. * [Lua](./lua.rs) - Use the Lua scripting interface on a client. * [Scan](./scan.rs) - Use the SCAN interface to scan and read keys. -* [Pool](./pool.rs) - Use a redis connection pool. +* [Pool](./pool.rs) - Use a round-robin client pool. * [Monitor](./monitor.rs) - Process a `MONITOR` stream. * [Sentinel](./sentinel.rs) - Connect using a sentinel deployment. -* [Serde JSON](./serde_json.rs) - Use the `serde-json` feature to convert between Redis types and JSON. -* [Redis JSON](./redis_json.rs) - Use the `redis-json` feature with `serde-json` types. +* [Serde JSON](./serde_json.rs) - Use the `serde-json` feature to convert between Redis types and JSON. +* [Redis JSON](./redis_json.rs) - Use the `i-redis-json` feature with `serde-json` types. * [Custom](./custom.rs) - Send custom commands or operate on RESP frames. -* [DNS](./dns.rs) - Customize the DNS resolution logic. -* [Client Tracking](./client_tracking.rs) - Implement [client side caching](https://redis.io/docs/manual/client-side-caching/). +* [DNS](./dns.rs) - Customize the DNS resolution logic. +* [Client Tracking](./client_tracking.rs) - + Implement [client side caching](https://redis.io/docs/manual/client-side-caching/). 
* [Events](./events.rs) - Respond to connection events with the `EventsInterface`. -* [Keyspace Notifications](./keyspace.rs) - Use the [keyspace notifications](https://redis.io/docs/manual/keyspace-notifications/) interface. -* [Misc](./misc.rs) - Miscellaneous or advanced features. +* [Keyspace Notifications](./keyspace.rs) - Use + the [keyspace notifications](https://redis.io/docs/manual/keyspace-notifications/) interface. +* [Misc](./misc.rs) - Miscellaneous or advanced features. +* [Replicas](./replicas.rs) - Interact with cluster replica nodes via a `RedisPool`. Or see the [tests](../tests/integration) for more examples. \ No newline at end of file diff --git a/examples/pubsub.rs b/examples/pubsub.rs index 6facaf03..68aaf5f3 100644 --- a/examples/pubsub.rs +++ b/examples/pubsub.rs @@ -20,7 +20,7 @@ async fn main() -> Result<(), RedisError> { Ok::<_, RedisError>(()) }); - for idx in 0 .. 50 { + for idx in 0..50 { publisher_client.publish("foo", idx).await?; sleep(Duration::from_secs(1)).await; } @@ -32,6 +32,7 @@ async fn main() -> Result<(), RedisError> { } #[cfg(feature = "subscriber-client")] +#[allow(dead_code)] async fn subscriber_example() -> Result<(), RedisError> { let subscriber = Builder::default_centralized().build_subscriber_client()?; subscriber.init().await?; diff --git a/examples/replicas.rs b/examples/replicas.rs new file mode 100644 index 00000000..091f2112 --- /dev/null +++ b/examples/replicas.rs @@ -0,0 +1,94 @@ +#![allow(clippy::disallowed_names)] +#![allow(clippy::let_underscore_future)] +#![allow(clippy::mutable_key_type)] + +use fred::{ + prelude::*, + types::{ClusterDiscoveryPolicy, ClusterHash, ReplicaConfig, RespVersion}, + util::redis_keyslot, +}; +use futures::future::try_join_all; +use log::info; +use std::collections::HashSet; + +#[tokio::main] +async fn main() -> Result<(), RedisError> { + pretty_env_logger::init(); + + let config = RedisConfig::from_url("redis-cluster://foo:bar@redis-cluster-1:30001")?; + let pool = 
Builder::from_config(config) + .with_config(|config| { + config.version = RespVersion::RESP3; + config + .server + .set_cluster_discovery_policy(ClusterDiscoveryPolicy::ConfigEndpoint) + .expect("Failed to set discovery policy."); + }) + .with_connection_config(|config| { + config.replica = ReplicaConfig { + lazy_connections: true, + primary_fallback: true, + ..Default::default() + }; + }) + .set_policy(ReconnectPolicy::new_exponential(0, 100, 30_000, 2)) + .build_pool(5)?; + + pool.init().await?; + info!("Connected to redis."); + lazy_connection_example(pool.next()).await?; + + // use pipelines and WAIT to concurrently SET then GET a value from replica nodes + let mut ops = Vec::with_capacity(1000); + for idx in 0..1000 { + let pool = pool.clone(); + ops.push(async move { + let key: RedisKey = format!("foo-{}", idx).into(); + let cluster_hash = ClusterHash::Custom(redis_keyslot(key.as_bytes())); + + // send WAIT to the cluster node that received SET + let pipeline = pool.next().pipeline(); + pipeline.set(&key, idx, None, None, false).await?; + pipeline + .with_options(&Options { + cluster_hash: Some(cluster_hash), + ..Default::default() + }) + .wait(1, 10_000) + .await?; + pipeline.all().await?; + + assert_eq!(pool.replicas().get::(&key).await?, idx); + Ok::<_, RedisError>(()) + }); + } + try_join_all(ops).await?; + + Ok(()) +} + +// use one client to demonstrate how lazy connections are created. in this case each primary node is expected to have one replica. 
+async fn lazy_connection_example(client: &RedisClient) -> Result<(), RedisError> { + let replica_routing = client.replicas().nodes(); + let cluster_routing = client + .cached_cluster_state() + .expect("Failed to read cached cluster state."); + let expected_primary = cluster_routing + .get_server(redis_keyslot(b"foo")) + .expect("Failed to read primary node owner for 'foo'"); + let old_connections: HashSet<_> = client.active_connections().await?.into_iter().collect(); + + // if `lazy_connections: true` the client creates the connection here + client.replicas().get("foo").await?; + let new_connections: HashSet<_> = client.active_connections().await?.into_iter().collect(); + let new_servers: Vec<_> = new_connections.difference(&old_connections).collect(); + // verify that 1 new connection was created, and that it's in the replica map as a replica of the expected primary node + assert_eq!(new_servers.len(), 1); + assert_eq!(replica_routing.get(new_servers[0]), Some(expected_primary)); + + // update the replica routing table and reset replica connections + client.replicas().sync(true).await?; + assert_eq!(old_connections.len(), client.active_connections().await?.len()); + + Ok(()) +} diff --git a/src/clients/options.rs b/src/clients/options.rs index 24b7471a..85c757a0 100644 --- a/src/clients/options.rs +++ b/src/clients/options.rs @@ -1,9 +1,5 @@ use crate::{ - error::RedisError, - interfaces::*, - modules::inner::RedisClientInner, - protocol::command::RedisCommand, - types::Options, + error::RedisError, interfaces::*, modules::inner::RedisClientInner, protocol::command::RedisCommand, types::Options, }; use std::{fmt, ops::Deref, sync::Arc}; @@ -43,7 +39,7 @@ use std::{fmt, ops::Deref, sync::Arc}; /// ``` #[derive(Clone)] pub struct WithOptions { - pub(crate) client: C, + pub(crate) client: C, pub(crate) options: Options, } @@ -94,27 +90,63 @@ impl ClientLike for WithOptions { } } +#[cfg(feature = "i-acl")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-acl")))] impl 
AclInterface for WithOptions {} +#[cfg(feature = "i-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] impl ClientInterface for WithOptions {} +#[cfg(feature = "i-cluster")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] impl ClusterInterface for WithOptions {} +#[cfg(feature = "i-pubsub")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-pubsub")))] impl PubsubInterface for WithOptions {} +#[cfg(feature = "i-config")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] impl ConfigInterface for WithOptions {} +#[cfg(feature = "i-geo")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] impl GeoInterface for WithOptions {} +#[cfg(feature = "i-hashes")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] impl HashesInterface for WithOptions {} +#[cfg(feature = "i-hyperloglog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] impl HyperloglogInterface for WithOptions {} +#[cfg(feature = "i-keys")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] impl KeysInterface for WithOptions {} +#[cfg(feature = "i-lists")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] impl ListInterface for WithOptions {} +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl MemoryInterface for WithOptions {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl AuthInterface for WithOptions {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl ServerInterface for WithOptions {} +#[cfg(feature = "i-slowlog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] impl SlowlogInterface for WithOptions {} +#[cfg(feature = "i-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] impl SetsInterface for WithOptions {} +#[cfg(feature = "i-sorted-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] impl SortedSetsInterface for WithOptions {} +#[cfg(feature = "i-streams")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] impl StreamsInterface for 
WithOptions {} +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] impl FunctionInterface for WithOptions {} -#[cfg(feature = "redis-json")] -#[cfg_attr(docsrs, doc(cfg(feature = "redis-json")))] +#[cfg(feature = "i-redis-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] impl RedisJsonInterface for WithOptions {} -#[cfg(feature = "time-series")] -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg(feature = "i-time-series")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] impl TimeSeriesInterface for WithOptions {} diff --git a/src/clients/pipeline.rs b/src/clients/pipeline.rs index 1e288e77..1395d7dd 100644 --- a/src/clients/pipeline.rs +++ b/src/clients/pipeline.rs @@ -1,28 +1,6 @@ use crate::{ error::RedisError, - interfaces, - interfaces::{ - AclInterface, - AuthInterface, - ClientInterface, - ClientLike, - ClusterInterface, - ConfigInterface, - FunctionInterface, - GeoInterface, - HashesInterface, - HyperloglogInterface, - KeysInterface, - ListInterface, - MemoryInterface, - PubsubInterface, - Resp3Frame, - ServerInterface, - SetsInterface, - SlowlogInterface, - SortedSetsInterface, - StreamsInterface, - }, + interfaces::{self, *}, modules::{inner::RedisClientInner, response::FromRedis}, prelude::{RedisResult, RedisValue}, protocol::{ @@ -36,11 +14,6 @@ use parking_lot::Mutex; use std::{collections::VecDeque, fmt, fmt::Formatter, sync::Arc}; use tokio::sync::oneshot::{channel as oneshot_channel, Receiver as OneshotReceiver}; -#[cfg(feature = "redis-json")] -use crate::interfaces::RedisJsonInterface; -#[cfg(feature = "time-series")] -use crate::interfaces::TimeSeriesInterface; - fn clone_buffered_commands(buffer: &Mutex>) -> VecDeque { let guard = buffer.lock(); let mut out = VecDeque::with_capacity(guard.len()); @@ -83,7 +56,7 @@ fn prepare_all_commands( /// See the [all](Self::all), [last](Self::last), and [try_all](Self::try_all) functions for more information. 
pub struct Pipeline { commands: Arc>>, - client: C, + client: C, } #[doc(hidden)] @@ -91,7 +64,7 @@ impl Clone for Pipeline { fn clone(&self) -> Self { Pipeline { commands: self.commands.clone(), - client: self.client.clone(), + client: self.client.clone(), } } } @@ -148,29 +121,65 @@ impl ClientLike for Pipeline { } } +#[cfg(feature = "i-acl")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-acl")))] impl AclInterface for Pipeline {} +#[cfg(feature = "i-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] impl ClientInterface for Pipeline {} +#[cfg(feature = "i-cluster")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] impl ClusterInterface for Pipeline {} +#[cfg(feature = "i-pubsub")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-pubsub")))] impl PubsubInterface for Pipeline {} +#[cfg(feature = "i-config")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] impl ConfigInterface for Pipeline {} +#[cfg(feature = "i-geo")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] impl GeoInterface for Pipeline {} +#[cfg(feature = "i-hashes")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] impl HashesInterface for Pipeline {} +#[cfg(feature = "i-hyperloglog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] impl HyperloglogInterface for Pipeline {} +#[cfg(feature = "i-keys")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] impl KeysInterface for Pipeline {} +#[cfg(feature = "i-lists")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] impl ListInterface for Pipeline {} +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl MemoryInterface for Pipeline {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl AuthInterface for Pipeline {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl ServerInterface for Pipeline {} +#[cfg(feature = "i-slowlog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] impl SlowlogInterface for Pipeline {} 
+#[cfg(feature = "i-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] impl SetsInterface for Pipeline {} +#[cfg(feature = "i-sorted-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] impl SortedSetsInterface for Pipeline {} +#[cfg(feature = "i-streams")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] impl StreamsInterface for Pipeline {} +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] impl FunctionInterface for Pipeline {} -#[cfg(feature = "redis-json")] -#[cfg_attr(docsrs, doc(cfg(feature = "redis-json")))] +#[cfg(feature = "i-redis-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] impl RedisJsonInterface for Pipeline {} -#[cfg(feature = "time-series")] -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg(feature = "i-time-series")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] impl TimeSeriesInterface for Pipeline {} impl Pipeline { @@ -213,7 +222,7 @@ impl Pipeline { /// let _: () = pipeline.hgetall("bar").await?; // this will error since `bar` is an integer /// /// let results = pipeline.try_all::().await; - /// assert_eq!(results[0].unwrap().convert::(), 1); + /// assert_eq!(results[0].clone().unwrap().convert::(), 1); /// assert!(results[1].is_err()); /// /// Ok(()) @@ -270,7 +279,7 @@ async fn try_send_all( if let Err(e) = interfaces::send_to_router(inner, command) { return vec![Err(e)]; }; - let frame = match utils::apply_timeout(rx, timeout_dur).await { + let frame = match utils::timeout(rx, timeout_dur).await { Ok(result) => match result { Ok(f) => f, Err(e) => return vec![Err(e)], @@ -295,7 +304,7 @@ async fn send_all(inner: &Arc, commands: VecDeque, - counter: Arc, + clients: Vec, + counter: Arc, prefer_connected: Arc, } @@ -39,6 +37,7 @@ struct RedisPoolInner { /// * [PubsubInterface](crate::interfaces::PubsubInterface) /// * [EventInterface](crate::interfaces::EventInterface) /// * [ClientInterface](crate::interfaces::ClientInterface) +/// * 
[AuthInterface](crate::interfaces::AuthInterface) /// /// In some cases, such as [publish](crate::interfaces::PubsubInterface::publish), callers can work around this by /// adding a call to [next](Self::next), but in other scenarios this may not work. As a general rule, any commands @@ -87,7 +86,7 @@ impl RedisPool { Err(RedisError::new(RedisErrorKind::Config, "Pool cannot be empty.")) } else { let mut clients = Vec::with_capacity(size); - for _ in 0 .. size { + for _ in 0..size { clients.push(RedisClient::new( config.clone(), perf.clone(), @@ -133,7 +132,7 @@ impl RedisPool { pub fn next_connected(&self) -> &RedisClient { let mut idx = utils::incr_atomic(&self.inner.counter) % self.inner.clients.len(); - for _ in 0 .. self.inner.clients.len() { + for _ in 0..self.inner.clients.len() { let client = &self.inner.clients[idx]; if client.is_connected() { return client; @@ -162,7 +161,6 @@ impl RedisPool { } } -#[async_trait] impl ClientLike for RedisPool { #[doc(hidden)] fn inner(&self) -> &Arc { @@ -200,6 +198,7 @@ impl ClientLike for RedisPool { /// Override the DNS resolution logic for all clients in the pool. #[cfg(feature = "dns")] #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] + #[allow(refining_impl_trait)] async fn set_resolver(&self, resolver: Arc) { for client in self.inner.clients.iter() { client.set_resolver(resolver.clone()).await; @@ -230,14 +229,14 @@ impl ClientLike for RedisPool { /// /// When running against a cluster this function will also refresh the cached cluster routing table. async fn force_reconnection(&self) -> RedisResult<()> { - let _ = try_join_all(self.inner.clients.iter().map(|c| c.force_reconnection())).await?; + try_join_all(self.inner.clients.iter().map(|c| c.force_reconnection())).await?; Ok(()) } /// Wait for all the clients to connect to the server. 
async fn wait_for_connect(&self) -> RedisResult<()> { - let _ = try_join_all(self.inner.clients.iter().map(|c| c.wait_for_connect())).await?; + try_join_all(self.inner.clients.iter().map(|c| c.wait_for_connect())).await?; Ok(()) } @@ -289,13 +288,12 @@ impl ClientLike for RedisPool { /// This function will also close all error, pubsub message, and reconnection event streams on all clients in the /// pool. async fn quit(&self) -> RedisResult<()> { - let _ = join_all(self.inner.clients.iter().map(|c| c.quit())).await; + join_all(self.inner.clients.iter().map(|c| c.quit())).await; Ok(()) } } -#[async_trait] impl HeartbeatInterface for RedisPool { async fn enable_heartbeat(&self, interval: Duration, break_on_error: bool) -> RedisResult<()> { let mut interval = tokio_interval(interval); @@ -312,29 +310,63 @@ impl HeartbeatInterface for RedisPool { } } +#[cfg(feature = "i-acl")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-acl")))] impl AclInterface for RedisPool {} +#[cfg(feature = "i-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] +impl ClientInterface for RedisPool {} +#[cfg(feature = "i-cluster")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] impl ClusterInterface for RedisPool {} +#[cfg(feature = "i-config")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] impl ConfigInterface for RedisPool {} +#[cfg(feature = "i-geo")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] impl GeoInterface for RedisPool {} +#[cfg(feature = "i-hashes")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] impl HashesInterface for RedisPool {} +#[cfg(feature = "i-hyperloglog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] impl HyperloglogInterface for RedisPool {} +#[cfg(feature = "transactions")] +#[cfg_attr(docsrs, doc(cfg(feature = "transactions")))] +impl TransactionInterface for RedisPool {} +#[cfg(feature = "i-keys")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] impl KeysInterface for RedisPool {} +#[cfg(feature = "i-scripts")] 
+#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] impl LuaInterface for RedisPool {} +#[cfg(feature = "i-lists")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] impl ListInterface for RedisPool {} +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl MemoryInterface for RedisPool {} -impl AuthInterface for RedisPool {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl ServerInterface for RedisPool {} +#[cfg(feature = "i-slowlog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] impl SlowlogInterface for RedisPool {} +#[cfg(feature = "i-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] impl SetsInterface for RedisPool {} +#[cfg(feature = "i-sorted-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] impl SortedSetsInterface for RedisPool {} +#[cfg(feature = "i-streams")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] impl StreamsInterface for RedisPool {} +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] impl FunctionInterface for RedisPool {} -#[cfg(feature = "transactions")] -#[cfg_attr(docsrs, doc(cfg(feature = "transactions")))] -impl TransactionInterface for RedisPool {} -#[cfg(feature = "redis-json")] -#[cfg_attr(docsrs, doc(cfg(feature = "redis-json")))] +#[cfg(feature = "i-redis-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] impl RedisJsonInterface for RedisPool {} -#[cfg(feature = "time-series")] -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg(feature = "i-time-series")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] impl TimeSeriesInterface for RedisPool {} diff --git a/src/clients/pubsub.rs b/src/clients/pubsub.rs index 6dbe445d..1c89bb0e 100644 --- a/src/clients/pubsub.rs +++ b/src/clients/pubsub.rs @@ -12,9 +12,6 @@ use parking_lot::RwLock; use std::{collections::BTreeSet, fmt, fmt::Formatter, mem, sync::Arc}; use tokio::task::JoinHandle; -#[cfg(feature = 
"client-tracking")] -use crate::interfaces::TrackingInterface; - type ChannelSet = Arc>>; /// A subscriber client that will manage subscription state to any [pubsub](https://redis.io/docs/manual/pubsub/) channels or patterns for the caller. @@ -58,10 +55,10 @@ type ChannelSet = Arc>>; #[derive(Clone)] #[cfg_attr(docsrs, doc(cfg(feature = "subscriber-client")))] pub struct SubscriberClient { - channels: ChannelSet, - patterns: ChannelSet, + channels: ChannelSet, + patterns: ChannelSet, shard_channels: ChannelSet, - inner: Arc, + inner: Arc, } impl fmt::Debug for SubscriberClient { @@ -83,39 +80,77 @@ impl ClientLike for SubscriberClient { } impl EventInterface for SubscriberClient {} +#[cfg(feature = "i-acl")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-acl")))] impl AclInterface for SubscriberClient {} +#[cfg(feature = "i-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] impl ClientInterface for SubscriberClient {} +#[cfg(feature = "i-cluster")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] impl ClusterInterface for SubscriberClient {} +#[cfg(feature = "i-config")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] impl ConfigInterface for SubscriberClient {} +#[cfg(feature = "i-geo")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] impl GeoInterface for SubscriberClient {} +#[cfg(feature = "i-hashes")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] impl HashesInterface for SubscriberClient {} +#[cfg(feature = "i-hyperloglog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] impl HyperloglogInterface for SubscriberClient {} impl MetricsInterface for SubscriberClient {} +#[cfg(feature = "transactions")] +#[cfg_attr(docsrs, doc(cfg(feature = "transactions")))] impl TransactionInterface for SubscriberClient {} +#[cfg(feature = "i-keys")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] impl KeysInterface for SubscriberClient {} +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] impl LuaInterface 
for SubscriberClient {} +#[cfg(feature = "i-lists")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] impl ListInterface for SubscriberClient {} +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl MemoryInterface for SubscriberClient {} impl AuthInterface for SubscriberClient {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl ServerInterface for SubscriberClient {} +#[cfg(feature = "i-slowlog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] impl SlowlogInterface for SubscriberClient {} +#[cfg(feature = "i-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] impl SetsInterface for SubscriberClient {} +#[cfg(feature = "i-sorted-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] impl SortedSetsInterface for SubscriberClient {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl HeartbeatInterface for SubscriberClient {} +#[cfg(feature = "i-streams")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] impl StreamsInterface for SubscriberClient {} +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] impl FunctionInterface for SubscriberClient {} -#[cfg(feature = "redis-json")] -#[cfg_attr(docsrs, doc(cfg(feature = "redis-json")))] +#[cfg(feature = "i-redis-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] impl RedisJsonInterface for SubscriberClient {} -#[cfg(feature = "time-series")] -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg(feature = "i-time-series")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] impl TimeSeriesInterface for SubscriberClient {} - -#[cfg(feature = "client-tracking")] -#[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] +#[cfg(feature = "i-tracking")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] impl TrackingInterface for SubscriberClient {} -#[async_trait] +#[cfg(feature = "i-pubsub")] +#[cfg_attr(docsrs, 
doc(cfg(feature = "i-pubsub")))] impl PubsubInterface for SubscriberClient { async fn subscribe(&self, channels: S) -> RedisResult<()> where @@ -137,42 +172,42 @@ impl PubsubInterface for SubscriberClient { result } - async fn psubscribe(&self, patterns: S) -> RedisResult<()> + async fn unsubscribe(&self, channels: S) -> RedisResult<()> where S: Into + Send, { - into!(patterns); + into!(channels); - let result = commands::pubsub::psubscribe(self, patterns.clone()).await; + let result = commands::pubsub::unsubscribe(self, channels.clone()).await; if result.is_ok() { - let mut guard = self.patterns.write(); + let mut guard = self.channels.write(); - for pattern in patterns.inner().into_iter() { - if let Some(pattern) = pattern.as_bytes_str() { - guard.insert(pattern); + if channels.len() == 0 { + guard.clear(); + } else { + for channel in channels.inner().into_iter() { + if let Some(channel) = channel.as_bytes_str() { + let _ = guard.remove(&channel); + } } } } result } - async fn unsubscribe(&self, channels: S) -> RedisResult<()> + async fn psubscribe(&self, patterns: S) -> RedisResult<()> where S: Into + Send, { - into!(channels); + into!(patterns); - let result = commands::pubsub::unsubscribe(self, channels.clone()).await; + let result = commands::pubsub::psubscribe(self, patterns.clone()).await; if result.is_ok() { - let mut guard = self.channels.write(); + let mut guard = self.patterns.write(); - if channels.len() == 0 { - guard.clear(); - } else { - for channel in channels.inner().into_iter() { - if let Some(channel) = channel.as_bytes_str() { - let _ = guard.remove(&channel); - } + for pattern in patterns.inner().into_iter() { + if let Some(pattern) = pattern.as_bytes_str() { + guard.insert(pattern); } } } @@ -256,10 +291,10 @@ impl SubscriberClient { policy: Option, ) -> SubscriberClient { SubscriberClient { - channels: Arc::new(RwLock::new(BTreeSet::new())), - patterns: Arc::new(RwLock::new(BTreeSet::new())), + channels: 
Arc::new(RwLock::new(BTreeSet::new())), + patterns: Arc::new(RwLock::new(BTreeSet::new())), shard_channels: Arc::new(RwLock::new(BTreeSet::new())), - inner: RedisClientInner::new(config, perf.unwrap_or_default(), connection.unwrap_or_default(), policy), + inner: RedisClientInner::new(config, perf.unwrap_or_default(), connection.unwrap_or_default(), policy), } } diff --git a/src/clients/redis.rs b/src/clients/redis.rs index 59e311b5..6fa08e64 100644 --- a/src/clients/redis.rs +++ b/src/clients/redis.rs @@ -4,7 +4,7 @@ use crate::{ error::{RedisError, RedisErrorKind}, interfaces::*, modules::inner::RedisClientInner, - prelude::{ClientLike, StreamsInterface}, + prelude::ClientLike, types::*, }; use bytes_utils::Str; @@ -13,7 +13,7 @@ use std::{fmt, fmt::Formatter, sync::Arc}; #[cfg(feature = "replicas")] use crate::clients::Replicas; -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] use crate::interfaces::TrackingInterface; /// A cheaply cloneable Redis client struct. @@ -58,39 +58,77 @@ impl ClientLike for RedisClient { } impl EventInterface for RedisClient {} +#[cfg(feature = "i-redis-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] +impl RedisJsonInterface for RedisClient {} +#[cfg(feature = "i-time-series")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] +impl TimeSeriesInterface for RedisClient {} +#[cfg(feature = "i-acl")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-acl")))] impl AclInterface for RedisClient {} +#[cfg(feature = "i-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] impl ClientInterface for RedisClient {} +#[cfg(feature = "i-cluster")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] impl ClusterInterface for RedisClient {} -impl PubsubInterface for RedisClient {} +#[cfg(feature = "i-config")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] impl ConfigInterface for RedisClient {} +#[cfg(feature = "i-geo")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] impl GeoInterface for 
RedisClient {} +#[cfg(feature = "i-hashes")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] impl HashesInterface for RedisClient {} +#[cfg(feature = "i-hyperloglog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] impl HyperloglogInterface for RedisClient {} impl MetricsInterface for RedisClient {} #[cfg(feature = "transactions")] #[cfg_attr(docsrs, doc(cfg(feature = "transactions")))] impl TransactionInterface for RedisClient {} +#[cfg(feature = "i-keys")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] impl KeysInterface for RedisClient {} +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] impl LuaInterface for RedisClient {} +#[cfg(feature = "i-lists")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] impl ListInterface for RedisClient {} +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl MemoryInterface for RedisClient {} impl AuthInterface for RedisClient {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl ServerInterface for RedisClient {} +#[cfg(feature = "i-slowlog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] impl SlowlogInterface for RedisClient {} +#[cfg(feature = "i-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] impl SetsInterface for RedisClient {} +#[cfg(feature = "i-sorted-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] impl SortedSetsInterface for RedisClient {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl HeartbeatInterface for RedisClient {} +#[cfg(feature = "i-streams")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] impl StreamsInterface for RedisClient {} +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] impl FunctionInterface for RedisClient {} -#[cfg(feature = "redis-json")] -#[cfg_attr(docsrs, doc(cfg(feature = "redis-json")))] -impl RedisJsonInterface for RedisClient {} -#[cfg(feature = 
"time-series")] -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] -impl TimeSeriesInterface for RedisClient {} -#[cfg(feature = "client-tracking")] -#[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] +#[cfg(feature = "i-tracking")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] impl TrackingInterface for RedisClient {} +#[cfg(feature = "i-pubsub")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-pubsub")))] +impl PubsubInterface for RedisClient {} impl RedisClient { /// Create a new client instance without connecting to the server. @@ -287,7 +325,7 @@ impl RedisClient { S: Into, { WithOptions { - client: self.clone(), + client: self.clone(), options: Options { cluster_node: Some(server.into()), ..Default::default() diff --git a/src/clients/replica.rs b/src/clients/replica.rs index 183c7bee..6503541a 100644 --- a/src/clients/replica.rs +++ b/src/clients/replica.rs @@ -48,27 +48,57 @@ impl ClientLike for Replicas { } } +#[cfg(feature = "i-redis-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] +impl RedisJsonInterface for Replicas {} +#[cfg(feature = "i-time-series")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] +impl TimeSeriesInterface for Replicas {} +#[cfg(feature = "i-cluster")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] +impl ClusterInterface for Replicas {} +#[cfg(feature = "i-config")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] +impl ConfigInterface for Replicas {} +#[cfg(feature = "i-geo")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] impl GeoInterface for Replicas {} +#[cfg(feature = "i-hashes")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] impl HashesInterface for Replicas {} +#[cfg(feature = "i-hyperloglog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] impl HyperloglogInterface for Replicas {} -impl MetricsInterface for Replicas {} +#[cfg(feature = "i-keys")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] impl KeysInterface for Replicas {} +#[cfg(feature = 
"i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] impl LuaInterface for Replicas {} -impl FunctionInterface for Replicas {} +#[cfg(feature = "i-lists")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] impl ListInterface for Replicas {} +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl MemoryInterface for Replicas {} -impl AuthInterface for Replicas {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl ServerInterface for Replicas {} +#[cfg(feature = "i-slowlog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] impl SlowlogInterface for Replicas {} +#[cfg(feature = "i-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] impl SetsInterface for Replicas {} +#[cfg(feature = "i-sorted-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] impl SortedSetsInterface for Replicas {} +#[cfg(feature = "i-streams")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] impl StreamsInterface for Replicas {} -#[cfg(feature = "redis-json")] -#[cfg_attr(docsrs, doc(cfg(feature = "redis-json")))] -impl RedisJsonInterface for Replicas {} -#[cfg(feature = "time-series")] -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] -impl TimeSeriesInterface for Replicas {} +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] +impl FunctionInterface for Replicas {} impl Replicas { /// Read a mapping of replica server IDs to primary server IDs. @@ -88,10 +118,10 @@ impl Replicas { /// Sync the cached replica routing table with the server(s). /// - /// This will also disconnect and reset any replica connections. - pub async fn sync(&self) -> Result<(), RedisError> { + /// If `reset: true` the client will forcefully disconnect from replicas even if the connections could otherwise be reused. 
+ pub async fn sync(&self, reset: bool) -> Result<(), RedisError> { let (tx, rx) = oneshot_channel(); - let cmd = RouterCommand::SyncReplicas { tx }; + let cmd = RouterCommand::SyncReplicas { tx, reset }; interfaces::send_to_router(&self.inner, cmd)?; rx.await? } diff --git a/src/clients/sentinel.rs b/src/clients/sentinel.rs index 8ddba2b6..bbc4c5ec 100644 --- a/src/clients/sentinel.rs +++ b/src/clients/sentinel.rs @@ -45,10 +45,18 @@ impl<'a> From<&'a Arc> for SentinelClient { impl EventInterface for SentinelClient {} impl SentinelInterface for SentinelClient {} impl MetricsInterface for SentinelClient {} +#[cfg(feature = "i-acl")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-acl")))] impl AclInterface for SentinelClient {} +#[cfg(feature = "i-pubsub")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-pubsub")))] impl PubsubInterface for SentinelClient {} +#[cfg(feature = "i-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] impl ClientInterface for SentinelClient {} impl AuthInterface for SentinelClient {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl HeartbeatInterface for SentinelClient {} impl SentinelClient { diff --git a/src/clients/transaction.rs b/src/clients/transaction.rs index 1ad8ece9..f4b94dea 100644 --- a/src/clients/transaction.rs +++ b/src/clients/transaction.rs @@ -22,10 +22,10 @@ use tokio::sync::oneshot::channel as oneshot_channel; #[cfg(feature = "transactions")] #[cfg_attr(docsrs, doc(cfg(feature = "transactions")))] pub struct Transaction { - id: u64, - inner: Arc, - commands: Arc>>, - watched: Arc>>, + id: u64, + inner: Arc, + commands: Arc>>, + watched: Arc>>, hash_slot: Arc>>, } @@ -78,39 +78,68 @@ impl ClientLike for Transaction { } } +#[cfg(feature = "i-acl")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-acl")))] impl AclInterface for Transaction {} +#[cfg(feature = "i-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] impl ClientInterface for Transaction {} +#[cfg(feature = "i-pubsub")] 
+#[cfg_attr(docsrs, doc(cfg(feature = "i-pubsub")))] impl PubsubInterface for Transaction {} +#[cfg(feature = "i-config")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] impl ConfigInterface for Transaction {} +#[cfg(feature = "i-geo")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] impl GeoInterface for Transaction {} +#[cfg(feature = "i-hashes")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] impl HashesInterface for Transaction {} +#[cfg(feature = "i-hyperloglog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] impl HyperloglogInterface for Transaction {} -impl MetricsInterface for Transaction {} +#[cfg(feature = "i-keys")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] impl KeysInterface for Transaction {} +#[cfg(feature = "i-lists")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] impl ListInterface for Transaction {} +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl MemoryInterface for Transaction {} impl AuthInterface for Transaction {} +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] impl ServerInterface for Transaction {} +#[cfg(feature = "i-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] impl SetsInterface for Transaction {} +#[cfg(feature = "i-sorted-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] impl SortedSetsInterface for Transaction {} +#[cfg(feature = "i-streams")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] impl StreamsInterface for Transaction {} +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] impl FunctionInterface for Transaction {} -#[cfg(feature = "redis-json")] -#[cfg_attr(docsrs, doc(cfg(feature = "redis-json")))] +#[cfg(feature = "i-redis-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] impl RedisJsonInterface for Transaction {} -#[cfg(feature = "time-series")] -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg(feature = 
"i-time-series")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] impl TimeSeriesInterface for Transaction {} impl Transaction { /// Create a new transaction. pub(crate) fn from_inner(inner: &Arc) -> Self { Transaction { - inner: inner.clone(), - commands: Arc::new(Mutex::new(VecDeque::new())), - watched: Arc::new(Mutex::new(VecDeque::new())), + inner: inner.clone(), + commands: Arc::new(Mutex::new(VecDeque::new())), + watched: Arc::new(Mutex::new(VecDeque::new())), hash_slot: Arc::new(Mutex::new(None)), - id: utils::random_u64(u64::MAX), + id: utils::random_u64(u64::MAX), } } @@ -310,6 +339,6 @@ async fn exec( let timeout_dur = trx_options.timeout.unwrap_or_else(|| inner.default_command_timeout()); interfaces::send_to_router(inner, command)?; - let frame = utils::apply_timeout(rx, timeout_dur).await??; + let frame = utils::timeout(rx, timeout_dur).await??; protocol_utils::frame_to_results(frame) } diff --git a/src/commands/impls/client.rs b/src/commands/impls/client.rs index 174c8b36..c20b2033 100644 --- a/src/commands/impls/client.rs +++ b/src/commands/impls/client.rs @@ -1,15 +1,13 @@ use super::*; use crate::{ - interfaces, protocol::{ - command::{RedisCommand, RedisCommandKind, RouterCommand}, + command::{RedisCommand, RedisCommandKind}, utils as protocol_utils, }, types::*, utils, }; use bytes_utils::Str; -use tokio::sync::oneshot::channel as oneshot_channel; value_cmd!(client_id, ClientID); value_cmd!(client_info, ClientInfo); @@ -132,11 +130,3 @@ pub async fn unblock_self(client: &C, flag: Option(client: &C) -> Result, RedisError> { - let (tx, rx) = oneshot_channel(); - let command = RouterCommand::Connections { tx }; - interfaces::send_to_router(client.inner(), command)?; - - rx.await.map_err(|e| e.into()) -} diff --git a/src/commands/impls/hashes.rs b/src/commands/impls/hashes.rs index fec1537e..1943a3da 100644 --- a/src/commands/impls/hashes.rs +++ b/src/commands/impls/hashes.rs @@ -4,7 +4,17 @@ use crate::{ types::*, utils, }; -use 
std::convert::TryInto; +use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame}; +use std::{convert::TryInto, str}; + +fn frame_is_queued(frame: &Resp3Frame) -> bool { + match frame { + Resp3Frame::SimpleString { ref data, .. } | Resp3Frame::BlobString { ref data, .. } => { + str::from_utf8(data).ok().map(|s| s == QUEUED).unwrap_or(false) + }, + _ => false, + } +} pub async fn hdel(client: &C, key: RedisKey, fields: MultipleKeys) -> Result { let frame = utils::request_response(client, move || { @@ -35,7 +45,7 @@ pub async fn hget(client: &C, key: RedisKey, field: RedisKey) -> pub async fn hgetall(client: &C, key: RedisKey) -> Result { let frame = utils::request_response(client, move || Ok((RedisCommandKind::HGetAll, vec![key.into()]))).await?; - if protocol_utils::frame_is_queued(&frame) { + if frame.as_str().map(|s| s == QUEUED).unwrap_or(false) { protocol_utils::frame_to_results(frame) } else { Ok(RedisValue::Map(protocol_utils::frame_to_map(frame)?)) @@ -156,7 +166,7 @@ pub async fn hrandfield( .await?; if has_count { - if has_values && !protocol_utils::frame_is_queued(&frame) { + if has_values && frame.as_str().map(|s| s != QUEUED).unwrap_or(true) { let frame = protocol_utils::flatten_frame(frame); protocol_utils::frame_to_map(frame).map(RedisValue::Map) } else { diff --git a/src/commands/impls/keys.rs b/src/commands/impls/keys.rs index 846401cf..96f30f9a 100644 --- a/src/commands/impls/keys.rs +++ b/src/commands/impls/keys.rs @@ -7,6 +7,17 @@ use crate::{ }; use std::convert::TryInto; +fn check_empty_keys(keys: &MultipleKeys) -> Result<(), RedisError> { + if keys.len() == 0 { + Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "At least one key is required.", + )) + } else { + Ok(()) + } +} + value_cmd!(randomkey, Randomkey); pub async fn get(client: &C, key: RedisKey) -> Result { @@ -48,7 +59,7 @@ pub async fn set( } pub async fn del(client: &C, keys: MultipleKeys) -> Result { - utils::check_empty_keys(&keys)?; + 
check_empty_keys(&keys)?; let args: Vec = keys.inner().drain(..).map(|k| k.into()).collect(); let frame = utils::request_response(client, move || Ok((RedisCommandKind::Del, args))).await?; @@ -56,7 +67,7 @@ pub async fn del(client: &C, keys: MultipleKeys) -> Result(client: &C, keys: MultipleKeys) -> Result { - utils::check_empty_keys(&keys)?; + check_empty_keys(&keys)?; let args: Vec = keys.inner().drain(..).map(|k| k.into()).collect(); let frame = utils::request_response(client, move || Ok((RedisCommandKind::Unlink, args))).await?; @@ -134,7 +145,7 @@ pub async fn expire_at(client: &C, key: RedisKey, timestamp: i64) } pub async fn exists(client: &C, keys: MultipleKeys) -> Result { - utils::check_empty_keys(&keys)?; + check_empty_keys(&keys)?; let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len()); @@ -199,11 +210,10 @@ pub async fn getrange( end: usize, ) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::GetRange, vec![ - key.into(), - start.try_into()?, - end.try_into()?, - ])) + Ok(( + RedisCommandKind::GetRange, + vec![key.into(), start.try_into()?, end.try_into()?], + )) }) .await?; @@ -233,10 +243,11 @@ pub async fn rename( source: RedisKey, destination: RedisKey, ) -> Result { - args_values_cmd(client, RedisCommandKind::Rename, vec![ - source.into(), - destination.into(), - ]) + args_values_cmd( + client, + RedisCommandKind::Rename, + vec![source.into(), destination.into()], + ) .await } @@ -245,10 +256,11 @@ pub async fn renamenx( source: RedisKey, destination: RedisKey, ) -> Result { - args_values_cmd(client, RedisCommandKind::Renamenx, vec![ - source.into(), - destination.into(), - ]) + args_values_cmd( + client, + RedisCommandKind::Renamenx, + vec![source.into(), destination.into()], + ) .await } @@ -261,7 +273,7 @@ pub async fn strlen(client: &C, key: RedisKey) -> Result(client: &C, keys: MultipleKeys) -> Result { - utils::check_empty_keys(&keys)?; + 
check_empty_keys(&keys)?; let frame = utils::request_response(client, move || { let mut args = Vec::with_capacity(keys.len()); diff --git a/src/commands/impls/lists.rs b/src/commands/impls/lists.rs index 833e117b..3e20d816 100644 --- a/src/commands/impls/lists.rs +++ b/src/commands/impls/lists.rs @@ -4,8 +4,94 @@ use crate::{ types::*, utils, }; +use bytes_utils::Str; use std::convert::TryInto; +pub async fn sort_ro( + client: &C, + key: RedisKey, + by: Option, + limit: Option, + get: MultipleStrings, + order: Option, + alpha: bool, +) -> Result { + let frame = utils::request_response(client, move || { + let mut args = Vec::with_capacity(8 + get.len() * 2); + args.push(key.into()); + + if let Some(pattern) = by { + args.push(static_val!("BY")); + args.push(pattern.into()); + } + if let Some((offset, count)) = limit { + args.push(static_val!(LIMIT)); + args.push(offset.into()); + args.push(count.into()); + } + for pattern in get.inner().into_iter() { + args.push(static_val!(GET)); + args.push(pattern.into()); + } + if let Some(order) = order { + args.push(order.to_str().into()); + } + if alpha { + args.push(static_val!("ALPHA")); + } + + Ok((RedisCommandKind::SortRo, args)) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + +pub async fn sort( + client: &C, + key: RedisKey, + by: Option, + limit: Option, + get: MultipleStrings, + order: Option, + alpha: bool, + store: Option, +) -> Result { + let frame = utils::request_response(client, move || { + let mut args = Vec::with_capacity(10 + get.len() * 2); + args.push(key.into()); + + if let Some(pattern) = by { + args.push(static_val!("BY")); + args.push(pattern.into()); + } + if let Some((offset, count)) = limit { + args.push(static_val!(LIMIT)); + args.push(offset.into()); + args.push(count.into()); + } + for pattern in get.inner().into_iter() { + args.push(static_val!(GET)); + args.push(pattern.into()); + } + if let Some(order) = order { + args.push(order.to_str().into()); + } + if alpha { + 
args.push(static_val!("ALPHA")); + } + if let Some(dest) = store { + args.push(static_val!(STORE)); + args.push(dest.into()); + } + + Ok((RedisCommandKind::Sort, args)) + }) + .await?; + + protocol_utils::frame_to_results(frame) +} + pub async fn blmpop( client: &C, timeout: f64, @@ -81,11 +167,10 @@ pub async fn brpoplpush( let timeout: RedisValue = timeout.try_into()?; let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::BrPopLPush, vec![ - source.into(), - destination.into(), - timeout, - ])) + Ok(( + RedisCommandKind::BrPopLPush, + vec![source.into(), destination.into(), timeout], + )) }) .await?; @@ -158,12 +243,10 @@ pub async fn linsert( element: RedisValue, ) -> Result { let frame = utils::request_response(client, move || { - Ok((RedisCommandKind::LInsert, vec![ - key.into(), - location.to_str().into(), - pivot, - element, - ])) + Ok(( + RedisCommandKind::LInsert, + vec![key.into(), location.to_str().into(), pivot, element], + )) }) .await?; diff --git a/src/commands/impls/lua.rs b/src/commands/impls/lua.rs index b1b834bd..ddff850b 100644 --- a/src/commands/impls/lua.rs +++ b/src/commands/impls/lua.rs @@ -8,7 +8,6 @@ use crate::{ command::{RedisCommand, RedisCommandKind}, hashers::ClusterHash, responders::ResponseKind, - types::*, utils as protocol_utils, }, types::*, @@ -16,6 +15,7 @@ use crate::{ }; use bytes::Bytes; use bytes_utils::Str; +use redis_protocol::resp3::types::BytesFrame as Resp3Frame; use std::{convert::TryInto, str, sync::Arc}; use tokio::sync::oneshot::channel as oneshot_channel; @@ -26,7 +26,7 @@ pub fn check_key_slot(inner: &Arc, keys: &[RedisKey]) -> Resul inner.with_cluster_state(|state| { let (mut cmd_server, mut cmd_slot) = (None, None); for key in keys.iter() { - let key_slot = redis_keyslot(key.as_bytes()); + let key_slot = redis_protocol::redis_keyslot(key.as_bytes()); if let Some(server) = state.get_server(key_slot) { if let Some(ref cmd_server) = cmd_server { @@ -72,7 +72,7 @@ pub async fn 
script_load_cluster(client: &C, script: Str) -> Resu let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; - let _ = utils::apply_timeout(rx, timeout_dur).await??; + let _ = utils::timeout(rx, timeout_dur).await??; Ok(hash.into()) } @@ -89,7 +89,7 @@ pub async fn script_kill_cluster(client: &C) -> Result<(), RedisE let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; - let _ = utils::apply_timeout(rx, timeout_dur).await??; + let _ = utils::timeout(rx, timeout_dur).await??; Ok(()) } @@ -117,7 +117,7 @@ pub async fn script_flush_cluster(client: &C, r#async: bool) -> R let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; - let _ = utils::apply_timeout(rx, timeout_dur).await??; + let _ = utils::timeout(rx, timeout_dur).await??; Ok(()) } @@ -292,7 +292,7 @@ pub async fn function_delete_cluster(client: &C, library_name: St let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; - let _ = utils::apply_timeout(rx, timeout_dur).await??; + let _ = utils::timeout(rx, timeout_dur).await??; Ok(()) } @@ -400,8 +400,8 @@ pub async fn function_load_cluster( client.send_command(command)?; // each value in the response array is the response from a different primary node - match utils::apply_timeout(rx, timeout_dur).await?? { - Frame::Array { mut data, .. } => { + match utils::timeout(rx, timeout_dur).await?? { + Resp3Frame::Array { mut data, .. } => { if let Some(frame) = data.pop() { protocol_utils::frame_to_results(frame) } else { @@ -411,8 +411,8 @@ pub async fn function_load_cluster( )) } }, - Frame::SimpleError { data, .. } => Err(protocol_utils::pretty_error(&data)), - Frame::BlobError { data, .. } => { + Resp3Frame::SimpleError { data, .. } => Err(protocol_utils::pretty_error(&data)), + Resp3Frame::BlobError { data, .. 
} => { let parsed = str::from_utf8(&data)?; Err(protocol_utils::pretty_error(parsed)) }, @@ -454,7 +454,7 @@ pub async fn function_restore_cluster( let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; - let _ = utils::apply_timeout(rx, timeout_dur).await??; + let _ = utils::timeout(rx, timeout_dur).await??; Ok(()) } diff --git a/src/commands/impls/mod.rs b/src/commands/impls/mod.rs index bf3cea4a..d97f57de 100644 --- a/src/commands/impls/mod.rs +++ b/src/commands/impls/mod.rs @@ -1,3 +1,6 @@ +#![allow(unused_macros)] +#![allow(dead_code)] + use crate::{ error::RedisError, interfaces::ClientLike, @@ -9,12 +12,10 @@ use crate::{ pub static MATCH: &str = "MATCH"; pub static COUNT: &str = "COUNT"; pub static TYPE: &str = "TYPE"; +#[cfg(any(feature = "i-geo", feature = "i-sorted-sets"))] pub static CHANGED: &str = "CH"; -pub static INCR: &str = "INCR"; -pub static WITH_SCORES: &str = "WITHSCORES"; +#[cfg(any(feature = "i-lists", feature = "i-sorted-sets", feature = "i-streams"))] pub static LIMIT: &str = "LIMIT"; -pub static AGGREGATE: &str = "AGGREGATE"; -pub static WEIGHTS: &str = "WEIGHTS"; pub static GET: &str = "GET"; pub static RESET: &str = "RESET"; pub static TO: &str = "TO"; @@ -162,31 +163,46 @@ pub async fn args_ok_cmd( protocol_utils::expect_ok(&response) } +#[cfg(feature = "i-acl")] pub mod acl; +#[cfg(feature = "i-client")] pub mod client; +#[cfg(feature = "i-cluster")] pub mod cluster; +#[cfg(feature = "i-config")] pub mod config; +#[cfg(feature = "i-geo")] pub mod geo; +#[cfg(feature = "i-hashes")] pub mod hashes; +#[cfg(feature = "i-hyperloglog")] pub mod hyperloglog; +#[cfg(feature = "i-keys")] pub mod keys; +#[cfg(feature = "i-lists")] pub mod lists; +#[cfg(feature = "i-scripts")] pub mod lua; +#[cfg(feature = "i-memory")] pub mod memory; +#[cfg(feature = "i-pubsub")] pub mod pubsub; +#[cfg(feature = "i-redis-json")] +pub mod redis_json; pub mod scan; +#[cfg(feature = "sentinel-client")] +pub mod sentinel; pub 
mod server; +#[cfg(feature = "i-sets")] pub mod sets; +#[cfg(feature = "i-slowlog")] pub mod slowlog; +#[cfg(feature = "i-sorted-sets")] pub mod sorted_sets; +#[cfg(feature = "i-streams")] pub mod streams; pub mod strings; - -#[cfg(feature = "redis-json")] -pub mod redis_json; -#[cfg(feature = "sentinel-client")] -pub mod sentinel; -#[cfg(feature = "time-series")] +#[cfg(feature = "i-time-series")] pub mod timeseries; -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] pub mod tracking; diff --git a/src/commands/impls/scan.rs b/src/commands/impls/scan.rs index a00ec236..20cfe825 100644 --- a/src/commands/impls/scan.rs +++ b/src/commands/impls/scan.rs @@ -75,11 +75,11 @@ pub fn scan_cluster( for slot in hash_slots.into_iter() { _trace!(inner, "Scan cluster hash slot server: {}", slot); let response = ResponseKind::KeyScan(KeyScanInner { - hash_slot: Some(slot), - args: args.clone(), + hash_slot: Some(slot), + args: args.clone(), cursor_idx: 0, - tx: tx.clone(), - server: None, + tx: tx.clone(), + server: None, }); let command: RedisCommand = (RedisCommandKind::Scan, Vec::new(), response).into(); @@ -103,7 +103,7 @@ pub fn scan( let hash_slot = if inner.config.server.is_clustered() { if utils::clustered_scan_pattern_has_hash_tag(inner, &pattern) { - Some(redis_keyslot(pattern.as_bytes())) + Some(redis_protocol::redis_keyslot(pattern.as_bytes())) } else { None } diff --git a/src/commands/impls/server.rs b/src/commands/impls/server.rs index d15b0432..7a5181c2 100644 --- a/src/commands/impls/server.rs +++ b/src/commands/impls/server.rs @@ -17,6 +17,14 @@ use bytes_utils::Str; use std::sync::Arc; use tokio::sync::oneshot::channel as oneshot_channel; +pub async fn active_connections(client: &C) -> Result, RedisError> { + let (tx, rx) = oneshot_channel(); + let command = RouterCommand::Connections { tx }; + interfaces::send_to_router(client.inner(), command)?; + + rx.await.map_err(|e| e.into()) +} + pub async fn quit(client: &C) -> Result<(), RedisError> { 
let inner = client.inner().clone(); _debug!(inner, "Closing Redis connection with Quit command."); @@ -34,7 +42,7 @@ pub async fn quit(client: &C) -> Result<(), RedisError> { let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; - let _ = utils::apply_timeout(rx, timeout_dur).await??; + let _ = utils::timeout(rx, timeout_dur).await??; inner .notifications .close_public_receivers(inner.with_perf_config(|c| c.broadcast_channel_capacity)); @@ -65,7 +73,7 @@ pub async fn shutdown(client: &C, flags: Option) - let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; - let _ = utils::apply_timeout(rx, timeout_dur).await??; + let _ = utils::timeout(rx, timeout_dur).await??; inner .notifications .close_public_receivers(inner.with_perf_config(|c| c.broadcast_channel_capacity)); @@ -104,11 +112,11 @@ pub fn split(inner: &Arc) -> Result, RedisErr pub async fn force_reconnection(inner: &Arc) -> Result<(), RedisError> { let (tx, rx) = oneshot_channel(); let command = RouterCommand::Reconnect { - server: None, - force: true, - tx: Some(tx), + server: None, + force: true, + tx: Some(tx), #[cfg(feature = "replicas")] - replica: false, + replica: false, }; interfaces::send_to_router(inner, command)?; @@ -133,7 +141,7 @@ pub async fn flushall_cluster(client: &C) -> Result<(), RedisErro let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; - let _ = utils::apply_timeout(rx, timeout_dur).await??; + let _ = utils::timeout(rx, timeout_dur).await??; Ok(()) } @@ -164,13 +172,17 @@ pub async fn info(client: &C, section: Option) -> Resul pub async fn hello( client: &C, version: RespVersion, - auth: Option<(String, String)>, + auth: Option<(Str, Str)>, + setname: Option, ) -> Result<(), RedisError> { - let args = if let Some((username, password)) = auth { + let mut args = if let Some((username, password)) = auth { vec![username.into(), password.into()] } else { vec![] }; + 
if let Some(name) = setname { + args.push(name.into()); + } if client.inner().config.server.is_clustered() { let (tx, rx) = oneshot_channel(); @@ -179,7 +191,7 @@ pub async fn hello( let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; - let _ = utils::apply_timeout(rx, timeout_dur).await??; + let _ = utils::timeout(rx, timeout_dur).await??; Ok(()) } else { let frame = utils::request_response(client, move || Ok((RedisCommandKind::_Hello(version), args))).await?; @@ -202,7 +214,7 @@ pub async fn auth(client: &C, username: Option, password: let timeout_dur = utils::prepare_command(client, &mut command); client.send_command(command)?; - let _ = utils::apply_timeout(rx, timeout_dur).await??; + let _ = utils::timeout(rx, timeout_dur).await??; Ok(()) } else { let frame = utils::request_response(client, move || Ok((RedisCommandKind::Auth, args))).await?; @@ -228,10 +240,14 @@ pub async fn custom_raw( utils::request_response(client, move || Ok((RedisCommandKind::_Custom(cmd), args))).await } +#[cfg(feature = "i-server")] value_cmd!(dbsize, DBSize); +#[cfg(feature = "i-server")] value_cmd!(bgrewriteaof, BgreWriteAof); +#[cfg(feature = "i-server")] value_cmd!(bgsave, BgSave); +#[cfg(feature = "i-server")] pub async fn failover( client: &C, to: Option<(String, u16)>, @@ -265,8 +281,10 @@ pub async fn failover( protocol_utils::expect_ok(&response) } +#[cfg(feature = "i-server")] value_cmd!(lastsave, LastSave); +#[cfg(feature = "i-server")] pub async fn wait(client: &C, numreplicas: i64, timeout: i64) -> Result { let frame = utils::request_response(client, move || { Ok((RedisCommandKind::Wait, vec![numreplicas.into(), timeout.into()])) diff --git a/src/commands/impls/slowlog.rs b/src/commands/impls/slowlog.rs index 3659a442..06275636 100644 --- a/src/commands/impls/slowlog.rs +++ b/src/commands/impls/slowlog.rs @@ -1,6 +1,5 @@ use super::*; use crate::{ - prelude::*, protocol::{command::RedisCommandKind, utils as protocol_utils}, utils, 
}; diff --git a/src/commands/impls/sorted_sets.rs b/src/commands/impls/sorted_sets.rs index f452655b..99a167ac 100644 --- a/src/commands/impls/sorted_sets.rs +++ b/src/commands/impls/sorted_sets.rs @@ -7,6 +7,11 @@ use crate::{ }; use std::convert::TryInto; +static INCR: &str = "INCR"; +static WITH_SCORES: &str = "WITHSCORES"; +static AGGREGATE: &str = "AGGREGATE"; +static WEIGHTS: &str = "WEIGHTS"; + fn new_range_error(kind: &Option) -> Result<(), RedisError> { if let Some(ref sort) = *kind { Err(RedisError::new( @@ -596,11 +601,10 @@ pub async fn zremrangebylex( let frame = utils::request_response(client, move || { check_range_types(&min, &max, &Some(ZSort::ByLex))?; - Ok((RedisCommandKind::Zremrangebylex, vec![ - key.into(), - min.into_value()?, - max.into_value()?, - ])) + Ok(( + RedisCommandKind::Zremrangebylex, + vec![key.into(), min.into_value()?, max.into_value()?], + )) }) .await?; @@ -626,11 +630,10 @@ pub async fn zremrangebyscore( let frame = utils::request_response(client, move || { check_range_types(&min, &max, &Some(ZSort::ByScore))?; - Ok((RedisCommandKind::Zremrangebyscore, vec![ - key.into(), - min.into_value()?, - max.into_value()?, - ])) + Ok(( + RedisCommandKind::Zremrangebyscore, + vec![key.into(), min.into_value()?, max.into_value()?], + )) }) .await?; diff --git a/src/commands/impls/tracking.rs b/src/commands/impls/tracking.rs index c3e8af45..df853141 100644 --- a/src/commands/impls/tracking.rs +++ b/src/commands/impls/tracking.rs @@ -90,7 +90,7 @@ pub async fn start_tracking( let command: RedisCommand = (RedisCommandKind::_ClientTrackingCluster, args, response).into(); client.send_command(command)?; - let frame = utils::apply_timeout(rx, client.inner().internal_command_timeout()).await??; + let frame = utils::timeout(rx, client.inner().internal_command_timeout()).await??; let _ = protocol_utils::frame_to_results(frame)?; Ok(()) } @@ -118,7 +118,7 @@ pub async fn stop_tracking(client: &C) -> Result<(), RedisError> let command: RedisCommand = 
(RedisCommandKind::_ClientTrackingCluster, args, response).into(); client.send_command(command)?; - let frame = utils::apply_timeout(rx, client.inner().internal_command_timeout()).await??; + let frame = utils::timeout(rx, client.inner().internal_command_timeout()).await??; let _ = protocol_utils::frame_to_results(frame)?; Ok(()) } else { diff --git a/src/commands/interfaces/acl.rs b/src/commands/interfaces/acl.rs index 6de423ec..1d734e47 100644 --- a/src/commands/interfaces/acl.rs +++ b/src/commands/interfaces/acl.rs @@ -5,130 +5,136 @@ use crate::{ types::{FromRedis, MultipleStrings, MultipleValues}, }; use bytes_utils::Str; +use futures::Future; /// Functions that implement the [ACL](https://redis.io/commands#server) interface. -#[async_trait] pub trait AclInterface: ClientLike + Sized { /// Create an ACL user with the specified rules or modify the rules of an existing user. /// /// - async fn acl_setuser(&self, username: S, rules: V) -> RedisResult<()> + fn acl_setuser(&self, username: S, rules: V) -> impl Future> + Send where S: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(username); - try_into!(rules); - commands::acl::acl_setuser(self, username, rules).await + async move { + into!(username); + try_into!(rules); + commands::acl::acl_setuser(self, username, rules).await + } } /// When Redis is configured to use an ACL file (with the aclfile configuration option), this command will reload /// the ACLs from the file, replacing all the current ACL rules with the ones defined in the file. /// /// - async fn acl_load(&self) -> RedisResult<()> { - commands::acl::acl_load(self).await + fn acl_load(&self) -> impl Future> + Send { + async move { commands::acl::acl_load(self).await } } /// When Redis is configured to use an ACL file (with the aclfile configuration option), this command will save the /// currently defined ACLs from the server memory to the ACL file. 
/// /// - async fn acl_save(&self) -> RedisResult<()> { - commands::acl::acl_save(self).await + fn acl_save(&self) -> impl Future> + Send { + async move { commands::acl::acl_save(self).await } } /// The command shows the currently active ACL rules in the Redis server. /// /// - async fn acl_list(&self) -> RedisResult + fn acl_list(&self) -> impl Future> + Send where R: FromRedis, { - commands::acl::acl_list(self).await?.convert() + async move { commands::acl::acl_list(self).await?.convert() } } /// The command shows a list of all the usernames of the currently configured users in the Redis ACL system. /// /// - async fn acl_users(&self) -> RedisResult + fn acl_users(&self) -> impl Future> + Send where R: FromRedis, { - commands::acl::acl_users(self).await?.convert() + async move { commands::acl::acl_users(self).await?.convert() } } /// The command returns all the rules defined for an existing ACL user. /// /// - async fn acl_getuser(&self, username: S) -> RedisResult + fn acl_getuser(&self, username: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(username); - commands::acl::acl_getuser(self, username).await?.convert() + async move { + into!(username); + commands::acl::acl_getuser(self, username).await?.convert() + } } /// Delete all the specified ACL users and terminate all the connections that are authenticated with such users. /// /// - async fn acl_deluser(&self, usernames: S) -> RedisResult + fn acl_deluser(&self, usernames: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(usernames); - commands::acl::acl_deluser(self, usernames).await?.convert() + async move { + into!(usernames); + commands::acl::acl_deluser(self, usernames).await?.convert() + } } /// The command shows the available ACL categories if called without arguments. If a category name is given, /// the command shows all the Redis commands in the specified category. 
/// /// - async fn acl_cat(&self, category: Option) -> RedisResult + fn acl_cat(&self, category: Option) -> impl Future> + Send where R: FromRedis, { - commands::acl::acl_cat(self, category).await?.convert() + async move { commands::acl::acl_cat(self, category).await?.convert() } } /// Generate a password with length `bits`, returning the password. /// /// - async fn acl_genpass(&self, bits: Option) -> RedisResult + fn acl_genpass(&self, bits: Option) -> impl Future> + Send where R: FromRedis, { - commands::acl::acl_genpass(self, bits).await?.convert() + async move { commands::acl::acl_genpass(self, bits).await?.convert() } } /// Return the username the current connection is authenticated with. New connections are authenticated /// with the "default" user. /// /// - async fn acl_whoami(&self) -> RedisResult + fn acl_whoami(&self) -> impl Future> + Send where R: FromRedis, { - commands::acl::acl_whoami(self).await?.convert() + async move { commands::acl::acl_whoami(self).await?.convert() } } /// Read `count` recent ACL security events. /// /// - async fn acl_log_count(&self, count: Option) -> RedisResult + fn acl_log_count(&self, count: Option) -> impl Future> + Send where R: FromRedis, { - commands::acl::acl_log_count(self, count).await?.convert() + async move { commands::acl::acl_log_count(self, count).await?.convert() } } /// Clear the ACL security events logs. 
/// /// - async fn acl_log_reset(&self) -> RedisResult<()> { - commands::acl::acl_log_reset(self).await + fn acl_log_reset(&self) -> impl Future> + Send { + async move { commands::acl::acl_log_reset(self).await } } } diff --git a/src/commands/interfaces/client.rs b/src/commands/interfaces/client.rs index a15fa212..a9099f6d 100644 --- a/src/commands/interfaces/client.rs +++ b/src/commands/interfaces/client.rs @@ -2,27 +2,21 @@ use crate::{ commands, interfaces::{ClientLike, RedisResult}, types::{ - ClientKillFilter, - ClientKillType, - ClientPauseKind, - ClientReplyFlag, - ClientUnblockFlag, - FromRedis, - RedisValue, + ClientKillFilter, ClientKillType, ClientPauseKind, ClientReplyFlag, ClientUnblockFlag, FromRedis, RedisValue, Server, }, }; use bytes_utils::Str; +use futures::Future; use std::collections::HashMap; -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] use crate::{ error::RedisError, types::{MultipleStrings, Toggle}, }; /// Functions that implement the [client](https://redis.io/commands#connection) interface. -#[async_trait] pub trait ClientInterface: ClientLike + Sized { /// Return the ID of the current connection. /// @@ -30,11 +24,11 @@ pub trait ClientInterface: ClientLike + Sized { /// [connection_ids](Self::connection_ids) for more information. /// /// - async fn client_id(&self) -> RedisResult + fn client_id(&self) -> impl Future> + Send where R: FromRedis, { - commands::client::client_id(self).await?.convert() + async move { commands::client::client_id(self).await?.convert() } } /// Read the connection IDs for the active connections to each server. @@ -42,50 +36,54 @@ pub trait ClientInterface: ClientLike + Sized { /// The returned map contains each server's `host:port` and the result of calling `CLIENT ID` on the connection. /// /// Note: despite being async this function will return cached information from the client if possible. 
- async fn connection_ids(&self) -> HashMap { - self.inner().backchannel.read().await.connection_ids.clone() + fn connection_ids(&self) -> impl Future> + Send { + async move { self.inner().backchannel.read().await.connection_ids.clone() } } /// The command returns information and statistics about the current client connection in a mostly human readable /// format. /// /// - async fn client_info(&self) -> RedisResult + fn client_info(&self) -> impl Future> + Send where R: FromRedis, { - commands::client::client_info(self).await?.convert() + async move { commands::client::client_info(self).await?.convert() } } /// Close a given connection or set of connections. /// /// - async fn client_kill(&self, filters: Vec) -> RedisResult + fn client_kill(&self, filters: Vec) -> impl Future> + Send where R: FromRedis, { - commands::client::client_kill(self, filters).await?.convert() + async move { commands::client::client_kill(self, filters).await?.convert() } } /// The CLIENT LIST command returns information and statistics about the client connections server in a mostly human /// readable format. /// /// - async fn client_list(&self, r#type: Option, ids: Option>) -> RedisResult + fn client_list( + &self, + r#type: Option, + ids: Option>, + ) -> impl Future> + Send where R: FromRedis, { - commands::client::client_list(self, r#type, ids).await?.convert() + async move { commands::client::client_list(self, r#type, ids).await?.convert() } } /// The CLIENT GETNAME returns the name of the current connection as set by CLIENT SETNAME. /// /// - async fn client_getname(&self) -> RedisResult + fn client_getname(&self) -> impl Future> + Send where R: FromRedis, { - commands::client::client_getname(self).await?.convert() + async move { commands::client::client_getname(self).await?.convert() } } /// Assign a name to the current connection. @@ -94,35 +92,41 @@ pub trait ClientInterface: ClientLike + Sized { /// connections. 
Use `self.id() to read the automatically generated name.** /// /// - async fn client_setname(&self, name: S) -> RedisResult<()> + fn client_setname(&self, name: S) -> impl Future> + Send where S: Into + Send, { - into!(name); - commands::client::client_setname(self, name).await + async move { + into!(name); + commands::client::client_setname(self, name).await + } } /// CLIENT PAUSE is a connections control command able to suspend all the Redis clients for the specified amount of /// time (in milliseconds). /// /// - async fn client_pause(&self, timeout: i64, mode: Option) -> RedisResult<()> { - commands::client::client_pause(self, timeout, mode).await + fn client_pause( + &self, + timeout: i64, + mode: Option, + ) -> impl Future> + Send { + async move { commands::client::client_pause(self, timeout, mode).await } } /// CLIENT UNPAUSE is used to resume command processing for all clients that were paused by CLIENT PAUSE. /// /// - async fn client_unpause(&self) -> RedisResult<()> { - commands::client::client_unpause(self).await + fn client_unpause(&self) -> impl Future> + Send { + async move { commands::client::client_unpause(self).await } } /// The CLIENT REPLY command controls whether the server will reply the client's commands. The following modes are /// available: /// /// - async fn client_reply(&self, flag: ClientReplyFlag) -> RedisResult<()> { - commands::client::client_reply(self, flag).await + fn client_reply(&self, flag: ClientReplyFlag) -> impl Future> + Send { + async move { commands::client::client_reply(self, flag).await } } /// This command can unblock, from a different connection, a client blocked in a blocking operation, such as for @@ -131,18 +135,24 @@ pub trait ClientInterface: ClientLike + Sized { /// Note: this command is sent on a backchannel connection and will work even when the main connection is blocked. 
/// /// - async fn client_unblock(&self, id: S, flag: Option) -> RedisResult + fn client_unblock( + &self, + id: S, + flag: Option, + ) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(id); - commands::client::client_unblock(self, id, flag).await?.convert() + async move { + into!(id); + commands::client::client_unblock(self, id, flag).await?.convert() + } } /// A convenience function to unblock any blocked connection on this client. - async fn unblock_self(&self, flag: Option) -> RedisResult<()> { - commands::client::unblock_self(self, flag).await + fn unblock_self(&self, flag: Option) -> impl Future> + Send { + async move { commands::client::unblock_self(self, flag).await } } /// This command enables the tracking feature of the Redis server that is used for server assisted client side @@ -154,9 +164,9 @@ pub trait ClientInterface: ClientLike + Sized { /// [with_options](crate::interfaces::ClientLike::with_options). See /// [crate::interfaces::TrackingInterface::start_tracking] for a version that works with all server deployment /// modes. - #[cfg(feature = "client-tracking")] - #[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] - async fn client_tracking( + #[cfg(feature = "i-tracking")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] + fn client_tracking( &self, toggle: T, redirect: Option, @@ -165,43 +175,45 @@ pub trait ClientInterface: ClientLike + Sized { optin: bool, optout: bool, noloop: bool, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, T: TryInto + Send, T::Error: Into + Send, P: Into + Send, { - try_into!(toggle); - into!(prefixes); - commands::tracking::client_tracking(self, toggle, redirect, prefixes, bcast, optin, optout, noloop) - .await? - .convert() + async move { + try_into!(toggle); + into!(prefixes); + commands::tracking::client_tracking(self, toggle, redirect, prefixes, bcast, optin, optout, noloop) + .await? 
+ .convert() + } } /// The command returns information about the current client connection's use of the server assisted client side /// caching feature. /// /// - #[cfg(feature = "client-tracking")] - #[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] - async fn client_trackinginfo(&self) -> RedisResult + #[cfg(feature = "i-tracking")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] + fn client_trackinginfo(&self) -> impl Future> + Send where R: FromRedis, { - commands::tracking::client_trackinginfo(self).await?.convert() + async move { commands::tracking::client_trackinginfo(self).await?.convert() } } /// This command returns the client ID we are redirecting our tracking notifications to. /// /// - #[cfg(feature = "client-tracking")] - #[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] - async fn client_getredir(&self) -> RedisResult + #[cfg(feature = "i-tracking")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] + fn client_getredir(&self) -> impl Future> + Send where R: FromRedis, { - commands::tracking::client_getredir(self).await?.convert() + async move { commands::tracking::client_getredir(self).await?.convert() } } /// This command controls the tracking of the keys in the next command executed by the connection, when tracking is @@ -212,12 +224,12 @@ pub trait ClientInterface: ClientLike + Sized { /// This function is designed to work against a specific server. See /// [with_options](crate::interfaces::ClientLike::with_options) for a variation that works with all deployment /// types. 
- #[cfg(feature = "client-tracking")] - #[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] - async fn client_caching(&self, enabled: bool) -> RedisResult + #[cfg(feature = "i-tracking")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] + fn client_caching(&self, enabled: bool) -> impl Future> + Send where R: FromRedis, { - commands::tracking::client_caching(self, enabled).await?.convert() + async move { commands::tracking::client_caching(self, enabled).await?.convert() } } } diff --git a/src/commands/interfaces/cluster.rs b/src/commands/interfaces/cluster.rs index 92a78e80..244303d2 100644 --- a/src/commands/interfaces/cluster.rs +++ b/src/commands/interfaces/cluster.rs @@ -6,9 +6,9 @@ use crate::{ types::{ClusterFailoverFlag, ClusterResetFlag, ClusterSetSlotState, FromRedis, MultipleHashSlots, RedisKey}, }; use bytes_utils::Str; +use futures::Future; /// Functions that implement the [cluster](https://redis.io/commands#cluster) interface. -#[async_trait] pub trait ClusterInterface: ClientLike + Sized { /// Read the cached cluster state used for routing commands to the correct cluster nodes. fn cached_cluster_state(&self) -> Option { @@ -24,35 +24,35 @@ pub trait ClusterInterface: ClientLike + Sized { } /// Update the cached cluster state and add or remove any changed cluster node connections. - async fn sync_cluster(&self) -> Result<(), RedisError> { - commands::cluster::sync_cluster(self).await + fn sync_cluster(&self) -> impl Future> + Send { + async move { commands::cluster::sync_cluster(self).await } } /// Advances the cluster config epoch. /// /// - async fn cluster_bumpepoch(&self) -> RedisResult + fn cluster_bumpepoch(&self) -> impl Future> + Send where R: FromRedis, { - commands::cluster::cluster_bumpepoch(self).await?.convert() + async move { commands::cluster::cluster_bumpepoch(self).await?.convert() } } /// Deletes all slots from a node. 
/// /// - async fn cluster_flushslots(&self) -> RedisResult<()> { - commands::cluster::cluster_flushslots(self).await + fn cluster_flushslots(&self) -> impl Future> + Send { + async move { commands::cluster::cluster_flushslots(self).await } } /// Returns the node's id. /// /// - async fn cluster_myid(&self) -> RedisResult + fn cluster_myid(&self) -> impl Future> + Send where R: FromRedis, { - commands::cluster::cluster_myid(self).await?.convert() + async move { commands::cluster::cluster_myid(self).await?.convert() } } /// Read the current cluster node configuration. @@ -61,96 +61,104 @@ pub trait ClusterInterface: ClientLike + Sized { /// [cached_cluster_state](Self::cached_cluster_state). /// /// - async fn cluster_nodes(&self) -> RedisResult + fn cluster_nodes(&self) -> impl Future> + Send where R: FromRedis, { - commands::cluster::cluster_nodes(self).await?.convert() + async move { commands::cluster::cluster_nodes(self).await?.convert() } } /// Forces a node to save the nodes.conf configuration on disk. /// /// - async fn cluster_saveconfig(&self) -> RedisResult<()> { - commands::cluster::cluster_saveconfig(self).await + fn cluster_saveconfig(&self) -> impl Future> + Send { + async move { commands::cluster::cluster_saveconfig(self).await } } /// CLUSTER SLOTS returns details about which cluster slots map to which Redis instances. /// /// - async fn cluster_slots(&self) -> RedisResult + fn cluster_slots(&self) -> impl Future> + Send where R: FromRedis, { - commands::cluster::cluster_slots(self).await?.convert() + async move { commands::cluster::cluster_slots(self).await?.convert() } } /// CLUSTER INFO provides INFO style information about Redis Cluster vital parameters. 
/// /// - async fn cluster_info(&self) -> RedisResult + fn cluster_info(&self) -> impl Future> + Send where R: FromRedis, { - commands::cluster::cluster_info(self).await?.convert() + async move { commands::cluster::cluster_info(self).await?.convert() } } /// This command is useful in order to modify a node's view of the cluster configuration. Specifically it assigns a /// set of hash slots to the node receiving the command. /// /// - async fn cluster_add_slots(&self, slots: S) -> RedisResult<()> + fn cluster_add_slots(&self, slots: S) -> impl Future> + Send where S: Into + Send, { - into!(slots); - commands::cluster::cluster_add_slots(self, slots).await + async move { + into!(slots); + commands::cluster::cluster_add_slots(self, slots).await + } } /// The command returns the number of failure reports for the specified node. /// /// - async fn cluster_count_failure_reports(&self, node_id: S) -> RedisResult + fn cluster_count_failure_reports(&self, node_id: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(node_id); - commands::cluster::cluster_count_failure_reports(self, node_id) - .await? - .convert() + async move { + into!(node_id); + commands::cluster::cluster_count_failure_reports(self, node_id) + .await? + .convert() + } } /// Returns the number of keys in the specified Redis Cluster hash slot. /// /// - async fn cluster_count_keys_in_slot(&self, slot: u16) -> RedisResult + fn cluster_count_keys_in_slot(&self, slot: u16) -> impl Future> + Send where R: FromRedis, { - commands::cluster::cluster_count_keys_in_slot(self, slot) - .await? - .convert() + async move { + commands::cluster::cluster_count_keys_in_slot(self, slot) + .await? + .convert() + } } /// The CLUSTER DELSLOTS command asks a particular Redis Cluster node to forget which master is serving the hash /// slots specified as arguments. 
/// /// - async fn cluster_del_slots(&self, slots: S) -> RedisResult<()> + fn cluster_del_slots(&self, slots: S) -> impl Future> + Send where S: Into + Send, { - into!(slots); - commands::cluster::cluster_del_slots(self, slots).await + async move { + into!(slots); + commands::cluster::cluster_del_slots(self, slots).await + } } /// This command, that can only be sent to a Redis Cluster replica node, forces the replica to start a manual /// failover of its master instance. /// /// - async fn cluster_failover(&self, flag: Option) -> RedisResult<()> { - commands::cluster::cluster_failover(self, flag).await + fn cluster_failover(&self, flag: Option) -> impl Future> + Send { + async move { commands::cluster::cluster_failover(self, flag).await } } /// The command is used in order to remove a node, specified via its node ID, from the set of known nodes of the @@ -158,72 +166,84 @@ pub trait ClusterInterface: ClientLike + Sized { /// the node receiving the command. /// /// - async fn cluster_forget(&self, node_id: S) -> RedisResult<()> + fn cluster_forget(&self, node_id: S) -> impl Future> + Send where S: Into + Send, { - into!(node_id); - commands::cluster::cluster_forget(self, node_id).await + async move { + into!(node_id); + commands::cluster::cluster_forget(self, node_id).await + } } /// The command returns an array of keys names stored in the contacted node and hashing to the specified hash slot. /// /// - async fn cluster_get_keys_in_slot(&self, slot: u16, count: u64) -> RedisResult + fn cluster_get_keys_in_slot(&self, slot: u16, count: u64) -> impl Future> + Send where R: FromRedis, { - commands::cluster::cluster_get_keys_in_slot(self, slot, count) - .await? - .convert() + async move { + commands::cluster::cluster_get_keys_in_slot(self, slot, count) + .await? + .convert() + } } /// Returns an integer identifying the hash slot the specified key hashes to. 
/// /// - async fn cluster_keyslot(&self, key: K) -> RedisResult + fn cluster_keyslot(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::cluster::cluster_keyslot(self, key).await?.convert() + async move { + into!(key); + commands::cluster::cluster_keyslot(self, key).await?.convert() + } } /// CLUSTER MEET is used in order to connect different Redis nodes with cluster support enabled, into a working /// cluster. /// /// - async fn cluster_meet(&self, ip: S, port: u16) -> RedisResult<()> + fn cluster_meet(&self, ip: S, port: u16) -> impl Future> + Send where S: Into + Send, { - into!(ip); - commands::cluster::cluster_meet(self, ip, port).await + async move { + into!(ip); + commands::cluster::cluster_meet(self, ip, port).await + } } /// The command reconfigures a node as a replica of the specified master. If the node receiving the command is an /// empty master, as a side effect of the command, the node role is changed from master to replica. /// /// - async fn cluster_replicate(&self, node_id: S) -> RedisResult<()> + fn cluster_replicate(&self, node_id: S) -> impl Future> + Send where S: Into + Send, { - into!(node_id); - commands::cluster::cluster_replicate(self, node_id).await + async move { + into!(node_id); + commands::cluster::cluster_replicate(self, node_id).await + } } /// The command provides a list of replica nodes replicating from the specified master node. /// /// - async fn cluster_replicas(&self, node_id: S) -> RedisResult + fn cluster_replicas(&self, node_id: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(node_id); - commands::cluster::cluster_replicas(self, node_id).await?.convert() + async move { + into!(node_id); + commands::cluster::cluster_replicas(self, node_id).await?.convert() + } } /// Reset a Redis Cluster node, in a more or less drastic way depending on the reset type, that can be hard or soft. 
@@ -231,21 +251,21 @@ pub trait ClusterInterface: ClientLike + Sized { /// reset a master node keys must be removed first, e.g. by using FLUSHALL first, and then CLUSTER RESET. /// /// - async fn cluster_reset(&self, mode: Option) -> RedisResult<()> { - commands::cluster::cluster_reset(self, mode).await + fn cluster_reset(&self, mode: Option) -> impl Future> + Send { + async move { commands::cluster::cluster_reset(self, mode).await } } /// This command sets a specific config epoch in a fresh node. /// /// - async fn cluster_set_config_epoch(&self, epoch: u64) -> RedisResult<()> { - commands::cluster::cluster_set_config_epoch(self, epoch).await + fn cluster_set_config_epoch(&self, epoch: u64) -> impl Future> + Send { + async move { commands::cluster::cluster_set_config_epoch(self, epoch).await } } /// CLUSTER SETSLOT is responsible of changing the state of a hash slot in the receiving node in different ways. /// /// - async fn cluster_setslot(&self, slot: u16, state: ClusterSetSlotState) -> RedisResult<()> { - commands::cluster::cluster_setslot(self, slot, state).await + fn cluster_setslot(&self, slot: u16, state: ClusterSetSlotState) -> impl Future> + Send { + async move { commands::cluster::cluster_setslot(self, slot, state).await } } } diff --git a/src/commands/interfaces/config.rs b/src/commands/interfaces/config.rs index 33c30d38..fd8a3906 100644 --- a/src/commands/interfaces/config.rs +++ b/src/commands/interfaces/config.rs @@ -5,16 +5,16 @@ use crate::{ types::{FromRedis, RedisValue}, }; use bytes_utils::Str; +use futures::Future; use std::convert::TryInto; /// Functions that implement the [config](https://redis.io/commands#server) interface. -#[async_trait] pub trait ConfigInterface: ClientLike + Sized { /// Resets the statistics reported by Redis using the INFO command. 
/// /// - async fn config_resetstat(&self) -> RedisResult<()> { - commands::config::config_resetstat(self).await + fn config_resetstat(&self) -> impl Future> + Send { + async move { commands::config::config_resetstat(self).await } } /// The CONFIG REWRITE command rewrites the redis.conf file the server was started with, applying the minimal @@ -22,33 +22,37 @@ pub trait ConfigInterface: ClientLike + Sized { /// compared to the original one because of the use of the CONFIG SET command. /// /// - async fn config_rewrite(&self) -> RedisResult<()> { - commands::config::config_rewrite(self).await + fn config_rewrite(&self) -> impl Future> + Send { + async move { commands::config::config_rewrite(self).await } } /// The CONFIG GET command is used to read the configuration parameters of a running Redis server. /// /// - async fn config_get(&self, parameter: S) -> RedisResult + fn config_get(&self, parameter: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(parameter); - commands::config::config_get(self, parameter).await?.convert() + async move { + into!(parameter); + commands::config::config_get(self, parameter).await?.convert() + } } /// The CONFIG SET command is used in order to reconfigure the server at run time without the need to restart Redis. 
/// /// - async fn config_set(&self, parameter: P, value: V) -> RedisResult<()> + fn config_set(&self, parameter: P, value: V) -> impl Future> + Send where P: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(parameter); - try_into!(value); - commands::config::config_set(self, parameter, value).await + async move { + into!(parameter); + try_into!(value); + commands::config::config_set(self, parameter, value).await + } } } diff --git a/src/commands/interfaces/geo.rs b/src/commands/interfaces/geo.rs index c17c6247..7d0be181 100644 --- a/src/commands/interfaces/geo.rs +++ b/src/commands/interfaces/geo.rs @@ -1,3 +1,5 @@ +use futures::Future; + use crate::{ commands, error::RedisError, @@ -18,37 +20,46 @@ use crate::{ use std::convert::TryInto; /// Functions that implement the [geo](https://redis.io/commands#geo) interface. -#[async_trait] pub trait GeoInterface: ClientLike + Sized { /// Adds the specified geospatial items (longitude, latitude, name) to the specified key. /// /// - async fn geoadd(&self, key: K, options: Option, changed: bool, values: V) -> RedisResult + fn geoadd( + &self, + key: K, + options: Option, + changed: bool, + values: V, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: Into + Send, { - into!(key, values); - commands::geo::geoadd(self, key, options, changed, values) - .await? - .convert() + async move { + into!(key, values); + commands::geo::geoadd(self, key, options, changed, values) + .await? + .convert() + } } /// Return valid Geohash strings representing the position of one or more elements in a sorted set value /// representing a geospatial index (where elements were added using GEOADD). 
/// /// - async fn geohash(&self, key: K, members: V) -> RedisResult + fn geohash(&self, key: K, members: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(members); - commands::geo::geohash(self, key, members).await?.convert() + async move { + into!(key); + try_into!(members); + commands::geo::geohash(self, key, members).await?.convert() + } } /// Return the positions (longitude,latitude) of all the specified members of the geospatial index represented by @@ -57,22 +68,30 @@ pub trait GeoInterface: ClientLike + Sized { /// Callers can use [as_geo_position](crate::types::RedisValue::as_geo_position) to lazily parse results as needed. /// /// - async fn geopos(&self, key: K, members: V) -> RedisResult + fn geopos(&self, key: K, members: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(members); - commands::geo::geopos(self, key, members).await?.convert() + async move { + into!(key); + try_into!(members); + commands::geo::geopos(self, key, members).await?.convert() + } } /// Return the distance between two members in the geospatial index represented by the sorted set. 
/// /// - async fn geodist(&self, key: K, src: S, dest: D, unit: Option) -> RedisResult + fn geodist( + &self, + key: K, + src: S, + dest: D, + unit: Option, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -81,16 +100,18 @@ pub trait GeoInterface: ClientLike + Sized { D: TryInto + Send, D::Error: Into + Send, { - into!(key); - try_into!(src, dest); - commands::geo::geodist(self, key, src, dest, unit).await?.convert() + async move { + into!(key); + try_into!(src, dest); + commands::geo::geodist(self, key, src, dest, unit).await?.convert() + } } /// Return the members of a sorted set populated with geospatial information using GEOADD, which are within the /// borders of the area specified with the center location and the maximum distance from the center (the radius). /// /// - async fn georadius( + fn georadius( &self, key: K, position: P, @@ -103,18 +124,20 @@ pub trait GeoInterface: ClientLike + Sized { ord: Option, store: Option, storedist: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key, position); - commands::geo::georadius( - self, key, position, radius, unit, withcoord, withdist, withhash, count, ord, store, storedist, - ) - .await? - .convert() + async move { + into!(key, position); + commands::geo::georadius( + self, key, position, radius, unit, withcoord, withdist, withhash, count, ord, store, storedist, + ) + .await? + .convert() + } } /// This command is exactly like GEORADIUS with the sole difference that instead of taking, as the center of the @@ -122,7 +145,7 @@ pub trait GeoInterface: ClientLike + Sized { /// geospatial index represented by the sorted set. 
/// /// - async fn georadiusbymember( + fn georadiusbymember( &self, key: K, member: V, @@ -135,38 +158,40 @@ pub trait GeoInterface: ClientLike + Sized { ord: Option, store: Option, storedist: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(member); - commands::geo::georadiusbymember( - self, - key, - to!(member)?, - radius, - unit, - withcoord, - withdist, - withhash, - count, - ord, - store, - storedist, - ) - .await? - .convert() + async move { + into!(key); + try_into!(member); + commands::geo::georadiusbymember( + self, + key, + to!(member)?, + radius, + unit, + withcoord, + withdist, + withhash, + count, + ord, + store, + storedist, + ) + .await? + .convert() + } } /// Return the members of a sorted set populated with geospatial information using GEOADD, which are within the /// borders of the area specified by a given shape. /// /// - async fn geosearch( + fn geosearch( &self, key: K, from_member: Option, @@ -178,34 +203,36 @@ pub trait GeoInterface: ClientLike + Sized { withcoord: bool, withdist: bool, withhash: bool, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::geo::geosearch( - self, - key, - from_member, - from_lonlat, - by_radius, - by_box, - ord, - count, - withcoord, - withdist, - withhash, - ) - .await? - .convert() + async move { + into!(key); + commands::geo::geosearch( + self, + key, + from_member, + from_lonlat, + by_radius, + by_box, + ord, + count, + withcoord, + withdist, + withhash, + ) + .await? + .convert() + } } /// This command is like GEOSEARCH, but stores the result in destination key. Returns the number of members added to /// the destination key. 
/// /// - async fn geosearchstore( + fn geosearchstore( &self, dest: D, source: S, @@ -216,26 +243,28 @@ pub trait GeoInterface: ClientLike + Sized { ord: Option, count: Option<(u64, Any)>, storedist: bool, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, D: Into + Send, S: Into + Send, { - into!(dest, source); - commands::geo::geosearchstore( - self, - dest, - source, - from_member, - from_lonlat, - by_radius, - by_box, - ord, - count, - storedist, - ) - .await? - .convert() + async move { + into!(dest, source); + commands::geo::geosearchstore( + self, + dest, + source, + from_member, + from_lonlat, + by_radius, + by_box, + ord, + count, + storedist, + ) + .await? + .convert() + } } } diff --git a/src/commands/interfaces/hashes.rs b/src/commands/interfaces/hashes.rs index 511070d0..fafca543 100644 --- a/src/commands/interfaces/hashes.rs +++ b/src/commands/interfaces/hashes.rs @@ -1,3 +1,5 @@ +use futures::Future; + use crate::{ commands, error::RedisError, @@ -7,159 +9,180 @@ use crate::{ use std::convert::TryInto; /// Functions that implement the [hashes](https://redis.io/commands#hashes) interface. -#[async_trait] pub trait HashesInterface: ClientLike + Sized { /// Returns all fields and values of the hash stored at `key`. /// /// - async fn hgetall(&self, key: K) -> RedisResult + fn hgetall(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::hashes::hgetall(self, key).await?.convert() + async move { + into!(key); + commands::hashes::hgetall(self, key).await?.convert() + } } /// Removes the specified fields from the hash stored at `key`. 
/// /// - async fn hdel(&self, key: K, fields: F) -> RedisResult + fn hdel(&self, key: K, fields: F) -> impl Future> + Send where R: FromRedis, K: Into + Send, F: Into + Send, { - into!(key, fields); - commands::hashes::hdel(self, key, fields).await?.convert() + async move { + into!(key, fields); + commands::hashes::hdel(self, key, fields).await?.convert() + } } /// Returns if `field` is an existing field in the hash stored at `key`. /// /// - async fn hexists(&self, key: K, field: F) -> RedisResult + fn hexists(&self, key: K, field: F) -> impl Future> + Send where R: FromRedis, K: Into + Send, F: Into + Send, { - into!(key, field); - commands::hashes::hexists(self, key, field).await?.convert() + async move { + into!(key, field); + commands::hashes::hexists(self, key, field).await?.convert() + } } /// Returns the value associated with `field` in the hash stored at `key`. /// /// - async fn hget(&self, key: K, field: F) -> RedisResult + fn hget(&self, key: K, field: F) -> impl Future> + Send where R: FromRedis, K: Into + Send, F: Into + Send, { - into!(key, field); - commands::hashes::hget(self, key, field).await?.convert() + async move { + into!(key, field); + commands::hashes::hget(self, key, field).await?.convert() + } } /// Increments the number stored at `field` in the hash stored at `key` by `increment`. /// /// - async fn hincrby(&self, key: K, field: F, increment: i64) -> RedisResult + fn hincrby(&self, key: K, field: F, increment: i64) -> impl Future> + Send where R: FromRedis, K: Into + Send, F: Into + Send, { - into!(key, field); - commands::hashes::hincrby(self, key, field, increment).await?.convert() + async move { + into!(key, field); + commands::hashes::hincrby(self, key, field, increment).await?.convert() + } } /// Increment the specified `field` of a hash stored at `key`, and representing a floating point number, by the /// specified `increment`. 
/// /// - async fn hincrbyfloat(&self, key: K, field: F, increment: f64) -> RedisResult + fn hincrbyfloat(&self, key: K, field: F, increment: f64) -> impl Future> + Send where R: FromRedis, K: Into + Send, F: Into + Send, { - into!(key, field); - commands::hashes::hincrbyfloat(self, key, field, increment) - .await? - .convert() + async move { + into!(key, field); + commands::hashes::hincrbyfloat(self, key, field, increment) + .await? + .convert() + } } /// Returns all field names in the hash stored at `key`. /// /// - async fn hkeys(&self, key: K) -> RedisResult + fn hkeys(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::hashes::hkeys(self, key).await?.convert() + async move { + into!(key); + commands::hashes::hkeys(self, key).await?.convert() + } } /// Returns the number of fields contained in the hash stored at `key`. /// /// - async fn hlen(&self, key: K) -> RedisResult + fn hlen(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::hashes::hlen(self, key).await?.convert() + async move { + into!(key); + commands::hashes::hlen(self, key).await?.convert() + } } /// Returns the values associated with the specified `fields` in the hash stored at `key`. /// /// - async fn hmget(&self, key: K, fields: F) -> RedisResult + fn hmget(&self, key: K, fields: F) -> impl Future> + Send where R: FromRedis, K: Into + Send, F: Into + Send, { - into!(key, fields); - commands::hashes::hmget(self, key, fields).await?.convert() + async move { + into!(key, fields); + commands::hashes::hmget(self, key, fields).await?.convert() + } } /// Sets the specified fields to their respective values in the hash stored at `key`. 
/// /// - async fn hmset(&self, key: K, values: V) -> RedisResult + fn hmset(&self, key: K, values: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(values); - commands::hashes::hmset(self, key, values).await?.convert() + async move { + into!(key); + try_into!(values); + commands::hashes::hmset(self, key, values).await?.convert() + } } /// Sets fields in the hash stored at `key` to their provided values. /// /// - async fn hset(&self, key: K, values: V) -> RedisResult + fn hset(&self, key: K, values: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(values); - commands::hashes::hset(self, key, values).await?.convert() + async move { + into!(key); + try_into!(values); + commands::hashes::hset(self, key, values).await?.convert() + } } /// Sets `field` in the hash stored at `key` to `value`, only if `field` does not yet exist. /// /// - async fn hsetnx(&self, key: K, field: F, value: V) -> RedisResult + fn hsetnx(&self, key: K, field: F, value: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -167,9 +190,11 @@ pub trait HashesInterface: ClientLike + Sized { V: TryInto + Send, V::Error: Into + Send, { - into!(key, field); - try_into!(value); - commands::hashes::hsetnx(self, key, field, value).await?.convert() + async move { + into!(key, field); + try_into!(value); + commands::hashes::hsetnx(self, key, field, value).await?.convert() + } } /// When called with just the `key` argument, return a random field from the hash value stored at `key`. @@ -177,37 +202,43 @@ pub trait HashesInterface: ClientLike + Sized { /// If the provided `count` argument is positive, return an array of distinct fields. 
/// /// - async fn hrandfield(&self, key: K, count: Option<(i64, bool)>) -> RedisResult + fn hrandfield(&self, key: K, count: Option<(i64, bool)>) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::hashes::hrandfield(self, key, count).await?.convert() + async move { + into!(key); + commands::hashes::hrandfield(self, key, count).await?.convert() + } } /// Returns the string length of the value associated with `field` in the hash stored at `key`. /// /// - async fn hstrlen(&self, key: K, field: F) -> RedisResult + fn hstrlen(&self, key: K, field: F) -> impl Future> + Send where R: FromRedis, K: Into + Send, F: Into + Send, { - into!(key, field); - commands::hashes::hstrlen(self, key, field).await?.convert() + async move { + into!(key, field); + commands::hashes::hstrlen(self, key, field).await?.convert() + } } /// Returns all values in the hash stored at `key`. /// /// - async fn hvals(&self, key: K) -> RedisResult + fn hvals(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::hashes::hvals(self, key).await?.convert() + async move { + into!(key); + commands::hashes::hvals(self, key).await?.convert() + } } } diff --git a/src/commands/interfaces/hyperloglog.rs b/src/commands/interfaces/hyperloglog.rs index 2df45555..6ccb7a4f 100644 --- a/src/commands/interfaces/hyperloglog.rs +++ b/src/commands/interfaces/hyperloglog.rs @@ -1,3 +1,5 @@ +use futures::Future; + use crate::{ commands, error::RedisError, @@ -7,22 +9,23 @@ use crate::{ use std::convert::TryInto; /// Functions that implement the [HyperLogLog](https://redis.io/commands#hyperloglog) interface. -#[async_trait] pub trait HyperloglogInterface: ClientLike + Sized { /// Adds all the element arguments to the HyperLogLog data structure stored at the variable name specified as first /// argument. 
/// /// - async fn pfadd(&self, key: K, elements: V) -> RedisResult + fn pfadd(&self, key: K, elements: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(elements); - commands::hyperloglog::pfadd(self, key, elements).await?.convert() + async move { + into!(key); + try_into!(elements); + commands::hyperloglog::pfadd(self, key, elements).await?.convert() + } } /// When called with a single key, returns the approximated cardinality computed by the HyperLogLog data structure @@ -32,26 +35,30 @@ pub trait HyperloglogInterface: ClientLike + Sized { /// internally merging the HyperLogLogs stored at the provided keys into a temporary HyperLogLog. /// /// - async fn pfcount(&self, keys: K) -> RedisResult + fn pfcount(&self, keys: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::hyperloglog::pfcount(self, keys).await?.convert() + async move { + into!(keys); + commands::hyperloglog::pfcount(self, keys).await?.convert() + } } /// Merge multiple HyperLogLog values into an unique value that will approximate the cardinality of the union of the /// observed sets of the source HyperLogLog structures. 
/// /// - async fn pfmerge(&self, dest: D, sources: S) -> RedisResult + fn pfmerge(&self, dest: D, sources: S) -> impl Future> + Send where R: FromRedis, D: Into + Send, S: Into + Send, { - into!(dest, sources); - commands::hyperloglog::pfmerge(self, dest, sources).await?.convert() + async move { + into!(dest, sources); + commands::hyperloglog::pfmerge(self, dest, sources).await?.convert() + } } } diff --git a/src/commands/interfaces/keys.rs b/src/commands/interfaces/keys.rs index f020ae1d..4fd1b0dc 100644 --- a/src/commands/interfaces/keys.rs +++ b/src/commands/interfaces/keys.rs @@ -1,3 +1,5 @@ +use futures::Future; + use crate::{ commands, error::RedisError, @@ -7,67 +9,78 @@ use crate::{ use std::convert::TryInto; /// Functions that implement the generic [keys](https://redis.io/commands#generic) interface. -#[async_trait] pub trait KeysInterface: ClientLike + Sized { /// Marks the given keys to be watched for conditional execution of a transaction. /// /// - async fn watch(&self, keys: K) -> RedisResult<()> + fn watch(&self, keys: K) -> impl Future> + Send where K: Into + Send, { - into!(keys); - commands::keys::watch(self, keys).await + async move { + into!(keys); + commands::keys::watch(self, keys).await + } } /// Flushes all the previously watched keys for a transaction. /// /// - async fn unwatch(&self) -> RedisResult<()> { - commands::keys::unwatch(self).await + fn unwatch(&self) -> impl Future> + Send { + async move { commands::keys::unwatch(self).await } } /// Return a random key from the currently selected database. /// /// - async fn randomkey(&self) -> RedisResult + fn randomkey(&self) -> impl Future> + Send where R: FromRedis, { - commands::keys::randomkey(self).await?.convert() + async move { commands::keys::randomkey(self).await?.convert() } } /// This command copies the value stored at the source key to the destination key. 
/// /// - async fn copy(&self, source: S, destination: D, db: Option, replace: bool) -> RedisResult + fn copy( + &self, + source: S, + destination: D, + db: Option, + replace: bool, + ) -> impl Future> + Send where R: FromRedis, S: Into + Send, D: Into + Send, { - into!(source, destination); - commands::keys::copy(self, source, destination, db, replace) - .await? - .convert() + async move { + into!(source, destination); + commands::keys::copy(self, source, destination, db, replace) + .await? + .convert() + } } /// Serialize the value stored at `key` in a Redis-specific format and return it as bulk string. /// /// - async fn dump(&self, key: K) -> RedisResult + fn dump(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::dump(self, key).await?.convert() + async move { + into!(key); + commands::keys::dump(self, key).await?.convert() + } } /// Create a key associated with a value that is obtained by deserializing the provided serialized value /// /// - async fn restore( + fn restore( &self, key: K, ttl: i64, @@ -76,15 +89,17 @@ pub trait KeysInterface: ClientLike + Sized { absttl: bool, idletime: Option, frequency: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::restore(self, key, ttl, serialized, replace, absttl, idletime, frequency) - .await? - .convert() + async move { + into!(key); + commands::keys::restore(self, key, ttl, serialized, replace, absttl, idletime, frequency) + .await? + .convert() + } } /// Set a value with optional NX|XX, EX|PX|EXAT|PXAT|KEEPTTL, and GET arguments. @@ -92,37 +107,41 @@ pub trait KeysInterface: ClientLike + Sized { /// Note: the `get` flag was added in 6.2.0. Setting it as `false` works with Redis versions <=6.2.0. 
/// /// - async fn set( + fn set( &self, key: K, value: V, expire: Option, options: Option, get: bool, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(value); - commands::keys::set(self, key, value, expire, options, get) - .await? - .convert() + async move { + into!(key); + try_into!(value); + commands::keys::set(self, key, value, expire, options, get) + .await? + .convert() + } } /// Read a value from the server. /// /// - async fn get(&self, key: K) -> RedisResult + fn get(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::get(self, key).await?.convert() + async move { + into!(key); + commands::keys::get(self, key).await?.convert() + } } /// Returns the substring of the string value stored at `key` with offsets `start` and `end` (both inclusive). @@ -130,29 +149,33 @@ pub trait KeysInterface: ClientLike + Sized { /// Note: Command formerly called SUBSTR in Redis verison <=2.0. /// /// - async fn getrange(&self, key: K, start: usize, end: usize) -> RedisResult + fn getrange(&self, key: K, start: usize, end: usize) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::getrange(self, key, start, end).await?.convert() + async move { + into!(key); + commands::keys::getrange(self, key, start, end).await?.convert() + } } /// Overwrites part of the string stored at `key`, starting at the specified `offset`, for the entire length of /// `value`. 
/// /// - async fn setrange(&self, key: K, offset: u32, value: V) -> RedisResult + fn setrange(&self, key: K, offset: u32, value: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(value); - commands::keys::setrange(self, key, offset, value).await?.convert() + async move { + into!(key); + try_into!(value); + commands::keys::setrange(self, key, offset, value).await?.convert() + } } /// Atomically sets `key` to `value` and returns the old value stored at `key`. @@ -160,41 +183,47 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error if `key` does not hold string value. Returns nil if `key` does not exist. /// /// - async fn getset(&self, key: K, value: V) -> RedisResult + fn getset(&self, key: K, value: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(value); - commands::keys::getset(self, key, value).await?.convert() + async move { + into!(key); + try_into!(value); + commands::keys::getset(self, key, value).await?.convert() + } } /// Get the value of key and delete the key. This command is similar to GET, except for the fact that it also /// deletes the key on success (if and only if the key's value type is a string). /// /// - async fn getdel(&self, key: K) -> RedisResult + fn getdel(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::getdel(self, key).await?.convert() + async move { + into!(key); + commands::keys::getdel(self, key).await?.convert() + } } /// Returns the length of the string value stored at key. An error is returned when key holds a non-string value. 
/// /// - async fn strlen(&self, key: K) -> RedisResult + fn strlen(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::strlen(self, key).await?.convert() + async move { + into!(key); + commands::keys::strlen(self, key).await?.convert() + } } /// Removes the specified keys. A key is ignored if it does not exist. @@ -202,13 +231,15 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns the number of keys removed. /// /// - async fn del(&self, keys: K) -> RedisResult + fn del(&self, keys: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::keys::del(self, keys).await?.convert() + async move { + into!(keys); + commands::keys::del(self, keys).await?.convert() + } } /// Unlinks the specified keys. A key is ignored if it does not exist @@ -216,13 +247,15 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns the number of keys removed. /// /// - async fn unlink(&self, keys: K) -> RedisResult + fn unlink(&self, keys: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::keys::unlink(self, keys).await?.convert() + async move { + into!(keys); + commands::keys::unlink(self, keys).await?.convert() + } } /// Renames `source` key to `destination`. @@ -230,15 +263,17 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error when `source` does not exist. If `destination` exists, it gets overwritten. /// /// - async fn rename(&self, source: S, destination: D) -> RedisResult + fn rename(&self, source: S, destination: D) -> impl Future> + Send where R: FromRedis, S: Into + Send, D: Into + Send, { - into!(source); - into!(destination); - commands::keys::rename(self, source, destination).await?.convert() + async move { + into!(source); + into!(destination); + commands::keys::rename(self, source, destination).await?.convert() + } } /// Renames `source` key to `destination` if `destination` does not yet exist. 
@@ -246,69 +281,79 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error when `source` does not exist. /// /// - async fn renamenx(&self, source: S, destination: D) -> RedisResult + fn renamenx(&self, source: S, destination: D) -> impl Future> + Send where R: FromRedis, S: Into + Send, D: Into + Send, { - into!(source); - into!(destination); - commands::keys::renamenx(self, source, destination).await?.convert() + async move { + into!(source); + into!(destination); + commands::keys::renamenx(self, source, destination).await?.convert() + } } /// Append `value` to `key` if it's a string. /// /// - async fn append(&self, key: K, value: V) -> RedisResult + fn append(&self, key: K, value: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(value); - commands::keys::append(self, key, value).await?.convert() + async move { + into!(key); + try_into!(value); + commands::keys::append(self, key, value).await?.convert() + } } /// Returns the values of all specified keys. For every key that does not hold a string value or does not exist, the /// special value nil is returned. /// /// - async fn mget(&self, keys: K) -> RedisResult + fn mget(&self, keys: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::keys::mget(self, keys).await?.convert() + async move { + into!(keys); + commands::keys::mget(self, keys).await?.convert() + } } /// Sets the given keys to their respective values. /// /// - async fn mset(&self, values: V) -> RedisResult<()> + fn mset(&self, values: V) -> impl Future> + Send where V: TryInto + Send, V::Error: Into + Send, { - try_into!(values); - commands::keys::mset(self, values).await?.convert() + async move { + try_into!(values); + commands::keys::mset(self, values).await?.convert() + } } /// Sets the given keys to their respective values. MSETNX will not perform any operation at all even if just a /// single key already exists. 
/// /// - async fn msetnx(&self, values: V) -> RedisResult + fn msetnx(&self, values: V) -> impl Future> + Send where R: FromRedis, V: TryInto + Send, V::Error: Into + Send, { - try_into!(values); - commands::keys::msetnx(self, values).await?.convert() + async move { + try_into!(values); + commands::keys::msetnx(self, values).await?.convert() + } } /// Increments the number stored at `key` by one. If the key does not exist, it is set to 0 before performing the @@ -317,13 +362,15 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error if the value at key is of the wrong type. /// /// - async fn incr(&self, key: K) -> RedisResult + fn incr(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::incr(self, key).await?.convert() + async move { + into!(key); + commands::keys::incr(self, key).await?.convert() + } } /// Increments the number stored at `key` by `val`. If the key does not exist, it is set to 0 before performing the @@ -332,13 +379,15 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error if the value at key is of the wrong type. /// /// - async fn incr_by(&self, key: K, val: i64) -> RedisResult + fn incr_by(&self, key: K, val: i64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::incr_by(self, key, val).await?.convert() + async move { + into!(key); + commands::keys::incr_by(self, key, val).await?.convert() + } } /// Increment the string representing a floating point number stored at key by `val`. If the key does not exist, it @@ -348,13 +397,15 @@ pub trait KeysInterface: ClientLike + Sized { /// value. 
/// /// - async fn incr_by_float(&self, key: K, val: f64) -> RedisResult + fn incr_by_float(&self, key: K, val: f64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::incr_by_float(self, key, val).await?.convert() + async move { + into!(key); + commands::keys::incr_by_float(self, key, val).await?.convert() + } } /// Decrements the number stored at `key` by one. If the key does not exist, it is set to 0 before performing the @@ -363,13 +414,15 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error if the key contains a value of the wrong type. /// /// - async fn decr(&self, key: K) -> RedisResult + fn decr(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::decr(self, key).await?.convert() + async move { + into!(key); + commands::keys::decr(self, key).await?.convert() + } } /// Decrements the number stored at `key` by `val`. If the key does not exist, it is set to 0 before performing the @@ -378,98 +431,112 @@ pub trait KeysInterface: ClientLike + Sized { /// Returns an error if the key contains a value of the wrong type. /// /// - async fn decr_by(&self, key: K, val: i64) -> RedisResult + fn decr_by(&self, key: K, val: i64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::decr_by(self, key, val).await?.convert() + async move { + into!(key); + commands::keys::decr_by(self, key, val).await?.convert() + } } /// Returns the remaining time to live of a key that has a timeout, in seconds. /// /// - async fn ttl(&self, key: K) -> RedisResult + fn ttl(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::ttl(self, key).await?.convert() + async move { + into!(key); + commands::keys::ttl(self, key).await?.convert() + } } /// Returns the remaining time to live of a key that has a timeout, in milliseconds. 
/// /// - async fn pttl(&self, key: K) -> RedisResult + fn pttl(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::pttl(self, key).await?.convert() + async move { + into!(key); + commands::keys::pttl(self, key).await?.convert() + } } /// Remove the existing timeout on a key, turning the key from volatile (a key with an expiration) /// to persistent (a key that will never expire as no timeout is associated). /// - /// Returns a boolean value describing whether or not the timeout was removed. + /// Returns a boolean value describing whether the timeout was removed. /// /// - async fn persist(&self, key: K) -> RedisResult + fn persist(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::persist(self, key).await?.convert() + async move { + into!(key); + commands::keys::persist(self, key).await?.convert() + } } /// Set a timeout on key. After the timeout has expired, the key will be automatically deleted. /// - /// Returns a boolean value describing whether or not the timeout was added. + /// Returns a boolean value describing whether the timeout was added. /// /// - async fn expire(&self, key: K, seconds: i64) -> RedisResult + fn expire(&self, key: K, seconds: i64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::expire(self, key, seconds).await?.convert() + async move { + into!(key); + commands::keys::expire(self, key, seconds).await?.convert() + } } /// Set a timeout on a key based on a UNIX timestamp. /// - /// Returns a boolean value describing whether or not the timeout was added. + /// Returns a boolean value describing whether the timeout was added. 
/// /// - async fn expire_at(&self, key: K, timestamp: i64) -> RedisResult + fn expire_at(&self, key: K, timestamp: i64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::keys::expire_at(self, key, timestamp).await?.convert() + async move { + into!(key); + commands::keys::expire_at(self, key, timestamp).await?.convert() + } } /// Returns number of keys that exist from the `keys` arguments. /// /// - async fn exists(&self, keys: K) -> RedisResult + fn exists(&self, keys: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::keys::exists(self, keys).await?.convert() + async move { + into!(keys); + commands::keys::exists(self, keys).await?.convert() + } } /// Runs the longest common subsequence algorithm on two keys. /// /// - async fn lcs( + fn lcs( &self, key1: K1, key2: K2, @@ -477,15 +544,17 @@ pub trait KeysInterface: ClientLike + Sized { idx: bool, minmatchlen: Option, withmatchlen: bool, - ) -> Result + ) -> impl Future> + Send where R: FromRedis, K1: Into + Send, K2: Into + Send, { - into!(key1, key2); - commands::keys::lcs(self, key1, key2, len, idx, minmatchlen, withmatchlen) - .await? - .convert() + async move { + into!(key1, key2); + commands::keys::lcs(self, key1, key2, len, idx, minmatchlen, withmatchlen) + .await? + .convert() + } } } diff --git a/src/commands/interfaces/lists.rs b/src/commands/interfaces/lists.rs index 916caa21..b8f97e8c 100644 --- a/src/commands/interfaces/lists.rs +++ b/src/commands/interfaces/lists.rs @@ -1,26 +1,37 @@ +use futures::Future; + +use crate::types::{Limit, MultipleStrings, SortOrder}; use crate::{ commands, error::RedisError, interfaces::{ClientLike, RedisResult}, types::{FromRedis, LMoveDirection, ListLocation, MultipleKeys, MultipleValues, RedisKey, RedisValue}, }; +use bytes_utils::Str; use std::convert::TryInto; /// Functions that implement the [lists](https://redis.io/commands#lists) interface. 
-#[async_trait] pub trait ListInterface: ClientLike + Sized { /// The blocking variant of [Self::lmpop]. /// /// - async fn blmpop(&self, timeout: f64, keys: K, direction: LMoveDirection, count: Option) -> RedisResult + fn blmpop( + &self, + timeout: f64, + keys: K, + direction: LMoveDirection, + count: Option, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::lists::blmpop(self, timeout, keys, direction, count) - .await? - .convert() + async move { + into!(keys); + commands::lists::blmpop(self, timeout, keys, direction, count) + .await? + .convert() + } } /// BLPOP is a blocking list pop primitive. It is the blocking version of LPOP because it blocks the connection when @@ -28,13 +39,15 @@ pub trait ListInterface: ClientLike + Sized { /// that is non-empty, with the given keys being checked in the order that they are given. /// /// - async fn blpop(&self, keys: K, timeout: f64) -> RedisResult + fn blpop(&self, keys: K, timeout: f64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::lists::blpop(self, keys, timeout).await?.convert() + async move { + into!(keys); + commands::lists::blpop(self, keys, timeout).await?.convert() + } } /// BRPOP is a blocking list pop primitive. It is the blocking version of RPOP because it blocks the connection when @@ -42,87 +55,113 @@ pub trait ListInterface: ClientLike + Sized { /// that is non-empty, with the given keys being checked in the order that they are given. /// /// - async fn brpop(&self, keys: K, timeout: f64) -> RedisResult + fn brpop(&self, keys: K, timeout: f64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::lists::brpop(self, keys, timeout).await?.convert() + async move { + into!(keys); + commands::lists::brpop(self, keys, timeout).await?.convert() + } } /// The blocking equivalent of [Self::rpoplpush]. 
/// /// - async fn brpoplpush(&self, source: S, destination: D, timeout: f64) -> RedisResult + fn brpoplpush( + &self, + source: S, + destination: D, + timeout: f64, + ) -> impl Future> + Send where R: FromRedis, S: Into + Send, D: Into + Send, { - into!(source, destination); - commands::lists::brpoplpush(self, source, destination, timeout) - .await? - .convert() + async move { + into!(source, destination); + commands::lists::brpoplpush(self, source, destination, timeout) + .await? + .convert() + } } /// The blocking equivalent of [Self::lmove]. /// /// - async fn blmove( + fn blmove( &self, source: S, destination: D, source_direction: LMoveDirection, destination_direction: LMoveDirection, timeout: f64, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, S: Into + Send, D: Into + Send, { - into!(source, destination); - commands::lists::blmove( - self, - source, - destination, - source_direction, - destination_direction, - timeout, - ) - .await? - .convert() + async move { + into!(source, destination); + commands::lists::blmove( + self, + source, + destination, + source_direction, + destination_direction, + timeout, + ) + .await? + .convert() + } } /// Pops one or more elements from the first non-empty list key from the list of provided key names. /// /// - async fn lmpop(&self, keys: K, direction: LMoveDirection, count: Option) -> RedisResult + fn lmpop( + &self, + keys: K, + direction: LMoveDirection, + count: Option, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::lists::lmpop(self, keys, direction, count).await?.convert() + async move { + into!(keys); + commands::lists::lmpop(self, keys, direction, count).await?.convert() + } } /// Returns the element at index index in the list stored at key. 
/// /// - async fn lindex(&self, key: K, index: i64) -> RedisResult + fn lindex(&self, key: K, index: i64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::lists::lindex(self, key, index).await?.convert() + async move { + into!(key); + commands::lists::lindex(self, key, index).await?.convert() + } } /// Inserts element in the list stored at key either before or after the reference value `pivot`. /// /// - async fn linsert(&self, key: K, location: ListLocation, pivot: P, element: V) -> RedisResult + fn linsert( + &self, + key: K, + location: ListLocation, + pivot: P, + element: V, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -131,169 +170,193 @@ pub trait ListInterface: ClientLike + Sized { V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(pivot, element); - commands::lists::linsert(self, key, location, pivot, element) - .await? - .convert() + async move { + into!(key); + try_into!(pivot, element); + commands::lists::linsert(self, key, location, pivot, element) + .await? + .convert() + } } /// Returns the length of the list stored at key. /// /// - async fn llen(&self, key: K) -> RedisResult + fn llen(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::lists::llen(self, key).await?.convert() + async move { + into!(key); + commands::lists::llen(self, key).await?.convert() + } } /// Removes and returns the first elements of the list stored at key. /// /// - async fn lpop(&self, key: K, count: Option) -> RedisResult + fn lpop(&self, key: K, count: Option) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::lists::lpop(self, key, count).await?.convert() + async move { + into!(key); + commands::lists::lpop(self, key, count).await?.convert() + } } /// The command returns the index of matching elements inside a Redis list. 
/// /// - async fn lpos( + fn lpos( &self, key: K, element: V, rank: Option, count: Option, maxlen: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(element); - commands::lists::lpos(self, key, element, rank, count, maxlen) - .await? - .convert() + async move { + into!(key); + try_into!(element); + commands::lists::lpos(self, key, element, rank, count, maxlen) + .await? + .convert() + } } /// Insert all the specified values at the head of the list stored at `key`. /// /// - async fn lpush(&self, key: K, elements: V) -> RedisResult + fn lpush(&self, key: K, elements: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(elements); - commands::lists::lpush(self, key, elements).await?.convert() + async move { + into!(key); + try_into!(elements); + commands::lists::lpush(self, key, elements).await?.convert() + } } /// Inserts specified values at the head of the list stored at `key`, only if `key` already exists and holds a list. /// /// - async fn lpushx(&self, key: K, elements: V) -> RedisResult + fn lpushx(&self, key: K, elements: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(elements); - commands::lists::lpushx(self, key, elements).await?.convert() + async move { + into!(key); + try_into!(elements); + commands::lists::lpushx(self, key, elements).await?.convert() + } } /// Returns the specified elements of the list stored at `key`. 
/// /// - async fn lrange(&self, key: K, start: i64, stop: i64) -> RedisResult + fn lrange(&self, key: K, start: i64, stop: i64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::lists::lrange(self, key, start, stop).await?.convert() + async move { + into!(key); + commands::lists::lrange(self, key, start, stop).await?.convert() + } } /// Removes the first `count` occurrences of elements equal to `element` from the list stored at `key`. /// /// - async fn lrem(&self, key: K, count: i64, element: V) -> RedisResult + fn lrem(&self, key: K, count: i64, element: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(element); - commands::lists::lrem(self, key, count, element).await?.convert() + async move { + into!(key); + try_into!(element); + commands::lists::lrem(self, key, count, element).await?.convert() + } } /// Sets the list element at `index` to `element`. /// /// - async fn lset(&self, key: K, index: i64, element: V) -> RedisResult + fn lset(&self, key: K, index: i64, element: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(element); - commands::lists::lset(self, key, index, element).await?.convert() + async move { + into!(key); + try_into!(element); + commands::lists::lset(self, key, index, element).await?.convert() + } } /// Trim an existing list so that it will contain only the specified range of elements specified. /// /// - async fn ltrim(&self, key: K, start: i64, stop: i64) -> RedisResult + fn ltrim(&self, key: K, start: i64, stop: i64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::lists::ltrim(self, key, start, stop).await?.convert() + async move { + into!(key); + commands::lists::ltrim(self, key, start, stop).await?.convert() + } } /// Removes and returns the last elements of the list stored at `key`. 
/// /// - async fn rpop(&self, key: K, count: Option) -> RedisResult + fn rpop(&self, key: K, count: Option) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::lists::rpop(self, key, count).await?.convert() + async move { + into!(key); + commands::lists::rpop(self, key, count).await?.convert() + } } /// Atomically returns and removes the last element (tail) of the list stored at `source`, and pushes the element at /// the first element (head) of the list stored at `destination`. /// /// - async fn rpoplpush(&self, source: S, dest: D) -> RedisResult + fn rpoplpush(&self, source: S, dest: D) -> impl Future> + Send where R: FromRedis, S: Into + Send, D: Into + Send, { - into!(source, dest); - commands::lists::rpoplpush(self, source, dest).await?.convert() + async move { + into!(source, dest); + commands::lists::rpoplpush(self, source, dest).await?.convert() + } } /// Atomically returns and removes the first/last element (head/tail depending on the source direction argument) of @@ -301,51 +364,108 @@ pub trait ListInterface: ClientLike + Sized { /// destination direction argument) of the list stored at `destination`. /// /// - async fn lmove( + fn lmove( &self, source: S, dest: D, source_direction: LMoveDirection, dest_direction: LMoveDirection, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, S: Into + Send, D: Into + Send, { - into!(source, dest); - commands::lists::lmove(self, source, dest, source_direction, dest_direction) - .await? - .convert() + async move { + into!(source, dest); + commands::lists::lmove(self, source, dest, source_direction, dest_direction) + .await? + .convert() + } } /// Insert all the specified values at the tail of the list stored at `key`. 
/// /// - async fn rpush(&self, key: K, elements: V) -> RedisResult + fn rpush(&self, key: K, elements: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(elements); - commands::lists::rpush(self, key, elements).await?.convert() + async move { + into!(key); + try_into!(elements); + commands::lists::rpush(self, key, elements).await?.convert() + } } /// Inserts specified values at the tail of the list stored at `key`, only if key already exists and holds a list. /// /// - async fn rpushx(&self, key: K, elements: V) -> RedisResult + fn rpushx(&self, key: K, elements: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(elements); - commands::lists::rpushx(self, key, elements).await?.convert() + async move { + into!(key); + try_into!(elements); + commands::lists::rpushx(self, key, elements).await?.convert() + } + } + + /// Returns or stores the elements contained in the list, set or sorted set at `key`. + /// + /// + fn sort( + &self, + key: K, + by: Option, + limit: Option, + get: S, + order: Option, + alpha: bool, + store: Option, + ) -> impl Future> + Send + where + R: FromRedis, + K: Into + Send, + S: Into + Send, + { + async move { + into!(key, get); + commands::lists::sort(self, key, by, limit, get, order, alpha, store) + .await? + .convert() + } + } + + /// Read-only variant of the SORT command. It is exactly like the original SORT but refuses the STORE option and can safely be used in read-only replicas. + /// + /// + fn sort_ro( + &self, + key: K, + by: Option, + limit: Option, + get: S, + order: Option, + alpha: bool, + ) -> impl Future> + Send + where + R: FromRedis, + K: Into + Send, + S: Into + Send, + { + async move { + into!(key, get); + commands::lists::sort_ro(self, key, by, limit, get, order, alpha) + .await? 
+ .convert() + } } } diff --git a/src/commands/interfaces/lua.rs b/src/commands/interfaces/lua.rs index f6528b69..2f71adbe 100644 --- a/src/commands/interfaces/lua.rs +++ b/src/commands/interfaces/lua.rs @@ -6,10 +6,10 @@ use crate::{ }; use bytes::Bytes; use bytes_utils::Str; +use futures::Future; use std::convert::TryInto; /// Functions that implement the [lua](https://redis.io/commands#lua) interface. -#[async_trait] pub trait LuaInterface: ClientLike + Sized { /// Load a script into the scripts cache, without executing it. After the specified command is loaded into the /// script cache it will be callable using EVALSHA with the correct SHA1 digest of the script. @@ -17,13 +17,15 @@ pub trait LuaInterface: ClientLike + Sized { /// Returns the SHA-1 hash of the script. /// /// - async fn script_load(&self, script: S) -> RedisResult + fn script_load(&self, script: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(script); - commands::lua::script_load(self, script).await?.convert() + async move { + into!(script); + commands::lua::script_load(self, script).await?.convert() + } } /// A clustered variant of [script_load](Self::script_load) that loads the script on all primary nodes in a cluster. @@ -31,58 +33,62 @@ pub trait LuaInterface: ClientLike + Sized { /// Returns the SHA-1 hash of the script. #[cfg(feature = "sha-1")] #[cfg_attr(docsrs, doc(cfg(feature = "sha-1")))] - async fn script_load_cluster(&self, script: S) -> RedisResult + fn script_load_cluster(&self, script: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(script); - commands::lua::script_load_cluster(self, script).await?.convert() + async move { + into!(script); + commands::lua::script_load_cluster(self, script).await?.convert() + } } /// Kills the currently executing Lua script, assuming no write operation was yet performed by the script. 
/// /// - async fn script_kill(&self) -> RedisResult<()> { - commands::lua::script_kill(self).await + fn script_kill(&self) -> impl Future> + Send { + async move { commands::lua::script_kill(self).await } } /// A clustered variant of the [script_kill](Self::script_kill) command that issues the command to all primary nodes /// in the cluster. - async fn script_kill_cluster(&self) -> RedisResult<()> { - commands::lua::script_kill_cluster(self).await + fn script_kill_cluster(&self) -> impl Future> + Send { + async move { commands::lua::script_kill_cluster(self).await } } /// Flush the Lua scripts cache. /// /// - async fn script_flush(&self, r#async: bool) -> RedisResult<()> { - commands::lua::script_flush(self, r#async).await + fn script_flush(&self, r#async: bool) -> impl Future> + Send { + async move { commands::lua::script_flush(self, r#async).await } } /// A clustered variant of [script_flush](Self::script_flush) that flushes the script cache on all primary nodes in /// the cluster. - async fn script_flush_cluster(&self, r#async: bool) -> RedisResult<()> { - commands::lua::script_flush_cluster(self, r#async).await + fn script_flush_cluster(&self, r#async: bool) -> impl Future> + Send { + async move { commands::lua::script_flush_cluster(self, r#async).await } } /// Returns information about the existence of the scripts in the script cache. /// /// - async fn script_exists(&self, hashes: H) -> RedisResult + fn script_exists(&self, hashes: H) -> impl Future> + Send where R: FromRedis, H: Into + Send, { - into!(hashes); - commands::lua::script_exists(self, hashes).await?.convert() + async move { + into!(hashes); + commands::lua::script_exists(self, hashes).await?.convert() + } } /// Set the debug mode for subsequent scripts executed with EVAL. 
/// /// - async fn script_debug(&self, flag: ScriptDebugFlag) -> RedisResult<()> { - commands::lua::script_debug(self, flag).await + fn script_debug(&self, flag: ScriptDebugFlag) -> impl Future> + Send { + async move { commands::lua::script_debug(self, flag).await } } /// Evaluates a script cached on the server side by its SHA1 digest. @@ -90,7 +96,7 @@ pub trait LuaInterface: ClientLike + Sized { /// /// /// **Note: Use `None` to represent an empty set of keys or args.** - async fn evalsha(&self, hash: S, keys: K, args: V) -> RedisResult + fn evalsha(&self, hash: S, keys: K, args: V) -> impl Future> + Send where R: FromRedis, S: Into + Send, @@ -98,9 +104,11 @@ pub trait LuaInterface: ClientLike + Sized { V: TryInto + Send, V::Error: Into + Send, { - into!(hash, keys); - try_into!(args); - commands::lua::evalsha(self, hash, keys, args).await?.convert() + async move { + into!(hash, keys); + try_into!(args); + commands::lua::evalsha(self, hash, keys, args).await?.convert() + } } /// Evaluate a Lua script on the server. @@ -108,7 +116,7 @@ pub trait LuaInterface: ClientLike + Sized { /// /// /// **Note: Use `None` to represent an empty set of keys or args.** - async fn eval(&self, script: S, keys: K, args: V) -> RedisResult + fn eval(&self, script: S, keys: K, args: V) -> impl Future> + Send where R: FromRedis, S: Into + Send, @@ -116,19 +124,20 @@ pub trait LuaInterface: ClientLike + Sized { V: TryInto + Send, V::Error: Into + Send, { - into!(script, keys); - try_into!(args); - commands::lua::eval(self, script, keys, args).await?.convert() + async move { + into!(script, keys); + try_into!(args); + commands::lua::eval(self, script, keys, args).await?.convert() + } } } /// Functions that implement the [function](https://redis.io/docs/manual/programmability/functions-intro/) interface. -#[async_trait] pub trait FunctionInterface: ClientLike + Sized { /// Invoke a function. 
/// /// - async fn fcall(&self, func: F, keys: K, args: V) -> RedisResult + fn fcall(&self, func: F, keys: K, args: V) -> impl Future> + Send where R: FromRedis, F: Into + Send, @@ -136,15 +145,17 @@ pub trait FunctionInterface: ClientLike + Sized { V: TryInto + Send, V::Error: Into + Send, { - into!(func); - try_into!(keys, args); - commands::lua::fcall(self, func, keys, args).await?.convert() + async move { + into!(func); + try_into!(keys, args); + commands::lua::fcall(self, func, keys, args).await?.convert() + } } /// This is a read-only variant of the FCALL command that cannot execute commands that modify data. /// /// - async fn fcall_ro(&self, func: F, keys: K, args: V) -> RedisResult + fn fcall_ro(&self, func: F, keys: K, args: V) -> impl Future> + Send where R: FromRedis, F: Into + Send, @@ -152,59 +163,65 @@ pub trait FunctionInterface: ClientLike + Sized { V: TryInto + Send, V::Error: Into + Send, { - into!(func); - try_into!(keys, args); - commands::lua::fcall_ro(self, func, keys, args).await?.convert() + async move { + into!(func); + try_into!(keys, args); + commands::lua::fcall_ro(self, func, keys, args).await?.convert() + } } /// Delete a library and all its functions. /// /// - async fn function_delete(&self, library_name: S) -> RedisResult + fn function_delete(&self, library_name: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(library_name); - commands::lua::function_delete(self, library_name).await?.convert() + async move { + into!(library_name); + commands::lua::function_delete(self, library_name).await?.convert() + } } /// Delete a library and all its functions from each cluster node concurrently. 
/// /// - async fn function_delete_cluster(&self, library_name: S) -> RedisResult<()> + fn function_delete_cluster(&self, library_name: S) -> impl Future> + Send where S: Into + Send, { - into!(library_name); - commands::lua::function_delete_cluster(self, library_name).await + async move { + into!(library_name); + commands::lua::function_delete_cluster(self, library_name).await + } } /// Return the serialized payload of loaded libraries. /// /// - async fn function_dump(&self) -> RedisResult + fn function_dump(&self) -> impl Future> + Send where R: FromRedis, { - commands::lua::function_dump(self).await?.convert() + async move { commands::lua::function_dump(self).await?.convert() } } /// Deletes all the libraries. /// /// - async fn function_flush(&self, r#async: bool) -> RedisResult + fn function_flush(&self, r#async: bool) -> impl Future> + Send where R: FromRedis, { - commands::lua::function_flush(self, r#async).await?.convert() + async move { commands::lua::function_flush(self, r#async).await?.convert() } } /// Deletes all the libraries on all cluster nodes concurrently. /// /// - async fn function_flush_cluster(&self, r#async: bool) -> RedisResult<()> { - commands::lua::function_flush_cluster(self, r#async).await + fn function_flush_cluster(&self, r#async: bool) -> impl Future> + Send { + async move { commands::lua::function_flush_cluster(self, r#async).await } } /// Kill a function that is currently executing. @@ -213,51 +230,61 @@ pub trait FunctionInterface: ClientLike + Sized { /// possible. /// /// - async fn function_kill(&self) -> RedisResult + fn function_kill(&self) -> impl Future> + Send where R: FromRedis, { - commands::lua::function_kill(self).await?.convert() + async move { commands::lua::function_kill(self).await?.convert() } } /// Return information about the functions and libraries. 
/// /// - async fn function_list(&self, library_name: Option, withcode: bool) -> RedisResult + fn function_list( + &self, + library_name: Option, + withcode: bool, + ) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - let library_name = library_name.map(|l| l.into()); - commands::lua::function_list(self, library_name, withcode) - .await? - .convert() + async move { + let library_name = library_name.map(|l| l.into()); + commands::lua::function_list(self, library_name, withcode) + .await? + .convert() + } } /// Load a library to Redis. /// /// - async fn function_load(&self, replace: bool, code: S) -> RedisResult + fn function_load(&self, replace: bool, code: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(code); - commands::lua::function_load(self, replace, code).await?.convert() + async move { + into!(code); + commands::lua::function_load(self, replace, code).await?.convert() + } } /// Load a library to Redis on all cluster nodes concurrently. /// /// - async fn function_load_cluster(&self, replace: bool, code: S) -> RedisResult + fn function_load_cluster(&self, replace: bool, code: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(code); - commands::lua::function_load_cluster(self, replace, code) - .await? - .convert() + async move { + into!(code); + commands::lua::function_load_cluster(self, replace, code) + .await? + .convert() + } } /// Restore libraries from the serialized payload. @@ -265,18 +292,20 @@ pub trait FunctionInterface: ClientLike + Sized { /// /// /// Note: Use `FnPolicy::default()` to use the default function restore policy (`"APPEND"`). - async fn function_restore(&self, serialized: B, policy: P) -> RedisResult + fn function_restore(&self, serialized: B, policy: P) -> impl Future> + Send where R: FromRedis, B: Into + Send, P: TryInto + Send, P::Error: Into + Send, { - into!(serialized); - try_into!(policy); - commands::lua::function_restore(self, serialized, policy) - .await? 
- .convert() + async move { + into!(serialized); + try_into!(policy); + commands::lua::function_restore(self, serialized, policy) + .await? + .convert() + } } /// Restore libraries from the serialized payload on all cluster nodes concurrently. @@ -284,15 +313,17 @@ pub trait FunctionInterface: ClientLike + Sized { /// /// /// Note: Use `FnPolicy::default()` to use the default function restore policy (`"APPEND"`). - async fn function_restore_cluster(&self, serialized: B, policy: P) -> RedisResult<()> + fn function_restore_cluster(&self, serialized: B, policy: P) -> impl Future> + Send where B: Into + Send, P: TryInto + Send, P::Error: Into + Send, { - into!(serialized); - try_into!(policy); - commands::lua::function_restore_cluster(self, serialized, policy).await + async move { + into!(serialized); + try_into!(policy); + commands::lua::function_restore_cluster(self, serialized, policy).await + } } /// Return information about the function that's currently running and information about the available execution @@ -301,10 +332,10 @@ pub trait FunctionInterface: ClientLike + Sized { /// Note: This command runs on a backchannel connection to the server. /// /// - async fn function_stats(&self) -> RedisResult + fn function_stats(&self) -> impl Future> + Send where R: FromRedis, { - commands::lua::function_stats(self).await?.convert() + async move { commands::lua::function_stats(self).await?.convert() } } } diff --git a/src/commands/interfaces/memory.rs b/src/commands/interfaces/memory.rs index bf5735af..047ddff0 100644 --- a/src/commands/interfaces/memory.rs +++ b/src/commands/interfaces/memory.rs @@ -4,57 +4,59 @@ use crate::{ prelude::FromRedis, types::RedisKey, }; +use futures::Future; /// Functions that implement the [memory](https://redis.io/commands#server) interface. 
-#[async_trait] pub trait MemoryInterface: ClientLike + Sized { /// The MEMORY DOCTOR command reports about different memory-related issues that the Redis server experiences, and /// advises about possible remedies. /// /// - async fn memory_doctor(&self) -> RedisResult + fn memory_doctor(&self) -> impl Future> + Send where R: FromRedis, { - commands::memory::memory_doctor(self).await?.convert() + async move { commands::memory::memory_doctor(self).await?.convert() } } /// The MEMORY MALLOC-STATS command provides an internal statistics report from the memory allocator. /// /// - async fn memory_malloc_stats(&self) -> RedisResult + fn memory_malloc_stats(&self) -> impl Future> + Send where R: FromRedis, { - commands::memory::memory_malloc_stats(self).await?.convert() + async move { commands::memory::memory_malloc_stats(self).await?.convert() } } /// The MEMORY PURGE command attempts to purge dirty pages so these can be reclaimed by the allocator. /// /// - async fn memory_purge(&self) -> RedisResult<()> { - commands::memory::memory_purge(self).await + fn memory_purge(&self) -> impl Future> + Send { + async move { commands::memory::memory_purge(self).await } } /// The MEMORY STATS command returns an Array reply about the memory usage of the server. /// /// - async fn memory_stats(&self) -> RedisResult + fn memory_stats(&self) -> impl Future> + Send where R: FromRedis, { - commands::memory::memory_stats(self).await?.convert() + async move { commands::memory::memory_stats(self).await?.convert() } } /// The MEMORY USAGE command reports the number of bytes that a key and its value require to be stored in RAM. 
/// /// - async fn memory_usage(&self, key: K, samples: Option) -> RedisResult + fn memory_usage(&self, key: K, samples: Option) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::memory::memory_usage(self, key, samples).await?.convert() + async move { + into!(key); + commands::memory::memory_usage(self, key, samples).await?.convert() + } } } diff --git a/src/commands/interfaces/mod.rs b/src/commands/interfaces/mod.rs index aa615632..9dd06620 100644 --- a/src/commands/interfaces/mod.rs +++ b/src/commands/interfaces/mod.rs @@ -1,35 +1,47 @@ +#[cfg(feature = "i-acl")] pub mod acl; +#[cfg(feature = "i-client")] pub mod client; +#[cfg(feature = "i-cluster")] pub mod cluster; +#[cfg(feature = "i-config")] pub mod config; +#[cfg(feature = "i-geo")] pub mod geo; +#[cfg(feature = "i-hashes")] pub mod hashes; +#[cfg(feature = "i-hyperloglog")] pub mod hyperloglog; +#[cfg(feature = "i-keys")] pub mod keys; +#[cfg(feature = "i-lists")] pub mod lists; +#[cfg(feature = "i-scripts")] pub mod lua; +#[cfg(feature = "i-memory")] pub mod memory; pub mod metrics; +#[cfg(feature = "i-pubsub")] pub mod pubsub; +#[cfg(feature = "i-redis-json")] +pub mod redis_json; pub mod scan; +#[cfg(feature = "sentinel-client")] +pub mod sentinel; +#[cfg(feature = "i-server")] pub mod server; +#[cfg(feature = "i-sets")] pub mod sets; +#[cfg(feature = "i-slowlog")] pub mod slowlog; +#[cfg(feature = "i-sorted-sets")] pub mod sorted_sets; +#[cfg(feature = "i-streams")] pub mod streams; pub mod strings; - +#[cfg(feature = "i-time-series")] +pub mod timeseries; +#[cfg(feature = "i-tracking")] +pub mod tracking; #[cfg(feature = "transactions")] pub mod transactions; - -#[cfg(feature = "client-tracking")] -pub mod tracking; - -#[cfg(feature = "sentinel-client")] -pub mod sentinel; - -#[cfg(feature = "redis-json")] -pub mod redis_json; - -#[cfg(feature = "time-series")] -pub mod timeseries; diff --git a/src/commands/interfaces/pubsub.rs b/src/commands/interfaces/pubsub.rs 
index 3430e410..310f5412 100644 --- a/src/commands/interfaces/pubsub.rs +++ b/src/commands/interfaces/pubsub.rs @@ -5,42 +5,50 @@ use crate::{ types::{FromRedis, MultipleStrings, RedisValue}, }; use bytes_utils::Str; +use futures::Future; use std::convert::TryInto; /// Functions that implement the [pubsub](https://redis.io/commands#pubsub) interface. -#[async_trait] -pub trait PubsubInterface: ClientLike + Sized { +pub trait PubsubInterface: ClientLike + Sized + Send { /// Subscribe to a channel on the publish-subscribe interface. /// /// - async fn subscribe(&self, channels: S) -> RedisResult<()> + fn subscribe(&self, channels: S) -> impl Future> + Send where S: Into + Send, + Self: Send + Sync, { - into!(channels); - commands::pubsub::subscribe(self, channels).await + async move { + into!(channels); + commands::pubsub::subscribe(self, channels).await + } } /// Unsubscribe from a channel on the PubSub interface. /// /// - async fn unsubscribe(&self, channels: S) -> RedisResult<()> + fn unsubscribe(&self, channels: S) -> impl Future> + Send where S: Into + Send, + Self: Sync, { - into!(channels); - commands::pubsub::unsubscribe(self, channels).await + async move { + into!(channels); + commands::pubsub::unsubscribe(self, channels).await + } } /// Subscribes the client to the given patterns. /// /// - async fn psubscribe(&self, patterns: S) -> RedisResult<()> + fn psubscribe(&self, patterns: S) -> impl Future> + Send where S: Into + Send, { - into!(patterns); - commands::pubsub::psubscribe(self, patterns).await + async move { + into!(patterns); + commands::pubsub::psubscribe(self, patterns).await + } } /// Unsubscribes the client from the given patterns, or from all of them if none is given. @@ -48,38 +56,44 @@ pub trait PubsubInterface: ClientLike + Sized { /// If no channels are provided this command returns an empty array. 
/// /// - async fn punsubscribe(&self, patterns: S) -> RedisResult<()> + fn punsubscribe(&self, patterns: S) -> impl Future> + Send where S: Into + Send, { - into!(patterns); - commands::pubsub::punsubscribe(self, patterns).await + async move { + into!(patterns); + commands::pubsub::punsubscribe(self, patterns).await + } } /// Publish a message on the PubSub interface, returning the number of clients that received the message. /// /// - async fn publish(&self, channel: S, message: V) -> RedisResult + fn publish(&self, channel: S, message: V) -> impl Future> + Send where R: FromRedis, S: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(channel); - try_into!(message); - commands::pubsub::publish(self, channel, message).await?.convert() + async move { + into!(channel); + try_into!(message); + commands::pubsub::publish(self, channel, message).await?.convert() + } } /// Subscribes the client to the specified shard channels. /// /// - async fn ssubscribe(&self, channels: C) -> RedisResult<()> + fn ssubscribe(&self, channels: C) -> impl Future> + Send where C: Into + Send, { - into!(channels); - commands::pubsub::ssubscribe(self, channels).await + async move { + into!(channels); + commands::pubsub::ssubscribe(self, channels).await + } } /// Unsubscribes the client from the given shard channels, or from all of them if none is given. @@ -87,84 +101,96 @@ pub trait PubsubInterface: ClientLike + Sized { /// If no channels are provided this command returns an empty array. /// /// - async fn sunsubscribe(&self, channels: C) -> RedisResult<()> + fn sunsubscribe(&self, channels: C) -> impl Future> + Send where C: Into + Send, { - into!(channels); - commands::pubsub::sunsubscribe(self, channels).await + async move { + into!(channels); + commands::pubsub::sunsubscribe(self, channels).await + } } /// Posts a message to the given shard channel. 
/// /// - async fn spublish(&self, channel: S, message: V) -> RedisResult + fn spublish(&self, channel: S, message: V) -> impl Future> + Send where R: FromRedis, S: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(channel); - try_into!(message); - commands::pubsub::spublish(self, channel, message).await?.convert() + async move { + into!(channel); + try_into!(message); + commands::pubsub::spublish(self, channel, message).await?.convert() + } } /// Lists the currently active channels. /// /// - async fn pubsub_channels(&self, pattern: S) -> RedisResult + fn pubsub_channels(&self, pattern: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(pattern); - commands::pubsub::pubsub_channels(self, pattern).await?.convert() + async move { + into!(pattern); + commands::pubsub::pubsub_channels(self, pattern).await?.convert() + } } /// Returns the number of unique patterns that are subscribed to by clients. /// /// - async fn pubsub_numpat(&self) -> RedisResult + fn pubsub_numpat(&self) -> impl Future> + Send where R: FromRedis, { - commands::pubsub::pubsub_numpat(self).await?.convert() + async move { commands::pubsub::pubsub_numpat(self).await?.convert() } } /// Returns the number of subscribers (exclusive of clients subscribed to patterns) for the specified channels. /// /// - async fn pubsub_numsub(&self, channels: S) -> RedisResult + fn pubsub_numsub(&self, channels: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(channels); - commands::pubsub::pubsub_numsub(self, channels).await?.convert() + async move { + into!(channels); + commands::pubsub::pubsub_numsub(self, channels).await?.convert() + } } /// Lists the currently active shard channels. 
/// /// - async fn pubsub_shardchannels(&self, pattern: S) -> RedisResult + fn pubsub_shardchannels(&self, pattern: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(pattern); - commands::pubsub::pubsub_shardchannels(self, pattern).await?.convert() + async move { + into!(pattern); + commands::pubsub::pubsub_shardchannels(self, pattern).await?.convert() + } } /// Returns the number of subscribers for the specified shard channels. /// /// - async fn pubsub_shardnumsub(&self, channels: S) -> RedisResult + fn pubsub_shardnumsub(&self, channels: S) -> impl Future> + Send where R: FromRedis, S: Into + Send, { - into!(channels); - commands::pubsub::pubsub_shardnumsub(self, channels).await?.convert() + async move { + into!(channels); + commands::pubsub::pubsub_shardnumsub(self, channels).await?.convert() + } } } diff --git a/src/commands/interfaces/redis_json.rs b/src/commands/interfaces/redis_json.rs index 3c4d6cf9..fdfe3834 100644 --- a/src/commands/interfaces/redis_json.rs +++ b/src/commands/interfaces/redis_json.rs @@ -4,6 +4,7 @@ use crate::{ types::{FromRedis, MultipleKeys, MultipleStrings, RedisKey, SetOptions}, }; use bytes_utils::Str; +use futures::Future; use serde_json::Value; /// The client commands in the [RedisJSON](https://redis.io/docs/data-types/json/) interface. @@ -43,165 +44,199 @@ use serde_json::Value; /// Ok(()) /// } /// ``` -#[async_trait] -#[cfg_attr(docsrs, doc(cfg(feature = "redis-json")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] pub trait RedisJsonInterface: ClientLike + Sized { /// Append the json values into the array at path after the last element in it. 
/// /// - async fn json_arrappend(&self, key: K, path: P, values: Vec) -> RedisResult + fn json_arrappend(&self, key: K, path: P, values: Vec) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, V: Into + Send, { - into!(key, path); - let values = values.into_iter().map(|v| v.into()).collect(); - commands::redis_json::json_arrappend(self, key, path, values) - .await? - .convert() + async move { + into!(key, path); + let values = values.into_iter().map(|v| v.into()).collect(); + commands::redis_json::json_arrappend(self, key, path, values) + .await? + .convert() + } } /// Search for the first occurrence of a JSON value in an array. /// /// - async fn json_arrindex( + fn json_arrindex( &self, key: K, path: P, value: V, start: Option, stop: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, V: Into + Send, { - into!(key, path, value); - commands::redis_json::json_arrindex(self, key, path, value, start, stop) - .await? - .convert() + async move { + into!(key, path, value); + commands::redis_json::json_arrindex(self, key, path, value, start, stop) + .await? + .convert() + } } /// Insert the json values into the array at path before the index (shifts to the right). /// /// - async fn json_arrinsert(&self, key: K, path: P, index: i64, values: Vec) -> RedisResult + fn json_arrinsert( + &self, + key: K, + path: P, + index: i64, + values: Vec, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, V: Into + Send, { - into!(key, path); - let values = values.into_iter().map(|v| v.into()).collect(); - commands::redis_json::json_arrinsert(self, key, path, index, values) - .await? - .convert() + async move { + into!(key, path); + let values = values.into_iter().map(|v| v.into()).collect(); + commands::redis_json::json_arrinsert(self, key, path, index, values) + .await? + .convert() + } } /// Report the length of the JSON array at path in key. 
/// /// - async fn json_arrlen(&self, key: K, path: Option

) -> RedisResult + fn json_arrlen(&self, key: K, path: Option

) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key); - let path = path.map(|p| p.into()); - commands::redis_json::json_arrlen(self, key, path).await?.convert() + async move { + into!(key); + let path = path.map(|p| p.into()); + commands::redis_json::json_arrlen(self, key, path).await?.convert() + } } /// Remove and return an element from the index in the array /// /// - async fn json_arrpop(&self, key: K, path: Option

, index: Option) -> RedisResult + fn json_arrpop( + &self, + key: K, + path: Option

, + index: Option, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key); - let path = path.map(|p| p.into()); - commands::redis_json::json_arrpop(self, key, path, index) - .await? - .convert() + async move { + into!(key); + let path = path.map(|p| p.into()); + commands::redis_json::json_arrpop(self, key, path, index) + .await? + .convert() + } } /// Trim an array so that it contains only the specified inclusive range of elements /// /// - async fn json_arrtrim(&self, key: K, path: P, start: i64, stop: i64) -> RedisResult + fn json_arrtrim( + &self, + key: K, + path: P, + start: i64, + stop: i64, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key, path); - commands::redis_json::json_arrtrim(self, key, path, start, stop) - .await? - .convert() + async move { + into!(key, path); + commands::redis_json::json_arrtrim(self, key, path, start, stop) + .await? + .convert() + } } /// Clear container values (arrays/objects) and set numeric values to 0 /// /// - async fn json_clear(&self, key: K, path: Option

) -> RedisResult + fn json_clear(&self, key: K, path: Option

) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key); - let path = path.map(|p| p.into()); - commands::redis_json::json_clear(self, key, path).await?.convert() + async move { + into!(key); + let path = path.map(|p| p.into()); + commands::redis_json::json_clear(self, key, path).await?.convert() + } } /// Report a value's memory usage in bytes /// /// - async fn json_debug_memory(&self, key: K, path: Option

) -> RedisResult + fn json_debug_memory(&self, key: K, path: Option

) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key); - let path = path.map(|p| p.into()); - commands::redis_json::json_debug_memory(self, key, path) - .await? - .convert() + async move { + into!(key); + let path = path.map(|p| p.into()); + commands::redis_json::json_debug_memory(self, key, path) + .await? + .convert() + } } /// Delete a value. /// /// - async fn json_del(&self, key: K, path: P) -> RedisResult + fn json_del(&self, key: K, path: P) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key, path); - commands::redis_json::json_del(self, key, path).await?.convert() + async move { + into!(key, path); + commands::redis_json::json_del(self, key, path).await?.convert() + } } /// Return the value at path in JSON serialized form. /// /// - async fn json_get( + fn json_get( &self, key: K, indent: Option, newline: Option, space: Option, paths: P, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -210,190 +245,227 @@ pub trait RedisJsonInterface: ClientLike + Sized { S: Into + Send, P: Into + Send, { - into!(key, paths); - let indent = indent.map(|v| v.into()); - let newline = newline.map(|v| v.into()); - let space = space.map(|v| v.into()); - commands::redis_json::json_get(self, key, indent, newline, space, paths) - .await? - .convert() + async move { + into!(key, paths); + let indent = indent.map(|v| v.into()); + let newline = newline.map(|v| v.into()); + let space = space.map(|v| v.into()); + commands::redis_json::json_get(self, key, indent, newline, space, paths) + .await? + .convert() + } } /// Merge a given JSON value into matching paths. /// /// - async fn json_merge(&self, key: K, path: P, value: V) -> RedisResult + fn json_merge(&self, key: K, path: P, value: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, V: Into + Send, { - into!(key, path, value); - commands::redis_json::json_merge(self, key, path, value) - .await? 
- .convert() + async move { + into!(key, path, value); + commands::redis_json::json_merge(self, key, path, value) + .await? + .convert() + } } /// Return the values at path from multiple key arguments. /// /// - async fn json_mget(&self, keys: K, path: P) -> RedisResult + fn json_mget(&self, keys: K, path: P) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(keys, path); - commands::redis_json::json_mget(self, keys, path).await?.convert() + async move { + into!(keys, path); + commands::redis_json::json_mget(self, keys, path).await?.convert() + } } /// Set or update one or more JSON values according to the specified key-path-value triplets. /// /// - async fn json_mset(&self, values: Vec<(K, P, V)>) -> RedisResult + fn json_mset(&self, values: Vec<(K, P, V)>) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, V: Into + Send, { - let values = values - .into_iter() - .map(|(k, p, v)| (k.into(), p.into(), v.into())) - .collect(); - commands::redis_json::json_mset(self, values).await?.convert() + async move { + let values = values + .into_iter() + .map(|(k, p, v)| (k.into(), p.into(), v.into())) + .collect(); + commands::redis_json::json_mset(self, values).await?.convert() + } } /// Increment the number value stored at path by number /// /// - async fn json_numincrby(&self, key: K, path: P, value: V) -> RedisResult + fn json_numincrby(&self, key: K, path: P, value: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, V: Into + Send, { - into!(key, path, value); - commands::redis_json::json_numincrby(self, key, path, value) - .await? - .convert() + async move { + into!(key, path, value); + commands::redis_json::json_numincrby(self, key, path, value) + .await? + .convert() + } } /// Return the keys in the object that's referenced by path. /// /// - async fn json_objkeys(&self, key: K, path: Option

) -> RedisResult + fn json_objkeys(&self, key: K, path: Option

) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key); - let path = path.map(|p| p.into()); - commands::redis_json::json_objkeys(self, key, path).await?.convert() + async move { + into!(key); + let path = path.map(|p| p.into()); + commands::redis_json::json_objkeys(self, key, path).await?.convert() + } } /// Report the number of keys in the JSON object at path in key. /// /// - async fn json_objlen(&self, key: K, path: Option

) -> RedisResult + fn json_objlen(&self, key: K, path: Option

) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key); - let path = path.map(|p| p.into()); - commands::redis_json::json_objlen(self, key, path).await?.convert() + async move { + into!(key); + let path = path.map(|p| p.into()); + commands::redis_json::json_objlen(self, key, path).await?.convert() + } } /// Return the JSON in key in Redis serialization protocol specification form. /// /// - async fn json_resp(&self, key: K, path: Option

) -> RedisResult + fn json_resp(&self, key: K, path: Option

) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key); - let path = path.map(|p| p.into()); - commands::redis_json::json_resp(self, key, path).await?.convert() + async move { + into!(key); + let path = path.map(|p| p.into()); + commands::redis_json::json_resp(self, key, path).await?.convert() + } } /// Set the JSON value at path in key. /// /// - async fn json_set(&self, key: K, path: P, value: V, options: Option) -> RedisResult + fn json_set( + &self, + key: K, + path: P, + value: V, + options: Option, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, V: Into + Send, { - into!(key, path, value); - commands::redis_json::json_set(self, key, path, value, options) - .await? - .convert() + async move { + into!(key, path, value); + commands::redis_json::json_set(self, key, path, value, options) + .await? + .convert() + } } /// Append the json-string values to the string at path. /// /// - async fn json_strappend(&self, key: K, path: Option

, value: V) -> RedisResult + fn json_strappend( + &self, + key: K, + path: Option

, + value: V, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, V: Into + Send, { - into!(key, value); - let path = path.map(|p| p.into()); - commands::redis_json::json_strappend(self, key, path, value) - .await? - .convert() + async move { + into!(key, value); + let path = path.map(|p| p.into()); + commands::redis_json::json_strappend(self, key, path, value) + .await? + .convert() + } } /// Report the length of the JSON String at path in key. /// /// - async fn json_strlen(&self, key: K, path: Option

) -> RedisResult + fn json_strlen(&self, key: K, path: Option

) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key); - let path = path.map(|p| p.into()); - commands::redis_json::json_strlen(self, key, path).await?.convert() + async move { + into!(key); + let path = path.map(|p| p.into()); + commands::redis_json::json_strlen(self, key, path).await?.convert() + } } /// Toggle a Boolean value stored at path. /// /// - async fn json_toggle(&self, key: K, path: P) -> RedisResult + fn json_toggle(&self, key: K, path: P) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key, path); - commands::redis_json::json_toggle(self, key, path).await?.convert() + async move { + into!(key, path); + commands::redis_json::json_toggle(self, key, path).await?.convert() + } } /// Report the type of JSON value at path. /// /// - async fn json_type(&self, key: K, path: Option

) -> RedisResult + fn json_type(&self, key: K, path: Option

) -> impl Future> + Send where R: FromRedis, K: Into + Send, P: Into + Send, { - into!(key); - let path = path.map(|p| p.into()); - commands::redis_json::json_type(self, key, path).await?.convert() + async move { + into!(key); + let path = path.map(|p| p.into()); + commands::redis_json::json_type(self, key, path).await?.convert() + } } } diff --git a/src/commands/interfaces/sentinel.rs b/src/commands/interfaces/sentinel.rs index 03281cf7..61876309 100644 --- a/src/commands/interfaces/sentinel.rs +++ b/src/commands/interfaces/sentinel.rs @@ -5,192 +5,216 @@ use crate::{ types::{FromRedis, RedisMap, RedisValue, SentinelFailureKind}, }; use bytes_utils::Str; +use futures::Future; use std::{convert::TryInto, net::IpAddr}; /// Functions that implement the [sentinel](https://redis.io/topics/sentinel#sentinel-commands) interface. -#[async_trait] pub trait SentinelInterface: ClientLike + Sized { /// Check if the current Sentinel configuration is able to reach the quorum needed to failover a master, and the /// majority needed to authorize the failover. - async fn ckquorum(&self, name: N) -> RedisResult + fn ckquorum(&self, name: N) -> impl Future> + Send where R: FromRedis, N: Into + Send, { - into!(name); - commands::sentinel::ckquorum(self, name).await?.convert() + async move { + into!(name); + commands::sentinel::ckquorum(self, name).await?.convert() + } } /// Force Sentinel to rewrite its configuration on disk, including the current Sentinel state. - async fn flushconfig(&self) -> RedisResult + fn flushconfig(&self) -> impl Future> + Send where R: FromRedis, { - commands::sentinel::flushconfig(self).await?.convert() + async move { commands::sentinel::flushconfig(self).await?.convert() } } /// Force a failover as if the master was not reachable, and without asking for agreement to other Sentinels. 
- async fn failover(&self, name: N) -> RedisResult + fn failover(&self, name: N) -> impl Future> + Send where R: FromRedis, N: Into + Send, { - into!(name); - commands::sentinel::failover(self, name).await?.convert() + async move { + into!(name); + commands::sentinel::failover(self, name).await?.convert() + } } /// Return the ip and port number of the master with that name. - async fn get_master_addr_by_name(&self, name: N) -> RedisResult + fn get_master_addr_by_name(&self, name: N) -> impl Future> + Send where R: FromRedis, N: Into + Send, { - into!(name); - commands::sentinel::get_master_addr_by_name(self, name).await?.convert() + async move { + into!(name); + commands::sentinel::get_master_addr_by_name(self, name).await?.convert() + } } /// Return cached INFO output from masters and replicas. - async fn info_cache(&self) -> RedisResult + fn info_cache(&self) -> impl Future> + Send where R: FromRedis, { - commands::sentinel::info_cache(self).await?.convert() + async move { commands::sentinel::info_cache(self).await?.convert() } } /// Show the state and info of the specified master. - async fn master(&self, name: N) -> RedisResult + fn master(&self, name: N) -> impl Future> + Send where R: FromRedis, N: Into + Send, { - into!(name); - commands::sentinel::master(self, name).await?.convert() + async move { + into!(name); + commands::sentinel::master(self, name).await?.convert() + } } /// Show a list of monitored masters and their state. - async fn masters(&self) -> RedisResult + fn masters(&self) -> impl Future> + Send where R: FromRedis, { - commands::sentinel::masters(self).await?.convert() + async move { commands::sentinel::masters(self).await?.convert() } } /// Start Sentinel's monitoring. 
/// /// - async fn monitor(&self, name: N, ip: IpAddr, port: u16, quorum: u32) -> RedisResult + fn monitor(&self, name: N, ip: IpAddr, port: u16, quorum: u32) -> impl Future> + Send where R: FromRedis, N: Into + Send, { - into!(name); - commands::sentinel::monitor(self, name, ip, port, quorum) - .await? - .convert() + async move { + into!(name); + commands::sentinel::monitor(self, name, ip, port, quorum) + .await? + .convert() + } } /// Return the ID of the Sentinel instance. - async fn myid(&self) -> RedisResult + fn myid(&self) -> impl Future> + Send where R: FromRedis, { - commands::sentinel::myid(self).await?.convert() + async move { commands::sentinel::myid(self).await?.convert() } } /// This command returns information about pending scripts. - async fn pending_scripts(&self) -> RedisResult + fn pending_scripts(&self) -> impl Future> + Send where R: FromRedis, { - commands::sentinel::pending_scripts(self).await?.convert() + async move { commands::sentinel::pending_scripts(self).await?.convert() } } /// Stop Sentinel's monitoring. /// /// - async fn remove(&self, name: N) -> RedisResult + fn remove(&self, name: N) -> impl Future> + Send where R: FromRedis, N: Into + Send, { - into!(name); - commands::sentinel::remove(self, name).await?.convert() + async move { + into!(name); + commands::sentinel::remove(self, name).await?.convert() + } } /// Show a list of replicas for this master, and their state. - async fn replicas(&self, name: N) -> RedisResult + fn replicas(&self, name: N) -> impl Future> + Send where R: FromRedis, N: Into + Send, { - into!(name); - commands::sentinel::replicas(self, name).await?.convert() + async move { + into!(name); + commands::sentinel::replicas(self, name).await?.convert() + } } /// Show a list of sentinel instances for this master, and their state. 
- async fn sentinels(&self, name: N) -> RedisResult + fn sentinels(&self, name: N) -> impl Future> + Send where R: FromRedis, N: Into + Send, { - into!(name); - commands::sentinel::sentinels(self, name).await?.convert() + async move { + into!(name); + commands::sentinel::sentinels(self, name).await?.convert() + } } /// Set Sentinel's monitoring configuration. /// /// - async fn set(&self, name: N, args: V) -> RedisResult + fn set(&self, name: N, args: V) -> impl Future> + Send where R: FromRedis, N: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(name); - try_into!(args); - commands::sentinel::set(self, name, args).await?.convert() + async move { + into!(name); + try_into!(args); + commands::sentinel::set(self, name, args).await?.convert() + } } /// This command simulates different Sentinel crash scenarios. - async fn simulate_failure(&self, kind: SentinelFailureKind) -> RedisResult + fn simulate_failure(&self, kind: SentinelFailureKind) -> impl Future> + Send where R: FromRedis, { - commands::sentinel::simulate_failure(self, kind).await?.convert() + async move { commands::sentinel::simulate_failure(self, kind).await?.convert() } } /// This command will reset all the masters with matching name. - async fn reset(&self, pattern: P) -> RedisResult + fn reset(&self, pattern: P) -> impl Future> + Send where R: FromRedis, P: Into + Send, { - into!(pattern); - commands::sentinel::reset(self, pattern).await?.convert() + async move { + into!(pattern); + commands::sentinel::reset(self, pattern).await?.convert() + } } /// Get the current value of a global Sentinel configuration parameter. The specified name may be a wildcard, /// similar to the Redis CONFIG GET command. 
- async fn config_get(&self, name: K) -> RedisResult + fn config_get(&self, name: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(name); - commands::sentinel::config_get(self, name).await?.convert() + async move { + into!(name); + commands::sentinel::config_get(self, name).await?.convert() + } } /// Set the value of a global Sentinel configuration parameter. - async fn config_set(&self, name: K, value: V) -> RedisResult + fn config_set(&self, name: K, value: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(name); - try_into!(value); - commands::sentinel::config_set(self, name, value).await?.convert() + async move { + into!(name); + try_into!(value); + commands::sentinel::config_set(self, name, value).await?.convert() + } } } diff --git a/src/commands/interfaces/server.rs b/src/commands/interfaces/server.rs index 44e96f5a..18c15514 100644 --- a/src/commands/interfaces/server.rs +++ b/src/commands/interfaces/server.rs @@ -2,142 +2,71 @@ use crate::{ commands, error::RedisError, interfaces::{ClientLike, RedisResult}, - types::{FromRedis, RespVersion, Server}, + types::{FromRedis, Server}, }; -use bytes_utils::Str; -use std::time::Duration; -use tokio::time::interval as tokio_interval; - -/// Functions for authenticating clients. -#[async_trait] -pub trait AuthInterface: ClientLike { - /// Request for authentication in a password-protected Redis server. Returns ok if successful. - /// - /// The client will automatically authenticate with the default user if a password is provided in the associated - /// `RedisConfig` when calling [connect](crate::interfaces::ClientLike::connect). - /// - /// If running against clustered servers this function will authenticate all connections. 
- /// - /// - async fn auth(&self, username: Option, password: S) -> RedisResult<()> - where - S: Into + Send, - { - into!(password); - commands::server::auth(self, username, password).await - } - - /// Switch to a different protocol, optionally authenticating in the process. - /// - /// If running against clustered servers this function will issue the HELLO command to each server concurrently. - /// - /// - async fn hello(&self, version: RespVersion, auth: Option<(String, String)>) -> RedisResult<()> { - commands::server::hello(self, version, auth).await - } -} - -/// Functions that provide a connection heartbeat interface. -#[async_trait] -pub trait HeartbeatInterface: ClientLike { - /// Return a future that will ping the server on an interval. - #[allow(unreachable_code)] - async fn enable_heartbeat(&self, interval: Duration, break_on_error: bool) -> RedisResult<()> { - let _self = self.clone(); - let mut interval = tokio_interval(interval); - - loop { - interval.tick().await; - - if break_on_error { - let _: () = _self.ping().await?; - } else if let Err(e) = _self.ping::<()>().await { - warn!("{}: Heartbeat ping failed with error: {:?}", _self.inner().id, e); - } - } - - Ok(()) - } -} +use futures::Future; /// Functions that implement the [server](https://redis.io/commands#server) interface. -#[async_trait] pub trait ServerInterface: ClientLike { /// Instruct Redis to start an Append Only File rewrite process. /// /// - async fn bgrewriteaof(&self) -> RedisResult + fn bgrewriteaof(&self) -> impl Future> + Send where R: FromRedis, { - commands::server::bgrewriteaof(self).await?.convert() + async move { commands::server::bgrewriteaof(self).await?.convert() } } /// Save the DB in background. 
/// /// - async fn bgsave(&self) -> RedisResult + fn bgsave(&self) -> impl Future> + Send where R: FromRedis, { - commands::server::bgsave(self).await?.convert() + async move { commands::server::bgsave(self).await?.convert() } } /// Return the number of keys in the selected database. /// /// - async fn dbsize(&self) -> RedisResult + fn dbsize(&self) -> impl Future> + Send where R: FromRedis, { - commands::server::dbsize(self).await?.convert() - } - - /// Delete the keys in all databases. - /// - /// - async fn flushall(&self, r#async: bool) -> RedisResult - where - R: FromRedis, - { - commands::server::flushall(self, r#async).await?.convert() - } - - /// Delete the keys on all nodes in the cluster. This is a special function that does not map directly to the Redis - /// interface. - async fn flushall_cluster(&self) -> RedisResult<()> { - commands::server::flushall_cluster(self).await + async move { commands::server::dbsize(self).await?.convert() } } /// Select the database this client should use. /// /// - async fn select(&self, db: u8) -> RedisResult<()> { - commands::server::select(self, db).await?.convert() + fn select(&self, db: u8) -> impl Future> + Send { + async move { commands::server::select(self, db).await?.convert() } } /// This command will start a coordinated failover between the currently-connected-to master and one of its /// replicas. /// /// - async fn failover( + fn failover( &self, to: Option<(String, u16)>, force: bool, abort: bool, timeout: Option, - ) -> RedisResult<()> { - commands::server::failover(self, to, force, abort, timeout).await + ) -> impl Future> + Send { + async move { commands::server::failover(self, to, force, abort, timeout).await } } /// Return the UNIX TIME of the last DB save executed with success. 
/// /// - async fn lastsave(&self) -> RedisResult + fn lastsave(&self) -> impl Future> + Send where R: FromRedis, { - commands::server::lastsave(self).await?.convert() + async move { commands::server::lastsave(self).await?.convert() } } /// This command blocks the current client until all the previous write commands are successfully transferred and @@ -145,11 +74,11 @@ pub trait ServerInterface: ClientLike { /// reached, the command returns even if the specified number of replicas were not yet reached. /// /// - async fn wait(&self, numreplicas: i64, timeout: i64) -> Result + fn wait(&self, numreplicas: i64, timeout: i64) -> impl Future> + Send where R: FromRedis, { - commands::server::wait(self, numreplicas, timeout).await?.convert() + async move { commands::server::wait(self, numreplicas, timeout).await?.convert() } } /// Read the primary Redis server identifier returned from the sentinel nodes. diff --git a/src/commands/interfaces/sets.rs b/src/commands/interfaces/sets.rs index 118415af..eefa38b1 100644 --- a/src/commands/interfaces/sets.rs +++ b/src/commands/interfaces/sets.rs @@ -1,3 +1,5 @@ +use futures::Future; + use crate::{ commands, error::RedisError, @@ -7,131 +9,148 @@ use crate::{ use std::convert::TryInto; /// Functions that implement the [sets](https://redis.io/commands#set) interface. -#[async_trait] pub trait SetsInterface: ClientLike + Sized { /// Add the specified members to the set stored at `key`. /// /// - async fn sadd(&self, key: K, members: V) -> RedisResult + fn sadd(&self, key: K, members: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(members); - commands::sets::sadd(self, key, members).await?.convert() + async move { + into!(key); + try_into!(members); + commands::sets::sadd(self, key, members).await?.convert() + } } /// Returns the set cardinality (number of elements) of the set stored at `key`. 
/// /// - async fn scard(&self, key: K) -> RedisResult + fn scard(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sets::scard(self, key).await?.convert() + async move { + into!(key); + commands::sets::scard(self, key).await?.convert() + } } /// Returns the members of the set resulting from the difference between the first set and all the successive sets. /// /// - async fn sdiff(&self, keys: K) -> RedisResult + fn sdiff(&self, keys: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::sets::sdiff(self, keys).await?.convert() + async move { + into!(keys); + commands::sets::sdiff(self, keys).await?.convert() + } } /// This command is equal to SDIFF, but instead of returning the resulting set, it is stored in `destination`. /// /// - async fn sdiffstore(&self, dest: D, keys: K) -> RedisResult + fn sdiffstore(&self, dest: D, keys: K) -> impl Future> + Send where R: FromRedis, D: Into + Send, K: Into + Send, { - into!(dest, keys); - commands::sets::sdiffstore(self, dest, keys).await?.convert() + async move { + into!(dest, keys); + commands::sets::sdiffstore(self, dest, keys).await?.convert() + } } /// Returns the members of the set resulting from the intersection of all the given sets. /// /// - async fn sinter(&self, keys: K) -> RedisResult + fn sinter(&self, keys: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::sets::sinter(self, keys).await?.convert() + async move { + into!(keys); + commands::sets::sinter(self, keys).await?.convert() + } } /// This command is equal to SINTER, but instead of returning the resulting set, it is stored in `destination`. 
/// /// - async fn sinterstore(&self, dest: D, keys: K) -> RedisResult + fn sinterstore(&self, dest: D, keys: K) -> impl Future> + Send where R: FromRedis, D: Into + Send, K: Into + Send, { - into!(dest, keys); - commands::sets::sinterstore(self, dest, keys).await?.convert() + async move { + into!(dest, keys); + commands::sets::sinterstore(self, dest, keys).await?.convert() + } } /// Returns if `member` is a member of the set stored at `key`. /// /// - async fn sismember(&self, key: K, member: V) -> RedisResult + fn sismember(&self, key: K, member: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(member); - commands::sets::sismember(self, key, member).await?.convert() + async move { + into!(key); + try_into!(member); + commands::sets::sismember(self, key, member).await?.convert() + } } /// Returns whether each member is a member of the set stored at `key`. /// /// - async fn smismember(&self, key: K, members: V) -> RedisResult + fn smismember(&self, key: K, members: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(members); - commands::sets::smismember(self, key, members).await?.convert() + async move { + into!(key); + try_into!(members); + commands::sets::smismember(self, key, members).await?.convert() + } } /// Returns all the members of the set value stored at `key`. /// /// - async fn smembers(&self, key: K) -> RedisResult + fn smembers(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sets::smembers(self, key).await?.convert() + async move { + into!(key); + commands::sets::smembers(self, key).await?.convert() + } } /// Move `member` from the set at `source` to the set at `destination`. 
/// /// - async fn smove(&self, source: S, dest: D, member: V) -> RedisResult + fn smove(&self, source: S, dest: D, member: V) -> impl Future> + Send where R: FromRedis, S: Into + Send, @@ -139,21 +158,25 @@ pub trait SetsInterface: ClientLike + Sized { V: TryInto + Send, V::Error: Into + Send, { - into!(source, dest); - try_into!(member); - commands::sets::smove(self, source, dest, member).await?.convert() + async move { + into!(source, dest); + try_into!(member); + commands::sets::smove(self, source, dest, member).await?.convert() + } } /// Removes and returns one or more random members from the set value store at `key`. /// /// - async fn spop(&self, key: K, count: Option) -> RedisResult + fn spop(&self, key: K, count: Option) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sets::spop(self, key, count).await?.convert() + async move { + into!(key); + commands::sets::spop(self, key, count).await?.convert() + } } /// When called with just the key argument, return a random element from the set value stored at `key`. @@ -162,52 +185,60 @@ pub trait SetsInterface: ClientLike + Sized { /// count or the set's cardinality (SCARD), whichever is lower. /// /// - async fn srandmember(&self, key: K, count: Option) -> RedisResult + fn srandmember(&self, key: K, count: Option) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sets::srandmember(self, key, count).await?.convert() + async move { + into!(key); + commands::sets::srandmember(self, key, count).await?.convert() + } } /// Remove the specified members from the set stored at `key`. 
/// /// - async fn srem(&self, key: K, members: V) -> RedisResult + fn srem(&self, key: K, members: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(members); - commands::sets::srem(self, key, members).await?.convert() + async move { + into!(key); + try_into!(members); + commands::sets::srem(self, key, members).await?.convert() + } } /// Returns the members of the set resulting from the union of all the given sets. /// /// - async fn sunion(&self, keys: K) -> RedisResult + fn sunion(&self, keys: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::sets::sunion(self, keys).await?.convert() + async move { + into!(keys); + commands::sets::sunion(self, keys).await?.convert() + } } /// This command is equal to SUNION, but instead of returning the resulting set, it is stored in `destination`. /// /// - async fn sunionstore(&self, dest: D, keys: K) -> RedisResult + fn sunionstore(&self, dest: D, keys: K) -> impl Future> + Send where R: FromRedis, D: Into + Send, K: Into + Send, { - into!(dest, keys); - commands::sets::sunionstore(self, dest, keys).await?.convert() + async move { + into!(dest, keys); + commands::sets::sunionstore(self, dest, keys).await?.convert() + } } } diff --git a/src/commands/interfaces/slowlog.rs b/src/commands/interfaces/slowlog.rs index 4b8a2ee4..83851c30 100644 --- a/src/commands/interfaces/slowlog.rs +++ b/src/commands/interfaces/slowlog.rs @@ -3,34 +3,34 @@ use crate::{ interfaces::{ClientLike, RedisResult}, types::FromRedis, }; +use futures::Future; /// Functions that implement the [slowlog](https://redis.io/commands#server) interface. -#[async_trait] pub trait SlowlogInterface: ClientLike + Sized { /// This command is used to read the slow queries log. 
/// /// - async fn slowlog_get(&self, count: Option) -> RedisResult + fn slowlog_get(&self, count: Option) -> impl Future> + Send where R: FromRedis, { - commands::slowlog::slowlog_get(self, count).await?.convert() + async move { commands::slowlog::slowlog_get(self, count).await?.convert() } } /// This command is used to read length of the slow queries log. /// /// - async fn slowlog_length(&self) -> RedisResult + fn slowlog_length(&self) -> impl Future> + Send where R: FromRedis, { - commands::slowlog::slowlog_length(self).await?.convert() + async move { commands::slowlog::slowlog_length(self).await?.convert() } } /// This command is used to reset the slow queries log. /// /// - async fn slowlog_reset(&self) -> RedisResult<()> { - commands::slowlog::slowlog_reset(self).await + fn slowlog_reset(&self) -> impl Future> + Send { + async move { commands::slowlog::slowlog_reset(self).await } } } diff --git a/src/commands/interfaces/sorted_sets.rs b/src/commands/interfaces/sorted_sets.rs index b74c419e..21af62cf 100644 --- a/src/commands/interfaces/sorted_sets.rs +++ b/src/commands/interfaces/sorted_sets.rs @@ -1,3 +1,5 @@ +use futures::Future; + use crate::{ commands, error::RedisError, @@ -22,50 +24,61 @@ use crate::{ use std::convert::TryInto; /// Functions that implement the [sorted sets](https://redis.io/commands#sorted_set) interface. -#[async_trait] pub trait SortedSetsInterface: ClientLike + Sized { /// The blocking variant of [Self::zmpop]. /// /// - async fn bzmpop(&self, timeout: f64, keys: K, sort: ZCmp, count: Option) -> RedisResult + fn bzmpop( + &self, + timeout: f64, + keys: K, + sort: ZCmp, + count: Option, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::sorted_sets::bzmpop(self, timeout, keys, sort, count) - .await? - .convert() + async move { + into!(keys); + commands::sorted_sets::bzmpop(self, timeout, keys, sort, count) + .await? + .convert() + } } /// The blocking variant of [Self::zpopmin]. 
/// /// - async fn bzpopmin(&self, keys: K, timeout: f64) -> RedisResult + fn bzpopmin(&self, keys: K, timeout: f64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::sorted_sets::bzpopmin(self, keys, timeout).await?.convert() + async move { + into!(keys); + commands::sorted_sets::bzpopmin(self, keys, timeout).await?.convert() + } } /// The blocking variant of [Self::zpopmax]. /// /// - async fn bzpopmax(&self, keys: K, timeout: f64) -> RedisResult + fn bzpopmax(&self, keys: K, timeout: f64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::sorted_sets::bzpopmax(self, keys, timeout).await?.convert() + async move { + into!(keys); + commands::sorted_sets::bzpopmax(self, keys, timeout).await?.convert() + } } /// Adds all the specified members with the specified scores to the sorted set stored at `key`. /// /// - async fn zadd( + fn zadd( &self, key: K, options: Option, @@ -73,131 +86,147 @@ pub trait SortedSetsInterface: ClientLike + Sized { changed: bool, incr: bool, values: V, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(values); - commands::sorted_sets::zadd(self, key, options, ordering, changed, incr, values) - .await? - .convert() + async move { + into!(key); + try_into!(values); + commands::sorted_sets::zadd(self, key, options, ordering, changed, incr, values) + .await? + .convert() + } } /// Returns the sorted set cardinality (number of elements) of the sorted set stored at `key`. /// /// - async fn zcard(&self, key: K) -> RedisResult + fn zcard(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sorted_sets::zcard(self, key).await?.convert() + async move { + into!(key); + commands::sorted_sets::zcard(self, key).await?.convert() + } } /// Returns the number of elements in the sorted set at `key` with a score between `min` and `max`. 
/// /// - async fn zcount(&self, key: K, min: f64, max: f64) -> RedisResult + fn zcount(&self, key: K, min: f64, max: f64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sorted_sets::zcount(self, key, min, max).await?.convert() + async move { + into!(key); + commands::sorted_sets::zcount(self, key, min, max).await?.convert() + } } /// This command is similar to ZDIFFSTORE, but instead of storing the resulting sorted set, it is returned to the /// client. /// /// - async fn zdiff(&self, keys: K, withscores: bool) -> RedisResult + fn zdiff(&self, keys: K, withscores: bool) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::sorted_sets::zdiff(self, keys, withscores).await?.convert() + async move { + into!(keys); + commands::sorted_sets::zdiff(self, keys, withscores).await?.convert() + } } /// Computes the difference between the first and all successive input sorted sets and stores the result in /// `destination`. /// /// - async fn zdiffstore(&self, dest: D, keys: K) -> RedisResult + fn zdiffstore(&self, dest: D, keys: K) -> impl Future> + Send where R: FromRedis, D: Into + Send, K: Into + Send, { - into!(dest, keys); - commands::sorted_sets::zdiffstore(self, dest, keys).await?.convert() + async move { + into!(dest, keys); + commands::sorted_sets::zdiffstore(self, dest, keys).await?.convert() + } } /// Increments the score of `member` in the sorted set stored at `key` by `increment`. /// /// - async fn zincrby(&self, key: K, increment: f64, member: V) -> RedisResult + fn zincrby(&self, key: K, increment: f64, member: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(member); - commands::sorted_sets::zincrby(self, key, increment, member) - .await? - .convert() + async move { + into!(key); + try_into!(member); + commands::sorted_sets::zincrby(self, key, increment, member) + .await? 
+ .convert() + } } /// This command is similar to ZINTERSTORE, but instead of storing the resulting sorted set, it is returned to the /// client. /// /// - async fn zinter( + fn zinter( &self, keys: K, weights: W, aggregate: Option, withscores: bool, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, W: Into + Send, { - into!(keys, weights); - commands::sorted_sets::zinter(self, keys, weights, aggregate, withscores) - .await? - .convert() + async move { + into!(keys, weights); + commands::sorted_sets::zinter(self, keys, weights, aggregate, withscores) + .await? + .convert() + } } /// Computes the intersection of the sorted sets given by the specified keys, and stores the result in /// `destination`. /// /// - async fn zinterstore( + fn zinterstore( &self, dest: D, keys: K, weights: W, aggregate: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, D: Into + Send, K: Into + Send, W: Into + Send, { - into!(dest, keys, weights); - commands::sorted_sets::zinterstore(self, dest, keys, weights, aggregate) - .await? - .convert() + async move { + into!(dest, keys, weights); + commands::sorted_sets::zinterstore(self, dest, keys, weights, aggregate) + .await? + .convert() + } } /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical @@ -205,7 +234,7 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// max. 
/// /// - async fn zlexcount(&self, key: K, min: M, max: N) -> RedisResult + fn zlexcount(&self, key: K, min: M, max: N) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -214,64 +243,74 @@ pub trait SortedSetsInterface: ClientLike + Sized { N: TryInto + Send, N::Error: Into + Send, { - into!(key); - try_into!(min, max); - commands::sorted_sets::zlexcount(self, key, min, max).await?.convert() + async move { + into!(key); + try_into!(min, max); + commands::sorted_sets::zlexcount(self, key, min, max).await?.convert() + } } /// Removes and returns up to count members with the highest scores in the sorted set stored at `key`. /// /// - async fn zpopmax(&self, key: K, count: Option) -> RedisResult + fn zpopmax(&self, key: K, count: Option) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sorted_sets::zpopmax(self, key, count).await?.convert() + async move { + into!(key); + commands::sorted_sets::zpopmax(self, key, count).await?.convert() + } } /// Removes and returns up to count members with the lowest scores in the sorted set stored at `key`. /// /// - async fn zpopmin(&self, key: K, count: Option) -> RedisResult + fn zpopmin(&self, key: K, count: Option) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sorted_sets::zpopmin(self, key, count).await?.convert() + async move { + into!(key); + commands::sorted_sets::zpopmin(self, key, count).await?.convert() + } } /// Pops one or more elements, that are member-score pairs, from the first non-empty sorted set in the provided list /// of key names. 
/// /// - async fn zmpop(&self, keys: K, sort: ZCmp, count: Option) -> RedisResult + fn zmpop(&self, keys: K, sort: ZCmp, count: Option) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(keys); - commands::sorted_sets::zmpop(self, keys, sort, count).await?.convert() + async move { + into!(keys); + commands::sorted_sets::zmpop(self, keys, sort, count).await?.convert() + } } /// When called with just the key argument, return a random element from the sorted set value stored at `key`. /// /// - async fn zrandmember(&self, key: K, count: Option<(i64, bool)>) -> RedisResult + fn zrandmember(&self, key: K, count: Option<(i64, bool)>) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sorted_sets::zrandmember(self, key, count).await?.convert() + async move { + into!(key); + commands::sorted_sets::zrandmember(self, key, count).await?.convert() + } } /// This command is like ZRANGE, but stores the result in the `destination` key. /// /// - async fn zrangestore( + fn zrangestore( &self, dest: D, source: S, @@ -280,7 +319,7 @@ pub trait SortedSetsInterface: ClientLike + Sized { sort: Option, rev: bool, limit: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, D: Into + Send, @@ -290,17 +329,19 @@ pub trait SortedSetsInterface: ClientLike + Sized { N: TryInto + Send, N::Error: Into + Send, { - into!(dest, source); - try_into!(min, max); - commands::sorted_sets::zrangestore(self, dest, source, min, max, sort, rev, limit) - .await? - .convert() + async move { + into!(dest, source); + try_into!(min, max); + commands::sorted_sets::zrangestore(self, dest, source, min, max, sort, rev, limit) + .await? + .convert() + } } /// Returns the specified range of elements in the sorted set stored at `key`. 
/// /// - async fn zrange( + fn zrange( &self, key: K, min: M, @@ -309,7 +350,7 @@ pub trait SortedSetsInterface: ClientLike + Sized { rev: bool, limit: Option, withscores: bool, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -318,18 +359,26 @@ pub trait SortedSetsInterface: ClientLike + Sized { N: TryInto + Send, N::Error: Into + Send, { - into!(key); - try_into!(min, max); - commands::sorted_sets::zrange(self, key, min, max, sort, rev, limit, withscores) - .await? - .convert() + async move { + into!(key); + try_into!(min, max); + commands::sorted_sets::zrange(self, key, min, max, sort, rev, limit, withscores) + .await? + .convert() + } } /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical /// ordering, this command returns all the elements in the sorted set at `key` with a value between `min` and `max`. /// /// - async fn zrangebylex(&self, key: K, min: M, max: N, limit: Option) -> RedisResult + fn zrangebylex( + &self, + key: K, + min: M, + max: N, + limit: Option, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -338,18 +387,26 @@ pub trait SortedSetsInterface: ClientLike + Sized { N: TryInto + Send, N::Error: Into + Send, { - into!(key); - try_into!(min, max); - commands::sorted_sets::zrangebylex(self, key, min, max, limit) - .await? - .convert() + async move { + into!(key); + try_into!(min, max); + commands::sorted_sets::zrangebylex(self, key, min, max, limit) + .await? + .convert() + } } /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical /// ordering, this command returns all the elements in the sorted set at `key` with a value between `max` and `min`. 
/// /// - async fn zrevrangebylex(&self, key: K, max: M, min: N, limit: Option) -> RedisResult + fn zrevrangebylex( + &self, + key: K, + max: M, + min: N, + limit: Option, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -358,25 +415,27 @@ pub trait SortedSetsInterface: ClientLike + Sized { N: TryInto + Send, N::Error: Into + Send, { - into!(key); - try_into!(max, min); - commands::sorted_sets::zrevrangebylex(self, key, max, min, limit) - .await? - .convert() + async move { + into!(key); + try_into!(max, min); + commands::sorted_sets::zrevrangebylex(self, key, max, min, limit) + .await? + .convert() + } } /// Returns all the elements in the sorted set at key with a score between `min` and `max` (including elements /// with score equal to `min` or `max`). /// /// - async fn zrangebyscore( + fn zrangebyscore( &self, key: K, min: M, max: N, withscores: bool, limit: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -385,25 +444,27 @@ pub trait SortedSetsInterface: ClientLike + Sized { N: TryInto + Send, N::Error: Into + Send, { - into!(key); - try_into!(min, max); - commands::sorted_sets::zrangebyscore(self, key, min, max, withscores, limit) - .await? - .convert() + async move { + into!(key); + try_into!(min, max); + commands::sorted_sets::zrangebyscore(self, key, min, max, withscores, limit) + .await? + .convert() + } } /// Returns all the elements in the sorted set at `key` with a score between `max` and `min` (including /// elements with score equal to `max` or `min`). 
/// /// - async fn zrevrangebyscore( + fn zrevrangebyscore( &self, key: K, max: M, min: N, withscores: bool, limit: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -412,41 +473,47 @@ pub trait SortedSetsInterface: ClientLike + Sized { N: TryInto + Send, N::Error: Into + Send, { - into!(key); - try_into!(max, min); - commands::sorted_sets::zrevrangebyscore(self, key, max, min, withscores, limit) - .await? - .convert() + async move { + into!(key); + try_into!(max, min); + commands::sorted_sets::zrevrangebyscore(self, key, max, min, withscores, limit) + .await? + .convert() + } } /// Returns the rank of member in the sorted set stored at `key`, with the scores ordered from low to high. /// /// - async fn zrank(&self, key: K, member: V) -> RedisResult + fn zrank(&self, key: K, member: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(member); - commands::sorted_sets::zrank(self, key, member).await?.convert() + async move { + into!(key); + try_into!(member); + commands::sorted_sets::zrank(self, key, member).await?.convert() + } } /// Removes the specified members from the sorted set stored at `key`. Non existing members are ignored. /// /// - async fn zrem(&self, key: K, members: V) -> RedisResult + fn zrem(&self, key: K, members: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(members); - commands::sorted_sets::zrem(self, key, members).await?.convert() + async move { + into!(key); + try_into!(members); + commands::sorted_sets::zrem(self, key, members).await?.convert() + } } /// When all the elements in a sorted set are inserted with the same score, in order to force lexicographical @@ -454,7 +521,7 @@ pub trait SortedSetsInterface: ClientLike + Sized { /// specified by `min` and `max`. 
/// /// - async fn zremrangebylex(&self, key: K, min: M, max: N) -> RedisResult + fn zremrangebylex(&self, key: K, min: M, max: N) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -463,31 +530,35 @@ pub trait SortedSetsInterface: ClientLike + Sized { N: TryInto + Send, N::Error: Into + Send, { - into!(key); - try_into!(min, max); - commands::sorted_sets::zremrangebylex(self, key, min, max) - .await? - .convert() + async move { + into!(key); + try_into!(min, max); + commands::sorted_sets::zremrangebylex(self, key, min, max) + .await? + .convert() + } } /// Removes all elements in the sorted set stored at `key` with rank between `start` and `stop`. /// /// - async fn zremrangebyrank(&self, key: K, start: i64, stop: i64) -> RedisResult + fn zremrangebyrank(&self, key: K, start: i64, stop: i64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sorted_sets::zremrangebyrank(self, key, start, stop) - .await? - .convert() + async move { + into!(key); + commands::sorted_sets::zremrangebyrank(self, key, start, stop) + .await? + .convert() + } } /// Removes all elements in the sorted set stored at `key` with a score between `min` and `max`. /// /// - async fn zremrangebyscore(&self, key: K, min: M, max: N) -> RedisResult + fn zremrangebyscore(&self, key: K, min: M, max: N) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -496,113 +567,133 @@ pub trait SortedSetsInterface: ClientLike + Sized { N: TryInto + Send, N::Error: Into + Send, { - into!(key); - try_into!(min, max); - commands::sorted_sets::zremrangebyscore(self, key, min, max) - .await? - .convert() + async move { + into!(key); + try_into!(min, max); + commands::sorted_sets::zremrangebyscore(self, key, min, max) + .await? + .convert() + } } /// Returns the specified range of elements in the sorted set stored at `key`. 
/// /// - async fn zrevrange(&self, key: K, start: i64, stop: i64, withscores: bool) -> RedisResult + fn zrevrange( + &self, + key: K, + start: i64, + stop: i64, + withscores: bool, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::sorted_sets::zrevrange(self, key, start, stop, withscores) - .await? - .convert() + async move { + into!(key); + commands::sorted_sets::zrevrange(self, key, start, stop, withscores) + .await? + .convert() + } } /// Returns the rank of `member` in the sorted set stored at `key`, with the scores ordered from high to low. /// /// - async fn zrevrank(&self, key: K, member: V) -> RedisResult + fn zrevrank(&self, key: K, member: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(member); - commands::sorted_sets::zrevrank(self, key, member).await?.convert() + async move { + into!(key); + try_into!(member); + commands::sorted_sets::zrevrank(self, key, member).await?.convert() + } } /// Returns the score of `member` in the sorted set at `key`. /// /// - async fn zscore(&self, key: K, member: V) -> RedisResult + fn zscore(&self, key: K, member: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(member); - commands::sorted_sets::zscore(self, key, member).await?.convert() + async move { + into!(key); + try_into!(member); + commands::sorted_sets::zscore(self, key, member).await?.convert() + } } /// This command is similar to ZUNIONSTORE, but instead of storing the resulting sorted set, it is returned to the /// client. /// /// - async fn zunion( + fn zunion( &self, keys: K, weights: W, aggregate: Option, withscores: bool, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, W: Into + Send, { - into!(keys, weights); - commands::sorted_sets::zunion(self, keys, weights, aggregate, withscores) - .await? 
- .convert() + async move { + into!(keys, weights); + commands::sorted_sets::zunion(self, keys, weights, aggregate, withscores) + .await? + .convert() + } } /// Computes the union of the sorted sets given by the specified keys, and stores the result in `destination`. /// /// - async fn zunionstore( + fn zunionstore( &self, dest: D, keys: K, weights: W, aggregate: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, D: Into + Send, K: Into + Send, W: Into + Send, { - into!(dest, keys, weights); - commands::sorted_sets::zunionstore(self, dest, keys, weights, aggregate) - .await? - .convert() + async move { + into!(dest, keys, weights); + commands::sorted_sets::zunionstore(self, dest, keys, weights, aggregate) + .await? + .convert() + } } /// Returns the scores associated with the specified members in the sorted set stored at `key`. /// /// - async fn zmscore(&self, key: K, members: V) -> RedisResult + fn zmscore(&self, key: K, members: V) -> impl Future> + Send where R: FromRedis, K: Into + Send, V: TryInto + Send, V::Error: Into + Send, { - into!(key); - try_into!(members); - commands::sorted_sets::zmscore(self, key, members).await?.convert() + async move { + into!(key); + try_into!(members); + commands::sorted_sets::zmscore(self, key, members).await?.convert() + } } } diff --git a/src/commands/interfaces/streams.rs b/src/commands/interfaces/streams.rs index 52ff20ee..176f0da7 100644 --- a/src/commands/interfaces/streams.rs +++ b/src/commands/interfaces/streams.rs @@ -19,6 +19,7 @@ use crate::{ }, }; use bytes_utils::Str; +use futures::Future; use std::{convert::TryInto, hash::Hash}; /// Functions that implement the [streams](https://redis.io/commands#stream) interface. @@ -28,46 +29,51 @@ use std::{convert::TryInto, hash::Hash}; /// [xreadgroup_map](Self::xreadgroup_map), [xrange_values](Self::xrange_values), etc exist to make this easier for /// callers. 
These functions apply an additional layer of parsing logic that can make declaring response types easier, /// as well as automatically handling any differences between RESP2 and RESP3 return value types. -#[async_trait] pub trait StreamsInterface: ClientLike + Sized { /// This command returns the list of consumers that belong to the `groupname` consumer group of the stream stored at /// `key`. /// /// - async fn xinfo_consumers(&self, key: K, groupname: S) -> RedisResult + fn xinfo_consumers(&self, key: K, groupname: S) -> impl Future> + Send where R: FromRedis, K: Into + Send, S: Into + Send, { - into!(key, groupname); - commands::streams::xinfo_consumers(self, key, groupname) - .await? - .convert() + async move { + into!(key, groupname); + commands::streams::xinfo_consumers(self, key, groupname) + .await? + .convert() + } } /// This command returns the list of all consumers groups of the stream stored at `key`. /// /// - async fn xinfo_groups(&self, key: K) -> RedisResult + fn xinfo_groups(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::streams::xinfo_groups(self, key).await?.convert() + async move { + into!(key); + commands::streams::xinfo_groups(self, key).await?.convert() + } } /// This command returns information about the stream stored at `key`. /// /// - async fn xinfo_stream(&self, key: K, full: bool, count: Option) -> RedisResult + fn xinfo_stream(&self, key: K, full: bool, count: Option) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::streams::xinfo_stream(self, key, full, count).await?.convert() + async move { + into!(key); + commands::streams::xinfo_stream(self, key, full, count).await?.convert() + } } /// Appends the specified stream entry to the stream at the specified key. If the key does not exist, as a side @@ -75,7 +81,14 @@ pub trait StreamsInterface: ClientLike + Sized { /// disabled with the NOMKSTREAM option. 
/// /// - async fn xadd(&self, key: K, nomkstream: bool, cap: C, id: I, fields: F) -> RedisResult + fn xadd( + &self, + key: K, + nomkstream: bool, + cap: C, + id: I, + fields: F, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -85,52 +98,58 @@ pub trait StreamsInterface: ClientLike + Sized { C: TryInto + Send, C::Error: Into + Send, { - into!(key, id); - try_into!(fields, cap); - commands::streams::xadd(self, key, nomkstream, cap, id, fields) - .await? - .convert() + async move { + into!(key, id); + try_into!(fields, cap); + commands::streams::xadd(self, key, nomkstream, cap, id, fields) + .await? + .convert() + } } /// Trims the stream by evicting older entries (entries with lower IDs) if needed. /// /// - async fn xtrim(&self, key: K, cap: C) -> RedisResult + fn xtrim(&self, key: K, cap: C) -> impl Future> + Send where R: FromRedis, K: Into + Send, C: TryInto + Send, C::Error: Into + Send, { - into!(key); - try_into!(cap); - commands::streams::xtrim(self, key, cap).await?.convert() + async move { + into!(key); + try_into!(cap); + commands::streams::xtrim(self, key, cap).await?.convert() + } } /// Removes the specified entries from a stream, and returns the number of entries deleted. /// /// - async fn xdel(&self, key: K, ids: S) -> RedisResult + fn xdel(&self, key: K, ids: S) -> impl Future> + Send where R: FromRedis, K: Into + Send, S: Into + Send, { - into!(key, ids); - commands::streams::xdel(self, key, ids).await?.convert() + async move { + into!(key, ids); + commands::streams::xdel(self, key, ids).await?.convert() + } } /// Return the stream entries matching the provided range of IDs, automatically converting to a less verbose type /// definition. 
/// /// - async fn xrange_values( + fn xrange_values( &self, key: K, start: S, end: E, count: Option, - ) -> RedisResult>> + ) -> impl Future>>> + Send where Ri: FromRedis, Rk: FromRedisKey + Hash + Eq, @@ -141,11 +160,13 @@ pub trait StreamsInterface: ClientLike + Sized { E: TryInto + Send, E::Error: Into + Send, { - into!(key); - try_into!(start, end); - commands::streams::xrange(self, key, start, end, count) - .await? - .into_xread_value() + async move { + into!(key); + try_into!(start, end); + commands::streams::xrange(self, key, start, end, count) + .await? + .into_xread_value() + } } /// The command returns the stream entries matching a given range of IDs. The range is specified by a minimum @@ -155,7 +176,13 @@ pub trait StreamsInterface: ClientLike + Sized { /// /// /// **See [xrange_values](Self::xrange_values) for a variation of this function that may be more useful.** - async fn xrange(&self, key: K, start: S, end: E, count: Option) -> RedisResult + fn xrange( + &self, + key: K, + start: S, + end: E, + count: Option, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -164,22 +191,24 @@ pub trait StreamsInterface: ClientLike + Sized { E: TryInto + Send, E::Error: Into + Send, { - into!(key); - try_into!(start, end); - commands::streams::xrange(self, key, start, end, count).await?.convert() + async move { + into!(key); + try_into!(start, end); + commands::streams::xrange(self, key, start, end, count).await?.convert() + } } /// Similar to `XRANGE`, but with the results returned in reverse order. The results will be automatically converted /// to a less verbose type definition. 
/// /// - async fn xrevrange_values( + fn xrevrange_values( &self, key: K, end: E, start: S, count: Option, - ) -> RedisResult>> + ) -> impl Future>>> + Send where Ri: FromRedis, Rk: FromRedisKey + Hash + Eq, @@ -190,11 +219,13 @@ pub trait StreamsInterface: ClientLike + Sized { E: TryInto + Send, E::Error: Into + Send, { - into!(key); - try_into!(start, end); - commands::streams::xrevrange(self, key, end, start, count) - .await? - .into_xread_value() + async move { + into!(key); + try_into!(start, end); + commands::streams::xrevrange(self, key, end, start, count) + .await? + .into_xread_value() + } } /// Similar to `XRANGE`, but with the results returned in reverse order. @@ -202,7 +233,13 @@ pub trait StreamsInterface: ClientLike + Sized { /// /// /// **See the [xrevrange_values](Self::xrevrange_values) for a variation of this function that may be more useful.** - async fn xrevrange(&self, key: K, end: E, start: S, count: Option) -> RedisResult + fn xrevrange( + &self, + key: K, + end: E, + start: S, + count: Option, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -211,23 +248,27 @@ pub trait StreamsInterface: ClientLike + Sized { E: TryInto + Send, E::Error: Into + Send, { - into!(key); - try_into!(start, end); - commands::streams::xrevrange(self, key, end, start, count) - .await? - .convert() + async move { + into!(key); + try_into!(start, end); + commands::streams::xrevrange(self, key, end, start, count) + .await? + .convert() + } } /// Returns the number of entries inside a stream. 
/// /// - async fn xlen(&self, key: K) -> RedisResult + fn xlen(&self, key: K) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::streams::xlen(self, key).await?.convert() + async move { + into!(key); + commands::streams::xlen(self, key).await?.convert() + } } /// Read data from one or multiple streams, only returning entries with an ID greater than the last received ID @@ -327,13 +368,13 @@ pub trait StreamsInterface: ClientLike + Sized { // .flatten_array_values(2) // .convert()?; // ``` - async fn xread_map( + fn xread_map( &self, count: Option, block: Option, keys: K, ids: I, - ) -> RedisResult> + ) -> impl Future>> + Send where Rk1: FromRedisKey + Hash + Eq, Rk2: FromRedis, @@ -342,10 +383,12 @@ pub trait StreamsInterface: ClientLike + Sized { K: Into + Send, I: Into + Send, { - into!(keys, ids); - commands::streams::xread(self, count, block, keys, ids) - .await? - .into_xread_response() + async move { + into!(keys, ids); + commands::streams::xread(self, count, block, keys, ids) + .await? + .into_xread_response() + } } /// Read data from one or multiple streams, only returning entries with an ID greater than the last received ID @@ -355,91 +398,125 @@ pub trait StreamsInterface: ClientLike + Sized { /// /// **See [xread_map](Self::xread_map) for more information on a variation of this function that might be more /// useful.** - async fn xread(&self, count: Option, block: Option, keys: K, ids: I) -> RedisResult + fn xread( + &self, + count: Option, + block: Option, + keys: K, + ids: I, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, I: Into + Send, { - into!(keys, ids); - commands::streams::xread(self, count, block, keys, ids).await?.convert() + async move { + into!(keys, ids); + commands::streams::xread(self, count, block, keys, ids).await?.convert() + } } /// This command creates a new consumer group uniquely identified by `groupname` for the stream stored at `key`. 
/// /// - async fn xgroup_create(&self, key: K, groupname: S, id: I, mkstream: bool) -> RedisResult + fn xgroup_create( + &self, + key: K, + groupname: S, + id: I, + mkstream: bool, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, S: Into + Send, I: Into + Send, { - into!(key, groupname, id); - commands::streams::xgroup_create(self, key, groupname, id, mkstream) - .await? - .convert() + async move { + into!(key, groupname, id); + commands::streams::xgroup_create(self, key, groupname, id, mkstream) + .await? + .convert() + } } /// Create a consumer named `consumername` in the consumer group `groupname` of the stream that's stored at `key`. /// /// - async fn xgroup_createconsumer(&self, key: K, groupname: G, consumername: C) -> RedisResult + fn xgroup_createconsumer( + &self, + key: K, + groupname: G, + consumername: C, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, G: Into + Send, C: Into + Send, { - into!(key, groupname, consumername); - commands::streams::xgroup_createconsumer(self, key, groupname, consumername) - .await? - .convert() + async move { + into!(key, groupname, consumername); + commands::streams::xgroup_createconsumer(self, key, groupname, consumername) + .await? + .convert() + } } /// Delete a consumer named `consumername` in the consumer group `groupname` of the stream that's stored at `key`. /// /// - async fn xgroup_delconsumer(&self, key: K, groupname: G, consumername: C) -> RedisResult + fn xgroup_delconsumer( + &self, + key: K, + groupname: G, + consumername: C, + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, G: Into + Send, C: Into + Send, { - into!(key, groupname, consumername); - commands::streams::xgroup_delconsumer(self, key, groupname, consumername) - .await? - .convert() + async move { + into!(key, groupname, consumername); + commands::streams::xgroup_delconsumer(self, key, groupname, consumername) + .await? + .convert() + } } /// Completely destroy a consumer group. 
/// /// - async fn xgroup_destroy(&self, key: K, groupname: S) -> RedisResult + fn xgroup_destroy(&self, key: K, groupname: S) -> impl Future> + Send where R: FromRedis, K: Into + Send, S: Into + Send, { - into!(key, groupname); - commands::streams::xgroup_destroy(self, key, groupname).await?.convert() + async move { + into!(key, groupname); + commands::streams::xgroup_destroy(self, key, groupname).await?.convert() + } } /// Set the last delivered ID for a consumer group. /// /// - async fn xgroup_setid(&self, key: K, groupname: S, id: I) -> RedisResult + fn xgroup_setid(&self, key: K, groupname: S, id: I) -> impl Future> + Send where R: FromRedis, K: Into + Send, S: Into + Send, I: Into + Send, { - into!(key, groupname, id); - commands::streams::xgroup_setid(self, key, groupname, id) - .await? - .convert() + async move { + into!(key, groupname, id); + commands::streams::xgroup_setid(self, key, groupname, id) + .await? + .convert() + } } /// A special version of the `XREAD` command with support for consumer groups. @@ -458,7 +535,7 @@ pub trait StreamsInterface: ClientLike + Sized { /// /// See the [xread_map](Self::xread_map) documentation for more information. // See the `xread_map` source docs for more information. - async fn xreadgroup_map( + fn xreadgroup_map( &self, group: G, consumer: C, @@ -467,7 +544,7 @@ pub trait StreamsInterface: ClientLike + Sized { noack: bool, keys: K, ids: I, - ) -> RedisResult> + ) -> impl Future>> + Send where Rk1: FromRedisKey + Hash + Eq, Rk2: FromRedis, @@ -478,10 +555,12 @@ pub trait StreamsInterface: ClientLike + Sized { K: Into + Send, I: Into + Send, { - into!(group, consumer, keys, ids); - commands::streams::xreadgroup(self, group, consumer, count, block, noack, keys, ids) - .await? - .into_xread_response() + async move { + into!(group, consumer, keys, ids); + commands::streams::xreadgroup(self, group, consumer, count, block, noack, keys, ids) + .await? 
+ .into_xread_response() + } } /// A special version of the `XREAD` command with support for consumer groups. @@ -493,7 +572,7 @@ pub trait StreamsInterface: ClientLike + Sized { /// /// /// **See [xreadgroup_map](Self::xreadgroup_map) for a variation of this function that might be more useful.** - async fn xreadgroup( + fn xreadgroup( &self, group: G, consumer: C, @@ -502,7 +581,7 @@ pub trait StreamsInterface: ClientLike + Sized { noack: bool, keys: K, ids: I, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, G: Into + Send, @@ -510,28 +589,32 @@ pub trait StreamsInterface: ClientLike + Sized { K: Into + Send, I: Into + Send, { - into!(group, consumer, keys, ids); - commands::streams::xreadgroup(self, group, consumer, count, block, noack, keys, ids) - .await? - .convert() + async move { + into!(group, consumer, keys, ids); + commands::streams::xreadgroup(self, group, consumer, count, block, noack, keys, ids) + .await? + .convert() + } } /// Remove one or more messages from the Pending Entries List (PEL) of a stream consumer group. /// /// - async fn xack(&self, key: K, group: G, ids: I) -> RedisResult + fn xack(&self, key: K, group: G, ids: I) -> impl Future> + Send where R: FromRedis, K: Into + Send, G: Into + Send, I: Into + Send, { - into!(key, group, ids); - commands::streams::xack(self, key, group, ids).await?.convert() + async move { + into!(key, group, ids); + commands::streams::xack(self, key, group, ids).await?.convert() + } } /// A variation of [xclaim](Self::xclaim) with a less verbose return type. 
- async fn xclaim_values( + fn xclaim_values( &self, key: K, group: G, @@ -543,7 +626,7 @@ pub trait StreamsInterface: ClientLike + Sized { retry_count: Option, force: bool, justid: bool, - ) -> RedisResult>> + ) -> impl Future>>> + Send where Ri: FromRedis, Rk: FromRedisKey + Hash + Eq, @@ -553,22 +636,24 @@ pub trait StreamsInterface: ClientLike + Sized { C: Into + Send, I: Into + Send, { - into!(key, group, consumer, ids); - commands::streams::xclaim( - self, - key, - group, - consumer, - min_idle_time, - ids, - idle, - time, - retry_count, - force, - justid, - ) - .await? - .into_xread_value() + async move { + into!(key, group, consumer, ids); + commands::streams::xclaim( + self, + key, + group, + consumer, + min_idle_time, + ids, + idle, + time, + retry_count, + force, + justid, + ) + .await? + .into_xread_value() + } } /// In the context of a stream consumer group, this command changes the ownership of a pending message, @@ -577,7 +662,7 @@ pub trait StreamsInterface: ClientLike + Sized { /// /// /// **See [xclaim_values](Self::xclaim_values) for a variation of this function that might be more useful.** - async fn xclaim( + fn xclaim( &self, key: K, group: G, @@ -589,7 +674,7 @@ pub trait StreamsInterface: ClientLike + Sized { retry_count: Option, force: bool, justid: bool, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -597,22 +682,24 @@ pub trait StreamsInterface: ClientLike + Sized { C: Into + Send, I: Into + Send, { - into!(key, group, consumer, ids); - commands::streams::xclaim( - self, - key, - group, - consumer, - min_idle_time, - ids, - idle, - time, - retry_count, - force, - justid, - ) - .await? - .convert() + async move { + into!(key, group, consumer, ids); + commands::streams::xclaim( + self, + key, + group, + consumer, + min_idle_time, + ids, + idle, + time, + retry_count, + force, + justid, + ) + .await? 
+ .convert() + } } /// This command transfers ownership of pending stream entries that match the specified criteria. It also converts @@ -620,7 +707,7 @@ pub trait StreamsInterface: ClientLike + Sized { /// /// // FIXME: this type declaration wont work for Redis v7. Probably need a new FF for this... - async fn xautoclaim_values( + fn xautoclaim_values( &self, key: K, group: G, @@ -629,7 +716,7 @@ pub trait StreamsInterface: ClientLike + Sized { start: I, count: Option, justid: bool, - ) -> RedisResult<(String, Vec>)> + ) -> impl Future>)>> + Send where Ri: FromRedis, Rk: FromRedisKey + Hash + Eq, @@ -639,10 +726,12 @@ pub trait StreamsInterface: ClientLike + Sized { C: Into + Send, I: Into + Send, { - into!(key, group, consumer, start); - commands::streams::xautoclaim(self, key, group, consumer, min_idle_time, start, count, justid) - .await? - .into_xautoclaim_values() + async move { + into!(key, group, consumer, start); + commands::streams::xautoclaim(self, key, group, consumer, min_idle_time, start, count, justid) + .await? + .into_xautoclaim_values() + } } /// This command transfers ownership of pending stream entries that match the specified criteria. @@ -651,7 +740,7 @@ pub trait StreamsInterface: ClientLike + Sized { /// /// **Note: See [xautoclaim_values](Self::xautoclaim_values) for a variation of this function that may be more /// useful.** - async fn xautoclaim( + fn xautoclaim( &self, key: K, group: G, @@ -660,7 +749,7 @@ pub trait StreamsInterface: ClientLike + Sized { start: I, count: Option, justid: bool, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -668,23 +757,27 @@ pub trait StreamsInterface: ClientLike + Sized { C: Into + Send, I: Into + Send, { - into!(key, group, consumer, start); - commands::streams::xautoclaim(self, key, group, consumer, min_idle_time, start, count, justid) - .await? 
- .convert() + async move { + into!(key, group, consumer, start); + commands::streams::xautoclaim(self, key, group, consumer, min_idle_time, start, count, justid) + .await? + .convert() + } } /// Inspect the list of pending messages in a consumer group. /// /// - async fn xpending(&self, key: K, group: G, args: A) -> RedisResult + fn xpending(&self, key: K, group: G, args: A) -> impl Future> + Send where R: FromRedis, K: Into + Send, G: Into + Send, A: Into + Send, { - into!(key, group, args); - commands::streams::xpending(self, key, group, args).await?.convert() + async move { + into!(key, group, args); + commands::streams::xpending(self, key, group, args).await?.convert() + } } } diff --git a/src/commands/interfaces/timeseries.rs b/src/commands/interfaces/timeseries.rs index 4d20d380..db76c140 100644 --- a/src/commands/interfaces/timeseries.rs +++ b/src/commands/interfaces/timeseries.rs @@ -3,28 +3,20 @@ use crate::{ interfaces::ClientLike, prelude::{RedisError, RedisKey, RedisResult}, types::{ - Aggregator, - DuplicatePolicy, - Encoding, - FromRedis, - GetLabels, - GetTimestamp, - GroupBy, - RangeAggregation, - RedisMap, + Aggregator, DuplicatePolicy, Encoding, FromRedis, GetLabels, GetTimestamp, GroupBy, RangeAggregation, RedisMap, Timestamp, }, }; use bytes_utils::Str; +use futures::Future; /// A [Redis Timeseries](https://github.com/RedisTimeSeries/RedisTimeSeries/) interface. -#[async_trait] -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] pub trait TimeSeriesInterface: ClientLike { /// Append a sample to a time series. 
/// /// - async fn ts_add( + fn ts_add( &self, key: K, timestamp: T, @@ -34,7 +26,7 @@ pub trait TimeSeriesInterface: ClientLike { chunk_size: Option, on_duplicate: Option, labels: L, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -43,51 +35,55 @@ pub trait TimeSeriesInterface: ClientLike { L: TryInto + Send, L::Error: Into, { - into!(key); - try_into!(timestamp, labels); - commands::timeseries::ts_add( - self, - key, - timestamp, - value, - retention, - encoding, - chunk_size, - on_duplicate, - labels, - ) - .await? - .convert() + async move { + into!(key); + try_into!(timestamp, labels); + commands::timeseries::ts_add( + self, + key, + timestamp, + value, + retention, + encoding, + chunk_size, + on_duplicate, + labels, + ) + .await? + .convert() + } } /// Update the retention, chunk size, duplicate policy, and labels of an existing time series. /// /// - async fn ts_alter( + fn ts_alter( &self, key: K, retention: Option, chunk_size: Option, duplicate_policy: Option, labels: L, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, L: TryInto + Send, L::Error: Into, { - into!(key); - try_into!(labels); - commands::timeseries::ts_alter(self, key, retention, chunk_size, duplicate_policy, labels) - .await? - .convert() + async move { + into!(key); + try_into!(labels); + commands::timeseries::ts_alter(self, key, retention, chunk_size, duplicate_policy, labels) + .await? + .convert() + } } /// Create a new time series. /// /// - async fn ts_create( + fn ts_create( &self, key: K, retention: Option, @@ -95,46 +91,50 @@ pub trait TimeSeriesInterface: ClientLike { chunk_size: Option, duplicate_policy: Option, labels: L, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, L: TryInto + Send, L::Error: Into, { - into!(key); - try_into!(labels); - commands::timeseries::ts_create(self, key, retention, encoding, chunk_size, duplicate_policy, labels) - .await? 
- .convert() + async move { + into!(key); + try_into!(labels); + commands::timeseries::ts_create(self, key, retention, encoding, chunk_size, duplicate_policy, labels) + .await? + .convert() + } } /// Create a compaction rule. /// /// - async fn ts_createrule( + fn ts_createrule( &self, src: S, dest: D, aggregation: (Aggregator, u64), align_timestamp: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, S: Into + Send, D: Into + Send, { - into!(src, dest); - commands::timeseries::ts_createrule(self, src, dest, aggregation, align_timestamp) - .await? - .convert() + async move { + into!(src, dest); + commands::timeseries::ts_createrule(self, src, dest, aggregation, align_timestamp) + .await? + .convert() + } } /// Decrease the value of the sample with the maximum existing timestamp, or create a new sample with a value equal /// to the value of the sample with the maximum existing timestamp with a given decrement. /// /// - async fn ts_decrby( + fn ts_decrby( &self, key: K, subtrahend: f64, @@ -143,71 +143,79 @@ pub trait TimeSeriesInterface: ClientLike { uncompressed: bool, chunk_size: Option, labels: L, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, L: TryInto + Send, L::Error: Into + Send, { - into!(key); - try_into!(labels); - commands::timeseries::ts_decrby( - self, - key, - subtrahend, - timestamp, - retention, - uncompressed, - chunk_size, - labels, - ) - .await? - .convert() + async move { + into!(key); + try_into!(labels); + commands::timeseries::ts_decrby( + self, + key, + subtrahend, + timestamp, + retention, + uncompressed, + chunk_size, + labels, + ) + .await? + .convert() + } } /// Delete all samples between two timestamps for a given time series. 
/// /// - async fn ts_del(&self, key: K, from: i64, to: i64) -> RedisResult + fn ts_del(&self, key: K, from: i64, to: i64) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::timeseries::ts_del(self, key, from, to).await?.convert() + async move { + into!(key); + commands::timeseries::ts_del(self, key, from, to).await?.convert() + } } /// Delete a compaction rule. /// /// - async fn ts_deleterule(&self, src: S, dest: D) -> RedisResult + fn ts_deleterule(&self, src: S, dest: D) -> impl Future> + Send where R: FromRedis, S: Into + Send, D: Into + Send, { - into!(src, dest); - commands::timeseries::ts_deleterule(self, src, dest).await?.convert() + async move { + into!(src, dest); + commands::timeseries::ts_deleterule(self, src, dest).await?.convert() + } } /// Get the sample with the highest timestamp from a given time series. /// /// - async fn ts_get(&self, key: K, latest: bool) -> RedisResult + fn ts_get(&self, key: K, latest: bool) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::timeseries::ts_get(self, key, latest).await?.convert() + async move { + into!(key); + commands::timeseries::ts_get(self, key, latest).await?.convert() + } } /// Increase the value of the sample with the maximum existing timestamp, or create a new sample with a value equal /// to the value of the sample with the maximum existing timestamp with a given increment. /// /// - async fn ts_incrby( + fn ts_incrby( &self, key: K, addend: f64, @@ -216,56 +224,62 @@ pub trait TimeSeriesInterface: ClientLike { uncompressed: bool, chunk_size: Option, labels: L, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, L: TryInto + Send, L::Error: Into + Send, { - into!(key); - try_into!(labels); - commands::timeseries::ts_incrby( - self, - key, - addend, - timestamp, - retention, - uncompressed, - chunk_size, - labels, - ) - .await? 
- .convert() + async move { + into!(key); + try_into!(labels); + commands::timeseries::ts_incrby( + self, + key, + addend, + timestamp, + retention, + uncompressed, + chunk_size, + labels, + ) + .await? + .convert() + } } /// Return information and statistics for a time series. /// /// - async fn ts_info(&self, key: K, debug: bool) -> RedisResult + fn ts_info(&self, key: K, debug: bool) -> impl Future> + Send where R: FromRedis, K: Into + Send, { - into!(key); - commands::timeseries::ts_info(self, key, debug).await?.convert() + async move { + into!(key); + commands::timeseries::ts_info(self, key, debug).await?.convert() + } } /// Append new samples to one or more time series. /// /// - async fn ts_madd(&self, samples: I) -> RedisResult + fn ts_madd(&self, samples: I) -> impl Future> + Send where R: FromRedis, K: Into + Send, I: IntoIterator + Send, { - let samples: Vec<_> = samples - .into_iter() - .map(|(key, ts, val)| (key.into(), ts, val)) - .collect(); + async move { + let samples: Vec<_> = samples + .into_iter() + .map(|(key, ts, val)| (key.into(), ts, val)) + .collect(); - commands::timeseries::ts_madd(self, samples).await?.convert() + commands::timeseries::ts_madd(self, samples).await?.convert() + } } /// Get the sample with the highest timestamp from each time series matching a specific filter. @@ -274,19 +288,26 @@ pub trait TimeSeriesInterface: ClientLike { /// [Resp3TimeSeriesValues](crate::types::Resp3TimeSeriesValues) for more information. 
/// /// - async fn ts_mget(&self, latest: bool, labels: Option, filters: I) -> RedisResult + fn ts_mget( + &self, + latest: bool, + labels: Option, + filters: I, + ) -> impl Future> + Send where R: FromRedis, L: Into + Send, S: Into + Send, I: IntoIterator + Send, { - let labels = labels.map(|l| l.into()); - let filters = filters.into_iter().map(|s| s.into()).collect(); + async move { + let labels = labels.map(|l| l.into()); + let filters = filters.into_iter().map(|s| s.into()).collect(); - commands::timeseries::ts_mget(self, latest, labels, filters) - .await? - .convert() + commands::timeseries::ts_mget(self, latest, labels, filters) + .await? + .convert() + } } /// Query a range across multiple time series by filters in the forward direction. @@ -295,7 +316,7 @@ pub trait TimeSeriesInterface: ClientLike { /// [Resp3TimeSeriesValues](crate::types::Resp3TimeSeriesValues) for more information. /// /// - async fn ts_mrange( + fn ts_mrange( &self, from: F, to: T, @@ -307,7 +328,7 @@ pub trait TimeSeriesInterface: ClientLike { aggregation: Option, filters: J, group_by: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, F: TryInto + Send, @@ -318,25 +339,27 @@ pub trait TimeSeriesInterface: ClientLike { I: IntoIterator + Send, J: IntoIterator + Send, { - try_into!(from, to); - let filters = filters.into_iter().map(|s| s.into()).collect(); - let filter_by_ts = filter_by_ts.into_iter().collect(); + async move { + try_into!(from, to); + let filters = filters.into_iter().map(|s| s.into()).collect(); + let filter_by_ts = filter_by_ts.into_iter().collect(); - commands::timeseries::ts_mrange( - self, - from, - to, - latest, - filter_by_ts, - filter_by_value, - labels, - count, - aggregation, - filters, - group_by, - ) - .await? - .convert() + commands::timeseries::ts_mrange( + self, + from, + to, + latest, + filter_by_ts, + filter_by_value, + labels, + count, + aggregation, + filters, + group_by, + ) + .await? 
+ .convert() + } } /// Query a range across multiple time series by filters in the reverse direction. @@ -345,7 +368,7 @@ pub trait TimeSeriesInterface: ClientLike { /// [Resp3TimeSeriesValues](crate::types::Resp3TimeSeriesValues) for more information. /// /// - async fn ts_mrevrange( + fn ts_mrevrange( &self, from: F, to: T, @@ -357,7 +380,7 @@ pub trait TimeSeriesInterface: ClientLike { aggregation: Option, filters: J, group_by: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, F: TryInto + Send, @@ -368,44 +391,48 @@ pub trait TimeSeriesInterface: ClientLike { I: IntoIterator + Send, J: IntoIterator + Send, { - try_into!(from, to); - let filters = filters.into_iter().map(|s| s.into()).collect(); - let filter_by_ts = filter_by_ts.into_iter().collect(); + async move { + try_into!(from, to); + let filters = filters.into_iter().map(|s| s.into()).collect(); + let filter_by_ts = filter_by_ts.into_iter().collect(); - commands::timeseries::ts_mrevrange( - self, - from, - to, - latest, - filter_by_ts, - filter_by_value, - labels, - count, - aggregation, - filters, - group_by, - ) - .await? - .convert() + commands::timeseries::ts_mrevrange( + self, + from, + to, + latest, + filter_by_ts, + filter_by_value, + labels, + count, + aggregation, + filters, + group_by, + ) + .await? + .convert() + } } /// Get all time series keys matching a filter list. /// /// - async fn ts_queryindex(&self, filters: I) -> RedisResult + fn ts_queryindex(&self, filters: I) -> impl Future> + Send where R: FromRedis, S: Into + Send, I: IntoIterator + Send, { - let filters = filters.into_iter().map(|s| s.into()).collect(); - commands::timeseries::ts_queryindex(self, filters).await?.convert() + async move { + let filters = filters.into_iter().map(|s| s.into()).collect(); + commands::timeseries::ts_queryindex(self, filters).await?.convert() + } } /// Query a range in forward direction. 
/// /// - async fn ts_range( + fn ts_range( &self, key: K, from: F, @@ -415,7 +442,7 @@ pub trait TimeSeriesInterface: ClientLike { filter_by_value: Option<(i64, i64)>, count: Option, aggregation: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -425,29 +452,31 @@ pub trait TimeSeriesInterface: ClientLike { T::Error: Into + Send, I: IntoIterator + Send, { - into!(key); - try_into!(from, to); - let filter_by_ts = filter_by_ts.into_iter().collect(); + async move { + into!(key); + try_into!(from, to); + let filter_by_ts = filter_by_ts.into_iter().collect(); - commands::timeseries::ts_range( - self, - key, - from, - to, - latest, - filter_by_ts, - filter_by_value, - count, - aggregation, - ) - .await? - .convert() + commands::timeseries::ts_range( + self, + key, + from, + to, + latest, + filter_by_ts, + filter_by_value, + count, + aggregation, + ) + .await? + .convert() + } } /// Query a range in reverse direction. /// /// - async fn ts_revrange( + fn ts_revrange( &self, key: K, from: F, @@ -457,7 +486,7 @@ pub trait TimeSeriesInterface: ClientLike { filter_by_value: Option<(i64, i64)>, count: Option, aggregation: Option, - ) -> RedisResult + ) -> impl Future> + Send where R: FromRedis, K: Into + Send, @@ -467,22 +496,24 @@ pub trait TimeSeriesInterface: ClientLike { T::Error: Into + Send, I: IntoIterator + Send, { - into!(key); - try_into!(from, to); - let filter_by_ts = filter_by_ts.into_iter().collect(); + async move { + into!(key); + try_into!(from, to); + let filter_by_ts = filter_by_ts.into_iter().collect(); - commands::timeseries::ts_revrange( - self, - key, - from, - to, - latest, - filter_by_ts, - filter_by_value, - count, - aggregation, - ) - .await? - .convert() + commands::timeseries::ts_revrange( + self, + key, + from, + to, + latest, + filter_by_ts, + filter_by_value, + count, + aggregation, + ) + .await? 
+ .convert() + } } } diff --git a/src/commands/interfaces/tracking.rs b/src/commands/interfaces/tracking.rs index fd2fb78a..a852a23b 100644 --- a/src/commands/interfaces/tracking.rs +++ b/src/commands/interfaces/tracking.rs @@ -4,11 +4,11 @@ use crate::{ prelude::RedisResult, types::{Invalidation, MultipleStrings}, }; +use futures::Future; use tokio::{sync::broadcast::Receiver as BroadcastReceiver, task::JoinHandle}; /// A high level interface that supports [client side caching](https://redis.io/docs/manual/client-side-caching/) via the [client tracking](https://redis.io/commands/client-tracking/) interface. -#[async_trait] -#[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] pub trait TrackingInterface: ClientLike + Sized { /// Send the [CLIENT TRACKING](https://redis.io/commands/client-tracking/) command to all connected servers, subscribing to [invalidation messages](Self::on_invalidation) on the same connection. /// @@ -17,24 +17,26 @@ pub trait TrackingInterface: ClientLike + Sized { /// /// See the basic [client tracking](crate::interfaces::ClientInterface::client_tracking) function for more /// information on the underlying commands. - async fn start_tracking

( + fn start_tracking

( &self, prefixes: P, bcast: bool, optin: bool, optout: bool, noloop: bool, - ) -> RedisResult<()> + ) -> impl Future> + Send where P: Into + Send, { - into!(prefixes); - commands::tracking::start_tracking(self, prefixes, bcast, optin, optout, noloop).await + async move { + into!(prefixes); + commands::tracking::start_tracking(self, prefixes, bcast, optin, optout, noloop).await + } } /// Disable client tracking on all connections. - async fn stop_tracking(&self) -> RedisResult<()> { - commands::tracking::stop_tracking(self).await + fn stop_tracking(&self) -> impl Future> + Send { + async move { commands::tracking::stop_tracking(self).await } } /// Spawn a task that processes invalidation messages from the server. diff --git a/src/commands/interfaces/transactions.rs b/src/commands/interfaces/transactions.rs index d38bb384..c7e79bf7 100644 --- a/src/commands/interfaces/transactions.rs +++ b/src/commands/interfaces/transactions.rs @@ -3,7 +3,6 @@ use crate::{clients::Transaction, interfaces::ClientLike}; /// Functions that implement the [transactions](https://redis.io/commands#transactions) interface. /// /// See the [Transaction](crate::clients::Transaction) client for more information; -#[async_trait] #[cfg(feature = "transactions")] #[cfg_attr(docsrs, doc(cfg(feature = "transactions")))] pub trait TransactionInterface: ClientLike + Sized { diff --git a/src/error.rs b/src/error.rs index 4ccba6c0..fc67eb3f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,6 +1,6 @@ use bytes_utils::string::Utf8Error as BytesUtf8Error; use futures::channel::oneshot::Canceled; -use redis_protocol::{resp2::types::Frame as Resp2Frame, types::RedisProtocolError}; +use redis_protocol::{error::RedisProtocolError, resp2::types::BytesFrame as Resp2Frame}; use semver::Error as SemverError; use std::{ borrow::{Borrow, Cow}, @@ -96,7 +96,7 @@ pub struct RedisError { /// Details about the specific error condition. details: Cow<'static, str>, /// The kind of error. 
- kind: RedisErrorKind, + kind: RedisErrorKind, } impl Clone for RedisError { @@ -274,15 +274,6 @@ impl From for RedisError { } } -#[doc(hidden)] -#[cfg(feature = "enable-rustls")] -#[cfg_attr(docsrs, doc(cfg(feature = "enable-rustls")))] -impl From for RedisError { - fn from(e: tokio_rustls::rustls::Error) -> Self { - RedisError::new(RedisErrorKind::Tls, format!("{:?}", e)) - } -} - #[doc(hidden)] #[cfg(feature = "enable-rustls")] #[cfg_attr(docsrs, doc(cfg(feature = "enable-rustls")))] @@ -295,8 +286,8 @@ impl From for RedisError { #[doc(hidden)] #[cfg(feature = "enable-rustls")] #[cfg_attr(docsrs, doc(cfg(feature = "enable-rustls")))] -impl From for RedisError { - fn from(e: webpki::Error) -> Self { +impl From for RedisError { + fn from(e: rustls::Error) -> Self { RedisError::new(RedisErrorKind::Tls, format!("{:?}", e)) } } diff --git a/src/interfaces.rs b/src/interfaces.rs index f31ffca4..91fa2135 100644 --- a/src/interfaces.rs +++ b/src/interfaces.rs @@ -6,31 +6,24 @@ use crate::{ protocol::command::{RedisCommand, RouterCommand}, router::commands as router_commands, types::{ - ClientState, - ClusterStateChange, - ConnectHandle, - ConnectionConfig, - CustomCommand, - FromRedis, - InfoKind, - KeyspaceEvent, - Message, - Options, - PerformanceConfig, - ReconnectPolicy, - RedisConfig, - RedisValue, - RespVersion, + ClientState, ClusterStateChange, ConnectHandle, ConnectionConfig, CustomCommand, FromRedis, InfoKind, + KeyspaceEvent, Message, Options, PerformanceConfig, ReconnectPolicy, RedisConfig, RedisValue, RespVersion, Server, - ShutdownFlags, }, utils, }; -pub use redis_protocol::resp3::types::Frame as Resp3Frame; +use bytes_utils::Str; +use futures::Future; use semver::Version; +use std::time::Duration; use std::{convert::TryInto, sync::Arc}; use tokio::{sync::broadcast::Receiver as BroadcastReceiver, task::JoinHandle}; +pub use redis_protocol::resp3::types::BytesFrame as Resp3Frame; + +#[cfg(feature = "i-server")] +use crate::types::ShutdownFlags; + /// Type 
alias for `Result`. pub type RedisResult = Result; @@ -114,8 +107,7 @@ pub(crate) fn send_to_router(inner: &Arc, command: RouterComma } /// Any Redis client that implements any part of the Redis interface. -#[async_trait] -pub trait ClientLike: Clone + Send + Sized { +pub trait ClientLike: Clone + Send + Sync + Sized { #[doc(hidden)] fn inner(&self) -> &Arc; @@ -163,22 +155,22 @@ pub trait ClientLike: Clone + Send + Sized { } } - /// Whether or not the client has a reconnection policy. + /// Whether the client has a reconnection policy. fn has_reconnect_policy(&self) -> bool { self.inner().policy.read().is_some() } - /// Whether or not the client will automatically pipeline commands. + /// Whether the client will automatically pipeline commands. fn is_pipelined(&self) -> bool { self.inner().is_pipelined() } - /// Whether or not the client is connected to a cluster. + /// Whether the client is connected to a cluster. fn is_clustered(&self) -> bool { self.inner().config.server.is_clustered() } - /// Whether or not the client uses the sentinel interface. + /// Whether the client uses the sentinel interface. fn uses_sentinels(&self) -> bool { self.inner().config.server.is_sentinel() } @@ -200,14 +192,14 @@ pub trait ClientLike: Clone + Send + Sized { self.inner().state.read().clone() } - /// Whether or not all underlying connections are healthy. + /// Whether all underlying connections are healthy. fn is_connected(&self) -> bool { *self.inner().state.read() == ClientState::Connected } /// Read the set of active connections managed by the client. - async fn active_connections(&self) -> Result, RedisError> { - commands::client::active_connections(self).await + fn active_connections(&self) -> impl Future, RedisError>> + Send { + commands::server::active_connections(self) } /// Read the server version, if known. @@ -218,8 +210,8 @@ pub trait ClientLike: Clone + Send + Sized { /// Override the DNS resolution logic for the client. 
#[cfg(feature = "dns")] #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] - async fn set_resolver(&self, resolver: Arc) { - self.inner().set_resolver(resolver).await; + fn set_resolver(&self, resolver: Arc) -> impl Future + Send { + async move { self.inner().set_resolver(resolver).await } } /// Connect to the Redis server. @@ -257,20 +249,22 @@ pub trait ClientLike: Clone + Send + Sized { /// Force a reconnection to the server(s). /// /// When running against a cluster this function will also refresh the cached cluster routing table. - async fn force_reconnection(&self) -> RedisResult<()> { - commands::server::force_reconnection(self.inner()).await + fn force_reconnection(&self) -> impl Future> + Send { + async move { commands::server::force_reconnection(self.inner()).await } } /// Wait for the result of the next connection attempt. /// /// This can be used with `on_reconnect` to separate initialization logic that needs to occur only on the next /// connection attempt vs all subsequent attempts. - async fn wait_for_connect(&self) -> RedisResult<()> { - if utils::read_locked(&self.inner().state) == ClientState::Connected { - debug!("{}: Client is already connected.", self.inner().id); - Ok(()) - } else { - self.inner().notifications.connect.load().subscribe().recv().await? + fn wait_for_connect(&self) -> impl Future> + Send { + async move { + if utils::read_locked(&self.inner().state) == ClientState::Connected { + debug!("{}: Client is already connected.", self.inner().id); + Ok(()) + } else { + self.inner().notifications.connect.load().subscribe().recv().await? + } } } @@ -296,17 +290,19 @@ pub trait ClientLike: Clone + Send + Sized { /// connection_task.await? 
/// } /// ``` - async fn init(&self) -> RedisResult { - let mut rx = { self.inner().notifications.connect.load().subscribe() }; - let task = self.connect(); - let error = rx.recv().await.map_err(RedisError::from).and_then(|r| r).err(); - - if let Some(error) = error { - // the initial connection failed, so we should gracefully close the routing task - utils::reset_router_task(self.inner()); - Err(error) - } else { - Ok(task) + fn init(&self) -> impl Future> + Send { + async move { + let mut rx = { self.inner().notifications.connect.load().subscribe() }; + let task = self.connect(); + let error = rx.recv().await.map_err(RedisError::from).and_then(|r| r).err(); + + if let Some(error) = error { + // the initial connection failed, so we should gracefully close the routing task + utils::reset_router_task(self.inner()); + Err(error) + } else { + Ok(task) + } } } @@ -315,35 +311,53 @@ pub trait ClientLike: Clone + Send + Sized { /// returned by [connect](Self::connect) will resolve which indicates that the connection has been fully closed. /// /// This function will also close all error, pubsub message, and reconnection event streams. - async fn quit(&self) -> RedisResult<()> { - commands::server::quit(self).await + fn quit(&self) -> impl Future> + Send { + async move { commands::server::quit(self).await } } /// Shut down the server and quit the client. /// /// - async fn shutdown(&self, flags: Option) -> RedisResult<()> { - commands::server::shutdown(self, flags).await + #[cfg(feature = "i-server")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] + fn shutdown(&self, flags: Option) -> impl Future> + Send { + async move { commands::server::shutdown(self, flags).await } + } + + /// Delete the keys in all databases. + /// + /// + fn flushall(&self, r#async: bool) -> impl Future> + Send + where + R: FromRedis, + { + async move { commands::server::flushall(self, r#async).await?.convert() } + } + + /// Delete the keys on all nodes in the cluster. 
This is a special function that does not map directly to the Redis + /// interface. + fn flushall_cluster(&self) -> impl Future> + Send { + async move { commands::server::flushall_cluster(self).await } } /// Ping the Redis server. /// /// - async fn ping(&self) -> RedisResult + fn ping(&self) -> impl Future> + Send where R: FromRedis, { - commands::server::ping(self).await?.convert() + async move { commands::server::ping(self).await?.convert() } } /// Read info about the server. /// /// - async fn info(&self, section: Option) -> RedisResult + fn info(&self, section: Option) -> impl Future> + Send where R: FromRedis, { - commands::server::info(self, section).await?.convert() + async move { commands::server::info(self, section).await?.convert() } } /// Run a custom command that is not yet supported via another interface on this client. This is most useful when @@ -354,33 +368,37 @@ pub trait ClientLike: Clone + Send + Sized { /// /// This interface should be used with caution as it may break the automatic pipeline features in the client if /// command flags are not properly configured. - async fn custom(&self, cmd: CustomCommand, args: Vec) -> RedisResult + fn custom(&self, cmd: CustomCommand, args: Vec) -> impl Future> + Send where R: FromRedis, T: TryInto + Send, T::Error: Into + Send, { - let args = utils::try_into_vec(args)?; - commands::server::custom(self, cmd, args).await?.convert() + async move { + let args = utils::try_into_vec(args)?; + commands::server::custom(self, cmd, args).await?.convert() + } } /// Run a custom command similar to [custom](Self::custom), but return the response frame directly without any /// parsing. /// /// Note: RESP2 frames from the server are automatically converted to the RESP3 format when parsed by the client. 
- async fn custom_raw(&self, cmd: CustomCommand, args: Vec) -> RedisResult + fn custom_raw(&self, cmd: CustomCommand, args: Vec) -> impl Future> + Send where T: TryInto + Send, T::Error: Into + Send, { - let args = utils::try_into_vec(args)?; - commands::server::custom_raw(self, cmd, args).await + async move { + let args = utils::try_into_vec(args)?; + commands::server::custom_raw(self, cmd, args).await + } } /// Customize various configuration options on commands. fn with_options(&self, options: &Options) -> WithOptions { WithOptions { - client: self.clone(), + client: self.clone(), options: options.clone(), } } @@ -405,6 +423,69 @@ where }) } +/// Functions that provide a connection heartbeat interface. +pub trait HeartbeatInterface: ClientLike { + /// Return a future that will ping the server on an interval. + #[allow(unreachable_code)] + fn enable_heartbeat( + &self, + interval: Duration, + break_on_error: bool, + ) -> impl Future> + Send { + async move { + let _self = self.clone(); + let mut interval = tokio::time::interval(interval); + + loop { + interval.tick().await; + + if break_on_error { + let _: () = _self.ping().await?; + } else if let Err(e) = _self.ping::<()>().await { + warn!("{}: Heartbeat ping failed with error: {:?}", _self.inner().id, e); + } + } + + Ok(()) + } + } +} + +/// Functions for authenticating clients. +pub trait AuthInterface: ClientLike { + /// Request for authentication in a password-protected Redis server. Returns ok if successful. + /// + /// The client will automatically authenticate with the default user if a password is provided in the associated + /// `RedisConfig` when calling [connect](crate::interfaces::ClientLike::connect). + /// + /// If running against clustered servers this function will authenticate all connections. 
+ /// + /// + fn auth(&self, username: Option, password: S) -> impl Future> + Send + where + S: Into + Send, + { + async move { + into!(password); + commands::server::auth(self, username, password).await + } + } + + /// Switch to a different protocol, optionally authenticating in the process. + /// + /// If running against clustered servers this function will issue the HELLO command to each server concurrently. + /// + /// + fn hello( + &self, + version: RespVersion, + auth: Option<(Str, Str)>, + setname: Option, + ) -> impl Future> + Send { + async move { commands::server::hello(self, version, auth, setname).await } + } +} + /// An interface that exposes various client and connection events. /// /// Calling [quit](crate::interfaces::ClientLike::quit) will close all event streams. @@ -568,34 +649,70 @@ pub trait EventInterface: ClientLike { } } -pub use crate::commands::interfaces::{ - acl::AclInterface, - client::ClientInterface, - cluster::ClusterInterface, - config::ConfigInterface, - geo::GeoInterface, - hashes::HashesInterface, - hyperloglog::HyperloglogInterface, - keys::KeysInterface, - lists::ListInterface, - lua::{FunctionInterface, LuaInterface}, - memory::MemoryInterface, - metrics::MetricsInterface, - pubsub::PubsubInterface, - server::{AuthInterface, HeartbeatInterface, ServerInterface}, - sets::SetsInterface, - slowlog::SlowlogInterface, - sorted_sets::SortedSetsInterface, - streams::StreamsInterface, -}; - -#[cfg(feature = "redis-json")] +#[cfg(feature = "i-acl")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-acl")))] +pub use crate::commands::interfaces::acl::*; +#[cfg(feature = "i-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] +pub use crate::commands::interfaces::client::*; +#[cfg(feature = "i-cluster")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] +pub use crate::commands::interfaces::cluster::*; +#[cfg(feature = "i-config")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-config")))] +pub use crate::commands::interfaces::config::*; 
+#[cfg(feature = "i-geo")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] +pub use crate::commands::interfaces::geo::*; +#[cfg(feature = "i-hashes")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hashes")))] +pub use crate::commands::interfaces::hashes::*; +#[cfg(feature = "i-hyperloglog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-hyperloglog")))] +pub use crate::commands::interfaces::hyperloglog::*; +#[cfg(feature = "i-keys")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-keys")))] +pub use crate::commands::interfaces::keys::*; +#[cfg(feature = "i-lists")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] +pub use crate::commands::interfaces::lists::*; +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] +pub use crate::commands::interfaces::lua::*; +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] +pub use crate::commands::interfaces::memory::*; +#[cfg(feature = "i-pubsub")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-pubsub")))] +pub use crate::commands::interfaces::pubsub::*; +#[cfg(feature = "i-redis-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] pub use crate::commands::interfaces::redis_json::RedisJsonInterface; #[cfg(feature = "sentinel-client")] pub use crate::commands::interfaces::sentinel::SentinelInterface; -#[cfg(feature = "time-series")] -pub use crate::commands::interfaces::timeseries::TimeSeriesInterface; -#[cfg(feature = "client-tracking")] -pub use crate::commands::interfaces::tracking::TrackingInterface; +#[cfg(feature = "i-server")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-server")))] +pub use crate::commands::interfaces::server::*; +#[cfg(feature = "i-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sets")))] +pub use crate::commands::interfaces::sets::*; +#[cfg(feature = "i-slowlog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] +pub use crate::commands::interfaces::slowlog::*; +#[cfg(feature = "i-sorted-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = 
"i-sorted-sets")))] +pub use crate::commands::interfaces::sorted_sets::*; +#[cfg(feature = "i-streams")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] +pub use crate::commands::interfaces::streams::*; +#[cfg(feature = "i-time-series")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] +pub use crate::commands::interfaces::timeseries::*; +#[cfg(feature = "i-tracking")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] +pub use crate::commands::interfaces::tracking::*; #[cfg(feature = "transactions")] -pub use crate::commands::interfaces::transactions::TransactionInterface; +#[cfg_attr(docsrs, doc(cfg(feature = "transactions")))] +pub use crate::commands::interfaces::transactions::*; + +pub use crate::commands::interfaces::metrics::MetricsInterface; diff --git a/src/lib.rs b/src/lib.rs index 40074c46..2c78517d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -18,8 +18,10 @@ #![cfg_attr(docsrs, allow(unused_attributes))] #![doc = include_str!("../README.md")] +#[cfg(any(feature = "dns", feature = "replicas"))] #[macro_use] extern crate async_trait; + #[macro_use] extern crate log; @@ -153,22 +155,8 @@ pub mod prelude { error::{RedisError, RedisErrorKind}, interfaces::*, types::{ - Blocking, - Builder, - ConnectionConfig, - Expiration, - FromRedis, - Options, - PerformanceConfig, - ReconnectPolicy, - RedisConfig, - RedisKey, - RedisValue, - RedisValueKind, - Server, - ServerConfig, - SetOptions, - TcpConfig, + Blocking, Builder, ConnectionConfig, Expiration, FromRedis, Options, PerformanceConfig, ReconnectPolicy, + RedisConfig, RedisKey, RedisValue, RedisValueKind, Server, ServerConfig, SetOptions, TcpConfig, }, }; diff --git a/src/macros.rs b/src/macros.rs index 58372666..26042c2b 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -88,8 +88,8 @@ macro_rules! try_or_continue ( /// A helper macro to wrap a string value in quotes via the [json](serde_json::json) macro. 
/// /// See the [RedisJSON interface](crate::interfaces::RedisJsonInterface) for more information. -#[cfg(feature = "redis-json")] -#[cfg_attr(docsrs, doc(cfg(feature = "redis-json")))] +#[cfg(feature = "i-redis-json")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-redis-json")))] #[macro_export] macro_rules! json_quote( ($($json:tt)+) => { diff --git a/src/modules/backchannel.rs b/src/modules/backchannel.rs index af927400..95ca21d9 100644 --- a/src/modules/backchannel.rs +++ b/src/modules/backchannel.rs @@ -5,7 +5,7 @@ use crate::{ router::Connections, utils, }; -use redis_protocol::resp3::types::Frame as Resp3Frame; +use redis_protocol::resp3::types::BytesFrame as Resp3Frame; use std::{collections::HashMap, sync::Arc}; /// Check if an existing connection can be used to the provided `server`, otherwise create a new one. @@ -35,9 +35,9 @@ async fn check_and_create_transport( #[derive(Default)] pub struct Backchannel { /// A connection to any of the servers. - pub transport: Option, + pub transport: Option, /// An identifier for the blocked connection, if any. - pub blocked: Option, + pub blocked: Option, /// A map of server IDs to connection IDs, as managed by the router. pub connection_ids: HashMap, } @@ -98,12 +98,12 @@ impl Backchannel { } } - /// Whether or not the client is blocked on a command. + /// Whether the client is blocked on a command. pub fn is_blocked(&self) -> bool { self.blocked.is_some() } - /// Whether or not an open connection exists to the blocked server. + /// Whether an open connection exists to the blocked server. 
pub fn has_blocked_transport(&self) -> bool { match self.blocked { Some(ref server) => match self.transport { @@ -164,7 +164,7 @@ impl Backchannel { server ); - utils::apply_timeout( + utils::timeout( transport.request_response(command, inner.is_resp3()), inner.connection_timeout(), ) diff --git a/src/modules/inner.rs b/src/modules/inner.rs index fc124647..19ce73af 100644 --- a/src/modules/inner.rs +++ b/src/modules/inner.rs @@ -40,49 +40,49 @@ use std::collections::HashMap; pub type CommandSender = UnboundedSender; pub type CommandReceiver = UnboundedReceiver; -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] use crate::types::Invalidation; pub struct Notifications { /// The client ID. - pub id: Str, + pub id: Str, /// A broadcast channel for the `on_error` interface. - pub errors: ArcSwap>, + pub errors: ArcSwap>, /// A broadcast channel for the `on_message` interface. - pub pubsub: ArcSwap>, + pub pubsub: ArcSwap>, /// A broadcast channel for the `on_keyspace_event` interface. - pub keyspace: ArcSwap>, + pub keyspace: ArcSwap>, /// A broadcast channel for the `on_reconnect` interface. - pub reconnect: ArcSwap>, + pub reconnect: ArcSwap>, /// A broadcast channel for the `on_cluster_change` interface. pub cluster_change: ArcSwap>>, /// A broadcast channel for the `on_connect` interface. - pub connect: ArcSwap>>, + pub connect: ArcSwap>>, /// A channel for events that should close all client tasks with `Canceled` errors. /// /// Emitted when QUIT, SHUTDOWN, etc are called. - pub close: BroadcastSender<()>, + pub close: BroadcastSender<()>, /// A broadcast channel for the `on_invalidation` interface. - #[cfg(feature = "client-tracking")] - pub invalidations: ArcSwap>, + #[cfg(feature = "i-tracking")] + pub invalidations: ArcSwap>, /// A broadcast channel for notifying callers when servers go unresponsive. 
- pub unresponsive: ArcSwap>, + pub unresponsive: ArcSwap>, } impl Notifications { pub fn new(id: &Str, capacity: usize) -> Self { Notifications { - id: id.clone(), - close: broadcast::channel(capacity).0, - errors: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), - pubsub: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), - keyspace: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), - reconnect: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), - cluster_change: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), - connect: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), - #[cfg(feature = "client-tracking")] - invalidations: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), - unresponsive: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), + id: id.clone(), + close: broadcast::channel(capacity).0, + errors: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), + pubsub: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), + keyspace: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), + reconnect: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), + cluster_change: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), + connect: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), + #[cfg(feature = "i-tracking")] + invalidations: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), + unresponsive: ArcSwap::new(Arc::new(broadcast::channel(capacity).0)), } } @@ -94,7 +94,7 @@ impl Notifications { utils::swap_new_broadcast_channel(&self.reconnect, capacity); utils::swap_new_broadcast_channel(&self.cluster_change, capacity); utils::swap_new_broadcast_channel(&self.connect, capacity); - #[cfg(feature = "client-tracking")] + #[cfg(feature = "i-tracking")] utils::swap_new_broadcast_channel(&self.invalidations, capacity); utils::swap_new_broadcast_channel(&self.unresponsive, capacity); } @@ -143,7 +143,7 @@ impl Notifications { } } - #[cfg(feature = "client-tracking")] + #[cfg(feature = "i-tracking")] 
pub fn broadcast_invalidation(&self, msg: Invalidation) { if let Err(_) = self.invalidations.load().send(msg) { debug!("{}: No `on_invalidation` listeners.", self.id); @@ -159,14 +159,14 @@ impl Notifications { #[derive(Clone)] pub struct ClientCounters { - pub cmd_buffer_len: Arc, + pub cmd_buffer_len: Arc, pub redelivery_count: Arc, } impl Default for ClientCounters { fn default() -> Self { ClientCounters { - cmd_buffer_len: Arc::new(AtomicUsize::new(0)), + cmd_buffer_len: Arc::new(AtomicUsize::new(0)), redelivery_count: Arc::new(AtomicUsize::new(0)), } } @@ -209,7 +209,7 @@ impl ClientCounters { /// Cached state related to the server(s). pub struct ServerState { - pub kind: ServerKind, + pub kind: ServerKind, #[cfg(feature = "replicas")] pub replicas: HashMap, } @@ -217,9 +217,9 @@ pub struct ServerState { impl ServerState { pub fn new(config: &RedisConfig) -> Self { ServerState { - kind: ServerKind::new(config), + kind: ServerKind::new(config), #[cfg(feature = "replicas")] - replicas: HashMap::new(), + replicas: HashMap::new(), } } @@ -232,16 +232,16 @@ impl ServerState { /// Added state associated with different server deployment types, synchronized by the router task. pub enum ServerKind { Sentinel { - version: Option, + version: Option, /// An updated set of known sentinel nodes. sentinels: Vec, /// The server host/port resolved from the sentinel nodes, if known. - primary: Option, + primary: Option, }, Cluster { version: Option, /// The cached cluster routing table. - cache: Option, + cache: Option, }, Centralized { version: Option, @@ -254,12 +254,12 @@ impl ServerKind { match config.server { ServerConfig::Clustered { .. } => ServerKind::Cluster { version: None, - cache: None, + cache: None, }, ServerConfig::Sentinel { ref hosts, .. } => ServerKind::Sentinel { - version: None, + version: None, sentinels: hosts.clone(), - primary: None, + primary: None, }, ServerConfig::Centralized { .. 
} => ServerKind::Centralized { version: None }, #[cfg(feature = "unix-sockets")] @@ -376,48 +376,48 @@ fn create_resolver(id: &Str) -> Arc { pub struct RedisClientInner { /// An internal lock used to sync certain select operations that should not run concurrently across tasks. - pub _lock: Mutex<()>, + pub _lock: Mutex<()>, /// The client ID used for logging and the default `CLIENT SETNAME` value. - pub id: Str, + pub id: Str, /// Whether the client uses RESP3. - pub resp3: Arc, + pub resp3: Arc, /// The state of the underlying connection. - pub state: RwLock, + pub state: RwLock, /// Client configuration options. - pub config: Arc, + pub config: Arc, /// Connection configuration options. - pub connection: Arc, + pub connection: Arc, /// Performance config options for the client. - pub performance: ArcSwap, + pub performance: ArcSwap, /// An optional reconnect policy. - pub policy: RwLock>, + pub policy: RwLock>, /// Notification channels for the event interfaces. pub notifications: Arc, /// An mpsc sender for commands to the router. - pub command_tx: ArcSwap, + pub command_tx: ArcSwap, /// Temporary storage for the receiver half of the router command channel. - pub command_rx: RwLock>, + pub command_rx: RwLock>, /// Shared counters. - pub counters: ClientCounters, + pub counters: ClientCounters, /// The DNS resolver to use when establishing new connections. - pub resolver: AsyncRwLock>, + pub resolver: AsyncRwLock>, /// A backchannel that can be used to control the router connections even while the connections are blocked. - pub backchannel: Arc>, + pub backchannel: Arc>, /// Server state cache for various deployment types. - pub server_state: RwLock, + pub server_state: RwLock, /// Command latency metrics. #[cfg(feature = "metrics")] - pub latency_stats: RwLock, + pub latency_stats: RwLock, /// Network latency metrics. #[cfg(feature = "metrics")] pub network_latency_stats: RwLock, /// Payload size metrics tracking for requests. 
#[cfg(feature = "metrics")] - pub req_size_stats: Arc>, + pub req_size_stats: Arc>, /// Payload size metrics tracking for responses #[cfg(feature = "metrics")] - pub res_size_stats: Arc>, + pub res_size_stats: Arc>, } impl RedisClientInner { @@ -521,6 +521,13 @@ impl RedisClientInner { *guard = resolver; } + pub fn cluster_discovery_policy(&self) -> Option<&ClusterDiscoveryPolicy> { + match self.config.server { + ServerConfig::Clustered { ref policy, .. } => Some(policy), + _ => None, + } + } + pub async fn get_resolver(&self) -> Arc { self.resolver.read().await.clone() } @@ -686,9 +693,9 @@ impl RedisClientInner { ); let cmd = RouterCommand::Reconnect { - server: Some(server.clone()), - force: false, - tx: None, + server: Some(server.clone()), + force: false, + tx: None, replica: true, }; if let Err(_) = interfaces::send_to_router(self, cmd) { diff --git a/src/modules/mocks.rs b/src/modules/mocks.rs index 7d339366..6b1acec0 100644 --- a/src/modules/mocks.rs +++ b/src/modules/mocks.rs @@ -30,14 +30,14 @@ pub struct MockCommand { /// * `SET` - `"SET"` /// * `XGROUP CREATE` - `"XGROUP"` /// * `INCRBY` - `"INCRBY"` - pub cmd: Str, + pub cmd: Str, /// The optional subcommand string (or second word) in the command string. For example: /// * `SET` - `None` /// * `XGROUP CREATE` - `Some("CREATE")` /// * `INCRBY` - `None` pub subcommand: Option, /// The ordered list of arguments to the command. - pub args: Vec, + pub args: Vec, } /// An interface for intercepting and processing Redis commands in a mocking layer. 
@@ -317,7 +317,7 @@ impl Mocks for Buffer { } #[cfg(test)] -#[cfg(feature = "mocks")] +#[cfg(all(feature = "mocks", feature = "i-keys"))] mod tests { use super::*; use crate::{ @@ -391,14 +391,14 @@ mod tests { let expected = vec![ MockCommand { - cmd: "SET".into(), + cmd: "SET".into(), subcommand: None, - args: vec!["foo".as_bytes().into(), "bar".into()], + args: vec!["foo".as_bytes().into(), "bar".into()], }, MockCommand { - cmd: "GET".into(), + cmd: "GET".into(), subcommand: None, - args: vec!["foo".as_bytes().into()], + args: vec!["foo".as_bytes().into()], }, ]; assert_eq!(buffer.take(), expected); diff --git a/src/modules/response.rs b/src/modules/response.rs index 4050f5db..021359f0 100644 --- a/src/modules/response.rs +++ b/src/modules/response.rs @@ -1,6 +1,6 @@ use crate::{ error::{RedisError, RedisErrorKind}, - types::{ClusterInfo, DatabaseMemoryStats, GeoPosition, MemoryStats, RedisKey, RedisValue, SlowlogEntry, QUEUED}, + types::{RedisKey, RedisValue, QUEUED}, }; use bytes::Bytes; use bytes_utils::Str; @@ -9,6 +9,15 @@ use std::{ hash::{BuildHasher, Hash}, }; +#[cfg(feature = "i-cluster")] +use crate::types::ClusterInfo; +#[cfg(feature = "i-geo")] +use crate::types::GeoPosition; +#[cfg(feature = "i-slowlog")] +use crate::types::SlowlogEntry; +#[cfg(feature = "i-memory")] +use crate::types::{DatabaseMemoryStats, MemoryStats}; + #[allow(unused_imports)] use std::any::type_name; @@ -595,30 +604,40 @@ impl FromRedis for Value { } } +#[cfg(feature = "i-geo")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] impl FromRedis for GeoPosition { fn from_value(value: RedisValue) -> Result { GeoPosition::try_from(value) } } +#[cfg(feature = "i-slowlog")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-slowlog")))] impl FromRedis for SlowlogEntry { fn from_value(value: RedisValue) -> Result { SlowlogEntry::try_from(value) } } +#[cfg(feature = "i-cluster")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] impl FromRedis for ClusterInfo { fn from_value(value: 
RedisValue) -> Result { ClusterInfo::try_from(value) } } +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl FromRedis for MemoryStats { fn from_value(value: RedisValue) -> Result { MemoryStats::try_from(value) } } +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl FromRedis for DatabaseMemoryStats { fn from_value(value: RedisValue) -> Result { DatabaseMemoryStats::try_from(value) diff --git a/src/monitor/parser.rs b/src/monitor/parser.rs index d65b77e1..f91eace8 100644 --- a/src/monitor/parser.rs +++ b/src/monitor/parser.rs @@ -7,7 +7,10 @@ use nom::{ sequence::{delimited as nom_delimited, preceded as nom_preceded, terminated as nom_terminated}, IResult, }; -use redis_protocol::{resp3::types::Frame as Resp3Frame, types::RedisParseError}; +use redis_protocol::{ + error::RedisParseError, + resp3::types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame}, +}; use std::{str, sync::Arc}; const EMPTY_SPACE: &str = " "; @@ -142,10 +145,10 @@ mod tests { let input = "1631469940.785623 [0 127.0.0.1:46998] \"SET\" \"foo\" \"2\""; let expected = Command { timestamp: 1631469940.785623, - db: 0, - client: "127.0.0.1:46998".into(), - command: "SET".into(), - args: vec!["foo".into(), "2".into()], + db: 0, + client: "127.0.0.1:46998".into(), + command: "SET".into(), + args: vec!["foo".into(), "2".into()], }; let actual = d_parse_frame(input.as_bytes()).unwrap(); @@ -157,10 +160,10 @@ mod tests { let input = "1631469940.785623 [0 127.0.0.1:46998] \"SET\" \"foo bar\" \"2\""; let expected = Command { timestamp: 1631469940.785623, - db: 0, - client: "127.0.0.1:46998".into(), - command: "SET".into(), - args: vec!["foo bar".into(), "2".into()], + db: 0, + client: "127.0.0.1:46998".into(), + command: "SET".into(), + args: vec!["foo bar".into(), "2".into()], }; let actual = d_parse_frame(input.as_bytes()).unwrap(); @@ -173,10 +176,10 @@ mod tests { - \\\"ghi\\\" \\\"jkl\\\"\""; let expected = Command { timestamp: 
1631475365.563304, - db: 0, - client: "127.0.0.1:47438".into(), - command: "SET".into(), - args: vec![ + db: 0, + client: "127.0.0.1:47438".into(), + command: "SET".into(), + args: vec![ "foo".into(), "0 - \\\"abc\\\"".into(), "1 - \\\"def\\\"".into(), @@ -193,10 +196,10 @@ mod tests { let input = "1631469940.785623 [0 127.0.0.1:46998] \"KEYS\""; let expected = Command { timestamp: 1631469940.785623, - db: 0, - client: "127.0.0.1:46998".into(), - command: "KEYS".into(), - args: vec![], + db: 0, + client: "127.0.0.1:46998".into(), + command: "KEYS".into(), + args: vec![], }; let actual = d_parse_frame(input.as_bytes()).unwrap(); diff --git a/src/monitor/utils.rs b/src/monitor/utils.rs index b295afdb..66051b2f 100644 --- a/src/monitor/utils.rs +++ b/src/monitor/utils.rs @@ -12,6 +12,7 @@ use crate::{ types::{ConnectionConfig, PerformanceConfig, RedisConfig, ServerConfig}, }; use futures::stream::{Stream, StreamExt}; +use redis_protocol::resp3::types::Resp3Frame; use std::sync::Arc; use tokio::{ io::{AsyncRead, AsyncWrite}, @@ -32,7 +33,7 @@ async fn handle_monitor_frame( return None; }, }; - let frame_size = protocol_utils::resp3_frame_size(&frame); + let frame_size = frame.encode_len(); if frame_size >= inner.with_perf_config(|c| c.blocking_encode_threshold) { // since this isn't called from the Encoder/Decoder trait we can use spawn_blocking here diff --git a/src/protocol/codec.rs b/src/protocol/codec.rs index 41d9dcf1..f09aea58 100644 --- a/src/protocol/codec.rs +++ b/src/protocol/codec.rs @@ -10,11 +10,13 @@ use crate::{ use bytes::BytesMut; use bytes_utils::Str; use redis_protocol::{ - resp2::{decode::decode_mut as resp2_decode, encode::encode_bytes as resp2_encode, types::Frame as Resp2Frame}, + resp2::{ + decode::decode_bytes_mut as resp2_decode, encode::extend_encode as resp2_encode, types::BytesFrame as Resp2Frame, + }, resp3::{ - decode::streaming::decode_mut as resp3_decode, - encode::complete::encode_bytes as resp3_encode, - types::{Frame as Resp3Frame, 
StreamedFrame}, + decode::streaming::decode_bytes_mut as resp3_decode, + encode::complete::extend_encode as resp3_encode, + types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame, StreamedFrame}, }, }; use std::sync::{atomic::AtomicBool, Arc}; @@ -133,7 +135,7 @@ fn resp3_decode_frame(codec: &mut RedisCodec, src: &mut BytesMut) -> Result, - pub streaming_state: Option, + pub name: Str, + pub server: Server, + pub resp3: Arc, + pub streaming_state: Option>, #[cfg(feature = "metrics")] - pub req_size_stats: Arc>, + pub req_size_stats: Arc>, #[cfg(feature = "metrics")] - pub res_size_stats: Arc>, + pub res_size_stats: Arc>, } impl RedisCodec { pub fn new(inner: &Arc, server: &Server) -> Self { RedisCodec { - server: server.clone(), - name: inner.id.clone(), - resp3: inner.shared_resp3(), - streaming_state: None, + server: server.clone(), + name: inner.id.clone(), + resp3: inner.shared_resp3(), + streaming_state: None, #[cfg(feature = "metrics")] - req_size_stats: inner.req_size_stats.clone(), + req_size_stats: inner.req_size_stats.clone(), #[cfg(feature = "metrics")] - res_size_stats: inner.res_size_stats.clone(), + res_size_stats: inner.res_size_stats.clone(), } } diff --git a/src/protocol/command.rs b/src/protocol/command.rs index c5132fa6..128719b3 100644 --- a/src/protocol/command.rs +++ b/src/protocol/command.rs @@ -10,8 +10,7 @@ use crate::{ }, trace, types::{CustomCommand, RedisValue}, - utils as client_utils, - utils, + utils as client_utils, utils, }; use bytes_utils::Str; use parking_lot::Mutex; @@ -20,8 +19,7 @@ use std::{ convert::TryFrom, fmt, fmt::Formatter, - mem, - str, + mem, str, sync::{atomic::AtomicBool, Arc}, time::{Duration, Instant}, }; @@ -307,6 +305,7 @@ pub enum RedisCommandKind { Smismember, Smove, Sort, + SortRo, Spop, Srandmember, Srem, @@ -747,6 +746,7 @@ impl RedisCommandKind { RedisCommandKind::Smismember => "SMISMEMBER", RedisCommandKind::Smove => "SMOVE", RedisCommandKind::Sort => "SORT", + RedisCommandKind::SortRo => "SORT_RO", 
RedisCommandKind::Spop => "SPOP", RedisCommandKind::Srandmember => "SRANDMEMBER", RedisCommandKind::Srem => "SREM", @@ -1095,6 +1095,7 @@ impl RedisCommandKind { RedisCommandKind::Smismember => "SMISMEMBER", RedisCommandKind::Smove => "SMOVE", RedisCommandKind::Sort => "SORT", + RedisCommandKind::SortRo => "SORT_RO", RedisCommandKind::Spop => "SPOP", RedisCommandKind::Srandmember => "SRANDMEMBER", RedisCommandKind::Srem => "SREM", @@ -1447,55 +1448,55 @@ impl RedisCommandKind { pub struct RedisCommand { /// The command and optional subcommand name. - pub kind: RedisCommandKind, + pub kind: RedisCommandKind, /// The policy to apply when handling the response. - pub response: ResponseKind, + pub response: ResponseKind, /// The policy to use when hashing the arguments for cluster routing. - pub hasher: ClusterHash, + pub hasher: ClusterHash, /// The provided arguments. /// /// Some commands store arguments differently. Callers should use `self.args()` to account for this. - pub arguments: Vec, + pub arguments: Vec, /// A oneshot sender used to communicate with the router. - pub router_tx: Arc>>, + pub router_tx: Arc>>, /// The number of times the command has been written to a socket. - pub write_attempts: u32, + pub write_attempts: u32, /// The number of write attempts remaining. - pub attempts_remaining: u32, + pub attempts_remaining: u32, /// The number of cluster redirections remaining. pub redirections_remaining: u32, - /// Whether or not the command can be pipelined. + /// Whether the command can be pipelined. /// /// Also used for commands like XREAD that block based on an argument. - pub can_pipeline: bool, - /// Whether or not to skip backpressure checks. - pub skip_backpressure: bool, + pub can_pipeline: bool, + /// Whether to skip backpressure checks. + pub skip_backpressure: bool, /// Whether to fail fast without retries if the connection ever closes unexpectedly. - pub fail_fast: bool, + pub fail_fast: bool, /// The internal ID of a transaction. 
- pub transaction_id: Option, + pub transaction_id: Option, /// The timeout duration provided by the `with_options` interface. - pub timeout_dur: Option, + pub timeout_dur: Option, /// Whether the command has timed out from the perspective of the caller. - pub timed_out: Arc, + pub timed_out: Arc, /// A timestamp of when the command was last written to the socket. - pub network_start: Option, + pub network_start: Option, /// Whether to route the command to a replica, if possible. - pub use_replica: bool, + pub use_replica: bool, /// Only send the command to the provided server. - pub cluster_node: Option, + pub cluster_node: Option, /// A timestamp of when the command was first created from the public interface. #[cfg(feature = "metrics")] - pub created: Instant, + pub created: Instant, /// Tracing state that has to carry over across writer/reader tasks to track certain fields (response size, etc). #[cfg(feature = "partial-tracing")] - pub traces: CommandTraces, + pub traces: CommandTraces, /// A counter to differentiate unique commands. #[cfg(feature = "debug-ids")] - pub counter: usize, + pub counter: usize, /// Whether to send a `CLIENT CACHING yes|no` before the command. 
- #[cfg(feature = "client-tracking")] - pub caching: Option, + #[cfg(feature = "i-tracking")] + pub caching: Option, } impl fmt::Debug for RedisCommand { @@ -1556,7 +1557,7 @@ impl From<(RedisCommandKind, Vec)> for RedisCommand { traces: CommandTraces::default(), #[cfg(feature = "debug-ids")] counter: command_counter(), - #[cfg(feature = "client-tracking")] + #[cfg(feature = "i-tracking")] caching: None, } } @@ -1588,7 +1589,7 @@ impl From<(RedisCommandKind, Vec, ResponseSender)> for RedisCommand traces: CommandTraces::default(), #[cfg(feature = "debug-ids")] counter: command_counter(), - #[cfg(feature = "client-tracking")] + #[cfg(feature = "i-tracking")] caching: None, } } @@ -1620,7 +1621,7 @@ impl From<(RedisCommandKind, Vec, ResponseKind)> for RedisCommand { traces: CommandTraces::default(), #[cfg(feature = "debug-ids")] counter: command_counter(), - #[cfg(feature = "client-tracking")] + #[cfg(feature = "i-tracking")] caching: None, } } @@ -1653,7 +1654,7 @@ impl RedisCommand { traces: CommandTraces::default(), #[cfg(feature = "debug-ids")] counter: command_counter(), - #[cfg(feature = "client-tracking")] + #[cfg(feature = "i-tracking")] caching: None, } } @@ -1661,31 +1662,31 @@ impl RedisCommand { /// Create a new empty `ASKING` command. 
pub fn new_asking(hash_slot: u16) -> Self { RedisCommand { - kind: RedisCommandKind::Asking, - hasher: ClusterHash::Custom(hash_slot), - arguments: Vec::new(), - timed_out: Arc::new(AtomicBool::new(false)), - timeout_dur: None, - response: ResponseKind::Respond(None), - router_tx: Arc::new(Mutex::new(None)), - attempts_remaining: 0, - redirections_remaining: 0, - can_pipeline: true, - skip_backpressure: false, - transaction_id: None, - use_replica: false, - cluster_node: None, - network_start: None, - write_attempts: 0, - fail_fast: false, + kind: RedisCommandKind::Asking, + hasher: ClusterHash::Custom(hash_slot), + arguments: Vec::new(), + timed_out: Arc::new(AtomicBool::new(false)), + timeout_dur: None, + response: ResponseKind::Respond(None), + router_tx: Arc::new(Mutex::new(None)), + attempts_remaining: 0, + redirections_remaining: 0, + can_pipeline: true, + skip_backpressure: false, + transaction_id: None, + use_replica: false, + cluster_node: None, + network_start: None, + write_attempts: 0, + fail_fast: false, #[cfg(feature = "metrics")] - created: Instant::now(), + created: Instant::now(), #[cfg(feature = "partial-tracing")] - traces: CommandTraces::default(), + traces: CommandTraces::default(), #[cfg(feature = "debug-ids")] - counter: command_counter(), - #[cfg(feature = "client-tracking")] - caching: None, + counter: command_counter(), + #[cfg(feature = "i-tracking")] + caching: None, } } @@ -1852,7 +1853,7 @@ impl RedisCommand { traces: CommandTraces::default(), #[cfg(feature = "debug-ids")] counter: command_counter(), - #[cfg(feature = "client-tracking")] + #[cfg(feature = "i-tracking")] caching: self.caching, } } @@ -1961,9 +1962,9 @@ impl RedisCommand { #[cfg(feature = "mocks")] pub fn to_mocked(&self) -> MockCommand { MockCommand { - cmd: self.kind.cmd_str(), + cmd: self.kind.cmd_str(), subcommand: self.kind.subcommand_str(), - args: self.args().clone(), + args: self.args().clone(), } } @@ -2002,32 +2003,32 @@ pub enum RouterCommand { // a different 
cluster node mapping. #[cfg(feature = "transactions")] Transaction { - id: u64, - commands: Vec, - watched: Option, + id: u64, + commands: Vec, + watched: Option, abort_on_error: bool, - tx: ResponseSender, + tx: ResponseSender, }, /// Retry a command after a `MOVED` error. // This will trigger a call to `CLUSTER SLOTS` before the command is retried. Moved { - slot: u16, - server: Server, + slot: u16, + server: Server, command: RedisCommand, }, /// Retry a command after an `ASK` error. // This is typically used instead of `RouterResponse::Ask` when a command was pipelined. Ask { - slot: u16, - server: Server, + slot: u16, + server: Server, command: RedisCommand, }, /// Initiate a reconnection to the provided server, or all servers. // The client may not perform a reconnection if a healthy connection exists to `server`, unless `force` is `true`. Reconnect { - server: Option, - force: bool, - tx: Option, + server: Option, + force: bool, + tx: Option, #[cfg(feature = "replicas")] replica: bool, }, @@ -2037,7 +2038,10 @@ pub enum RouterCommand { Connections { tx: OneshotSender> }, /// Force sync the replica routing table with the server(s). #[cfg(feature = "replicas")] - SyncReplicas { tx: OneshotSender> }, + SyncReplicas { + tx: OneshotSender>, + reset: bool, + }, } impl RouterCommand { @@ -2166,8 +2170,9 @@ impl fmt::Debug for RouterCommand { .field("command", &command.kind.to_str_debug()); }, #[cfg(feature = "replicas")] - RouterCommand::SyncReplicas { .. } => { + RouterCommand::SyncReplicas { reset, .. 
} => { formatter.field("kind", &"Sync Replicas"); + formatter.field("reset", &reset); }, }; diff --git a/src/protocol/connection.rs b/src/protocol/connection.rs index 8f3df24c..568fb486 100644 --- a/src/protocol/connection.rs +++ b/src/protocol/connection.rs @@ -8,18 +8,16 @@ use crate::{ utils as protocol_utils, }, types::InfoKind, - utils as client_utils, - utils, + utils as client_utils, utils, }; use bytes_utils::Str; use crossbeam_queue::SegQueue; use futures::{ sink::SinkExt, stream::{SplitSink, SplitStream, StreamExt}, - Sink, - Stream, + Sink, Stream, }; -use redis_protocol::resp3::types::{Frame as Resp3Frame, RespVersion}; +use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame, RespVersion}; use semver::Version; use socket2::SockRef; use std::{ @@ -67,14 +65,14 @@ pub type CommandBuffer = Vec; /// A shared buffer across tasks. #[derive(Clone, Debug)] pub struct SharedBuffer { - inner: Arc>, + inner: Arc>, blocked: Arc, } impl SharedBuffer { pub fn new() -> Self { SharedBuffer { - inner: Arc::new(SegQueue::new()), + inner: Arc::new(SegQueue::new()), blocked: Arc::new(AtomicBool::new(false)), } } @@ -381,16 +379,16 @@ impl Sink for SplitSinkKind { #[derive(Clone, Debug)] pub struct Counters { pub cmd_buffer_len: Arc, - pub in_flight: Arc, - pub feed_count: Arc, + pub in_flight: Arc, + pub feed_count: Arc, } impl Counters { pub fn new(cmd_buffer_len: &Arc) -> Self { Counters { cmd_buffer_len: cmd_buffer_len.clone(), - in_flight: Arc::new(AtomicUsize::new(0)), - feed_count: Arc::new(AtomicUsize::new(0)), + in_flight: Arc::new(AtomicUsize::new(0)), + feed_count: Arc::new(AtomicUsize::new(0)), } } @@ -423,19 +421,19 @@ impl Counters { pub struct RedisTransport { /// An identifier for the connection, usually `|:`. - pub server: Server, + pub server: Server, /// The parsed `SocketAddr` for the connection. - pub addr: Option, + pub addr: Option, /// The hostname used to initialize the connection. 
pub default_host: Str, /// The network connection. - pub transport: ConnectionKind, + pub transport: ConnectionKind, /// The connection/client ID from the CLIENT ID command. - pub id: Option, + pub id: Option, /// The server version. - pub version: Option, + pub version: Option, /// Counters for the connection state. - pub counters: Counters, + pub counters: Counters, } impl RedisTransport { @@ -498,10 +496,7 @@ impl RedisTransport { let counters = Counters::new(&inner.counters.cmd_buffer_len); let (id, version) = (None, None); - let tls_server_name = server - .tls_server_name - .as_ref().cloned() - .unwrap_or(server.host.clone()); + let tls_server_name = server.tls_server_name.as_ref().cloned().unwrap_or(server.host.clone()); let default_host = server.host.clone(); let codec = RedisCodec::new(inner, server); @@ -535,7 +530,7 @@ impl RedisTransport { #[cfg(feature = "enable-rustls")] #[allow(unreachable_patterns)] pub async fn new_rustls(inner: &Arc, server: &Server) -> Result { - use webpki::types::ServerName; + use rustls::pki_types::ServerName; let connector = match inner.config.tls { Some(ref config) => match config.connector { @@ -547,10 +542,7 @@ impl RedisTransport { let counters = Counters::new(&inner.counters.cmd_buffer_len); let (id, version) = (None, None); - let tls_server_name = server - .tls_server_name - .as_ref().cloned() - .unwrap_or(server.host.clone()); + let tls_server_name = server.tls_server_name.as_ref().cloned().unwrap_or(server.host.clone()); let default_host = server.host.clone(); let codec = RedisCodec::new(inner, server); @@ -746,14 +738,9 @@ impl RedisTransport { /// Send `QUIT` and close the connection. 
pub async fn disconnect(&mut self, inner: &Arc) -> Result<(), RedisError> { - let command: RedisCommand = RedisCommandKind::Quit.into(); - let quit_ft = self.request_response(command, inner.is_resp3()); - - if let Err(e) = client_utils::apply_timeout(quit_ft, inner.internal_command_timeout()).await { - _debug!(inner, "Error calling QUIT on backchannel: {:?}", e); + if let Err(e) = self.transport.close().await { + _warn!(inner, "Error closing connection to {}: {:?}", self.server, e); } - let _ = self.transport.close().await; - Ok(()) } @@ -810,7 +797,7 @@ impl RedisTransport { pub async fn setup(&mut self, inner: &Arc, timeout: Option) -> Result<(), RedisError> { let timeout = timeout.unwrap_or(inner.internal_command_timeout()); - utils::apply_timeout( + utils::timeout( async { if inner.config.password.is_some() || inner.config.version == RespVersion::RESP3 { self.switch_protocols_and_authenticate(inner).await?; @@ -846,7 +833,7 @@ impl RedisTransport { } let timeout = timeout.unwrap_or(inner.internal_command_timeout()); - utils::apply_timeout( + utils::timeout( async { _debug!(inner, "Sending READONLY to {}", self.server); let command = RedisCommand::new(RedisCommandKind::Readonly, vec![]); @@ -870,7 +857,7 @@ impl RedisTransport { let timeout = timeout.unwrap_or(inner.internal_command_timeout()); let command = RedisCommand::new(RedisCommandKind::Role, vec![]); - utils::apply_timeout( + utils::timeout( async { self .request_response(command, inner.is_resp3()) @@ -927,11 +914,11 @@ impl RedisTransport { } pub struct RedisReader { - pub stream: Option, - pub server: Server, - pub buffer: SharedBuffer, + pub stream: Option, + pub server: Server, + pub buffer: SharedBuffer, pub counters: Counters, - pub task: Option>>, + pub task: Option>>, } impl RedisReader { @@ -962,15 +949,15 @@ impl RedisReader { } pub struct RedisWriter { - pub sink: SplitSinkKind, - pub server: Server, + pub sink: SplitSinkKind, + pub server: Server, pub default_host: Str, - pub addr: Option, - 
pub buffer: SharedBuffer, - pub version: Option, - pub id: Option, - pub counters: Counters, - pub reader: Option, + pub addr: Option, + pub buffer: SharedBuffer, + pub version: Option, + pub id: Option, + pub counters: Counters, + pub reader: Option, } impl fmt::Debug for RedisWriter { @@ -1079,7 +1066,7 @@ impl RedisWriter { /// /// Returns the in-flight commands that had not received a response. pub async fn graceful_close(mut self) -> CommandBuffer { - let _ = utils::apply_timeout( + let _ = utils::timeout( async { let _ = self.sink.close().await; if let Some(mut reader) = self.reader { @@ -1113,20 +1100,20 @@ pub async fn create( inner.config.uses_rustls(), ); if inner.config.uses_native_tls() { - utils::apply_timeout(RedisTransport::new_native_tls(inner, server), timeout).await + utils::timeout(RedisTransport::new_native_tls(inner, server), timeout).await } else if inner.config.uses_rustls() { - utils::apply_timeout(RedisTransport::new_rustls(inner, server), timeout).await + utils::timeout(RedisTransport::new_rustls(inner, server), timeout).await } else { match inner.config.server { #[cfg(feature = "unix-sockets")] - ServerConfig::Unix { ref path } => utils::apply_timeout(RedisTransport::new_unix(inner, path), timeout).await, - _ => utils::apply_timeout(RedisTransport::new_tcp(inner, server), timeout).await, + ServerConfig::Unix { ref path } => utils::timeout(RedisTransport::new_unix(inner, path), timeout).await, + _ => utils::timeout(RedisTransport::new_tcp(inner, server), timeout).await, } } } /// Split a connection, spawn a reader task, and link the reader and writer halves. -pub fn split_and_initialize( +pub fn split( inner: &Arc, transport: RedisTransport, is_replica: bool, @@ -1195,5 +1182,5 @@ pub async fn request_response( writer.push_command(inner, command); writer.write_frame(frame, true, false).await?; - utils::apply_timeout(async { rx.await? }, timeout_dur).await + utils::timeout(async { rx.await? 
}, timeout_dur).await } diff --git a/src/protocol/debug.rs b/src/protocol/debug.rs index 2ff918d9..ee718bd1 100644 --- a/src/protocol/debug.rs +++ b/src/protocol/debug.rs @@ -1,7 +1,4 @@ -use redis_protocol::{ - resp2::types::Frame as Resp2Frame, - resp3::types::{Auth, Frame as Resp3Frame}, -}; +use redis_protocol::{resp2::types::BytesFrame as Resp2Frame, resp3::types::BytesFrame as Resp3Frame}; use std::{ collections::{HashMap, HashSet}, hash::{Hash, Hasher}, @@ -14,6 +11,7 @@ enum DebugFrame { Bytes(Vec), Integer(i64), Double(f64), + #[allow(dead_code)] Array(Vec), // TODO add support for maps in network logs #[allow(dead_code)] @@ -92,11 +90,7 @@ impl<'a> From<&'a Resp3Frame> for DebugFrame { ref version, ref auth, .. } => { let mut values = vec![DebugFrame::Integer(version.to_byte() as i64)]; - if let Some(Auth { - ref username, - ref password, - }) = auth - { + if let Some((ref username, ref password)) = auth { values.push(DebugFrame::String(username.to_string())); values.push(DebugFrame::String(password.to_string())); } diff --git a/src/protocol/responders.rs b/src/protocol/responders.rs index 942ab48c..9e94f038 100644 --- a/src/protocol/responders.rs +++ b/src/protocol/responders.rs @@ -12,6 +12,7 @@ use crate::{ }; use bytes_utils::Str; use parking_lot::Mutex; +use redis_protocol::resp3::types::Resp3Frame as _Resp3Frame; use std::{ fmt, fmt::Formatter, @@ -25,6 +26,7 @@ use std::{ use crate::modules::metrics::MovingStats; #[cfg(feature = "metrics")] use parking_lot::RwLock; +use redis_protocol::resp3::types::FrameKind; #[cfg(feature = "metrics")] use std::{cmp, time::Instant}; @@ -46,15 +48,15 @@ pub enum ResponseKind { /// cluster connections. Buffer { /// A shared buffer for response frames. - frames: Arc>>, + frames: Arc>>, /// The expected number of response frames. - expected: usize, + expected: usize, /// The number of response frames received. - received: Arc, + received: Arc, /// A shared oneshot channel to the caller. 
- tx: Arc>>, + tx: Arc>>, /// A local field for tracking the expected index of the response in the `frames` array. - index: usize, + index: usize, /// Whether errors should be returned early to the caller. error_early: bool, }, @@ -66,13 +68,17 @@ pub enum ResponseKind { impl fmt::Debug for ResponseKind { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", match self { - ResponseKind::Skip => "Skip", - ResponseKind::Buffer { .. } => "Buffer", - ResponseKind::Respond(_) => "Respond", - ResponseKind::KeyScan(_) => "KeyScan", - ResponseKind::ValueScan(_) => "ValueScan", - }) + write!( + f, + "{}", + match self { + ResponseKind::Skip => "Skip", + ResponseKind::Buffer { .. } => "Buffer", + ResponseKind::Respond(_) => "Respond", + ResponseKind::KeyScan(_) => "KeyScan", + ResponseKind::ValueScan(_) => "ValueScan", + } + ) } } @@ -93,11 +99,11 @@ impl ResponseKind { expected, error_early, } => ResponseKind::Buffer { - frames: frames.clone(), - tx: tx.clone(), - received: received.clone(), - index: *index, - expected: *expected, + frames: frames.clone(), + tx: tx.clone(), + received: received.clone(), + index: *index, + expected: *expected, error_early: *error_early, }, ResponseKind::KeyScan(_) | ResponseKind::ValueScan(_) => return None, @@ -121,11 +127,11 @@ impl ResponseKind { pub fn new_buffer(tx: ResponseSender) -> Self { ResponseKind::Buffer { - frames: Arc::new(Mutex::new(vec![])), - tx: Arc::new(Mutex::new(Some(tx))), - received: Arc::new(AtomicUsize::new(0)), - index: 0, - expected: 0, + frames: Arc::new(Mutex::new(vec![])), + tx: Arc::new(Mutex::new(Some(tx))), + received: Arc::new(AtomicUsize::new(0)), + index: 0, + expected: 0, error_early: true, } } @@ -197,7 +203,7 @@ fn sample_command_latencies(_: &Arc, _: &mut RedisCommand) {} /// Update the client's protocol version codec version after receiving a non-error response to HELLO. 
fn update_protocol_version(inner: &Arc, command: &RedisCommand, frame: &Resp3Frame) { - if !frame.is_error() { + if !matches!(frame.kind(), FrameKind::SimpleError | FrameKind::BlobError) { let version = match command.kind { RedisCommandKind::_Hello(ref version) => version, RedisCommandKind::_HelloAllCluster(ref version) => version, @@ -259,14 +265,14 @@ fn add_buffered_frame( fn merge_multiple_frames(frames: &mut Vec, error_early: bool) -> Resp3Frame { if error_early { for frame in frames.iter() { - if frame.is_error() { + if matches!(frame.kind(), FrameKind::SimpleError | FrameKind::BlobError) { return frame.clone(); } } } Resp3Frame::Array { - data: mem::take(frames), + data: mem::take(frames), attributes: None, } } @@ -467,10 +473,10 @@ pub fn respond_buffer( ) -> Result<(), RedisError> { _trace!( inner, - "Handling `buffer` response from {} for {}. Is error: {}, Index: {}, ID: {}", + "Handling `buffer` response from {} for {}. kind {:?}, Index: {}, ID: {}", server, command.kind.to_str_debug(), - frame.is_error(), + frame.kind(), index, command.debug_id() ); @@ -505,7 +511,7 @@ pub fn respond_buffer( ); let frame = merge_multiple_frames(&mut frames.lock(), error_early); - if frame.is_error() { + if matches!(frame.kind(), FrameKind::SimpleError | FrameKind::BlobError) { let err = match frame.as_str() { Some(s) => protocol_utils::pretty_error(s), None => RedisError::new( diff --git a/src/protocol/types.rs b/src/protocol/types.rs index 8c3b46fa..b4692e80 100644 --- a/src/protocol/types.rs +++ b/src/protocol/types.rs @@ -1,3 +1,16 @@ +use super::utils as protocol_utils; +use crate::prelude::RedisResult; +use crate::{ + error::{RedisError, RedisErrorKind}, + modules::inner::RedisClientInner, + protocol::{cluster, utils::server_to_parts}, + types::*, + utils, +}; +use async_trait::async_trait; +use bytes_utils::Str; +use rand::Rng; +use redis_protocol::{resp2::types::BytesFrame as Resp2Frame, resp3::types::BytesFrame as Resp3Frame}; use std::{ cmp::Ordering, 
collections::{BTreeMap, BTreeSet, HashMap}, @@ -7,25 +20,8 @@ use std::{ net::{SocketAddr, ToSocketAddrs}, sync::Arc, }; - -use bytes_utils::Str; -use rand::Rng; -#[allow(unused_imports)] -pub use redis_protocol::{redis_keyslot, resp2::types::NULL, types::CRLF}; -use redis_protocol::{resp2::types::Frame as Resp2Frame, resp2_frame_to_resp3, resp3::types::Frame as Resp3Frame}; use tokio::sync::mpsc::UnboundedSender; -use super::utils as protocol_utils; -use crate::{ - error::{RedisError, RedisErrorKind}, - modules::inner::RedisClientInner, - protocol::{cluster, utils::server_to_parts}, - types::*, - utils, -}; - -pub const REDIS_CLUSTER_SLOTS: u16 = 16384; - #[cfg(any(feature = "enable-rustls", feature = "enable-native-tls"))] use std::{net::IpAddr, str::FromStr}; @@ -37,12 +33,12 @@ pub enum ProtocolFrame { } impl ProtocolFrame { - /// Convert the frame tp RESP3. + /// Convert the frame to RESP3. pub fn into_resp3(self) -> Resp3Frame { // the `RedisValue::convert` logic already accounts for different encodings of maps and sets, so // we can just change everything to RESP3 above the protocol layer match self { - ProtocolFrame::Resp2(frame) => resp2_frame_to_resp3(frame), + ProtocolFrame::Resp2(frame) => frame.into_resp3(), ProtocolFrame::Resp3(frame) => frame, } } @@ -69,9 +65,9 @@ impl From for ProtocolFrame { #[derive(Debug, Clone)] pub struct Server { /// The hostname or IP address for the server. - pub host: Str, + pub host: Str, /// The port for the server. - pub port: u16, + pub port: u16, /// The server name used during the TLS handshake. #[cfg(any(feature = "enable-rustls", feature = "enable-native-tls"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "enable-rustls", feature = "enable-native-tls"))))] @@ -276,24 +272,24 @@ pub struct Message { /// The channel on which the message was sent. pub channel: Str, /// The message contents. - pub value: RedisValue, + pub value: RedisValue, /// The type of message subscription. 
- pub kind: MessageKind, + pub kind: MessageKind, /// The server that sent the message. - pub server: Server, + pub server: Server, } pub struct KeyScanInner { /// The hash slot for the command. - pub hash_slot: Option, + pub hash_slot: Option, /// An optional server override. - pub server: Option, + pub server: Option, /// The index of the cursor in `args`. pub cursor_idx: usize, /// The arguments sent in each scan command. - pub args: Vec, + pub args: Vec, /// The sender half of the results channel. - pub tx: UnboundedSender>, + pub tx: UnboundedSender>, } impl KeyScanInner { @@ -318,9 +314,9 @@ pub struct ValueScanInner { /// The index of the cursor argument in `args`. pub cursor_idx: usize, /// The arguments sent in each scan command. - pub args: Vec, + pub args: Vec, /// The sender half of the results channel. - pub tx: UnboundedSender>, + pub tx: UnboundedSender>, } impl ValueScanInner { @@ -403,13 +399,13 @@ impl ValueScanInner { #[derive(Debug, Clone, Eq, PartialEq)] pub struct SlotRange { /// The start of the hash slot range. - pub start: u16, + pub start: u16, /// The end of the hash slot range. - pub end: u16, + pub end: u16, /// The primary server owner. - pub primary: Server, + pub primary: Server, /// The internal ID assigned by the server. - pub id: Str, + pub id: Str, /// Replica node owners. #[cfg(feature = "replicas")] #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] @@ -520,7 +516,7 @@ impl ClusterRouting { /// Read a random primary node hash slot range from the cluster cache. pub fn random_slot(&self) -> Option<&SlotRange> { if !self.data.is_empty() { - let idx = rand::thread_rng().gen_range(0 .. self.data.len()); + let idx = rand::thread_rng().gen_range(0..self.data.len()); Some(&self.data[idx]) } else { None @@ -555,7 +551,7 @@ impl ClusterRouting { #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] pub trait Resolve: Send + Sync + 'static { /// Resolve a hostname. 
- async fn resolve(&self, host: Str, port: u16) -> Result, RedisError>; + async fn resolve(&self, host: Str, port: u16) -> RedisResult>; } /// Default DNS resolver that uses `to_socket_addrs` under the hood. @@ -573,7 +569,7 @@ impl DefaultResolver { #[async_trait] impl Resolve for DefaultResolver { - async fn resolve(&self, host: Str, port: u16) -> Result, RedisError> { + async fn resolve(&self, host: Str, port: u16) -> RedisResult> { let client_id = self.id.clone(); tokio::task::spawn_blocking(move || { diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index ace5fd44..516e228e 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -13,11 +13,17 @@ use crate::{ use bytes::Bytes; use bytes_utils::Str; use redis_protocol::{ - resp2::types::Frame as Resp2Frame, - resp3::types::{Auth, Frame as Resp3Frame, FrameMap, PUBSUB_PUSH_PREFIX}, + resp2::types::{BytesFrame as Resp2Frame, Resp2Frame as _Resp2Frame}, + resp3::types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame}, + types::{PUBSUB_PUSH_PREFIX, REDIS_CLUSTER_SLOTS}, }; use std::{borrow::Cow, collections::HashMap, convert::TryInto, ops::Deref, str, sync::Arc}; +#[cfg(any(feature = "i-lists", feature = "i-sorted-sets"))] +use redis_protocol::resp3::types::FrameKind; +#[cfg(feature = "i-hashes")] +use redis_protocol::resp3::types::FrameMap; + static LEGACY_AUTH_ERROR_BODY: &str = "ERR Client sent AUTH, but no password is set"; static ACL_AUTH_ERROR_PREFIX: &str = "ERR AUTH called without any password configured for the default user"; @@ -42,15 +48,6 @@ pub fn queued_frame() -> Resp3Frame { } } -pub fn frame_is_queued(frame: &Resp3Frame) -> bool { - match frame { - Resp3Frame::SimpleString { ref data, .. } | Resp3Frame::BlobString { ref data, .. } => { - str::from_utf8(data).ok().map(|s| s == QUEUED).unwrap_or(false) - }, - _ => false, - } -} - pub fn is_ok(frame: &Resp3Frame) -> bool { match frame { Resp3Frame::SimpleString { ref data, .. 
} => data == OK, @@ -137,6 +134,7 @@ pub fn frame_into_string(frame: Resp3Frame) -> Result { } /// Parse the frame from a shard pubsub channel. +// TODO clean this up with the v5 redis_protocol interface pub fn parse_shard_pubsub_frame(server: &Server, frame: &Resp3Frame) -> Option { let value = match frame { Resp3Frame::Array { ref data, .. } | Resp3Frame::Push { ref data, .. } => { @@ -209,8 +207,8 @@ pub fn parse_message_kind(frame: &Resp3Frame) -> Result } /// Parse the channel and value fields from a pubsub frame. -pub fn parse_message_fields(frame: &Resp3Frame) -> Result<(Str, RedisValue), RedisError> { - let mut frames = match frame.clone() { +pub fn parse_message_fields(frame: Resp3Frame) -> Result<(Str, RedisValue), RedisError> { + let mut frames = match frame { Resp3Frame::Array { data, .. } => data, Resp3Frame::Push { data, .. } => data, _ => return Err(RedisError::new(RedisErrorKind::Protocol, "Invalid pubsub frame type.")), @@ -236,7 +234,8 @@ pub fn frame_to_pubsub(server: &Server, frame: Resp3Frame) -> Result Result Result { - if let Some(message) = parse_shard_pubsub_frame(server, &frame) { - return Ok(message); - } - - // resp3 has an added "pubsub" simple string frame at the front - let mut out = Vec::with_capacity(frame.len() + 1); - out.push(Resp3Frame::SimpleString { - data: PUBSUB_PUSH_PREFIX.into(), - attributes: None, - }); - - if let Resp3Frame::Push { data, .. } = frame { - out.extend(data); - let frame = Resp3Frame::Push { - data: out, - attributes: None, - }; - - frame_to_pubsub(server, frame) - } else { - Err(RedisError::new( - RedisErrorKind::Protocol, - "Invalid pubsub message. 
Expected push frame.", - )) - } -} - pub fn check_resp2_auth_error(codec: &RedisCodec, frame: Resp2Frame) -> Resp2Frame { let is_auth_error = match frame { Resp2Frame::Error(ref data) => *data == LEGACY_AUTH_ERROR_BODY || data.starts_with(ACL_AUTH_ERROR_PREFIX), @@ -354,11 +320,10 @@ pub fn frame_to_str(frame: &Resp3Frame) -> Option { } } -fn parse_nested_map(data: FrameMap) -> Result { +#[cfg(feature = "i-hashes")] +fn parse_nested_map(data: FrameMap) -> Result { let mut out = HashMap::with_capacity(data.len()); - // maybe make this smarter, but that would require changing the RedisMap type to use potentially non-hashable types - // as keys... for (key, value) in data.into_iter() { let key: RedisKey = frame_to_results(key)?.try_into()?; let value = frame_to_results(value)?; @@ -370,8 +335,9 @@ fn parse_nested_map(data: FrameMap) -> Result { } /// Convert `nil` responses to a generic `Timeout` error. +#[cfg(any(feature = "i-lists", feature = "i-sorted-sets"))] pub fn check_null_timeout(frame: &Resp3Frame) -> Result<(), RedisError> { - if frame.is_null() { + if frame.kind() == FrameKind::Null { Err(RedisError::new(RedisErrorKind::Timeout, "Request timed out.")) } else { Ok(()) @@ -437,6 +403,7 @@ pub fn frame_to_results(frame: Resp3Frame) -> Result { } /// Flatten a single nested layer of arrays or sets into one array. +#[cfg(feature = "i-hashes")] pub fn flatten_frame(frame: Resp3Frame) -> Resp3Frame { match frame { Resp3Frame::Array { data, .. } => { @@ -491,6 +458,7 @@ pub fn flatten_frame(frame: Resp3Frame) -> Resp3Frame { } } +#[cfg(feature = "i-hashes")] /// Convert a frame to a nested RedisMap. 
pub fn frame_to_map(frame: Resp3Frame) -> Result { match frame { @@ -689,6 +657,7 @@ pub fn parse_master_role_replicas(data: RedisValue) -> Result, Redis } } +#[cfg(feature = "i-geo")] pub fn assert_array_len(data: &[T], len: usize) -> Result<(), RedisError> { if data.len() == len { Ok(()) @@ -789,11 +758,6 @@ pub fn arg_size(value: &RedisValue) -> usize { } } -#[cfg(any(feature = "blocking-encoding", feature = "partial-tracing", feature = "full-tracing"))] -pub fn resp3_frame_size(frame: &Resp3Frame) -> usize { - frame.encode_len().unwrap_or(0) -} - #[cfg(any(feature = "blocking-encoding", feature = "partial-tracing", feature = "full-tracing"))] pub fn args_size(args: &[RedisValue]) -> usize { args.iter().fold(0, |c, arg| c + arg_size(arg)) @@ -802,8 +766,8 @@ pub fn args_size(args: &[RedisValue]) -> usize { fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result { let args = command.args(); - let auth = if args.len() == 2 { - // has username and password + let (auth, setname) = if args.len() == 3 { + // has auth and setname let username = match args[0].as_bytes_str() { Some(username) => username, None => { @@ -822,11 +786,29 @@ fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result val, + None => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Invalid setname value. Expected string.", + )); + }, + }; - Some(Auth { username, password }) - } else if args.len() == 1 { - // just has a password (assume the default user) - let password = match args[0].as_bytes_str() { + (Some((username, password)), Some(name)) + } else if args.len() == 2 { + // has auth but no setname + let username = match args[0].as_bytes_str() { + Some(username) => username, + None => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Invalid username. 
Expected string.", + )); + }, + }; + let password = match args[1].as_bytes_str() { Some(password) => password, None => { return Err(RedisError::new( @@ -836,14 +818,28 @@ fn serialize_hello(command: &RedisCommand, version: &RespVersion) -> Result val, + None => { + return Err(RedisError::new( + RedisErrorKind::InvalidArgument, + "Invalid setname value. Expected string.", + )); + }, + }; + + (None, Some(name)) } else { - None + (None, None) }; Ok(Resp3Frame::Hello { version: version.clone(), auth, + setname, }) } @@ -870,7 +866,9 @@ pub fn command_to_resp3_frame(command: &RedisCommand) -> Result serialize_hello(command, version), + RedisCommandKind::_HelloAllCluster(ref version) | RedisCommandKind::_Hello(ref version) => { + serialize_hello(command, version) + }, _ => { let mut bulk_strings = Vec::with_capacity(args.len() + 2); @@ -951,6 +949,8 @@ pub fn encode_frame(inner: &Arc, command: &RedisCommand) -> Re #[cfg(test)] mod tests { + #![allow(dead_code)] + #![allow(unused_imports)] use super::*; use std::{collections::HashMap, time::Duration}; @@ -976,6 +976,7 @@ mod tests { } #[test] + #[cfg(feature = "i-memory")] fn should_parse_memory_stats() { // better from()/into() interfaces for frames coming in the next redis-protocol version... 
let input = frame_to_results(Resp3Frame::Array { @@ -1086,6 +1087,7 @@ mod tests { } #[test] + #[cfg(feature = "i-slowlog")] fn should_parse_slowlog_entries_redis_3() { // redis 127.0.0.1:6379> slowlog get 2 // 1) 1) (integer) 14 @@ -1144,6 +1146,7 @@ mod tests { } #[test] + #[cfg(feature = "i-slowlog")] fn should_parse_slowlog_entries_redis_4() { // redis 127.0.0.1:6379> slowlog get 2 // 1) 1) (integer) 14 @@ -1220,6 +1223,7 @@ mod tests { } #[test] + #[cfg(feature = "i-cluster")] fn should_parse_cluster_info() { let input: RedisValue = "cluster_state:fail cluster_slots_assigned:16384 diff --git a/src/router/centralized.rs b/src/router/centralized.rs index 21f24c10..d3053d06 100644 --- a/src/router/centralized.rs +++ b/src/router/centralized.rs @@ -1,7 +1,7 @@ use crate::{ error::RedisErrorKind, modules::inner::RedisClientInner, - prelude::{RedisError, Resp3Frame}, + prelude::RedisError, protocol::{ command::{RedisCommand, RouterResponse}, connection, @@ -13,6 +13,7 @@ use crate::{ router::{responses, utils, Connections, Written}, types::ServerConfig, }; +use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame}; use std::{collections::VecDeque, sync::Arc}; use tokio::task::JoinHandle; @@ -196,7 +197,7 @@ pub async fn initialize_connection( }; let mut transport = connection::create(inner, &server, None).await?; transport.setup(inner, None).await?; - let (server, _writer) = connection::split_and_initialize(inner, transport, false, spawn_reader_task)?; + let (server, _writer) = connection::split(inner, transport, false, spawn_reader_task)?; inner.notifications.broadcast_reconnect(server); *writer = Some(_writer); diff --git a/src/router/clustered.rs b/src/router/clustered.rs index 86197bcd..984486d2 100644 --- a/src/router/clustered.rs +++ b/src/router/clustered.rs @@ -1,7 +1,7 @@ +use crate::types::ClusterDiscoveryPolicy; use crate::{ error::{RedisError, RedisErrorKind}, interfaces, - interfaces::Resp3Frame, 
modules::inner::RedisClientInner, protocol::{ command::{ClusterErrorKind, RedisCommand, RedisCommandKind, RouterCommand, RouterResponse}, @@ -17,6 +17,7 @@ use crate::{ }; use futures::future::try_join_all; use parking_lot::Mutex; +use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, FrameKind, Resp3Frame as _Resp3Frame}; use std::{ collections::{BTreeSet, HashMap, VecDeque}, iter::repeat, @@ -428,7 +429,7 @@ pub async fn process_response_frame( } responses::check_and_set_unblocked_flag(inner, &command).await; - if frame.is_moved_or_ask_error() { + if frame.is_redirection() { _debug!( inner, "Recv MOVED or ASK error for `{}` from {}: {:?}", @@ -539,7 +540,12 @@ pub async fn connect_any( pub async fn cluster_slots_backchannel( inner: &Arc, cache: Option<&ClusterRouting>, + force_disconnect: bool, ) -> Result { + if force_disconnect { + inner.backchannel.write().await.check_and_disconnect(inner, None).await; + } + let (response, host) = { let command: RedisCommand = RedisCommandKind::ClusterSlots.into(); @@ -550,7 +556,7 @@ pub async fn cluster_slots_backchannel( let default_host = transport.default_host.clone(); _trace!(inner, "Sending backchannel CLUSTER SLOTS to {}", transport.server); - client_utils::apply_timeout( + client_utils::timeout( transport.request_response(command, inner.is_resp3()), inner.internal_command_timeout(), ) @@ -566,14 +572,23 @@ pub async fn cluster_slots_backchannel( } // failing the backchannel, try to connect to any of the user-provided hosts or the last known cluster nodes - let old_cache = cache.map(|cache| cache.slots()); + let old_cache = if let Some(policy) = inner.cluster_discovery_policy() { + match policy { + ClusterDiscoveryPolicy::ConfigEndpoint => None, + ClusterDiscoveryPolicy::UseCache => cache.map(|cache| cache.slots()), + } + } else { + cache.map(|cache| cache.slots()) + }; let command: RedisCommand = RedisCommandKind::ClusterSlots.into(); let (frame, host) = if let Some((frame, host)) = backchannel_result { - if 
frame.is_error() { + let kind = frame.kind(); + + if matches!(kind, FrameKind::SimpleError | FrameKind::BlobError) { // try connecting to any of the nodes, then try again let mut transport = connect_any(inner, old_cache).await?; - let frame = client_utils::apply_timeout( + let frame = client_utils::timeout( transport.request_response(command, inner.is_resp3()), inner.internal_command_timeout(), ) @@ -589,7 +604,7 @@ pub async fn cluster_slots_backchannel( } else { // try connecting to any of the nodes, then try again let mut transport = connect_any(inner, old_cache).await?; - let frame = client_utils::apply_timeout( + let frame = client_utils::timeout( transport.request_response(command, inner.is_resp3()), inner.internal_command_timeout(), ) @@ -643,8 +658,14 @@ pub async fn sync( _debug!(inner, "Synchronizing cluster state."); if let Connections::Clustered { cache, writers } = connections { - // send `CLUSTER SLOTS` to any of the cluster nodes via a backchannel - let state = cluster_slots_backchannel(inner, Some(&*cache)).await?; + // force disconnect after a connection unexpectedly closes or goes unresponsive + let force_disconnect = writers.is_empty() + || writers + .values() + .find_map(|t| if t.is_working() { None } else { Some(true) }) + .unwrap_or(false); + + let state = cluster_slots_backchannel(inner, Some(&*cache), force_disconnect).await?; _debug!(inner, "Cluster routing state: {:?}", state.pretty()); // update the cached routing table inner @@ -683,7 +704,7 @@ pub async fn sync( let mut transport = connection::create(&_inner, &server, None).await?; transport.setup(&_inner, None).await?; - let (server, writer) = connection::split_and_initialize(&_inner, transport, false, spawn_reader_task)?; + let (server, writer) = connection::split(&_inner, transport, false, spawn_reader_task)?; inner.notifications.broadcast_reconnect(server.clone()); _new_writers.lock().insert(server, writer); Ok::<_, RedisError>(()) diff --git a/src/router/commands.rs 
b/src/router/commands.rs index 38d9c931..da282f7d 100644 --- a/src/router/commands.rs +++ b/src/router/commands.rs @@ -6,13 +6,13 @@ use crate::{ types::{ClientState, ClientUnblockFlag, ClusterHash, Server}, utils as client_utils, }; -use redis_protocol::resp3::types::Frame as Resp3Frame; +use crate::{protocol::command::RedisCommandKind, types::Blocking}; +use redis_protocol::resp3::types::BytesFrame as Resp3Frame; use std::sync::Arc; use tokio::sync::oneshot::Sender as OneshotSender; #[cfg(feature = "transactions")] use crate::router::transactions; -use crate::{protocol::command::RedisCommandKind, types::Blocking}; #[cfg(feature = "full-tracing")] use tracing_futures::Instrument; @@ -287,7 +287,7 @@ async fn write_with_backpressure_t( ) -> Result<(), RedisError> { if inner.should_trace() { command.take_queued_span(); - let span = fspan!(command, inner.full_tracing_span_level(), "write_command"); + let span = fspan!(command, inner.full_tracing_span_level(), "fred.write"); write_with_backpressure(inner, router, command, force_pipeline) .instrument(span) .await @@ -404,7 +404,7 @@ async fn process_replica_reconnect( replica: bool, ) -> Result<(), RedisError> { if replica { - let result = utils::sync_replicas_with_policy(inner, router).await; + let result = utils::sync_replicas_with_policy(inner, router, false).await; if let Some(tx) = tx { let _ = tx.send(result.map(|_| Resp3Frame::Null)); } @@ -468,8 +468,9 @@ async fn process_sync_replicas( inner: &Arc, router: &mut Router, tx: OneshotSender>, + reset: bool, ) -> Result<(), RedisError> { - let result = utils::sync_replicas_with_policy(inner, router).await; + let result = utils::sync_replicas_with_policy(inner, router, reset).await; let _ = tx.send(result); Ok(()) } @@ -500,7 +501,11 @@ fn process_connections( router: &Router, tx: OneshotSender>, ) -> Result<(), RedisError> { - let connections = router.connections.active_connections(); + #[allow(unused_mut)] + let mut connections = 
router.connections.active_connections(); + #[cfg(feature = "replicas")] + connections.extend(router.replicas.writers.keys().cloned()); + _debug!(inner, "Active connections: {:?}", connections); let _ = tx.send(connections); Ok(()) @@ -528,7 +533,7 @@ async fn process_command( RouterCommand::Command(command) => process_normal_command(inner, router, command).await, RouterCommand::Connections { tx } => process_connections(inner, router, tx), #[cfg(feature = "replicas")] - RouterCommand::SyncReplicas { tx } => process_sync_replicas(inner, router, tx).await, + RouterCommand::SyncReplicas { tx, reset } => process_sync_replicas(inner, router, tx, reset).await, #[cfg(not(feature = "replicas"))] RouterCommand::Reconnect { server, force, tx } => process_reconnect(inner, router, server, force, tx).await, #[cfg(feature = "replicas")] diff --git a/src/router/mod.rs b/src/router/mod.rs index 40740cf6..f1323000 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -6,8 +6,7 @@ use crate::{ connection::{self, CommandBuffer, Counters, RedisWriter}, types::{ClusterRouting, Server}, }, - trace, - utils as client_utils, + trace, utils as client_utils, }; use futures::future::try_join_all; use semver::Version; @@ -67,17 +66,21 @@ pub enum Written { impl fmt::Display for Written { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", match self { - Written::Backpressure(_) => "Backpressure", - Written::Sent(_) => "Sent", - Written::SentAll => "SentAll", - Written::Disconnected(_) => "Disconnected", - Written::Ignore => "Ignore", - Written::NotFound(_) => "NotFound", - Written::Error(_) => "Error", - #[cfg(feature = "replicas")] - Written::Fallback(_) => "Fallback", - }) + write!( + f, + "{}", + match self { + Written::Backpressure(_) => "Backpressure", + Written::Sent(_) => "Sent", + Written::SentAll => "SentAll", + Written::Disconnected(_) => "Disconnected", + Written::Ignore => "Ignore", + Written::NotFound(_) => "NotFound", + Written::Error(_) => "Error", + 
#[cfg(feature = "replicas")] + Written::Fallback(_) => "Fallback", + } + ) } } @@ -132,7 +135,7 @@ pub enum Connections { }, Clustered { /// The cached cluster routing table used for mapping keys to server IDs. - cache: ClusterRouting, + cache: ClusterRouting, /// A map of server IDs and connections. writers: HashMap, }, @@ -153,7 +156,7 @@ impl Connections { pub fn new_clustered() -> Self { Connections::Clustered { - cache: ClusterRouting::new(), + cache: ClusterRouting::new(), writers: HashMap::new(), } } @@ -251,6 +254,7 @@ impl Connections { return Err(RedisError::new(RedisErrorKind::Config, "Invalid client configuration.")); }; + // TODO clean this up if result.is_ok() { if let Some(version) = self.server_version() { inner.server_state.write().kind.set_server_version(version); @@ -453,7 +457,7 @@ impl Connections { let mut transport = connection::create(inner, server, None).await?; transport.setup(inner, None).await?; - let (server, writer) = connection::split_and_initialize(inner, transport, false, clustered::spawn_reader_task)?; + let (server, writer) = connection::split(inner, transport, false, clustered::spawn_reader_task)?; writers.insert(server, writer); Ok(()) } else { @@ -496,12 +500,12 @@ pub struct Router { /// The connection map for each deployment type. pub connections: Connections, /// The inner client state associated with the router. - pub inner: Arc, + pub inner: Arc, /// Storage for commands that should be deferred or retried later. - pub buffer: VecDeque, + pub buffer: VecDeque, /// The replica routing interface. 
#[cfg(feature = "replicas")] - pub replicas: Replicas, + pub replicas: Replicas, } impl Router { @@ -949,7 +953,7 @@ impl Router { _ => {}, }; - let _ = client_utils::apply_timeout(rx, self.inner.internal_command_timeout()).await??; + let _ = client_utils::timeout(rx, self.inner.internal_command_timeout()).await??; } Ok(()) diff --git a/src/router/replicas.rs b/src/router/replicas.rs index 889c9d0c..99e060ed 100644 --- a/src/router/replicas.rs +++ b/src/router/replicas.rs @@ -40,11 +40,11 @@ pub struct ReplicaConfig { /// Whether the client should lazily connect to replica nodes. /// /// Default: `true` - pub lazy_connections: bool, + pub lazy_connections: bool, /// An optional interface for filtering available replica nodes. /// /// Default: `None` - pub filter: Option>, + pub filter: Option>, /// Whether the client should ignore errors from replicas that occur when the max reconnection count is reached. /// /// Default: `true` @@ -52,11 +52,11 @@ pub struct ReplicaConfig { /// The number of times a command can fail with a replica connection error before being sent to a primary node. /// /// Default: `0` (unlimited) - pub connection_error_count: u32, + pub connection_error_count: u32, /// Whether the client should use the associated primary node if no replica exists that can serve a command. 
/// /// Default: `true` - pub primary_fallback: bool, + pub primary_fallback: bool, } #[cfg(feature = "replicas")] @@ -88,11 +88,11 @@ impl Eq for ReplicaConfig {} impl Default for ReplicaConfig { fn default() -> Self { ReplicaConfig { - lazy_connections: true, - filter: None, + lazy_connections: true, + filter: None, ignore_reconnection_errors: true, - connection_error_count: 0, - primary_fallback: true, + connection_error_count: 0, + primary_fallback: true, } } } @@ -242,8 +242,8 @@ impl ReplicaSet { #[cfg(feature = "replicas")] pub struct Replicas { pub(crate) writers: HashMap, - routing: ReplicaSet, - buffer: VecDeque, + routing: ReplicaSet, + buffer: VecDeque, } #[cfg(feature = "replicas")] @@ -253,7 +253,7 @@ impl Replicas { Replicas { writers: HashMap::new(), routing: ReplicaSet::new(), - buffer: VecDeque::new(), + buffer: VecDeque::new(), } } @@ -303,9 +303,9 @@ impl Replicas { let (_, writer) = if inner.config.server.is_clustered() { transport.readonly(inner, None).await?; - connection::split_and_initialize(inner, transport, true, clustered::spawn_reader_task)? + connection::split(inner, transport, true, clustered::spawn_reader_task)? } else { - connection::split_and_initialize(inner, transport, true, centralized::spawn_reader_task)? + connection::split(inner, transport, true, centralized::spawn_reader_task)? 
}; self.writers.insert(replica.clone(), writer); diff --git a/src/router/responses.rs b/src/router/responses.rs index 485c6048..0c09239f 100644 --- a/src/router/responses.rs +++ b/src/router/responses.rs @@ -6,15 +6,19 @@ use crate::{ types::{ClientState, KeyspaceEvent, Message, RedisKey, RedisValue}, utils, }; -use redis_protocol::resp3::{prelude::PUBSUB_PUSH_PREFIX, types::Frame as Resp3Frame}; +use redis_protocol::resp3::types::FrameKind; +use redis_protocol::{ + resp3::types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame}, + types::PUBSUB_PUSH_PREFIX, +}; use std::{str, sync::Arc}; -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] use crate::types::Invalidation; const KEYSPACE_PREFIX: &str = "__keyspace@"; const KEYEVENT_PREFIX: &str = "__keyevent@"; -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] const INVALIDATION_CHANNEL: &str = "__redis__:invalidate"; fn parse_keyspace_notification(channel: &str, message: &RedisValue) -> Option { @@ -29,15 +33,9 @@ fn parse_keyspace_notification(channel: &str, message: &RedisValue) -> Option() { - Ok(db) => db, - Err(_) => return None, - }; + let db = suffix[0].replace("__", "").parse::().ok()?; let operation = suffix[1].to_owned(); - let key: RedisKey = match message.clone().try_into() { - Ok(k) => k, - Err(_) => return None, - }; + let key: RedisKey = message.clone().try_into().ok()?; Some(KeyspaceEvent { db, key, operation }) } else if channel.starts_with(KEYSPACE_PREFIX) { @@ -51,15 +49,9 @@ fn parse_keyspace_notification(channel: &str, message: &RedisValue) -> Option() { - Ok(db) => db, - Err(_) => return None, - }; + let db = suffix[0].replace("__", "").parse::().ok()?; let key: RedisKey = suffix[1].to_owned().into(); - let operation = match message.as_string() { - Some(k) => k, - None => return None, - }; + let operation = message.as_string()?; Some(KeyspaceEvent { db, key, operation }) } else { @@ -67,48 +59,7 @@ fn parse_keyspace_notification(channel: &str, message: 
&RedisValue) -> Option bool { - s == "message" || s == "pmessage" || s == "smessage" -} - -/// Check for the various pubsub formats for both RESP2 and RESP3. -fn check_pubsub_formats(frame: &Resp3Frame) -> (bool, bool) { - if frame.is_pubsub_message() { - return (true, false); - } - - // otherwise check for RESP2 formats automatically converted to RESP3 by the codec - let data = match frame { - Resp3Frame::Array { ref data, .. } => data, - Resp3Frame::Push { ref data, .. } => data, - _ => return (false, false), - }; - - // RESP2 and RESP3 differ in that RESP3 contains an additional "pubsub" string frame at the start - // so here we check the frame contents according to the RESP2 pubsub rules - let resp3 = (data.len() == 3 || data.len() == 4) && data[0].as_str().map(check_message_prefix).unwrap_or(false); - - (resp3, false) -} - -/// Try to parse the frame in either RESP2 or RESP3 pubsub formats. -fn parse_pubsub_message( - server: &Server, - frame: Resp3Frame, - is_resp3: bool, - is_resp2: bool, -) -> Result { - if is_resp3 { - protocol_utils::frame_to_pubsub(server, frame) - } else if is_resp2 { - protocol_utils::parse_as_resp2_pubsub(server, frame) - } else { - Err(RedisError::new(RedisErrorKind::Protocol, "Invalid pubsub message.")) - } -} - -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] fn broadcast_pubsub_invalidation(inner: &Arc, message: Message, server: &Server) { if let Some(invalidation) = Invalidation::from_message(message, server) { inner.notifications.broadcast_invalidation(invalidation); @@ -120,20 +71,20 @@ fn broadcast_pubsub_invalidation(inner: &Arc, message: Message } } -#[cfg(not(feature = "client-tracking"))] +#[cfg(not(feature = "i-tracking"))] fn broadcast_pubsub_invalidation(_: &Arc, _: Message, _: &Server) {} -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] fn is_pubsub_invalidation(message: &Message) -> bool { message.channel == INVALIDATION_CHANNEL } -#[cfg(not(feature = "client-tracking"))] 
+#[cfg(not(feature = "i-tracking"))] fn is_pubsub_invalidation(_: &Message) -> bool { false } -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] fn broadcast_resp3_invalidation(inner: &Arc, server: &Server, frame: Resp3Frame) { if let Resp3Frame::Push { mut data, .. } = frame { if data.len() != 2 { @@ -141,10 +92,10 @@ fn broadcast_resp3_invalidation(inner: &Arc, server: &Server, } // RESP3 example: Push { data: [BlobString { data: b"invalidate", attributes: None }, Array { data: - // [BlobString { data: b"foo", attributes: None }], attributes: None }], attributes: None } + // [BlobString { data: b"foo", attributes: None }], attributes: None }], attributes: None } if let Resp3Frame::Array { data, .. } = data[1].take() { inner.notifications.broadcast_invalidation(Invalidation { - keys: data + keys: data .into_iter() .filter_map(|f| f.as_bytes().map(|b| b.into())) .collect(), @@ -154,13 +105,13 @@ fn broadcast_resp3_invalidation(inner: &Arc, server: &Server, } } -#[cfg(not(feature = "client-tracking"))] +#[cfg(not(feature = "i-tracking"))] fn broadcast_resp3_invalidation(_: &Arc, _: &Server, _: Resp3Frame) {} -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] fn is_resp3_invalidation(frame: &Resp3Frame) -> bool { // RESP3 example: Push { data: [BlobString { data: b"invalidate", attributes: None }, Array { data: - // [BlobString { data: b"foo", attributes: None }], attributes: None }], attributes: None } + // [BlobString { data: b"foo", attributes: None }], attributes: None }], attributes: None } if let Resp3Frame::Push { ref data, .. 
} = frame { data .first() @@ -205,7 +156,7 @@ fn is_subscription_response(frame: &Resp3Frame) -> bool { } } -#[cfg(not(feature = "client-tracking"))] +#[cfg(not(feature = "i-tracking"))] fn is_resp3_invalidation(_: &Resp3Frame) -> bool { false } @@ -223,8 +174,9 @@ pub fn check_pubsub_message(inner: &Arc, server: &Server, fram return None; } - let (is_resp3_pubsub, is_resp2_pubsub) = check_pubsub_formats(&frame); - if !is_resp3_pubsub && !is_resp2_pubsub { + let is_pubsub = + frame.is_normal_pubsub_message() || frame.is_pattern_pubsub_message() || frame.is_shard_pubsub_message(); + if !is_pubsub { return Some(frame); } @@ -233,9 +185,9 @@ pub fn check_pubsub_message(inner: &Arc, server: &Server, fram let parsed_frame = if let Some(ref span) = span { #[allow(clippy::let_unit_value)] let _guard = span.enter(); - parse_pubsub_message(server, frame, is_resp3_pubsub, is_resp2_pubsub) + protocol_utils::frame_to_pubsub(server, frame) } else { - parse_pubsub_message(server, frame, is_resp3_pubsub, is_resp2_pubsub) + protocol_utils::frame_to_pubsub(server, frame) }; let message = match parsed_frame { @@ -270,7 +222,7 @@ pub async fn check_and_set_unblocked_flag(inner: &Arc, command /// Parse the response frame to see if it's an auth error. 
fn parse_redis_auth_error(frame: &Resp3Frame) -> Option { - if frame.is_error() { + if matches!(frame.kind(), FrameKind::SimpleError | FrameKind::BlobError) { match protocol_utils::frame_to_results(frame.clone()) { Ok(_) => None, Err(e) => match e.kind() { diff --git a/src/router/sentinel.rs b/src/router/sentinel.rs index d044e756..c0bf7c40 100644 --- a/src/router/sentinel.rs +++ b/src/router/sentinel.rs @@ -140,10 +140,10 @@ async fn read_sentinels( ) -> Result, RedisError> { let service_name = read_service_name(inner)?; - let command = RedisCommand::new(RedisCommandKind::Sentinel, vec![ - static_val!(SENTINELS), - service_name.into(), - ]); + let command = RedisCommand::new( + RedisCommandKind::Sentinel, + vec![static_val!(SENTINELS), service_name.into()], + ); let frame = sentinel.request_response(command, false).await?; let response = stry!(protocol_utils::frame_to_results(frame)); _trace!(inner, "Read sentinel `sentinels` response: {:?}", response); @@ -164,7 +164,7 @@ async fn connect_to_sentinel(inner: &Arc) -> Result Result { let service_name = read_service_name(inner)?; - let command = RedisCommand::new(RedisCommandKind::Sentinel, vec![ - static_val!(GET_MASTER_ADDR_BY_NAME), - service_name.into(), - ]); - let frame = utils::apply_timeout( + let command = RedisCommand::new( + RedisCommandKind::Sentinel, + vec![static_val!(GET_MASTER_ADDR_BY_NAME), service_name.into()], + ); + let frame = utils::timeout( sentinel.request_response(command, false), inner.internal_command_timeout(), ) @@ -299,7 +299,7 @@ async fn update_cached_client_state( .update_sentinel_nodes(&transport.server, sentinels); let _ = update_sentinel_backchannel(inner, &transport).await; - let (_, _writer) = connection::split_and_initialize(inner, transport, false, centralized::spawn_reader_task)?; + let (_, _writer) = connection::split(inner, transport, false, centralized::spawn_reader_task)?; *writer = Some(_writer); Ok(()) } @@ -323,7 +323,7 @@ pub async fn initialize_connection( let mut 
transport = discover_primary_node(inner, &mut sentinel).await?; let server = transport.server.clone(); - utils::apply_timeout( + utils::timeout( Box::pin(async { check_primary_node_role(inner, &mut transport).await?; update_cached_client_state(inner, writer, sentinel, transport).await?; diff --git a/src/router/transactions.rs b/src/router/transactions.rs index 87380477..a7bb0c4e 100644 --- a/src/router/transactions.rs +++ b/src/router/transactions.rs @@ -62,7 +62,7 @@ async fn write_command( return Ok(TransactionResponse::Retry(e)); } - match client_utils::apply_timeout(rx, timeout_dur).await? { + match client_utils::timeout(rx, timeout_dur).await? { RouterResponse::Continue => Ok(TransactionResponse::Continue), RouterResponse::Ask((slot, server, _)) => { Ok(TransactionResponse::Redirection((ClusterErrorKind::Ask, slot, server))) diff --git a/src/router/utils.rs b/src/router/utils.rs index 73758294..a20afaa3 100644 --- a/src/router/utils.rs +++ b/src/router/utils.rs @@ -369,7 +369,7 @@ pub async fn send_asking_with_policy( continue; } } else { - match client_utils::apply_timeout(rx, inner.internal_command_timeout()).await { + match client_utils::timeout(rx, inner.internal_command_timeout()).await { Ok(Err(e)) => { // error writing the command _debug!(inner, "Reconnect once after error from ASKING: {:?}", e); @@ -402,7 +402,15 @@ pub async fn send_asking_with_policy( } #[cfg(feature = "replicas")] -async fn sync_cluster_replicas(inner: &Arc, router: &mut Router) -> Result<(), RedisError> { +async fn sync_cluster_replicas( + inner: &Arc, + router: &mut Router, + reset: bool, +) -> Result<(), RedisError> { + if reset { + router.replicas.clear_connections(inner).await?; + } + if inner.config.server.is_clustered() { router.sync_cluster().await } else { @@ -412,7 +420,11 @@ async fn sync_cluster_replicas(inner: &Arc, router: &mut Route /// Repeatedly try to sync the cluster state, reconnecting as needed until the max reconnection attempts is reached. 
#[cfg(feature = "replicas")] -pub async fn sync_replicas_with_policy(inner: &Arc, router: &mut Router) -> Result<(), RedisError> { +pub async fn sync_replicas_with_policy( + inner: &Arc, + router: &mut Router, + reset: bool, +) -> Result<(), RedisError> { let mut delay = Duration::from_millis(0); loop { @@ -421,7 +433,7 @@ pub async fn sync_replicas_with_policy(inner: &Arc, router: &m inner.wait_with_interrupt(delay).await?; } - if let Err(e) = sync_cluster_replicas(inner, router).await { + if let Err(e) = sync_cluster_replicas(inner, router, reset).await { _warn!(inner, "Error syncing replicas: {:?}", e); if e.should_not_reconnect() { @@ -497,11 +509,11 @@ pub fn defer_reconnect(inner: &Arc) { } } else { let cmd = RouterCommand::Reconnect { - server: None, - tx: None, - force: false, + server: None, + tx: None, + force: false, #[cfg(feature = "replicas")] - replica: false, + replica: false, }; if let Err(_) = interfaces::send_to_router(inner, cmd) { _warn!(inner, "Failed to send deferred cluster sync.") diff --git a/src/trace/README.md b/src/trace/README.md index d5c9d0b2..bf750d06 100644 --- a/src/trace/README.md +++ b/src/trace/README.md @@ -1,44 +1,48 @@ Tracing ======= -Tracing is implemented via the [tracing](https://github.com/tokio-rs/tracing) crate. This page describes the spans used by the client and the fields emitted on each of the spans. +Tracing is implemented via the [tracing](https://github.com/tokio-rs/tracing) crate. This page describes the spans used +by the client and the fields emitted on each of the spans. ![](../../tests/screenshot.png) -See the [pipeline test](../../bin/pipeline_test) application for an example showing how to configure tracing with a local Jaeger instance. This crate ships with a [small example](../../tests/docker/compose/jaeger.yml) that uses `docker-compose` to run a local Jaeger instance. +See the [benchmark](../../bin/benchmark) application for an example showing how to configure tracing with a +local Jaeger instance. 
This crate ships with a [small example](../../tests/docker/compose/jaeger.yml) that +uses `docker-compose` to run a local Jaeger instance. ## Spans -This table shows the spans emitted by the client. The `Partial Trace` column describes whether the span will appear when only the `partial-tracing` feature flag is enabled. +This table shows the spans emitted by the client. The `Partial Trace` column describes whether the span will appear when +only the `partial-tracing` feature flag is enabled. -| Name | Description | Partial Trace | -|-------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| -| redis_command | The top level span used for all redis commands. | x | -| prepare_args | Time spent checking and preparing arguments. | | -| queued | Time spent waiting in the in-memory queue before being sent to the server. Pipelining and backpressure settings can drastically affect this. | | -| write_command | Time spent routing and writing a command. | | -| wait_for_response | Time spent waiting on a response from the server, starting from when the first byte is fed to the socket and ending when the response has been decoded. | x | -| parse_pubsub | Time spent parsing a publish-subscribe message. | | +| Name | Description | Partial Trace | +|--------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| fred.command | The top level span used for all redis commands. | x | +| fred.prepare | Time spent checking and preparing arguments. | | +| fred.queued | Time spent waiting in the in-memory queue before being sent to the server. Pipelining and backpressure settings can significantly affect this. | | +| fred.write | Time spent routing and writing a command to the socket. 
| | +| fred.rtt | Time spent waiting on a response from the server, starting from when the first byte is fed to the socket and ending when the full response has been decoded. | x | +| fred.pubsub | Time spent parsing a publish-subscribe message. | | -Tracing levels for the two tracing features can be configured separately through the `TracingConfig`. +Tracing levels for the two tracing features can be configured separately through the `TracingConfig`. ## Events -| Name | Description | -|-------------------------|-------------------------------------------------------------------------------| -| backpressure | Emitted when a command hits backpressure due to too many in-flight commands. | - -## Fields - -| Name | Description | -|------------------|---------------------------------------------------------------------| -| client_id | The ID of the client instance (`client.id()`). | -| cmd | The redis command name. | -| req_size | The size (in bytes) of the command's arguments. | -| res_size | The size (in bytes) of the command's response. | -| num_args | The number of arguments being sent to the server. | -| buf_len | The length of the in-memory command queue. | -| pipelined | Whether or not a command was pipelined. | -| flush | Whether or not the socket was flushed while sending a command. | -| channel | The channel on which a pubsub message was received. | -| duration_ms | The duration of a pause, in milliseconds, of a backpressure event. | \ No newline at end of file +| Name | Description | +|-------------------|------------------------------------------------------------------------------| +| fred.backpressure | Emitted when a command hits backpressure due to too many in-flight commands. | + +## Attributes + +| Name | Description | +|---------------|--------------------------------------------------------------------| +| client.id | The ID of the client instance (`client.id()`). | +| client.queued | The length of the in-memory command queue. 
| +| cmd.name | The redis command name. | +| cmd.req | The size (in bytes) of the command's arguments. | +| cmd.res | The size (in bytes) of the command's response. | +| cmd.args | The number of arguments being sent to the server. | +| cmd.pipelined | Whether the command was pipelined. | +| cmd.flush | Whether the socket was flushed while sending the command. | +| msg.channel | The channel on which a pubsub message was received. | +| duration | The duration of a pause, in milliseconds, of a backpressure event. | \ No newline at end of file diff --git a/src/trace/disabled.rs b/src/trace/disabled.rs index de433a7a..566c3029 100644 --- a/src/trace/disabled.rs +++ b/src/trace/disabled.rs @@ -5,7 +5,7 @@ use crate::modules::inner::RedisClientInner; #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] use crate::protocol::command::RedisCommand; #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] -use redis_protocol::resp3::types::Frame; +use redis_protocol::resp3::types::BytesFrame as Frame; #[cfg(not(any(feature = "full-tracing", feature = "partial-tracing")))] use std::sync::Arc; diff --git a/src/trace/enabled.rs b/src/trace/enabled.rs index e0867208..cc397abd 100644 --- a/src/trace/enabled.rs +++ b/src/trace/enabled.rs @@ -1,8 +1,5 @@ -use crate::{ - modules::inner::RedisClientInner, - protocol::{command::RedisCommand, utils as protocol_utils}, -}; -use redis_protocol::resp3::types::Frame; +use crate::{modules::inner::RedisClientInner, protocol::command::RedisCommand}; +use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, Resp3Frame as _Resp3Frame}; use std::{fmt, ops::Deref, sync::Arc}; pub use tracing::span::Span; use tracing::{event, field::Empty, Id as TraceId, Level}; @@ -12,12 +9,12 @@ use crate::trace::disabled::Span as FakeSpan; /// Struct for storing spans used by the client when sending a command. 
pub struct CommandTraces { - pub cmd: Option, + pub cmd: Option, pub network: Option, #[cfg(feature = "full-tracing")] - pub queued: Option, + pub queued: Option, #[cfg(not(feature = "full-tracing"))] - pub queued: Option, + pub queued: Option, } /// Enter the network span when the command is dropped after receiving a response. @@ -32,8 +29,8 @@ impl Drop for CommandTraces { impl Default for CommandTraces { fn default() -> Self { CommandTraces { - cmd: None, - queued: None, + cmd: None, + queued: None, network: None, } } @@ -47,31 +44,31 @@ impl fmt::Debug for CommandTraces { pub fn set_network_span(inner: &Arc, command: &mut RedisCommand, flush: bool) { trace!("Setting network span from command {}", command.debug_id()); - let span = fspan!(command, inner.tracing_span_level(), "wait_for_response", flush); + let span = fspan!(command, inner.tracing_span_level(), "fred.rtt", "cmd.flush" = flush); span.in_scope(|| {}); command.traces.network = Some(span); } -pub fn record_response_size(span: &Span, frame: &Frame) { +pub fn record_response_size(span: &Span, frame: &Resp3Frame) { #[allow(clippy::needless_borrows_for_generic_args)] - span.record("res_size", &protocol_utils::resp3_frame_size(frame)); + span.record("cmd.res", &frame.encode_len()); } pub fn create_command_span(inner: &Arc) -> Span { span_lvl!( inner.tracing_span_level(), - "redis_command", + "fred.command", module = "fred", - client_id = &inner.id.deref(), - cmd = Empty, - req_size = Empty, - res_size = Empty + "client.id" = &inner.id.deref(), + "cmd.name" = Empty, + "cmd.req" = Empty, + "cmd.res" = Empty ) } #[cfg(feature = "full-tracing")] pub fn create_args_span(parent: Option, inner: &Arc) -> Span { - span_lvl!(inner.full_tracing_span_level(), parent: parent, "prepare_args", num_args = Empty) + span_lvl!(inner.full_tracing_span_level(), parent: parent, "fred.prepare", "cmd.args" = Empty) } #[cfg(not(feature = "full-tracing"))] @@ -91,16 +88,16 @@ pub fn create_queued_span(_parent: Option, _inner: &Arc, 
frame: &Frame) -> Option { +pub fn create_pubsub_span(inner: &Arc, frame: &Resp3Frame) -> Option { if inner.should_trace() { let span = span_lvl!( inner.full_tracing_span_level(), parent: None, - "parse_pubsub", + "fred.pubsub", module = "fred", - client_id = &inner.id.deref(), - res_size = &protocol_utils::resp3_frame_size(frame), - channel = Empty + "client.id" = &inner.id.deref(), + "cmd.res" = &frame.encode_len(), + "msg.channel" = Empty ); Some(span) @@ -117,8 +114,8 @@ pub fn create_pubsub_span(_inner: &Arc, _frame: &Frame) -> Opt pub fn backpressure_event(cmd: &RedisCommand, duration: Option) { let id = cmd.traces.cmd.as_ref().and_then(|c| c.id()); if let Some(duration) = duration { - event!(parent: id, Level::INFO, "backpressure duration_ms={}", duration); + event!(parent: id, Level::INFO, "fred.backpressure duration={}", duration); } else { - event!(parent: id, Level::INFO, "backpressure drain"); + event!(parent: id, Level::INFO, "fred.backpressure drain"); } } diff --git a/src/types/args.rs b/src/types/args.rs index 69b7c286..5ae77c5b 100644 --- a/src/types/args.rs +++ b/src/types/args.rs @@ -2,7 +2,7 @@ use crate::{ error::{RedisError, RedisErrorKind}, interfaces::{ClientLike, Resp3Frame}, protocol::{connection::OK, utils as protocol_utils}, - types::{FromRedis, FromRedisKey, Function, GeoPosition, GeoRadiusInfo, Server, XReadResponse, XReadValue, QUEUED}, + types::{FromRedis, FromRedisKey, Server, QUEUED}, utils, }; use bytes::Bytes; @@ -21,6 +21,12 @@ use std::{ str, }; +#[cfg(feature = "i-scripts")] +use crate::types::Function; +#[cfg(feature = "i-geo")] +use crate::types::{GeoPosition, GeoRadiusInfo}; +#[cfg(feature = "i-streams")] +use crate::types::{XReadResponse, XReadValue}; #[cfg(feature = "serde-json")] use serde_json::Value; @@ -82,6 +88,7 @@ impl StringOrNumber { StringOrNumber::String(utils::static_str(s)) } + #[cfg(feature = "i-streams")] pub(crate) fn into_arg(self) -> RedisValue { match self { StringOrNumber::String(s) => 
RedisValue::String(s), @@ -656,7 +663,7 @@ impl RedisValue { Self::from_static_str(OK) } - /// Whether or not the value is a simple string OK value. + /// Whether the value is a simple string OK value. pub fn is_ok(&self) -> bool { match *self { RedisValue::String(ref s) => *s == OK, @@ -711,7 +718,7 @@ impl RedisValue { matches!(*self, RedisValue::Bytes(_)) } - /// Whether or not the value is a boolean value or can be parsed as a boolean value. + /// Whether the value is a boolean value or can be parsed as a boolean value. #[allow(clippy::match_like_matches_macro)] pub fn is_boolean(&self) -> bool { match *self { @@ -728,7 +735,7 @@ impl RedisValue { } } - /// Whether or not the inner value is a double or can be parsed as a double. + /// Whether the inner value is a double or can be parsed as a double. pub fn is_double(&self) -> bool { match *self { RedisValue::Double(_) => true, @@ -742,12 +749,12 @@ impl RedisValue { matches!(*self, RedisValue::Queued) } - /// Whether or not the value is an array or map. + /// Whether the value is an array or map. pub fn is_aggregate_type(&self) -> bool { matches!(*self, RedisValue::Array(_) | RedisValue::Map(_)) } - /// Whether or not the value is a `RedisMap`. + /// Whether the value is a `RedisMap`. /// /// See [is_maybe_map](Self::is_maybe_map) for a function that also checks for arrays that likely represent a map in /// RESP2 mode. @@ -755,7 +762,7 @@ impl RedisValue { matches!(*self, RedisValue::Map(_)) } - /// Whether or not the value is a `RedisMap` or an array with an even number of elements where each even-numbered + /// Whether the value is a `RedisMap` or an array with an even number of elements where each even-numbered /// element is not an aggregate type. /// /// RESP2 and RESP3 encode maps differently, and this function can be used to duck-type maps across protocol @@ -768,7 +775,7 @@ impl RedisValue { } } - /// Whether or not the value is an array. + /// Whether the value is an array. 
pub fn is_array(&self) -> bool { matches!(*self, RedisValue::Array(_)) } @@ -1207,6 +1214,8 @@ impl RedisValue { /// /// See the [XREAD](crate::interfaces::StreamsInterface::xread) (or `XREADGROUP`) documentation for more /// information. + #[cfg(feature = "i-streams")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] pub fn into_xread_response(self) -> Result, RedisError> where K1: FromRedisKey + Hash + Eq, @@ -1220,6 +1229,8 @@ impl RedisValue { /// A utility function to convert the response from `XCLAIM`, etc into a type with a less verbose type declaration. /// /// This function supports responses in both RESP2 and RESP3 formats. + #[cfg(feature = "i-streams")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] pub fn into_xread_value(self) -> Result>, RedisError> where K: FromRedisKey + Hash + Eq, @@ -1235,6 +1246,8 @@ impl RedisValue { /// /// Note: the new (as of Redis 7.x) return value containing message PIDs that were deleted from the PEL are dropped. /// Callers should use `xautoclaim` instead if this data is needed. + #[cfg(feature = "i-streams")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] pub fn into_xautoclaim_values(self) -> Result<(String, Vec>), RedisError> where K: FromRedisKey + Hash + Eq, @@ -1259,6 +1272,8 @@ impl RedisValue { } /// Parse the value as the response from `FUNCTION LIST`, including only functions with the provided library `name`. + #[cfg(feature = "i-scripts")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] pub fn as_functions(&self, name: &str) -> Result, RedisError> { utils::value_to_functions(self, name) } @@ -1266,6 +1281,8 @@ impl RedisValue { /// Convert the value into a `GeoPosition`, if possible. /// /// Null values are returned as `None` to work more easily with the result of the `GEOPOS` command. 
+ #[cfg(feature = "i-geo")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] pub fn as_geo_position(&self) -> Result, RedisError> { if self.is_null() { Ok(None) @@ -1276,6 +1293,8 @@ impl RedisValue { /// Parse the value as the response to any of the relevant GEO commands that return an array of /// [GeoRadiusInfo](crate::types::GeoRadiusInfo) values, such as `GEOSEARCH`, GEORADIUS`, etc. + #[cfg(feature = "i-geo")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] pub fn into_geo_radius_result( self, withcoord: bool, @@ -1305,7 +1324,7 @@ impl RedisValue { R::from_value(self) } - /// Whether or not the value can be hashed. + /// Whether the value can be hashed. /// /// Some use cases require using `RedisValue` types as keys in a `HashMap`, etc. Trying to do so with an aggregate /// type can panic, and this function can be used to more gracefully handle this situation. diff --git a/src/types/client.rs b/src/types/client.rs index bb260a7f..098ccb50 100644 --- a/src/types/client.rs +++ b/src/types/client.rs @@ -1,7 +1,7 @@ use crate::utils; use bytes_utils::Str; -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] use crate::{ error::{RedisError, RedisErrorKind}, types::{Message, RedisKey, RedisValue, Server}, @@ -50,10 +50,13 @@ impl ClientKillFilter { ClientKillFilter::User(ref user) => ("USER", user.into()), ClientKillFilter::Addr(ref addr) => ("ADDR", addr.into()), ClientKillFilter::LAddr(ref addr) => ("LADDR", addr.into()), - ClientKillFilter::SkipMe(ref b) => ("SKIPME", match *b { - true => utils::static_str("yes"), - false => utils::static_str("no"), - }), + ClientKillFilter::SkipMe(ref b) => ( + "SKIPME", + match *b { + true => utils::static_str("yes"), + false => utils::static_str("no"), + }, + ), }; (utils::static_str(prefix), value) @@ -98,32 +101,16 @@ impl ClientReplyFlag { } } -/// Arguments to the CLIENT UNBLOCK command. 
-#[derive(Clone, Debug, Eq, PartialEq)] -pub enum ClientUnblockFlag { - Timeout, - Error, -} - -impl ClientUnblockFlag { - pub(crate) fn to_str(&self) -> Str { - utils::static_str(match *self { - ClientUnblockFlag::Timeout => "TIMEOUT", - ClientUnblockFlag::Error => "ERROR", - }) - } -} - /// An `ON|OFF` flag used with client tracking commands. -#[cfg(feature = "client-tracking")] -#[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] +#[cfg(feature = "i-tracking")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] #[derive(Clone, Debug, Eq, PartialEq)] pub enum Toggle { On, Off, } -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] impl Toggle { pub(crate) fn to_str(&self) -> &'static str { match self { @@ -141,7 +128,8 @@ impl Toggle { } } -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] impl TryFrom<&str> for Toggle { type Error = RedisError; @@ -150,7 +138,8 @@ impl TryFrom<&str> for Toggle { } } -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] impl TryFrom for Toggle { type Error = RedisError; @@ -159,7 +148,8 @@ impl TryFrom for Toggle { } } -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] impl TryFrom<&String> for Toggle { type Error = RedisError; @@ -168,7 +158,8 @@ impl TryFrom<&String> for Toggle { } } -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] impl From for Toggle { fn from(value: bool) -> Self { if value { @@ -180,20 +171,20 @@ impl From for Toggle { } /// A [client tracking](https://redis.io/docs/manual/client-side-caching/) invalidation message from the provided server. 
-#[cfg(feature = "client-tracking")] -#[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] +#[cfg(feature = "i-tracking")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] #[derive(Clone, Debug, Eq, PartialEq)] pub struct Invalidation { - pub keys: Vec, + pub keys: Vec, pub server: Server, } -#[cfg(feature = "client-tracking")] -#[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] +#[cfg(feature = "i-tracking")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] impl Invalidation { pub(crate) fn from_message(message: Message, server: &Server) -> Option { Some(Invalidation { - keys: match message.value { + keys: match message.value { RedisValue::Array(values) => values.into_iter().filter_map(|v| v.try_into().ok()).collect(), RedisValue::String(s) => vec![s.into()], RedisValue::Bytes(b) => vec![b.into()], diff --git a/src/types/config.rs b/src/types/config.rs index a4eb207c..98b1b116 100644 --- a/src/types/config.rs +++ b/src/types/config.rs @@ -4,6 +4,7 @@ use socket2::TcpKeepalive; use std::{cmp, time::Duration}; use url::Url; +use crate::error::RedisErrorKind; #[cfg(feature = "mocks")] use crate::mocks::Mocks; #[cfg(feature = "unix-sockets")] @@ -18,6 +19,7 @@ pub use crate::protocol::tls::{HostMapping, TlsConfig, TlsConnector, TlsHostMapp #[cfg(feature = "replicas")] #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] pub use crate::router::replicas::{ReplicaConfig, ReplicaFilter}; +use crate::types::ClusterHash; /// The default amount of jitter when waiting to reconnect. pub const DEFAULT_JITTER_MS: u32 = 100; @@ -75,37 +77,37 @@ impl ReconnectError { pub enum ReconnectPolicy { /// Wait a constant amount of time between reconnect attempts, in ms. Constant { - attempts: u32, + attempts: u32, max_attempts: u32, - delay: u32, - jitter: u32, + delay: u32, + jitter: u32, }, /// Backoff reconnection attempts linearly, adding `delay` each time. 
Linear { - attempts: u32, + attempts: u32, max_attempts: u32, - max_delay: u32, - delay: u32, - jitter: u32, + max_delay: u32, + delay: u32, + jitter: u32, }, /// Backoff reconnection attempts exponentially, multiplying the last delay by `mult` each time. Exponential { - attempts: u32, + attempts: u32, max_attempts: u32, - min_delay: u32, - max_delay: u32, - mult: u32, - jitter: u32, + min_delay: u32, + max_delay: u32, + mult: u32, + jitter: u32, }, } impl Default for ReconnectPolicy { fn default() -> Self { ReconnectPolicy::Constant { - attempts: 0, + attempts: 0, max_attempts: 0, - delay: 1000, - jitter: DEFAULT_JITTER_MS, + delay: 1000, + jitter: DEFAULT_JITTER_MS, } } } @@ -295,7 +297,7 @@ pub enum BackpressurePolicy { /// `disable_auto_backpressure` is `true`. /// /// Default: 10 ms - min_sleep_duration: Duration, + min_sleep_duration: Duration, }, /// Wait for all in-flight commands to finish before sending the next command. Drain, @@ -312,7 +314,7 @@ impl BackpressurePolicy { pub fn default_sleep() -> Self { BackpressurePolicy::Sleep { disable_backpressure_scaling: false, - min_sleep_duration: Duration::from_millis(10), + min_sleep_duration: Duration::from_millis(10), } } } @@ -320,7 +322,7 @@ impl BackpressurePolicy { /// Configuration options for backpressure features in the client. #[derive(Clone, Debug, Eq, PartialEq)] pub struct BackpressureConfig { - /// Whether or not to disable the automatic backpressure features when pipelining is enabled. + /// Whether to disable the automatic backpressure features when pipelining is enabled. /// /// If `true` then `RedisErrorKind::Backpressure` errors may be surfaced to callers. Callers can set this to `true` /// and `max_in_flight_commands` to `0` to effectively disable the backpressure logic. @@ -330,19 +332,19 @@ pub struct BackpressureConfig { /// The maximum number of in-flight commands (per connection) before backpressure will be applied. 
/// /// Default: 10_000 - pub max_in_flight_commands: u64, + pub max_in_flight_commands: u64, /// The backpressure policy to apply when the max number of in-flight commands is reached. /// /// Default: [Drain](crate::types::BackpressurePolicy::Drain). - pub policy: BackpressurePolicy, + pub policy: BackpressurePolicy, } impl Default for BackpressureConfig { fn default() -> Self { BackpressureConfig { disable_auto_backpressure: false, - max_in_flight_commands: 10_000, - policy: BackpressurePolicy::default(), + max_in_flight_commands: 10_000, + policy: BackpressurePolicy::default(), } } } @@ -351,11 +353,11 @@ impl Default for BackpressureConfig { #[derive(Clone, Debug, Default)] pub struct TcpConfig { /// Set the [TCP_NODELAY](https://docs.rs/tokio/latest/tokio/net/struct.TcpStream.html#method.set_nodelay) value. - pub nodelay: Option, + pub nodelay: Option, /// Set the [SO_LINGER](https://docs.rs/tokio/latest/tokio/net/struct.TcpStream.html#method.set_linger) value. - pub linger: Option, + pub linger: Option, /// Set the [IP_TTL](https://docs.rs/tokio/latest/tokio/net/struct.TcpStream.html#method.set_ttl) value. - pub ttl: Option, + pub ttl: Option, /// Set the [TCP keepalive values](https://docs.rs/socket2/latest/socket2/struct.Socket.html#method.set_tcp_keepalive). pub keepalive: Option, } @@ -390,18 +392,37 @@ pub struct UnresponsiveConfig { /// This value should usually be less than half of `max_timeout` and always more than 1 ms. /// /// Default: 2 sec - pub interval: Duration, + pub interval: Duration, } impl Default for UnresponsiveConfig { fn default() -> Self { UnresponsiveConfig { max_timeout: None, - interval: Duration::from_secs(2), + interval: Duration::from_secs(2), } } } +/// A policy that determines how clustered clients initially connect to and discover other cluster nodes. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClusterDiscoveryPolicy { + /// Always use the endpoint(s) provided in the client's [ServerConfig](ServerConfig). 
+ /// + /// This is generally recommended with managed services, Kubernetes, or other systems that provide client routing or cluster discovery interfaces. + /// + /// Default. + ConfigEndpoint, + /// Try connecting to nodes specified in both the client's [ServerConfig](ServerConfig) and the most recently cached routing table. + UseCache, +} + +impl Default for ClusterDiscoveryPolicy { + fn default() -> Self { + ClusterDiscoveryPolicy::ConfigEndpoint + } +} + /// Configuration options related to the creation or management of TCP connection. #[derive(Clone, Debug, Eq, PartialEq)] pub struct ConnectionConfig { @@ -410,37 +431,37 @@ pub struct ConnectionConfig { /// This also includes the TLS handshake if using any of the TLS features. /// /// Default: 10 sec - pub connection_timeout: Duration, + pub connection_timeout: Duration, /// The timeout to apply when sending internal commands such as `AUTH`, `SELECT`, `CLUSTER SLOTS`, `READONLY`, etc. /// /// Default: 10 sec - pub internal_command_timeout: Duration, + pub internal_command_timeout: Duration, /// The amount of time to wait after a `MOVED` error is received before the client will update the cached cluster /// state. /// /// Default: `0` - pub cluster_cache_update_delay: Duration, + pub cluster_cache_update_delay: Duration, /// The maximum number of times the client will attempt to send a command. /// /// This value be incremented whenever the connection closes while the command is in-flight. /// /// Default: `3` - pub max_command_attempts: u32, + pub max_command_attempts: u32, /// The maximum number of times the client will attempt to follow a `MOVED` or `ASK` redirection per command. /// /// Default: `5` - pub max_redirections: u32, + pub max_redirections: u32, /// Unresponsive connection configuration options. 
- pub unresponsive: UnresponsiveConfig, + pub unresponsive: UnresponsiveConfig, /// An unexpected `NOAUTH` error is treated the same as a general connection failure, causing the client to /// reconnect based on the [ReconnectPolicy](crate::types::ReconnectPolicy). This is [recommended](https://github.com/StackExchange/StackExchange.Redis/issues/1273#issuecomment-651823824) if callers are using ElastiCache. /// /// Default: `false` - pub reconnect_on_auth_error: bool, + pub reconnect_on_auth_error: bool, /// Automatically send `CLIENT SETNAME` on each connection associated with a client instance. /// /// Default: `false` - pub auto_client_setname: bool, + pub auto_client_setname: bool, /// Limit the size of the internal in-memory command queue. /// /// Commands that exceed this limit will receive a `RedisErrorKind::Backpressure` error. @@ -448,7 +469,7 @@ pub struct ConnectionConfig { /// See [command_queue_len](crate::interfaces::MetricsInterface::command_queue_len) for more information. /// /// Default: `0` (unlimited) - pub max_command_buffer_len: usize, + pub max_command_buffer_len: usize, /// Disable the `CLUSTER INFO` health check when initializing cluster connections. /// /// Default: `false` @@ -458,13 +479,13 @@ pub struct ConnectionConfig { /// Default: `None` #[cfg(feature = "replicas")] #[cfg_attr(docsrs, doc(cfg(feature = "replicas")))] - pub replica: ReplicaConfig, + pub replica: ReplicaConfig, /// TCP connection options. - pub tcp: TcpConfig, + pub tcp: TcpConfig, /// #[cfg(feature = "custom-reconnect-errors")] #[cfg_attr(docsrs, doc(cfg(feature = "custom-reconnect-errors")))] - pub reconnect_errors: Vec, + pub reconnect_errors: Vec, } impl Default for ConnectionConfig { @@ -496,28 +517,28 @@ impl Default for ConnectionConfig { /// Configuration options that can affect the performance of the client. 
#[derive(Clone, Debug, Eq, PartialEq)] pub struct PerformanceConfig { - /// Whether or not the client should automatically pipeline commands across tasks when possible. + /// Whether the client should automatically pipeline commands across tasks when possible. /// /// The [Pipeline](crate::clients::Pipeline) interface can be used to pipeline commands within one task, /// whereas this flag can automatically pipeline commands across tasks. /// /// Default: `true` - pub auto_pipeline: bool, + pub auto_pipeline: bool, /// Configuration options for backpressure features in the client. - pub backpressure: BackpressureConfig, + pub backpressure: BackpressureConfig, /// An optional timeout to apply to all commands. /// /// If `0` this will disable any timeout being applied to commands. Callers can also set timeouts on individual /// commands via the [with_options](crate::interfaces::ClientLike::with_options) interface. /// /// Default: `0` - pub default_command_timeout: Duration, + pub default_command_timeout: Duration, /// The maximum number of frames that will be fed to a socket before flushing. /// /// Note: in some circumstances the client with always flush the socket (`QUIT`, `EXEC`, etc). /// /// Default: 200 - pub max_feed_count: u64, + pub max_feed_count: u64, /// The default capacity used when creating [broadcast channels](https://docs.rs/tokio/latest/tokio/sync/broadcast/fn.channel.html) in the [EventInterface](crate::interfaces::EventInterface). /// /// Default: 32 @@ -529,7 +550,7 @@ pub struct PerformanceConfig { /// Default: 50_000_000 #[cfg(feature = "blocking-encoding")] #[cfg_attr(docsrs, doc(cfg(feature = "blocking-encoding")))] - pub blocking_encode_threshold: usize, + pub blocking_encode_threshold: usize, } impl Default for PerformanceConfig { @@ -549,7 +570,7 @@ impl Default for PerformanceConfig { /// Configuration options for a `RedisClient`. 
#[derive(Clone, Debug)] pub struct RedisConfig { - /// Whether or not the client should return an error if it cannot connect to the server the first time when being + /// Whether the client should return an error if it cannot connect to the server the first time when being /// initialized. If `false` the client will run the reconnect logic if it cannot connect to the server the first /// time, but if `true` the client will return initial connection errors to the caller immediately. /// @@ -567,20 +588,20 @@ pub struct RedisConfig { /// Setting this to anything other than `Blocking::Block` incurs a small performance penalty. /// /// Default: `Blocking::Block` - pub blocking: Blocking, + pub blocking: Blocking, /// An optional ACL username for the client to use when authenticating. If ACL rules are not configured this should /// be `None`. /// /// Default: `None` - pub username: Option, + pub username: Option, /// An optional password for the client to use when authenticating. /// /// Default: `None` - pub password: Option, + pub password: Option, /// Connection configuration for the server(s). /// /// Default: `Centralized(localhost, 6379)` - pub server: ServerConfig, + pub server: ServerConfig, /// The protocol version to use when communicating with the server(s). /// /// If RESP3 is specified the client will automatically use `HELLO` when authenticating. **This requires Redis @@ -591,7 +612,7 @@ pub struct RedisConfig { /// has a slightly different type system than RESP2. /// /// Default: `RESP2` - pub version: RespVersion, + pub version: RespVersion, /// An optional database number that the client will automatically `SELECT` after connecting or reconnecting. /// /// It is recommended that callers use this field instead of putting a `select()` call inside the `on_reconnect` @@ -599,23 +620,23 @@ pub struct RedisConfig { /// the `on_reconnect` block. /// /// Default: `None` - pub database: Option, + pub database: Option, /// TLS configuration options. 
/// /// Default: `None` #[cfg(any(feature = "enable-native-tls", feature = "enable-rustls"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "enable-native-tls", feature = "enable-rustls"))))] - pub tls: Option, + pub tls: Option, /// Tracing configuration options. #[cfg(feature = "partial-tracing")] #[cfg_attr(docsrs, doc(cfg(feature = "partial-tracing")))] - pub tracing: TracingConfig, + pub tracing: TracingConfig, /// An optional [mocking layer](crate::mocks) to intercept and process commands. /// /// Default: `None` #[cfg(feature = "mocks")] #[cfg_attr(docsrs, doc(cfg(feature = "mocks")))] - pub mocks: Option>, + pub mocks: Option>, } impl PartialEq for RedisConfig { @@ -825,7 +846,10 @@ impl RedisConfig { let (url, host, port, _tls) = utils::parse_url(url, Some(6379))?; let mut cluster_nodes = utils::parse_url_other_nodes(&url)?; cluster_nodes.push(Server::new(host, port)); - let server = ServerConfig::Clustered { hosts: cluster_nodes }; + let server = ServerConfig::Clustered { + hosts: cluster_nodes, + policy: ClusterDiscoveryPolicy::default(), + }; let (username, password) = utils::parse_url_credentials(&url)?; Ok(RedisConfig { @@ -912,6 +936,8 @@ pub enum ServerConfig { /// Only one node in the cluster needs to be provided here, the rest will be discovered via the `CLUSTER SLOTS` /// command. hosts: Vec, + /// The cluster discovery policy to use when connecting or following redirections. + policy: ClusterDiscoveryPolicy, }, #[cfg(feature = "unix-sockets")] #[cfg_attr(docsrs, doc(cfg(feature = "unix-sockets")))] @@ -923,7 +949,7 @@ pub enum ServerConfig { }, Sentinel { /// An array of `Server` identifiers for each known sentinel instance. - hosts: Vec, + hosts: Vec, /// The service name for primary/main instances. 
service_name: String, @@ -965,6 +991,7 @@ impl ServerConfig { { ServerConfig::Clustered { hosts: hosts.drain(..).map(|(s, p)| Server::new(s.into(), p)).collect(), + policy: ClusterDiscoveryPolicy::default(), } } @@ -977,12 +1004,12 @@ impl ServerConfig { N: Into, { ServerConfig::Sentinel { - hosts: hosts.into_iter().map(|(h, p)| Server::new(h.into(), p)).collect(), - service_name: service_name.into(), + hosts: hosts.into_iter().map(|(h, p)| Server::new(h.into(), p)).collect(), + service_name: service_name.into(), #[cfg(feature = "sentinel-auth")] - username: None, + username: None, #[cfg(feature = "sentinel-auth")] - password: None, + password: None, } } @@ -1011,6 +1038,7 @@ impl ServerConfig { Server::new("127.0.0.1", 30002), Server::new("127.0.0.1", 30003), ], + policy: ClusterDiscoveryPolicy::default(), } } @@ -1042,12 +1070,22 @@ impl ServerConfig { pub fn hosts(&self) -> Vec { match *self { ServerConfig::Centralized { ref server } => vec![server.clone()], - ServerConfig::Clustered { ref hosts } => hosts.to_vec(), + ServerConfig::Clustered { ref hosts, .. } => hosts.to_vec(), ServerConfig::Sentinel { ref hosts, .. } => hosts.to_vec(), #[cfg(feature = "unix-sockets")] ServerConfig::Unix { ref path } => vec![Server::new(utils::path_to_string(path), 0)], } } + + /// Set the [ClusterDiscoveryPolicy], if possible. + pub fn set_cluster_discovery_policy(&mut self, new_policy: ClusterDiscoveryPolicy) -> Result<(), RedisError> { + if let ServerConfig::Clustered { ref mut policy, .. } = self { + *policy = new_policy; + Ok(()) + } else { + Err(RedisError::new(RedisErrorKind::Config, "Expected clustered config.")) + } + } } /// Configuration options for tracing. @@ -1055,7 +1093,7 @@ impl ServerConfig { #[cfg_attr(docsrs, doc(cfg(feature = "partial-tracing")))] #[derive(Clone, Debug)] pub struct TracingConfig { - /// Whether or not to enable tracing for this client. + /// Whether to enable tracing for this client. 
/// /// Default: `false` pub enabled: bool, @@ -1089,10 +1127,10 @@ impl TracingConfig { impl Default for TracingConfig { fn default() -> Self { Self { - enabled: false, - default_tracing_level: tracing::Level::INFO, + enabled: false, + default_tracing_level: tracing::Level::INFO, #[cfg(feature = "full-tracing")] - full_tracing_level: tracing::Level::DEBUG, + full_tracing_level: tracing::Level::DEBUG, } } } @@ -1105,11 +1143,11 @@ pub struct SentinelConfig { /// The hostname for the sentinel node. /// /// Default: `127.0.0.1` - pub host: String, + pub host: String, /// The port on which the sentinel node is listening. /// /// Default: `26379` - pub port: u16, + pub port: u16, /// An optional ACL username for the client to use when authenticating. If ACL rules are not configured this should /// be `None`. /// @@ -1126,13 +1164,13 @@ pub struct SentinelConfig { /// Default: `None` #[cfg(any(feature = "enable-native-tls", feature = "enable-rustls"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "enable-native-tls", feature = "enable-rustls"))))] - pub tls: Option, - /// Whether or not to enable tracing for this client. + pub tls: Option, + /// Whether to enable tracing for this client. /// /// Default: `false` #[cfg(feature = "partial-tracing")] #[cfg_attr(docsrs, doc(cfg(feature = "partial-tracing")))] - pub tracing: TracingConfig, + pub tracing: TracingConfig, } #[cfg(feature = "sentinel-client")] @@ -1200,7 +1238,7 @@ impl From for RedisConfig { #[derive(Clone, Debug, Eq, PartialEq, Default)] pub struct Options { /// Set the max number of write attempts for a command. - pub max_attempts: Option, + pub max_attempts: Option, /// Set the max number of cluster redirections to follow for a command. pub max_redirections: Option, /// Set the timeout duration for a command. @@ -1209,27 +1247,31 @@ pub struct Options { /// /// * But it's not perfect. There's no reliable mechanism to cancel a command once it has been written /// to the connection. 
- pub timeout: Option, + pub timeout: Option, /// The cluster node that should receive the command. /// /// The caller will receive a `RedisErrorKind::Cluster` error if the provided server does not exist. /// /// The client will still follow redirection errors via this interface. Callers may not notice this, but incorrect /// server arguments here could result in unnecessary calls to refresh the cached cluster routing table. - pub cluster_node: Option, + pub cluster_node: Option, + /// The cluster hashing policy to use, if applicable. + /// + /// If `cluster_node` is also provided it will take precedence over this value. + pub cluster_hash: Option, /// Whether to skip backpressure checks for a command. - pub no_backpressure: bool, + pub no_backpressure: bool, /// Whether the command should fail quickly if the connection is not healthy or available for writes. This always /// takes precedence over `max_attempts` if `true`. /// /// Setting this to `true` incurs a small performance penalty. (Checking a `RwLock`). /// /// Default: `false` - pub fail_fast: bool, + pub fail_fast: bool, /// Whether to send `CLIENT CACHING yes|no` before the command. 
- #[cfg(feature = "client-tracking")] - #[cfg_attr(docsrs, doc(cfg(feature = "client-tracking")))] - pub caching: Option, + #[cfg(feature = "i-tracking")] + #[cfg_attr(docsrs, doc(cfg(feature = "i-tracking")))] + pub caching: Option, } impl Options { @@ -1247,10 +1289,13 @@ impl Options { if let Some(ref val) = other.cluster_node { self.cluster_node = Some(val.clone()); } + if let Some(ref cluster_hash) = other.cluster_hash { + self.cluster_hash = Some(cluster_hash.clone()); + } self.no_backpressure |= other.no_backpressure; self.fail_fast |= other.fail_fast; - #[cfg(feature = "client-tracking")] + #[cfg(feature = "i-tracking")] if let Some(val) = other.caching { self.caching = Some(val); } @@ -1262,14 +1307,15 @@ impl Options { #[cfg(feature = "transactions")] pub(crate) fn from_command(cmd: &RedisCommand) -> Self { Options { - max_attempts: Some(cmd.attempts_remaining), - max_redirections: Some(cmd.redirections_remaining), - timeout: cmd.timeout_dur, - no_backpressure: cmd.skip_backpressure, - cluster_node: cmd.cluster_node.clone(), - fail_fast: cmd.fail_fast, - #[cfg(feature = "client-tracking")] - caching: cmd.caching, + max_attempts: Some(cmd.attempts_remaining), + max_redirections: Some(cmd.redirections_remaining), + timeout: cmd.timeout_dur, + no_backpressure: cmd.skip_backpressure, + cluster_node: cmd.cluster_node.clone(), + cluster_hash: Some(cmd.hasher.clone()), + fail_fast: cmd.fail_fast, + #[cfg(feature = "i-tracking")] + caching: cmd.caching, } } @@ -1280,7 +1326,7 @@ impl Options { command.cluster_node = self.cluster_node.clone(); command.fail_fast = self.fail_fast; - #[cfg(feature = "client-tracking")] + #[cfg(feature = "i-tracking")] { command.caching = self.caching; } @@ -1291,6 +1337,9 @@ impl Options { if let Some(redirections) = self.max_redirections { command.redirections_remaining = redirections; } + if let Some(ref cluster_hash) = self.cluster_hash { + command.hasher = cluster_hash.clone(); + } } } @@ -1526,10 +1575,10 @@ mod tests { 
sentinelUsername=username2&sentinelPassword=password2"; let expected = RedisConfig { server: ServerConfig::Sentinel { - hosts: vec![Server::new("foo.com", 26379)], + hosts: vec![Server::new("foo.com", 26379)], service_name: "fakename".into(), - username: Some("username2".into()), - password: Some("password2".into()), + username: Some("username2".into()), + password: Some("password2".into()), }, username: Some("username1".into()), password: Some("password1".into()), diff --git a/src/types/misc.rs b/src/types/misc.rs index 8be76d7c..b26f33e1 100644 --- a/src/types/misc.rs +++ b/src/types/misc.rs @@ -5,10 +5,15 @@ pub use crate::protocol::{ use crate::{ error::{RedisError, RedisErrorKind}, types::{RedisKey, RedisValue, Server}, - utils::{self, convert_or_default}, + utils, }; use bytes_utils::Str; -use std::{collections::HashMap, convert::TryFrom, fmt, time::Duration}; +use std::{convert::TryFrom, fmt, time::Duration}; + +#[cfg(feature = "i-memory")] +use crate::utils::convert_or_default; +#[cfg(feature = "i-memory")] +use std::collections::HashMap; /// Arguments passed to the SHUTDOWN command. /// @@ -33,9 +38,9 @@ impl ShutdownFlags { /// #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct KeyspaceEvent { - pub db: u8, + pub db: u8, pub operation: String, - pub key: RedisKey, + pub key: RedisKey, } /// Aggregate options for the [zinterstore](https://redis.io/commands/zinterstore) (and related) commands. @@ -46,6 +51,7 @@ pub enum AggregateOptions { } impl AggregateOptions { + #[cfg(feature = "i-sorted-sets")] pub(crate) fn to_str(&self) -> Str { utils::static_str(match *self { AggregateOptions::Sum => "SUM", @@ -95,13 +101,13 @@ impl InfoKind { #[derive(Clone, Debug, Eq, PartialEq)] pub struct CustomCommand { /// The command name, sent directly to the server. - pub cmd: Str, + pub cmd: Str, /// The cluster hashing policy to use, if any. /// /// Cluster clients will use the default policy if not provided. 
pub cluster_hash: ClusterHash, - /// Whether or not the command should block the connection while waiting on a response. - pub blocking: bool, + /// Whether the command should block the connection while waiting on a response. + pub blocking: bool, } impl CustomCommand { @@ -160,6 +166,7 @@ pub enum SetOptions { } impl SetOptions { + #[allow(dead_code)] pub(crate) fn to_str(&self) -> Str { utils::static_str(match *self { SetOptions::NX => "NX", @@ -184,6 +191,7 @@ pub enum Expiration { } impl Expiration { + #[allow(dead_code)] pub(crate) fn into_args(self) -> (Str, Option) { let (prefix, value) = match self { Expiration::EX(i) => ("EX", Some(i)), @@ -227,22 +235,26 @@ impl fmt::Display for ClientState { /// /// #[derive(Clone, Debug, Eq, PartialEq)] +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] pub struct DatabaseMemoryStats { - pub overhead_hashtable_main: u64, - pub overhead_hashtable_expires: u64, + pub overhead_hashtable_main: u64, + pub overhead_hashtable_expires: u64, pub overhead_hashtable_slot_to_keys: u64, } +#[cfg(feature = "i-memory")] impl Default for DatabaseMemoryStats { fn default() -> Self { DatabaseMemoryStats { - overhead_hashtable_expires: 0, - overhead_hashtable_main: 0, + overhead_hashtable_expires: 0, + overhead_hashtable_main: 0, overhead_hashtable_slot_to_keys: 0, } } } +#[cfg(feature = "i-memory")] fn parse_database_memory_stat(stats: &mut DatabaseMemoryStats, key: &str, value: RedisValue) { match key { "overhead.hashtable.main" => stats.overhead_hashtable_main = convert_or_default(value), @@ -252,6 +264,8 @@ fn parse_database_memory_stat(stats: &mut DatabaseMemoryStats, key: &str, value: }; } +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl TryFrom for DatabaseMemoryStats { type Error = RedisError; @@ -270,68 +284,71 @@ impl TryFrom for DatabaseMemoryStats { /// /// #[derive(Clone, Debug)] +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = 
"i-memory")))] pub struct MemoryStats { - pub peak_allocated: u64, - pub total_allocated: u64, - pub startup_allocated: u64, - pub replication_backlog: u64, - pub clients_slaves: u64, - pub clients_normal: u64, - pub aof_buffer: u64, - pub lua_caches: u64, - pub overhead_total: u64, - pub keys_count: u64, - pub keys_bytes_per_key: u64, - pub dataset_bytes: u64, - pub dataset_percentage: f64, - pub peak_percentage: f64, - pub fragmentation: f64, - pub fragmentation_bytes: u64, - pub rss_overhead_ratio: f64, - pub rss_overhead_bytes: u64, - pub allocator_allocated: u64, - pub allocator_active: u64, - pub allocator_resident: u64, + pub peak_allocated: u64, + pub total_allocated: u64, + pub startup_allocated: u64, + pub replication_backlog: u64, + pub clients_slaves: u64, + pub clients_normal: u64, + pub aof_buffer: u64, + pub lua_caches: u64, + pub overhead_total: u64, + pub keys_count: u64, + pub keys_bytes_per_key: u64, + pub dataset_bytes: u64, + pub dataset_percentage: f64, + pub peak_percentage: f64, + pub fragmentation: f64, + pub fragmentation_bytes: u64, + pub rss_overhead_ratio: f64, + pub rss_overhead_bytes: u64, + pub allocator_allocated: u64, + pub allocator_active: u64, + pub allocator_resident: u64, pub allocator_fragmentation_ratio: f64, pub allocator_fragmentation_bytes: u64, - pub allocator_rss_ratio: f64, - pub allocator_rss_bytes: u64, - pub db: HashMap, + pub allocator_rss_ratio: f64, + pub allocator_rss_bytes: u64, + pub db: HashMap, } +#[cfg(feature = "i-memory")] impl Default for MemoryStats { fn default() -> Self { MemoryStats { - peak_allocated: 0, - total_allocated: 0, - startup_allocated: 0, - replication_backlog: 0, - clients_normal: 0, - clients_slaves: 0, - aof_buffer: 0, - lua_caches: 0, - overhead_total: 0, - keys_count: 0, - keys_bytes_per_key: 0, - dataset_bytes: 0, - dataset_percentage: 0.0, - peak_percentage: 0.0, - fragmentation: 0.0, - fragmentation_bytes: 0, - rss_overhead_ratio: 0.0, - rss_overhead_bytes: 0, - 
allocator_allocated: 0, - allocator_active: 0, - allocator_resident: 0, + peak_allocated: 0, + total_allocated: 0, + startup_allocated: 0, + replication_backlog: 0, + clients_normal: 0, + clients_slaves: 0, + aof_buffer: 0, + lua_caches: 0, + overhead_total: 0, + keys_count: 0, + keys_bytes_per_key: 0, + dataset_bytes: 0, + dataset_percentage: 0.0, + peak_percentage: 0.0, + fragmentation: 0.0, + fragmentation_bytes: 0, + rss_overhead_ratio: 0.0, + rss_overhead_bytes: 0, + allocator_allocated: 0, + allocator_active: 0, + allocator_resident: 0, allocator_fragmentation_ratio: 0.0, allocator_fragmentation_bytes: 0, - allocator_rss_bytes: 0, - allocator_rss_ratio: 0.0, - db: HashMap::new(), + allocator_rss_bytes: 0, + allocator_rss_ratio: 0.0, + db: HashMap::new(), } } } - +#[cfg(feature = "i-memory")] impl PartialEq for MemoryStats { fn eq(&self, other: &Self) -> bool { self.peak_allocated == other.peak_allocated @@ -363,8 +380,10 @@ impl PartialEq for MemoryStats { } } +#[cfg(feature = "i-memory")] impl Eq for MemoryStats {} +#[cfg(feature = "i-memory")] fn parse_memory_stat_field(stats: &mut MemoryStats, key: &str, value: RedisValue) { match key { "peak.allocated" => stats.peak_allocated = convert_or_default(value), @@ -409,6 +428,8 @@ fn parse_memory_stat_field(stats: &mut MemoryStats, key: &str, value: RedisValue } } +#[cfg(feature = "i-memory")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-memory")))] impl TryFrom for MemoryStats { type Error = RedisError; @@ -428,12 +449,12 @@ impl TryFrom for MemoryStats { /// #[derive(Clone, Debug, Eq, PartialEq)] pub struct SlowlogEntry { - pub id: i64, + pub id: i64, pub timestamp: i64, - pub duration: Duration, - pub args: Vec, - pub ip: Option, - pub name: Option, + pub duration: Duration, + pub args: Vec, + pub ip: Option, + pub name: Option, } impl TryFrom for SlowlogEntry { @@ -497,6 +518,7 @@ pub enum ScriptDebugFlag { } impl ScriptDebugFlag { + #[cfg(feature = "i-scripts")] pub(crate) fn to_str(&self) -> Str { 
utils::static_str(match *self { ScriptDebugFlag::Yes => "YES", @@ -535,6 +557,7 @@ pub enum SortOrder { } impl SortOrder { + #[allow(dead_code)] pub(crate) fn to_str(&self) -> Str { utils::static_str(match *self { SortOrder::Asc => "ASC", @@ -558,6 +581,7 @@ impl Default for FnPolicy { } impl FnPolicy { + #[cfg(feature = "i-scripts")] pub(crate) fn to_str(&self) -> Str { utils::static_str(match *self { FnPolicy::Flush => "FLUSH", @@ -621,3 +645,19 @@ impl TryFrom<&Str> for FnPolicy { FnPolicy::from_str(value) } } + +/// Arguments to the CLIENT UNBLOCK command. +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ClientUnblockFlag { + Timeout, + Error, +} + +impl ClientUnblockFlag { + pub(crate) fn to_str(&self) -> Str { + utils::static_str(match *self { + ClientUnblockFlag::Timeout => "TIMEOUT", + ClientUnblockFlag::Error => "ERROR", + }) + } +} diff --git a/src/types/mod.rs b/src/types/mod.rs index f57b8e35..3f01ff01 100644 --- a/src/types/mod.rs +++ b/src/types/mod.rs @@ -1,48 +1,67 @@ use crate::error::RedisError; pub use crate::modules::response::{FromRedis, FromRedisKey}; -pub use redis_protocol::resp3::types::{Frame, RespVersion}; +pub use redis_protocol::resp3::types::{BytesFrame as Resp3Frame, RespVersion}; use tokio::task::JoinHandle; mod args; mod builder; +#[cfg(feature = "i-client")] mod client; +#[cfg(feature = "i-cluster")] mod cluster; mod config; mod from_tuple; +#[cfg(feature = "i-geo")] mod geo; +#[cfg(feature = "i-lists")] mod lists; mod misc; mod multiple; mod scan; +#[cfg(feature = "i-scripts")] mod scripts; +#[cfg(feature = "i-sorted-sets")] mod sorted_sets; +#[cfg(feature = "i-streams")] mod streams; -#[cfg(feature = "time-series")] +#[cfg(feature = "i-time-series")] mod timeseries; +#[cfg(feature = "metrics")] +#[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] +pub use crate::modules::metrics::Stats; pub use args::*; pub use builder::*; +#[cfg(feature = "i-client")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-client")))] pub use client::*; 
+#[cfg(feature = "i-cluster")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-cluster")))] pub use cluster::*; pub use config::*; +#[cfg(feature = "i-geo")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-geo")))] pub use geo::*; +#[cfg(feature = "i-lists")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-lists")))] pub use lists::*; pub use misc::*; pub use multiple::*; pub use scan::*; +#[cfg(feature = "i-scripts")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-scripts")))] pub use scripts::*; pub use semver::Version; +#[cfg(feature = "i-sorted-sets")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-sorted-sets")))] pub use sorted_sets::*; +#[cfg(feature = "i-streams")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-streams")))] pub use streams::*; - -#[cfg(feature = "time-series")] -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg(feature = "i-time-series")] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] pub use timeseries::*; -#[cfg(feature = "metrics")] -#[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] -pub use crate::modules::metrics::Stats; - #[cfg(feature = "dns")] #[cfg_attr(docsrs, doc(cfg(feature = "dns")))] pub use crate::protocol::types::Resolve; diff --git a/src/types/scan.rs b/src/types/scan.rs index 63128193..05e9a1f0 100644 --- a/src/types/scan.rs +++ b/src/types/scan.rs @@ -46,7 +46,7 @@ pub trait Scanner { /// Read the cursor returned from the last scan operation. fn cursor(&self) -> Option>; - /// Whether or not the scan call will continue returning results. If `false` this will be the last result set + /// Whether the scan call will continue returning results. If `false` this will be the last result set /// returned on the stream. /// /// Calling `next` when this returns `false` will return `Ok(())`, so this does not need to be checked on each @@ -81,9 +81,9 @@ pub trait Scanner { /// The result of a SCAN operation. 
pub struct ScanResult { - pub(crate) results: Option>, - pub(crate) inner: Arc, - pub(crate) scan_state: KeyScanInner, + pub(crate) results: Option>, + pub(crate) inner: Arc, + pub(crate) scan_state: KeyScanInner, pub(crate) can_continue: bool, } @@ -128,9 +128,9 @@ impl Scanner for ScanResult { /// The result of a HSCAN operation. pub struct HScanResult { - pub(crate) results: Option, - pub(crate) inner: Arc, - pub(crate) scan_state: ValueScanInner, + pub(crate) results: Option, + pub(crate) inner: Arc, + pub(crate) scan_state: ValueScanInner, pub(crate) can_continue: bool, } @@ -172,9 +172,9 @@ impl Scanner for HScanResult { /// The result of a SSCAN operation. pub struct SScanResult { - pub(crate) results: Option>, - pub(crate) inner: Arc, - pub(crate) scan_state: ValueScanInner, + pub(crate) results: Option>, + pub(crate) inner: Arc, + pub(crate) scan_state: ValueScanInner, pub(crate) can_continue: bool, } @@ -216,9 +216,9 @@ impl Scanner for SScanResult { /// The result of a ZSCAN operation. pub struct ZScanResult { - pub(crate) results: Option>, - pub(crate) inner: Arc, - pub(crate) scan_state: ValueScanInner, + pub(crate) results: Option>, + pub(crate) inner: Arc, + pub(crate) scan_state: ValueScanInner, pub(crate) can_continue: bool, } diff --git a/src/types/timeseries.rs b/src/types/timeseries.rs index 5cb53ecc..f800a2a0 100644 --- a/src/types/timeseries.rs +++ b/src/types/timeseries.rs @@ -7,7 +7,7 @@ use bytes_utils::Str; use std::collections::HashMap; /// Encoding arguments for certain timeseries commands. -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] #[derive(Clone, Debug, Eq, PartialEq)] pub enum Encoding { Compressed, @@ -24,7 +24,7 @@ impl Encoding { } /// The duplicate policy used with certain timeseries commands. 
-#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] #[derive(Clone, Debug, Eq, PartialEq)] pub enum DuplicatePolicy { Block, @@ -49,7 +49,7 @@ impl DuplicatePolicy { } /// A timestamp used in most timeseries commands. -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] #[derive(Clone, Debug, Eq, PartialEq)] pub enum Timestamp { /// Unix time (milliseconds since epoch). @@ -111,7 +111,7 @@ impl TryFrom for Timestamp { } /// An aggregation policy to use with certain timeseries commands. -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] #[derive(Clone, Debug, Eq, PartialEq)] pub enum Aggregator { Avg, @@ -150,7 +150,7 @@ impl Aggregator { } /// Arguments equivalent to `WITHLABELS | SELECTED_LABELS label...` in various time series GET functions. -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] #[derive(Clone, Debug, Eq, PartialEq)] pub enum GetLabels { WithLabels, @@ -194,7 +194,7 @@ where } /// A timestamp query used in commands such as `TS.MRANGE`. -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] #[derive(Clone, Debug, Eq, PartialEq)] pub enum GetTimestamp { /// Equivalent to `-`. @@ -234,14 +234,14 @@ impl From for GetTimestamp { /// A struct representing `[ALIGN align] AGGREGATION aggregator bucketDuration [BUCKETTIMESTAMP bt] [EMPTY]` in /// commands such as `TS.MRANGE`. 
-#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] #[derive(Clone, Debug, Eq, PartialEq)] pub struct RangeAggregation { - pub align: Option, - pub aggregation: Aggregator, - pub bucket_duration: u64, + pub align: Option, + pub aggregation: Aggregator, + pub bucket_duration: u64, pub bucket_timestamp: Option, - pub empty: bool, + pub empty: bool, } impl From<(Aggregator, u64)> for RangeAggregation { @@ -257,7 +257,7 @@ impl From<(Aggregator, u64)> for RangeAggregation { } /// A `REDUCER` argument in commands such as `TS.MRANGE`. -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] #[derive(Clone, Debug, Eq, PartialEq)] pub enum Reducer { Avg, @@ -290,11 +290,11 @@ impl Reducer { } /// A struct representing `GROUPBY label REDUCE reducer` in commands such as `TS.MRANGE`. -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] #[derive(Clone, Debug, Eq, PartialEq)] pub struct GroupBy { pub groupby: Str, - pub reduce: Reducer, + pub reduce: Reducer, } impl> From<(S, Reducer)> for GroupBy { @@ -307,7 +307,7 @@ impl> From<(S, Reducer)> for GroupBy { } /// A `BUCKETTIMESTAMP` argument in commands such as `TS.MRANGE`. -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] #[derive(Clone, Debug, Eq, PartialEq)] pub enum BucketTimestamp { Start, @@ -401,7 +401,7 @@ impl BucketTimestamp { /// ``` /// /// See [Resp3TimeSeriesValues](crate::types::Resp3TimeSeriesValues) for the RESP3 equivalent. -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] pub type Resp2TimeSeriesValues = Vec<(K, Vec<(Lk, Lv)>, Vec<(i64, f64)>)>; /// The RESP3 equivalent of [Resp2TimeSeriesValues](crate::types::Resp2TimeSeriesValues). 
@@ -445,5 +445,5 @@ pub type Resp2TimeSeriesValues = Vec<(K, Vec<(Lk, Lv)>, Vec<(i64, f64 /// Ok(()) /// } /// ``` -#[cfg_attr(docsrs, doc(cfg(feature = "time-series")))] +#[cfg_attr(docsrs, doc(cfg(feature = "i-time-series")))] pub type Resp3TimeSeriesValues = HashMap, Vec<(i64, f64)>)>; diff --git a/src/utils.rs b/src/utils.rs index 5a11bd6c..19430ed7 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -15,13 +15,11 @@ use bytes_utils::Str; use float_cmp::approx_eq; use futures::{ future::{select, Either}, - pin_mut, - Future, - TryFutureExt, + pin_mut, Future, TryFutureExt, }; use parking_lot::RwLock; use rand::{self, distributions::Alphanumeric, Rng}; -use redis_protocol::resp3::types::Frame as Resp3Frame; +use redis_protocol::resp3::types::BytesFrame as Resp3Frame; use std::{ collections::HashMap, convert::TryInto, @@ -81,6 +79,7 @@ pub fn f64_eq(lhs: f64, rhs: f64) -> bool { approx_eq!(f64, lhs, rhs, ulps = 2) } +#[cfg(feature = "i-geo")] pub fn f64_opt_eq(lhs: &Option, rhs: &Option) -> bool { match *lhs { Some(lhs) => match *rhs { @@ -124,6 +123,7 @@ pub fn f64_to_redis_string(d: f64) -> Result { } } +#[cfg(feature = "i-sorted-sets")] pub fn f64_to_zrange_bound(d: f64, kind: &ZRangeKind) -> Result { if d.is_infinite() && d.is_sign_negative() { Ok("-inf".into()) @@ -158,6 +158,7 @@ pub fn random_string(len: usize) -> String { .collect() } +#[cfg(feature = "i-memory")] pub fn convert_or_default(value: RedisValue) -> R where R: FromRedis + Default, @@ -167,7 +168,7 @@ where #[cfg(feature = "transactions")] pub fn random_u64(max: u64) -> u64 { - rand::thread_rng().gen_range(0 .. 
max) + rand::thread_rng().gen_range(0..max) } pub fn set_client_state(state: &RwLock, new_state: ClientState) { @@ -233,6 +234,7 @@ pub fn path_to_string(path: &Path) -> String { path.as_os_str().to_string_lossy().to_string() } +#[cfg(feature = "i-sorted-sets")] pub fn check_lex_str(val: String, kind: &ZRangeKind) -> String { let formatted = val.starts_with('(') || val.starts_with('[') || val == "+" || val == "-"; @@ -246,6 +248,7 @@ pub fn check_lex_str(val: String, kind: &ZRangeKind) -> String { } /// Parse the response from `FUNCTION LIST`. +#[cfg(feature = "i-scripts")] fn parse_functions(value: &RedisValue) -> Result, RedisError> { if let RedisValue::Array(functions) = value { let mut out = Vec::with_capacity(functions.len()); @@ -276,6 +279,7 @@ fn parse_functions(value: &RedisValue) -> Result, RedisError> { } /// Check and parse the response to `FUNCTION LIST`. +#[cfg(feature = "i-scripts")] pub fn value_to_functions(value: &RedisValue, name: &str) -> Result, RedisError> { if let RedisValue::Array(ref libraries) = value { for library in libraries.iter() { @@ -299,7 +303,7 @@ pub fn value_to_functions(value: &RedisValue, name: &str) -> Result(ft: Fut, timeout: Duration) -> Result +pub async fn timeout(ft: Fut, timeout: Duration) -> Result where E: Into, Fut: Future>, @@ -431,7 +435,7 @@ where check_blocking_policy(inner, &command).await?; client.send_command(command)?; - apply_timeout(rx, timeout_dur) + timeout(rx, timeout_dur) .and_then(|r| async { r }) .map_err(move |error| { set_bool_atomic(&timed_out, true); @@ -490,7 +494,7 @@ where check_blocking_policy(inner, &command).await?; client.send_command(command)?; - apply_timeout(rx, timeout_dur) + timeout(rx, timeout_dur) .and_then(|r| async { r }) .map_err(move |error| { set_bool_atomic(&timed_out, true); @@ -527,17 +531,6 @@ pub async fn backchannel_request_response( backchannel.request_response(inner, &server, command).await } -pub fn check_empty_keys(keys: &MultipleKeys) -> Result<(), RedisError> { - if 
keys.len() == 0 { - Err(RedisError::new( - RedisErrorKind::InvalidArgument, - "At least one key is required.", - )) - } else { - Ok(()) - } -} - /// Check for a scan pattern without a hash tag, or with a wildcard in the hash tag. /// /// These patterns will result in scanning a random node if used against a clustered redis. @@ -596,7 +589,7 @@ pub fn add_jitter(delay: u64, jitter: u32) -> u64 { if jitter == 0 { delay } else { - delay.saturating_add(rand::thread_rng().gen_range(0 .. jitter as u64)) + delay.saturating_add(rand::thread_rng().gen_range(0..jitter as u64)) } } diff --git a/tests/README.md b/tests/README.md index bd0500e9..9be45a55 100644 --- a/tests/README.md +++ b/tests/README.md @@ -2,13 +2,17 @@ Tests are organized by category, similar to the [commands](../src/commands) folder. -By default, most tests run 8 times based on the following configuration parameters: clustered vs centralized servers, pipelined vs non-pipelined clients, and RESP2 vs RESP3 mode. Helper macros exist to make this easy so each test only has to be written once. +By default, most tests run 8 times based on the following configuration parameters: clustered vs centralized servers, +pipelined vs non-pipelined clients, and RESP2 vs RESP3 mode. Helper macros exist to make this easy so each test only has +to be written once. -**The tests require Redis version >=6.2** As of writing the default version used is 7.2.1. +**The tests require Redis version >=6.2** As of writing the default version used is 7.2.4. ## Installation -The [environ](environ) file will bootstrap the local environment with all the environment variables and system settings necessary to run the tests. It will prompt the caller for certain system-wide modifications if necessary. The `/etc/hosts` modifications are only necessary if you wish to manually run the TLS tests outside the docker network. 
+The [environ](environ) file will bootstrap the local environment with all the environment variables and system settings +necessary to run the tests. It will prompt the caller for certain system-wide modifications if necessary. +The `/etc/hosts` modifications are only necessary if you wish to manually run the TLS tests outside the docker network. In order to run the testing scripts the following must be installed: @@ -18,22 +22,29 @@ In order to run the testing scripts the following must be installed: ## Running Tests -The runner scripts will set up the Redis servers and run the tests inside docker. +The runner scripts will set up the Redis servers and run the tests inside docker. * [all-features](runners/all-features.sh) will run tests with all features (except sentinel tests). * [default-features](runners/default-features.sh) will run tests with default features (except sentinel tests). * [default-nil-types](runners/default-nil-types.sh) will run tests with `default-nil-types`. * [no-features](runners/no-features.sh) will run the tests without any of the feature flags. -* [sentinel-features](runners/sentinel-features.sh) will run the centralized tests against a sentinel deployment. This is the only test runner that requires the sentinel deployment via docker-compose. -* [cluster-rustls](runners/cluster-rustls.sh) will set up a cluster with TLS enabled and run the cluster tests against it with `rustls`. -* [cluster-native-tls](runners/cluster-native-tls.sh) will set up a cluster with TLS enabled and run the cluster tests against it with `native-tls`. -* [redis-stack](runners/redis-stack.sh) will set up a centralized `redis/redis-stack` container and run with `redis-stack` features. -* [everything](runners/everything.sh) will run all of the above scripts. +* [sentinel-features](runners/sentinel-features.sh) will run the centralized tests against a sentinel deployment. This + is the only test runner that requires the sentinel deployment via docker-compose. 
+* [cluster-rustls](runners/cluster-rustls.sh) will set up a cluster with TLS enabled and run the cluster tests against + it with `rustls`. +* [cluster-native-tls](runners/cluster-native-tls.sh) will set up a cluster with TLS enabled and run the cluster tests + against it with `native-tls`. +* [redis-stack](runners/redis-stack.sh) will set up a centralized `redis/redis-stack` container and run + with `redis-stack` features. +* [everything](runners/everything.sh) will run all of the above scripts. These scripts will pass through any extra argv so callers can filter tests as needed. See the [CI configuration](../.circleci/config.yml) for more information. +There's also a [debug container](runners/docker-bash.sh) script that can be used to run `redis-cli` inside the docker +network. + ### Example ``` @@ -42,28 +53,40 @@ cd path/to/fred ./tests/runners/all-features.sh ``` +### Checking Interface Features + +There's [a build script](scripts/check_features.sh) that +runs `cargo clippy --no-default-features --features -- -Dwarnings` on each of the interface +features individually, without any other features. + +``` +cd path/to/fred +./tests/scripts/check_features.sh +``` + ## Adding Tests Adding tests is straightforward with the help of some macros and utility functions. -Note: When writing tests that operate on multiple keys be sure to use a [hash_tag](https://redis.io/topics/cluster-spec#keys-hash-tags) so that all keys used by a command exist on the same node in a cluster. +Note: When writing tests that operate on multiple keys be sure to use +a [hash_tag](https://redis.io/topics/cluster-spec#keys-hash-tags) so that all keys used by a command exist on the same +node in a cluster. 1. If necessary create a new file in the appropriate folder. -2. Create a new async function in the appropriate file. This function should take a `RedisClient` and `RedisConfig` as arguments and should return a `Result<(), RedisError>`. 
The client will already be connected when this function runs. +2. Create a new async function in the appropriate file. This function should take a `RedisClient` and `RedisConfig` as + arguments and should return a `Result<(), RedisError>`. The client will already be connected when this function runs. 3. This new function should **not** be marked as a `#[test]` or `#[tokio::test]` -4. Call the test from the appropriate [integration/cluster.rs](integration/cluster.rs) or [integration/centralized.rs](integration/centralized.rs) files, or both. Create a wrapping `mod` block with the same name as the test's folder if necessary. -5. Use `centralized_test!` or `cluster_test!` to generate tests in the appropriate module. Centralized tests will be automatically converted to sentinel tests if using the sentinel testing features. +4. Call the test from the appropriate [integration/cluster.rs](integration/cluster.rs) + or [integration/centralized.rs](integration/centralized.rs) files, or both. Create a wrapping `mod` block with the + same name as the test's folder if necessary. +5. Use `centralized_test!` or `cluster_test!` to generate tests in the appropriate module. Centralized tests will be + converted to sentinel tests or redis-stack tests if needed. -Tests that use this pattern will run 8 times to check the functionality against clustered and centralized redis servers with using both pipelined and non-pipelined clients in RESP2 and RESP3 mode. +Tests that use this pattern will run 8 times to check the functionality against clustered and centralized redis servers +with using both pipelined and non-pipelined clients in RESP2 and RESP3 mode. ## Notes -* Since we're mutating shared state in external redis servers with these tests it's necessary to run the tests with `--test-threads=1`. The test runner scripts will do this automatically. +* Since we're mutating shared state in external redis servers with these tests it's necessary to run the tests + with `--test-threads=1`. 
The test runner scripts will do this automatically. * **The tests will periodically call `flushall` before each test iteration.** - -## Contributing - -The following modules still need better test coverage: - -* ACL commands -* Cluster commands. This one is more complicated though since many of these modify the cluster. \ No newline at end of file diff --git a/tests/docker/runners/bash/all-features.sh b/tests/docker/runners/bash/all-features.sh index e599e6b2..0e12ef68 100755 --- a/tests/docker/runners/bash/all-features.sh +++ b/tests/docker/runners/bash/all-features.sh @@ -15,7 +15,7 @@ done # those features individually. FEATURES="network-logs custom-reconnect-errors serde-json blocking-encoding full-tracing monitor metrics sentinel-client subscriber-client dns debug-ids - replicas client-tracking codec sha-1 transactions" + replicas sha-1 transactions i-all" if [ -z "$FRED_CI_NEXTEST" ]; then cargo test --release --lib --tests --features "$FEATURES" -- --test-threads=1 "$@" diff --git a/tests/docker/runners/bash/cluster-rustls.sh b/tests/docker/runners/bash/cluster-rustls.sh index 2be756e2..572a7734 100755 --- a/tests/docker/runners/bash/cluster-rustls.sh +++ b/tests/docker/runners/bash/cluster-rustls.sh @@ -10,7 +10,7 @@ do fi done -FEATURES="enable-rustls transactions" +FEATURES="enable-rustls transactions i-all" if [ -z "$FRED_CI_NEXTEST" ]; then FRED_CI_TLS=true cargo test --release --lib --tests --features "$FEATURES" -- --test-threads=1 "$@" diff --git a/tests/docker/runners/bash/cluster-tls.sh b/tests/docker/runners/bash/cluster-tls.sh index 8edae03d..93e5c465 100755 --- a/tests/docker/runners/bash/cluster-tls.sh +++ b/tests/docker/runners/bash/cluster-tls.sh @@ -10,7 +10,7 @@ do fi done -FEATURES="enable-native-tls vendored-openssl transactions" +FEATURES="enable-native-tls vendored-openssl transactions i-all" # https://github.com/sfackler/rust-native-tls/issues/143 echo "This may not work on Mac" diff --git 
a/tests/docker/runners/bash/default-features.sh b/tests/docker/runners/bash/default-features.sh index 9cad8ae9..f147c3eb 100755 --- a/tests/docker/runners/bash/default-features.sh +++ b/tests/docker/runners/bash/default-features.sh @@ -11,7 +11,7 @@ do done if [ -z "$FRED_CI_NEXTEST" ]; then - cargo test --release --lib --tests -- --test-threads=1 "$@" + cargo test --release --lib --tests --features "i-all" -- --test-threads=1 "$@" else - cargo nextest run --release --lib --tests --test-threads=1 "$@" + cargo nextest run --release --lib --tests --features "i-all" --test-threads=1 "$@" fi \ No newline at end of file diff --git a/tests/docker/runners/bash/default-nil-types.sh b/tests/docker/runners/bash/default-nil-types.sh index 7747a257..8e7aa87c 100755 --- a/tests/docker/runners/bash/default-nil-types.sh +++ b/tests/docker/runners/bash/default-nil-types.sh @@ -10,7 +10,7 @@ do fi done -FEATURES="network-logs serde-json debug-ids replicas client-tracking default-nil-types" +FEATURES="network-logs serde-json debug-ids replicas i-all default-nil-types" if [ -z "$FRED_CI_NEXTEST" ]; then cargo test --release --lib --tests --features "$FEATURES" -- --test-threads=1 "$@" diff --git a/tests/docker/runners/bash/mocks.sh b/tests/docker/runners/bash/mocks.sh index e3011a5d..585fd118 100755 --- a/tests/docker/runners/bash/mocks.sh +++ b/tests/docker/runners/bash/mocks.sh @@ -1,7 +1,7 @@ #!/bin/bash if [ -z "$FRED_CI_NEXTEST" ]; then - cargo test --release --lib --features "mocks" + cargo test --release --lib --features "mocks i-keys" else - cargo nextest run --release --lib --features "mocks" + cargo nextest run --release --lib --features "mocks i-keys" fi \ No newline at end of file diff --git a/tests/docker/runners/bash/no-features.sh b/tests/docker/runners/bash/no-features.sh index cca05cd1..bef0d77d 100755 --- a/tests/docker/runners/bash/no-features.sh +++ b/tests/docker/runners/bash/no-features.sh @@ -11,7 +11,7 @@ do done if [ -z "$FRED_CI_NEXTEST" ]; then - cargo test 
--release --lib --tests --no-default-features -- --test-threads=1 "$@" + cargo test --release --lib --tests --no-default-features --features "i-all" -- --test-threads=1 "$@" else - cargo nextest run --release --lib --tests --no-default-features --test-threads=1 "$@" + cargo nextest run --release --lib --tests --no-default-features --features "i-all" --test-threads=1 "$@" fi \ No newline at end of file diff --git a/tests/docker/runners/bash/redis-stack.sh b/tests/docker/runners/bash/redis-stack.sh index bcf36f1a..b8eaa3f7 100755 --- a/tests/docker/runners/bash/redis-stack.sh +++ b/tests/docker/runners/bash/redis-stack.sh @@ -10,7 +10,7 @@ do fi done -FEATURES="network-logs serde-json debug-ids redis-stack" +FEATURES="network-logs serde-json debug-ids i-redis-stack i-all" if [ -z "$FRED_CI_NEXTEST" ]; then cargo test --release --lib --tests --features "$FEATURES" -- --test-threads=1 "$@" diff --git a/tests/docker/runners/bash/sentinel-features.sh b/tests/docker/runners/bash/sentinel-features.sh index 15856451..6488ce10 100755 --- a/tests/docker/runners/bash/sentinel-features.sh +++ b/tests/docker/runners/bash/sentinel-features.sh @@ -10,7 +10,7 @@ do fi done -FEATURES="network-logs debug-ids sentinel-auth replicas" +FEATURES="network-logs debug-ids sentinel-auth replicas i-all" if [ -z "$FRED_CI_NEXTEST" ]; then cargo test --release --lib --tests --features "$FEATURES" -- --test-threads=1 "$@" diff --git a/tests/docker/runners/bash/unix-socket.sh b/tests/docker/runners/bash/unix-socket.sh index 8e881958..f116027f 100755 --- a/tests/docker/runners/bash/unix-socket.sh +++ b/tests/docker/runners/bash/unix-socket.sh @@ -10,7 +10,7 @@ do fi done -FEATURES="network-logs subscriber-client debug-ids transactions unix-sockets" +FEATURES="network-logs subscriber-client debug-ids transactions unix-sockets i-all" if [ -z "$FRED_CI_NEXTEST" ]; then cargo test --release --lib --tests --features "$FEATURES" -- --test-threads=1 "$@" diff --git 
a/tests/docker/runners/images/base.dockerfile b/tests/docker/runners/images/base.dockerfile index 498fcf8b..124855c8 100644 --- a/tests/docker/runners/images/base.dockerfile +++ b/tests/docker/runners/images/base.dockerfile @@ -1,4 +1,6 @@ -FROM rust:1.75.0-slim-buster +# https://github.com/docker/for-mac/issues/5548#issuecomment-1029204019 +# FROM rust:1.77-slim-buster +FROM rust:1.77-slim-bullseye WORKDIR /project @@ -17,7 +19,7 @@ ARG FRED_REDIS_SENTINEL_HOST ARG FRED_REDIS_SENTINEL_PORT ARG CIRCLECI_TESTS -RUN USER=root apt-get update && apt-get install -y build-essential libssl-dev dnsutils curl pkg-config +RUN USER=root apt-get update && apt-get install -y build-essential libssl-dev dnsutils curl pkg-config cmake RUN echo "REDIS_VERSION=$REDIS_VERSION" # For debugging diff --git a/tests/docker/runners/images/ci.dockerfile b/tests/docker/runners/images/ci.dockerfile index fdf51e6a..4476ec57 100644 --- a/tests/docker/runners/images/ci.dockerfile +++ b/tests/docker/runners/images/ci.dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.75.0-slim-buster +FROM rust:1.77-slim-buster WORKDIR /project # circleci doesn't mount volumes with a remote docker engine so we have to copy everything @@ -20,7 +20,7 @@ ARG FRED_REDIS_SENTINEL_HOST ARG FRED_REDIS_SENTINEL_PORT ARG CIRCLECI_TESTS -RUN USER=root apt-get update && apt-get install -y build-essential libssl-dev dnsutils +RUN USER=root apt-get update && apt-get install -y build-essential libssl-dev dnsutils cmake RUN echo "REDIS_VERSION=$REDIS_VERSION" # For debugging diff --git a/tests/docker/runners/images/debug.dockerfile b/tests/docker/runners/images/debug.dockerfile index 498fcf8b..124855c8 100644 --- a/tests/docker/runners/images/debug.dockerfile +++ b/tests/docker/runners/images/debug.dockerfile @@ -1,4 +1,6 @@ -FROM rust:1.75.0-slim-buster +# https://github.com/docker/for-mac/issues/5548#issuecomment-1029204019 +# FROM rust:1.77-slim-buster +FROM rust:1.77-slim-bullseye WORKDIR /project @@ -17,7 +19,7 @@ ARG 
FRED_REDIS_SENTINEL_HOST ARG FRED_REDIS_SENTINEL_PORT ARG CIRCLECI_TESTS -RUN USER=root apt-get update && apt-get install -y build-essential libssl-dev dnsutils curl pkg-config +RUN USER=root apt-get update && apt-get install -y build-essential libssl-dev dnsutils curl pkg-config cmake RUN echo "REDIS_VERSION=$REDIS_VERSION" # For debugging diff --git a/tests/integration/acl/mod.rs b/tests/integration/acl/mod.rs index 4d15fa7e..52a3673f 100644 --- a/tests/integration/acl/mod.rs +++ b/tests/integration/acl/mod.rs @@ -26,7 +26,7 @@ pub async fn should_auth_as_test_user(client: RedisClient, _: RedisConfig) -> Re let (username, password) = check_env_creds(); if let Some(password) = password { client.auth(username, password).await?; - client.get("foo").await?; + client.ping().await?; } Ok(()) @@ -41,7 +41,7 @@ pub async fn should_auth_as_test_user_via_config(_: RedisClient, mut config: Red let client = RedisClient::new(config, None, None, None); client.connect(); client.wait_for_connect().await?; - client.get("foo").await?; + client.ping().await?; } Ok(()) diff --git a/tests/integration/centralized.rs b/tests/integration/centralized.rs index 573ae80f..21fee883 100644 --- a/tests/integration/centralized.rs +++ b/tests/integration/centralized.rs @@ -1,3 +1,4 @@ +#[cfg(feature = "i-keys")] mod keys { centralized_test!(keys, should_handle_missing_keys); centralized_test!(keys, should_set_and_get_a_value); @@ -29,7 +30,7 @@ mod keys { centralized_test!(keys, should_get_keys_from_pool_in_a_stream); } -#[cfg(feature = "transactions")] +#[cfg(all(feature = "transactions", feature = "i-keys"))] mod multi { centralized_test!(multi, should_run_get_set_trx); @@ -48,44 +49,50 @@ mod other { #[cfg(feature = "metrics")] centralized_test!(other, should_track_size_stats); - + #[cfg(all(feature = "i-client", feature = "i-lists"))] centralized_test!(other, should_automatically_unblock); + #[cfg(all(feature = "i-client", feature = "i-lists"))] centralized_test!(other, 
should_manually_unblock); + #[cfg(all(feature = "i-client", feature = "i-lists"))] centralized_test!(other, should_error_when_blocked); + #[cfg(all(feature = "i-keys", feature = "i-hashes"))] centralized_test!(other, should_smoke_test_from_redis_impl); centralized_test!(other, should_safely_change_protocols_repeatedly); + #[cfg(feature = "i-keys")] centralized_test!(other, should_pipeline_all); + #[cfg(all(feature = "i-keys", feature = "i-hashes"))] centralized_test!(other, should_pipeline_all_error_early); + #[cfg(feature = "i-keys")] centralized_test!(other, should_pipeline_last); + #[cfg(all(feature = "i-keys", feature = "i-hashes"))] centralized_test!(other, should_pipeline_try_all); + #[cfg(feature = "i-server")] centralized_test!(other, should_use_all_cluster_nodes_repeatedly); centralized_test!(other, should_gracefully_quit); + #[cfg(feature = "i-lists")] centralized_test!(other, should_support_options_with_pipeline); + #[cfg(feature = "i-keys")] centralized_test!(other, should_reuse_pipeline); + #[cfg(all(feature = "i-keys", feature = "i-lists"))] centralized_test!(other, should_manually_connect_twice); - #[cfg(feature = "transactions")] + #[cfg(all(feature = "transactions", feature = "i-keys"))] centralized_test!(other, should_support_options_with_trx); //#[cfg(feature = "dns")] // centralized_test!(other, should_use_trust_dns); // centralized_test!(other, should_test_high_concurrency_pool); - #[cfg(feature = "partial-tracing")] + #[cfg(all(feature = "partial-tracing", feature = "i-keys"))] centralized_test!(other, should_use_tracing_get_set); #[cfg(feature = "subscriber-client")] centralized_test!(other, should_ping_with_subscriber_client); - #[cfg(feature = "replicas")] + #[cfg(all(feature = "replicas", feature = "i-keys"))] centralized_test!(other, should_replica_set_and_get); - #[cfg(feature = "replicas")] + #[cfg(all(feature = "replicas", feature = "i-keys"))] centralized_test!(other, should_replica_set_and_get_not_lazy); - #[cfg(feature = 
"replicas")] + #[cfg(all(feature = "replicas", feature = "i-keys"))] centralized_test!(other, should_pipeline_with_replicas); - - #[cfg(feature = "codec")] - centralized_test!(other, should_use_resp3_codec_example); - #[cfg(feature = "codec")] - centralized_test!(other, should_use_resp2_codec_example); } mod pool { @@ -93,6 +100,7 @@ mod pool { centralized_test!(pool, should_connect_and_ping_static_pool_two_conn); } +#[cfg(feature = "i-hashes")] mod hashes { centralized_test!(hashes, should_hset_and_hget); centralized_test!(hashes, should_hset_and_hdel); @@ -109,6 +117,7 @@ mod hashes { centralized_test!(hashes, should_get_values); } +#[cfg(feature = "i-pubsub")] mod pubsub { centralized_test!(pubsub, should_publish_and_recv_messages); centralized_test!(pubsub, should_psubscribe_and_recv_messages); @@ -121,6 +130,7 @@ mod pubsub { centralized_test!(pubsub, should_get_pubsub_shard_numsub); } +#[cfg(feature = "i-hyperloglog")] mod hyperloglog { centralized_test!(hyperloglog, should_pfadd_elements); @@ -130,12 +140,17 @@ mod hyperloglog { mod scanning { - centralized_test!(scanning, should_scan_keyspace); - centralized_test!(scanning, should_hscan_hash); - centralized_test!(scanning, should_sscan_set); - centralized_test!(scanning, should_zscan_sorted_set); + #[cfg(feature = "i-keys")] + cluster_test!(scanning, should_scan_keyspace); + #[cfg(feature = "i-hashes")] + cluster_test!(scanning, should_hscan_hash); + #[cfg(feature = "i-sets")] + cluster_test!(scanning, should_sscan_set); + #[cfg(feature = "i-sorted-sets")] + cluster_test!(scanning, should_zscan_sorted_set); } +#[cfg(feature = "i-slowlog")] mod slowlog { centralized_test!(slowlog, should_read_slowlog_length); @@ -143,6 +158,7 @@ mod slowlog { centralized_test!(slowlog, should_reset_slowlog); } +#[cfg(feature = "i-server")] mod server { centralized_test!(server, should_flushall); @@ -155,6 +171,7 @@ mod server { centralized_test!(server, should_do_bgrewriteaof); } +#[cfg(feature = "i-sets")] mod sets { 
centralized_test!(sets, should_sadd_elements); @@ -174,6 +191,7 @@ mod sets { centralized_test!(sets, should_sunionstore_elements); } +#[cfg(feature = "i-memory")] pub mod memory { centralized_test!(memory, should_run_memory_doctor); @@ -183,6 +201,7 @@ pub mod memory { centralized_test!(memory, should_run_memory_usage); } +#[cfg(feature = "i-scripts")] pub mod lua { #[cfg(feature = "sha-1")] @@ -203,6 +222,7 @@ pub mod lua { centralized_test!(lua, should_function_delete); centralized_test!(lua, should_function_list); centralized_test!(lua, should_function_list_multiple); + #[cfg(feature = "i-keys")] centralized_test!(lua, should_function_fcall_getset); centralized_test!(lua, should_function_fcall_echo); centralized_test!(lua, should_function_fcall_ro_echo); @@ -215,6 +235,7 @@ pub mod lua { centralized_test!(lua, should_create_function_from_name); } +#[cfg(feature = "i-sorted-sets")] pub mod sorted_sets { centralized_test!(sorted_sets, should_bzpopmin); @@ -248,6 +269,7 @@ pub mod sorted_sets { centralized_test!(sorted_sets, should_zmscore_values); } +#[cfg(feature = "i-lists")] pub mod lists { centralized_test!(lists, should_blpop_values); centralized_test!(lists, should_brpop_values); @@ -263,14 +285,21 @@ pub mod lists { centralized_test!(lists, should_lrange_values); centralized_test!(lists, should_lrem_values); centralized_test!(lists, should_lset_values); + #[cfg(feature = "i-keys")] centralized_test!(lists, should_ltrim_values); centralized_test!(lists, should_rpop_values); centralized_test!(lists, should_rpoplpush_values); centralized_test!(lists, should_lmove_values); centralized_test!(lists, should_rpush_values); centralized_test!(lists, should_rpushx_values); + centralized_test!(lists, should_sort_int_list); + centralized_test!(lists, should_sort_alpha_list); + centralized_test!(lists, should_sort_int_list_with_limit); + #[cfg(feature = "i-keys")] + centralized_test!(lists, should_sort_int_list_with_patterns); } +#[cfg(feature = "i-geo")] pub mod geo { 
centralized_test!(geo, should_geoadd_values); @@ -282,12 +311,14 @@ pub mod geo { centralized_test!(geo, should_geosearch_values); } +#[cfg(feature = "i-acl")] pub mod acl { centralized_test!(acl, should_auth_as_test_user); centralized_test!(acl, should_auth_as_test_user_via_config); centralized_test!(acl, should_run_acl_getuser); } +#[cfg(feature = "i-streams")] mod streams { centralized_test!(streams, should_xinfo_consumers); centralized_test!(streams, should_xinfo_groups); @@ -328,14 +359,16 @@ mod streams { centralized_test!(streams, should_xautoclaim_default); } -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] mod tracking { + #[cfg(feature = "i-keys")] centralized_test!(tracking, should_invalidate_foo_resp3); + #[cfg(feature = "i-keys")] centralized_test!(tracking, should_invalidate_foo_resp2_centralized); } // The CI settings for redis-stack only support centralized configs for now. -#[cfg(feature = "redis-json")] +#[cfg(feature = "i-redis-json")] mod redis_json { centralized_test!(redis_json, should_get_and_set_basic_obj); centralized_test!(redis_json, should_get_and_set_stringified_obj); @@ -352,7 +385,7 @@ mod redis_json { centralized_test!(redis_json, should_get_value_type); } -#[cfg(feature = "time-series")] +#[cfg(feature = "i-time-series")] mod timeseries { centralized_test!(timeseries, should_ts_add_get_and_range); centralized_test!(timeseries, should_create_alter_and_del_timeseries); diff --git a/tests/integration/cluster/mod.rs b/tests/integration/cluster/mod.rs index 73fb2816..36f5eed7 100644 --- a/tests/integration/cluster/mod.rs +++ b/tests/integration/cluster/mod.rs @@ -1,5 +1,7 @@ +#![allow(unused_imports)] use fred::{error::RedisError, interfaces::*, prelude::RedisClient, types::RedisConfig}; +#[cfg(all(feature = "i-cluster", feature = "i-client"))] pub async fn should_use_each_cluster_node(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let connections = client.active_connections().await?; diff --git 
a/tests/integration/clustered.rs b/tests/integration/clustered.rs index 791ae28a..9381d95c 100644 --- a/tests/integration/clustered.rs +++ b/tests/integration/clustered.rs @@ -1,3 +1,4 @@ +#[cfg(feature = "i-keys")] mod keys { cluster_test!(keys, should_handle_missing_keys); cluster_test!(keys, should_set_and_get_a_value); @@ -30,7 +31,7 @@ mod keys { cluster_test!(keys, should_get_keys_from_pool_in_a_stream); } -#[cfg(feature = "transactions")] +#[cfg(all(feature = "transactions", feature = "i-keys"))] mod multi { cluster_test!(multi, should_run_get_set_trx); @@ -52,43 +53,50 @@ mod other { cluster_test!(other, should_track_size_stats); cluster_test!(other, should_split_clustered_connection); + #[cfg(feature = "i-server")] cluster_test!(other, should_run_flushall_cluster); + #[cfg(all(feature = "i-client", feature = "i-lists"))] cluster_test!(other, should_automatically_unblock); + #[cfg(all(feature = "i-client", feature = "i-lists"))] cluster_test!(other, should_manually_unblock); + #[cfg(all(feature = "i-client", feature = "i-lists"))] cluster_test!(other, should_error_when_blocked); cluster_test!(other, should_safely_change_protocols_repeatedly); + #[cfg(feature = "i-keys")] cluster_test!(other, should_pipeline_all); + #[cfg(all(feature = "i-keys", feature = "i-hashes"))] cluster_test!(other, should_pipeline_all_error_early); + #[cfg(feature = "i-keys")] cluster_test!(other, should_pipeline_last); + #[cfg(all(feature = "i-keys", feature = "i-hashes"))] cluster_test!(other, should_pipeline_try_all); + #[cfg(feature = "i-server")] cluster_test!(other, should_use_all_cluster_nodes_repeatedly); cluster_test!(other, should_gracefully_quit); + #[cfg(feature = "i-lists")] cluster_test!(other, should_support_options_with_pipeline); + #[cfg(feature = "i-keys")] cluster_test!(other, should_reuse_pipeline); + #[cfg(all(feature = "i-keys", feature = "i-lists"))] cluster_test!(other, should_manually_connect_twice); - #[cfg(feature = "transactions")] + #[cfg(all(feature = 
"transactions", feature = "i-keys"))] cluster_test!(other, should_support_options_with_trx); //#[cfg(feature = "dns")] // cluster_test!(other, should_use_trust_dns); - #[cfg(feature = "partial-tracing")] + #[cfg(all(feature = "partial-tracing", feature = "i-keys"))] cluster_test!(other, should_use_tracing_get_set); #[cfg(feature = "subscriber-client")] cluster_test!(other, should_ping_with_subscriber_client); - #[cfg(feature = "replicas")] + #[cfg(all(feature = "replicas", feature = "i-keys"))] cluster_test!(other, should_replica_set_and_get); - #[cfg(feature = "replicas")] + #[cfg(all(feature = "replicas", feature = "i-keys"))] cluster_test!(other, should_replica_set_and_get_not_lazy); - #[cfg(feature = "replicas")] + #[cfg(all(feature = "replicas", feature = "i-keys"))] cluster_test!(other, should_use_cluster_replica_without_redirection); - #[cfg(feature = "replicas")] + #[cfg(all(feature = "replicas", feature = "i-keys"))] cluster_test!(other, should_pipeline_with_replicas); - - #[cfg(feature = "codec")] - cluster_test!(other, should_use_resp3_codec_example); - #[cfg(feature = "codec")] - cluster_test!(other, should_use_resp2_codec_example); } mod pool { @@ -96,6 +104,7 @@ mod pool { cluster_test!(pool, should_connect_and_ping_static_pool_two_conn); } +#[cfg(feature = "i-hashes")] mod hashes { cluster_test!(hashes, should_hset_and_hget); @@ -113,9 +122,11 @@ mod hashes { cluster_test!(hashes, should_get_values); } +#[cfg(feature = "i-pubsub")] mod pubsub { cluster_test!(pubsub, should_publish_and_recv_messages); + cluster_test!(pubsub, should_ssubscribe_and_recv_messages); cluster_test!(pubsub, should_psubscribe_and_recv_messages); cluster_test!(pubsub, should_unsubscribe_from_all); @@ -127,6 +138,7 @@ mod pubsub { cluster_test!(pubsub, should_get_pubsub_shard_numsub); } +#[cfg(feature = "i-hyperloglog")] mod hyperloglog { cluster_test!(hyperloglog, should_pfadd_elements); @@ -136,13 +148,19 @@ mod hyperloglog { mod scanning { + #[cfg(feature = "i-keys")] 
cluster_test!(scanning, should_scan_keyspace); + #[cfg(feature = "i-hashes")] cluster_test!(scanning, should_hscan_hash); + #[cfg(feature = "i-sets")] cluster_test!(scanning, should_sscan_set); + #[cfg(feature = "i-sorted-sets")] cluster_test!(scanning, should_zscan_sorted_set); + #[cfg(feature = "i-keys")] cluster_test!(scanning, should_scan_cluster); } +#[cfg(feature = "i-slowlog")] mod slowlog { cluster_test!(slowlog, should_read_slowlog_length); @@ -150,6 +168,7 @@ mod slowlog { cluster_test!(slowlog, should_reset_slowlog); } +#[cfg(feature = "i-server")] mod server { cluster_test!(server, should_flushall); @@ -162,6 +181,7 @@ mod server { cluster_test!(server, should_do_bgrewriteaof); } +#[cfg(feature = "i-sets")] mod sets { cluster_test!(sets, should_sadd_elements); @@ -181,6 +201,7 @@ mod sets { cluster_test!(sets, should_sunionstore_elements); } +#[cfg(feature = "i-memory")] pub mod memory { cluster_test!(memory, should_run_memory_doctor); @@ -190,6 +211,7 @@ pub mod memory { cluster_test!(memory, should_run_memory_usage); } +#[cfg(feature = "i-scripts")] pub mod lua { #[cfg(feature = "sha-1")] @@ -212,6 +234,7 @@ pub mod lua { cluster_test!(lua, should_function_delete); cluster_test!(lua, should_function_list); cluster_test!(lua, should_function_list_multiple); + #[cfg(feature = "i-keys")] cluster_test!(lua, should_function_fcall_getset); cluster_test!(lua, should_function_fcall_echo); cluster_test!(lua, should_function_fcall_ro_echo); @@ -224,6 +247,7 @@ pub mod lua { cluster_test!(lua, should_create_function_from_name); } +#[cfg(feature = "i-sorted-sets")] pub mod sorted_sets { cluster_test!(sorted_sets, should_bzpopmin); @@ -257,6 +281,7 @@ pub mod sorted_sets { cluster_test!(sorted_sets, should_zmscore_values); } +#[cfg(feature = "i-lists")] pub mod lists { cluster_test!(lists, should_blpop_values); @@ -272,14 +297,21 @@ pub mod lists { cluster_test!(lists, should_lrange_values); cluster_test!(lists, should_lrem_values); cluster_test!(lists, 
should_lset_values); + #[cfg(feature = "i-keys")] cluster_test!(lists, should_ltrim_values); cluster_test!(lists, should_rpop_values); cluster_test!(lists, should_rpoplpush_values); cluster_test!(lists, should_lmove_values); cluster_test!(lists, should_rpush_values); cluster_test!(lists, should_rpushx_values); + cluster_test!(lists, should_sort_int_list); + cluster_test!(lists, should_sort_alpha_list); + cluster_test!(lists, should_sort_int_list_with_limit); + #[cfg(feature = "replicas")] + cluster_test!(lists, should_sort_ro_int_list); } +#[cfg(feature = "i-geo")] pub mod geo { cluster_test!(geo, should_geoadd_values); @@ -291,11 +323,12 @@ pub mod geo { cluster_test!(geo, should_geosearch_values); } -#[cfg(not(feature = "redis-stack"))] +#[cfg(all(not(feature = "redis-stack"), feature = "i-acl"))] pub mod acl { cluster_test!(acl, should_run_acl_getuser); } +#[cfg(feature = "i-streams")] mod streams { cluster_test!(streams, should_xinfo_consumers); cluster_test!(streams, should_xinfo_groups); @@ -336,16 +369,19 @@ mod streams { cluster_test!(streams, should_xautoclaim_default); } +#[cfg(feature = "i-cluster")] mod cluster { + #[cfg(feature = "i-client")] cluster_test!(cluster, should_use_each_cluster_node); } -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] mod tracking { + #[cfg(feature = "i-keys")] cluster_test!(tracking, should_invalidate_foo_resp3); } -#[cfg(feature = "time-series")] +#[cfg(feature = "i-time-series")] mod timeseries { cluster_test!(timeseries, should_ts_add_get_and_range); cluster_test!(timeseries, should_create_alter_and_del_timeseries); diff --git a/tests/integration/docker.rs b/tests/integration/docker.rs index fb957cee..00cdd7da 100644 --- a/tests/integration/docker.rs +++ b/tests/integration/docker.rs @@ -5,24 +5,18 @@ use crate::integration::{ }; use bollard::{ container::{ - Config, - CreateContainerOptions, - LogOutput, - NetworkingConfig, - RemoveContainerOptions, - StartContainerOptions, + Config, 
CreateContainerOptions, LogOutput, NetworkingConfig, RemoveContainerOptions, StartContainerOptions, }, errors::Error as BollardError, exec::{CreateExecOptions, StartExecResults}, network::{ConnectNetworkOptions, ListNetworksOptions}, - ClientVersion, - Docker, - API_DEFAULT_VERSION, + ClientVersion, Docker, API_DEFAULT_VERSION, }; use bytes::Bytes; -use fred::{prelude::*, types::ClusterRouting}; +use fred::prelude::*; +use fred::types::ClusterRouting; use futures::stream::StreamExt; -use redis_protocol::resp2::decode::decode as resp2_decode; +use redis_protocol::resp2::decode::decode_bytes as resp2_decode; use std::collections::HashMap; macro_rules! e ( @@ -105,22 +99,28 @@ pub async fn run_in_redis_container(docker: &Docker, command: Vec) -> Re debug!("Connecting container to the test network..."); e!( docker - .connect_network(&test_network, ConnectNetworkOptions { - container: container_id.clone(), - ..Default::default() - }) + .connect_network( + &test_network, + ConnectNetworkOptions { + container: container_id.clone(), + ..Default::default() + } + ) .await )?; debug!("Running command: {:?}", command); let exec = e!( docker - .create_exec(&container_id, CreateExecOptions { - attach_stdout: Some(true), - attach_stderr: Some(true), - cmd: Some(command), - ..Default::default() - }) + .create_exec( + &container_id, + CreateExecOptions { + attach_stdout: Some(true), + attach_stderr: Some(true), + cmd: Some(command), + ..Default::default() + } + ) .await )? .id; @@ -197,7 +197,7 @@ pub async fn inspect_cluster(tls: bool) -> Result { let result = run_in_redis_container(&docker, cluster_slots).await?; debug!("CLUSTER SLOTS response: {}", String::from_utf8_lossy(&result)); let parsed: RedisValue = match resp2_decode(&Bytes::from(result))? 
{ - Some((frame, _)) => redis_protocol::resp2_frame_to_resp3(frame).try_into()?, + Some((frame, _)) => frame.into_resp3().try_into()?, None => { return Err(RedisError::new( RedisErrorKind::Unknown, diff --git a/tests/integration/hashes/mod.rs b/tests/integration/hashes/mod.rs index c4e36f6a..4a326ba9 100644 --- a/tests/integration/hashes/mod.rs +++ b/tests/integration/hashes/mod.rs @@ -36,8 +36,6 @@ fn assert_diff_len(values: Vec<&'static str>, value: RedisValue, len: usize) { } pub async fn should_hset_and_hget(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: i64 = client.hset("foo", ("a", 1)).await?; assert_eq!(result, 1); let result: i64 = client.hset("foo", vec![("b", 2), ("c", 3)]).await?; @@ -54,8 +52,6 @@ pub async fn should_hset_and_hget(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_hset_and_hdel(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: i64 = client.hset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; assert_eq!(result, 3); let result: i64 = client.hdel("foo", vec!["a", "b"]).await?; @@ -69,8 +65,6 @@ pub async fn should_hset_and_hdel(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_hexists(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.hset("foo", ("a", 1)).await?; let a: bool = client.hexists("foo", "a").await?; assert!(a); @@ -81,8 +75,6 @@ pub async fn should_hexists(client: RedisClient, _: RedisConfig) -> Result<(), R } pub async fn should_hgetall(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.hset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let values: HashMap = client.hgetall("foo").await?; @@ -97,8 +89,6 @@ pub async fn should_hgetall(client: RedisClient, _: RedisConfig) -> Result<(), R } pub async fn should_hincryby(client: RedisClient, _: RedisConfig) -> 
Result<(), RedisError> { - check_null!(client, "foo"); - let result: i64 = client.hincrby("foo", "a", 1).await?; assert_eq!(result, 1); let result: i64 = client.hincrby("foo", "a", 2).await?; @@ -108,8 +98,6 @@ pub async fn should_hincryby(client: RedisClient, _: RedisConfig) -> Result<(), } pub async fn should_hincryby_float(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: f64 = client.hincrbyfloat("foo", "a", 0.5).await?; assert_eq!(result, 0.5); let result: f64 = client.hincrbyfloat("foo", "a", 3.7).await?; @@ -119,8 +107,6 @@ pub async fn should_hincryby_float(client: RedisClient, _: RedisConfig) -> Resul } pub async fn should_get_keys(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.hset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let keys = client.hkeys("foo").await?; @@ -130,8 +116,6 @@ pub async fn should_get_keys(client: RedisClient, _: RedisConfig) -> Result<(), } pub async fn should_hmset(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let a: i64 = client.hget("foo", "a").await?; @@ -145,8 +129,6 @@ pub async fn should_hmset(client: RedisClient, _: RedisConfig) -> Result<(), Red } pub async fn should_hmget(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let result: Vec = client.hmget("foo", vec!["a", "b"]).await?; @@ -156,8 +138,6 @@ pub async fn should_hmget(client: RedisClient, _: RedisConfig) -> Result<(), Red } pub async fn should_hsetnx(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.hset("foo", ("a", 1)).await?; let result: bool = client.hsetnx("foo", "a", 2).await?; assert!(!result); @@ -172,8 +152,6 @@ pub async fn should_hsetnx(client: RedisClient, 
_: RedisConfig) -> Result<(), Re } pub async fn should_get_random_field(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.hmset("foo", vec![("a", 1), ("b", 2), ("c", 3)]).await?; let field: String = client.hrandfield("foo", None).await?; @@ -199,8 +177,6 @@ pub async fn should_get_random_field(client: RedisClient, _: RedisConfig) -> Res } pub async fn should_get_strlen(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let expected = "abcdefhijklmnopqrstuvwxyz"; client.hset("foo", ("a", expected)).await?; @@ -211,8 +187,6 @@ pub async fn should_get_strlen(client: RedisClient, _: RedisConfig) -> Result<() } pub async fn should_get_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.hmset("foo", vec![("a", "1"), ("b", "2")]).await?; let values: RedisValue = client.hvals("foo").await?; diff --git a/tests/integration/hyperloglog/mod.rs b/tests/integration/hyperloglog/mod.rs index 40bede38..f39c491d 100644 --- a/tests/integration/hyperloglog/mod.rs +++ b/tests/integration/hyperloglog/mod.rs @@ -1,8 +1,6 @@ use fred::prelude::*; pub async fn should_pfadd_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: i64 = client.pfadd("foo", vec!["a", "b"]).await?; assert_eq!(result, 1); let result: i64 = client.pfadd("foo", "a").await?; @@ -12,8 +10,6 @@ pub async fn should_pfadd_elements(client: RedisClient, _: RedisConfig) -> Resul } pub async fn should_pfcount_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: i64 = client.pfadd("foo", vec!["a", "b", "c"]).await?; assert_eq!(result, 1); let result: i64 = client.pfcount("foo").await?; @@ -27,10 +23,6 @@ pub async fn should_pfcount_elements(client: RedisClient, _: RedisConfig) -> Res } pub async fn should_pfmerge_elements(client: RedisClient, 
_: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - check_null!(client, "baz{1}"); - let result: i64 = client.pfadd("foo{1}", vec!["a", "b", "c"]).await?; assert_eq!(result, 1); let result: i64 = client.pfadd("bar{1}", vec!["c", "d", "e"]).await?; diff --git a/tests/integration/keys/mod.rs b/tests/integration/keys/mod.rs index 45059369..ada262c1 100644 --- a/tests/integration/keys/mod.rs +++ b/tests/integration/keys/mod.rs @@ -22,8 +22,6 @@ pub async fn should_handle_missing_keys(client: RedisClient, _: RedisConfig) -> } pub async fn should_set_and_get_a_value(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.set("foo", "bar", None, None, false).await?; assert_eq!(client.get::("foo").await?, "bar"); @@ -31,21 +29,16 @@ pub async fn should_set_and_get_a_value(client: RedisClient, _config: RedisConfi } pub async fn should_set_and_del_a_value(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: Option = client.set("foo", "bar", None, None, true).await?; assert!(result.is_none()); assert_eq!(client.get::("foo").await?, "bar"); assert_eq!(client.del::("foo").await?, 1); - check_null!(client, "foo"); Ok(()) } pub async fn should_set_with_get_argument(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.set("foo", "bar", None, None, false).await?; let result: String = client.set("foo", "baz", None, None, true).await?; @@ -58,34 +51,25 @@ pub async fn should_set_with_get_argument(client: RedisClient, _config: RedisCon } pub async fn should_rename(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "{foo}.1"); - check_null!(client, "{foo}.2"); - client.set("{foo}.1", "baz", None, None, false).await?; client.rename("{foo}.1", "{foo}.2").await?; let result: String = client.get("{foo}.2").await?; 
assert_eq!(result, "baz"); - check_null!(client, "{foo}.1"); Ok(()) } pub async fn should_error_rename_does_not_exist(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "{foo}"); client.rename("{foo}", "{foo}.bar").await } pub async fn should_renamenx(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "{foo}.1"); - check_null!(client, "{foo}.2"); - client.set("{foo}.1", "baz", None, None, false).await?; client.renamenx("{foo}.1", "{foo}.2").await?; let result: String = client.get("{foo}.2").await?; assert_eq!(result, "baz"); - check_null!(client, "{foo}.1"); Ok(()) } @@ -94,13 +78,10 @@ pub async fn should_error_renamenx_does_not_exist( client: RedisClient, _config: RedisConfig, ) -> Result<(), RedisError> { - check_null!(client, "{foo}"); client.renamenx("{foo}", "{foo}.bar").await } pub async fn should_unlink(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "{foo}1"); - client.set("{foo}1", "bar", None, None, false).await?; assert_eq!(client.get::("{foo}1").await?, "bar"); @@ -110,14 +91,11 @@ pub async fn should_unlink(client: RedisClient, _config: RedisConfig) -> Result< .await?, 1 ); - check_null!(client, "{foo}1"); Ok(()) } pub async fn should_incr_and_decr_a_value(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let count: u64 = client.incr("foo").await?; assert_eq!(count, 1); let count: u64 = client.incr_by("foo", 2).await?; @@ -131,8 +109,6 @@ pub async fn should_incr_and_decr_a_value(client: RedisClient, _config: RedisCon } pub async fn should_incr_by_float(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let count: f64 = client.incr_by_float("foo", 1.5).await?; assert_eq!(count, 1.5); let count: f64 = client.incr_by_float("foo", 2.2).await?; @@ -144,10 +120,6 @@ pub async fn should_incr_by_float(client: RedisClient, 
_config: RedisConfig) -> } pub async fn should_mset_a_non_empty_map(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "a{1}"); - check_null!(client, "b{1}"); - check_null!(client, "c{1}"); - let mut map: HashMap = HashMap::new(); // MSET args all have to map to the same cluster node map.insert("a{1}".into(), 1.into()); @@ -172,7 +144,6 @@ pub async fn should_error_mset_empty_map(client: RedisClient, _config: RedisConf } pub async fn should_expire_key(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); client.set("foo", "bar", None, None, false).await?; client.expire("foo", 1).await?; @@ -184,7 +155,6 @@ pub async fn should_expire_key(client: RedisClient, _config: RedisConfig) -> Res } pub async fn should_persist_key(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); client.set("foo", "bar", Some(Expiration::EX(5)), None, false).await?; let removed: bool = client.persist("foo").await?; @@ -197,7 +167,6 @@ pub async fn should_persist_key(client: RedisClient, _config: RedisConfig) -> Re } pub async fn should_check_ttl(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); client.set("foo", "bar", Some(Expiration::EX(5)), None, false).await?; let ttl: i64 = client.ttl("foo").await?; @@ -207,7 +176,6 @@ pub async fn should_check_ttl(client: RedisClient, _config: RedisConfig) -> Resu } pub async fn should_check_pttl(client: RedisClient, _config: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); client.set("foo", "bar", Some(Expiration::EX(5)), None, false).await?; let ttl: i64 = client.pttl("foo").await?; @@ -217,8 +185,6 @@ pub async fn should_check_pttl(client: RedisClient, _config: RedisConfig) -> Res } pub async fn should_dump_key(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.set("foo", "abc123", None, 
None, false).await?; let dump: RedisValue = client.dump("foo").await?; assert!(dump.is_bytes()); @@ -227,7 +193,6 @@ pub async fn should_dump_key(client: RedisClient, _: RedisConfig) -> Result<(), } pub async fn should_dump_and_restore_key(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); let expected = "abc123"; client.set("foo", expected, None, None, false).await?; @@ -242,7 +207,6 @@ pub async fn should_dump_and_restore_key(client: RedisClient, _: RedisConfig) -> } pub async fn should_modify_ranges(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); client.set("foo", "0123456789", None, None, false).await?; let range: String = client.getrange("foo", 0, 4).await?; @@ -256,8 +220,6 @@ pub async fn should_modify_ranges(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_getset_value(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let value: Option = client.getset("foo", "bar").await?; assert!(value.is_none()); let value: String = client.getset("foo", "baz").await?; @@ -269,8 +231,6 @@ pub async fn should_getset_value(client: RedisClient, _: RedisConfig) -> Result< } pub async fn should_getdel_value(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let value: Option = client.getdel("foo").await?; assert!(value.is_none()); @@ -284,8 +244,6 @@ pub async fn should_getdel_value(client: RedisClient, _: RedisConfig) -> Result< } pub async fn should_get_strlen(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let expected = "abcdefghijklmnopqrstuvwxyz"; client.set("foo", expected, None, None, false).await?; let len: usize = client.strlen("foo").await?; @@ -295,10 +253,6 @@ pub async fn should_get_strlen(client: RedisClient, _: RedisConfig) -> Result<() } pub async fn should_mget_values(client: RedisClient, _: RedisConfig) -> 
Result<(), RedisError> { - check_null!(client, "a{1}"); - check_null!(client, "b{1}"); - check_null!(client, "c{1}"); - let expected: Vec<(&str, RedisValue)> = vec![("a{1}", 1.into()), ("b{1}", 2.into()), ("c{1}", 3.into())]; for (key, value) in expected.iter() { client.set(*key, value.clone(), None, None, false).await?; @@ -310,9 +264,6 @@ pub async fn should_mget_values(client: RedisClient, _: RedisConfig) -> Result<( } pub async fn should_msetnx_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "a{1}"); - check_null!(client, "b{1}"); - let expected: Vec<(&str, RedisValue)> = vec![("a{1}", 1.into()), ("b{1}", 2.into())]; // do it first, check they're there @@ -335,9 +286,6 @@ pub async fn should_msetnx_values(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_copy_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "a{1}"); - check_null!(client, "b{1}"); - client.set("a{1}", "bar", None, None, false).await?; let result: i64 = client.copy("a{1}", "b{1}", None, false).await?; assert_eq!(result, 1); diff --git a/tests/integration/lists/mod.rs b/tests/integration/lists/mod.rs index 05fae7b6..aa3a2067 100644 --- a/tests/integration/lists/mod.rs +++ b/tests/integration/lists/mod.rs @@ -1,3 +1,4 @@ +use fred::types::SortOrder; use fred::{ interfaces::*, prelude::*, @@ -10,7 +11,7 @@ const COUNT: i64 = 10; async fn create_count_data(client: &RedisClient, key: &str) -> Result, RedisError> { let mut values = Vec::with_capacity(COUNT as usize); - for idx in 0 .. COUNT { + for idx in 0..COUNT { client.rpush(key, idx).await?; values.push(idx.to_string().into()); } @@ -24,7 +25,7 @@ pub async fn should_blpop_values(client: RedisClient, _: RedisConfig) -> Result< publisher.wait_for_connect().await?; let jh = tokio::spawn(async move { - for idx in 0 .. 
COUNT { + for idx in 0..COUNT { let mut result: Vec = client.blpop("foo", 30.0).await?; assert_eq!(result.pop().unwrap().as_i64().unwrap(), idx); } @@ -32,7 +33,7 @@ pub async fn should_blpop_values(client: RedisClient, _: RedisConfig) -> Result< Ok::<_, RedisError>(()) }); - for idx in 0 .. COUNT { + for idx in 0..COUNT { // the assertion below checks the length of the list, so we have to make sure not to push faster than elements are // removed sleep(Duration::from_millis(100)).await; @@ -50,7 +51,7 @@ pub async fn should_brpop_values(client: RedisClient, _: RedisConfig) -> Result< publisher.wait_for_connect().await?; let jh = tokio::spawn(async move { - for idx in 0 .. COUNT { + for idx in 0..COUNT { let mut result: Vec = client.brpop("foo", 30.0).await?; assert_eq!(result.pop().unwrap().as_i64().unwrap(), idx); } @@ -58,7 +59,7 @@ pub async fn should_brpop_values(client: RedisClient, _: RedisConfig) -> Result< Ok::<_, RedisError>(()) }); - for idx in 0 .. COUNT { + for idx in 0..COUNT { // the assertion below checks the length of the list, so we have to make sure not to push faster than elements are // removed sleep(Duration::from_millis(50)).await; @@ -76,7 +77,7 @@ pub async fn should_brpoplpush_values(client: RedisClient, _: RedisConfig) -> Re publisher.wait_for_connect().await?; let jh = tokio::spawn(async move { - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.brpoplpush("foo{1}", "bar{1}", 30.0).await?; assert_eq!(result, idx); } @@ -84,13 +85,13 @@ pub async fn should_brpoplpush_values(client: RedisClient, _: RedisConfig) -> Re Ok::<_, RedisError>(()) }); - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = publisher.lpush("foo{1}", idx).await?; assert!(result > 0); } let _ = jh.await?; - for idx in 0 .. 
COUNT { + for idx in 0..COUNT { let result: i64 = publisher.rpop("bar{1}", None).await?; assert_eq!(result, idx); } @@ -104,7 +105,7 @@ pub async fn should_blmove_values(client: RedisClient, _: RedisConfig) -> Result publisher.wait_for_connect().await?; let jh = tokio::spawn(async move { - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client .blmove("foo{1}", "bar{1}", LMoveDirection::Right, LMoveDirection::Left, 30.0) .await?; @@ -114,13 +115,13 @@ pub async fn should_blmove_values(client: RedisClient, _: RedisConfig) -> Result Ok::<_, RedisError>(()) }); - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = publisher.lpush("foo{1}", idx).await?; assert!(result > 0); } let _ = jh.await?; - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = publisher.rpop("bar{1}", None).await?; assert_eq!(result, idx); } @@ -147,7 +148,7 @@ pub async fn should_linsert_values(client: RedisClient, _: RedisConfig) -> Resul client.lpush("foo", 0).await?; let mut expected: Vec = vec!["0".into()]; - for idx in 1 .. COUNT { + for idx in 1..COUNT { let result: i64 = client.linsert("foo", ListLocation::After, idx - 1, idx).await?; assert_eq!(result, idx + 1); expected.push(idx.to_string().into()); @@ -161,7 +162,7 @@ pub async fn should_linsert_values(client: RedisClient, _: RedisConfig) -> Resul pub async fn should_lpop_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let expected = create_count_data(&client, "foo").await?; - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.lpop("foo", None).await?; assert_eq!(result, idx); } @@ -176,7 +177,7 @@ pub async fn should_lpop_values(client: RedisClient, _: RedisConfig) -> Result<( pub async fn should_lpos_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let _ = create_count_data(&client, "foo").await?; - for idx in 0 .. 
COUNT { + for idx in 0..COUNT { let result: i64 = client.lpos("foo", idx, None, None, None).await?; assert_eq!(result, idx); } @@ -184,7 +185,7 @@ pub async fn should_lpos_values(client: RedisClient, _: RedisConfig) -> Result<( let _ = create_count_data(&client, "foo").await?; let _ = create_count_data(&client, "foo").await?; - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.lpos("foo", idx, Some(2), None, None).await?; assert_eq!(result, idx + COUNT); let result: i64 = client.lpos("foo", idx, Some(3), None, None).await?; @@ -203,7 +204,7 @@ pub async fn should_lpos_values(client: RedisClient, _: RedisConfig) -> Result<( } pub async fn should_lpush_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.lpush("foo", idx).await?; assert_eq!(result, idx + 1); let result: i64 = client.lrange("foo", 0, 0).await?; @@ -220,7 +221,7 @@ pub async fn should_lpushx_values(client: RedisClient, _: RedisConfig) -> Result assert_eq!(result, 0); client.lpush("foo", 0).await?; - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.lpushx("foo", idx).await?; assert_eq!(result, idx + 2); let result: i64 = client.lrange("foo", 0, 0).await?; @@ -238,7 +239,7 @@ pub async fn should_lrange_values(client: RedisClient, _: RedisConfig) -> Result let result: Vec = client.lrange("foo", 0, COUNT).await?; assert_eq!(result, expected); - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.lrange("foo", idx, idx).await?; assert_eq!(result, idx); } @@ -248,7 +249,7 @@ pub async fn should_lrange_values(client: RedisClient, _: RedisConfig) -> Result pub async fn should_lrem_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let _ = create_count_data(&client, "foo").await?; - for idx in 0 .. 
COUNT { + for idx in 0..COUNT { let result: usize = client.lrem("foo", 1, idx).await?; assert_eq!(result, 1); } @@ -257,7 +258,7 @@ pub async fn should_lrem_values(client: RedisClient, _: RedisConfig) -> Result<( let _ = create_count_data(&client, "foo").await?; let _ = create_count_data(&client, "foo").await?; - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: usize = client.lrem("foo", 2, idx).await?; assert_eq!(result, 2); } @@ -272,7 +273,7 @@ pub async fn should_lset_values(client: RedisClient, _: RedisConfig) -> Result<( let mut expected = create_count_data(&client, "foo").await?; expected.reverse(); - for idx in 0 .. COUNT { + for idx in 0..COUNT { client.lset("foo", idx, COUNT - (idx + 1)).await?; } let result: Vec = client.lrange("foo", 0, COUNT).await?; @@ -281,6 +282,7 @@ pub async fn should_lset_values(client: RedisClient, _: RedisConfig) -> Result<( Ok(()) } +#[cfg(feature = "i-keys")] pub async fn should_ltrim_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let expected = create_count_data(&client, "foo").await?; @@ -288,10 +290,10 @@ pub async fn should_ltrim_values(client: RedisClient, _: RedisConfig) -> Result< let result: Vec = client.lrange("foo", 0, COUNT).await?; assert_eq!(result, expected); - for idx in 0 .. COUNT { + for idx in 0..COUNT { client.ltrim("foo", 0, idx).await?; let result: Vec = client.lrange("foo", 0, COUNT).await?; - assert_eq!(result, expected[0 .. (idx + 1) as usize]); + assert_eq!(result, expected[0..(idx + 1) as usize]); client.del("foo").await?; let _ = create_count_data(&client, "foo").await?; @@ -304,7 +306,7 @@ pub async fn should_rpop_values(client: RedisClient, _: RedisConfig) -> Result<( let mut expected = create_count_data(&client, "foo").await?; expected.reverse(); - for idx in 0 .. 
COUNT { + for idx in 0..COUNT { let result: i64 = client.rpop("foo", None).await?; assert_eq!(result, COUNT - (idx + 1)); } @@ -317,7 +319,7 @@ pub async fn should_rpop_values(client: RedisClient, _: RedisConfig) -> Result<( } pub async fn should_rpoplpush_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.lpush("foo{1}", idx).await?; assert_eq!(result, 1); let result: i64 = client.rpoplpush("foo{1}", "bar{1}").await?; @@ -330,7 +332,7 @@ pub async fn should_rpoplpush_values(client: RedisClient, _: RedisConfig) -> Res } pub async fn should_lmove_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.lpush("foo{1}", idx).await?; assert_eq!(result, 1); let result: i64 = client @@ -345,7 +347,7 @@ pub async fn should_lmove_values(client: RedisClient, _: RedisConfig) -> Result< } pub async fn should_rpush_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.rpush("foo", idx).await?; assert_eq!(result, idx + 1); let result: i64 = client.lrange("foo", -1, -1).await?; @@ -362,7 +364,7 @@ pub async fn should_rpushx_values(client: RedisClient, _: RedisConfig) -> Result assert_eq!(result, 0); client.rpush("foo", 0).await?; - for idx in 0 .. 
COUNT { + for idx in 0..COUNT { let result: i64 = client.rpushx("foo", idx).await?; assert_eq!(result, idx + 2); let result: i64 = client.lrange("foo", -1, -1).await?; @@ -373,3 +375,89 @@ pub async fn should_rpushx_values(client: RedisClient, _: RedisConfig) -> Result Ok(()) } + +pub async fn should_sort_int_list(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + client.lpush("foo", vec![1, 2, 3, 4, 5]).await?; + + let sorted: Vec = client.sort("foo", None, None, (), None, false, None).await?; + assert_eq!(sorted, vec![1, 2, 3, 4, 5]); + Ok(()) +} + +pub async fn should_sort_alpha_list(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + client.lpush("foo", vec!["a", "b", "c", "d", "e"]).await?; + + let sorted: Vec = client + .sort("foo", None, None, (), Some(SortOrder::Desc), true, None) + .await?; + assert_eq!(sorted, vec!["e", "d", "c", "b", "a"]); + Ok(()) +} + +pub async fn should_sort_int_list_with_limit(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + client.lpush("foo", vec![1, 2, 3, 4, 5]).await?; + + let sorted: Vec = client.sort("foo", None, Some((2, 2)), (), None, false, None).await?; + assert_eq!(sorted, vec![3, 4]); + Ok(()) +} + +#[cfg(feature = "i-keys")] +pub async fn should_sort_int_list_with_patterns(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let vals: Vec = (1..6).collect(); + let key: RedisKey = "foo".into(); + + client.lpush(&key, vals.clone()).await?; + for val in vals.iter() { + // reverse the weights + client + .set( + format!("{}_weight_{}", key.as_str().unwrap(), val), + 7 - *val, + None, + None, + false, + ) + .await?; + } + for val in vals.iter() { + client + .set( + format!("{}_val_{}", key.as_str().unwrap(), val), + *val * 2, + None, + None, + false, + ) + .await?; + } + + let sorted: Vec = client + .sort( + &key, + Some(format!("{}_weight_*", key.as_str().unwrap()).into()), + None, + format!("{}_val_*", key.as_str().unwrap()), + None, + false, + None, + ) + 
.await?; + assert_eq!(sorted, vec![10, 8, 6, 4, 2]); + + Ok(()) +} + +#[cfg(feature = "replicas")] +pub async fn should_sort_ro_int_list(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + client.lpush("foo", vec![1, 2, 3, 4, 5]).await?; + // wait for replicas to recv the command + tokio::time::sleep(Duration::from_millis(500)).await; + + let sorted: Vec = client + .replicas() + .sort_ro("foo", None, None, (), Some(SortOrder::Desc), false) + .await?; + assert_eq!(sorted, vec![5, 4, 3, 2, 1]); + Ok(()) +} diff --git a/tests/integration/lua/mod.rs b/tests/integration/lua/mod.rs index 2212b2b3..5af4033c 100644 --- a/tests/integration/lua/mod.rs +++ b/tests/integration/lua/mod.rs @@ -227,6 +227,7 @@ pub async fn should_function_list_multiple(client: RedisClient, _: RedisConfig) Ok(()) } +#[cfg(feature = "i-keys")] pub async fn should_function_fcall_getset(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { check_redis_7!(client); diff --git a/tests/integration/memory/mod.rs b/tests/integration/memory/mod.rs index a951fa3e..071491ef 100644 --- a/tests/integration/memory/mod.rs +++ b/tests/integration/memory/mod.rs @@ -1,4 +1,4 @@ -use fred::{prelude::*, types::MemoryStats}; +use fred::{cmd, prelude::*, types::MemoryStats}; pub async fn should_run_memory_doctor(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { client.memory_doctor().await?; @@ -23,7 +23,7 @@ pub async fn should_run_memory_stats(client: RedisClient, _: RedisConfig) -> Res } pub async fn should_run_memory_usage(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.set("foo", "bar", None, None, false).await?; + client.custom(cmd!("SET"), vec!["foo", "bar"]).await?; assert!(client.memory_usage::("foo", None).await? 
> 0); Ok(()) diff --git a/tests/integration/mod.rs b/tests/integration/mod.rs index 3196f3fd..1ba298cf 100644 --- a/tests/integration/mod.rs +++ b/tests/integration/mod.rs @@ -1,36 +1,49 @@ #[macro_use] pub mod utils; -pub mod docker; - +#[cfg(feature = "i-acl")] mod acl; +#[cfg(feature = "i-client")] mod client; +#[cfg(feature = "i-cluster")] mod cluster; +#[cfg(feature = "i-cluster")] +pub mod docker; +#[cfg(feature = "i-geo")] mod geo; +#[cfg(feature = "i-hashes")] mod hashes; +#[cfg(feature = "i-hyperloglog")] mod hyperloglog; +#[cfg(feature = "i-keys")] mod keys; +#[cfg(feature = "i-lists")] mod lists; +#[cfg(feature = "i-scripts")] mod lua; +#[cfg(feature = "i-memory")] mod memory; #[cfg(feature = "transactions")] mod multi; mod other; mod pool; +#[cfg(feature = "i-pubsub")] mod pubsub; +#[cfg(feature = "i-redis-json")] +mod redis_json; mod scanning; +#[cfg(feature = "i-server")] mod server; +#[cfg(feature = "i-sets")] mod sets; +#[cfg(feature = "i-slowlog")] mod slowlog; +#[cfg(feature = "i-sorted-sets")] mod sorted_sets; +#[cfg(feature = "i-streams")] mod streams; - -#[cfg(feature = "redis-json")] -mod redis_json; - -#[cfg(feature = "time-series")] +#[cfg(feature = "i-time-series")] mod timeseries; - -#[cfg(feature = "client-tracking")] +#[cfg(feature = "i-tracking")] mod tracking; #[cfg(not(feature = "mocks"))] diff --git a/tests/integration/other/mod.rs b/tests/integration/other/mod.rs index 1c03aee1..f2cc4f6c 100644 --- a/tests/integration/other/mod.rs +++ b/tests/integration/other/mod.rs @@ -1,19 +1,14 @@ use super::utils; use async_trait::async_trait; +use fred::types::Builder; use fred::{ clients::{RedisClient, RedisPool}, + cmd, error::{RedisError, RedisErrorKind}, interfaces::*, prelude::{Blocking, RedisValue}, types::{ - BackpressureConfig, - ClientUnblockFlag, - Options, - PerformanceConfig, - RedisConfig, - RedisKey, - RedisMap, - ServerConfig, + BackpressureConfig, ClientUnblockFlag, Options, PerformanceConfig, RedisConfig, RedisKey, RedisMap, 
ServerConfig, }, }; use futures::future::try_join; @@ -33,8 +28,6 @@ use tokio::time::sleep; #[cfg(feature = "subscriber-client")] use fred::clients::SubscriberClient; -#[cfg(feature = "codec")] -use fred::codec::*; #[cfg(feature = "replicas")] use fred::types::ReplicaConfig; #[cfg(feature = "dns")] @@ -46,14 +39,7 @@ use std::net::{IpAddr, SocketAddr}; #[cfg(feature = "dns")] use trust_dns_resolver::{config::*, TokioAsyncResolver}; -use fred::types::Builder; -#[cfg(feature = "codec")] -use futures::{SinkExt, StreamExt}; -#[cfg(feature = "codec")] -use tokio::net::TcpStream; -#[cfg(feature = "codec")] -use tokio_util::codec::{Decoder, Encoder, Framed}; - +#[cfg(all(feature = "i-keys", feature = "i-hashes"))] fn hash_to_btree(vals: &RedisMap) -> BTreeMap { vals .iter() @@ -61,14 +47,17 @@ fn hash_to_btree(vals: &RedisMap) -> BTreeMap { .collect() } +#[cfg(all(feature = "i-keys", feature = "i-hashes"))] fn array_to_set(vals: Vec) -> BTreeSet { vals.into_iter().collect() } +#[cfg(feature = "i-keys")] pub fn incr_atomic(size: &Arc) -> usize { size.fetch_add(1, Ordering::AcqRel).saturating_add(1) } +#[cfg(all(feature = "i-keys", feature = "i-hashes"))] pub async fn should_smoke_test_from_redis_impl(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let nested_values: RedisMap = vec![("a", 1), ("b", 2)].try_into()?; client.set("foo", "123", None, None, false).await?; @@ -99,6 +88,7 @@ pub async fn should_smoke_test_from_redis_impl(client: RedisClient, _: RedisConf Ok(()) } +#[cfg(all(feature = "i-client", feature = "i-lists"))] pub async fn should_automatically_unblock(_: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { config.blocking = Blocking::Interrupt; let client = RedisClient::new(config, None, None, None); @@ -117,6 +107,7 @@ pub async fn should_automatically_unblock(_: RedisClient, mut config: RedisConfi Ok(()) } +#[cfg(all(feature = "i-client", feature = "i-lists"))] pub async fn should_manually_unblock(client: RedisClient, _: 
RedisConfig) -> Result<(), RedisError> { let connections_ids = client.connection_ids().await; let unblock_client = client.clone(); @@ -137,6 +128,7 @@ pub async fn should_manually_unblock(client: RedisClient, _: RedisConfig) -> Res Ok(()) } +#[cfg(all(feature = "i-client", feature = "i-lists"))] pub async fn should_error_when_blocked(_: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { config.blocking = Blocking::Error; let client = RedisClient::new(config, None, None, None); @@ -201,16 +193,19 @@ pub async fn should_track_size_stats(client: RedisClient, _config: RedisConfig) Ok(()) } +#[cfg(feature = "i-server")] pub async fn should_run_flushall_cluster(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let count: i64 = 200; - for idx in 0 .. count { - client.set(format!("foo-{}", idx), idx, None, None, false).await?; + for idx in 0..count { + client + .custom(cmd!("SET"), vec![format!("foo-{}", idx), idx.to_string()]) + .await?; } client.flushall_cluster().await?; - for idx in 0 .. count { - let value: Option = client.get(format!("foo-{}", idx)).await?; + for idx in 0..count { + let value: Option = client.custom(cmd!("GET"), vec![format!("foo-{}", idx)]).await?; assert!(value.is_none()); } @@ -230,20 +225,19 @@ pub async fn should_safely_change_protocols_repeatedly( if *other_done.read() { return Ok::<_, RedisError>(()); } - let foo = String::from("foo"); - other.incr(&foo).await?; + other.ping().await?; sleep(Duration::from_millis(10)).await; } }); // switch protocols every half second - for idx in 0 .. 
15 { + for idx in 0..15 { let version = if idx % 2 == 0 { RespVersion::RESP2 } else { RespVersion::RESP3 }; - client.hello(version, None).await?; + client.hello(version, None, None).await?; sleep(Duration::from_millis(500)).await; } let _ = mem::replace(&mut *done.write(), true); @@ -254,6 +248,7 @@ pub async fn should_safely_change_protocols_repeatedly( // test to repro an intermittent race condition found while stress testing the client #[allow(dead_code)] +#[cfg(feature = "i-keys")] pub async fn should_test_high_concurrency_pool(_: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { config.blocking = Blocking::Block; let perf = PerformanceConfig { @@ -272,7 +267,7 @@ pub async fn should_test_high_concurrency_pool(_: RedisClient, mut config: Redis let mut tasks = Vec::with_capacity(num_tasks); let counter = Arc::new(AtomicUsize::new(0)); - for idx in 0 .. num_tasks { + for idx in 0..num_tasks { let client = pool.next().clone(); let counter = counter.clone(); @@ -300,6 +295,7 @@ pub async fn should_test_high_concurrency_pool(_: RedisClient, mut config: Redis Ok(()) } +#[cfg(feature = "i-keys")] pub async fn should_pipeline_all(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let pipeline = client.pipeline(); @@ -315,6 +311,7 @@ pub async fn should_pipeline_all(client: RedisClient, _: RedisConfig) -> Result< Ok(()) } +#[cfg(all(feature = "i-keys", feature = "i-hashes"))] pub async fn should_pipeline_all_error_early(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let pipeline = client.pipeline(); @@ -335,6 +332,7 @@ pub async fn should_pipeline_all_error_early(client: RedisClient, _: RedisConfig Ok(()) } +#[cfg(feature = "i-keys")] pub async fn should_pipeline_last(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let pipeline = client.pipeline(); @@ -350,6 +348,7 @@ pub async fn should_pipeline_last(client: RedisClient, _: RedisConfig) -> Result Ok(()) } +#[cfg(all(feature = "i-keys", feature = 
"i-hashes"))] pub async fn should_pipeline_try_all(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let pipeline = client.pipeline(); @@ -363,17 +362,18 @@ pub async fn should_pipeline_try_all(client: RedisClient, _: RedisConfig) -> Res Ok(()) } +#[cfg(feature = "i-server")] pub async fn should_use_all_cluster_nodes_repeatedly(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let other = client.clone(); let jh1 = tokio::spawn(async move { - for _ in 0 .. 200 { + for _ in 0..200 { other.flushall_cluster().await?; } Ok::<_, RedisError>(()) }); let jh2 = tokio::spawn(async move { - for _ in 0 .. 200 { + for _ in 0..200 { client.flushall_cluster().await?; } @@ -384,7 +384,7 @@ pub async fn should_use_all_cluster_nodes_repeatedly(client: RedisClient, _: Red Ok(()) } -#[cfg(feature = "partial-tracing")] +#[cfg(all(feature = "partial-tracing", feature = "i-keys"))] pub async fn should_use_tracing_get_set(client: RedisClient, mut config: RedisConfig) -> Result<(), RedisError> { config.tracing = TracingConfig::new(true); let (perf, policy) = (client.perf_config(), client.client_reconnect_policy()); @@ -392,7 +392,6 @@ pub async fn should_use_tracing_get_set(client: RedisClient, mut config: RedisCo let _ = client.connect(); let _ = client.wait_for_connect().await?; - check_null!(client, "foo"); let _: () = client.set("foo", "bar", None, None, false).await?; assert_eq!(client.get::("foo").await?, "bar"); Ok(()) @@ -463,10 +462,8 @@ pub async fn should_ping_with_subscriber_client(client: RedisClient, config: Red Ok(()) } -#[cfg(feature = "replicas")] +#[cfg(all(feature = "replicas", feature = "i-keys"))] pub async fn should_replica_set_and_get(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let _: () = client.set("foo", "bar", None, None, false).await?; let result: String = client.replicas().get("foo").await?; assert_eq!(result, "bar"); @@ -474,7 +471,7 @@ pub async fn 
should_replica_set_and_get(client: RedisClient, _: RedisConfig) -> Ok(()) } -#[cfg(feature = "replicas")] +#[cfg(all(feature = "replicas", feature = "i-keys"))] pub async fn should_replica_set_and_get_not_lazy(client: RedisClient, config: RedisConfig) -> Result<(), RedisError> { let policy = client.client_reconnect_policy(); let mut connection = client.connection_config().clone(); @@ -483,7 +480,6 @@ pub async fn should_replica_set_and_get_not_lazy(client: RedisClient, config: Re let _ = client.connect(); let _ = client.wait_for_connect().await?; - check_null!(client, "foo"); let _: () = client.set("foo", "bar", None, None, false).await?; let result: String = client.replicas().get("foo").await?; assert_eq!(result, "bar"); @@ -491,11 +487,8 @@ pub async fn should_replica_set_and_get_not_lazy(client: RedisClient, config: Re Ok(()) } -#[cfg(feature = "replicas")] +#[cfg(all(feature = "replicas", feature = "i-keys"))] pub async fn should_pipeline_with_replicas(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - check_null!(client, "bar"); - let _: () = client.set("foo", 1, None, None, false).await?; let _: () = client.set("bar", 2, None, None, false).await?; @@ -508,7 +501,7 @@ pub async fn should_pipeline_with_replicas(client: RedisClient, _: RedisConfig) Ok(()) } -#[cfg(feature = "replicas")] +#[cfg(all(feature = "replicas", feature = "i-keys"))] pub async fn should_use_cluster_replica_without_redirection( client: RedisClient, config: RedisConfig, @@ -538,13 +531,14 @@ pub async fn should_gracefully_quit(client: RedisClient, _: RedisConfig) -> Resu let connection = client.connect(); client.wait_for_connect().await?; - let _: i64 = client.incr("foo").await?; + client.ping().await?; client.quit().await?; let _ = connection.await; Ok(()) } +#[cfg(feature = "i-lists")] pub async fn should_support_options_with_pipeline(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let options = Options { timeout: 
Some(Duration::from_millis(100)), @@ -561,6 +555,7 @@ pub async fn should_support_options_with_pipeline(client: RedisClient, _: RedisC Ok(()) } +#[cfg(feature = "i-keys")] pub async fn should_reuse_pipeline(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let pipeline = client.pipeline(); pipeline.incr("foo").await?; @@ -570,7 +565,7 @@ pub async fn should_reuse_pipeline(client: RedisClient, _: RedisConfig) -> Resul Ok(()) } -#[cfg(feature = "transactions")] +#[cfg(all(feature = "transactions", feature = "i-keys"))] pub async fn should_support_options_with_trx(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let options = Options { max_attempts: Some(1), @@ -590,6 +585,7 @@ pub async fn should_support_options_with_trx(client: RedisClient, _: RedisConfig Ok(()) } +#[cfg(all(feature = "i-keys", feature = "i-lists"))] pub async fn should_manually_connect_twice(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let client = client.clone_new(); let _old_connection = client.connect(); @@ -610,56 +606,6 @@ pub async fn should_manually_connect_twice(client: RedisClient, _: RedisConfig) Ok(()) } -#[cfg(feature = "codec")] -pub async fn should_use_resp3_codec_example(_: RedisClient, config: RedisConfig) -> Result<(), RedisError> { - let addr = format!("{}", config.server.hosts().first().unwrap()); - let socket = TcpStream::connect(addr).await?; - let mut framed = Framed::new(socket, Resp3::default()); - - let hello = Resp3Frame::Hello { - version: RespVersion::RESP3, - auth: Some(Auth { - username: utils::read_redis_username().into(), - password: utils::read_redis_password().into(), - }), - }; - let echo_foo = resp3_encode_command("ECHO foo"); - - let _ = framed.send(hello).await?; - let response = framed.next().await.unwrap().unwrap(); - assert_eq!(response.kind(), Resp3FrameKind::Map); - - let _ = framed.send(echo_foo).await?; - let response = framed.next().await.unwrap().unwrap(); - assert_eq!(response.as_str().unwrap(), 
"foo"); - - Ok(()) -} - -#[cfg(feature = "codec")] -pub async fn should_use_resp2_codec_example(_: RedisClient, config: RedisConfig) -> Result<(), RedisError> { - let addr = format!("{}", config.server.hosts().first().unwrap()); - let socket = TcpStream::connect(addr).await?; - let mut framed = Framed::new(socket, Resp2::default()); - - let auth = resp2_encode_command(&format!( - "AUTH {} {}", - utils::read_redis_username(), - utils::read_redis_password() - )); - let echo_foo = resp2_encode_command("ECHO foo"); - - let _ = framed.send(auth).await?; - let response = framed.next().await.unwrap().unwrap(); - assert_eq!(response.as_str().unwrap(), "OK"); - - let _ = framed.send(echo_foo).await?; - let response = framed.next().await.unwrap().unwrap(); - assert_eq!(response.as_str().unwrap(), "foo"); - - Ok(()) -} - pub async fn pool_should_connect_correctly_via_init_interface( _: RedisClient, config: RedisConfig, diff --git a/tests/integration/pool/mod.rs b/tests/integration/pool/mod.rs index 9e31828d..cbc5de98 100644 --- a/tests/integration/pool/mod.rs +++ b/tests/integration/pool/mod.rs @@ -7,15 +7,14 @@ use fred::{ async fn create_and_ping_pool(config: &RedisConfig, count: usize) -> Result<(), RedisError> { let pool = RedisPool::new(config.clone(), None, None, None, count)?; - pool.connect(); - pool.wait_for_connect().await?; + pool.init().await?; for client in pool.clients().iter() { client.ping().await?; } pool.ping().await?; - let _ = pool.quit().await; + pool.quit().await?; Ok(()) } diff --git a/tests/integration/pubsub/mod.rs b/tests/integration/pubsub/mod.rs index 213c9c81..09ecf316 100644 --- a/tests/integration/pubsub/mod.rs +++ b/tests/integration/pubsub/mod.rs @@ -51,6 +51,40 @@ pub async fn should_publish_and_recv_messages(client: RedisClient, _: RedisConfi Ok(()) } +pub async fn should_ssubscribe_and_recv_messages(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { + let subscriber_client = client.clone_new(); + subscriber_client.connect(); + 
subscriber_client.wait_for_connect().await?; + subscriber_client.ssubscribe(CHANNEL1).await?; + + let subscriber_jh = tokio::spawn(async move { + let mut message_stream = subscriber_client.message_rx(); + + let mut count = 0; + while count < NUM_MESSAGES { + if let Ok(message) = message_stream.recv().await { + assert_eq!(CHANNEL1, message.channel); + assert_eq!(format!("{}-{}", FAKE_MESSAGE, count), message.value.as_str().unwrap()); + count += 1; + } + } + + Ok::<_, RedisError>(()) + }); + + sleep(Duration::from_secs(1)).await; + for idx in 0 .. NUM_MESSAGES { + // https://redis.io/commands/publish#return-value + client.spublish(CHANNEL1, format!("{}-{}", FAKE_MESSAGE, idx)).await?; + + // pubsub messages may arrive out of order due to cross-cluster broadcasting + sleep(Duration::from_millis(50)).await; + } + let _ = subscriber_jh.await?; + + Ok(()) +} + pub async fn should_psubscribe_and_recv_messages(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let channels = vec![CHANNEL1, CHANNEL2, CHANNEL3]; let subscriber_channels = channels.clone(); @@ -98,7 +132,7 @@ pub async fn should_unsubscribe_from_all(publisher: RedisClient, _: RedisConfig) let mut message_stream = subscriber.message_rx(); tokio::spawn(async move { - while let Ok(message) = message_stream.recv().await { + if let Ok(message) = message_stream.recv().await { // unsubscribe without args will result in 3 messages in this case, and none should show up here panic!("Recv unexpected pubsub message: {:?}", message); } @@ -109,10 +143,10 @@ pub async fn should_unsubscribe_from_all(publisher: RedisClient, _: RedisConfig) subscriber.unsubscribe(()).await?; sleep(Duration::from_secs(1)).await; - // do some incr commands to make sure the response buffer is flushed correctly by this point - assert_eq!(subscriber.incr::("abc{1}").await?, 1); - assert_eq!(subscriber.incr::("abc{1}").await?, 2); - assert_eq!(subscriber.incr::("abc{1}").await?, 3); + // make sure the response buffer is flushed 
correctly by this point + assert_eq!(subscriber.ping::().await?, "PONG"); + assert_eq!(subscriber.ping::().await?, "PONG"); + assert_eq!(subscriber.ping::().await?, "PONG"); subscriber.quit().await?; let _ = connection.await?; diff --git a/tests/integration/redis_json/mod.rs b/tests/integration/redis_json/mod.rs index 890eebbd..952106c0 100644 --- a/tests/integration/redis_json/mod.rs +++ b/tests/integration/redis_json/mod.rs @@ -121,7 +121,7 @@ pub async fn should_merge_objects(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_mset_and_mget(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - let values = vec![json!({ "a": "b" }), json!({ "c": "d" })]; + let values = [json!({ "a": "b" }), json!({ "c": "d" })]; let args = vec![("foo{1}", "$", values[0].clone()), ("bar{1}", "$", values[1].clone())]; let _: () = client.json_mset(args).await?; @@ -181,7 +181,7 @@ pub async fn should_modify_strings(client: RedisClient, _: RedisConfig) -> Resul pub async fn should_toggle_boolean(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let _: () = client.json_set("foo", "$", json!({ "a": 1, "b": true }), None).await?; let new_val: bool = client.json_toggle("foo", "$.b").await?; - assert_eq!(new_val, false); + assert!(!new_val); Ok(()) } diff --git a/tests/integration/scanning/mod.rs b/tests/integration/scanning/mod.rs index be22a274..d3e7dae9 100644 --- a/tests/integration/scanning/mod.rs +++ b/tests/integration/scanning/mod.rs @@ -1,11 +1,13 @@ +#![allow(dead_code)] use fred::{prelude::*, types::Scanner}; use futures::TryStreamExt; use tokio_stream::StreamExt; const SCAN_KEYS: i64 = 100; +#[cfg(feature = "i-keys")] pub async fn should_scan_keyspace(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. 
SCAN_KEYS { + for idx in 0..SCAN_KEYS { client .set(format!("foo-{}-{}", idx, "{1}"), idx, None, None, false) .await?; @@ -35,8 +37,9 @@ pub async fn should_scan_keyspace(client: RedisClient, _: RedisConfig) -> Result Ok(()) } +#[cfg(feature = "i-hashes")] pub async fn should_hscan_hash(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. SCAN_KEYS { + for idx in 0..SCAN_KEYS { let value = (format!("bar-{}", idx), idx); client.hset("foo", value).await?; } @@ -65,8 +68,9 @@ pub async fn should_hscan_hash(client: RedisClient, _: RedisConfig) -> Result<() Ok(()) } +#[cfg(feature = "i-sets")] pub async fn should_sscan_set(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. SCAN_KEYS { + for idx in 0..SCAN_KEYS { client.sadd("foo", idx).await?; } @@ -92,8 +96,9 @@ pub async fn should_sscan_set(client: RedisClient, _: RedisConfig) -> Result<(), Ok(()) } +#[cfg(feature = "i-sorted-sets")] pub async fn should_zscan_sorted_set(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. SCAN_KEYS { + for idx in 0..SCAN_KEYS { let (score, value) = (idx as f64, format!("foo-{}", idx)); client.zadd("foo", None, None, false, false, (score, value)).await?; } @@ -124,8 +129,9 @@ pub async fn should_zscan_sorted_set(client: RedisClient, _: RedisConfig) -> Res Ok(()) } +#[cfg(feature = "i-keys")] pub async fn should_scan_cluster(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. 
2000 { + for idx in 0..2000 { client.set(idx, idx, None, None, false).await?; } diff --git a/tests/integration/server/mod.rs b/tests/integration/server/mod.rs index 3d1d96b0..05b5e353 100644 --- a/tests/integration/server/mod.rs +++ b/tests/integration/server/mod.rs @@ -1,16 +1,17 @@ +use fred::cmd; use fred::prelude::*; use std::time::Duration; use tokio::time::sleep; pub async fn should_flushall(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - client.set("foo{1}", "bar", None, None, false).await?; + client.custom(cmd!("SET"), vec!["foo{1}", "bar"]).await?; if client.is_clustered() { client.flushall_cluster().await?; } else { client.flushall(false).await?; }; - let result: Option = client.get("foo{1}").await?; + let result: Option = client.custom(cmd!("GET"), vec!["foo{1}"]).await?; assert!(result.is_none()); Ok(()) @@ -43,8 +44,10 @@ pub async fn should_read_last_save(client: RedisClient, _: RedisConfig) -> Resul } pub async fn should_read_db_size(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. 
50 { - client.set(format!("foo-{}", idx), idx, None, None, false).await?; + for idx in 0..50 { + client + .custom(cmd!("SET"), vec![format!("foo-{}", idx), idx.to_string()]) + .await?; } // this is tricky to assert b/c the dbsize command isnt linked to a specific server in the cluster, hence the loop diff --git a/tests/integration/sets/mod.rs b/tests/integration/sets/mod.rs index bbc08920..55c4dd6f 100644 --- a/tests/integration/sets/mod.rs +++ b/tests/integration/sets/mod.rs @@ -20,8 +20,6 @@ fn sets_eq(lhs: &HashSet, rhs: &HashSet) -> bool { } pub async fn should_sadd_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: i64 = client.sadd("foo", "a").await?; assert_eq!(result, 1); let result: i64 = client.sadd("foo", vec!["b", "c"]).await?; @@ -33,8 +31,6 @@ pub async fn should_sadd_elements(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_scard_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: i64 = client.scard("foo").await?; assert_eq!(result, 0); @@ -47,9 +43,6 @@ pub async fn should_scard_elements(client: RedisClient, _: RedisConfig) -> Resul } pub async fn should_sdiff_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; let result: HashSet = client.sdiff(vec!["foo{1}", "bar{1}"]).await?; @@ -59,10 +52,6 @@ pub async fn should_sdiff_elements(client: RedisClient, _: RedisConfig) -> Resul } pub async fn should_sdiffstore_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - check_null!(client, "baz{1}"); - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; let result: i64 = 
client.sdiffstore("baz{1}", vec!["foo{1}", "bar{1}"]).await?; @@ -74,10 +63,6 @@ pub async fn should_sdiffstore_elements(client: RedisClient, _: RedisConfig) -> } pub async fn should_sinter_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - check_null!(client, "baz{1}"); - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; let result: HashSet = client.sinter(vec!["foo{1}", "bar{1}"]).await?; @@ -91,10 +76,6 @@ pub async fn should_sinter_elements(client: RedisClient, _: RedisConfig) -> Resu } pub async fn should_sinterstore_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - check_null!(client, "baz{1}"); - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; let result: i64 = client.sinterstore("baz{1}", vec!["foo{1}", "bar{1}"]).await?; @@ -110,7 +91,6 @@ pub async fn should_sinterstore_elements(client: RedisClient, _: RedisConfig) -> } pub async fn should_check_sismember(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); client.sadd("foo", vec![1, 2, 3, 4, 5, 6]).await?; let result: bool = client.sismember("foo", 1).await?; @@ -122,7 +102,6 @@ pub async fn should_check_sismember(client: RedisClient, _: RedisConfig) -> Resu } pub async fn should_check_smismember(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); client.sadd("foo", vec![1, 2, 3, 4, 5, 6]).await?; let result: Vec = client.smismember("foo", vec![1, 2, 7]).await?; @@ -137,8 +116,6 @@ pub async fn should_check_smismember(client: RedisClient, _: RedisConfig) -> Res } pub async fn should_read_smembers(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - client.sadd("foo", vec![1, 2, 3, 
4, 5, 6]).await?; let result: HashSet = client.smembers("foo").await?; assert!(sets_eq( @@ -157,9 +134,6 @@ pub async fn should_read_smembers(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_smove_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; client.sadd("bar{1}", 5).await?; @@ -182,8 +156,6 @@ pub async fn should_smove_elements(client: RedisClient, _: RedisConfig) -> Resul } pub async fn should_spop_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let expected = vec_to_set(vec!["1".into(), "2".into(), "3".into()]); client.sadd("foo", vec![1, 2, 3]).await?; @@ -199,8 +171,6 @@ pub async fn should_spop_elements(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_get_random_member(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let expected = vec_to_set(vec!["1".into(), "2".into(), "3".into()]); client.sadd("foo", vec![1, 2, 3]).await?; @@ -215,8 +185,6 @@ pub async fn should_get_random_member(client: RedisClient, _: RedisConfig) -> Re } pub async fn should_remove_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: i64 = client.srem("foo", 1).await?; assert_eq!(result, 0); @@ -233,9 +201,6 @@ pub async fn should_remove_elements(client: RedisClient, _: RedisConfig) -> Resu } pub async fn should_sunion_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; let result: HashSet = client.sunion(vec!["foo{1}", "bar{1}"]).await?; @@ -258,10 +223,6 @@ pub async fn should_sunion_elements(client: RedisClient, _: RedisConfig) -> Resu 
} pub async fn should_sunionstore_elements(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - check_null!(client, "baz{1}"); - client.sadd("foo{1}", vec![1, 2, 3, 4, 5, 6]).await?; client.sadd("bar{1}", vec![3, 4, 5, 6, 7, 8]).await?; let result: i64 = client.sunionstore("baz{1}", vec!["foo{1}", "bar{1}"]).await?; diff --git a/tests/integration/sorted_sets/mod.rs b/tests/integration/sorted_sets/mod.rs index e230500d..01dee2d0 100644 --- a/tests/integration/sorted_sets/mod.rs +++ b/tests/integration/sorted_sets/mod.rs @@ -29,7 +29,7 @@ async fn create_lex_data(client: &RedisClient, key: &str) -> Result Result, RedisError> { - let values: Vec<(f64, RedisValue)> = (0 .. COUNT).map(|idx| (idx as f64, idx.to_string().into())).collect(); + let values: Vec<(f64, RedisValue)> = (0..COUNT).map(|idx| (idx as f64, idx.to_string().into())).collect(); client.zadd(key, None, None, false, false, values.clone()).await?; Ok(values) @@ -41,7 +41,7 @@ pub async fn should_bzpopmin(client: RedisClient, _: RedisConfig) -> Result<(), publisher_client.wait_for_connect().await?; let jh = tokio::task::spawn(async move { - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: (String, i64, f64) = client.bzpopmin("foo", 60.0).await?; assert_eq!(result, ("foo".into(), idx, idx as f64)); } @@ -49,7 +49,7 @@ pub async fn should_bzpopmin(client: RedisClient, _: RedisConfig) -> Result<(), Ok::<(), RedisError>(()) }); - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = publisher_client .zadd("foo", None, None, false, false, (idx as f64, idx)) .await?; @@ -66,7 +66,7 @@ pub async fn should_bzpopmax(client: RedisClient, _: RedisConfig) -> Result<(), publisher_client.wait_for_connect().await?; let jh = tokio::task::spawn(async move { - for idx in 0 .. 
COUNT { + for idx in 0..COUNT { let result: (String, i64, f64) = client.bzpopmax("foo", 60.0).await?; assert_eq!(result, ("foo".into(), idx, idx as f64)); } @@ -74,7 +74,7 @@ pub async fn should_bzpopmax(client: RedisClient, _: RedisConfig) -> Result<(), Ok::<(), RedisError>(()) }); - for idx in 0 .. COUNT { + for idx in 0..COUNT { sleep(Duration::from_millis(50)).await; let result: i64 = publisher_client @@ -88,14 +88,12 @@ pub async fn should_bzpopmax(client: RedisClient, _: RedisConfig) -> Result<(), } pub async fn should_zadd_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: i64 = client .zadd("foo", None, None, false, false, vec![(0.0, 0), (1.0, 1)]) .await?; assert_eq!(result, 2); - for idx in 2 .. COUNT { + for idx in 2..COUNT { let value: i64 = client.zadd("foo", None, None, false, false, (idx as f64, idx)).await?; assert_eq!(value, 1); } @@ -178,10 +176,7 @@ pub async fn should_zadd_values(client: RedisClient, _: RedisConfig) -> Result<( } pub async fn should_zcard_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - check_null!(client, "bar"); - - for idx in 0 .. COUNT { + for idx in 0..COUNT { let values = vec![(idx as f64, idx), ((idx + COUNT) as f64, idx + COUNT)]; let result: i64 = client.zadd("foo", None, None, false, false, values).await?; assert_eq!(result, 2); @@ -196,7 +191,7 @@ pub async fn should_zcard_values(client: RedisClient, _: RedisConfig) -> Result< } pub async fn should_zcount_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. 
COUNT { + for idx in 0..COUNT { let values = vec![(idx as f64, idx), ((idx + COUNT) as f64, idx + COUNT)]; let result: i64 = client.zadd("foo", None, None, false, false, values).await?; assert_eq!(result, 2); @@ -213,11 +208,8 @@ pub async fn should_zcount_values(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_zdiff_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); - for idx in 0 .. COUNT { + for idx in 0..COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client .zadd("foo{1}", None, None, false, false, (idx as f64, idx)) @@ -236,26 +228,22 @@ pub async fn should_zdiff_values(client: RedisClient, _: RedisConfig) -> Result< None, false, false, - expected[0 .. expected.len() - 1].to_vec(), + expected[0..expected.len() - 1].to_vec(), ) .await?; let result: RedisValue = client.zdiff(vec!["foo{1}", "bar{1}"], true).await?; let expected: Vec<(RedisValue, f64)> = expected.into_iter().map(|(s, v)| (v, s)).collect(); assert_eq!( result.into_zset_result().unwrap(), - expected[expected.len() - 1 ..].to_vec() + expected[expected.len() - 1..].to_vec() ); Ok(()) } pub async fn should_zdiffstore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - check_null!(client, "baz{1}"); - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); - for idx in 0 .. COUNT { + for idx in 0..COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client .zadd("foo{1}", None, None, false, false, (idx as f64, idx)) @@ -273,7 +261,7 @@ pub async fn should_zdiffstore_values(client: RedisClient, _: RedisConfig) -> Re None, false, false, - expected[0 .. 
expected.len() - 1].to_vec(), + expected[0..expected.len() - 1].to_vec(), ) .await?; let result: i64 = client.zdiffstore("baz{1}", vec!["foo{1}", "bar{1}"]).await?; @@ -283,8 +271,6 @@ pub async fn should_zdiffstore_values(client: RedisClient, _: RedisConfig) -> Re } pub async fn should_zincrby_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); - let result: f64 = client.zincrby("foo", 1.0, "a").await?; assert_eq!(result, 1.0); let result: f64 = client.zincrby("foo", 2.5, "a").await?; @@ -296,11 +282,8 @@ pub async fn should_zincrby_values(client: RedisClient, _: RedisConfig) -> Resul } pub async fn should_zinter_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); - for idx in 0 .. COUNT { + for idx in 0..COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client .zadd("foo{1}", None, None, false, false, (idx as f64, idx)) @@ -318,7 +301,7 @@ pub async fn should_zinter_values(client: RedisClient, _: RedisConfig) -> Result None, false, false, - expected[0 .. expected.len() - 1].to_vec(), + expected[0..expected.len() - 1].to_vec(), ) .await?; let result: RedisValue = client.zinter(vec!["foo{1}", "bar{1}"], None, None, true).await?; @@ -327,20 +310,13 @@ pub async fn should_zinter_values(client: RedisClient, _: RedisConfig) -> Result // zinter returns results in descending order based on score expected.reverse(); - assert_eq!( - result.into_zset_result().unwrap(), - expected[1 .. 
expected.len()].to_vec() - ); + assert_eq!(result.into_zset_result().unwrap(), expected[1..expected.len()].to_vec()); Ok(()) } pub async fn should_zinterstore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - check_null!(client, "baz{1}"); - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); - for idx in 0 .. COUNT { + for idx in 0..COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client .zadd("foo{1}", None, None, false, false, (idx as f64, idx)) @@ -360,7 +336,7 @@ pub async fn should_zinterstore_values(client: RedisClient, _: RedisConfig) -> R None, false, false, - expected[0 .. expected.len() - 1].to_vec(), + expected[0..expected.len() - 1].to_vec(), ) .await?; let result: i64 = client @@ -372,7 +348,6 @@ pub async fn should_zinterstore_values(client: RedisClient, _: RedisConfig) -> R } pub async fn should_zlexcount(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo"); let _ = create_lex_data(&client, "foo").await?; let result: i64 = client.zlexcount("foo", "-", "+").await?; @@ -388,7 +363,7 @@ pub async fn should_zlexcount(client: RedisClient, _: RedisConfig) -> Result<(), pub async fn should_zpopmax(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let _ = create_count_data(&client, "foo").await?; - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: RedisValue = client.zpopmax("foo", None).await?; let (member, score) = result.into_zset_result().unwrap().pop().unwrap(); assert_eq!(score, (COUNT - idx - 1) as f64); @@ -403,7 +378,7 @@ pub async fn should_zpopmax(client: RedisClient, _: RedisConfig) -> Result<(), R pub async fn should_zpopmin(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let _ = create_count_data(&client, "foo").await?; - for idx in 0 .. 
COUNT { + for idx in 0..COUNT { let result: RedisValue = client.zpopmin("foo", None).await?; let (member, score) = result.into_zset_result().unwrap().pop().unwrap(); assert_eq!(score, idx as f64); @@ -418,7 +393,7 @@ pub async fn should_zpopmin(client: RedisClient, _: RedisConfig) -> Result<(), R pub async fn should_zrandmember(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let _ = create_count_data(&client, "foo").await?; - for _ in 0 .. COUNT * 2 { + for _ in 0..COUNT * 2 { let result: RedisValue = client.zrandmember("foo", Some((1, true))).await?; let (member, score) = result.into_zset_result().unwrap().pop().unwrap(); assert!(score >= 0.0 && score < COUNT as f64); @@ -436,8 +411,6 @@ pub async fn should_zrandmember(client: RedisClient, _: RedisConfig) -> Result<( } pub async fn should_zrangestore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); let _ = create_count_data(&client, "foo{1}").await?; let result: i64 = client @@ -466,7 +439,7 @@ pub async fn should_zrangebylex(client: RedisClient, _: RedisConfig) -> Result<( .zrange("foo", "a", "[c", Some(ZSort::ByLex), false, None, false) .await?; assert_eq!(old_result, new_result); - assert_eq!(old_result.into_array(), expected_values[0 .. 
3]); + assert_eq!(old_result.into_array(), expected_values[0..3]); Ok(()) } @@ -488,7 +461,7 @@ pub async fn should_zrevrangebylex(client: RedisClient, _: RedisConfig) -> Resul .zrange("foo", "[c", "a", Some(ZSort::ByLex), true, None, false) .await?; assert_eq!(old_result, new_result); - assert_eq!(old_result.into_array(), expected_values[expected_values.len() - 3 ..]); + assert_eq!(old_result.into_array(), expected_values[expected_values.len() - 3..]); Ok(()) } @@ -519,14 +492,14 @@ pub async fn should_zrangebyscore(client: RedisClient, _: RedisConfig) -> Result ) .await?; assert_eq!(old_result, new_result); - assert_eq!(old_result.into_array(), expected_values[(COUNT / 2) as usize ..]); + assert_eq!(old_result.into_array(), expected_values[(COUNT / 2) as usize..]); let lower = ZRange { - kind: ZRangeKind::Inclusive, + kind: ZRangeKind::Inclusive, range: ((COUNT / 2) as f64).try_into()?, }; let upper = ZRange { - kind: ZRangeKind::Inclusive, + kind: ZRangeKind::Inclusive, range: (COUNT as f64).try_into()?, }; let old_result: RedisValue = client.zrangebyscore("foo", &lower, &upper, false, None).await?; @@ -534,7 +507,7 @@ pub async fn should_zrangebyscore(client: RedisClient, _: RedisConfig) -> Result .zrange("foo", &lower, &upper, Some(ZSort::ByScore), false, None, false) .await?; assert_eq!(old_result, new_result); - assert_eq!(old_result.into_array(), expected_values[(COUNT / 2) as usize ..]); + assert_eq!(old_result.into_array(), expected_values[(COUNT / 2) as usize..]); Ok(()) } @@ -566,14 +539,14 @@ pub async fn should_zrevrangebyscore(client: RedisClient, _: RedisConfig) -> Res ) .await?; assert_eq!(old_result, new_result); - assert_eq!(old_result.into_array(), expected_values[0 .. 
(COUNT / 2) as usize]); + assert_eq!(old_result.into_array(), expected_values[0..(COUNT / 2) as usize]); let lower = ZRange { - kind: ZRangeKind::Inclusive, + kind: ZRangeKind::Inclusive, range: ((COUNT / 2) as f64).try_into()?, }; let upper = ZRange { - kind: ZRangeKind::Inclusive, + kind: ZRangeKind::Inclusive, range: (COUNT as f64).try_into()?, }; let old_result: RedisValue = client.zrevrangebyscore("foo", &upper, &lower, false, None).await?; @@ -581,7 +554,7 @@ pub async fn should_zrevrangebyscore(client: RedisClient, _: RedisConfig) -> Res .zrange("foo", &upper, &lower, Some(ZSort::ByScore), true, None, false) .await?; assert_eq!(old_result, new_result); - assert_eq!(old_result.into_array(), expected_values[0 .. (COUNT / 2) as usize]); + assert_eq!(old_result.into_array(), expected_values[0..(COUNT / 2) as usize]); Ok(()) } @@ -589,7 +562,7 @@ pub async fn should_zrevrangebyscore(client: RedisClient, _: RedisConfig) -> Res pub async fn should_zrank_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let _ = create_count_data(&client, "foo").await?; - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.zrank("foo", idx).await?; assert_eq!(result, idx); } @@ -606,7 +579,7 @@ pub async fn should_zrem_values(client: RedisClient, _: RedisConfig) -> Result<( let result: i64 = client.zrem("foo", COUNT + 1).await?; assert_eq!(result, 0); - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.zrem("foo", idx).await?; assert_eq!(result, 1); let result: i64 = client.zcard("foo").await?; @@ -649,7 +622,7 @@ pub async fn should_zremrangebyrank(client: RedisClient, _: RedisConfig) -> Resu assert_eq!(result, 0); let _ = create_count_data(&client, "foo").await?; - for _ in 0 .. 
COUNT { + for _ in 0..COUNT { // this modifies the set so the idx cant change let result: usize = client.zremrangebyrank("foo", 0, 0).await?; assert_eq!(result, 1); @@ -668,7 +641,7 @@ pub async fn should_zremrangebyscore(client: RedisClient, _: RedisConfig) -> Res assert_eq!(result, 0); let _ = create_count_data(&client, "foo").await?; - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: usize = client.zremrangebyscore("foo", idx as f64, idx as f64).await?; assert_eq!(result, 1); } @@ -684,7 +657,7 @@ pub async fn should_zrevrank_values(client: RedisClient, _: RedisConfig) -> Resu let result: Option = client.zrevrank("foo", COUNT + 1).await?; assert!(result.is_none()); - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: i64 = client.zrevrank("foo", idx).await?; assert_eq!(result, COUNT - (idx + 1)); } @@ -695,7 +668,7 @@ pub async fn should_zrevrank_values(client: RedisClient, _: RedisConfig) -> Resu pub async fn should_zscore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { let _ = create_count_data(&client, "foo").await?; - for idx in 0 .. COUNT { + for idx in 0..COUNT { let result: f64 = client.zscore("foo", idx).await?; assert_eq!(result, idx as f64); } @@ -707,11 +680,8 @@ pub async fn should_zscore_values(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_zunion_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); - for idx in 0 .. COUNT { + for idx in 0..COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client .zadd("foo{1}", None, None, false, false, (idx as f64, idx)) @@ -730,12 +700,12 @@ pub async fn should_zunion_values(client: RedisClient, _: RedisConfig) -> Result None, false, false, - expected[0 .. 
expected.len() - 1].to_vec(), + expected[0..expected.len() - 1].to_vec(), ) .await?; let result: RedisValue = client.zunion(vec!["foo{1}", "bar{1}"], None, None, true).await?; // scores are added together with a weight of 1 in this example - let mut _expected: Vec<(RedisValue, f64)> = expected[0 .. expected.len() - 1] + let mut _expected: Vec<(RedisValue, f64)> = expected[0..expected.len() - 1] .iter() .map(|(s, v)| (v.clone(), s * 2.0)) .collect(); @@ -751,12 +721,8 @@ pub async fn should_zunion_values(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_zunionstore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); - check_null!(client, "bar{1}"); - check_null!(client, "baz{1}"); - let mut expected: Vec<(f64, RedisValue)> = Vec::with_capacity(COUNT as usize); - for idx in 0 .. COUNT { + for idx in 0..COUNT { expected.push((idx as f64, idx.to_string().into())); let result: i64 = client .zadd("foo{1}", None, None, false, false, (idx as f64, idx)) @@ -776,7 +742,7 @@ pub async fn should_zunionstore_values(client: RedisClient, _: RedisConfig) -> R None, false, false, - expected[0 .. expected.len() - 1].to_vec(), + expected[0..expected.len() - 1].to_vec(), ) .await?; let result: i64 = client @@ -788,7 +754,7 @@ pub async fn should_zunionstore_values(client: RedisClient, _: RedisConfig) -> R } pub async fn should_zmscore_values(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - for idx in 0 .. 
COUNT { + for idx in 0..COUNT { client.zadd("foo", None, None, false, false, (idx as f64, idx)).await?; } diff --git a/tests/integration/streams/mod.rs b/tests/integration/streams/mod.rs index de921783..f6e3518f 100644 --- a/tests/integration/streams/mod.rs +++ b/tests/integration/streams/mod.rs @@ -1,4 +1,5 @@ use fred::{ + cmd, prelude::*, types::{XCapKind, XCapTrim, XReadResponse, XReadValue, XID}, }; @@ -18,7 +19,7 @@ async fn add_stream_entries( ) -> Result<(Vec, FakeExpectedValues), RedisError> { let mut ids = Vec::with_capacity(count); let mut expected = Vec::with_capacity(count); - for idx in 0 .. count { + for idx in 0..count { let id: String = client.xadd(key, false, None, "*", ("count", idx)).await?; ids.push(id.clone()); @@ -37,7 +38,6 @@ fn has_expected_value(expected: &FakeExpectedValues, actual: &FakeExpectedValues } pub async fn should_xinfo_consumers(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); let result: Result<(), RedisError> = client.xinfo_consumers("foo{1}", "group1").await; assert!(result.is_err()); @@ -57,7 +57,6 @@ pub async fn should_xinfo_consumers(client: RedisClient, _: RedisConfig) -> Resu } pub async fn should_xinfo_groups(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); let result: Result<(), RedisError> = client.xinfo_groups("foo{1}").await; assert!(result.is_err()); @@ -76,7 +75,6 @@ pub async fn should_xinfo_groups(client: RedisClient, _: RedisConfig) -> Result< } pub async fn should_xinfo_streams(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); let result: Result<(), RedisError> = client.xinfo_stream("foo{1}", true, None).await; assert!(result.is_err()); @@ -92,7 +90,6 @@ pub async fn should_xinfo_streams(client: RedisClient, _: RedisConfig) -> Result } pub async fn should_xadd_auto_id_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - 
check_null!(client, "foo{1}"); let result: String = client.xadd("foo{1}", false, None, "*", ("a", "b")).await?; assert!(!result.is_empty()); @@ -102,7 +99,6 @@ pub async fn should_xadd_auto_id_to_a_stream(client: RedisClient, _: RedisConfig } pub async fn should_xadd_manual_id_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); let result: String = client.xadd("foo{1}", false, None, "1-0", ("a", "b")).await?; assert_eq!(result, "1-0"); @@ -112,7 +108,6 @@ pub async fn should_xadd_manual_id_to_a_stream(client: RedisClient, _: RedisConf } pub async fn should_xadd_with_cap_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); client .xadd("foo{1}", false, ("MAXLEN", "=", 1), "*", ("a", "b")) .await?; @@ -123,7 +118,6 @@ pub async fn should_xadd_with_cap_to_a_stream(client: RedisClient, _: RedisConfi } pub async fn should_xadd_nomkstream_to_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); let result: Option = client.xadd("foo{1}", true, None, "*", ("a", "b")).await?; assert!(result.is_none()); @@ -135,7 +129,6 @@ pub async fn should_xadd_nomkstream_to_a_stream(client: RedisClient, _: RedisCon } pub async fn should_xtrim_a_stream_approx_cap(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; @@ -144,7 +137,7 @@ pub async fn should_xtrim_a_stream_approx_cap(client: RedisClient, _: RedisConfi let len: usize = client.xlen("foo{1}").await?; assert_eq!(len, 3 - deleted); - client.del("foo{1}").await?; + client.custom(cmd!("DEL"), vec!["foo{1}"]).await?; create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; let deleted: usize = client @@ -158,7 +151,6 @@ pub async fn 
should_xtrim_a_stream_approx_cap(client: RedisClient, _: RedisConfi } pub async fn should_xtrim_a_stream_eq_cap(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; @@ -167,7 +159,7 @@ pub async fn should_xtrim_a_stream_eq_cap(client: RedisClient, _: RedisConfig) - let len: usize = client.xlen("foo{1}").await?; assert_eq!(len, 1); - client.del("foo{1}").await?; + client.custom(cmd!("DEL"), vec!["foo{1}"]).await?; create_fake_group_and_stream(&client, "foo{1}").await?; let _ = add_stream_entries(&client, "foo{1}", 3).await?; let deleted: usize = client.xtrim("foo{1}", (XCapKind::MaxLen, XCapTrim::Exact, 1)).await?; @@ -179,7 +171,6 @@ pub async fn should_xtrim_a_stream_eq_cap(client: RedisClient, _: RedisConfig) - } pub async fn should_xdel_one_id_in_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let (ids, _) = add_stream_entries(&client, "foo{1}", 2).await?; @@ -191,11 +182,10 @@ pub async fn should_xdel_one_id_in_a_stream(client: RedisClient, _: RedisConfig) } pub async fn should_xdel_multiple_ids_in_a_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let (ids, _) = add_stream_entries(&client, "foo{1}", 3).await?; - let deleted: usize = client.xdel("foo{1}", ids[0 .. 
2].to_vec()).await?; + let deleted: usize = client.xdel("foo{1}", ids[0..2].to_vec()).await?; assert_eq!(deleted, 2); let len: usize = client.xlen("foo{1}").await?; assert_eq!(len, 1); @@ -203,7 +193,6 @@ pub async fn should_xdel_multiple_ids_in_a_stream(client: RedisClient, _: RedisC } pub async fn should_xrange_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let (_, expected) = add_stream_entries(&client, "foo{1}", 3).await?; @@ -213,7 +202,6 @@ pub async fn should_xrange_no_count(client: RedisClient, _: RedisConfig) -> Resu } pub async fn should_xrange_values_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let (ids, _) = add_stream_entries(&client, "foo{1}", 3).await?; @@ -224,7 +212,6 @@ pub async fn should_xrange_values_no_count(client: RedisClient, _: RedisConfig) } pub async fn should_xrevrange_values_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let (mut ids, _) = add_stream_entries(&client, "foo{1}", 3).await?; ids.reverse(); @@ -236,7 +223,6 @@ pub async fn should_xrevrange_values_no_count(client: RedisClient, _: RedisConfi } pub async fn should_xrange_with_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let (_, expected) = add_stream_entries(&client, "foo{1}", 3).await?; @@ -246,7 +232,6 @@ pub async fn should_xrange_with_count(client: RedisClient, _: RedisConfig) -> Re } pub async fn should_xrevrange_no_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let (_, mut expected) = 
add_stream_entries(&client, "foo{1}", 3).await?; expected.reverse(); @@ -257,7 +242,6 @@ pub async fn should_xrevrange_no_count(client: RedisClient, _: RedisConfig) -> R } pub async fn should_xrevrange_with_count(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let (_, mut expected) = add_stream_entries(&client, "foo{1}", 3).await?; expected.reverse(); @@ -268,7 +252,6 @@ pub async fn should_xrevrange_with_count(client: RedisClient, _: RedisConfig) -> } pub async fn should_run_xlen_on_stream(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { - check_null!(client, "foo{1}"); create_fake_group_and_stream(&client, "foo{1}").await?; let len: usize = client.xlen("foo{1}").await?; assert_eq!(len, 0); @@ -320,8 +303,8 @@ pub async fn should_xread_multiple_keys_count_2(client: RedisClient, _: RedisCon let (bar_ids, bar_inner) = add_stream_entries(&client, "bar{1}", 3).await?; let mut expected = HashMap::new(); - expected.insert("foo{1}".into(), foo_inner[1 ..].to_vec()); - expected.insert("bar{1}".into(), bar_inner[1 ..].to_vec()); + expected.insert("foo{1}".into(), foo_inner[1..].to_vec()); + expected.insert("bar{1}".into(), bar_inner[1..].to_vec()); let ids: Vec = vec![foo_ids[0].as_str().into(), bar_ids[0].as_str().into()]; let result: HashMap>>> = client diff --git a/tests/integration/timeseries/mod.rs b/tests/integration/timeseries/mod.rs index 266d2ea5..9d905647 100644 --- a/tests/integration/timeseries/mod.rs +++ b/tests/integration/timeseries/mod.rs @@ -5,17 +5,10 @@ use fred::{ interfaces::*, prelude::RedisResult, types::{ - Aggregator, - GetLabels, - RedisConfig, - RedisKey, - RedisValue, - Resp2TimeSeriesValues, - Resp3TimeSeriesValues, - Timestamp, + Aggregator, GetLabels, RedisConfig, RedisKey, RedisValue, Resp2TimeSeriesValues, Resp3TimeSeriesValues, Timestamp, }, }; -use redis_protocol::resp3::prelude::RespVersion; +use 
redis_protocol::resp3::types::RespVersion; use std::{collections::HashMap, time::Duration}; use tokio::time::sleep; @@ -69,12 +62,16 @@ pub async fn should_madd_and_mget(client: RedisClient, _: RedisConfig) -> Result values.sort_by(|(lhs_key, _, _), (rhs_key, _, _)| lhs_key.cmp(rhs_key)); let expected = vec![ - ("bar{1}".to_string(), vec![("a".to_string(), "b".to_string())], vec![( - 2, 2.3, - )]), - ("foo{1}".to_string(), vec![("a".to_string(), "b".to_string())], vec![( - 3, 3.3, - )]), + ( + "bar{1}".to_string(), + vec![("a".to_string(), "b".to_string())], + vec![(2, 2.3)], + ), + ( + "foo{1}".to_string(), + vec![("a".to_string(), "b".to_string())], + vec![(3, 3.3)], + ), ]; assert_eq!(values, expected); } else { @@ -139,16 +136,24 @@ pub async fn should_incr_and_decr(client: RedisClient, _: RedisConfig) -> Result pub async fn should_create_and_delete_rules(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { client - .ts_create("temp:TLV", None, None, None, None, [ - ("type", "temp"), - ("location", "TLV"), - ]) + .ts_create( + "temp:TLV", + None, + None, + None, + None, + [("type", "temp"), ("location", "TLV")], + ) .await?; client - .ts_create("dailyAvgTemp:TLV", None, None, None, None, [ - ("type", "temp"), - ("location", "TLV"), - ]) + .ts_create( + "dailyAvgTemp:TLV", + None, + None, + None, + None, + [("type", "temp"), ("location", "TLV")], + ) .await?; client .ts_createrule("temp:TLV", "dailyAvgTemp:TLV", (Aggregator::TWA, 86400000), None) @@ -191,15 +196,16 @@ pub async fn should_madd_and_mrange(client: RedisClient, _: RedisConfig) -> Resu samples.sort_by(|(l, _, _), (r, _, _)| l.cmp(r)); let expected = vec![ - ("bar{1}".to_string(), vec![("a".to_string(), "b".to_string())], vec![ - (1, 1.2), - (2, 2.3), - ]), - ("foo{1}".to_string(), vec![("a".to_string(), "b".to_string())], vec![ - (1, 1.1), - (2, 2.2), - (3, 3.3), - ]), + ( + "bar{1}".to_string(), + vec![("a".to_string(), "b".to_string())], + vec![(1, 1.2), (2, 2.3)], + ), + ( + 
"foo{1}".to_string(), + vec![("a".to_string(), "b".to_string())], + vec![(1, 1.1), (2, 2.2), (3, 3.3)], + ), ]; assert_eq!(samples, expected) } else { @@ -326,15 +332,16 @@ pub async fn should_madd_and_mrevrange(client: RedisClient, _: RedisConfig) -> R samples.sort_by(|(l, _, _), (r, _, _)| l.cmp(r)); let expected = vec![ - ("bar{1}".to_string(), vec![("a".to_string(), "b".to_string())], vec![ - (2, 2.3), - (1, 1.2), - ]), - ("foo{1}".to_string(), vec![("a".to_string(), "b".to_string())], vec![ - (3, 3.3), - (2, 2.2), - (1, 1.1), - ]), + ( + "bar{1}".to_string(), + vec![("a".to_string(), "b".to_string())], + vec![(2, 2.3), (1, 1.2)], + ), + ( + "foo{1}".to_string(), + vec![("a".to_string(), "b".to_string())], + vec![(3, 3.3), (2, 2.2), (1, 1.1)], + ), ]; assert_eq!(samples, expected) } else { diff --git a/tests/integration/tracking/mod.rs b/tests/integration/tracking/mod.rs index 9767ad36..ba2b458c 100644 --- a/tests/integration/tracking/mod.rs +++ b/tests/integration/tracking/mod.rs @@ -12,14 +12,13 @@ use std::{ use tokio::time::sleep; #[allow(dead_code)] +#[cfg(feature = "i-keys")] pub async fn should_invalidate_foo_resp3(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { if client.protocol_version() == RespVersion::RESP2 { return Ok(()); } let key: RedisKey = "foo{1}".into(); - check_null!(client, "foo{1}"); - let invalidated = Arc::new(AtomicBool::new(false)); let _invalidated = invalidated.clone(); @@ -49,13 +48,13 @@ pub async fn should_invalidate_foo_resp3(client: RedisClient, _: RedisConfig) -> } #[allow(dead_code)] +#[cfg(feature = "i-keys")] pub async fn should_invalidate_foo_resp2_centralized(client: RedisClient, _: RedisConfig) -> Result<(), RedisError> { if client.protocol_version() == RespVersion::RESP3 || client.is_clustered() { return Ok(()); } let key: RedisKey = "foo{1}".into(); - check_null!(client, "foo{1}"); let subscriber = client.clone_new(); subscriber.connect(); subscriber.wait_for_connect().await?; diff --git 
a/tests/integration/utils.rs b/tests/integration/utils.rs index 9b12cc02..1025a94c 100644 --- a/tests/integration/utils.rs +++ b/tests/integration/utils.rs @@ -19,11 +19,12 @@ use fred::{ UnresponsiveConfig, }, }; -use redis_protocol::resp3::prelude::RespVersion; +use redis_protocol::resp3::types::RespVersion; use std::{convert::TryInto, default::Default, env, fmt, fmt::Formatter, fs, future::Future, time::Duration}; const RECONNECT_DELAY: u32 = 1000; +use fred::types::ClusterDiscoveryPolicy; #[cfg(any(feature = "enable-rustls", feature = "enable-native-tls"))] use fred::types::{TlsConfig, TlsConnector, TlsHostMapping}; #[cfg(feature = "enable-native-tls")] @@ -83,7 +84,7 @@ fn read_fail_fast_env() -> bool { } } -#[cfg(feature = "redis-stack")] +#[cfg(feature = "i-redis-stack")] pub fn read_redis_centralized_host() -> (String, u16) { let host = read_env_var("FRED_REDIS_STACK_HOST").unwrap_or("redis-main".into()); let port = read_env_var("FRED_REDIS_STACK_PORT") @@ -93,7 +94,7 @@ pub fn read_redis_centralized_host() -> (String, u16) { (host, port) } -#[cfg(not(feature = "redis-stack"))] +#[cfg(not(feature = "i-redis-stack"))] pub fn read_redis_centralized_host() -> (String, u16) { let host = read_env_var("FRED_REDIS_CENTRALIZED_HOST").unwrap_or("redis-main".into()); let port = read_env_var("FRED_REDIS_CENTRALIZED_PORT") @@ -127,13 +128,13 @@ pub fn read_redis_password() -> String { read_env_var("REDIS_PASSWORD").expect("Failed to read REDIS_PASSWORD env") } -#[cfg(not(feature = "redis-stack"))] +#[cfg(not(feature = "i-redis-stack"))] pub fn read_redis_username() -> String { read_env_var("REDIS_USERNAME").expect("Failed to read REDIS_USERNAME env") } // the CI settings for redis-stack don't set up custom ACL rules -#[cfg(feature = "redis-stack")] +#[cfg(feature = "i-redis-stack")] pub fn read_redis_username() -> String { read_env_var("REDIS_USERNAME").unwrap_or("default".into()) } @@ -215,7 +216,7 @@ fn read_tls_creds() -> TlsCreds { #[cfg(feature = "enable-rustls")] 
fn create_rustls_config() -> TlsConnector { - use webpki::types::PrivatePkcs8KeyDer; + use rustls::pki_types::PrivatePkcs8KeyDer; let creds = read_tls_creds(); let mut root_store = RootCertStore::empty(); @@ -251,7 +252,7 @@ fn create_native_tls_config() -> TlsConnector { builder.try_into().expect("Failed to build native-tls connector") } -fn resilience_settings() -> (Option, u32, bool) { +fn reconnect_settings() -> (Option, u32, bool) { (Some(ReconnectPolicy::new_constant(300, RECONNECT_DELAY)), 3, true) } @@ -267,7 +268,8 @@ fn create_server_config(cluster: bool) -> ServerConfig { if cluster { let (host, port) = read_redis_cluster_host(); ServerConfig::Clustered { - hosts: vec![Server::new(host, port)], + hosts: vec![Server::new(host, port)], + policy: ClusterDiscoveryPolicy::default(), } } else { let (host, port) = read_redis_centralized_host(); @@ -411,7 +413,7 @@ where F: Fn(RedisClient, RedisConfig) -> Fut, Fut: Future>, { - let (policy, cmd_attempts, fail_fast) = resilience_settings(); + let (policy, cmd_attempts, fail_fast) = reconnect_settings(); let mut connection = ConnectionConfig::default(); let (mut config, perf) = create_redis_config(true, pipeline, resp3); connection.max_command_attempts = cmd_attempts; @@ -442,7 +444,7 @@ where return run_sentinel(func, pipeline, resp3).await; } - let (policy, cmd_attempts, fail_fast) = resilience_settings(); + let (policy, cmd_attempts, fail_fast) = reconnect_settings(); let mut connection = ConnectionConfig::default(); let (mut config, perf) = create_redis_config(false, pipeline, resp3); connection.max_command_attempts = cmd_attempts; @@ -521,7 +523,7 @@ macro_rules! centralized_test_panic( macro_rules! cluster_test_panic( ($module:tt, $name:tt) => { mod $name { - #[cfg(not(any(feature = "redis-stack", feature = "unix-sockets")))] + #[cfg(not(any(feature = "i-redis-stack", feature = "unix-sockets")))] mod resp2 { #[tokio::test(flavor = "multi_thread")] #[should_panic] @@ -546,7 +548,7 @@ macro_rules! 
cluster_test_panic( } } - #[cfg(not(any(feature = "redis-stack", feature = "unix-sockets")))] + #[cfg(not(any(feature = "i-redis-stack", feature = "unix-sockets")))] mod resp3 { #[tokio::test(flavor = "multi_thread")] #[should_panic] @@ -628,7 +630,7 @@ macro_rules! centralized_test( macro_rules! cluster_test( ($module:tt, $name:tt) => { mod $name { - #[cfg(not(any(feature = "redis-stack", feature = "unix-sockets")))] + #[cfg(not(any(feature = "i-redis-stack", feature = "unix-sockets")))] mod resp2 { #[tokio::test(flavor = "multi_thread")] async fn pipelined() { @@ -651,7 +653,7 @@ macro_rules! cluster_test( } } - #[cfg(not(any(feature = "redis-stack", feature = "unix-sockets")))] + #[cfg(not(any(feature = "i-redis-stack", feature = "unix-sockets")))] mod resp3 { #[tokio::test(flavor = "multi_thread")] async fn pipelined() { @@ -685,15 +687,6 @@ macro_rules! return_err( } } ); -macro_rules! check_null( - ($client:ident, $arg:expr) => { { - let foo: RedisValue = $client.get($arg).await?; - if !foo.is_null() { - panic!("expected {} to be null", $arg); - } - } } -); - macro_rules! 
check_redis_7 ( ($client:ident) => { if $client.server_version().unwrap().major < 7 { diff --git a/tests/scripts/check_features.sh b/tests/scripts/check_features.sh new file mode 100755 index 00000000..33ca1404 --- /dev/null +++ b/tests/scripts/check_features.sh @@ -0,0 +1,14 @@ +#!/bin/bash -e + +all_features=`yq -oy '.features["i-all"]' Cargo.toml | tr -d '\n' | sed -e 's/- / /g' | cut -c 2-` +redis_stack_features=`yq -oy '.features["i-redis-stack"]' Cargo.toml | tr -d '\n' | sed -e 's/- / /g' | cut -c 2-` + +for feature in $all_features; do + echo "Checking $feature" + cargo clippy --tests --lib -p fred --no-default-features --features "$feature" -- -Dwarnings +done + +for feature in $redis_stack_features; do + echo "Checking $feature" + cargo clippy --tests --lib -p fred --no-default-features --features "$feature" -- -Dwarnings +done \ No newline at end of file diff --git a/tests/scripts/utils.sh b/tests/scripts/utils.sh index 54160f48..a6247662 100644 --- a/tests/scripts/utils.sh +++ b/tests/scripts/utils.sh @@ -34,15 +34,30 @@ function check_redis { } function install_redis { - echo "Installing redis..." + echo "Installing..." 
pushd $ROOT > /dev/null rm -rf tests/tmp/redis_cluster_$REDIS_VERSION cd tests/tmp - curl -O http://download.redis.io/releases/redis-$REDIS_VERSION.tar.gz + + if [ -z "$USE_VALKEY" ]; then + echo "Installing Redis from redis.io" + curl -O "http://download.redis.io/releases/redis-$REDIS_VERSION.tar.gz" + else + echo "Installing valkey from github" + curl -O -L "https://github.com/valkey-io/valkey/archive/refs/tags/redis-$REDIS_VERSION.tar.gz" --output redis-$REDIS_VERSION.tar.gz + fi + mkdir redis_$REDIS_VERSION tar xf redis-$REDIS_VERSION.tar.gz -C redis_$REDIS_VERSION rm redis-$REDIS_VERSION.tar.gz - cd redis_$REDIS_VERSION/redis-$REDIS_VERSION + + if [ -z "$USE_VALKEY" ]; then + cd redis_$REDIS_VERSION/redis-$REDIS_VERSION + else + mv redis_$REDIS_VERSION/valkey-redis-$REDIS_VERSION redis_$REDIS_VERSION/redis-$REDIS_VERSION + cd redis_$REDIS_VERSION/redis-$REDIS_VERSION + fi + make BUILD_TLS=yes -j"${PARALLEL_JOBS}" mv redis.conf redis.conf.bk popd > /dev/null