Fix all clippy lints #267

Merged
merged 4 commits on Jan 20, 2020
1 change: 1 addition & 0 deletions Cargo.toml
@@ -60,6 +60,7 @@ required-features = ["aio"]
[[bench]]
name = "bench_basic"
harness = false
required-features = ["tokio-rt-core"]

[[example]]
name = "async-multiplexed"
2 changes: 1 addition & 1 deletion Makefile
@@ -40,6 +40,6 @@ style-check:

lint:
@rustup component add clippy 2> /dev/null
cargo clippy --all-features
cargo clippy --all-features --all --tests --examples -- -D clippy::all

.PHONY: build test bench docs upload-docs style-check lint
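
The tightened lint target now runs clippy over every workspace target, tests and examples included, and -D clippy::all turns every default-group lint into a hard error, which is why the rest of this diff touches so many files. As a minimal hypothetical sketch (not code from this repository), here are two of the lints involved, clippy::len_zero and clippy::unnecessary_fold, next to the replacements clippy suggests:

// Illustrative sketch only: a hypothetical helper, not part of this PR.
fn sum_if_nonempty(values: &[i32]) -> i32 {
    // clippy::len_zero flags `values.len() == 0`; is_empty() is the form
    // src/parser.rs switches to later in this diff.
    if values.is_empty() {
        return 0;
    }
    // clippy::unnecessary_fold flags `fold(0, |a, b| a + b)`; sum() is the form
    // examples/basic.rs switches to.
    values.iter().sum()
}

With the Makefile change in place, make lint fails the build on any of these patterns.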
10 changes: 5 additions & 5 deletions benches/bench_basic.rs
@@ -39,13 +39,13 @@ fn bench_simple_getsetdel_async(b: &mut Bencher) {
runtime
.block_on(async {
let key = "test_key";
let () = redis::cmd("SET")
redis::cmd("SET")
.arg(key)
.arg(42)
.query_async(&mut con)
.await?;
let _: isize = redis::cmd("GET").arg(key).query_async(&mut con).await?;
let () = redis::cmd("DEL").arg(key).query_async(&mut con).await?;
redis::cmd("DEL").arg(key).query_async(&mut con).await?;
Ok(())
})
.map_err(|err: RedisError| err)
@@ -112,7 +112,7 @@ fn bench_long_pipeline(b: &mut Bencher) {
let pipe = long_pipeline();

b.iter(|| {
let _: () = pipe.query(&mut con).unwrap();
let () = pipe.query(&mut con).unwrap();
});
}

@@ -140,7 +140,7 @@ fn bench_multiplexed_async_long_pipeline(b: &mut Bencher) {
let pipe = long_pipeline();

b.iter(|| {
let _: () = runtime
let () = runtime
.block_on(async { pipe.query_async(&mut con).await })
.unwrap();
});
@@ -162,7 +162,7 @@ fn bench_multiplexed_async_implicit_pipeline(b: &mut Bencher) {
.collect::<Vec<_>>();

b.iter(|| {
let _: () = runtime
let () = runtime
.block_on(async {
cmds.iter()
.zip(&mut connections)
4 changes: 2 additions & 2 deletions examples/async-await.rs
@@ -5,9 +5,9 @@ async fn main() -> redis::RedisResult<()> {
let client = redis::Client::open("redis://127.0.0.1/").unwrap();
let mut con = client.get_async_connection().await?;

let () = con.set("key1", b"foo").await?;
con.set("key1", b"foo").await?;

let () = redis::cmd("SET")
redis::cmd("SET")
.arg(&["key2", "bar"])
.query_async(&mut con)
.await?;
10 changes: 5 additions & 5 deletions examples/basic.rs
@@ -51,7 +51,7 @@ fn do_show_scanning(con: &mut redis::Connection) -> redis::RedisResult<()> {

// since we don't care about the return value of the pipeline we can
// just cast it into the unit type.
let _: () = pipe.query(con)?;
pipe.query(con)?;

// since rust currently does not track temporaries for us, we need to
// store it in a local variable.
@@ -61,7 +61,7 @@ fn do_show_scanning(con: &mut redis::Connection) -> redis::RedisResult<()> {
// as a simple exercise we just sum up the iterator. Since the fold
// method carries an initial value we do not need to define the
// type of the iterator, rust will figure "int" out for us.
let sum = cmd.iter::<i32>(con)?.fold(0, |a, b| a + b);
let sum: i32 = cmd.iter::<i32>(con)?.sum();

println!("The sum of all numbers in the set 0-1000: {}", sum);

@@ -74,12 +74,12 @@ fn do_atomic_increment_lowlevel(con: &mut redis::Connection) -> redis::RedisResu
println!("Run low-level atomic increment:");

// set the initial value so we have something to test with.
let _: () = redis::cmd("SET").arg(key).arg(42).query(con)?;
redis::cmd("SET").arg(key).arg(42).query(con)?;

loop {
// we need to start watching the key we care about, so that our
// exec fails if the key changes.
let _: () = redis::cmd("WATCH").arg(key).query(con)?;
redis::cmd("WATCH").arg(key).query(con)?;

// load the old value, so we know what to increment.
let val: isize = redis::cmd("GET").arg(key).query(con)?;
@@ -117,7 +117,7 @@ fn do_atomic_increment(con: &mut redis::Connection) -> redis::RedisResult<()> {
println!("Run high-level atomic increment:");

// set the initial value so we have something to test with.
let _: () = con.set(key, 42)?;
con.set(key, 42)?;

// run the transaction block.
let (new_val,): (isize,) = transaction(con, &[key], |con, pipe| {
69 changes: 31 additions & 38 deletions src/cluster.rs
@@ -11,36 +11,32 @@
//! use redis::Commands;
//! use redis::cluster::ClusterClient;
//!
//! fn main() {
//! let nodes = vec!["redis://127.0.0.1:6379/", "redis://127.0.0.1:6378/", "redis://127.0.0.1:6377/"];
//! let client = ClusterClient::open(nodes).unwrap();
//! let mut connection = client.get_connection().unwrap();
//! let nodes = vec!["redis://127.0.0.1:6379/", "redis://127.0.0.1:6378/", "redis://127.0.0.1:6377/"];
//! let client = ClusterClient::open(nodes).unwrap();
//! let mut connection = client.get_connection().unwrap();
//!
//! let _: () = connection.set("test", "test_data").unwrap();
//! let rv: String = connection.get("test").unwrap();
//! let _: () = connection.set("test", "test_data").unwrap();
//! let rv: String = connection.get("test").unwrap();
//!
//! assert_eq!(rv, "test_data");
//! }
//! assert_eq!(rv, "test_data");
//! ```
//!
//! # Pipelining
//! ```rust,no_run
//! use redis::{Commands, pipe};
//! use redis::cluster::ClusterClient;
//!
//! fn main() {
//! let nodes = vec!["redis://127.0.0.1:6379/", "redis://127.0.0.1:6378/", "redis://127.0.0.1:6377/"];
//! let client = ClusterClient::open(nodes).unwrap();
//! let mut connection = client.get_connection().unwrap();
//! let nodes = vec!["redis://127.0.0.1:6379/", "redis://127.0.0.1:6378/", "redis://127.0.0.1:6377/"];
//! let client = ClusterClient::open(nodes).unwrap();
//! let mut connection = client.get_connection().unwrap();
//!
//! let key = "test";
//! let key = "test";
//!
//! let _: () = pipe()
//! .rpush(key, "123").ignore()
//! .ltrim(key, -10, -1).ignore()
//! .expire(key, 60).ignore()
//! .query(&mut connection).unwrap();
//! }
//! let _: () = pipe()
//! .rpush(key, "123").ignore()
//! .ltrim(key, -10, -1).ignore()
//! .expire(key, 60).ignore()
//! .query(&mut connection).unwrap();
//! ```
use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap, HashSet};
@@ -290,22 +286,18 @@ impl ClusterConnection {
// Query a node to discover slot-> master mappings.
fn refresh_slots(&self) -> RedisResult<()> {
let mut slots = self.slots.borrow_mut();
*slots = {
let new_slots = if self.readonly {
let mut rng = thread_rng();
self.create_new_slots(|slot_data| {
let replicas = slot_data.replicas();
if replicas.is_empty() {
slot_data.master().to_string()
} else {
replicas.choose(&mut rng).unwrap().to_string()
}
})?
} else {
self.create_new_slots(|slot_data| slot_data.master().to_string())?
};

new_slots
*slots = if self.readonly {
let mut rng = thread_rng();
self.create_new_slots(|slot_data| {
let replicas = slot_data.replicas();
if replicas.is_empty() {
slot_data.master().to_string()
} else {
replicas.choose(&mut rng).unwrap().to_string()
}
})?
} else {
self.create_new_slots(|slot_data| slot_data.master().to_string())?
};

let mut connections = self.connections.borrow_mut();
@@ -421,7 +413,7 @@ impl ClusterConnection {
} else {
// Create new connection.
// TODO: error handling
let conn = connect(addr.as_ref(), self.readonly, self.password.clone())?;
let conn = connect(addr, self.readonly, self.password.clone())?;
Ok(connections.entry(addr.to_string()).or_insert(conn))
}
}
@@ -442,6 +434,7 @@ impl ClusterConnection {
Ok(T::merge_results(results))
}

#[allow(clippy::unnecessary_unwrap)]
fn request<T, F>(&self, cmd: &[u8], mut func: F) -> RedisResult<T>
where
T: MergeResults + std::fmt::Debug,
Expand All @@ -454,10 +447,10 @@ impl ClusterConnection {
return self.execute_on_all_nodes(func);
}
None => {
return Err(((
return Err((
ErrorKind::ClientError,
"this command cannot be safely routed in cluster mode",
))
)
.into())
}
};
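
Besides the slot-refresh and error-construction cleanups, the module docs above lose their explicit fn main wrappers. Rustdoc already wraps each doc-test body in a generated main, so the nested one is redundant and clippy flags it (most likely via needless_doctest_main; the exact lint name is an assumption here). A hypothetical sketch of the before-and-after doc-comment shape:

/// Illustrative doc comment on a hypothetical item, not part of this PR.
///
/// Before: the example carries its own fn main, which clippy flags.
///
/// ```rust,no_run
/// fn main() {
///     let nodes = vec!["redis://127.0.0.1:6379/"];
///     assert_eq!(nodes.len(), 1);
/// }
/// ```
///
/// After: rustdoc supplies the surrounding main, so the body stands alone.
///
/// ```rust,no_run
/// let nodes = vec!["redis://127.0.0.1:6379/"];
/// assert_eq!(nodes.len(), 1);
/// ```
fn _doctest_wrapper_sketch() {}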
21 changes: 14 additions & 7 deletions src/commands.rs
@@ -41,14 +41,15 @@ macro_rules! implement_commands {
/// use redis::Commands;
/// let client = redis::Client::open("redis://127.0.0.1/")?;
/// let mut con = client.get_connection()?;
/// let () = con.set("my_key", 42)?;
/// con.set("my_key", 42)?;
/// assert_eq!(con.get("my_key"), Ok(42));
/// # Ok(()) }
/// ```
pub trait Commands : ConnectionLike+Sized {
$(
$(#[$attr])*
#[inline]
#[allow(clippy::extra_unused_lifetimes, clippy::needless_lifetimes)]
fn $name<$lifetime, $($tyargs: $ty, )* RV: FromRedisValue>(
&mut self $(, $argname: $argty)*) -> RedisResult<RV>
{ Cmd::$name($($argname),*).query(self) }
@@ -126,6 +127,7 @@ macro_rules! implement_commands {
impl Cmd {
$(
$(#[$attr])*
#[allow(clippy::extra_unused_lifetimes, clippy::needless_lifetimes)]
pub fn $name<$lifetime, $($tyargs: $ty),*>($($argname: $argty),*) -> Self {
::std::mem::replace($body, Cmd::new())
}
@@ -143,7 +145,7 @@ macro_rules! implement_commands {
/// # async fn do_something() -> redis::RedisResult<()> {
/// let client = redis::Client::open("redis://127.0.0.1/")?;
/// let mut con = client.get_async_connection().await?;
/// let () = redis::cmd("SET").arg("my_key").arg(42i32).query_async(&mut con).await?;
/// redis::cmd("SET").arg("my_key").arg(42i32).query_async(&mut con).await?;
/// assert_eq!(redis::cmd("GET").arg("my_key").query_async(&mut con).await, Ok(42i32));
/// # Ok(()) }
/// ```
@@ -156,7 +158,7 @@ macro_rules! implement_commands {
/// use redis::Commands;
/// let client = redis::Client::open("redis://127.0.0.1/")?;
/// let mut con = client.get_async_connection().await?;
/// let () = con.set("my_key", 42i32).await?;
/// con.set("my_key", 42i32).await?;
/// assert_eq!(con.get("my_key").await, Ok(42i32));
/// # Ok(()) }
/// ```
@@ -165,11 +167,13 @@ macro_rules! implement_commands {
$(
$(#[$attr])*
#[inline]
#[allow(clippy::extra_unused_lifetimes, clippy::needless_lifetimes)]
fn $name<$lifetime, $($tyargs: $ty + Send + Sync + $lifetime,)* RV>(
& $lifetime mut self
$(, $argname: $argty)*
) -> crate::types::RedisFuture<'a, RV>
where RV: FromRedisValue,
) -> crate::types::RedisFuture<'a, RV>
where
RV: FromRedisValue,
{
Box::pin(async move { ($body).query_async(self).await })
}
@@ -183,9 +187,12 @@ macro_rules! implement_commands {
$(
$(#[$attr])*
#[inline]
#[allow(clippy::extra_unused_lifetimes, clippy::needless_lifetimes)]
pub fn $name<$lifetime, $($tyargs: $ty),*>(
&mut self $(, $argname: $argty)*) -> &mut Self
{ self.add_command(::std::mem::replace($body, Cmd::new())) }
&mut self $(, $argname: $argty)*
) -> &mut Self {
self.add_command(::std::mem::replace($body, Cmd::new()))
}
)*
}
)
4 changes: 2 additions & 2 deletions src/connection.rs
@@ -155,7 +155,7 @@ fn url_to_unix_connection_info(url: url::Url) -> RedisResult<ConnectionInfo> {
),
None => 0,
},
passwd: url.password().and_then(|pw| Some(pw.to_string())),
passwd: url.password().map(|pw| pw.to_string()),
})
}

@@ -233,7 +233,7 @@ impl ActualConnection {
Some(timeout) => {
let mut tcp = None;
let mut last_error = None;
for addr in ((host, *port)).to_socket_addrs()? {
for addr in (host, *port).to_socket_addrs()? {
match TcpStream::connect_timeout(&addr, timeout) {
Ok(l) => {
tcp = Some(l);
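
The connection.rs hunks fix two small lints: an and_then closure that only wraps its result in Some becomes map, and the doubled parentheses around the socket-address tuple are dropped. A hypothetical sketch of the and_then-to-map equivalence:

// Illustrative sketch only: a hypothetical helper mirroring the passwd change.
fn owned_password(pw: Option<&str>) -> Option<String> {
    // Flagged form: pw.and_then(|p| Some(p.to_string()))
    // map() expresses the same transformation and is what clippy suggests.
    pw.map(|p| p.to_string())
}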
6 changes: 2 additions & 4 deletions src/lib.rs
@@ -45,7 +45,6 @@
//!
//! Ok(())
//! }
//! # fn main() {}
//! ```
//!
//! ## Optional Features
@@ -106,7 +105,6 @@
//! let _ : () = con.set("my_key", 42)?;
//! Ok(())
//! }
//! # fn main() {}
//! ```
//!
//! Note that high-level commands are work in progress and many are still
@@ -306,9 +304,9 @@
//! let client = redis::Client::open("redis://127.0.0.1/").unwrap();
//! let mut con = client.get_async_connection().await?;
//!
//! let () = con.set("key1", b"foo").await?;
//! con.set("key1", b"foo").await?;
//!
//! let () = redis::cmd("SET").arg(&["key2", "bar"]).query_async(&mut con).await?;
//! redis::cmd("SET").arg(&["key2", "bar"]).query_async(&mut con).await?;
//!
//! let result = redis::cmd("MGET")
//! .arg(&["key1", "key2"])
4 changes: 2 additions & 2 deletions src/parser.rs
@@ -203,7 +203,7 @@ where
// SAFETY We either drop `self.reader` and return a slice with the lifetime of the
// reader or we return Pending/Err (neither which contains `'a`).
// In either case `poll_fill_buf` can not be called while its contents are exposed
Poll::Ready(Ok(x)) => unsafe { return Ok(&*(x as *const _)).into() },
Poll::Ready(Ok(x)) => Ok(unsafe { &*(x as *const _) }).into(),
Poll::Ready(Err(err)) => Err(err).into(),
Poll::Pending => {
reader = Some(r);
@@ -227,7 +227,7 @@ where

let (opt, mut removed) = {
let buffer = fill_buf(&mut reader).await?;
if buffer.len() == 0 {
if buffer.is_empty() {
return Err((ErrorKind::ResponseError, "Could not read enough bytes").into());
}
let buffer = if !remaining.is_empty() {
4 changes: 2 additions & 2 deletions src/script.rs
@@ -167,12 +167,12 @@ impl<'a> ScriptInvocation<'a> {
match eval_cmd.query_async(con).await {
Ok(val) => {
// Return the value from the script evaluation
return Ok(val).into();
Ok(val)
}
Err(err) => {
// Load the script into Redis if the script hash wasn't there already
if err.kind() == ErrorKind::NoScriptError {
let _hash = load_cmd.query_async(con).await?;
load_cmd.query_async(con).await?;
eval_cmd.query_async(con).await
} else {
Err(err)
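
The script.rs change keeps the existing load-on-miss flow and only drops a needless return ... .into() and an unused _hash binding: evaluate by hash first, and only when the server reports the script as missing, load it and evaluate again. A hypothetical, redis-free sketch of that retry shape:

// Illustrative sketch only: a generic load-on-miss retry, not the redis API.
fn eval_with_fallback<T, E>(
    mut eval: impl FnMut() -> Result<T, E>,
    mut load: impl FnMut() -> Result<(), E>,
    is_missing: impl Fn(&E) -> bool,
) -> Result<T, E> {
    match eval() {
        // Fast path: the script was already known to the server.
        Ok(val) => Ok(val),
        // Script not loaded yet: load it once, then retry the evaluation.
        Err(err) if is_missing(&err) => {
            load()?;
            eval()
        }
        // Any other error is passed straight through.
        Err(err) => Err(err),
    }
}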