Fix all clippy lints
jan-auer committed Jan 15, 2020
1 parent 248f294 commit 19fa1d3
Showing 9 changed files with 56 additions and 58 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -40,6 +40,6 @@ style-check:

lint:
@rustup component add clippy 2> /dev/null
cargo clippy --all-features
cargo clippy --all-features --all --tests --examples -- -D clippy::all

.PHONY: build test bench docs upload-docs style-check lint
4 changes: 2 additions & 2 deletions examples/async-await.rs
@@ -5,9 +5,9 @@ async fn main() -> redis::RedisResult<()> {
let client = redis::Client::open("redis://127.0.0.1/").unwrap();
let mut con = client.get_async_connection().await?;

let () = con.set("key1", b"foo").await?;
con.set("key1", b"foo").await?;

let () = redis::cmd("SET")
redis::cmd("SET")
.arg(&["key2", "bar"])
.query_async(&mut con)
.await?;
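The async-await example change above drops the explicit `let () =` bindings and lets the fallible calls stand as plain statements. A minimal standalone sketch of that shape, using a hypothetical unit-returning `store` helper instead of a redis connection:

fn store() -> Result<(), String> {
    // Stand-in for a call such as `con.set(...)` that yields a unit result.
    Ok(())
}

fn main() -> Result<(), String> {
    // Before (as in the old example): let () = store()?;
    // After: the call stands on its own as a statement.
    store()?;
    Ok(())
}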
10 changes: 5 additions & 5 deletions examples/basic.rs
@@ -51,7 +51,7 @@ fn do_show_scanning(con: &mut redis::Connection) -> redis::RedisResult<()> {

// since we don't care about the return value of the pipeline we can
// just cast it into the unit type.
let _: () = pipe.query(con)?;
pipe.query(con)?;

// since rust currently does not track temporaries for us, we need to
// store it in a local variable.
@@ -61,7 +61,7 @@ fn do_show_scanning(con: &mut redis::Connection) -> redis::RedisResult<()> {
// as a simple exercise we just sum up the iterator. Since the fold
// method carries an initial value we do not need to define the
// type of the iterator, rust will figure "int" out for us.
let sum = cmd.iter::<i32>(con)?.fold(0, |a, b| a + b);
let sum: i32 = cmd.iter::<i32>(con)?.sum();

println!("The sum of all numbers in the set 0-1000: {}", sum);

@@ -74,12 +74,12 @@ fn do_atomic_increment_lowlevel(con: &mut redis::Connection) -> redis::RedisResu
println!("Run low-level atomic increment:");

// set the initial value so we have something to test with.
let _: () = redis::cmd("SET").arg(key).arg(42).query(con)?;
redis::cmd("SET").arg(key).arg(42).query(con)?;

loop {
// we need to start watching the key we care about, so that our
// exec fails if the key changes.
let _: () = redis::cmd("WATCH").arg(key).query(con)?;
redis::cmd("WATCH").arg(key).query(con)?;

// load the old value, so we know what to increment.
let val: isize = redis::cmd("GET").arg(key).query(con)?;
@@ -117,7 +117,7 @@ fn do_atomic_increment(con: &mut redis::Connection) -> redis::RedisResult<()> {
println!("Run high-level atomic increment:");

// set the initial value so we have something to test with.
let _: () = con.set(key, 42)?;
con.set(key, 42)?;

// run the transaction block.
let (new_val,): (isize,) = transaction(con, &[key], |con, pipe| {
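Among the basic.rs changes above, the manual `fold(0, |a, b| a + b)` becomes `.sum()`, which is what clippy's `unnecessary_fold` lint suggests, and the `let _: () =` bindings are dropped. A standalone sketch of the fold-to-sum equivalence, without redis:

fn main() {
    let values = 0..=1000;

    // Manual fold, as in the old example code.
    let folded: i32 = values.clone().fold(0, |a, b| a + b);

    // Idiomatic equivalent suggested by clippy.
    let summed: i32 = values.sum();

    assert_eq!(folded, summed);
    println!("The sum of all numbers in 0-1000: {}", summed);
}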
69 changes: 31 additions & 38 deletions src/cluster.rs
@@ -11,36 +11,32 @@
//! use redis::Commands;
//! use redis::cluster::ClusterClient;
//!
//! fn main() {
//! let nodes = vec!["redis://127.0.0.1:6379/", "redis://127.0.0.1:6378/", "redis://127.0.0.1:6377/"];
//! let client = ClusterClient::open(nodes).unwrap();
//! let mut connection = client.get_connection().unwrap();
//! let nodes = vec!["redis://127.0.0.1:6379/", "redis://127.0.0.1:6378/", "redis://127.0.0.1:6377/"];
//! let client = ClusterClient::open(nodes).unwrap();
//! let mut connection = client.get_connection().unwrap();
//!
//! let _: () = connection.set("test", "test_data").unwrap();
//! let rv: String = connection.get("test").unwrap();
//! let _: () = connection.set("test", "test_data").unwrap();
//! let rv: String = connection.get("test").unwrap();
//!
//! assert_eq!(rv, "test_data");
//! }
//! assert_eq!(rv, "test_data");
//! ```
//!
//! # Pipelining
//! ```rust,no_run
//! use redis::{Commands, pipe};
//! use redis::cluster::ClusterClient;
//!
//! fn main() {
//! let nodes = vec!["redis://127.0.0.1:6379/", "redis://127.0.0.1:6378/", "redis://127.0.0.1:6377/"];
//! let client = ClusterClient::open(nodes).unwrap();
//! let mut connection = client.get_connection().unwrap();
//! let nodes = vec!["redis://127.0.0.1:6379/", "redis://127.0.0.1:6378/", "redis://127.0.0.1:6377/"];
//! let client = ClusterClient::open(nodes).unwrap();
//! let mut connection = client.get_connection().unwrap();
//!
//! let key = "test";
//! let key = "test";
//!
//! let _: () = pipe()
//! .rpush(key, "123").ignore()
//! .ltrim(key, -10, -1).ignore()
//! .expire(key, 60).ignore()
//! .query(&mut connection).unwrap();
//! }
//! let _: () = pipe()
//! .rpush(key, "123").ignore()
//! .ltrim(key, -10, -1).ignore()
//! .expire(key, 60).ignore()
//! .query(&mut connection).unwrap();
//! ```
use std::cell::RefCell;
use std::collections::{BTreeMap, HashMap, HashSet};
@@ -290,22 +286,18 @@ impl ClusterConnection {
// Query a node to discover slot-> master mappings.
fn refresh_slots(&self) -> RedisResult<()> {
let mut slots = self.slots.borrow_mut();
*slots = {
let new_slots = if self.readonly {
let mut rng = thread_rng();
self.create_new_slots(|slot_data| {
let replicas = slot_data.replicas();
if replicas.is_empty() {
slot_data.master().to_string()
} else {
replicas.choose(&mut rng).unwrap().to_string()
}
})?
} else {
self.create_new_slots(|slot_data| slot_data.master().to_string())?
};

new_slots
*slots = if self.readonly {
let mut rng = thread_rng();
self.create_new_slots(|slot_data| {
let replicas = slot_data.replicas();
if replicas.is_empty() {
slot_data.master().to_string()
} else {
replicas.choose(&mut rng).unwrap().to_string()
}
})?
} else {
self.create_new_slots(|slot_data| slot_data.master().to_string())?
};

let mut connections = self.connections.borrow_mut();
@@ -421,7 +413,7 @@ impl ClusterConnection {
} else {
// Create new connection.
// TODO: error handling
let conn = connect(addr.as_ref(), self.readonly, self.password.clone())?;
let conn = connect(addr, self.readonly, self.password.clone())?;
Ok(connections.entry(addr.to_string()).or_insert(conn))
}
}
@@ -442,6 +434,7 @@ impl ClusterConnection {
Ok(T::merge_results(results))
}

#[allow(clippy::unnecessary_unwrap)]
fn request<T, F>(&self, cmd: &[u8], mut func: F) -> RedisResult<T>
where
T: MergeResults + std::fmt::Debug,
@@ -454,10 +447,10 @@
return self.execute_on_all_nodes(func);
}
None => {
return Err(((
return Err((
ErrorKind::ClientError,
"this command cannot be safely routed in cluster mode",
))
)
.into())
}
};
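The `refresh_slots` change above removes an inner block that only bound `new_slots` and returned it, assigning the `if` expression to `*slots` directly; the error construction below it also loses a redundant pair of parentheses. A standalone sketch of the block-collapsing style fix, using a hypothetical `pick_target` function:

fn pick_target(readonly: bool) -> &'static str {
    // Before: an extra block that only bound a value and returned it.
    // let target = {
    //     let new_target = if readonly { "replica" } else { "master" };
    //     new_target
    // };

    // After: the `if` expression is used directly, as in refresh_slots above.
    if readonly { "replica" } else { "master" }
}

fn main() {
    assert_eq!(pick_target(true), "replica");
    assert_eq!(pick_target(false), "master");
}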
15 changes: 11 additions & 4 deletions src/commands.rs
@@ -49,6 +49,7 @@ macro_rules! implement_commands {
$(
$(#[$attr])*
#[inline]
#[allow(clippy::extra_unused_lifetimes, clippy::needless_lifetimes)]
fn $name<$lifetime, $($tyargs: $ty, )* RV: FromRedisValue>(
&mut self $(, $argname: $argty)*) -> RedisResult<RV>
{ Cmd::$name($($argname),*).query(self) }
@@ -126,6 +127,7 @@ macro_rules! implement_commands {
impl Cmd {
$(
$(#[$attr])*
#[allow(clippy::extra_unused_lifetimes, clippy::needless_lifetimes)]
pub fn $name<$lifetime, $($tyargs: $ty),*>($($argname: $argty),*) -> Self {
::std::mem::replace($body, Cmd::new())
}
@@ -165,11 +167,13 @@ macro_rules! implement_commands {
$(
$(#[$attr])*
#[inline]
#[allow(clippy::extra_unused_lifetimes, clippy::needless_lifetimes)]
fn $name<$lifetime, $($tyargs: $ty + Send + Sync + $lifetime,)* RV>(
& $lifetime mut self
$(, $argname: $argty)*
) -> crate::types::RedisFuture<'a, RV>
where RV: FromRedisValue,
) -> crate::types::RedisFuture<'a, RV>
where
RV: FromRedisValue,
{
Box::pin(async move { ($body).query_async(self).await })
}
@@ -183,9 +187,12 @@
$(
$(#[$attr])*
#[inline]
#[allow(clippy::extra_unused_lifetimes, clippy::needless_lifetimes)]
pub fn $name<$lifetime, $($tyargs: $ty),*>(
&mut self $(, $argname: $argty)*) -> &mut Self
{ self.add_command(::std::mem::replace($body, Cmd::new())) }
&mut self $(, $argname: $argty)*
) -> &mut Self {
self.add_command(::std::mem::replace($body, Cmd::new()))
}
)*
}
)
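The commands macro above now carries `#[allow(clippy::extra_unused_lifetimes, clippy::needless_lifetimes)]` on its generated methods rather than reworking the macro itself. A standalone sketch of what `needless_lifetimes` flags and how an `#[allow]` silences it, using a hypothetical `first_byte` function:

// Explicit lifetime that could be elided; clippy::needless_lifetimes would
// normally flag this, so the attribute keeps the warning off, mirroring the
// approach taken for the macro-generated commands.
#[allow(clippy::needless_lifetimes)]
fn first_byte<'a>(data: &'a [u8]) -> Option<&'a u8> {
    data.first()
}

// The elided form clippy would otherwise suggest for hand-written code.
fn first_byte_elided(data: &[u8]) -> Option<&u8> {
    data.first()
}

fn main() {
    assert_eq!(first_byte(b"abc"), first_byte_elided(b"abc"));
    assert_eq!(first_byte(b"abc"), Some(&b'a'));
}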
4 changes: 2 additions & 2 deletions src/connection.rs
@@ -155,7 +155,7 @@ fn url_to_unix_connection_info(url: url::Url) -> RedisResult<ConnectionInfo> {
),
None => 0,
},
passwd: url.password().and_then(|pw| Some(pw.to_string())),
passwd: url.password().map(|pw| pw.to_string()),
})
}

@@ -233,7 +233,7 @@ impl ActualConnection {
Some(timeout) => {
let mut tcp = None;
let mut last_error = None;
for addr in ((host, *port)).to_socket_addrs()? {
for addr in (host, *port).to_socket_addrs()? {
match TcpStream::connect_timeout(&addr, timeout) {
Ok(l) => {
tcp = Some(l);
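The connection changes above replace `and_then(|pw| Some(pw.to_string()))` with `map(|pw| pw.to_string())` and drop the doubled parentheses around the `(host, *port)` tuple. A standalone sketch of the `Option` part:

fn main() {
    let password: Option<&str> = Some("secret");

    // Before: and_then that only re-wraps the result in Some.
    let a: Option<String> = password.and_then(|pw| Some(pw.to_string()));

    // After: map expresses the same transformation directly.
    let b: Option<String> = password.map(|pw| pw.to_string());

    assert_eq!(a, b);
}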
2 changes: 0 additions & 2 deletions src/lib.rs
@@ -45,7 +45,6 @@
//!
//! Ok(())
//! }
//! # fn main() {}
//! ```
//!
//! ## Optional Features
@@ -106,7 +105,6 @@
//! let _ : () = con.set("my_key", 42)?;
//! Ok(())
//! }
//! # fn main() {}
//! ```
//!
//! Note that high-level commands are work in progress and many are still
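The lib.rs doc examples above lose their hidden `# fn main() {}` lines; rustdoc wraps doctest code in an implicit `main` when needed, so the empty explicit one adds nothing (clippy's `needless_doctest_main` lint points in the same direction). A minimal sketch, assuming a hypothetical crate `doc_sketch` with a hypothetical `add_one` function:

/// Adds one to the input.
///
/// ```
/// // No explicit `fn main` is needed; rustdoc supplies one implicitly,
/// // which is why the hidden `# fn main() {}` lines above could be removed.
/// assert_eq!(doc_sketch::add_one(1), 2);
/// ```
pub fn add_one(x: i32) -> i32 {
    x + 1
}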
4 changes: 2 additions & 2 deletions src/parser.rs
@@ -203,7 +203,7 @@ where
// SAFETY We either drop `self.reader` and return a slice with the lifetime of the
// reader or we return Pending/Err (neither which contains `'a`).
// In either case `poll_fill_buf` can not be called while its contents are exposed
Poll::Ready(Ok(x)) => unsafe { return Ok(&*(x as *const _)).into() },
Poll::Ready(Ok(x)) => Ok(unsafe { &*(x as *const _) }).into(),
Poll::Ready(Err(err)) => Err(err).into(),
Poll::Pending => {
reader = Some(r);
@@ -227,7 +227,7 @@

let (opt, mut removed) = {
let buffer = fill_buf(&mut reader).await?;
if buffer.len() == 0 {
if buffer.is_empty() {
return Err((ErrorKind::ResponseError, "Could not read enough bytes").into());
}
let buffer = if !remaining.is_empty() {
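Alongside the restructured unsafe cast, the parser change above swaps `buffer.len() == 0` for `buffer.is_empty()`, per clippy's `len_zero` lint. A standalone sketch:

fn main() {
    let buffer: &[u8] = &[];

    // Before: explicit comparison against zero, flagged by clippy.
    let empty_before = buffer.len() == 0;

    // After: the idiomatic emptiness check.
    let empty_after = buffer.is_empty();

    assert_eq!(empty_before, empty_after);
    assert!(buffer.is_empty());
}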
4 changes: 2 additions & 2 deletions src/script.rs
@@ -167,12 +167,12 @@ impl<'a> ScriptInvocation<'a> {
match eval_cmd.query_async(con).await {
Ok(val) => {
// Return the value from the script evaluation
return Ok(val).into();
Ok(val)
}
Err(err) => {
// Load the script into Redis if the script hash wasn't there already
if err.kind() == ErrorKind::NoScriptError {
let _hash = load_cmd.query_async(con).await?;
load_cmd.query_async(con).await?;
eval_cmd.query_async(con).await
} else {
Err(err)
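The `ScriptInvocation` change above lets `Ok(val)` be the match arm's value instead of `return Ok(val).into();`, and drops the unused `_hash` binding. A standalone sketch of returning directly from a match arm, using a hypothetical `classify` function:

fn classify(flag: bool) -> Result<&'static str, &'static str> {
    match flag {
        // Before (flagged): `return Ok("hit");`
        // After: the expression itself is the arm's (and the function's) value.
        true => Ok("hit"),
        false => Err("miss"),
    }
}

fn main() {
    assert_eq!(classify(true), Ok("hit"));
    assert_eq!(classify(false), Err("miss"));
}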
