diff --git a/src/action/gridfs/download.rs b/src/action/gridfs/download.rs index d931fb6c6..24cf847f8 100644 --- a/src/action/gridfs/download.rs +++ b/src/action/gridfs/download.rs @@ -66,12 +66,14 @@ impl GridFsBucket { } else { (-1, -revision - 1) }; + // unwrap safety: `skip` is always >= 0 + let skip: u64 = skip.try_into().unwrap(); match self .files() .find_one(doc! { "filename": filename }) .sort(doc! { "uploadDate": sort }) - .skip(skip as u64) + .skip(skip) .await? { Some(fcd) => Ok(fcd), diff --git a/src/bson_util.rs b/src/bson_util.rs index f66bd6db1..d29849352 100644 --- a/src/bson_util.rs +++ b/src/bson_util.rs @@ -62,6 +62,29 @@ pub(crate) fn get_int_raw(val: RawBsonRef<'_>) -> Option<i64> { } } +#[allow(private_bounds)] +pub(crate) fn round_clamp<T: RoundClampTarget>(input: f64) -> T { + T::round_clamp(input) +} + +trait RoundClampTarget { + fn round_clamp(input: f64) -> Self; +} + +impl RoundClampTarget for u64 { + #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)] + fn round_clamp(input: f64) -> Self { + input as u64 + } +} + +impl RoundClampTarget for u32 { + #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)] + fn round_clamp(input: f64) -> Self { + input as u32 + } +} + /// Coerce numeric types into an `u64` if it would be lossless to do so. If this Bson is not numeric /// or the conversion would be lossy (e.g. 1.5 -> 1), this returns `None`. 
#[allow(clippy::cast_possible_truncation)] @@ -69,7 +92,9 @@ pub(crate) fn get_u64(val: &Bson) -> Option<u64> { match *val { Bson::Int32(i) => u64::try_from(i).ok(), Bson::Int64(i) => u64::try_from(i).ok(), - Bson::Double(f) if (f - (f as u64 as f64)).abs() <= f64::EPSILON => Some(f as u64), + Bson::Double(f) if (f - (round_clamp::<u64>(f) as f64)).abs() <= f64::EPSILON => { + Some(round_clamp(f)) + } _ => None, } } @@ -291,6 +316,31 @@ impl RawDocumentCollection for RawArrayBuf { } } +pub(crate) mod option_u64_as_i64 { + use serde::{Deserialize, Serialize}; + + pub(crate) fn serialize<S: serde::Serializer>( + value: &Option<u64>, + s: S, + ) -> std::result::Result<S::Ok, S::Error> { + let conv: Option<i64> = value + .as_ref() + .map(|&u| u.try_into()) + .transpose() + .map_err(serde::ser::Error::custom)?; + conv.serialize(s) + } + + pub(crate) fn deserialize<'de, D: serde::Deserializer<'de>>( + d: D, + ) -> std::result::Result<Option<u64>, D::Error> { + let conv = Option::<i64>::deserialize(d)?; + conv.map(|i| i.try_into()) + .transpose() + .map_err(serde::de::Error::custom) + } +} + #[cfg(test)] mod test { use crate::bson_util::num_decimal_digits; diff --git a/src/client/options.rs b/src/client/options.rs index 7f5a55357..aa3058e52 100644 --- a/src/client/options.rs +++ b/src/client/options.rs @@ -1952,7 +1952,10 @@ impl ConnectionString { // -1 maxStaleness means no maxStaleness, which is the default return Ok(()); } - Ordering::Greater => Duration::from_secs(max_staleness_seconds as u64), + Ordering::Greater => { + // unwrap safety: `max_staleness_seconds` will always be >= 0 + Duration::from_secs(max_staleness_seconds.try_into().unwrap()) + } }; parts.max_staleness = Some(max_staleness); diff --git a/src/cmap/conn/stream_description.rs b/src/cmap/conn/stream_description.rs index b6c6ca05f..2a58c2423 100644 --- a/src/cmap/conn/stream_description.rs +++ b/src/cmap/conn/stream_description.rs @@ -57,7 +57,7 @@ impl StreamDescription { logical_session_timeout: reply .command_response .logical_session_timeout_minutes - .map(|mins| 
Duration::from_secs(mins as u64 * 60)), + .map(|mins| Duration::from_secs(mins * 60)), max_bson_object_size: reply.command_response.max_bson_object_size, // The defaulting to 100,000 is here because mongocryptd doesn't include this field in // hello replies; this should never happen when talking to a real server. diff --git a/src/cmap/options.rs b/src/cmap/options.rs index 66b171dca..47baa2e8f 100644 --- a/src/cmap/options.rs +++ b/src/cmap/options.rs @@ -116,7 +116,8 @@ impl<'de> Deserialize<'de> for BackgroundThreadInterval { Ordering::Less => BackgroundThreadInterval::Never, Ordering::Equal => return Err(D::Error::custom("zero is not allowed")), Ordering::Greater => { - BackgroundThreadInterval::Every(Duration::from_millis(millis as u64)) + // unwrap safety: millis is validated to be in the u64 range + BackgroundThreadInterval::Every(Duration::from_millis(millis.try_into().unwrap())) } }) } diff --git a/src/hello.rs b/src/hello.rs index b070b8db1..5d3b2848f 100644 --- a/src/hello.rs +++ b/src/hello.rs @@ -157,7 +157,8 @@ pub(crate) struct HelloCommandResponse { pub is_replica_set: Option<bool>, /// The time in minutes that a session remains active after its most recent use. - pub logical_session_timeout_minutes: Option<i64>, + #[serde(default, with = "crate::bson_util::option_u64_as_i64")] + pub logical_session_timeout_minutes: Option<u64>, /// Optime and date information for the server's most recent write operation. 
pub last_write: Option<LastWrite>, diff --git a/src/index/options.rs b/src/index/options.rs index b196525e7..8a7afd68a 100644 --- a/src/index/options.rs +++ b/src/index/options.rs @@ -168,7 +168,9 @@ impl<'de> Deserialize<'de> for IndexVersion { 0 => Ok(IndexVersion::V0), 1 => Ok(IndexVersion::V1), 2 => Ok(IndexVersion::V2), - i => Ok(IndexVersion::Custom(i as u32)), + i => Ok(IndexVersion::Custom( + i.try_into().map_err(serde::de::Error::custom)?, + )), } } } @@ -213,7 +215,9 @@ impl<'de> Deserialize<'de> for TextIndexVersion { 1 => Ok(TextIndexVersion::V1), 2 => Ok(TextIndexVersion::V2), 3 => Ok(TextIndexVersion::V3), - i => Ok(TextIndexVersion::Custom(i as u32)), + i => Ok(TextIndexVersion::Custom( + i.try_into().map_err(serde::de::Error::custom)?, + )), } } } @@ -253,7 +257,9 @@ impl<'de> Deserialize<'de> for Sphere2DIndexVersion { match i32::deserialize(deserializer)? { 2 => Ok(Sphere2DIndexVersion::V2), 3 => Ok(Sphere2DIndexVersion::V3), - i => Ok(Sphere2DIndexVersion::Custom(i as u32)), + i => Ok(Sphere2DIndexVersion::Custom( + i.try_into().map_err(serde::de::Error::custom)?, + )), } } } diff --git a/src/lib.rs b/src/lib.rs index bf9fbce86..af394e71e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,8 +1,11 @@ #![doc = include_str!("../README.md")] -#![warn(missing_docs)] -#![warn(rustdoc::missing_crate_level_docs)] -#![warn(clippy::cast_possible_truncation)] -#![warn(clippy::cast_possible_wrap)] +#![warn( + missing_docs, + rustdoc::missing_crate_level_docs, + clippy::cast_possible_truncation, + clippy::cast_possible_wrap, + clippy::cast_sign_loss +)] #![allow( clippy::unreadable_literal, clippy::cognitive_complexity, diff --git a/src/sdam/description/server.rs b/src/sdam/description/server.rs index 7b9d01b7b..5159c1b28 100644 --- a/src/sdam/description/server.rs +++ b/src/sdam/description/server.rs @@ -409,7 +409,7 @@ impl ServerDescription { Ok(Some(ref reply)) => Ok(reply .command_response .logical_session_timeout_minutes - .map(|timeout| Duration::from_secs(timeout as 
u64 * 60))), + .map(|timeout| Duration::from_secs(timeout * 60))), Err(ref e) => Err(e.clone()), } } diff --git a/src/sdam/description/topology/server_selection/test/in_window.rs b/src/sdam/description/topology/server_selection/test/in_window.rs index ff0b856df..5780209ea 100644 --- a/src/sdam/description/topology/server_selection/test/in_window.rs +++ b/src/sdam/description/topology/server_selection/test/in_window.rs @@ -1,6 +1,9 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; -use crate::bson::{doc, Document}; +use crate::{ + bson::{doc, Document}, + bson_util::round_clamp, +}; use approx::abs_diff_eq; use serde::Deserialize; @@ -188,14 +191,14 @@ async fn load_balancing_test() { assert!( share_of_selections <= max_share, "expected no more than {}% of selections, instead got {}%", - (max_share * 100.0) as u32, - (share_of_selections * 100.0) as u32 + round_clamp::<u32>(max_share * 100.0), + round_clamp::<u32>(share_of_selections * 100.0) ); assert!( share_of_selections >= min_share, "expected at least {}% of selections, instead got {}%", - (min_share * 100.0) as u32, - (share_of_selections * 100.0) as u32 + round_clamp::<u32>(min_share * 100.0), + round_clamp::<u32>(share_of_selections * 100.0) ); } } diff --git a/src/sdam/description/topology/test.rs b/src/sdam/description/topology/test.rs index 9a4109177..a3445dc47 100644 --- a/src/sdam/description/topology/test.rs +++ b/src/sdam/description/topology/test.rs @@ -6,7 +6,8 @@ use std::time::Duration; pub use event::TestSdamEvent; -#[allow(clippy::cast_possible_truncation)] +use crate::bson_util::round_clamp; + pub(crate) fn f64_ms_as_duration(f: f64) -> Duration { - Duration::from_micros((f * 1000.0) as u64) + Duration::from_micros(round_clamp(f * 1000.0)) } diff --git a/src/sdam/description/topology/test/sdam.rs b/src/sdam/description/topology/test/sdam.rs index b5e8b3f47..0289a6987 100644 --- a/src/sdam/description/topology/test/sdam.rs +++ b/src/sdam/description/topology/test/sdam.rs @@ -80,7 +80,8 @@ pub(crate) 
struct TestHelloCommandResponse { pub arbiter_only: Option<bool>, #[serde(rename = "isreplicaset")] pub is_replica_set: Option<bool>, - pub logical_session_timeout_minutes: Option<i64>, + #[serde(default, with = "crate::bson_util::option_u64_as_i64")] + pub logical_session_timeout_minutes: Option<u64>, pub last_write: Option<LastWrite>, pub min_wire_version: Option<i32>, pub max_wire_version: Option<i32>, @@ -202,7 +203,8 @@ pub struct DescriptionOutcome { topology_type: TopologyType, set_name: Option<String>, servers: HashMap<String, Server>, - logical_session_timeout_minutes: Option<i64>, + #[serde(default, with = "crate::bson_util::option_u64_as_i64")] + logical_session_timeout_minutes: Option<u64>, compatible: Option<bool>, } @@ -219,7 +221,8 @@ pub struct Server { set_name: Option<String>, set_version: Option<i32>, election_id: Option<ObjectId>, - logical_session_timeout_minutes: Option<i64>, + #[serde(default, with = "crate::bson_util::option_u64_as_i64")] + logical_session_timeout_minutes: Option<u64>, min_wire_version: Option<i32>, max_wire_version: Option<i32>, topology_version: Option<TopologyVersion>, @@ -417,7 +420,7 @@ fn verify_description_outcome( let expected_timeout = outcome .logical_session_timeout_minutes - .map(|mins| Duration::from_secs((mins as u64) * 60)); + .map(|mins| Duration::from_secs(mins * 60)); assert_eq!( topology_description.logical_session_timeout, expected_timeout, "{test_description}: {phase_description}" ); @@ -475,9 +478,7 @@ fn verify_description_outcome( if let Some(logical_session_timeout_minutes) = server.logical_session_timeout_minutes { assert_eq!( actual_server.logical_session_timeout().unwrap(), - Some(Duration::from_secs( - logical_session_timeout_minutes as u64 * 60 - )), + Some(Duration::from_secs(logical_session_timeout_minutes * 60)), "{test_description} (phase {phase_description})" ); } diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 738538111..f618297b6 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -63,7 +63,8 @@ async fn max_write_batch_size_batching() { let models = vec![model; max_write_batch_size + 1]; let 
result = client.bulk_write(models).await.unwrap(); - assert_eq!(result.inserted_count as usize, max_write_batch_size + 1); + let inserted_count: usize = result.inserted_count.try_into().unwrap(); + assert_eq!(inserted_count, max_write_batch_size + 1); let mut command_started_events = client .events @@ -105,7 +106,8 @@ async fn max_message_size_bytes_batching() { let models = vec![model; num_models]; let result = client.bulk_write(models).await.unwrap(); - assert_eq!(result.inserted_count as usize, num_models); + let inserted_count: usize = result.inserted_count.try_into().unwrap(); + assert_eq!(inserted_count, num_models); let mut command_started_events = client .events @@ -162,10 +164,8 @@ async fn write_concern_error_batches() { assert_eq!(bulk_write_error.write_concern_errors.len(), 2); let partial_result = bulk_write_error.partial_result.unwrap(); - assert_eq!( - partial_result.inserted_count() as usize, - max_write_batch_size + 1 - ); + let inserted_count: usize = partial_result.inserted_count().try_into().unwrap(); + assert_eq!(inserted_count, max_write_batch_size + 1); let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); assert_eq!(command_started_events.len(), 2); @@ -428,7 +428,8 @@ async fn namespace_batch_splitting() { let num_models = first_models.len(); let result = client.bulk_write(first_models).await.unwrap(); - assert_eq!(result.inserted_count as usize, num_models); + let inserted_count: usize = result.inserted_count.try_into().unwrap(); + assert_eq!(inserted_count, num_models); let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); assert_eq!(command_started_events.len(), 1); @@ -459,7 +460,8 @@ async fn namespace_batch_splitting() { let num_models = second_models.len(); let result = client.bulk_write(second_models).await.unwrap(); - assert_eq!(result.inserted_count as usize, num_models); + let inserted_count: usize = result.inserted_count.try_into().unwrap(); + 
assert_eq!(inserted_count, num_models); let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); assert_eq!(command_started_events.len(), 2); diff --git a/src/test/coll.rs b/src/test/coll.rs index 282f5dfec..3e8e41df1 100644 --- a/src/test/coll.rs +++ b/src/test/coll.rs @@ -1314,5 +1314,6 @@ async fn aggregate_with_generics() { .await .unwrap(); let lens: Vec = cursor.try_collect().await.unwrap(); - assert_eq!(lens[0].len as usize, len); + let first_len: usize = lens[0].len.try_into().unwrap(); + assert_eq!(first_len, len); }