Fix digest clones and a few other minor clippy warnings
allada committed Jul 12, 2023
1 parent 7fef931 commit a523115
Showing 19 changed files with 115 additions and 162 deletions.
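
The pattern behind most of these changes: DigestInfo implements Copy, so clippy's clone_on_copy lint flags every explicit .clone() on it as redundant. The sketch below illustrates the lint with a hypothetical FakeDigest type; it is a minimal stand-in, not the project's actual DigestInfo definition.

#[derive(Clone, Copy, Debug)]
struct FakeDigest {
    packed_hash: [u8; 32],
    size_bytes: i64,
}

// A function taking the digest by value, as Store::has() does in this diff.
fn has(digest: FakeDigest) -> bool {
    digest.size_bytes >= 0 // stand-in for a real store lookup
}

fn main() {
    let digest = FakeDigest { packed_hash: [0u8; 32], size_bytes: 3 };
    // Before: `has(digest.clone())` compiles, but clippy warns
    // "using `clone` on type `FakeDigest` which implements the `Copy` trait".
    // After: pass the value directly; it is copied implicitly and `digest`
    // stays usable afterwards.
    let _found = has(digest);
    let _still_usable = digest.size_bytes;
}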
2 changes: 1 addition & 1 deletion cas/grpc_service/cas_server.rs
@@ -87,7 +87,7 @@ impl CasServer {
Ok(digest_info) => digest_info,
Err(err) => return Some(Err(err)),
};
match store_pin.has(digest_info.clone()).await {
match store_pin.has(digest_info).await {
Ok(maybe_size) => maybe_size.map_or(Some(Ok(digest)), |_| None),
Err(err) => {
log::error!(
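
For readers unfamiliar with the map_or call kept in this hunk: it maps "the store reported a size" to None (digest not missing) and "the store reported nothing" to Some(digest) (report it as missing). A minimal sketch of that logic with simplified stand-in types (usize sizes and u64 digests instead of the real gRPC/digest types):

// Sketch of the map_or logic in cas_server.rs, with simplified stand-in types.
fn keep_if_missing(maybe_size: Option<usize>, digest: u64) -> Option<u64> {
    // Some(size): the store has the blob, so drop it from the "missing" output.
    // None: the store does not have it, so report the digest as missing.
    maybe_size.map_or(Some(digest), |_| None)
}

fn main() {
    assert_eq!(keep_if_missing(Some(42), 1), None);
    assert_eq!(keep_if_missing(None, 2), Some(2));
}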
2 changes: 1 addition & 1 deletion cas/grpc_service/tests/ac_server_test.rs
@@ -42,7 +42,7 @@ async fn insert_into_store<T: Message>(
action_result.encode(&mut store_data)?;
let data_len = store_data.len();
let digest = DigestInfo::try_new(hash, action_size)?;
store.update_oneshot(digest.clone(), store_data.freeze()).await?;
store.update_oneshot(digest, store_data.freeze()).await?;
Ok(data_len.try_into().unwrap())
}

4 changes: 2 additions & 2 deletions cas/grpc_service/tests/worker_api_server_test.rs
@@ -286,7 +286,7 @@ pub mod execution_response_tests {
load_timestamp: make_system_time(0),
insert_timestamp: make_system_time(0),
unique_qualifier: ActionInfoHashKey {
digest: action_digest.clone(),
digest: action_digest,
salt: SALT,
},
skip_cache_lookup: true,
@@ -303,7 +303,7 @@
);
let result = ExecuteResult {
worker_id: test_context.worker_id.to_string(),
action_digest: Some(action_digest.clone().into()),
action_digest: Some(action_digest.into()),
salt: SALT,
result: Some(execute_result::Result::ExecuteResponse(ExecuteResponse {
result: Some(ProtoActionResult {
2 changes: 1 addition & 1 deletion cas/grpc_service/worker_api_server.rs
@@ -181,7 +181,7 @@ impl WorkerApiServer {
.err_tip(|| "Expected action_digest to exist")?
.try_into()?;
let action_info_hash_key = ActionInfoHashKey {
digest: action_digest.clone(),
digest: action_digest,
salt: execute_result.salt,
};

15 changes: 3 additions & 12 deletions cas/store/ac_utils.rs
@@ -42,12 +42,7 @@ pub async fn get_and_decode_digest<T: Message + Default>(
digest: &DigestInfo,
) -> Result<T, Error> {
let mut store_data_resp = store
.get_part_unchunked(
digest.clone(),
0,
Some(MAX_ACTION_MSG_SIZE),
Some(ESTIMATED_DIGEST_SIZE),
)
.get_part_unchunked(*digest, 0, Some(MAX_ACTION_MSG_SIZE), Some(ESTIMATED_DIGEST_SIZE))
.await;
if let Err(err) = &mut store_data_resp {
if err.code == Code::NotFound {
@@ -76,7 +71,7 @@ pub async fn serialize_and_upload_message<'a, T: Message>(
hasher.update(&buffer);
DigestInfo::new(hasher.finalize().into(), buffer.len() as i64)
};
upload_to_store(cas_store, digest.clone(), &mut Cursor::new(buffer)).await?;
upload_to_store(cas_store, digest, &mut Cursor::new(buffer)).await?;
Ok(digest)
}

@@ -121,11 +116,7 @@ pub fn upload_to_store<'a, R: AsyncRead + Unpin>(
) -> impl Future<Output = Result<(), Error>> + 'a {
let (mut tx, rx) = make_buf_channel_pair();
let upload_to_store_fut = cas_store
.update(
digest.clone(),
rx,
UploadSizeInfo::ExactSize(digest.size_bytes as usize),
)
.update(digest, rx, UploadSizeInfo::ExactSize(digest.size_bytes as usize))
.map(|r| r.err_tip(|| "Could not upload data to store in upload_to_store"));
let read_data_fut = async move {
loop {
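
Two of the ac_utils.rs changes share a second pattern: the function only holds a &DigestInfo, and because the type is Copy the cheap way to satisfy an API that takes the digest by value is a plain dereference rather than a clone (the multi-line calls were also collapsed once they fit on one line). A sketch under that assumption, with illustrative types and a stand-in for get_part_unchunked:

#[derive(Clone, Copy)]
struct FakeDigest {
    size_bytes: i64,
}

// Stand-in for an API such as get_part_unchunked() that takes the digest by value.
fn get_part(digest: FakeDigest, offset: i64) -> i64 {
    digest.size_bytes - offset
}

fn read_all(digest: &FakeDigest) -> i64 {
    // Before: get_part(digest.clone(), 0)
    // After: dereferencing copies the value out of the reference; no clone needed.
    get_part(*digest, 0)
}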
6 changes: 3 additions & 3 deletions cas/store/dedup_store.rs
@@ -106,7 +106,7 @@ impl StoreTrait for DedupStore {
let index_entries = {
let maybe_data = self
.pin_index_store()
.get_part_unchunked(digest.clone(), 0, None, Some(self.upload_normal_size))
.get_part_unchunked(digest, 0, None, Some(self.upload_normal_size))
.await
.err_tip(|| "Failed to read index store in dedup store");
let data = match maybe_data {
@@ -137,7 +137,7 @@ impl StoreTrait for DedupStore {
.map(move |index_entry| {
let content_store = self.content_store.clone();
async move {
let digest = DigestInfo::new(index_entry.packed_hash, index_entry.size_bytes as i64);
let digest = DigestInfo::new(index_entry.packed_hash, index_entry.size_bytes);
Pin::new(content_store.as_ref())
.has(digest)
.await
@@ -187,7 +187,7 @@ impl StoreTrait for DedupStore {

let content_store_pin = Pin::new(content_store.as_ref());
let digest = DigestInfo::new(hash.into(), frame.len() as i64);
if content_store_pin.has(digest.clone()).await?.is_some() {
if content_store_pin.has(digest).await?.is_some() {
// If our store has this digest, we don't need to upload it.
return Ok(index_entry);
}
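
One dedup_store.rs change is a different clippy fix: the index entry's size_bytes field appears to already be an i64, so the `as i64` cast was a no-op that clippy flags as unnecessary_cast. A sketch, assuming that field type (the real IndexEntry definition is not shown in this diff):

struct FakeIndexEntry {
    packed_hash: [u8; 32],
    size_bytes: i64,
}

fn entry_size(entry: &FakeIndexEntry) -> i64 {
    // Before: entry.size_bytes as i64  -- casting an i64 to i64 does nothing.
    // After: use the field directly.
    entry.size_bytes
}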
14 changes: 7 additions & 7 deletions cas/store/fast_slow_store.rs
@@ -55,7 +55,7 @@ impl FastSlowStore {
pub async fn populate_fast_store(self: Pin<&Self>, digest: DigestInfo) -> Result<(), Error> {
let maybe_size_info = self
.pin_fast_store()
.has(digest.clone())
.has(digest)
.await
.err_tip(|| "While querying in populate_fast_store")?;
if maybe_size_info.is_some() {
@@ -87,7 +87,7 @@ impl StoreTrait for FastSlowStore {
async fn has(self: Pin<&Self>, digest: DigestInfo) -> Result<Option<usize>, Error> {
// TODO(blaise.bruer) Investigate if we should maybe ignore errors here instead of
// forwarding the up.
if let Some(sz) = self.pin_fast_store().has(digest.clone()).await? {
if let Some(sz) = self.pin_fast_store().has(digest).await? {
return Ok(Some(sz));
}
self.pin_slow_store().has(digest).await
@@ -140,7 +140,7 @@ impl StoreTrait for FastSlowStore {
}
};

let fast_store_fut = self.pin_slow_store().update(digest.clone(), fast_rx, size_info);
let fast_store_fut = self.pin_slow_store().update(digest, fast_rx, size_info);
let slow_store_fut = self.pin_fast_store().update(digest, slow_rx, size_info);

let (data_stream_res, fast_res, slow_res) = join!(data_stream_fut, fast_store_fut, slow_store_fut);
@@ -159,16 +159,16 @@
// forwarding the up.
let fast_store = self.pin_fast_store();
let slow_store = self.pin_slow_store();
if fast_store.has(digest.clone()).await?.is_some() {
return fast_store.get_part(digest.clone(), writer, offset, length).await;
if fast_store.has(digest).await?.is_some() {
return fast_store.get_part(digest, writer, offset, length).await;
}
// We can only copy the data to our fast store if we are copying everything.
if offset != 0 || length.is_some() {
return slow_store.get_part(digest, writer, offset, length).await;
}

let sz_result = slow_store
.has(digest.clone())
.has(digest)
.await
.err_tip(|| "Failed to run has() on slow store");
let sz = match sz_result {
@@ -214,7 +214,7 @@ impl StoreTrait for FastSlowStore {
}
};

let slow_store_fut = slow_store.get(digest.clone(), slow_tx);
let slow_store_fut = slow_store.get(digest, slow_tx);
let fast_store_fut = fast_store.update(digest, fast_rx, UploadSizeInfo::ExactSize(sz));

let (data_stream_res, slow_res, fast_res) = join!(data_stream_fut, slow_store_fut, fast_store_fut);
8 changes: 4 additions & 4 deletions cas/store/filesystem_store.rs
@@ -294,7 +294,7 @@ impl LenEntry for FileEntryImpl {
return;
}
let from_path = encoded_file_path.get_file_path();
let mut new_digest = encoded_file_path.digest.clone();
let mut new_digest = encoded_file_path.digest;
make_temp_digest(&mut new_digest);

let to_path = to_full_path_from_digest(&encoded_file_path.shared_context.temp_path, &new_digest);
@@ -346,7 +346,7 @@ async fn add_files_to_cache<Fe: FileEntry>(
RwLock::new(EncodedFilePath {
shared_context: shared_context.clone(),
path_type: PathType::Content,
digest: digest.clone(),
digest,
}),
);
let time_since_anchor = anchor_time
@@ -534,7 +534,7 @@ impl<Fe: FileEntry> FilesystemStore<Fe> {
&final_digest,
);

self.evicting_map.insert(final_digest.clone(), entry.clone()).await;
self.evicting_map.insert(final_digest, entry.clone()).await;

let result = fs::rename(encoded_file_path.get_file_path(), &final_path)
.await
@@ -575,7 +575,7 @@ impl<Fe: FileEntry> StoreTrait for FilesystemStore<Fe> {
reader: DropCloserReadHalf,
_upload_size: UploadSizeInfo,
) -> Result<(), Error> {
let mut temp_digest = digest.clone();
let mut temp_digest = digest;
make_temp_digest(&mut temp_digest);

let (entry, temp_file, temp_full_path) = Fe::make_and_open_file(EncodedFilePath {
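
The filesystem_store.rs hunks show the same lint where a mutable copy is needed: for a Copy type, a plain `let mut x = value;` already produces an independent value to mutate, so the .clone() adds nothing. A sketch with an illustrative digest type and a stand-in make_temp_digest:

#[derive(Clone, Copy)]
struct FakeDigest {
    size_bytes: i64,
}

// Stand-in for the real make_temp_digest(), purely for illustration.
fn make_temp_digest(digest: &mut FakeDigest) {
    digest.size_bytes = -1;
}

fn to_temp_digest(digest: FakeDigest) -> FakeDigest {
    // Before: let mut temp_digest = digest.clone();
    // After: assignment copies the value; the caller's `digest` is untouched.
    let mut temp_digest = digest;
    make_temp_digest(&mut temp_digest);
    temp_digest
}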
16 changes: 8 additions & 8 deletions cas/store/tests/compression_store_test.rs
@@ -76,7 +76,7 @@ mod compression_store_tests {

const RAW_INPUT: &str = "123";
let digest = DigestInfo::try_new(VALID_HASH, DUMMY_DATA_SIZE).unwrap();
store.update_oneshot(digest.clone(), RAW_INPUT.into()).await?;
store.update_oneshot(digest, RAW_INPUT.into()).await?;

let store_data = store
.get_part_unchunked(digest, 0, None, None)
@@ -109,15 +109,15 @@ mod compression_store_tests {
];

let digest = DigestInfo::try_new(VALID_HASH, DUMMY_DATA_SIZE).unwrap();
store.update_oneshot(digest.clone(), RAW_DATA.as_ref().into()).await?;
store.update_oneshot(digest, RAW_DATA.as_ref().into()).await?;

// Read through the store forcing lots of decompression steps at different offsets
// and different window sizes. This will ensure we get most edge cases for when
// we go across block boundaries inclusive, on the fence and exclusive.
for read_slice_size in 0..(RAW_DATA.len() + 5) {
for offset in 0..(RAW_DATA.len() + 5) {
let store_data = store
.get_part_unchunked(digest.clone(), offset, Some(read_slice_size), None)
.get_part_unchunked(digest, offset, Some(read_slice_size), None)
.await
.err_tip(|| format!("Failed to get from inner store at {} - {}", offset, read_slice_size))?;

@@ -155,7 +155,7 @@ mod compression_store_tests {
rng.fill(&mut value[..]);

let digest = DigestInfo::try_new(VALID_HASH, DUMMY_DATA_SIZE).unwrap();
store.update_oneshot(digest.clone(), value.clone().into()).await?;
store.update_oneshot(digest, value.clone().into()).await?;

let store_data = store
.get_part_unchunked(digest, 0, None, None)
@@ -182,10 +182,10 @@
let store = Pin::new(&store_owned);

let digest = DigestInfo::try_new(VALID_HASH, DUMMY_DATA_SIZE).unwrap();
store.update_oneshot(digest.clone(), vec![].into()).await?;
store.update_oneshot(digest, vec![].into()).await?;

let store_data = store
.get_part_unchunked(digest.clone(), 0, None, None)
.get_part_unchunked(digest, 0, None, None)
.await
.err_tip(|| "Failed to get from inner store")?;

@@ -239,7 +239,7 @@ mod compression_store_tests {
};
let (res1, res2) = futures::join!(
send_fut,
store.update(digest.clone(), rx, UploadSizeInfo::MaxSize(MAX_SIZE_INPUT))
store.update(digest, rx, UploadSizeInfo::MaxSize(MAX_SIZE_INPUT))
);
res1.merge(res2)?;

@@ -312,7 +312,7 @@ mod compression_store_tests {
rng.fill(&mut value[..(data_len / 2)]);

let digest = DigestInfo::try_new(VALID_HASH, DUMMY_DATA_SIZE).unwrap();
store.update_oneshot(digest.clone(), value.clone().into()).await?;
store.update_oneshot(digest, value.clone().into()).await?;

let compressed_data = Pin::new(inner_store.as_ref())
.get_part_unchunked(digest, 0, None, None)
20 changes: 10 additions & 10 deletions cas/store/tests/dedup_store_test.rs
@@ -62,12 +62,12 @@ mod dedup_store_tests {
let digest = DigestInfo::try_new(VALID_HASH1, MEGABYTE_SZ).unwrap();

store
.update_oneshot(digest.clone(), original_data.clone().into())
.update_oneshot(digest, original_data.clone().into())
.await
.err_tip(|| "Failed to write data to dedup store")?;

let rt_data = store
.get_part_unchunked(digest.clone(), 0, None, Some(original_data.len()))
.get_part_unchunked(digest, 0, None, Some(original_data.len()))
.await
.err_tip(|| "Failed to get_part from dedup store")?;

@@ -89,7 +89,7 @@ mod dedup_store_tests {
let digest = DigestInfo::try_new(VALID_HASH1, MEGABYTE_SZ).unwrap();

store
.update_oneshot(digest.clone(), original_data.into())
.update_oneshot(digest, original_data.into())
.await
.err_tip(|| "Failed to write data to dedup store")?;

Expand All @@ -103,7 +103,7 @@ mod dedup_store_tests {

assert_eq!(did_delete, true, "Expected item to exist in store");

let result = store.get_part_unchunked(digest.clone(), 0, None, None).await;
let result = store.get_part_unchunked(digest, 0, None, None).await;
assert!(result.is_err(), "Expected result to be an error");
assert_eq!(
result.unwrap_err().code,
@@ -130,13 +130,13 @@ mod dedup_store_tests {
let digest = DigestInfo::try_new(VALID_HASH1, DATA_SIZE).unwrap();

store
.update_oneshot(digest.clone(), original_data.clone().into())
.update_oneshot(digest, original_data.clone().into())
.await
.err_tip(|| "Failed to write data to dedup store")?;

const ONE_THIRD_SZ: usize = DATA_SIZE / 3;
let rt_data = store
.get_part_unchunked(digest.clone(), ONE_THIRD_SZ, Some(ONE_THIRD_SZ), None)
.get_part_unchunked(digest, ONE_THIRD_SZ, Some(ONE_THIRD_SZ), None)
.await
.err_tip(|| "Failed to get_part from dedup store")?;

@@ -169,13 +169,13 @@ mod dedup_store_tests {
let digest1 = DigestInfo::try_new(VALID_HASH1, DATA_SIZE).unwrap();

store_pin
.update_oneshot(digest1.clone(), original_data.clone().into())
.update_oneshot(digest1, original_data.clone().into())
.await
.err_tip(|| "Failed to write data to dedup store")?;

{
// Check to ensure we our baseline `.has()` succeeds.
let size_info = store_pin.has(digest1.clone()).await.err_tip(|| "Failed to run .has")?;
let size_info = store_pin.has(digest1).await.err_tip(|| "Failed to run .has")?;
assert_eq!(size_info, Some(DATA_SIZE), "Expected sizes to match");
}
{
Expand All @@ -185,7 +185,7 @@ mod dedup_store_tests {
const DATA2: &str = "1234";
let digest2 = DigestInfo::try_new(VALID_HASH2, DATA2.len()).unwrap();
store_pin
.update_oneshot(digest2.clone(), DATA2.into())
.update_oneshot(digest2, DATA2.into())
.await
.err_tip(|| "Failed to write data to dedup store")?;

@@ -223,7 +223,7 @@ mod dedup_store_tests {
let digest = DigestInfo::try_new(VALID_HASH1, DATA_SIZE).unwrap();

{
let size_info = store_pin.has(digest.clone()).await.err_tip(|| "Failed to run .has")?;
let size_info = store_pin.has(digest).await.err_tip(|| "Failed to run .has")?;
assert_eq!(size_info, None, "Expected None to be returned, got {:?}", size_info);
}
Ok(())