
Commit 6e3a661

fix: fixes to allow file upload to work properly
This commit contains a couple of fixes to make file upload
without verification work as expected.
1. Avoid an extra wait on the spend existence check via get_record
2. Pause between batch uploads to let the network settle down
3. Avoid an out-of-index panic when trying to parse a RecordHeader
maqi authored and joshuef committed Aug 17, 2023
1 parent 3b9c890 commit 6e3a661
Showing 5 changed files with 20 additions and 3 deletions.
5 changes: 5 additions & 0 deletions .github/workflows/benchmark-prs.yml
@@ -176,6 +176,7 @@ jobs:
      - name: Confirming the number of files got uploaded and downloaded during the benchmark test
        shell: bash
        run: |
          ls -l $CLIENT_DATA_PATH
          ls -l $CLIENT_DATA_PATH/uploaded_files
          ls -l $CLIENT_DATA_PATH/downloaded_files
@@ -198,6 +199,10 @@ jobs:
        # Enable Job Summary for PRs
        summary-always: true

      - name: Start a client to carry out download to output the logs
        shell: bash
        run: target/release/safe --log-output-dest=data-dir files download

      #########################
      ### Stop Network ###
      #########################
1 change: 0 additions & 1 deletion sn_cli/benches/files.rs
@@ -33,7 +33,6 @@ fn safe_files_upload(dir: &str) {

fn safe_files_download() {
    let output = Command::new("./target/release/safe")
        .arg("--log-output-dest=data-dir")
        .arg("files")
        .arg("download")
        .output()
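For context, the hunk above is truncated mid-function. A minimal sketch of how safe_files_download plausibly reads after the change; the .expect call and status assertion are assumptions, not part of the shown diff:

use std::process::Command;

fn safe_files_download() {
    let output = Command::new("./target/release/safe")
        .arg("files")
        .arg("download")
        .output()
        // Assumed error handling; the hunk ends before this point.
        .expect("failed to execute `safe files download`");
    assert!(output.status.success(), "`safe files download` exited with failure");
}

Dropping --log-output-dest=data-dir here means the benchmark run itself no longer writes client logs; the new workflow step above performs a separate, logged download for that purpose.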
6 changes: 5 additions & 1 deletion sn_client/src/file_apis.rs
@@ -30,7 +30,7 @@ use tracing::trace;
use xor_name::XorName;

// Maximum number of concurrent chunks to be uploaded/retrieved for a file
const CHUNKS_BATCH_MAX_SIZE: usize = 32;
const CHUNKS_BATCH_MAX_SIZE: usize = 8;

/// File APIs.
pub struct Files {
@@ -174,6 +174,10 @@ impl Files {
            if next_batch_size == CHUNKS_BATCH_MAX_SIZE {
                let tasks_to_poll = tasks;
                join_all_tasks(tasks_to_poll).await?;
                // In case of not verifying, sleep for a little bit to let the network settle down.
                if !verify_store {
                    std::thread::sleep(std::time::Duration::from_secs(1));
                }
                tasks = vec![];
                next_batch_size = 0;
            }
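Together with the batch-size reduction above (32 down to 8), the upload loop now behaves roughly as sketched below: gather up to CHUNKS_BATCH_MAX_SIZE chunk uploads, await them as one batch, and pause when store verification is off. This is a simplified stand-alone sketch, not sn_client's actual code; Chunk, Error, and upload_chunk are hypothetical stand-ins. Note that the diff's std::thread::sleep blocks the executor thread for the full second; the sketch uses the non-blocking tokio::time::sleep as an assumed alternative for a tokio context.

use futures::future::join_all;
use std::time::Duration;

// Hypothetical stand-ins for sn_client's real types and per-chunk upload.
struct Chunk;
#[derive(Debug)]
struct Error;

const CHUNKS_BATCH_MAX_SIZE: usize = 8;

async fn upload_chunk(_chunk: &Chunk, _verify_store: bool) -> Result<(), Error> {
    Ok(())
}

async fn upload_in_batches(chunks: Vec<Chunk>, verify_store: bool) -> Result<(), Error> {
    for batch in chunks.chunks(CHUNKS_BATCH_MAX_SIZE) {
        // Launch up to CHUNKS_BATCH_MAX_SIZE uploads concurrently, then await the batch.
        let results = join_all(batch.iter().map(|c| upload_chunk(c, verify_store))).await;
        for result in results {
            result?;
        }
        // Without store verification there is no per-chunk round trip providing
        // back-pressure, so pause briefly to let the network settle between batches.
        if !verify_store {
            tokio::time::sleep(Duration::from_secs(1)).await;
        }
    }
    Ok(())
}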
5 changes: 4 additions & 1 deletion sn_networking/src/lib.rs
@@ -734,11 +734,14 @@ impl Network {
                }
            }
            Err(error) => {
                error!("{error:?}");
                if verification_attempts >= total_attempts {
                    break;
                }
                warn!(
                    "Did not retrieve Record '{:?}' from network!. Retrying...",
                    PrettyPrintRecordKey::from(key.clone()),
                );
                error!("{error:?}");
            }
        }

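For orientation, the surrounding retry loop has roughly this shape: attempt the fetch, and on failure either give up once the attempt budget is spent or log a warning alongside the error and retry. A generic sketch of that pattern; fetch_record, Key, and Record here are illustrative stand-ins, not sn_networking's API:

use tracing::{error, warn};

// Illustrative stand-ins for the crate's real types and fetch call.
struct Key;
struct Record;

async fn fetch_record(_key: &Key) -> Result<Record, String> {
    Err("record not found".into())
}

async fn get_with_retries(key: Key, total_attempts: usize) -> Option<Record> {
    let mut verification_attempts = 0;
    loop {
        verification_attempts += 1;
        match fetch_record(&key).await {
            Ok(record) => return Some(record),
            Err(error) => {
                // Attempt budget spent: give up and let the caller handle it.
                if verification_attempts >= total_attempts {
                    return None;
                }
                // Otherwise note the miss and go around again.
                warn!("Did not retrieve record from network. Retrying...");
                error!("{error:?}");
            }
        }
    }
}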
6 changes: 6 additions & 0 deletions sn_protocol/src/storage/header.rs
@@ -70,6 +70,9 @@ impl RecordHeader {
    }

    pub fn from_record(record: &Record) -> Result<Self, Error> {
        if record.value.len() <= RecordHeader::SIZE {
            return Err(Error::RecordHeaderParsingFailed);
        }
        Self::try_deserialize(&record.value[..RecordHeader::SIZE + 1])
            .map_err(|_| Error::RecordHeaderParsingFailed)
    }
@@ -78,6 +81,9 @@ impl RecordHeader {
    /// Utility to deserialize a `KAD::Record` into any type.
    /// Use `RecordHeader::from_record` if you want the `RecordHeader` instead.
    pub fn try_deserialize_record<T: serde::de::DeserializeOwned>(record: &Record) -> Result<T, Error> {
        if record.value.len() <= RecordHeader::SIZE {
            return Err(Error::RecordParsingFailed);
        }
        let bytes = &record.value[RecordHeader::SIZE..];
        rmp_serde::from_slice(bytes).map_err(|_| Error::RecordParsingFailed)
    }
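These guards matter because Rust slicing panics on an out-of-range bound: with a value shorter than RecordHeader::SIZE + 1 bytes, the expression &record.value[..RecordHeader::SIZE + 1] would panic instead of returning an error. A sketch of a regression test for the new behaviour, assuming libp2p's kad::Record shape (a key built from a Vec<u8>, plus publisher/expires fields):

#[test]
fn from_record_rejects_too_short_values() {
    // Exactly RecordHeader::SIZE bytes: one short of the SIZE + 1 slice bound.
    let record = Record {
        key: vec![0xab].into(),
        value: vec![0u8; RecordHeader::SIZE],
        publisher: None,
        expires: None,
    };
    // Before this fix the slice in from_record would panic; now it errors cleanly.
    assert!(matches!(
        RecordHeader::from_record(&record),
        Err(Error::RecordHeaderParsingFailed)
    ));
}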
