Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

General tidying #1052

Merged
merged 8 commits into from
Dec 6, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
16 changes: 16 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,3 +16,19 @@ members = [
"sn_testnet",
"sn_transfers",
]

[workspace.lints.rust]
arithmetic_overflow = "forbid"
mutable_transmutes = "forbid"
no_mangle_const_items = "forbid"
unknown_crate_types = "forbid"
unsafe_code = "forbid"
trivial_casts = "warn"
trivial_numeric_casts = "warn"
unused_extern_crates = "warn"
unused_import_braces = "warn"

[workspace.lints.clippy]
uninlined_format_args = "warn"
unicode_not_nfc = "warn"
unused_async = "warn"
3 changes: 3 additions & 0 deletions sn_build_info/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,6 @@ version = "0.1.2"

[build-dependencies]
vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] }

[lints]
workspace = true
3 changes: 3 additions & 0 deletions sn_cli/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -59,3 +59,6 @@ criterion = "0.5.1"
tempfile = "3.6.0"
rand = { version = "~0.8.5", features = ["small_rng"] }
sn_protocol = { path = "../sn_protocol", version = "0.8.37", features = ["test-utils"]}

[lints]
workspace = true
4 changes: 2 additions & 2 deletions sn_cli/benches/files.rs
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ fn criterion_benchmark(c: &mut Criterion) {
    // Wait a little bit for the funds to be settled.
std::thread::sleep(Duration::from_secs(10));

let mut group = c.benchmark_group(format!("Upload Benchmark {}MB", size));
let mut group = c.benchmark_group(format!("Upload Benchmark {size}MB"));
group.sampling_mode(criterion::SamplingMode::Flat);
    // One sample may be composed of multiple iterations, and this is decided by `measurement_time`.
// Set this to a lower value to ensure each sample only contains one iteration.
Expand All @@ -116,7 +116,7 @@ fn criterion_benchmark(c: &mut Criterion) {

// Set the throughput to be reported in terms of bytes
group.throughput(Throughput::Bytes(size * 1024 * 1024));
let bench_id = format!("safe files upload {}mb", size);
let bench_id = format!("safe files upload {size}mb");
group.bench_function(bench_id, |b| {
b.iter(|| safe_files_upload(temp_dir_path_str))
});
Expand Down
2 changes: 1 addition & 1 deletion sn_cli/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ pub fn parse_log_output(val: &str) -> Result<LogOutputDest> {
.join("safe")
.join("client")
.join("logs")
.join(format!("log_{}", timestamp));
.join(format!("log_{timestamp}"));
Ok(LogOutputDest::Path(dir))
}
// The path should be a directory, but we can't use something like `is_dir` to check
Expand Down
2 changes: 1 addition & 1 deletion sn_cli/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ async fn main() -> Result<()> {

println!("Connecting to the network w/peers:");
for peer in &bootstrap_peers {
println!("{}", peer);
println!("{peer}");
}

let bootstrap_peers = if bootstrap_peers.is_empty() {
Expand Down
21 changes: 7 additions & 14 deletions sn_cli/src/subcommands/files/chunk_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -255,11 +255,7 @@ impl ChunkManager {
if !file_chunks_dir.exists() {
return None;
}
Self::read_file_chunks_dir(
file_chunks_dir,
path_xor.clone(),
original_file_name.clone(),
)
Self::read_file_chunks_dir(file_chunks_dir, path_xor, original_file_name.clone())
})
.collect::<BTreeMap<_, _>>();

Expand Down Expand Up @@ -365,7 +361,7 @@ impl ChunkManager {
// original_file_name: Used to create ChunkedFile
fn read_file_chunks_dir(
file_chunks_dir: PathBuf,
path_xor: PathXorName,
path_xor: &PathXorName,
original_file_name: OsString,
) -> Option<(PathXorName, ChunkedFile)> {
let mut file_xor_addr: Option<XorName> = None;
Expand Down Expand Up @@ -409,7 +405,7 @@ impl ChunkManager {
Some((
path_xor.clone(),
ChunkedFile {
file_name: original_file_name.clone(),
file_name: original_file_name,
file_xor_addr,
chunks,
},
Expand Down Expand Up @@ -558,12 +554,9 @@ mod tests {

// 2. the folder should exists, but chunk removed
let file_chunks_dir = manager.artifacts_dir.join(&path_xor.0);
let (path_xor_from_dir, chunked_file_from_dir) = ChunkManager::read_file_chunks_dir(
file_chunks_dir,
path_xor.clone(),
chunked_file.file_name.to_owned(),
)
.expect("Folder and metadata should be present");
let (path_xor_from_dir, chunked_file_from_dir) =
ChunkManager::read_file_chunks_dir(file_chunks_dir, &path_xor, chunked_file.file_name)
.expect("Folder and metadata should be present");
assert_eq!(chunked_file_from_dir.chunks.len(), total_chunks - 1);
assert_eq!(chunked_file_from_dir.file_xor_addr, file_xor_addr);
assert_eq!(path_xor_from_dir, path_xor);
Expand Down Expand Up @@ -601,7 +594,7 @@ mod tests {
let file_chunks_dir = manager_clone.artifacts_dir.join(path_xor.0.clone());
let (path_xor_from_dir, chunked_file_from_dir) = ChunkManager::read_file_chunks_dir(
file_chunks_dir,
path_xor.clone(),
path_xor,
chunked_file.file_name.to_owned(),
)
.expect("Folder and metadata should be present");
Expand Down
23 changes: 10 additions & 13 deletions sn_cli/src/subcommands/files/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -242,10 +242,10 @@ async fn upload_files(

// upload paid chunks
for join_result in join_all(upload_chunks_in_parallel(
file_api.clone(),
&file_api,
chunks_batch.to_vec(),
verify_store,
progress_bar.clone(),
&progress_bar,
show_holders,
))
.await
Expand Down Expand Up @@ -273,11 +273,11 @@ async fn upload_files(
info!("Failed to pay for {failed_payments_len} chunks and failed to upload {failed_uploads_len} chunks.");
if failed_payments_len != 0 {
println!("Failed to pay for {failed_payments_len} chunks with:");
println!("{:#?}", recorded_pay_errors);
println!("{recorded_pay_errors:#?}");
}
if failed_uploads_len != 0 {
println!("Failed to upload {failed_uploads_len} chunks with:");
println!("{:#?}", recorded_upload_errors);
println!("{recorded_upload_errors:#?}");
}
}

Expand Down Expand Up @@ -326,10 +326,10 @@ async fn upload_files(
/// This spawns a task for each chunk to be uploaded, returns those handles.
///
fn upload_chunks_in_parallel(
file_api: Files,
file_api: &Files,
chunks_paths: Vec<(XorName, PathBuf)>,
verify_store: bool,
progress_bar: ProgressBar,
progress_bar: &ProgressBar,
show_holders: bool,
) -> Vec<JoinHandle<Result<()>>> {
let mut upload_handles = Vec::new();
Expand Down Expand Up @@ -400,7 +400,7 @@ async fn download_files(

uploaded_files.insert((xor_name, file_name.to_string()));
} else {
println!("Skipping malformed line: {}", line);
println!("Skipping malformed line: {line}");
}
}

Expand All @@ -424,9 +424,9 @@ fn format_elapsed_time(elapsed_time: std::time::Duration) -> String {
let elapsed_minutes = elapsed_time.as_secs() / 60;
let elapsed_seconds = elapsed_time.as_secs() % 60;
if elapsed_minutes > 0 {
format!("{} minutes {} seconds", elapsed_minutes, elapsed_seconds)
format!("{elapsed_minutes} minutes {elapsed_seconds} seconds")
} else {
format!("{} seconds", elapsed_seconds)
format!("{elapsed_seconds} seconds")
}
}

Expand All @@ -438,10 +438,7 @@ async fn download_file(
show_holders: bool,
batch_size: usize,
) {
println!(
"Downloading {file_name} from {:64x} with batch-size {batch_size}",
xorname
);
println!("Downloading {file_name} from {xorname:64x} with batch-size {batch_size}");
debug!("Downloading {file_name} from {:64x}", xorname);
let downloaded_file_path = download_path.join(file_name);
match file_api
Expand Down
19 changes: 8 additions & 11 deletions sn_cli/src/subcommands/wallet.rs
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ pub(crate) async fn wallet_cmds_without_client(cmds: &WalletCmds, root_dir: &Pat
}
Ok(())
}
WalletCmds::Deposit { stdin, cash_note } => deposit(root_dir, *stdin, cash_note.clone()),
WalletCmds::Deposit { stdin, cash_note } => deposit(root_dir, *stdin, cash_note.as_deref()),
WalletCmds::Create { sk } => {
let main_sk = match SecretKey::from_hex(sk) {
Ok(sk) => MainSecretKey::new(sk),
Expand Down Expand Up @@ -233,7 +233,7 @@ async fn get_faucet(root_dir: &Path, client: &Client, url: String) -> Result<()>
} else {
url
};
let req_url = Url::parse(&format!("{}/{}", url, address_hex))?;
let req_url = Url::parse(&format!("{url}/{address_hex}"))?;
println!("Requesting token for wallet address: {address_hex}...");

let response = reqwest::get(req_url).await?;
Expand All @@ -243,15 +243,12 @@ async fn get_faucet(root_dir: &Path, client: &Client, url: String) -> Result<()>
receive(body, false, client, root_dir).await?;
println!("Successfully got tokens from faucet.");
} else {
println!(
"Failed to get tokens from faucet, server responded with: {:?}",
body
);
println!("Failed to get tokens from faucet, server responded with: {body:?}");
}
Ok(())
}

fn deposit(root_dir: &Path, read_from_stdin: bool, cash_note: Option<String>) -> Result<()> {
fn deposit(root_dir: &Path, read_from_stdin: bool, cash_note: Option<&str>) -> Result<()> {
if read_from_stdin {
return read_cash_note_from_stdin(root_dir);
}
Expand All @@ -271,7 +268,7 @@ fn deposit(root_dir: &Path, read_from_stdin: bool, cash_note: Option<String>) ->
if deposited.is_zero() {
println!("Nothing deposited.");
} else if let Err(err) = wallet.deposit_and_store_to_disk(&vec![]) {
println!("Failed to store deposited ({deposited}) amount: {:?}", err);
println!("Failed to store deposited ({deposited}) amount: {err:?}");
} else {
println!("Deposited {deposited}.");
}
Expand All @@ -283,10 +280,10 @@ fn read_cash_note_from_stdin(root_dir: &Path) -> Result<()> {
println!("Please paste your CashNote below:");
let mut input = String::new();
std::io::stdin().read_to_string(&mut input)?;
deposit_from_cash_note_hex(root_dir, input)
deposit_from_cash_note_hex(root_dir, &input)
}

fn deposit_from_cash_note_hex(root_dir: &Path, input: String) -> Result<()> {
fn deposit_from_cash_note_hex(root_dir: &Path, input: &str) -> Result<()> {
let mut wallet = LocalWallet::load_from(root_dir)?;
let cash_note = sn_transfers::CashNote::from_hex(input.trim())?;

Expand Down Expand Up @@ -347,7 +344,7 @@ async fn send(
}
};

let transfer = Transfer::transfer_from_cash_note(cash_note)?.to_hex()?;
let transfer = Transfer::transfer_from_cash_note(&cash_note)?.to_hex()?;
println!("The encrypted transfer has been successfully created.");
println!("Please share this to the recipient:\n\n{transfer}\n");
println!("The recipient can then use the 'receive' command to claim the funds.");
Expand Down
3 changes: 3 additions & 0 deletions sn_client/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -48,3 +48,6 @@ xor_name = "5.0.0"
eyre = "0.6.8"
# add rand to libp2p
libp2p-identity = { version="0.2.7", features = ["rand"] }

[lints]
workspace = true
24 changes: 12 additions & 12 deletions sn_client/src/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ impl Client {
};

let start = std::time::Instant::now();
let event_string = format!("{:?}", the_event);
let event_string = format!("{the_event:?}");
if let Err(err) = client_clone.handle_network_event(the_event) {
warn!("Error handling network event: {err}");
}
Expand Down Expand Up @@ -305,10 +305,10 @@ impl Client {
expected_holders: Default::default(),
};
let maybe_record = self.network.get_record_from_network(key, &get_cfg).await;
let record = match maybe_record {
let record = match &maybe_record {
Ok(r) => r,
Err(NetworkError::GetRecordError(GetRecordError::SplitRecord { result_map })) => {
return merge_split_register_records(address, &result_map)
return merge_split_register_records(address, result_map)
}
Err(e) => {
warn!("Failed to get record at {address:?} from the network: {e:?}");
Expand Down Expand Up @@ -723,11 +723,11 @@ impl Client {
}
}

fn get_register_from_record(record: Record) -> Result<SignedRegister> {
let header = RecordHeader::from_record(&record)?;
fn get_register_from_record(record: &Record) -> Result<SignedRegister> {
let header = RecordHeader::from_record(record)?;

if let RecordKind::Register = header.kind {
let register = try_deserialize_record::<SignedRegister>(&record)?;
let register = try_deserialize_record::<SignedRegister>(record)?;
Ok(register)
} else {
error!("RecordKind mismatch while trying to retrieve a signed register");
Expand All @@ -745,7 +745,7 @@ fn merge_split_register_records(
debug!("Got multiple records from the network for key: {pretty_key:?}");
let mut all_registers = vec![];
for (record, peers) in map.values() {
match get_register_from_record(record.clone()) {
match get_register_from_record(record) {
Ok(r) => all_registers.push(r),
Err(e) => {
warn!("Ignoring invalid register record found for {pretty_key:?} received from {peers:?}: {:?}", e);
Expand Down Expand Up @@ -796,23 +796,23 @@ mod tests {
// prepare registers
let mut register_root = Register::new(owner_pk, meta, Default::default());
let (root_hash, _) =
register_root.write(b"root_entry".to_vec(), Default::default(), &owner_sk)?;
register_root.write(b"root_entry".to_vec(), &BTreeSet::default(), &owner_sk)?;
let root = BTreeSet::from_iter(vec![root_hash]);
let signed_root = register_root.clone().into_signed(&owner_sk)?;

let mut register1 = register_root.clone();
let (_hash, op1) = register1.write(b"entry1".to_vec(), root.clone(), &owner_sk)?;
let (_hash, op1) = register1.write(b"entry1".to_vec(), &root, &owner_sk)?;
let mut signed_register1 = signed_root.clone();
signed_register1.add_op(op1)?;

let mut register2 = register_root.clone();
let (_hash, op2) = register2.write(b"entry2".to_vec(), root.clone(), &owner_sk)?;
let mut signed_register2 = signed_root.clone();
let (_hash, op2) = register2.write(b"entry2".to_vec(), &root, &owner_sk)?;
let mut signed_register2 = signed_root;
signed_register2.add_op(op2)?;

let mut register_bad = Register::new(owner_pk, meta, Default::default());
let (_hash, _op_bad) =
register_bad.write(b"bad_root".to_vec(), Default::default(), &owner_sk)?;
register_bad.write(b"bad_root".to_vec(), &BTreeSet::default(), &owner_sk)?;
let invalid_sig = register2.sign(&owner_sk)?; // steal sig from something else
let signed_register_bad = SignedRegister::new(register_bad, invalid_sig);

Expand Down
8 changes: 4 additions & 4 deletions sn_client/src/chunks/pac_man.rs
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ pub(crate) fn encrypt_large(
.iter()
.map(|chunk_info| {
let chunk_file_path = output_dir.join(hex::encode(chunk_info.dst_hash));
(chunk_info.dst_hash, chunk_file_path.to_path_buf())
(chunk_info.dst_hash, chunk_file_path)
})
.collect();

Expand Down Expand Up @@ -106,7 +106,7 @@ fn pack_data_map(data_map: DataMap) -> Result<(XorName, Vec<Chunk>)> {
// self encrypted into additional chunks, and now we have a new `DataMap`
// which points to all of those additional chunks.. and so on.
let mut chunks = vec![];
let mut chunk_content = wrap_data_map(DataMapLevel::First(data_map))?;
let mut chunk_content = wrap_data_map(&DataMapLevel::First(data_map))?;

let (address, additional_chunks) = loop {
let chunk = to_chunk(chunk_content);
Expand All @@ -129,14 +129,14 @@ fn pack_data_map(data_map: DataMap) -> Result<(XorName, Vec<Chunk>)> {
.map(|c| to_chunk(c.content.clone())) // no need to encrypt what is self-encrypted
.chain(chunks)
.collect();
chunk_content = wrap_data_map(DataMapLevel::Additional(data_map))?;
chunk_content = wrap_data_map(&DataMapLevel::Additional(data_map))?;
}
};

Ok((address, additional_chunks))
}

fn wrap_data_map(data_map: DataMapLevel) -> Result<Bytes> {
fn wrap_data_map(data_map: &DataMapLevel) -> Result<Bytes> {
// we use an initial/starting size of 300 bytes as that's roughly the current size of a DataMapLevel instance.
let mut bytes = BytesMut::with_capacity(300).writer();
let mut serialiser = rmp_serde::Serializer::new(&mut bytes);
Expand Down