Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion rust-toolchain.toml
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
[toolchain]
channel = "1.88.0"
channel = "1.89.0"
components = ["clippy", "llvm-tools-preview", "rustfmt"]
12 changes: 6 additions & 6 deletions src/blocks/tipset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -427,12 +427,12 @@ impl Tipset {
(*calibnet::GENESIS_CID, &headers.calibnet),
(*mainnet::GENESIS_CID, &headers.mainnet),
] {
if let Some(known_block_cid) = known_blocks.get(&tipset.epoch()) {
if known_block_cid == &tipset.min_ticket_block().cid().to_string() {
return store
.get_cbor(&genesis_cid)?
.context("Genesis block missing from database");
}
if let Some(known_block_cid) = known_blocks.get(&tipset.epoch())
&& known_block_cid == &tipset.min_ticket_block().cid().to_string()
{
return store
.get_cbor(&genesis_cid)?
.context("Genesis block missing from database");
}
}

Expand Down
14 changes: 7 additions & 7 deletions src/chain/store/index.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,13 +47,13 @@ impl<DB: Blockstore> ChainIndex<DB> {
/// Loads a tipset from memory given the tipset keys and cache. Semantically
/// identical to [`Tipset::load`] but the result is cached.
pub fn load_tipset(&self, tsk: &TipsetKey) -> Result<Option<Arc<Tipset>>, Error> {
if !is_env_truthy("FOREST_TIPSET_CACHE_DISABLED") {
if let Some(ts) = self.ts_cache.lock().get(tsk) {
metrics::LRU_CACHE_HIT
.get_or_create(&metrics::values::TIPSET)
.inc();
return Ok(Some(ts.clone()));
}
if !is_env_truthy("FOREST_TIPSET_CACHE_DISABLED")
&& let Some(ts) = self.ts_cache.lock().get(tsk)
{
metrics::LRU_CACHE_HIT
.get_or_create(&metrics::values::TIPSET)
.inc();
return Ok(Some(ts.clone()));
}

let ts_opt = Tipset::load(&self.db, tsk)?.map(Arc::new);
Expand Down
20 changes: 10 additions & 10 deletions src/chain/store/tipset_tracker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,16 +54,16 @@ impl<DB: Blockstore> TipsetTracker<DB> {
/// height.
fn check_multiple_blocks_from_same_miner(&self, cids: &[Cid], header: &CachingBlockHeader) {
for cid in cids.iter() {
if let Ok(Some(block)) = CachingBlockHeader::load(&self.db, *cid) {
if header.miner_address == block.miner_address {
warn!(
"Have multiple blocks from miner {} at height {} in our tipset cache {}-{}",
header.miner_address,
header.epoch,
header.cid(),
cid
);
}
if let Ok(Some(block)) = CachingBlockHeader::load(&self.db, *cid)
&& header.miner_address == block.miner_address
{
warn!(
"Have multiple blocks from miner {} at height {} in our tipset cache {}-{}",
header.miner_address,
header.epoch,
header.cid(),
cid
);
}
}
}
Expand Down
24 changes: 12 additions & 12 deletions src/chain_sync/network_context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -103,10 +103,10 @@ where
F: Fn(&T) -> bool,
{
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
if validate(&value) {
return Some(value);
}
if let Ok(Ok(value)) = result
&& validate(&value)
{
return Some(value);
}
}
// So far every task has failed
Expand Down Expand Up @@ -309,14 +309,14 @@ where
.get_ok_validated(validate)
.await
.ok_or_else(make_failure_message)?;
if let Ok(mean) = success_time_cost_millis_stats.lock().mean() {
if CHAIN_EXCHANGE_TIMEOUT_MILLIS.adapt_on_success(mean as _) {
tracing::debug!(
"Decreased chain exchange timeout to {}ms. Current average: {}ms",
CHAIN_EXCHANGE_TIMEOUT_MILLIS.get(),
mean,
);
}
if let Ok(mean) = success_time_cost_millis_stats.lock().mean()
&& CHAIN_EXCHANGE_TIMEOUT_MILLIS.adapt_on_success(mean as _)
{
tracing::debug!(
"Decreased chain exchange timeout to {}ms. Current average: {}ms",
CHAIN_EXCHANGE_TIMEOUT_MILLIS.get(),
mean,
);
}
trace!("Succeed: handle_chain_exchange_request");
v
Expand Down
8 changes: 4 additions & 4 deletions src/chain_sync/validation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,10 +74,10 @@ impl TipsetValidator<'_> {
// previously been seen in the bad blocks cache
for block in self.0.blocks() {
self.validate_msg_root(&chainstore.db, block)?;
if let Some(bad_block_cache) = bad_block_cache {
if bad_block_cache.peek(block.cid()).is_some() {
return Err(TipsetValidationError::InvalidBlock(*block.cid()));
}
if let Some(bad_block_cache) = bad_block_cache
&& bad_block_cache.peek(block.cid()).is_some()
{
return Err(TipsetValidationError::InvalidBlock(*block.cid()));
}
}

Expand Down
2 changes: 1 addition & 1 deletion src/cid_collections/hash_map.rs
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ impl<V> CidHashMap<V> {
///
/// See also [`HashMap::entry`].
#[allow(dead_code)]
pub fn entry(&mut self, key: Cid) -> Entry<V> {
pub fn entry(&mut self, key: Cid) -> Entry<'_, V> {
match MaybeCompactedCid::from(key) {
MaybeCompactedCid::Compact(c) => match self.compact.entry(c) {
StdEntry::Occupied(o) => Entry::Occupied(OccupiedEntry {
Expand Down
14 changes: 7 additions & 7 deletions src/cid_collections/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,13 +75,13 @@ mod imp {
type Error = &'static str;

fn try_from(value: Cid) -> Result<Self, Self::Error> {
if value.version() == cid::Version::V1 && value.codec() == fvm_ipld_encoding::DAG_CBOR {
if let Ok(small_hash) = value.hash().resize() {
let (code, digest, size) = small_hash.into_inner();
if code == u64::from(MultihashCode::Blake2b256) && size as usize == Self::WIDTH
{
return Ok(Self { digest });
}
if value.version() == cid::Version::V1
&& value.codec() == fvm_ipld_encoding::DAG_CBOR
&& let Ok(small_hash) = value.hash().resize()
{
let (code, digest, size) = small_hash.into_inner();
if code == u64::from(MultihashCode::Blake2b256) && size as usize == Self::WIDTH {
return Ok(Self { digest });
}
}
Err("cannot be compacted")
Expand Down
8 changes: 4 additions & 4 deletions src/cli/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,10 @@ where
.block_on(async {
logger::setup_logger(&crate::cli_shared::cli::CliOpts::default());

if let Ok(name) = StateNetworkName::call(&client, ()).await {
if !matches!(NetworkChain::from_str(&name), Ok(NetworkChain::Mainnet)) {
CurrentNetwork::set_global(Network::Testnet);
}
if let Ok(name) = StateNetworkName::call(&client, ()).await
&& !matches!(NetworkChain::from_str(&name), Ok(NetworkChain::Mainnet))
{
CurrentNetwork::set_global(Network::Testnet);
}

// Run command
Expand Down
12 changes: 6 additions & 6 deletions src/cli/subcommands/net_cmd.rs
Original file line number Diff line number Diff line change
Expand Up @@ -160,12 +160,12 @@ impl NetCommands {
Self::Reachability => {
let nat_status = NetAutoNatStatus::call(&client, ()).await?;
println!("AutoNAT status: {}", nat_status.reachability_as_str());
if let Some(public_addrs) = nat_status.public_addrs {
if !public_addrs.is_empty() {
// Format is compatible with Go code:
// `fmt.Println("Public address:", []string{"foo", "bar"})`
println!("Public address: [{}]", public_addrs.join(" "));
}
if let Some(public_addrs) = nat_status.public_addrs
&& !public_addrs.is_empty()
{
// Format is compatible with Go code:
// `fmt.Println("Public address:", []string{"foo", "bar"})`
println!("Public address: [{}]", public_addrs.join(" "));
}
Ok(())
}
Expand Down
13 changes: 0 additions & 13 deletions src/cli/subcommands/state_cmd.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,25 +6,12 @@ use crate::rpc::state::ForestStateCompute;
use crate::rpc::{self, prelude::*};
use crate::shim::address::{CurrentNetwork, Error, Network, StrictAddress};
use crate::shim::clock::ChainEpoch;
use crate::shim::econ::TokenAmount;
use cid::Cid;
use clap::Subcommand;
use fvm_ipld_encoding::tuple::*;
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;

#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug)]
struct VestingSchedule {
entries: Vec<VestingScheduleEntry>,
}

#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug)]
struct VestingScheduleEntry {
epoch: ChainEpoch,
amount: TokenAmount,
}

#[derive(Debug, Subcommand)]
pub enum StateCommands {
Fetch {
Expand Down
4 changes: 2 additions & 2 deletions src/cli_shared/snapshot.rs
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,7 @@ mod parse {
}
}

fn full(input: &str) -> nom::IResult<&str, ParsedFilename> {
fn full(input: &str) -> nom::IResult<&str, ParsedFilename<'_>> {
let (rest, (vendor, _snapshot_, chain, _, date, _height_, height, car_zst)) = (
take_until("_snapshot_"),
tag("_snapshot_"),
Expand All @@ -291,7 +291,7 @@ mod parse {
))
}

fn short(input: &str) -> nom::IResult<&str, ParsedFilename> {
fn short(input: &str) -> nom::IResult<&str, ParsedFilename<'_>> {
let (rest, (height, _, date, _, time, _)) = (
number,
tag("_"),
Expand Down
18 changes: 9 additions & 9 deletions src/daemon/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -337,15 +337,15 @@ fn handle_admin_token(
tracing::warn!("Failed to save the default admin token file: {e}");
}
if let Some(path) = opts.save_token.as_ref() {
if let Some(dir) = path.parent() {
if !dir.is_dir() {
std::fs::create_dir_all(dir).with_context(|| {
format!(
"Failed to create `--save-token` directory {}",
dir.display()
)
})?;
}
if let Some(dir) = path.parent()
&& !dir.is_dir()
{
std::fs::create_dir_all(dir).with_context(|| {
format!(
"Failed to create `--save-token` directory {}",
dir.display()
)
})?;
}
std::fs::write(path, &token)
.with_context(|| format!("Failed to save admin token to {}", path.display()))?;
Expand Down
44 changes: 22 additions & 22 deletions src/daemon/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -133,28 +133,28 @@ async fn maybe_import_snapshot(

let snapshot_tracker = ctx.snapshot_progress_tracker.clone();
// Import chain if needed
if !opts.skip_load.unwrap_or_default() {
if let Some(path) = &config.client.snapshot_path {
let (car_db_path, ts) = import_chain_as_forest_car(
path,
&ctx.db_meta_data.get_forest_car_db_dir(),
config.client.import_mode,
&snapshot_tracker,
)
.await?;
ctx.db
.read_only_files(std::iter::once(car_db_path.clone()))?;
let ts_epoch = ts.epoch();
// Explicitly set heaviest tipset here in case HEAD_KEY has already been set
// in the current setting store
ctx.state_manager
.chain_store()
.set_heaviest_tipset(ts.into())?;
debug!(
"Loaded car DB at {} and set current head to epoch {ts_epoch}",
car_db_path.display(),
);
}
if !opts.skip_load.unwrap_or_default()
&& let Some(path) = &config.client.snapshot_path
{
let (car_db_path, ts) = import_chain_as_forest_car(
path,
&ctx.db_meta_data.get_forest_car_db_dir(),
config.client.import_mode,
&snapshot_tracker,
)
.await?;
ctx.db
.read_only_files(std::iter::once(car_db_path.clone()))?;
let ts_epoch = ts.epoch();
// Explicitly set heaviest tipset here in case HEAD_KEY has already been set
// in the current setting store
ctx.state_manager
.chain_store()
.set_heaviest_tipset(ts.into())?;
debug!(
"Loaded car DB at {} and set current head to epoch {ts_epoch}",
car_db_path.display(),
);
}

// If the snapshot progress state is not completed,
Expand Down
25 changes: 0 additions & 25 deletions src/db/blockstore_with_read_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,6 @@ pub trait BlockstoreReadCache {
fn get(&self, k: &Cid) -> Option<Vec<u8>>;

fn put(&self, k: Cid, block: Vec<u8>);

fn len(&self) -> usize;
}

pub type LruBlockstoreReadCache = SizeTrackingLruCache<get_size::CidWrapper, Vec<u8>>;
Expand All @@ -28,25 +26,6 @@ impl BlockstoreReadCache for SizeTrackingLruCache<get_size::CidWrapper, Vec<u8>>
fn put(&self, k: Cid, block: Vec<u8>) {
self.push(k.into(), block);
}

fn len(&self) -> usize {
self.len()
}
}

#[derive(Debug, Default)]
pub struct VoidBlockstoreReadCache;

impl BlockstoreReadCache for VoidBlockstoreReadCache {
fn get(&self, _: &Cid) -> Option<Vec<u8>> {
None
}

fn put(&self, _: Cid, _: Vec<u8>) {}

fn len(&self) -> usize {
0
}
}

impl<T: BlockstoreReadCache> BlockstoreReadCache for Arc<T> {
Expand All @@ -57,10 +36,6 @@ impl<T: BlockstoreReadCache> BlockstoreReadCache for Arc<T> {
fn put(&self, k: Cid, block: Vec<u8>) {
self.as_ref().put(k, block)
}

fn len(&self) -> usize {
self.as_ref().len()
}
}

pub trait BlockstoreReadCacheStats {
Expand Down
8 changes: 4 additions & 4 deletions src/db/car/any.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,10 +36,10 @@ impl<ReaderT: RandomAccessFileReader> AnyCar<ReaderT> {
}

// Maybe use a tempfile for this in the future.
if let Ok(decompressed) = zstd::stream::decode_all(positioned_io::Cursor::new(&reader)) {
if let Ok(mem_car) = super::PlainCar::new(decompressed) {
return Ok(AnyCar::Memory(mem_car));
}
if let Ok(decompressed) = zstd::stream::decode_all(positioned_io::Cursor::new(&reader))
&& let Ok(mem_car) = super::PlainCar::new(decompressed)
{
return Ok(AnyCar::Memory(mem_car));
}

if let Ok(plain_car) = super::PlainCar::new(reader) {
Expand Down
8 changes: 4 additions & 4 deletions src/db/car/plain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -375,10 +375,10 @@ fn read_block_data_location_and_skip(
mut reader: (impl Read + Seek),
limit_position: Option<u64>,
) -> io::Result<Option<(Cid, UncompressedBlockDataLocation)>> {
if let Some(limit_position) = limit_position {
if reader.stream_position()? >= limit_position {
return Ok(None);
}
if let Some(limit_position) = limit_position
&& reader.stream_position()? >= limit_position
{
return Ok(None);
}
let Some(body_length) = read_varint_body_length_or_eof(&mut reader)? else {
return Ok(None);
Expand Down
Loading
Loading