diff --git a/chain/chain-primitives/src/error.rs b/chain/chain-primitives/src/error.rs index 62f7c6880d5..6392c2a7061 100644 --- a/chain/chain-primitives/src/error.rs +++ b/chain/chain-primitives/src/error.rs @@ -240,7 +240,7 @@ pub trait LogTransientStorageError { impl LogTransientStorageError for Result { fn log_storage_error(self, message: &str) -> Self { if let Err(err) = &self { - tracing::error!(target: "client", "Transient storage error: {message}, {err}"); + tracing::error!(target: "chain", "Transient storage error: {message}, {err}"); } self } diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index 06d48a500f9..a7217ac037d 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -1080,7 +1080,7 @@ impl Chain { header: &BlockHeader, challenges: &mut Vec, ) -> Result<(), Error> { - debug!(target: "chain", "Process block header: {} at {}", header.hash(), header.height()); + debug!(target: "chain", block_hash=?header.hash(), height=header.height(), "process_block_header"); check_known(self, header.hash())?.map_err(|e| Error::BlockKnown(e))?; self.validate_header(header, &Provenance::NONE, challenges)?; @@ -1413,12 +1413,12 @@ impl Chain { block_processing_artifacts: &mut BlockProcessingArtifact, apply_chunks_done_sender: Option>, ) -> Result<(), Error> { + let block_height = block.header().height(); let _span = - debug_span!(target: "chain", "start_process_block_async", ?provenance).entered(); + debug_span!(target: "chain", "start_process_block_async", ?provenance, height=block_height).entered(); let block_received_time = self.clock.now(); metrics::BLOCK_PROCESSING_ATTEMPTS_TOTAL.inc(); - let block_height = block.header().height(); let hash = *block.hash(); let res = self.start_process_block_impl( me, @@ -1627,7 +1627,8 @@ impl Chain { block_processing_artifacts: &mut BlockProcessingArtifact, apply_chunks_done_sender: Option>, ) -> Result<(), Error> { - let _span = tracing::debug_span!(target: "sync", "reset_heads_post_state_sync").entered(); + let _span = tracing::debug_span!(target: "sync", "reset_heads_post_state_sync", ?sync_hash) + .entered(); // Get header we were syncing into. let header = self.get_block_header(&sync_hash)?; let hash = *header.prev_hash(); @@ -1738,8 +1739,9 @@ impl Chain { .add_block_with_missing_chunks(orphan, missing_chunk_hashes.clone()); debug!( target: "chain", - "Process block: missing chunks. Block hash: {:?}. 
Missing chunks: {:?}", - block_hash, missing_chunk_hashes, + ?block_hash, + chunk_hashes=missing_chunk_hashes.iter().map(|h| format!("{:?}", h)).join(","), + "Process block: missing chunks" ); } Error::EpochOutOfBounds(epoch_id) => { @@ -1750,10 +1752,10 @@ impl Chain { Error::BlockKnown(block_known_error) => { debug!( target: "chain", - "Block {} at {} is known at this time: {:?}", - block.hash(), - block_height, - block_known_error); + block_hash=?block.hash(), + height=block_height, + error=?block_known_error, + "Block known at this time"); } _ => {} } @@ -1920,12 +1922,7 @@ impl Chain { true, ) }; - tracing::debug!( - target: "chain", - "Updating flat storage for shard {} need_flat_storage_update: {}", - shard_id, - need_flat_storage_update - ); + tracing::debug!(target: "chain", shard_id,need_flat_storage_update, "Updating flat storage"); if need_flat_storage_update { let shard_uid = self.epoch_manager.shard_id_to_uid(shard_id, epoch_id)?; @@ -2007,7 +2004,7 @@ impl Chain { // see if the block is already in processing or if there are too many blocks being processed self.blocks_in_processing.add_dry_run(block.hash())?; - debug!(target: "chain", num_approvals = header.num_approvals(), "Preprocess block"); + debug!(target: "chain", height=header.height(), num_approvals = header.num_approvals(), "preprocess_block"); // Check that we know the epoch of the block before we try to get the header // (so that a block from unknown epoch doesn't get marked as an orphan) @@ -2525,7 +2522,7 @@ impl Chain { "get_state_response_part", shard_id, part_id, - %sync_hash) + ?sync_hash) .entered(); // Check cache let key = borsh::to_vec(&StatePartKey(sync_hash, shard_id, part_id))?; diff --git a/chain/chain/src/chain_update.rs b/chain/chain/src/chain_update.rs index 37b5c80c910..b14ace7bb10 100644 --- a/chain/chain/src/chain_update.rs +++ b/chain/chain/src/chain_update.rs @@ -152,7 +152,7 @@ impl<'a> ChainUpdate<'a> { apply_results: Vec, should_save_state_transition_data: bool, ) -> Result<(), Error> { - let _span = tracing::debug_span!(target: "chain", "apply_chunk_postprocessing").entered(); + let _span = tracing::debug_span!(target: "chain", "apply_chunk_postprocessing", height=block.header().height()).entered(); for result in apply_results { self.process_apply_chunk_result(block, result, should_save_state_transition_data)?; } @@ -725,7 +725,7 @@ impl<'a> ChainUpdate<'a> { shard_state_header: ShardStateSyncResponseHeader, ) -> Result { let _span = - tracing::debug_span!(target: "sync", "chain_update_set_state_finalize").entered(); + tracing::debug_span!(target: "sync", "chain_update_set_state_finalize", shard_id, ?sync_hash).entered(); let (chunk, incoming_receipts_proofs) = match shard_state_header { ShardStateSyncResponseHeader::V1(shard_state_header) => ( ShardChunk::V1(shard_state_header.chunk), @@ -855,7 +855,9 @@ impl<'a> ChainUpdate<'a> { shard_id: ShardId, sync_hash: CryptoHash, ) -> Result { - let _span = tracing::debug_span!(target: "sync", "set_state_finalize_on_height").entered(); + let _span = + tracing::debug_span!(target: "sync", "set_state_finalize_on_height", height, shard_id) + .entered(); let block_header_result = self.chain_store_update.get_block_header_on_chain_by_height(&sync_hash, height); if let Err(_) = block_header_result { diff --git a/chain/chain/src/update_shard.rs b/chain/chain/src/update_shard.rs index d1e7b213811..8b1a956bae8 100644 --- a/chain/chain/src/update_shard.rs +++ b/chain/chain/src/update_shard.rs @@ -177,7 +177,7 @@ pub fn apply_new_chunk( let _span = 
tracing::debug_span!( target: "chain", parent: parent_span, - "new_chunk", + "apply_new_chunk", shard_id) .entered(); let gas_limit = chunk_header.gas_limit(); @@ -242,7 +242,7 @@ pub fn apply_old_chunk( let _span = tracing::debug_span!( target: "chain", parent: parent_span, - "existing_chunk", + "apply_old_chunk", shard_id) .entered(); @@ -302,7 +302,7 @@ fn apply_resharding( let _span = tracing::debug_span!( target: "chain", parent: parent_span, - "resharding", + "apply_resharding", shard_id, ?shard_uid) .entered(); diff --git a/chain/chunks/src/client.rs b/chain/chunks/src/client.rs index 5678f0ef5fe..30fdecbfe0b 100644 --- a/chain/chunks/src/client.rs +++ b/chain/chunks/src/client.rs @@ -122,7 +122,7 @@ impl ShardedTransactionPool { /// transactions back to the pool with the new shard uids. pub fn reshard(&mut self, old_shard_layout: &ShardLayout, new_shard_layout: &ShardLayout) { tracing::debug!( - target: "client", + target: "resharding", old_shard_layout_version = old_shard_layout.version(), new_shard_layout_version = new_shard_layout.version(), "resharding the transaction pool" diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs index 2512835a061..6f499506f1c 100644 --- a/chain/client/src/client.rs +++ b/chain/client/src/client.rs @@ -87,7 +87,7 @@ use std::cmp::max; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use std::sync::RwLock; -use tracing::{debug, debug_span, error, info, trace, warn}; +use tracing::{debug, debug_span, error, info, instrument, trace, warn}; #[cfg(feature = "test_features")] use crate::client_actor::AdvProduceChunksMode; @@ -820,7 +820,7 @@ impl Client { Ok(Some(block)) } - pub fn produce_chunk( + pub fn try_produce_chunk( &mut self, prev_block: &Block, epoch_id: &EpochId, @@ -828,13 +828,6 @@ impl Client { next_height: BlockHeight, shard_id: ShardId, ) -> Result, Error> { - let timer = Instant::now(); - let _timer = - metrics::PRODUCE_CHUNK_TIME.with_label_values(&[&shard_id.to_string()]).start_timer(); - let _span = tracing::debug_span!(target: "client", "produce_chunk", next_height, shard_id, ?epoch_id).entered(); - - let prev_block_hash = *prev_block.hash(); - let validator_signer = self .validator_signer .as_ref() @@ -852,6 +845,37 @@ impl Client { "Not producing chunk. Not chunk producer for next chunk."); return Ok(None); } + + self.produce_chunk( + prev_block, + epoch_id, + last_header, + next_height, + shard_id, + validator_signer, + ) + } + + #[instrument(target = "client", level = "debug", "produce_chunk", skip_all, fields( + height = next_height, + shard_id, + ?epoch_id, + chunk_hash = tracing::field::Empty, + ))] + pub fn produce_chunk( + &mut self, + prev_block: &Block, + epoch_id: &EpochId, + last_header: ShardChunkHeader, + next_height: BlockHeight, + shard_id: ShardId, + validator_signer: Arc, + ) -> Result, Error> { + let span = tracing::Span::current(); + let timer = Instant::now(); + let _timer = + metrics::PRODUCE_CHUNK_TIME.with_label_values(&[&shard_id.to_string()]).start_timer(); + let prev_block_hash = *prev_block.hash(); if self.epoch_manager.is_next_block_epoch_start(&prev_block_hash)? { let prev_prev_hash = *self.chain.get_block_header(&prev_block_hash)?.prev_hash(); if !self.chain.prev_block_is_caught_up(&prev_prev_hash, &prev_block_hash)? 
{ @@ -914,6 +938,7 @@ impl Client { protocol_version, )?; + span.record("chunk_hash", tracing::field::debug(encoded_chunk.chunk_hash())); debug!(target: "client", me = %validator_signer.validator_id(), chunk_hash = ?encoded_chunk.chunk_hash(), @@ -1759,7 +1784,13 @@ impl Client { .with_label_values(&[&shard_id.to_string()]) .start_timer(); let last_header = Chain::get_prev_chunk_header(epoch_manager, block, shard_id).unwrap(); - match self.produce_chunk(block, &epoch_id, last_header.clone(), next_height, shard_id) { + match self.try_produce_chunk( + block, + &epoch_id, + last_header.clone(), + next_height, + shard_id, + ) { Ok(Some(result)) => { let shard_chunk = self .persist_and_distribute_encoded_chunk( diff --git a/chain/client/src/stateless_validation/chunk_endorsement_tracker.rs b/chain/client/src/stateless_validation/chunk_endorsement_tracker.rs index d13d27aec4a..4117811b099 100644 --- a/chain/client/src/stateless_validation/chunk_endorsement_tracker.rs +++ b/chain/client/src/stateless_validation/chunk_endorsement_tracker.rs @@ -58,7 +58,7 @@ impl Client { .chunk_endorsement_tracker .process_chunk_endorsement(&chunk.cloned_header(), endorsement), Err(Error::ChunkMissing(_)) => { - tracing::debug!(target: "stateless_validation", ?endorsement, "Endorsement arrived before chunk."); + tracing::debug!(target: "client", ?endorsement, "Endorsement arrived before chunk."); self.chunk_endorsement_tracker.add_chunk_endorsement_to_pending_cache(endorsement) } Err(error) => return Err(error), @@ -87,10 +87,10 @@ impl ChunkEndorsementTracker { let Some(chunk_endorsements) = chunk_endorsements else { return; }; - tracing::debug!(target: "stateless_validation", ?chunk_hash, "Processing pending chunk endorsements."); + tracing::debug!(target: "client", ?chunk_hash, "Processing pending chunk endorsements."); for endorsement in chunk_endorsements.values() { if let Err(error) = self.process_chunk_endorsement(chunk_header, endorsement.clone()) { - tracing::debug!(target: "stateless_validation", ?endorsement, "Error processing pending chunk endorsement: {:?}", error); + tracing::debug!(target: "client", ?endorsement, ?error, "Error processing pending chunk endorsement"); } } } @@ -111,6 +111,7 @@ impl ChunkEndorsementTracker { chunk_header: &ShardChunkHeader, endorsement: ChunkEndorsement, ) -> Result<(), Error> { + let _span = tracing::debug_span!(target: "client", "process_chunk_endorsement", chunk_hash=?chunk_header.chunk_hash()).entered(); self.process_chunk_endorsement_impl(endorsement, Some(chunk_header)) } @@ -135,13 +136,13 @@ impl ChunkEndorsementTracker { .get(chunk_hash) .is_some_and(|existing_endorsements| existing_endorsements.contains_key(account_id)) { - tracing::debug!(target: "stateless_validation", ?endorsement, "Already received chunk endorsement."); + tracing::debug!(target: "client", ?endorsement, "Already received chunk endorsement."); return Ok(()); } if let Some(chunk_header) = chunk_header { if !self.epoch_manager.verify_chunk_endorsement(&chunk_header, &endorsement)? { - tracing::error!(target: "stateless_validation", ?endorsement, "Invalid chunk endorsement."); + tracing::error!(target: "client", ?endorsement, "Invalid chunk endorsement."); return Err(Error::InvalidChunkEndorsement); } } @@ -152,7 +153,7 @@ impl ChunkEndorsementTracker { // for 100 unique chunks thus pushing out current valid endorsements from our cache. // Maybe add check to ensure we don't accept endorsements from chunks already included in some block? 
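// A minimal sketch of the span-field pattern used in the `produce_chunk` change
// above: declare a field as `tracing::field::Empty` in `#[instrument]`, then fill
// it in once the value exists via `Span::current().record(...)`. All names here
// (`do_work`, `Chunk`) are illustrative, not nearcore code.
use tracing::{field, instrument, Span};

struct Chunk {
    hash: String,
}

#[instrument(target = "client", level = "debug", skip_all, fields(height = height, chunk_hash = tracing::field::Empty))]
fn do_work(height: u64) -> Chunk {
    let chunk = Chunk { hash: format!("chunk-at-{height}") };
    // The hash is only known now, so record it into the already-open span.
    Span::current().record("chunk_hash", field::debug(&chunk.hash));
    chunk
}

fn main() {
    // Assumes the `tracing-subscriber` crate; prints spans and events at DEBUG and above.
    tracing_subscriber::fmt().with_max_level(tracing::Level::DEBUG).init();
    do_work(42);
}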
// Maybe add check to ensure we don't accept endorsements from chunks that have too old height_created? - tracing::debug!(target: "stateless_validation", ?endorsement, "Received and saved chunk endorsement."); + tracing::debug!(target: "client", ?endorsement, "Received and saved chunk endorsement."); let mut guard = endorsement_cache.lock(); guard.get_or_insert(chunk_hash.clone(), || HashMap::new()); let chunk_endorsements = guard.get_mut(chunk_hash).unwrap(); diff --git a/chain/client/src/stateless_validation/chunk_validator/mod.rs b/chain/client/src/stateless_validation/chunk_validator/mod.rs index 227ea7ab6e2..eb84f20fa29 100644 --- a/chain/client/src/stateless_validation/chunk_validator/mod.rs +++ b/chain/client/src/stateless_validation/chunk_validator/mod.rs @@ -507,7 +507,7 @@ pub(crate) fn validate_chunk_state_witness( let _timer = metrics::CHUNK_STATE_WITNESS_VALIDATION_TIME .with_label_values(&[&state_witness.chunk_header.shard_id().to_string()]) .start_timer(); - let span = tracing::debug_span!(target: "chain", "validate_chunk_state_witness").entered(); + let span = tracing::debug_span!(target: "client", "validate_chunk_state_witness").entered(); let block_hash = pre_validation_output.main_transition_params.block_hash(); let epoch_id = epoch_manager.get_epoch_id(&block_hash)?; let shard_uid = epoch_manager @@ -646,10 +646,10 @@ pub(crate) fn send_chunk_endorsement_to_block_producers( let chunk_hash = chunk_header.chunk_hash(); tracing::debug!( - target: "stateless_validation", + target: "client", chunk_hash=?chunk_hash, ?block_producers, - "Chunk validated successfully, sending endorsement", + "send_chunk_endorsement", ); let endorsement = ChunkEndorsement::new(chunk_header.chunk_hash(), signer); @@ -702,7 +702,7 @@ impl Client { self.partially_validate_state_witness(&encoded_witness)?; tracing::debug!( - target: "stateless_validation", + target: "client", chunk_hash=?witness.chunk_header.chunk_hash(), shard_id=witness.chunk_header.shard_id(), "process_chunk_state_witness", @@ -725,7 +725,7 @@ impl Client { if let Ok(final_head) = self.chain.final_head() { if witness.chunk_header.height_created() <= final_head.height { tracing::warn!( - target: "stateless_validation", + target: "client", chunk_hash=?witness.chunk_header.chunk_hash(), shard_id=witness.chunk_header.shard_id(), witness_height=witness.chunk_header.height_created(), diff --git a/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs b/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs index 8e558059d0e..e1e630eddef 100644 --- a/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs +++ b/chain/client/src/stateless_validation/partial_witness/partial_witness_actor.rs @@ -64,7 +64,7 @@ impl Handler for PartialWitnessActor { #[perf] fn handle(&mut self, msg: DistributeStateWitnessRequest) { if let Err(err) = self.handle_distribute_state_witness_request(msg) { - tracing::error!(target: "stateless_validation", ?err, "Failed to handle distribute chunk state witness request"); + tracing::error!(target: "client", ?err, "Failed to handle distribute chunk state witness request"); } } } @@ -78,7 +78,7 @@ impl Handler for PartialWitnessActor { impl Handler for PartialWitnessActor { fn handle(&mut self, msg: PartialEncodedStateWitnessMessage) { if let Err(err) = self.handle_partial_encoded_state_witness(msg.0) { - tracing::error!(target: "stateless_validation", ?err, "Failed to handle PartialEncodedStateWitnessMessage"); + tracing::error!(target: "client", 
?err, "Failed to handle PartialEncodedStateWitnessMessage"); } } } @@ -86,7 +86,7 @@ impl Handler for PartialWitnessActor { impl Handler for PartialWitnessActor { fn handle(&mut self, msg: PartialEncodedStateWitnessForwardMessage) { if let Err(err) = self.handle_partial_encoded_state_witness_forward(msg.0) { - tracing::error!(target: "stateless_validation", ?err, "Failed to handle PartialEncodedStateWitnessForwardMessage"); + tracing::error!(target: "client", ?err, "Failed to handle PartialEncodedStateWitnessForwardMessage"); } } } @@ -127,10 +127,10 @@ impl PartialWitnessActor { .ordered_chunk_validators(); tracing::debug!( - target: "stateless_validation", - "Sending chunk state witness for chunk {:?} to chunk validators {:?}", - chunk_header.chunk_hash(), - chunk_validators, + target: "client", + chunk_hash=?chunk_header.chunk_hash(), + ?chunk_validators, + "distribute_chunk_state_witness", ); let witness_bytes = compress_witness(&state_witness)?; @@ -272,7 +272,7 @@ impl PartialWitnessActor { &mut self, partial_witness: PartialEncodedStateWitness, ) -> Result<(), Error> { - tracing::debug!(target: "stateless_validation", ?partial_witness, "Receive PartialEncodedStateWitnessMessage"); + tracing::debug!(target: "client", ?partial_witness, "Receive PartialEncodedStateWitnessMessage"); // Validate the partial encoded state witness. self.validate_partial_encoded_state_witness(&partial_witness)?; diff --git a/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs b/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs index b6fada8ee25..38ac47147bd 100644 --- a/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs +++ b/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs @@ -95,7 +95,7 @@ impl CacheEntry { // Check if the part is already present. if self.parts[part_ord].is_some() { tracing::warn!( - target: "stateless_validation", + target: "client", ?shard_id, ?height_created, ?part_ord, @@ -139,7 +139,7 @@ impl CacheEntry { // We ideally never expect the decoding to fail. In case it does, we received a bad part // from the chunk producer. tracing::error!( - target: "stateless_validation", + target: "client", ?err, ?shard_id, ?height_created, @@ -182,7 +182,7 @@ impl PartialEncodedStateWitnessTracker { &mut self, partial_witness: PartialEncodedStateWitness, ) -> Result<(), Error> { - tracing::debug!(target: "stateless_validation", ?partial_witness, "store_partial_encoded_state_witness"); + tracing::debug!(target: "client", ?partial_witness, "store_partial_encoded_state_witness"); self.maybe_insert_new_entry_in_parts_cache(&partial_witness)?; @@ -190,7 +190,7 @@ impl PartialEncodedStateWitnessTracker { let entry = self.parts_cache.get_mut(&key).unwrap(); if let Some(encoded_witness) = entry.insert_in_cache_entry(partial_witness) { - tracing::debug!(target: "stateless_validation", ?key, "Sending encoded witness to client."); + tracing::debug!(target: "client", ?key, "Sending encoded witness to client."); // Record the time taken from receiving first part to decoding partial witness. metrics::PARTIAL_WITNESS_DECODE_TIME @@ -246,7 +246,7 @@ impl PartialEncodedStateWitnessTracker { // Check if the evicted entry has been fully decoded and processed. 
if !evicted_entry.is_decoded { tracing::warn!( - target: "stateless_validation", + target: "client", ?evicted_chunk_hash, data_parts_present = ?evicted_entry.data_parts_present, data_parts_required = ?evicted_entry.data_parts_required, diff --git a/chain/client/src/stateless_validation/shadow_validate.rs b/chain/client/src/stateless_validation/shadow_validate.rs index 7ebaf0269dc..9e7a9ea46c5 100644 --- a/chain/client/src/stateless_validation/shadow_validate.rs +++ b/chain/client/src/stateless_validation/shadow_validate.rs @@ -19,7 +19,7 @@ impl Client { return Ok(()); } let block_hash = block.hash(); - tracing::debug!(target: "stateless_validation", ?block_hash, "shadow validation for block chunks"); + tracing::debug!(target: "client", ?block_hash, "shadow validation for block chunks"); let prev_block = self.chain.get_block(block.header().prev_hash())?; let prev_block_chunks = prev_block.chunks(); for chunk in @@ -32,7 +32,7 @@ impl Client { { metrics::SHADOW_CHUNK_VALIDATION_FAILED_TOTAL.inc(); tracing::error!( - target: "stateless_validation", + target: "client", ?err, shard_id = chunk.shard_id(), ?block_hash, @@ -109,7 +109,7 @@ impl Client { self.runtime_adapter.as_ref(), )?; tracing::debug!( - target: "stateless_validation", + target: "client", shard_id, ?chunk_hash, witness_size = encoded_witness.size_bytes(), @@ -129,7 +129,7 @@ impl Client { ) { Ok(()) => { tracing::debug!( - target: "stateless_validation", + target: "client", shard_id, ?chunk_hash, validation_elapsed = ?validation_start.elapsed(), @@ -139,7 +139,7 @@ impl Client { Err(err) => { metrics::SHADOW_CHUNK_VALIDATION_FAILED_TOTAL.inc(); tracing::error!( - target: "stateless_validation", + target: "client", ?err, shard_id, ?chunk_hash, diff --git a/chain/client/src/stateless_validation/state_witness_producer.rs b/chain/client/src/stateless_validation/state_witness_producer.rs index 9ba32b25ef0..6876472123a 100644 --- a/chain/client/src/stateless_validation/state_witness_producer.rs +++ b/chain/client/src/stateless_validation/state_witness_producer.rs @@ -34,6 +34,9 @@ impl Client { if !checked_feature!("stable", StatelessValidationV0, protocol_version) { return Ok(()); } + let chunk_header = chunk.cloned_header(); + let shard_id = chunk_header.shard_id(); + let _span = tracing::debug_span!(target: "client", "send_chunk_state_witness", chunk_hash=?chunk_header.chunk_hash(), ?shard_id).entered(); let my_signer = self.validator_signer.as_ref().ok_or(Error::NotAValidator)?.clone(); let state_witness = self.create_state_witness( @@ -48,8 +51,6 @@ impl Client { self.chain.chain_store.save_latest_chunk_state_witness(&state_witness)?; } - let chunk_header = chunk.cloned_header(); - let shard_id = chunk_header.shard_id(); let height = chunk_header.height_created(); if self .epoch_manager @@ -57,6 +58,7 @@ impl Client { .contains(my_signer.validator_id()) { // Bypass state witness validation if we created state witness. Endorse the chunk immediately. 
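// Many hunks above only move events from the "stateless_validation" target onto
// the established targets ("client", "chain", "sync", "network"). The target is
// what per-subsystem log filters key on. A sketch of how such a filter is
// typically installed with tracing_subscriber's EnvFilter (needs the crate's
// "env-filter" feature; the directive string below is illustrative, not nearcore's default):
use tracing_subscriber::EnvFilter;

fn main() {
    // Equivalent to running with RUST_LOG="client=debug,sync=debug,info".
    let filter = EnvFilter::new("client=debug,sync=debug,info");
    tracing_subscriber::fmt().with_env_filter(filter).init();

    // Emitted: matched by the `client=debug` directive.
    tracing::debug!(target: "client", chunk_hash = "abc", "process_chunk_state_witness");
    // Dropped: only the global `info` directive applies to this target.
    tracing::debug!(target: "stateless_validation", "no directive raises this target to debug");
}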
+ tracing::debug!(target: "client", chunk_hash=?chunk_header.chunk_hash(), "send_chunk_endorsement_from_chunk_producer"); send_chunk_endorsement_to_block_producers( &chunk_header, self.epoch_manager.as_ref(), diff --git a/chain/client/src/sync/block.rs b/chain/client/src/sync/block.rs index 961ae383b98..fc22b431e58 100644 --- a/chain/client/src/sync/block.rs +++ b/chain/client/src/sync/block.rs @@ -72,7 +72,8 @@ impl BlockSync { highest_height: BlockHeight, highest_height_peers: &[HighestHeightPeerInfo], ) -> Result { - let _span = tracing::debug_span!(target: "sync", "run", sync = "BlockSync").entered(); + let _span = + tracing::debug_span!(target: "sync", "run_sync", sync_type = "BlockSync").entered(); let head = chain.head()?; let header_head = chain.header_head()?; diff --git a/chain/client/src/sync/header.rs b/chain/client/src/sync/header.rs index cc0ccdb2962..37ef69d9f33 100644 --- a/chain/client/src/sync/header.rs +++ b/chain/client/src/sync/header.rs @@ -98,7 +98,8 @@ impl HeaderSync { highest_height: BlockHeight, highest_height_peers: &[HighestHeightPeerInfo], ) -> Result<(), near_chain::Error> { - let _span = tracing::debug_span!(target: "sync", "run", sync = "HeaderSync").entered(); + let _span = + tracing::debug_span!(target: "sync", "run_sync", sync_type = "HeaderSync").entered(); let head = chain.head()?; let header_head = chain.header_head()?; diff --git a/chain/client/src/sync/state.rs b/chain/client/src/sync/state.rs index 0fd0b7bf582..9acfaedc5e9 100644 --- a/chain/client/src/sync/state.rs +++ b/chain/client/src/sync/state.rs @@ -764,7 +764,8 @@ impl StateSync { use_colour: bool, runtime_adapter: Arc, ) -> Result { - let _span = tracing::debug_span!(target: "sync", "run", sync = "StateSync").entered(); + let _span = + tracing::debug_span!(target: "sync", "run_sync", sync_type = "StateSync").entered(); tracing::trace!(target: "sync", %sync_hash, ?tracking_shards, "syncing state"); let now = self.clock.now_utc(); diff --git a/chain/client/src/sync_jobs_actor.rs b/chain/client/src/sync_jobs_actor.rs index e64ec6cdc68..987cf9fb62c 100644 --- a/chain/client/src/sync_jobs_actor.rs +++ b/chain/client/src/sync_jobs_actor.rs @@ -82,7 +82,7 @@ impl SyncJobsActor { msg: &ApplyStatePartsRequest, ) -> Result<(), near_chain_primitives::error::Error> { let _span: tracing::span::EnteredSpan = - tracing::debug_span!(target: "client", "apply_parts").entered(); + tracing::debug_span!(target: "sync", "apply_parts").entered(); let store = msg.runtime_adapter.store(); let shard_id = msg.shard_uid.shard_id as ShardId; @@ -108,7 +108,7 @@ impl SyncJobsActor { &mut self, msg: &ApplyStatePartsRequest, ) -> Result { - let _span = tracing::debug_span!(target: "client", "clear_flat_state").entered(); + let _span = tracing::debug_span!(target: "sync", "clear_flat_state").entered(); let mut store_update = msg.runtime_adapter.store().store_update(); let success = msg .runtime_adapter @@ -148,10 +148,10 @@ impl SyncJobsActor { } Ok(false) => { // Can't panic here, because that breaks many KvRuntime tests. 
- tracing::error!(target: "client", shard_uid = ?msg.shard_uid, "Failed to delete Flat State, but proceeding with applying state parts."); + tracing::error!(target: "sync", shard_uid = ?msg.shard_uid, "Failed to delete Flat State, but proceeding with applying state parts."); } Ok(true) => { - tracing::debug!(target: "client", shard_uid = ?msg.shard_uid, "Deleted all Flat State"); + tracing::debug!(target: "sync", shard_uid = ?msg.shard_uid, "Deleted all Flat State"); } } @@ -164,7 +164,7 @@ impl SyncJobsActor { } pub fn handle_block_catch_up_request(&mut self, msg: BlockCatchUpRequest) { - tracing::debug!(target: "client", ?msg); + tracing::debug!(target: "sync", ?msg); let results = do_apply_chunks(msg.block_hash, msg.block_height, msg.work); self.client_sender.send(BlockCatchUpResponse { diff --git a/chain/client/src/test_utils/client.rs b/chain/client/src/test_utils/client.rs index 505a6ef9342..00139e55ffc 100644 --- a/chain/client/src/test_utils/client.rs +++ b/chain/client/src/test_utils/client.rs @@ -128,7 +128,7 @@ fn create_chunk_on_height_for_shard( let last_block_hash = client.chain.head().unwrap().last_block_hash; let last_block = client.chain.get_block(&last_block_hash).unwrap(); client - .produce_chunk( + .try_produce_chunk( &last_block, &client.epoch_manager.get_epoch_id_from_prev_block(&last_block_hash).unwrap(), Chain::get_prev_chunk_header(client.epoch_manager.as_ref(), &last_block, shard_id) @@ -166,7 +166,7 @@ pub fn create_chunk( receipts, transactions_storage_proof, } = client - .produce_chunk( + .try_produce_chunk( &last_block, last_block.header().epoch_id(), last_block.chunks()[0].clone(), diff --git a/chain/network/src/peer/peer_actor.rs b/chain/network/src/peer/peer_actor.rs index b41499c8172..dd472c628f3 100644 --- a/chain/network/src/peer/peer_actor.rs +++ b/chain/network/src/peer/peer_actor.rs @@ -422,6 +422,9 @@ impl PeerActor { PeerMessage::SyncSnapshotHosts(_) => { metrics::SYNC_SNAPSHOT_HOSTS.with_label_values(&["sent"]).inc() } + PeerMessage::Routed(routed) => { + tracing::debug!(target: "network", source=?routed.msg.author, target=?routed.msg.target, message=?routed.msg.body, "send_routed_message"); + } _ => (), }; diff --git a/chain/network/src/peer_manager/peer_manager_actor.rs b/chain/network/src/peer_manager/peer_manager_actor.rs index 21aa76d2853..78c3b62f466 100644 --- a/chain/network/src/peer_manager/peer_manager_actor.rs +++ b/chain/network/src/peer_manager/peer_manager_actor.rs @@ -1093,8 +1093,7 @@ impl actix::Handler> for PeerManagerA msg: WithSpanContext, ctx: &mut Self::Context, ) -> Self::Result { - let msg_type: &str = (&msg.msg).into(); - let (_span, msg) = handler_debug_span!(target: "network", msg, msg_type); + let (_span, msg) = handler_debug_span!(target: "network", msg); let _timer = metrics::PEER_MANAGER_MESSAGES_TIME.with_label_values(&[(&msg).into()]).start_timer(); self.handle_peer_manager_message(msg, ctx) diff --git a/chain/telemetry/src/lib.rs b/chain/telemetry/src/lib.rs index 505dd7f031c..1f7924596bd 100644 --- a/chain/telemetry/src/lib.rs +++ b/chain/telemetry/src/lib.rs @@ -78,7 +78,7 @@ impl TelemetryActor { impl Handler for TelemetryActor { #[perf] fn handle(&mut self, msg: TelemetryEvent) { - tracing::debug!(target: "client", ?msg); + tracing::debug!(target: "telemetry", ?msg); let now = Instant::now(); if now - self.last_telemetry_update < self.config.reporting_interval { // Throttle requests to the telemetry endpoints, to at most one diff --git a/core/primitives/src/utils.rs b/core/primitives/src/utils.rs index 
ab1cd9b05ac..7f111c6cf80 100644 --- a/core/primitives/src/utils.rs +++ b/core/primitives/src/utils.rs @@ -394,7 +394,7 @@ macro_rules! unwrap_or_return { match $obj { Ok(value) => value, Err(err) => { - tracing::error!(target: "client", "Unwrap error: {}", err); + tracing::error!(target: "near", "Unwrap error: {}", err); return $ret; } } @@ -403,7 +403,7 @@ macro_rules! unwrap_or_return { match $obj { Ok(value) => value, Err(err) => { - tracing::error!(target: "client", "Unwrap error: {}", err); + tracing::error!(target: "near", "Unwrap error: {}", err); return; } } diff --git a/integration-tests/src/tests/client/features/stateless_validation.rs b/integration-tests/src/tests/client/features/stateless_validation.rs index 28e1f77ac20..5d8099384e9 100644 --- a/integration-tests/src/tests/client/features/stateless_validation.rs +++ b/integration-tests/src/tests/client/features/stateless_validation.rs @@ -166,7 +166,7 @@ fn run_chunk_validation_test(seed: u64, prob_missing_chunk: f64) { let block_producer = env.get_block_producer_at_offset(&tip, 1); tracing::debug!( - target: "stateless_validation", + target: "client", "Producing block at height {} by {}", tip.height + 1, block_producer ); let block = env.client(&block_producer).produce_block(tip.height + 1).unwrap().unwrap(); @@ -175,7 +175,7 @@ fn run_chunk_validation_test(seed: u64, prob_missing_chunk: f64) { for i in 0..env.clients.len() { let validator_id = env.get_client_id(i); tracing::debug!( - target: "stateless_validation", + target: "client", "Applying block at height {} at {}", block.header().height(), validator_id ); let blocks_processed = if rng.gen_bool(prob_missing_chunk) {
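// The integration-test hunks above also emit through `tracing`, so the retargeted
// events are only visible under `cargo test` when a subscriber is installed for
// the test process. A sketch of such a setup (illustrative only; nearcore ships
// its own test-logger helpers, which this does not reproduce):
#[cfg(test)]
mod logging_sketch {
    use tracing_subscriber::EnvFilter;

    fn init_test_logger() {
        // `with_test_writer` routes output through the test harness so it is
        // captured per test; `try_init` tolerates being called more than once.
        let _ = tracing_subscriber::fmt()
            .with_env_filter(EnvFilter::new("client=debug,chain=debug,info"))
            .with_test_writer()
            .try_init();
    }

    #[test]
    fn retargeted_logs_are_visible() {
        init_test_logger();
        tracing::debug!(target: "client", height = 1u64, "Producing block");
    }
}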