diff --git a/core/benches/consumer.rs b/core/benches/consumer.rs index d736b93ef96ffd..ec615016f29a92 100644 --- a/core/benches/consumer.rs +++ b/core/benches/consumer.rs @@ -141,9 +141,8 @@ fn bench_process_and_record_transactions(bencher: &mut Bencher, batch_size: usiz assert_eq!( TRANSACTIONS_PER_ITERATION % batch_size, 0, - "batch_size must be a factor of \ - `TRANSACTIONS_PER_ITERATION` ({TRANSACTIONS_PER_ITERATION}) \ - so that bench results are easily comparable" + "batch_size must be a factor of `TRANSACTIONS_PER_ITERATION` \ + ({TRANSACTIONS_PER_ITERATION}) so that bench results are easily comparable" ); let batches_per_iteration = TRANSACTIONS_PER_ITERATION / batch_size; diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 78b6c2d0298c80..bdf0a760fabdf2 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -76,7 +76,10 @@ impl AccountsHashVerifier { &snapshot_config, )); if let Err(err) = result { - error!("Stopping AccountsHashVerifier! Fatal error while processing accounts package: {err}"); + error!( + "Stopping AccountsHashVerifier! Fatal error while processing accounts \ + package: {err}" + ); exit.store(true, Ordering::Relaxed); break; } @@ -144,7 +147,8 @@ impl AccountsHashVerifier { .count(); assert!( num_eah_packages <= 1, - "Only a single EAH accounts package is allowed at a time! count: {num_eah_packages}" + "Only a single EAH accounts package is allowed at a time! count: \ + {num_eah_packages}" ); // Get the two highest priority requests, `y` and `z`. 
@@ -261,12 +265,12 @@ impl AccountsHashVerifier { accounts_db.get_accounts_hash(base_slot) else { panic!( - "incremental snapshot requires accounts hash and capitalization \ - from the full snapshot it is based on \n\ - package: {accounts_package:?} \n\ - accounts hashes: {:?} \n\ - incremental accounts hashes: {:?} \n\ - full snapshot archives: {:?} \n\ + "incremental snapshot requires accounts hash and capitalization from \ + the full snapshot it is based on\n\ + package: {accounts_package:?}\n\ + accounts hashes: {:?}\n\ + incremental accounts hashes: {:?}\n\ + full snapshot archives: {:?}\n\ bank snapshots: {:?}", accounts_db.get_accounts_hashes(), accounts_db.get_incremental_accounts_hashes(), @@ -344,10 +348,9 @@ impl AccountsHashVerifier { HashStats::default(), ); panic!( - "accounts hash capitalization mismatch: expected {}, but calculated {} (then recalculated {})", - accounts_package.expected_capitalization, - lamports, - second_accounts_hash.1, + "accounts hash capitalization mismatch: expected {}, but calculated {} (then \ + recalculated {})", + accounts_package.expected_capitalization, lamports, second_accounts_hash.1, ); } diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 1ead68e564c9ce..0ddaaeafa4ac7e 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -355,7 +355,8 @@ impl LatestUnprocessedVotes { .and_then(|account| from_account::(&account)); if slot_hashes.is_none() { error!( - "Slot hashes sysvar doesn't exist on bank {}. Including all votes without filtering", + "Slot hashes sysvar doesn't exist on bank {}. 
Including all votes without \ + filtering", bank.slot() ); } diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index 6fe35c46f54e03..8af53ca4d9e7b3 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -101,25 +101,43 @@ impl QosService { let mut cost_tracking_time = Measure::start("cost_tracking_time"); let mut cost_tracker = bank.write_cost_tracker().unwrap(); let mut num_included = 0; - let select_results = transactions.zip(transactions_costs) - .map(|(tx, cost)| { - match cost { - Ok(cost) => { - match cost_tracker.try_add(&cost) { - Ok(UpdatedCosts{updated_block_cost, updated_costliest_account_cost}) => { - debug!("slot {:?}, transaction {:?}, cost {:?}, fit into current block, current block cost {}, updated costliest account cost {}", bank.slot(), tx, cost, updated_block_cost, updated_costliest_account_cost); - self.metrics.stats.selected_txs_count.fetch_add(1, Ordering::Relaxed); - num_included += 1; - Ok(cost) - }, - Err(e) => { - debug!("slot {:?}, transaction {:?}, cost {:?}, not fit into current block, '{:?}'", bank.slot(), tx, cost, e); - Err(TransactionError::from(e)) - } - } - }, - Err(e) => Err(e), - } + let select_results = transactions + .zip(transactions_costs) + .map(|(tx, cost)| match cost { + Ok(cost) => match cost_tracker.try_add(&cost) { + Ok(UpdatedCosts { + updated_block_cost, + updated_costliest_account_cost, + }) => { + debug!( + "slot {:?}, transaction {:?}, cost {:?}, fit into current block, \ + current block cost {}, updated costliest account cost {}", + bank.slot(), + tx, + cost, + updated_block_cost, + updated_costliest_account_cost + ); + self.metrics + .stats + .selected_txs_count + .fetch_add(1, Ordering::Relaxed); + num_included += 1; + Ok(cost) + } + Err(e) => { + debug!( + "slot {:?}, transaction {:?}, cost {:?}, not fit into current block, \ + '{:?}'", + bank.slot(), + tx, + cost, + e + ); + Err(TransactionError::from(e)) + } + }, + Err(e) => 
Err(e), }) .collect(); cost_tracker.add_transactions_in_flight(num_included); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 5271af556e493f..79446125f5b819 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -800,7 +800,10 @@ impl Tower { ancestors: &HashMap>, last_vote_ancestors: &HashSet, ) -> Option { - trace!("Checking if {candidate_slot} is a valid switching proof vote from {last_voted_slot} to {switch_slot}"); + trace!( + "Checking if {candidate_slot} is a valid switching proof vote from {last_voted_slot} \ + to {switch_slot}" + ); // Ignore if the `candidate_slot` is a descendant of the `last_voted_slot`, since we do not // want to count votes on the same fork. if Self::is_descendant_slot(candidate_slot, last_voted_slot, ancestors)? { @@ -923,9 +926,10 @@ impl Tower { // `switch < last` is needed not to warn! this message just because of using // newer snapshots on validator restart let message = format!( - "bank_forks doesn't have corresponding data for the stray restored \ - last vote({last_voted_slot}), meaning some inconsistency between saved tower and ledger." - ); + "bank_forks doesn't have corresponding data for the stray restored last \ + vote({last_voted_slot}), meaning some inconsistency between saved tower and \ + ledger." 
+ ); warn!("{}", message); datapoint_warn!("tower_warn", ("warn", message, String)); } @@ -1030,8 +1034,9 @@ impl Tower { return suspended_decision_due_to_major_unsynced_ledger(); } else { panic!( - "Should never consider switching to ancestor ({switch_slot}) of last vote: {last_voted_slot}, ancestors({last_vote_ancestors:?})", - ); + "Should never consider switching to ancestor ({switch_slot}) of last vote: \ + {last_voted_slot}, ancestors({last_vote_ancestors:?})", + ); } } @@ -1254,7 +1259,8 @@ impl Tower { let lockout = *fork_stake as f64 / total_stake as f64; trace!( - "fork_stake slot: {}, threshold_vote slot: {}, lockout: {} fork_stake: {} total_stake: {}", + "fork_stake slot: {}, threshold_vote slot: {}, lockout: {} fork_stake: {} \ + total_stake: {}", slot, threshold_vote.slot(), lockout, @@ -1419,9 +1425,8 @@ impl Tower { // While this validator's voting is suspended this way, // suspended_decision_due_to_major_unsynced_ledger() will be also touched. let message = format!( - "For some reason, we're REPROCESSING slots which has already been \ - voted and ROOTED by us; \ - VOTING will be SUSPENDED UNTIL {last_voted_slot}!", + "For some reason, we're REPROCESSING slots which has already been voted and \ + ROOTED by us; VOTING will be SUSPENDED UNTIL {last_voted_slot}!", ); error!("{}", message); datapoint_error!("tower_error", ("error", message, String)); @@ -1549,7 +1554,8 @@ impl Tower { self.last_vote = VoteTransaction::from(Vote::default()); } else { info!( - "{} restored votes (out of {}) were on different fork or are upcoming votes on unrooted slots: {:?}!", + "{} restored votes (out of {}) were on different fork or are upcoming votes on \ + unrooted slots: {:?}!", self.voted_slots().len(), original_votes_len, self.voted_slots() @@ -1623,8 +1629,8 @@ pub enum TowerError { WrongTower(String), #[error( - "The tower is too old: \ - newest slot in tower ({0}) << oldest slot in available history ({1})" + "The tower is too old: newest slot in tower ({0}) << 
oldest slot in available history \ + ({1})" )] TooOldTower(Slot, Slot), @@ -1704,13 +1710,15 @@ pub fn reconcile_blockstore_roots_with_external_source( Ordering::Equal => false, Ordering::Less => panic!( "last_blockstore_root({last_blockstore_root}) is skipped while traversing \ - blockstore (currently at {current}) from external root ({external_source:?})!?", + blockstore (currently at {current}) from external root \ + ({external_source:?})!?", ), }) .collect(); if !new_roots.is_empty() { info!( - "Reconciling slots as root based on external root: {:?} (external: {:?}, blockstore: {})", + "Reconciling slots as root based on external root: {:?} (external: {:?}, \ + blockstore: {})", new_roots, external_source, last_blockstore_root ); @@ -1733,9 +1741,9 @@ pub fn reconcile_blockstore_roots_with_external_source( // That's because we might have a chance of recovering properly with // newer snapshot. warn!( - "Couldn't find any ancestor slots from external source ({:?}) \ - towards blockstore root ({}); blockstore pruned or only \ - tower moved into new ledger or just hard fork?", + "Couldn't find any ancestor slots from external source ({:?}) towards blockstore \ + root ({}); blockstore pruned or only tower moved into new ledger or just hard \ + fork?", external_source, last_blockstore_root, ); } @@ -3251,9 +3259,10 @@ pub mod test { } #[test] - #[should_panic(expected = "last_blockstore_root(3) is skipped while \ - traversing blockstore (currently at 1) from \ - external root (Tower(4))!?")] + #[should_panic( + expected = "last_blockstore_root(3) is skipped while traversing blockstore (currently at \ + 1) from external root (Tower(4))!?" 
+ )] fn test_reconcile_blockstore_roots_with_tower_panic_no_common_root() { solana_logger::setup(); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -3522,7 +3531,8 @@ pub mod test { let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history); assert_eq!( format!("{}", result.unwrap_err()), - "The tower is too old: newest slot in tower (0) << oldest slot in available history (1)" + "The tower is too old: newest slot in tower (0) << oldest slot in available history \ + (1)" ); } @@ -3601,7 +3611,8 @@ pub mod test { let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history); assert_eq!( format!("{}", result.unwrap_err()), - "The tower is fatally inconsistent with blockstore: not too old once after got too old?" + "The tower is fatally inconsistent with blockstore: not too old once after got too \ + old?" ); } diff --git a/core/src/consensus/fork_choice.rs b/core/src/consensus/fork_choice.rs index cb5ddabfbafcf4..04c0b43fa05844 100644 --- a/core/src/consensus/fork_choice.rs +++ b/core/src/consensus/fork_choice.rs @@ -256,7 +256,8 @@ fn select_candidates_failed_switch_duplicate_rollback<'a>( // invalid candidate). Thus, it's safe to use as the reset bank. 
let reset_bank = Some(heaviest_bank); info!( - "Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: {:?}", + "Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: \ + {:?}", heaviest_bank.slot(), reset_bank.as_ref().map(|b| b.slot()), latest_duplicate_ancestor, diff --git a/core/src/consensus/heaviest_subtree_fork_choice.rs b/core/src/consensus/heaviest_subtree_fork_choice.rs index 39b06c5eb4d091..5e4b3089d31e87 100644 --- a/core/src/consensus/heaviest_subtree_fork_choice.rs +++ b/core/src/consensus/heaviest_subtree_fork_choice.rs @@ -144,7 +144,11 @@ impl ForkInfo { ) { if let Some(latest_invalid_ancestor) = self.latest_invalid_ancestor { if latest_invalid_ancestor <= newly_valid_ancestor { - info!("Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was duplicate confirmed", my_key, latest_invalid_ancestor, newly_valid_ancestor); + info!( + "Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was \ + duplicate confirmed", + my_key, latest_invalid_ancestor, newly_valid_ancestor + ); self.latest_invalid_ancestor = None; } } @@ -1188,8 +1192,9 @@ impl HeaviestSubtreeForkChoice { // validator has been running, so we must be able to fetch best_slots for all of // them. 
panic!( - "a bank at last_voted_slot({last_voted_slot_hash:?}) is a frozen bank so must have been \ - added to heaviest_subtree_fork_choice at time of freezing", + "a bank at last_voted_slot({last_voted_slot_hash:?}) is a frozen \ + bank so must have been added to heaviest_subtree_fork_choice at \ + time of freezing", ) } else { // fork_infos doesn't have corresponding data for the stale stray last vote, diff --git a/core/src/cost_update_service.rs b/core/src/cost_update_service.rs index 58ef6c48ed7721..de12b3a703ee0a 100644 --- a/core/src/cost_update_service.rs +++ b/core/src/cost_update_service.rs @@ -60,7 +60,7 @@ impl CostUpdateService { let slot = bank.slot(); trace!( "inflight transaction count is {in_flight_transaction_count} \ - for slot {slot} after {loop_count} iteration(s)" + for slot {slot} after {loop_count} iteration(s)" ); cost_tracker.report_stats(slot); break; diff --git a/core/src/repair/ancestor_hashes_service.rs b/core/src/repair/ancestor_hashes_service.rs index 92e5f1ad9d7c4e..a20794189a19ff 100644 --- a/core/src/repair/ancestor_hashes_service.rs +++ b/core/src/repair/ancestor_hashes_service.rs @@ -559,8 +559,10 @@ impl AncestorHashesService { // order to vote. // This fits the alternate criteria we use in `find_epoch_slots_frozen_dead_slots` // so we can upgrade it to `repairable_dead_slot_pool`. - info!("{pruned_slot} is part of a popular pruned fork however we previously marked it as dead. - Upgrading as dead duplicate confirmed"); + info!( + "{pruned_slot} is part of a popular pruned fork however we previously \ + marked it as dead. Upgrading as dead duplicate confirmed" + ); dead_slot_pool.remove(&pruned_slot); repairable_dead_slot_pool.insert(pruned_slot); } else if repairable_dead_slot_pool.contains(&pruned_slot) { @@ -568,8 +570,11 @@ impl AncestorHashesService { // ignore the additional information that `pruned_slot` is popular pruned. 
// This is similar to the above case where `pruned_slot` was first pruned // and then marked dead duplicate confirmed. - info!("Received pruned duplicate confirmed status for {pruned_slot} that was previously marked - dead duplicate confirmed. Ignoring and processing it as dead duplicate confirmed."); + info!( + "Received pruned duplicate confirmed status for {pruned_slot} that \ + was previously marked dead duplicate confirmed. Ignoring and \ + processing it as dead duplicate confirmed." + ); } else { popular_pruned_slot_pool.insert(pruned_slot); } diff --git a/core/src/repair/cluster_slot_state_verifier.rs b/core/src/repair/cluster_slot_state_verifier.rs index ab1928cee5c568..375cbe466c45f3 100644 --- a/core/src/repair/cluster_slot_state_verifier.rs +++ b/core/src/repair/cluster_slot_state_verifier.rs @@ -456,8 +456,8 @@ fn check_epoch_slots_hash_against_bank_status( assert!(is_popular_pruned); // The cluster sample found the troublesome slot which caused this fork to be pruned warn!( - "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, but we - have pruned it due to incorrect ancestry" + "EpochSlots sample returned slot {slot} with hash {epoch_slots_frozen_hash}, but \ + we have pruned it due to incorrect ancestry" ); } } @@ -644,8 +644,8 @@ fn on_epoch_slots_frozen( if let Some(duplicate_confirmed_hash) = duplicate_confirmed_hash { if epoch_slots_frozen_hash != duplicate_confirmed_hash { warn!( - "EpochSlots sample returned slot {} with hash {}, but we already saw - duplicate confirmation on hash: {:?}", + "EpochSlots sample returned slot {} with hash {}, but we already saw \ + duplicate confirmation on hash: {:?}", slot, epoch_slots_frozen_hash, duplicate_confirmed_hash ); } @@ -676,9 +676,11 @@ fn on_epoch_slots_frozen( } fn on_popular_pruned_fork(slot: Slot) -> Vec { - warn!("{slot} is part of a pruned fork which has reached the DUPLICATE_THRESHOLD aggregating across descendants - and slot versions. 
It is suspected to be duplicate or have an ancestor that is duplicate. - Notifying ancestor_hashes_service"); + warn!( + "{slot} is part of a pruned fork which has reached the DUPLICATE_THRESHOLD aggregating \ + across descendants and slot versions. It is suspected to be duplicate or have an \ + ancestor that is duplicate. Notifying ancestor_hashes_service" + ); vec![ResultingStateChange::SendAncestorHashesReplayUpdate( AncestorHashesReplayUpdate::PopularPrunedFork(slot), )] diff --git a/core/src/repair/duplicate_repair_status.rs b/core/src/repair/duplicate_repair_status.rs index 75956a64d6e58f..c39d194c604c22 100644 --- a/core/src/repair/duplicate_repair_status.rs +++ b/core/src/repair/duplicate_repair_status.rs @@ -319,14 +319,14 @@ impl AncestorRequestStatus { agreed_response[*mismatch_i]; let mismatch_our_frozen_hash = blockstore.get_bank_hash(mismatch_slot); info!( - "When processing the ancestor sample for {}, there was a mismatch - for {mismatch_slot}: we had frozen hash {:?} and the cluster agreed upon - {mismatch_agreed_upon_hash}. However for a later ancestor {ancestor_slot} - we have agreement on {our_frozen_hash} as the bank hash. This should never - be possible, something is wrong or the cluster sample is invalid. - Rejecting and queuing the ancestor hashes request for retry", - self.requested_mismatched_slot, - mismatch_our_frozen_hash + "When processing the ancestor sample for {}, there was a mismatch \ + for {mismatch_slot}: we had frozen hash {:?} and the cluster agreed \ + upon {mismatch_agreed_upon_hash}. However for a later ancestor \ + {ancestor_slot} we have agreement on {our_frozen_hash} as the bank \ + hash. This should never be possible, something is wrong or the \ + cluster sample is invalid. 
Rejecting and queuing the ancestor hashes \ + request for retry", + self.requested_mismatched_slot, mismatch_our_frozen_hash ); return DuplicateAncestorDecision::InvalidSample; } @@ -346,13 +346,14 @@ impl AncestorRequestStatus { let (mismatch_slot, mismatch_agreed_upon_hash) = agreed_response[*mismatch_i]; info!( - "When processing the ancestor sample for {}, an earlier ancestor {mismatch_slot} - was agreed upon by the cluster with hash {mismatch_agreed_upon_hash} but not - frozen in our blockstore. However for a later ancestor {ancestor_slot} we have - agreement on {our_frozen_hash} as the bank hash. This should only be possible if - we have just started from snapshot and immediately encountered a duplicate block on - a popular pruned fork, otherwise something is seriously wrong. Continuing with the - repair", + "When processing the ancestor sample for {}, an earlier ancestor \ + {mismatch_slot} was agreed upon by the cluster with hash \ + {mismatch_agreed_upon_hash} but not frozen in our blockstore. \ + However for a later ancestor {ancestor_slot} we have agreement on \ + {our_frozen_hash} as the bank hash. This should only be possible if \ + we have just started from snapshot and immediately encountered a \ + duplicate block on a popular pruned fork, otherwise something is \ + seriously wrong. Continuing with the repair", self.requested_mismatched_slot ); } diff --git a/core/src/repair/repair_generic_traversal.rs b/core/src/repair/repair_generic_traversal.rs index 3e704149cb4dd5..35b5276bcbbaa4 100644 --- a/core/src/repair/repair_generic_traversal.rs +++ b/core/src/repair/repair_generic_traversal.rs @@ -139,7 +139,8 @@ pub fn get_closest_completion( ( "error", format!( - "last_index + 1 < shred_count. last_index={last_index} shred_count={shred_count}", + "last_index + 1 < shred_count. last_index={last_index} \ + shred_count={shred_count}", ), String ), @@ -153,9 +154,9 @@ pub fn get_closest_completion( ( "error", format!( - "last_index < slot_meta.consumed. 
last_index={} slot_meta.consumed={}", - last_index, - slot_meta.consumed, + "last_index < slot_meta.consumed. last_index={} \ + slot_meta.consumed={}", + last_index, slot_meta.consumed, ), String ), diff --git a/core/src/repair/repair_service.rs b/core/src/repair/repair_service.rs index 7d3d6dd54213e4..f69aea596960e2 100644 --- a/core/src/repair/repair_service.rs +++ b/core/src/repair/repair_service.rs @@ -824,8 +824,8 @@ impl RepairService { // Select weighted sample of valid peers if no valid peer was passed in. if repair_peers.is_empty() { debug!( - "No pubkey was provided or no valid repair socket was found. \ - Sampling a set of repair peers instead." + "No pubkey was provided or no valid repair socket was found. Sampling a set of \ + repair peers instead." ); repair_peers = Self::get_repair_peers(cluster_info.clone(), cluster_slots, slot); } diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index f1a92daa2650f5..4bf1f5cef37d73 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -205,10 +205,10 @@ impl PartitionInfo { heaviest_fork_failures: Vec, ) { if self.partition_start_time.is_none() && partition_detected { - warn!("PARTITION DETECTED waiting to join heaviest fork: {} last vote: {:?}, reset slot: {}", - heaviest_slot, - last_voted_slot, - reset_bank_slot, + warn!( + "PARTITION DETECTED waiting to join heaviest fork: {} last vote: {:?}, reset \ + slot: {}", + heaviest_slot, last_voted_slot, reset_bank_slot, ); datapoint_info!( "replay_stage-partition-start", @@ -580,11 +580,9 @@ impl ReplayStage { Ok(tower) => tower, Err(err) => { error!( - "Unable to load new tower when attempting to change identity from {} to {} on - ReplayStage startup, Exiting: {}", - my_old_pubkey, - my_pubkey, - err + "Unable to load new tower when attempting to change identity from {} \ + to {} on ReplayStage startup, Exiting: {}", + my_old_pubkey, my_pubkey, err ); // drop(_exit) will set the exit flag, eventually tearing down the entire process 
return; @@ -1037,11 +1035,9 @@ impl ReplayStage { Ok(tower) => tower, Err(err) => { error!( - "Unable to load new tower when attempting to change identity - from {} to {} on set-identity, Exiting: {}", - my_old_pubkey, - my_pubkey, - err + "Unable to load new tower when attempting to change \ + identity from {} to {} on set-identity, Exiting: {}", + my_old_pubkey, my_pubkey, err ); // drop(_exit) will set the exit flag, eventually tearing down the entire process return; @@ -1214,7 +1210,10 @@ impl ReplayStage { match tower { Ok(tower) => Ok(tower), Err(err) if err.is_file_missing() => { - warn!("Failed to load tower, file missing for {}: {}. Creating a new tower from bankforks.", node_pubkey, err); + warn!( + "Failed to load tower, file missing for {node_pubkey}: {err}. Creating a new \ + tower from bankforks." + ); Ok(Tower::new_from_bankforks( &bank_forks.read().unwrap(), node_pubkey, @@ -1222,7 +1221,10 @@ impl ReplayStage { )) } Err(err) if err.is_too_old() => { - warn!("Failed to load tower, too old for {}: {}. Creating a new tower from bankforks.", node_pubkey, err); + warn!( + "Failed to load tower, too old for {node_pubkey}: {err}. Creating a new tower \ + from bankforks." + ); Ok(Tower::new_from_bankforks( &bank_forks.read().unwrap(), node_pubkey, @@ -1284,10 +1286,9 @@ impl ReplayStage { ) { if retransmit_info.reached_retransmit_threshold() { info!( - "Retrying retransmit: latest_leader_slot={} slot={} retransmit_info={:?}", - latest_leader_slot, - slot, - &retransmit_info, + "Retrying retransmit: latest_leader_slot={} slot={} \ + retransmit_info={:?}", + latest_leader_slot, slot, &retransmit_info, ); datapoint_info!( metric_name, @@ -1318,7 +1319,11 @@ impl ReplayStage { // It is possible that bank corresponding to `start_slot` has been // dumped, so we need to double check it exists before proceeding if !progress.contains(&start_slot) { - warn!("Poh start slot {start_slot}, is missing from progress map. 
This indicates that we are in the middle of a dump and repair. Skipping retransmission of unpropagated leader slots"); + warn!( + "Poh start slot {start_slot}, is missing from progress map. This indicates that \ + we are in the middle of a dump and repair. Skipping retransmission of \ + unpropagated leader slots" + ); return; } @@ -1509,7 +1514,9 @@ impl ReplayStage { } // Should not dump slots for which we were the leader - if Some(*my_pubkey) == leader_schedule_cache.slot_leader_at(*duplicate_slot, None) { + if Some(*my_pubkey) + == leader_schedule_cache.slot_leader_at(*duplicate_slot, None) + { if let Some(bank) = bank_forks.read().unwrap().get(*duplicate_slot) { bank_hash_details::write_bank_hash_details_file(&bank) .map_err(|err| { @@ -1517,14 +1524,18 @@ impl ReplayStage { }) .ok(); } else { - warn!("Unable to get bank for slot {duplicate_slot} from bank forks \ - while attempting to write bank hash details file"); + warn!( + "Unable to get bank for slot {duplicate_slot} from bank forks \ + while attempting to write bank hash details file" + ); } - panic!("We are attempting to dump a block that we produced. \ - This indicates that we are producing duplicate blocks, \ - or that there is a bug in our runtime/replay code which \ - causes us to compute different bank hashes than the rest of the cluster. \ - We froze slot {duplicate_slot} with hash {frozen_hash:?} while the cluster hash is {correct_hash}"); + panic!( + "We are attempting to dump a block that we produced. This indicates \ + that we are producing duplicate blocks, or that there is a bug in \ + our runtime/replay code which causes us to compute different bank \ + hashes than the rest of the cluster. 
We froze slot {duplicate_slot} \ + with hash {frozen_hash:?} while the cluster hash is {correct_hash}" + ); } let attempt_no = purge_repair_slot_counter @@ -1532,11 +1543,13 @@ impl ReplayStage { .and_modify(|x| *x += 1) .or_insert(1); if *attempt_no > MAX_REPAIR_RETRY_LOOP_ATTEMPTS { - panic!("We have tried to repair duplicate slot: {duplicate_slot} more than {MAX_REPAIR_RETRY_LOOP_ATTEMPTS} times \ - and are unable to freeze a block with bankhash {correct_hash}, \ - instead we have a block with bankhash {frozen_hash:?}. \ - This is most likely a bug in the runtime. \ - At this point manual intervention is needed to make progress. Exiting"); + panic!( + "We have tried to repair duplicate slot: {duplicate_slot} more than \ + {MAX_REPAIR_RETRY_LOOP_ATTEMPTS} times and are unable to freeze a \ + block with bankhash {correct_hash}, instead we have a block with \ + bankhash {frozen_hash:?}. This is most likely a bug in the runtime. \ + At this point manual intervention is needed to make progress. Exiting" + ); } Self::purge_unconfirmed_duplicate_slot( @@ -1597,8 +1610,15 @@ impl ReplayStage { } in ancestor_duplicate_slots_receiver.try_iter() { warn!( - "{} ReplayStage notified of duplicate slot from ancestor hashes service but we observed as {}: {:?}", - pubkey, if request_type.is_pruned() {"pruned"} else {"dead"}, (epoch_slots_frozen_slot, epoch_slots_frozen_hash), + "{} ReplayStage notified of duplicate slot from ancestor hashes service but we \ + observed as {}: {:?}", + pubkey, + if request_type.is_pruned() { + "pruned" + } else { + "dead" + }, + (epoch_slots_frozen_slot, epoch_slots_frozen_hash), ); let epoch_slots_frozen_state = EpochSlotsFrozenState::new_from_state( epoch_slots_frozen_slot, @@ -1726,7 +1746,10 @@ impl ReplayStage { // replay on successful repair of the parent. If this block is also a duplicate, it // will be handled in the next round of repair/replay - so we just clear the dead // flag for now. 
- warn!("not purging descendant {} of slot {} as it is dead. resetting dead flag instead", slot, duplicate_slot); + warn!( + "not purging descendant {slot} of slot {duplicate_slot} as it is dead. \ + resetting dead flag instead" + ); // Clear the "dead" flag allowing ReplayStage to start replaying // this slot once the parent is repaired blockstore.remove_dead_slot(slot).unwrap(); @@ -1839,7 +1862,8 @@ impl ReplayStage { { assert_eq!( prev_hash, duplicate_confirmed_hash, - "Additional duplicate confirmed notification for slot {confirmed_slot} with a different hash" + "Additional duplicate confirmed notification for slot {confirmed_slot} \ + with a different hash" ); // Already processed this signal continue; @@ -2058,8 +2082,9 @@ impl ReplayStage { let Some(parent) = bank_forks.read().unwrap().get(parent_slot) else { warn!( - "Poh recorder parent slot {parent_slot} is missing from bank_forks. This indicates \ - that we are in the middle of a dump and repair. Unable to start leader"); + "Poh recorder parent slot {parent_slot} is missing from bank_forks. This \ + indicates that we are in the middle of a dump and repair. 
Unable to start leader" + ); return false; }; @@ -2106,8 +2131,12 @@ impl ReplayStage { ); if !Self::check_propagation_for_start_leader(poh_slot, parent_slot, progress_map) { - let latest_unconfirmed_leader_slot = progress_map.get_latest_leader_slot_must_exist(parent_slot) - .expect("In order for propagated check to fail, latest leader must exist in progress map"); + let latest_unconfirmed_leader_slot = progress_map + .get_latest_leader_slot_must_exist(parent_slot) + .expect( + "In order for propagated check to fail, latest leader must exist in \ + progress map", + ); if poh_slot != skipped_slots_info.last_skipped_slot { datapoint_info!( "replay_stage-skip_leader_slot", @@ -2516,8 +2545,10 @@ impl ReplayStage { .find(|keypair| keypair.pubkey() == authorized_voter_pubkey) { None => { - warn!("The authorized keypair {} for vote account {} is not available. Unable to vote", - authorized_voter_pubkey, vote_account_pubkey); + warn!( + "The authorized keypair {authorized_voter_pubkey} for vote account \ + {vote_account_pubkey} is not available. 
Unable to vote" + ); return GenerateVoteTxResult::NonVoting; } Some(authorized_voter_keypair) => authorized_voter_keypair, @@ -2584,7 +2615,8 @@ impl ReplayStage { { last_vote_refresh_time.last_print_time = Instant::now(); info!( - "Last landed vote for slot {} in bank {} is greater than the current last vote for slot: {} tracked by Tower", + "Last landed vote for slot {} in bank {} is greater than the current last vote \ + for slot: {} tracked by Tower", my_latest_landed_vote, heaviest_bank_on_same_fork.slot(), last_voted_slot @@ -3107,8 +3139,8 @@ impl ReplayStage { let replay_progress = bank_progress.replay_progress.clone(); let r_replay_progress = replay_progress.read().unwrap(); debug!( - "bank {} has completed replay from blockstore, \ - contribute to update cost with {:?}", + "bank {} has completed replay from blockstore, contribute to update cost with \ + {:?}", bank.slot(), r_replay_stats.batch_execute.totals ); @@ -3793,7 +3825,8 @@ impl ReplayStage { if let Some(prev_hash) = duplicate_confirmed_slots.insert(*slot, *frozen_hash) { assert_eq!( prev_hash, *frozen_hash, - "Additional duplicate confirmed notification for slot {slot} with a different hash" + "Additional duplicate confirmed notification for slot {slot} with a different \ + hash" ); // Already processed this signal continue; diff --git a/core/src/snapshot_packager_service.rs b/core/src/snapshot_packager_service.rs index f9c40e4f9b13fe..274d63cbd31f04 100644 --- a/core/src/snapshot_packager_service.rs +++ b/core/src/snapshot_packager_service.rs @@ -49,9 +49,8 @@ impl SnapshotPackagerService { break; } - let Some(snapshot_package) = Self::get_next_snapshot_package( - &pending_snapshot_packages, - ) + let Some(snapshot_package) = + Self::get_next_snapshot_package(&pending_snapshot_packages) else { std::thread::sleep(Self::LOOP_LIMITER); continue; @@ -67,36 +66,42 @@ impl SnapshotPackagerService { // Archiving the snapshot package is not allowed to fail. 
// AccountsBackgroundService calls `clean_accounts()` with a value for // latest_full_snapshot_slot that requires this archive call to succeed. - let (archive_result, archive_time_us) = measure_us!(snapshot_utils::serialize_and_archive_snapshot_package( - snapshot_package, - &snapshot_config, - )); + let (archive_result, archive_time_us) = + measure_us!(snapshot_utils::serialize_and_archive_snapshot_package( + snapshot_package, + &snapshot_config, + )); if let Err(err) = archive_result { - error!("Stopping SnapshotPackagerService! Fatal error while archiving snapshot package: {err}"); + error!( + "Stopping SnapshotPackagerService! Fatal error while archiving \ + snapshot package: {err}" + ); exit.store(true, Ordering::Relaxed); break; } - if let Some(snapshot_gossip_manager) = snapshot_gossip_manager.as_mut() { - snapshot_gossip_manager.push_snapshot_hash(snapshot_kind, (snapshot_slot, snapshot_hash)); + snapshot_gossip_manager + .push_snapshot_hash(snapshot_kind, (snapshot_slot, snapshot_hash)); } - let (_, purge_archives_time_us) = measure_us!(snapshot_utils::purge_old_snapshot_archives( - &snapshot_config.full_snapshot_archives_dir, - &snapshot_config.incremental_snapshot_archives_dir, - snapshot_config.maximum_full_snapshot_archives_to_retain, - snapshot_config.maximum_incremental_snapshot_archives_to_retain, - )); + let (_, purge_archives_time_us) = + measure_us!(snapshot_utils::purge_old_snapshot_archives( + &snapshot_config.full_snapshot_archives_dir, + &snapshot_config.incremental_snapshot_archives_dir, + snapshot_config.maximum_full_snapshot_archives_to_retain, + snapshot_config.maximum_incremental_snapshot_archives_to_retain, + )); // Now that this snapshot package has been archived, it is safe to remove // all bank snapshots older than this slot. We want to keep the bank // snapshot *at this slot* so that it can be used during restarts, when // booting from local state. 
- let (_, purge_bank_snapshots_time_us) = measure_us!(snapshot_utils::purge_bank_snapshots_older_than_slot( - &snapshot_config.bank_snapshots_dir, - snapshot_slot, - )); + let (_, purge_bank_snapshots_time_us) = + measure_us!(snapshot_utils::purge_bank_snapshots_older_than_slot( + &snapshot_config.bank_snapshots_dir, + snapshot_slot, + )); let handling_time_us = measure_handling.end_as_us(); datapoint_info!( @@ -109,11 +114,7 @@ impl SnapshotPackagerService { purge_bank_snapshots_time_us, i64 ), - ( - "purge_old_archives_time_us", - purge_archives_time_us, - i64 - ), + ("purge_old_archives_time_us", purge_archives_time_us, i64), ); } info!("SnapshotPackagerService has stopped"); diff --git a/core/src/snapshot_packager_service/pending_snapshot_packages.rs b/core/src/snapshot_packager_service/pending_snapshot_packages.rs index 726a3dd39477f7..9edf9624a88dd8 100644 --- a/core/src/snapshot_packager_service/pending_snapshot_packages.rs +++ b/core/src/snapshot_packager_service/pending_snapshot_packages.rs @@ -59,7 +59,8 @@ impl PendingSnapshotPackages { old: {pending_incremental_snapshot_package:?}, new: {snapshot_package:?}", ); info!( - "overwrote pending incremental snapshot package, old slot: {}, new slot: {}", + "overwrote pending incremental snapshot package, old slot: {}, new slot: \ + {}", pending_incremental_snapshot_package.slot, snapshot_package.slot, ); } diff --git a/core/src/snapshot_packager_service/snapshot_gossip_manager.rs b/core/src/snapshot_packager_service/snapshot_gossip_manager.rs index d4ab9863642e09..d7e1f89abc263f 100644 --- a/core/src/snapshot_packager_service/snapshot_gossip_manager.rs +++ b/core/src/snapshot_packager_service/snapshot_gossip_manager.rs @@ -102,9 +102,10 @@ impl SnapshotGossipManager { .as_mut() .expect("there must already be a full snapshot hash"); assert_eq!( - base_slot, latest_snapshot_hashes.full.0.0, - "the incremental snapshot's base slot ({}) must match the latest full snapshot's slot ({})", - base_slot, 
latest_snapshot_hashes.full.0.0, + base_slot, latest_snapshot_hashes.full.0 .0, + "the incremental snapshot's base slot ({}) must match the latest full snapshot's slot \ + ({})", + base_slot, latest_snapshot_hashes.full.0 .0, ); latest_snapshot_hashes.incremental = Some(incremental_snapshot_hash); } @@ -129,8 +130,8 @@ impl SnapshotGossipManager { .collect(), ) .expect( - "Bug! The programmer contract has changed for push_snapshot_hashes() \ - and a new error case has been added that has not been handled here.", + "Bug! The programmer contract has changed for push_snapshot_hashes() and a new \ + error case has been added that has not been handled here.", ); } } diff --git a/core/src/system_monitor_service.rs b/core/src/system_monitor_service.rs index bec85780fc3ede..455698fe300186 100644 --- a/core/src/system_monitor_service.rs +++ b/core/src/system_monitor_service.rs @@ -466,8 +466,13 @@ impl SystemMonitorService { .map(|(key, interesting_limit, current_value)| { datapoint_warn!("os-config", (key, *current_value, i64)); match interesting_limit { - InterestingLimit::Recommend(recommended_value) if current_value < recommended_value => { - warn!(" {key}: recommended={recommended_value} current={current_value}, too small"); + InterestingLimit::Recommend(recommended_value) + if current_value < recommended_value => + { + warn!( + " {key}: recommended={recommended_value} current={current_value}, \ + too small" + ); false } InterestingLimit::Recommend(recommended_value) => { diff --git a/core/src/tpu_entry_notifier.rs b/core/src/tpu_entry_notifier.rs index 22994455e88814..583ef343510ff7 100644 --- a/core/src/tpu_entry_notifier.rs +++ b/core/src/tpu_entry_notifier.rs @@ -85,14 +85,16 @@ impl TpuEntryNotifier { starting_transaction_index: *current_transaction_index, }) { warn!( - "Failed to send slot {slot:?} entry {index:?} from Tpu to EntryNotifierService, error {err:?}", + "Failed to send slot {slot:?} entry {index:?} from Tpu to EntryNotifierService, \ + error {err:?}", 
); } *current_transaction_index += entry.transactions.len(); if let Err(err) = broadcast_entry_sender.send((bank, (entry, tick_height))) { warn!( - "Failed to send slot {slot:?} entry {index:?} from Tpu to BroadcastStage, error {err:?}", + "Failed to send slot {slot:?} entry {index:?} from Tpu to BroadcastStage, error \ + {err:?}", ); // If the BroadcastStage channel is closed, the validator has halted. Try to exit // gracefully. diff --git a/core/src/validator.rs b/core/src/validator.rs index 0f99e4c4768497..6f42c60c1c9f5c 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -609,8 +609,8 @@ impl Validator { expected_shred_version, ) .context( - "Failed to backup and clear shreds with incorrect \ - shred version from blockstore", + "Failed to backup and clear shreds with incorrect shred version from \ + blockstore", )?; } } @@ -682,9 +682,8 @@ impl Validator { .and_then(|geyser_plugin_service| geyser_plugin_service.get_block_metadata_notifier()); info!( - "Geyser plugin: accounts_update_notifier: {}, \ - transaction_notifier: {}, \ - entry_notifier: {}", + "Geyser plugin: accounts_update_notifier: {}, transaction_notifier: {}, \ + entry_notifier: {}", accounts_update_notifier.is_some(), transaction_notifier.is_some(), entry_notifier.is_some() @@ -1695,9 +1694,8 @@ fn check_poh_speed(bank: &Bank, maybe_hash_samples: Option) -> Result<(), V (hashes_per_slot as f64 / target_slot_duration.as_secs_f64()) as u64; info!( - "PoH speed check: \ - computed hashes per second {my_hashes_per_second}, \ - target hashes per second {target_hashes_per_second}" + "PoH speed check: computed hashes per second {my_hashes_per_second}, target hashes per \ + second {target_hashes_per_second}" ); if my_hashes_per_second < target_hashes_per_second { return Err(ValidatorError::PohTooSlow { @@ -1777,20 +1775,21 @@ fn post_process_restored_tower( } if should_require_tower && voting_has_been_active { return Err(format!( - "Requested mandatory tower restore failed: {err}. 
\ - And there is an existing vote_account containing actual votes. \ - Aborting due to possible conflicting duplicate votes" + "Requested mandatory tower restore failed: {err}. And there is an existing \ + vote_account containing actual votes. Aborting due to possible conflicting \ + duplicate votes" )); } if err.is_file_missing() && !voting_has_been_active { // Currently, don't protect against spoofed snapshots with no tower at all info!( - "Ignoring expected failed tower restore because this is the initial \ - validator start with the vote account..." + "Ignoring expected failed tower restore because this is the initial validator \ + start with the vote account..." ); } else { error!( - "Rebuilding a new tower from the latest vote account due to failed tower restore: {}", + "Rebuilding a new tower from the latest vote account due to failed tower \ + restore: {}", err ); } @@ -2396,8 +2395,8 @@ fn wait_for_supermajority( std::cmp::Ordering::Less => return Ok(false), std::cmp::Ordering::Greater => { error!( - "Ledger does not have enough data to wait for supermajority, \ - please enable snapshot fetch. Has {} needs {}", + "Ledger does not have enough data to wait for supermajority, please \ + enable snapshot fetch. 
Has {} needs {}", bank.slot(), wait_for_supermajority_slot ); diff --git a/core/src/warm_quic_cache_service.rs b/core/src/warm_quic_cache_service.rs index e4a67cbe993169..fd86e2af9f766f 100644 --- a/core/src/warm_quic_cache_service.rs +++ b/core/src/warm_quic_cache_service.rs @@ -39,7 +39,7 @@ impl WarmQuicCacheService { let slot_jitter = thread_rng().gen_range(-CACHE_JITTER_SLOT..CACHE_JITTER_SLOT); let mut maybe_last_leader = None; while !exit.load(Ordering::Relaxed) { - let leader_pubkey = poh_recorder + let leader_pubkey = poh_recorder .read() .unwrap() .leader_after_n_slots((CACHE_OFFSET_SLOT + slot_jitter) as u64); @@ -49,12 +49,15 @@ impl WarmQuicCacheService { { maybe_last_leader = Some(leader_pubkey); if let Some(Ok(addr)) = cluster_info - .lookup_contact_info(&leader_pubkey, |node| node.tpu(Protocol::QUIC)) + .lookup_contact_info(&leader_pubkey, |node| { + node.tpu(Protocol::QUIC) + }) { let conn = connection_cache.get_connection(&addr); if let Err(err) = conn.send_data(&[]) { warn!( - "Failed to warmup QUIC connection to the leader {:?}, Error {:?}", + "Failed to warmup QUIC connection to the leader {:?}, \ + Error {:?}", leader_pubkey, err ); } diff --git a/core/tests/fork-selection.rs b/core/tests/fork-selection.rs index 02385a1e378c88..d54fbfba8094b4 100644 --- a/core/tests/fork-selection.rs +++ b/core/tests/fork-selection.rs @@ -561,14 +561,15 @@ fn test_with_partitions( trunk.0 }; println!( - "time: {}, tip converged: {}, trunk id: {}, trunk time: {}, trunk converged {}, trunk height {}", - time, - calc_tip_converged(&towers, &converge_map), - trunk.0, - trunk_time, - trunk.1, - calc_fork_depth(&fork_tree, trunk.0) - ); + "time: {}, tip converged: {}, trunk id: {}, trunk time: {}, trunk converged {}, \ + trunk height {}", + time, + calc_tip_converged(&towers, &converge_map), + trunk.0, + trunk_time, + trunk.1, + calc_fork_depth(&fork_tree, trunk.0) + ); if break_early && calc_tip_converged(&towers, &converge_map) == len { break; } diff --git 
a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 2a6c77ddb0a0c0..6e17f5a9cfb0f2 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -434,8 +434,14 @@ fn test_bank_forks_incremental_snapshot( INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 5; const LAST_SLOT: Slot = FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 2 - 1; - info!("Running bank forks incremental snapshot test, full snapshot interval: {} slots, incremental snapshot interval: {} slots, last slot: {}, set root interval: {} slots", - FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, LAST_SLOT, SET_ROOT_INTERVAL); + info!( + "Running bank forks incremental snapshot test, full snapshot interval: {} slots, \ + incremental snapshot interval: {} slots, last slot: {}, set root interval: {} slots", + FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, + LAST_SLOT, + SET_ROOT_INTERVAL + ); let snapshot_test_config = SnapshotTestConfig::new( snapshot_version, @@ -444,8 +450,20 @@ fn test_bank_forks_incremental_snapshot( FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, ); - trace!("SnapshotTestConfig:\naccounts_dir: {}\nbank_snapshots_dir: {}\nfull_snapshot_archives_dir: {}\nincremental_snapshot_archives_dir: {}", - snapshot_test_config.accounts_dir.display(), snapshot_test_config.bank_snapshots_dir.path().display(), snapshot_test_config.full_snapshot_archives_dir.path().display(), snapshot_test_config.incremental_snapshot_archives_dir.path().display()); + trace!( + "SnapshotTestConfig:\naccounts_dir: {}\nbank_snapshots_dir: \ + {}\nfull_snapshot_archives_dir: {}\nincremental_snapshot_archives_dir: {}", + snapshot_test_config.accounts_dir.display(), + snapshot_test_config.bank_snapshots_dir.path().display(), + snapshot_test_config + .full_snapshot_archives_dir + .path() + .display(), + snapshot_test_config + .incremental_snapshot_archives_dir + .path() + .display() + ); let bank_forks = 
snapshot_test_config.bank_forks.clone(); let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; @@ -637,11 +655,11 @@ fn test_snapshots_with_background_services( info!("Running snapshots with background services test..."); trace!( "Test configuration parameters:\ - \n\tfull snapshot archive interval: {} slots\ - \n\tincremental snapshot archive interval: {} slots\ - \n\tbank snapshot interval: {} slots\ - \n\tset root interval: {} slots\ - \n\tlast slot: {}", + \n\tfull snapshot archive interval: {} slots\ + \n\tincremental snapshot archive interval: {} slots\ + \n\tbank snapshot interval: {} slots\ + \n\tset root interval: {} slots\ + \n\tlast slot: {}", FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, BANK_SNAPSHOT_INTERVAL_SLOTS, @@ -773,7 +791,8 @@ fn test_snapshots_with_background_services( { assert!( timer.elapsed() < MAX_WAIT_DURATION, - "Waiting for full snapshot {slot} exceeded the {MAX_WAIT_DURATION:?} maximum wait duration!", + "Waiting for full snapshot {slot} exceeded the {MAX_WAIT_DURATION:?} maximum \ + wait duration!", ); std::thread::sleep(Duration::from_secs(1)); } @@ -791,7 +810,8 @@ fn test_snapshots_with_background_services( { assert!( timer.elapsed() < MAX_WAIT_DURATION, - "Waiting for incremental snapshot {slot} exceeded the {MAX_WAIT_DURATION:?} maximum wait duration!", + "Waiting for incremental snapshot {slot} exceeded the {MAX_WAIT_DURATION:?} \ + maximum wait duration!", ); std::thread::sleep(Duration::from_secs(1)); }