Skip to content

Commit

Permalink
Format strings in core directory (#2782)
Browse files Browse the repository at this point in the history
Temporarily set format_string = true in rustfmt.toml and run fmt. This
wraps long strings that prevented fmt from working properly in the
whole source file(s).

Several strings that were carefully formatted for readability were
left as-is instead of accepting the changes from format_strings.
  • Loading branch information
steviez authored Aug 29, 2024
1 parent da81bb8 commit b0bcbc1
Show file tree
Hide file tree
Showing 23 changed files with 329 additions and 216 deletions.
5 changes: 2 additions & 3 deletions core/benches/consumer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -141,9 +141,8 @@ fn bench_process_and_record_transactions(bencher: &mut Bencher, batch_size: usiz
assert_eq!(
TRANSACTIONS_PER_ITERATION % batch_size,
0,
"batch_size must be a factor of \
`TRANSACTIONS_PER_ITERATION` ({TRANSACTIONS_PER_ITERATION}) \
so that bench results are easily comparable"
"batch_size must be a factor of `TRANSACTIONS_PER_ITERATION` \
({TRANSACTIONS_PER_ITERATION}) so that bench results are easily comparable"
);
let batches_per_iteration = TRANSACTIONS_PER_ITERATION / batch_size;

Expand Down
27 changes: 15 additions & 12 deletions core/src/accounts_hash_verifier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,10 @@ impl AccountsHashVerifier {
&snapshot_config,
));
if let Err(err) = result {
error!("Stopping AccountsHashVerifier! Fatal error while processing accounts package: {err}");
error!(
"Stopping AccountsHashVerifier! Fatal error while processing accounts \
package: {err}"
);
exit.store(true, Ordering::Relaxed);
break;
}
Expand Down Expand Up @@ -144,7 +147,8 @@ impl AccountsHashVerifier {
.count();
assert!(
num_eah_packages <= 1,
"Only a single EAH accounts package is allowed at a time! count: {num_eah_packages}"
"Only a single EAH accounts package is allowed at a time! count: \
{num_eah_packages}"
);

// Get the two highest priority requests, `y` and `z`.
Expand Down Expand Up @@ -261,12 +265,12 @@ impl AccountsHashVerifier {
accounts_db.get_accounts_hash(base_slot)
else {
panic!(
"incremental snapshot requires accounts hash and capitalization \
from the full snapshot it is based on \n\
package: {accounts_package:?} \n\
accounts hashes: {:?} \n\
incremental accounts hashes: {:?} \n\
full snapshot archives: {:?} \n\
"incremental snapshot requires accounts hash and capitalization from \
the full snapshot it is based on\n\
package: {accounts_package:?}\n\
accounts hashes: {:?}\n\
incremental accounts hashes: {:?}\n\
full snapshot archives: {:?}\n\
bank snapshots: {:?}",
accounts_db.get_accounts_hashes(),
accounts_db.get_incremental_accounts_hashes(),
Expand Down Expand Up @@ -344,10 +348,9 @@ impl AccountsHashVerifier {
HashStats::default(),
);
panic!(
"accounts hash capitalization mismatch: expected {}, but calculated {} (then recalculated {})",
accounts_package.expected_capitalization,
lamports,
second_accounts_hash.1,
"accounts hash capitalization mismatch: expected {}, but calculated {} (then \
recalculated {})",
accounts_package.expected_capitalization, lamports, second_accounts_hash.1,
);
}

Expand Down
3 changes: 2 additions & 1 deletion core/src/banking_stage/latest_unprocessed_votes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -355,7 +355,8 @@ impl LatestUnprocessedVotes {
.and_then(|account| from_account::<SlotHashes, _>(&account));
if slot_hashes.is_none() {
error!(
"Slot hashes sysvar doesn't exist on bank {}. Including all votes without filtering",
"Slot hashes sysvar doesn't exist on bank {}. Including all votes without \
filtering",
bank.slot()
);
}
Expand Down
56 changes: 37 additions & 19 deletions core/src/banking_stage/qos_service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -101,25 +101,43 @@ impl QosService {
let mut cost_tracking_time = Measure::start("cost_tracking_time");
let mut cost_tracker = bank.write_cost_tracker().unwrap();
let mut num_included = 0;
let select_results = transactions.zip(transactions_costs)
.map(|(tx, cost)| {
match cost {
Ok(cost) => {
match cost_tracker.try_add(&cost) {
Ok(UpdatedCosts{updated_block_cost, updated_costliest_account_cost}) => {
debug!("slot {:?}, transaction {:?}, cost {:?}, fit into current block, current block cost {}, updated costliest account cost {}", bank.slot(), tx, cost, updated_block_cost, updated_costliest_account_cost);
self.metrics.stats.selected_txs_count.fetch_add(1, Ordering::Relaxed);
num_included += 1;
Ok(cost)
},
Err(e) => {
debug!("slot {:?}, transaction {:?}, cost {:?}, not fit into current block, '{:?}'", bank.slot(), tx, cost, e);
Err(TransactionError::from(e))
}
}
},
Err(e) => Err(e),
}
let select_results = transactions
.zip(transactions_costs)
.map(|(tx, cost)| match cost {
Ok(cost) => match cost_tracker.try_add(&cost) {
Ok(UpdatedCosts {
updated_block_cost,
updated_costliest_account_cost,
}) => {
debug!(
"slot {:?}, transaction {:?}, cost {:?}, fit into current block, \
current block cost {}, updated costliest account cost {}",
bank.slot(),
tx,
cost,
updated_block_cost,
updated_costliest_account_cost
);
self.metrics
.stats
.selected_txs_count
.fetch_add(1, Ordering::Relaxed);
num_included += 1;
Ok(cost)
}
Err(e) => {
debug!(
"slot {:?}, transaction {:?}, cost {:?}, not fit into current block, \
'{:?}'",
bank.slot(),
tx,
cost,
e
);
Err(TransactionError::from(e))
}
},
Err(e) => Err(e),
})
.collect();
cost_tracker.add_transactions_in_flight(num_included);
Expand Down
57 changes: 34 additions & 23 deletions core/src/consensus.rs
Original file line number Diff line number Diff line change
Expand Up @@ -800,7 +800,10 @@ impl Tower {
ancestors: &HashMap<Slot, HashSet<Slot>>,
last_vote_ancestors: &HashSet<Slot>,
) -> Option<bool> {
trace!("Checking if {candidate_slot} is a valid switching proof vote from {last_voted_slot} to {switch_slot}");
trace!(
"Checking if {candidate_slot} is a valid switching proof vote from {last_voted_slot} \
to {switch_slot}"
);
// Ignore if the `candidate_slot` is a descendant of the `last_voted_slot`, since we do not
// want to count votes on the same fork.
if Self::is_descendant_slot(candidate_slot, last_voted_slot, ancestors)? {
Expand Down Expand Up @@ -923,9 +926,10 @@ impl Tower {
// `switch < last` is needed not to warn! this message just because of using
// newer snapshots on validator restart
let message = format!(
"bank_forks doesn't have corresponding data for the stray restored \
last vote({last_voted_slot}), meaning some inconsistency between saved tower and ledger."
);
"bank_forks doesn't have corresponding data for the stray restored last \
vote({last_voted_slot}), meaning some inconsistency between saved tower and \
ledger."
);
warn!("{}", message);
datapoint_warn!("tower_warn", ("warn", message, String));
}
Expand Down Expand Up @@ -1030,8 +1034,9 @@ impl Tower {
return suspended_decision_due_to_major_unsynced_ledger();
} else {
panic!(
"Should never consider switching to ancestor ({switch_slot}) of last vote: {last_voted_slot}, ancestors({last_vote_ancestors:?})",
);
"Should never consider switching to ancestor ({switch_slot}) of last vote: \
{last_voted_slot}, ancestors({last_vote_ancestors:?})",
);
}
}

Expand Down Expand Up @@ -1254,7 +1259,8 @@ impl Tower {

let lockout = *fork_stake as f64 / total_stake as f64;
trace!(
"fork_stake slot: {}, threshold_vote slot: {}, lockout: {} fork_stake: {} total_stake: {}",
"fork_stake slot: {}, threshold_vote slot: {}, lockout: {} fork_stake: {} \
total_stake: {}",
slot,
threshold_vote.slot(),
lockout,
Expand Down Expand Up @@ -1419,9 +1425,8 @@ impl Tower {
// While this validator's voting is suspended this way,
// suspended_decision_due_to_major_unsynced_ledger() will be also touched.
let message = format!(
"For some reason, we're REPROCESSING slots which has already been \
voted and ROOTED by us; \
VOTING will be SUSPENDED UNTIL {last_voted_slot}!",
"For some reason, we're REPROCESSING slots which has already been voted and \
ROOTED by us; VOTING will be SUSPENDED UNTIL {last_voted_slot}!",
);
error!("{}", message);
datapoint_error!("tower_error", ("error", message, String));
Expand Down Expand Up @@ -1549,7 +1554,8 @@ impl Tower {
self.last_vote = VoteTransaction::from(Vote::default());
} else {
info!(
"{} restored votes (out of {}) were on different fork or are upcoming votes on unrooted slots: {:?}!",
"{} restored votes (out of {}) were on different fork or are upcoming votes on \
unrooted slots: {:?}!",
self.voted_slots().len(),
original_votes_len,
self.voted_slots()
Expand Down Expand Up @@ -1623,8 +1629,8 @@ pub enum TowerError {
WrongTower(String),

#[error(
"The tower is too old: \
newest slot in tower ({0}) << oldest slot in available history ({1})"
"The tower is too old: newest slot in tower ({0}) << oldest slot in available history \
({1})"
)]
TooOldTower(Slot, Slot),

Expand Down Expand Up @@ -1704,13 +1710,15 @@ pub fn reconcile_blockstore_roots_with_external_source(
Ordering::Equal => false,
Ordering::Less => panic!(
"last_blockstore_root({last_blockstore_root}) is skipped while traversing \
blockstore (currently at {current}) from external root ({external_source:?})!?",
blockstore (currently at {current}) from external root \
({external_source:?})!?",
),
})
.collect();
if !new_roots.is_empty() {
info!(
"Reconciling slots as root based on external root: {:?} (external: {:?}, blockstore: {})",
"Reconciling slots as root based on external root: {:?} (external: {:?}, \
blockstore: {})",
new_roots, external_source, last_blockstore_root
);

Expand All @@ -1733,9 +1741,9 @@ pub fn reconcile_blockstore_roots_with_external_source(
// That's because we might have a chance of recovering properly with
// newer snapshot.
warn!(
"Couldn't find any ancestor slots from external source ({:?}) \
towards blockstore root ({}); blockstore pruned or only \
tower moved into new ledger or just hard fork?",
"Couldn't find any ancestor slots from external source ({:?}) towards blockstore \
root ({}); blockstore pruned or only tower moved into new ledger or just hard \
fork?",
external_source, last_blockstore_root,
);
}
Expand Down Expand Up @@ -3251,9 +3259,10 @@ pub mod test {
}

#[test]
#[should_panic(expected = "last_blockstore_root(3) is skipped while \
traversing blockstore (currently at 1) from \
external root (Tower(4))!?")]
#[should_panic(
expected = "last_blockstore_root(3) is skipped while traversing blockstore (currently at \
1) from external root (Tower(4))!?"
)]
fn test_reconcile_blockstore_roots_with_tower_panic_no_common_root() {
solana_logger::setup();
let ledger_path = get_tmp_ledger_path_auto_delete!();
Expand Down Expand Up @@ -3522,7 +3531,8 @@ pub mod test {
let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history);
assert_eq!(
format!("{}", result.unwrap_err()),
"The tower is too old: newest slot in tower (0) << oldest slot in available history (1)"
"The tower is too old: newest slot in tower (0) << oldest slot in available history \
(1)"
);
}

Expand Down Expand Up @@ -3601,7 +3611,8 @@ pub mod test {
let result = tower.adjust_lockouts_after_replay(MAX_ENTRIES, &slot_history);
assert_eq!(
format!("{}", result.unwrap_err()),
"The tower is fatally inconsistent with blockstore: not too old once after got too old?"
"The tower is fatally inconsistent with blockstore: not too old once after got too \
old?"
);
}

Expand Down
3 changes: 2 additions & 1 deletion core/src/consensus/fork_choice.rs
Original file line number Diff line number Diff line change
Expand Up @@ -256,7 +256,8 @@ fn select_candidates_failed_switch_duplicate_rollback<'a>(
// invalid candidate). Thus, it's safe to use as the reset bank.
let reset_bank = Some(heaviest_bank);
info!(
"Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: {:?}",
"Waiting to switch vote to {}, resetting to slot {:?} for now, latest duplicate ancestor: \
{:?}",
heaviest_bank.slot(),
reset_bank.as_ref().map(|b| b.slot()),
latest_duplicate_ancestor,
Expand Down
11 changes: 8 additions & 3 deletions core/src/consensus/heaviest_subtree_fork_choice.rs
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,11 @@ impl ForkInfo {
) {
if let Some(latest_invalid_ancestor) = self.latest_invalid_ancestor {
if latest_invalid_ancestor <= newly_valid_ancestor {
info!("Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was duplicate confirmed", my_key, latest_invalid_ancestor, newly_valid_ancestor);
info!(
"Fork choice for {:?} clearing latest invalid ancestor {:?} because {:?} was \
duplicate confirmed",
my_key, latest_invalid_ancestor, newly_valid_ancestor
);
self.latest_invalid_ancestor = None;
}
}
Expand Down Expand Up @@ -1188,8 +1192,9 @@ impl HeaviestSubtreeForkChoice {
// validator has been running, so we must be able to fetch best_slots for all of
// them.
panic!(
"a bank at last_voted_slot({last_voted_slot_hash:?}) is a frozen bank so must have been \
added to heaviest_subtree_fork_choice at time of freezing",
"a bank at last_voted_slot({last_voted_slot_hash:?}) is a frozen \
bank so must have been added to heaviest_subtree_fork_choice at \
time of freezing",
)
} else {
// fork_infos doesn't have corresponding data for the stale stray last vote,
Expand Down
2 changes: 1 addition & 1 deletion core/src/cost_update_service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ impl CostUpdateService {
let slot = bank.slot();
trace!(
"inflight transaction count is {in_flight_transaction_count} \
for slot {slot} after {loop_count} iteration(s)"
for slot {slot} after {loop_count} iteration(s)"
);
cost_tracker.report_stats(slot);
break;
Expand Down
13 changes: 9 additions & 4 deletions core/src/repair/ancestor_hashes_service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -559,17 +559,22 @@ impl AncestorHashesService {
// order to vote.
// This fits the alternate criteria we use in `find_epoch_slots_frozen_dead_slots`
// so we can upgrade it to `repairable_dead_slot_pool`.
info!("{pruned_slot} is part of a popular pruned fork however we previously marked it as dead.
Upgrading as dead duplicate confirmed");
info!(
"{pruned_slot} is part of a popular pruned fork however we previously \
marked it as dead. Upgrading as dead duplicate confirmed"
);
dead_slot_pool.remove(&pruned_slot);
repairable_dead_slot_pool.insert(pruned_slot);
} else if repairable_dead_slot_pool.contains(&pruned_slot) {
// If we already observed `pruned_slot` as dead duplicate confirmed, we
// ignore the additional information that `pruned_slot` is popular pruned.
// This is similar to the above case where `pruned_slot` was first pruned
// and then marked dead duplicate confirmed.
info!("Received pruned duplicate confirmed status for {pruned_slot} that was previously marked
dead duplicate confirmed. Ignoring and processing it as dead duplicate confirmed.");
info!(
"Received pruned duplicate confirmed status for {pruned_slot} that \
was previously marked dead duplicate confirmed. Ignoring and \
processing it as dead duplicate confirmed."
);
} else {
popular_pruned_slot_pool.insert(pruned_slot);
}
Expand Down
Loading

0 comments on commit b0bcbc1

Please sign in to comment.