From dd481296f577fabb6adf32625c5bf01e2a318daa Mon Sep 17 00:00:00 2001 From: Wen <113942165+wen-coding@users.noreply.github.com> Date: Fri, 23 Aug 2024 14:20:32 -0700 Subject: [PATCH] wen_restart: Fix the epoch_stakes used in calculation. (#2376) * wen_restart: Fix the epoch_stakes used in calculation. * Fix a bad merge. * Remove EpochStakesCache, it only caches epoch stakes from root_bank, better to just keep root_bank around. * Split aggregate into smaller functions. * Switch to node_id_to_stake which is simpler. * Rename update_slots_stake_map and switch to epoch_total_stake(). * Remove unnecessary utility functions. * Do not modify epoch_info_vec, just init it with two epochs we will consider. * Switch to epoch_node_id_to_stake() * Add test for divergence at Epoch boundary. * Make linter happy. * - wait for the new Epoch if > 1/3 of the validators voted for some slot in the new Epoch - switch to voted_percent and voted_for_this_epoch_percent * Fix a bad merge. * Fix a bad merge. * Change constant format. * Do not loop through the whole table. * Address reviewer feedback. * Address reviewer comments. --- Cargo.lock | 1 + wen-restart/Cargo.toml | 1 + wen-restart/proto/wen_restart.proto | 9 +- wen-restart/src/heaviest_fork_aggregate.rs | 4 +- .../src/last_voted_fork_slots_aggregate.rs | 524 +++++++++++++----- wen-restart/src/wen_restart.rs | 418 ++++++++++++-- 6 files changed, 780 insertions(+), 177 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5b12b805c98838..e42722486c837d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8152,6 +8152,7 @@ dependencies = [ "solana-sdk", "solana-streamer", "solana-timings", + "solana-vote", "solana-vote-program", "tempfile", ] diff --git a/wen-restart/Cargo.toml b/wen-restart/Cargo.toml index f5fbf99a1e20c8..ff755c75331124 100644 --- a/wen-restart/Cargo.toml +++ b/wen-restart/Cargo.toml @@ -34,6 +34,7 @@ solana-entry = { workspace = true } solana-logger = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-streamer = { workspace = true } +solana-vote = { workspace = true } tempfile = { workspace = true } [build-dependencies] diff --git a/wen-restart/proto/wen_restart.proto b/wen-restart/proto/wen_restart.proto index e3fc0743ef5dc8..856e7df9ef114a 100644 --- a/wen-restart/proto/wen_restart.proto +++ b/wen-restart/proto/wen_restart.proto @@ -21,9 +21,16 @@ message LastVotedForkSlotsAggregateRecord { optional LastVotedForkSlotsAggregateFinal final_result = 2; } +message LastVotedForkSlotsEpochInfoRecord { + uint64 epoch = 1; + uint64 total_stake = 2; + uint64 actively_voting_stake = 3; + uint64 actively_voting_for_this_epoch_stake = 4; +} + message LastVotedForkSlotsAggregateFinal { map<uint64, uint64> slots_stake_map = 1; - uint64 total_active_stake = 2; + repeated LastVotedForkSlotsEpochInfoRecord epoch_infos = 2; } message HeaviestForkRecord {
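Editor's note on the schema change above: the single total_active_stake field is replaced by one record per epoch. A minimal sketch of how the in-memory struct this patch introduces maps onto the generated proto type (the helper name is illustrative only; the patch performs this conversion inline in wen_restart.rs):

    // Assumes LastVotedForkSlotsEpochInfo and the prost-generated
    // LastVotedForkSlotsEpochInfoRecord from this patch are in scope.
    fn to_epoch_info_record(
        info: &LastVotedForkSlotsEpochInfo,
    ) -> LastVotedForkSlotsEpochInfoRecord {
        // Field-for-field copy between the two types defined in this patch.
        LastVotedForkSlotsEpochInfoRecord {
            epoch: info.epoch,
            total_stake: info.total_stake,
            actively_voting_stake: info.actively_voting_stake,
            actively_voting_for_this_epoch_stake: info.actively_voting_for_this_epoch_stake,
        }
    }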
diff --git a/wen-restart/src/heaviest_fork_aggregate.rs b/wen-restart/src/heaviest_fork_aggregate.rs index dac13bd8274568..d5e454b6eeebe1 100644 --- a/wen-restart/src/heaviest_fork_aggregate.rs +++ b/wen-restart/src/heaviest_fork_aggregate.rs @@ -15,7 +15,8 @@ pub(crate) struct HeaviestForkAggregate { supermajority_threshold: f64, my_shred_version: u16, my_pubkey: Pubkey, - // TODO(wen): using local root's EpochStakes, need to fix if crossing Epoch boundary. + // We use the epoch_stakes of the Epoch our heaviest bank is in. Proceed and exit only if + // enough validators agree with me. epoch_stakes: EpochStakes, heaviest_forks: HashMap<Pubkey, RestartHeaviestFork>, block_stake_map: HashMap<(Slot, Hash), u64>, @@ -171,7 +172,6 @@ impl HeaviestForkAggregate { Some(record) } - // TODO(wen): use better epoch stake and add a test later. pub(crate) fn total_active_stake(&self) -> u64 { self.active_peers.iter().fold(0, |sum: u64, pubkey| { sum.saturating_add(self.epoch_stakes.node_id_to_stake(pubkey).unwrap_or(0)) diff --git a/wen-restart/src/last_voted_fork_slots_aggregate.rs b/wen-restart/src/last_voted_fork_slots_aggregate.rs index 67cd7c1c77ea87..f680dc73238156 100644 --- a/wen-restart/src/last_voted_fork_slots_aggregate.rs +++ b/wen-restart/src/last_voted_fork_slots_aggregate.rs @@ -3,69 +3,121 @@ use { anyhow::Result, log::*, solana_gossip::restart_crds_values::RestartLastVotedForkSlots, - solana_runtime::epoch_stakes::EpochStakes, - solana_sdk::{clock::Slot, hash::Hash, pubkey::Pubkey}, + solana_runtime::bank::Bank, + solana_sdk::{ + clock::{Epoch, Slot}, + hash::Hash, + pubkey::Pubkey, + }, std::{ - collections::{HashMap, HashSet}, + collections::{BTreeSet, HashMap, HashSet}, str::FromStr, + sync::Arc, }, }; +// If at least 1/3 of the stake has voted for a slot in the next Epoch, we think +// the cluster's clock is in sync and everyone will enter the new Epoch soon. +// So we require that we have >80% stake in the new Epoch to exit. +const EPOCH_CONSIDERED_FOR_EXIT_THRESHOLD: f64 = 1f64 / 3f64; + +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct LastVotedForkSlotsEpochInfo { + pub epoch: Epoch, + pub total_stake: u64, + // Total stake of active peers in this epoch, whether or not they voted for + // a slot in this epoch. + pub actively_voting_stake: u64, + // Total stake of active peers which have voted for a slot in this epoch. + pub actively_voting_for_this_epoch_stake: u64, +} +
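A worked example of how these two counters interact (hypothetical numbers; the filter-then-min logic is the min_active_percent() introduced further down in this file):

    // Hypothetical numbers; assumes the LastVotedForkSlotsEpochInfo struct
    // above is in scope.
    fn epoch_info_example() -> Vec<LastVotedForkSlotsEpochInfo> {
        vec![
            // Epoch 7: 500/1000 of total stake voted for a slot in epoch 7
            // (> 1/3), so this epoch is considered; its active percent is 50%.
            LastVotedForkSlotsEpochInfo {
                epoch: 7,
                total_stake: 1000,
                actively_voting_stake: 500,
                actively_voting_for_this_epoch_stake: 500,
            },
            // Epoch 8: only 200/1000 voted for a slot inside epoch 8 (< 1/3),
            // so the cluster is not yet treated as having entered epoch 8 and
            // this entry is ignored; min_active_percent() would return 50.0.
            LastVotedForkSlotsEpochInfo {
                epoch: 8,
                total_stake: 1000,
                actively_voting_stake: 500,
                actively_voting_for_this_epoch_stake: 200,
            },
        ]
    }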
pub(crate) struct LastVotedForkSlotsAggregate { - root_slot: Slot, - repair_threshold: f64, - // TODO(wen): using local root's EpochStakes, need to fix if crossing Epoch boundary. - epoch_stakes: EpochStakes, + // Map each peer pubkey to the epoch of its last vote. + node_to_last_vote_epoch_map: HashMap<Pubkey, Epoch>, + epoch_info_vec: Vec<LastVotedForkSlotsEpochInfo>, last_voted_fork_slots: HashMap<Pubkey, RestartLastVotedForkSlots>, - slots_stake_map: HashMap<Slot, u64>, - active_peers: HashSet<Pubkey>, - slots_to_repair: HashSet<Slot>, my_pubkey: Pubkey, + repair_threshold: f64, + root_bank: Arc<Bank>, + slots_stake_map: HashMap<Slot, u64>, + slots_to_repair: BTreeSet<Slot>, } #[derive(Clone, Debug, PartialEq)] pub struct LastVotedForkSlotsFinalResult { pub slots_stake_map: HashMap<Slot, u64>, - pub total_active_stake: u64, + pub epoch_info_vec: Vec<LastVotedForkSlotsEpochInfo>, } impl LastVotedForkSlotsAggregate { pub(crate) fn new( - root_slot: Slot, + root_bank: Arc<Bank>, repair_threshold: f64, - epoch_stakes: &EpochStakes, - last_voted_fork_slots: &Vec<Slot>, + my_last_voted_fork_slots: &Vec<Slot>, my_pubkey: &Pubkey, ) -> Self { - let mut active_peers = HashSet::new(); - let sender_stake = Self::validator_stake(epoch_stakes, my_pubkey); - active_peers.insert(*my_pubkey); let mut slots_stake_map = HashMap::new(); - for slot in last_voted_fork_slots { + let root_slot = root_bank.slot(); + let root_epoch = root_bank.epoch(); + for slot in my_last_voted_fork_slots { if slot >= &root_slot { - slots_stake_map.insert(*slot, sender_stake); + let epoch = root_bank.epoch_schedule().get_epoch(*slot); + if let Some(sender_stake) = root_bank.epoch_node_id_to_stake(epoch, my_pubkey) { + slots_stake_map.insert(*slot, sender_stake); + } else { + warn!("The root bank {root_slot} does not have the stake for slot {slot}"); + } } } + + let my_last_vote_epoch = root_bank + .get_epoch_and_slot_index( + *my_last_voted_fork_slots + .iter() + .max() + .expect("my voted slots should not be empty"), + ) + .0; + let mut node_to_last_vote_epoch_map = HashMap::new(); + node_to_last_vote_epoch_map.insert(*my_pubkey, my_last_vote_epoch); + // We would only consider slots in root_epoch and the next epoch. + let epoch_info_vec: Vec<LastVotedForkSlotsEpochInfo> = (root_epoch + ..root_epoch + .checked_add(2) + .expect("root_epoch should not be so big")) + .map(|epoch| { + let total_stake = root_bank + .epoch_total_stake(epoch) + .expect("epoch stake not found"); + let my_stake = root_bank + .epoch_node_id_to_stake(epoch, my_pubkey) + .unwrap_or(0); + let actively_voting_for_this_epoch_stake = if epoch <= my_last_vote_epoch { + my_stake + } else { + 0 + }; + LastVotedForkSlotsEpochInfo { + epoch, + total_stake, + actively_voting_stake: my_stake, + actively_voting_for_this_epoch_stake, + } + }) + .collect(); Self { - root_slot, - repair_threshold, - epoch_stakes: epoch_stakes.clone(), + node_to_last_vote_epoch_map, + epoch_info_vec, last_voted_fork_slots: HashMap::new(), - slots_stake_map, - active_peers, - slots_to_repair: HashSet::new(), my_pubkey: *my_pubkey, + repair_threshold, + root_bank, + slots_stake_map, + slots_to_repair: BTreeSet::new(), } } - fn validator_stake(epoch_stakes: &EpochStakes, pubkey: &Pubkey) -> u64 { - epoch_stakes - .node_id_to_vote_accounts() - .get(pubkey) - .map(|x| x.total_stake) - .unwrap_or_default() - } pub(crate) fn aggregate_from_record( &mut self, key_string: &str, @@ -90,80 +142,137 @@ impl LastVotedForkSlotsAggregate { &mut self, new_slots: RestartLastVotedForkSlots, ) -> Option<LastVotedForkSlotsRecord> { - let total_stake = self.epoch_stakes.total_stake(); - let threshold_stake = (total_stake as f64 * self.repair_threshold) as u64; let from = &new_slots.from; if from == &self.my_pubkey { return None; } - let sender_stake = Self::validator_stake(&self.epoch_stakes, from); - if sender_stake == 0 { - warn!( - "Gossip should not accept zero-stake RestartLastVotedFork from {:?}", - from - ); + let root_slot = self.root_bank.slot(); + let new_slots_vec =
new_slots.to_slots(root_slot); + if new_slots_vec.is_empty() { return None; } - self.active_peers.insert(*from); - let new_slots_vec = new_slots.to_slots(self.root_slot); - let record = LastVotedForkSlotsRecord { - last_voted_fork_slots: new_slots_vec.clone(), + let last_vote_epoch = self + .root_bank + .get_epoch_and_slot_index(*new_slots_vec.last().unwrap()) + .0; + let old_last_vote_epoch = self + .node_to_last_vote_epoch_map + .insert(*from, last_vote_epoch); + if old_last_vote_epoch != Some(last_vote_epoch) { + self.update_epoch_info(from, last_vote_epoch, old_last_vote_epoch); + } + if self.update_and_check_if_message_already_saved(new_slots.clone(), new_slots_vec.clone()) + { + return None; + } + Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: new_slots_vec, last_vote_bankhash: new_slots.last_voted_hash.to_string(), shred_version: new_slots.shred_version as u32, wallclock: new_slots.wallclock, - }; + }) + } + + // Return true if the message has already been saved, so we can skip the rest of the processing. + fn update_and_check_if_message_already_saved( + &mut self, + new_slots: RestartLastVotedForkSlots, + new_slots_vec: Vec<Slot>, + ) -> bool { + let from = &new_slots.from; let new_slots_set: HashSet<Slot> = HashSet::from_iter(new_slots_vec); let old_slots_set = match self.last_voted_fork_slots.insert(*from, new_slots.clone()) { Some(old_slots) => { if old_slots == new_slots { - return None; + return true; } else { - HashSet::from_iter(old_slots.to_slots(self.root_slot)) + HashSet::from_iter(old_slots.to_slots(self.root_bank.slot())) } } None => HashSet::new(), }; for slot in old_slots_set.difference(&new_slots_set) { + let epoch = self.root_bank.epoch_schedule().get_epoch(*slot); let entry = self.slots_stake_map.get_mut(slot).unwrap(); - *entry = entry.saturating_sub(sender_stake); - if *entry < threshold_stake { - self.slots_to_repair.remove(slot); + if let Some(sender_stake) = self.root_bank.epoch_node_id_to_stake(epoch, from) { + *entry = entry.saturating_sub(sender_stake); + let repair_threshold_stake = (self.root_bank.epoch_total_stake(epoch).unwrap() + as f64 + * self.repair_threshold) as u64; + if *entry < repair_threshold_stake { + self.slots_to_repair.remove(slot); + } } } for slot in new_slots_set.difference(&old_slots_set) { + let epoch = self.root_bank.epoch_schedule().get_epoch(*slot); let entry = self.slots_stake_map.entry(*slot).or_insert(0); - *entry = entry.saturating_add(sender_stake); - if *entry >= threshold_stake { - self.slots_to_repair.insert(*slot); + if let Some(sender_stake) = self.root_bank.epoch_node_id_to_stake(epoch, from) { + *entry = entry.saturating_add(sender_stake); + let repair_threshold_stake = (self.root_bank.epoch_total_stake(epoch).unwrap() + as f64 + * self.repair_threshold) as u64; + if *entry >= repair_threshold_stake { + self.slots_to_repair.insert(*slot); + } } } - Some(record) + false }
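A small numeric sketch of the per-epoch repair cutoff computed in both loops above (the 0.42 value is an assumption matching the "< 42%" comments in the tests below; the real constant is REPAIR_THRESHOLD):

    fn repair_cutoff_example() {
        let repair_threshold: f64 = 0.42; // assumed value of REPAIR_THRESHOLD
        let epoch_total_stake: u64 = 1000;
        // A slot enters slots_to_repair once the stake voting for it in its
        // epoch reaches this cutoff, and drops out if it falls back below.
        let repair_threshold_stake = (epoch_total_stake as f64 * repair_threshold) as u64;
        assert_eq!(repair_threshold_stake, 420);
    }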
- pub(crate) fn active_percent(&self) -> f64 { - let total_stake = self.epoch_stakes.total_stake(); - let total_active_stake = self.active_peers.iter().fold(0, |sum: u64, pubkey| { - sum.saturating_add(Self::validator_stake(&self.epoch_stakes, pubkey)) - }); - total_active_stake as f64 / total_stake as f64 * 100.0 + fn update_epoch_info( + &mut self, + from: &Pubkey, + last_vote_epoch: Epoch, + old_last_vote_epoch: Option<Epoch>, + ) { + if Some(last_vote_epoch) < old_last_vote_epoch { + // We only have two entries so old epoch must be the second one. + let entry = self.epoch_info_vec.last_mut().unwrap(); + if let Some(stake) = self.root_bank.epoch_node_id_to_stake(entry.epoch, from) { + entry.actively_voting_for_this_epoch_stake = entry + .actively_voting_for_this_epoch_stake + .checked_sub(stake) + .unwrap(); + } + } else { + for entry in self.epoch_info_vec.iter_mut() { + if let Some(stake) = self.root_bank.epoch_node_id_to_stake(entry.epoch, from) { + if old_last_vote_epoch.is_none() { + entry.actively_voting_stake = + entry.actively_voting_stake.checked_add(stake).unwrap(); + } + if Some(entry.epoch) > old_last_vote_epoch && entry.epoch <= last_vote_epoch { + entry.actively_voting_for_this_epoch_stake = entry + .actively_voting_for_this_epoch_stake + .checked_add(stake) + .unwrap(); + } + } + } + } } - pub(crate) fn slots_to_repair_iter(&self) -> impl Iterator<Item = &Slot> { - self.slots_to_repair.iter() + pub(crate) fn min_active_percent(&self) -> f64 { + self.epoch_info_vec + .iter() + .filter(|info| { + info.actively_voting_for_this_epoch_stake as f64 / info.total_stake as f64 + > EPOCH_CONSIDERED_FOR_EXIT_THRESHOLD + }) + .map(|info| info.actively_voting_stake as f64 / info.total_stake as f64 * 100.0) + .min_by(|a, b| a.partial_cmp(b).unwrap()) + .unwrap_or(0.0) } - // TODO(wen): use better epoch stake and add a test later. - fn total_active_stake(&self) -> u64 { - self.active_peers.iter().fold(0, |sum: u64, pubkey| { - sum.saturating_add(Self::validator_stake(&self.epoch_stakes, pubkey)) - }) + pub(crate) fn slots_to_repair_iter(&self) -> impl Iterator<Item = &Slot> { + self.slots_to_repair.iter() } pub(crate) fn get_final_result(self) -> LastVotedForkSlotsFinalResult { - let total_active_stake = self.total_active_stake(); LastVotedForkSlotsFinalResult { slots_stake_map: self.slots_stake_map, - total_active_stake, + epoch_info_vec: self.epoch_info_vec, } } } @@ -172,18 +281,20 @@ mod tests { use { crate::{ - last_voted_fork_slots_aggregate::LastVotedForkSlotsAggregate, - solana::wen_restart_proto::LastVotedForkSlotsRecord, + last_voted_fork_slots_aggregate::*, solana::wen_restart_proto::LastVotedForkSlotsRecord, }, solana_gossip::restart_crds_values::RestartLastVotedForkSlots, - solana_program::{clock::Slot, pubkey::Pubkey}, + solana_program::clock::Slot, solana_runtime::{ bank::Bank, + epoch_stakes::EpochStakes, genesis_utils::{ create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, }, }, solana_sdk::{hash::Hash, signature::Signer, timing::timestamp}, + solana_vote::vote_account::VoteAccount, + solana_vote_program::vote_state::create_account_with_authorized, }; const TOTAL_VALIDATOR_COUNT: u16 = 10; @@ -218,9 +329,8 @@ ]; TestAggregateInitResult { slots_aggregate: LastVotedForkSlotsAggregate::new( - root_slot, + root_bank, REPAIR_THRESHOLD, - root_bank.epoch_stakes(root_bank.epoch()).unwrap(), &last_voted_fork_slots, &validator_voting_keypairs[MY_INDEX].node_keypair.pubkey(), ), @@ -234,7 +344,9 @@ fn test_aggregate() { let mut test_state = test_aggregate_init(); let root_slot = test_state.root_slot; - let initial_num_active_validators = 3; + // Until 33% of the stake votes, the percentage should be 0.
+ assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); + let initial_num_active_validators = 2; for validator_voting_keypair in test_state .validator_voting_keypairs .iter() @@ -261,9 +373,44 @@ mod tests { }), ); } + assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); + assert!(test_state + .slots_aggregate + .slots_to_repair_iter() + .next() + .is_none()); + + // Now add one more validator, min_active_percent should be 40% but repair + // is still empty (< 42%). + let new_active_validator = test_state.validator_voting_keypairs + [initial_num_active_validators] + .node_keypair + .pubkey(); + let now = timestamp(); + let new_active_validator_last_voted_slots = RestartLastVotedForkSlots::new( + new_active_validator, + now, + &test_state.last_voted_fork_slots, + Hash::default(), + SHRED_VERSION, + ) + .unwrap(); + assert_eq!( + test_state + .slots_aggregate + .aggregate(new_active_validator_last_voted_slots), + Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: Hash::default().to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: now, + }), + ); + let expected_active_percent = + (initial_num_active_validators + 2) as f64 / TOTAL_VALIDATOR_COUNT as f64 * 100.0; assert_eq!( - test_state.slots_aggregate.active_percent(), - (initial_num_active_validators + 1) as f64 / TOTAL_VALIDATOR_COUNT as f64 * 100.0 + test_state.slots_aggregate.min_active_percent(), + expected_active_percent ); assert!(test_state .slots_aggregate @@ -271,6 +418,7 @@ mod tests { .next() .is_none()); + // Add one more validator, then repair is > 42% and no longer empty. let new_active_validator = test_state.validator_voting_keypairs [initial_num_active_validators + 1] .node_keypair @@ -296,9 +444,9 @@ mod tests { }), ); let expected_active_percent = - (initial_num_active_validators + 2) as f64 / TOTAL_VALIDATOR_COUNT as f64 * 100.0; + (initial_num_active_validators + 3) as f64 / TOTAL_VALIDATOR_COUNT as f64 * 100.0; assert_eq!( - test_state.slots_aggregate.active_percent(), + test_state.slots_aggregate.min_active_percent(), expected_active_percent ); let mut actual_slots = @@ -306,7 +454,8 @@ mod tests { actual_slots.sort(); assert_eq!(actual_slots, test_state.last_voted_fork_slots); - let replace_message_validator = test_state.validator_voting_keypairs[2] + let replace_message_validator = test_state.validator_voting_keypairs + [initial_num_active_validators] .node_keypair .pubkey(); // Allow specific validator to replace message. @@ -331,31 +480,7 @@ mod tests { }), ); assert_eq!( - test_state.slots_aggregate.active_percent(), - expected_active_percent - ); - let mut actual_slots = - Vec::from_iter(test_state.slots_aggregate.slots_to_repair_iter().cloned()); - actual_slots.sort(); - assert_eq!(actual_slots, vec![root_slot + 1]); - - // test that zero stake validator is ignored. 
- let random_pubkey = Pubkey::new_unique(); - assert_eq!( - test_state.slots_aggregate.aggregate( - RestartLastVotedForkSlots::new( - random_pubkey, - timestamp(), - &[root_slot + 1, root_slot + 4, root_slot + 5], - Hash::default(), - SHRED_VERSION, - ) - .unwrap(), - ), - None, - ); - assert_eq!( - test_state.slots_aggregate.active_percent(), + test_state.slots_aggregate.min_active_percent(), expected_active_percent ); let mut actual_slots = @@ -379,6 +504,35 @@ ), None, ); + + assert_eq!( + test_state.slots_aggregate.get_final_result(), + LastVotedForkSlotsFinalResult { + slots_stake_map: vec![ + (root_slot + 1, 500), + (root_slot + 2, 400), + (root_slot + 3, 400), + (root_slot + 4, 100), + (root_slot + 5, 100), + ] + .into_iter() + .collect(), + epoch_info_vec: vec![ + LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 500, + actively_voting_for_this_epoch_stake: 500, + }, + LastVotedForkSlotsEpochInfo { + epoch: 1, + total_stake: 1000, + actively_voting_stake: 500, + actively_voting_for_this_epoch_stake: 0, + } + ], + }, + ); } #[test] @@ -393,7 +547,7 @@ last_vote_bankhash: last_vote_bankhash.to_string(), shred_version: SHRED_VERSION as u32, }; - assert_eq!(test_state.slots_aggregate.active_percent(), 10.0); + assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); assert_eq!( test_state .slots_aggregate .aggregate_from_record( &test_state.validator_voting_keypairs[0] .node_keypair .pubkey() .to_string(), &record, ) .unwrap(), Some(record.clone()), ); - assert_eq!(test_state.slots_aggregate.active_percent(), 20.0); + // Before 33% has voted for a slot in this epoch, the percentage should be 0. + assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); + for i in 1..3 { + assert_eq!(test_state.slots_aggregate.min_active_percent(), 0.0); + let pubkey = test_state.validator_voting_keypairs[i] + .node_keypair + .pubkey(); + let now = timestamp(); + let last_voted_fork_slots = RestartLastVotedForkSlots::new( + pubkey, + now, + &test_state.last_voted_fork_slots, + last_vote_bankhash, + SHRED_VERSION, + ) + .unwrap(); + assert_eq!( + test_state.slots_aggregate.aggregate(last_voted_fork_slots), + Some(LastVotedForkSlotsRecord { + wallclock: now, + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + }), + ); + } + assert_eq!(test_state.slots_aggregate.min_active_percent(), 40.0); // Now if you get the same result from Gossip again, it should be ignored. assert_eq!( test_state.slots_aggregate.aggregate( @@ -451,26 +631,7 @@ }), ); // percentage doesn't change since it's a replace. - assert_eq!(test_state.slots_aggregate.active_percent(), 20.0); - - // Record from validator with zero stake should be ignored. - assert_eq!( - test_state - .slots_aggregate - .aggregate_from_record( - &Pubkey::new_unique().to_string(), - &LastVotedForkSlotsRecord { - wallclock: timestamp(), - last_voted_fork_slots: vec![root_slot + 10, root_slot + 300], - last_vote_bankhash: Hash::new_unique().to_string(), - shred_version: SHRED_VERSION as u32, - } - ) - .unwrap(), - None, - ); - // percentage doesn't change since the previous aggregate is ignored. - assert_eq!(test_state.slots_aggregate.active_percent(), 20.0); + assert_eq!(test_state.slots_aggregate.min_active_percent(), 40.0); // Record from my pubkey should be ignored.
assert_eq!( test_state .slots_aggregate .aggregate_from_record( &test_state.validator_voting_keypairs[MY_INDEX] .node_keypair .pubkey() .to_string(), &LastVotedForkSlotsRecord { wallclock: timestamp(), last_voted_fork_slots: vec![root_slot + 10, root_slot + 300], last_vote_bankhash: Hash::new_unique().to_string(), shred_version: SHRED_VERSION as u32, } ) .unwrap(), None, ); + assert_eq!( + test_state.slots_aggregate.get_final_result(), + LastVotedForkSlotsFinalResult { + slots_stake_map: vec![ + (root_slot + 1, 400), + (root_slot + 2, 400), + (root_slot + 3, 400), + (root_slot + 4, 100), + ] + .into_iter() + .collect(), + epoch_info_vec: vec![ + LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 400, + actively_voting_for_this_epoch_stake: 400, + }, + LastVotedForkSlotsEpochInfo { + epoch: 1, + total_stake: 1000, + actively_voting_stake: 400, + actively_voting_for_this_epoch_stake: 0, + } + ], + }, + ); } #[test] @@ -554,4 +742,84 @@ ) .is_err()); } + + #[test] + fn test_aggregate_init_across_epoch() { + let validator_voting_keypairs: Vec<_> = (0..TOTAL_VALIDATOR_COUNT) + .map(|_| ValidatorVoteKeypairs::new_rand()) + .collect(); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts( + 10_000, + &validator_voting_keypairs, + vec![100; validator_voting_keypairs.len()], + ); + let (_, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let root_bank = bank_forks.read().unwrap().root_bank(); + // Add bank 1 linking directly to 0, tweak its epoch_stakes, and then add it to bank_forks. + let mut new_root_bank = Bank::new_from_parent(root_bank.clone(), &Pubkey::default(), 1); + + // For epoch 1, let our validator have 90% of the stake. + let vote_accounts_hash_map = validator_voting_keypairs .iter() .enumerate() .map(|(i, keypairs)| { let stake = if i == MY_INDEX { 900 * (TOTAL_VALIDATOR_COUNT - 1) as u64 } else { 100 }; let authorized_voter = keypairs.vote_keypair.pubkey(); let node_id = keypairs.node_keypair.pubkey(); ( authorized_voter, ( stake, VoteAccount::try_from(create_account_with_authorized( &node_id, &authorized_voter, &node_id, 0, 100, )) .unwrap(), ), ) }) .collect(); + let epoch1_epoch_stakes = EpochStakes::new_for_tests(vote_accounts_hash_map, 1); + new_root_bank.set_epoch_stakes_for_test(1, epoch1_epoch_stakes); + + let last_voted_fork_slots = vec![root_bank.slot() + 1, root_bank.get_slots_in_epoch(0) + 1]; + let slots_aggregate = LastVotedForkSlotsAggregate::new( + Arc::new(new_root_bank), + REPAIR_THRESHOLD, + &last_voted_fork_slots, + &validator_voting_keypairs[MY_INDEX].node_keypair.pubkey(), ); + assert_eq!( + slots_aggregate.get_final_result(), + LastVotedForkSlotsFinalResult { + slots_stake_map: vec![ + (root_bank.slot() + 1, 100), + (root_bank.get_slots_in_epoch(0) + 1, 8100), + ] + .into_iter() + .collect(), + epoch_info_vec: vec![ + LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 100, + actively_voting_for_this_epoch_stake: 100, + }, + LastVotedForkSlotsEpochInfo { + epoch: 1, + total_stake: 9000, + actively_voting_stake: 8100, + actively_voting_for_this_epoch_stake: 8100, + } + ], + } + ); + } } diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs index 67b148b17149b5..0fa7ec1cb65f3e 100644 --- a/wen-restart/src/wen_restart.rs +++ b/wen-restart/src/wen_restart.rs @@ -4,13 +4,13 @@ use { crate::{ heaviest_fork_aggregate::HeaviestForkAggregate, last_voted_fork_slots_aggregate::{ - LastVotedForkSlotsAggregate, LastVotedForkSlotsFinalResult, + LastVotedForkSlotsAggregate, LastVotedForkSlotsEpochInfo, LastVotedForkSlotsFinalResult, }, solana::wen_restart_proto::{ self, GenerateSnapshotRecord, HeaviestForkAggregateFinal, HeaviestForkAggregateRecord,
HeaviestForkRecord, LastVotedForkSlotsAggregateFinal, - LastVotedForkSlotsAggregateRecord, LastVotedForkSlotsRecord, State as RestartState, - WenRestartProgress, + LastVotedForkSlotsAggregateRecord, LastVotedForkSlotsEpochInfoRecord, + LastVotedForkSlotsRecord, State as RestartState, WenRestartProgress, }, }, anyhow::Result, @@ -27,7 +27,10 @@ use { blockstore_processor::{process_single_slot, ConfirmationProgress, ProcessOptions}, leader_schedule_cache::LeaderScheduleCache, }, - solana_program::{clock::Slot, hash::Hash}, + solana_program::{ + clock::{Epoch, Slot}, + hash::Hash, + }, solana_runtime::{ accounts_background_service::AbsRequestSender, bank::Bank, @@ -216,9 +219,8 @@ pub(crate) fn aggregate_restart_last_voted_fork_slots( let root_bank = bank_forks.read().unwrap().root_bank(); let root_slot = root_bank.slot(); let mut last_voted_fork_slots_aggregate = LastVotedForkSlotsAggregate::new( - root_slot, + root_bank.clone(), REPAIR_THRESHOLD, - root_bank.epoch_stakes(root_bank.epoch()).unwrap(), last_voted_fork_slots, &cluster_info.id(), ); @@ -260,7 +262,7 @@ pub(crate) fn aggregate_restart_last_voted_fork_slots( // Because all operations on the aggregate are called from this single thread, we can // fetch all results separately without worrying about them being out of sync. We can // also use returned iterator without the vector changing underneath us. - let active_percent = last_voted_fork_slots_aggregate.active_percent(); + let active_percent = last_voted_fork_slots_aggregate.min_active_percent(); let mut filtered_slots: Vec<Slot>; { filtered_slots = last_voted_fork_slots_aggregate @@ -303,6 +305,23 @@ pub(crate) fn aggregate_restart_last_voted_fork_slots( Ok(last_voted_fork_slots_aggregate.get_final_result()) } +fn is_over_stake_threshold( + epoch_info_vec: &[LastVotedForkSlotsEpochInfo], + epoch: Epoch, + stake: &u64, +) -> bool { + epoch_info_vec + .iter() + .find(|info| info.epoch == epoch) + .map_or(false, |info| { + let threshold = info + .actively_voting_stake + .checked_sub((info.total_stake as f64 * HEAVIEST_FORK_THRESHOLD_DELTA) as u64) + .unwrap(); + stake >= &threshold + }) +} +
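Hypothetical usage of the is_over_stake_threshold() helper added above, assuming HEAVIEST_FORK_THRESHOLD_DELTA is 0.38 per the "38%" comment that follows:

    fn threshold_example() {
        let epoch_infos = vec![LastVotedForkSlotsEpochInfo {
            epoch: 0,
            total_stake: 1000,
            actively_voting_stake: 900,
            actively_voting_for_this_epoch_stake: 900,
        }];
        // Cutoff is 900 - (1000 * 0.38) = 520 stake: a slot backed by 600
        // stake passes, one backed by 500 does not.
        assert!(is_over_stake_threshold(&epoch_infos, 0, &600));
        assert!(!is_over_stake_threshold(&epoch_infos, 0, &500));
    }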
// Verify that all blocks with at least (active_stake_percent - 38%) of the stake form a // single chain from the root, and use the highest slot in the blocks as the heaviest fork. // Please see SIMD 46 "gossip current heaviest fork" for correctness proof. @@ -314,16 +333,17 @@ pub(crate) fn find_heaviest_fork( ) -> Result<(Slot, Hash)> { let root_bank = bank_forks.read().unwrap().root_bank(); let root_slot = root_bank.slot(); - // TODO: Should use better epoch_stakes later. - let epoch_stake = root_bank.epoch_stakes(root_bank.epoch()).unwrap(); - let total_stake = epoch_stake.total_stake(); - let stake_threshold = aggregate_final_result - .total_active_stake - .saturating_sub((HEAVIEST_FORK_THRESHOLD_DELTA * total_stake as f64) as u64); let mut slots = aggregate_final_result .slots_stake_map .iter() - .filter(|(slot, stake)| **slot > root_slot && **stake > stake_threshold) + .filter(|(slot, stake)| { + **slot > root_slot + && is_over_stake_threshold( + &aggregate_final_result.epoch_info_vec, + root_bank.epoch_schedule().get_epoch(**slot), + stake, + ) + }) .map(|(slot, _)| *slot) .collect::<Vec<Slot>>(); slots.sort(); @@ -604,8 +624,6 @@ pub(crate) fn aggregate_restart_heaviest_fork( progress: &mut WenRestartProgress, ) -> Result<()> { let root_bank = bank_forks.read().unwrap().root_bank(); - let epoch_stakes = root_bank.epoch_stakes(root_bank.epoch()).unwrap(); - let total_stake = epoch_stakes.total_stake(); if progress.my_heaviest_fork.is_none() { return Err(WenRestartError::MalformedProgress( RestartState::HeaviestFork, @@ -616,6 +634,13 @@ pub(crate) fn aggregate_restart_heaviest_fork( let my_heaviest_fork = progress.my_heaviest_fork.clone().unwrap(); let heaviest_fork_slot = my_heaviest_fork.slot; let heaviest_fork_hash = Hash::from_str(&my_heaviest_fork.bankhash)?; + // When checking whether to exit aggregate_restart_heaviest_fork, use the epoch_stakes + // associated with the heaviest fork slot we picked. This ensures that everyone agreeing + // with me uses the same EpochStakes to calculate the supermajority threshold. + let epoch_stakes = root_bank + .epoch_stakes(root_bank.epoch_schedule().get_epoch(heaviest_fork_slot)) + .unwrap(); + let total_stake = epoch_stakes.total_stake(); let adjusted_threshold_percent = wait_for_supermajority_threshold_percent .saturating_sub(HEAVIEST_FORK_DISAGREE_THRESHOLD_PERCENT.round() as u64); // The threshold for supermajority should definitely be higher than 67%.
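A sketch of the adjusted exit threshold computed above. The 80% matches the wait_for_supermajority_threshold_percent used in the tests; the 9.0% disagreement allowance is purely hypothetical, shown only for its arithmetic effect:

    fn adjusted_threshold_example() {
        let wait_for_supermajority_threshold_percent: u64 = 80;
        let heaviest_fork_disagree_threshold_percent: f64 = 9.0; // hypothetical
        let adjusted_threshold_percent = wait_for_supermajority_threshold_percent
            .saturating_sub(heaviest_fork_disagree_threshold_percent.round() as u64);
        // Exit requires this much stake to agree on the heaviest fork, and it
        // must stay above the 67% supermajority line.
        assert_eq!(adjusted_threshold_percent, 71);
        assert!(adjusted_threshold_percent > 67);
    }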
@@ -963,7 +988,17 @@ pub(crate) fn increment_and_write_wen_restart_records( if let Some(aggregate_record) = progress.last_voted_fork_slots_aggregate.as_mut() { aggregate_record.final_result = Some(LastVotedForkSlotsAggregateFinal { slots_stake_map: aggregate_final_result.slots_stake_map.clone(), - total_active_stake: aggregate_final_result.total_active_stake, + epoch_infos: aggregate_final_result + .epoch_info_vec + .iter() + .map(|info| LastVotedForkSlotsEpochInfoRecord { + epoch: info.epoch, + total_stake: info.total_stake, + actively_voting_stake: info.actively_voting_stake, + actively_voting_for_this_epoch_stake: info + .actively_voting_for_this_epoch_stake, + }) + .collect(), }); } WenRestartProgressInternalState::FindHeaviestFork { @@ -1091,7 +1126,17 @@ pub(crate) fn initialize( r.final_result.as_ref().map(|result| { LastVotedForkSlotsFinalResult { slots_stake_map: result.slots_stake_map.clone(), - total_active_stake: result.total_active_stake, + epoch_info_vec: result + .epoch_infos + .iter() + .map(|info| LastVotedForkSlotsEpochInfo { + epoch: info.epoch, + total_stake: info.total_stake, + actively_voting_stake: info.actively_voting_stake, + actively_voting_for_this_epoch_stake: info + .actively_voting_for_this_epoch_stake, + }) + .collect(), } }) }), @@ -1112,7 +1157,17 @@ pub(crate) fn initialize( .as_ref() .map(|result| LastVotedForkSlotsFinalResult { slots_stake_map: result.slots_stake_map.clone(), - total_active_stake: result.total_active_stake, + epoch_info_vec: result + .epoch_infos + .iter() + .map(|info| LastVotedForkSlotsEpochInfo { + epoch: info.epoch, + total_stake: info.total_stake, + actively_voting_stake: info.actively_voting_stake, + actively_voting_for_this_epoch_stake: info + .actively_voting_for_this_epoch_stake, + }) + .collect(), }) }) .ok_or(WenRestartError::MalformedProgress( @@ -1184,6 +1239,7 @@ mod tests { vote::state::{TowerSync, Vote}, }, solana_runtime::{ + epoch_stakes::EpochStakes, genesis_utils::{ create_genesis_config_with_vote_accounts, GenesisConfigInfo, ValidatorVoteKeypairs, }, @@ -1192,16 +1248,19 @@ mod tests { snapshot_utils::build_incremental_snapshot_archive_path, }, solana_sdk::{ + pubkey::Pubkey, signature::{Keypair, Signer}, timing::timestamp, }, solana_streamer::socket::SocketAddrSpace, + solana_vote::vote_account::VoteAccount, + solana_vote_program::vote_state::create_account_with_authorized, std::{fs::remove_file, sync::Arc, thread::Builder}, tempfile::TempDir, }; const SHRED_VERSION: u16 = 2; - const EXPECTED_SLOTS: Slot = 90; + const EXPECTED_SLOTS: Slot = 40; const TICKS_PER_SLOT: u64 = 2; const TOTAL_VALIDATOR_COUNT: u16 = 20; const MY_INDEX: usize = TOTAL_VALIDATOR_COUNT as usize - 1; @@ -1636,6 +1695,8 @@ mod tests { .iter() .map(|slot| (*slot, total_active_stake_during_heaviest_fork)), ); + // We are simulating 5% joined LastVotedForkSlots but not HeaviestFork. + let voted_stake = total_active_stake_during_heaviest_fork + 100; assert_eq!( progress, WenRestartProgress { @@ -1650,8 +1711,20 @@ mod tests { received: expected_received_last_voted_fork_slots, final_result: Some(LastVotedForkSlotsAggregateFinal { slots_stake_map: expected_slots_stake_map, - // We are simulating 5% joined LastVotedForkSlots but not HeaviestFork. 
- total_active_stake: total_active_stake_during_heaviest_fork + 100, + epoch_infos: vec![ + LastVotedForkSlotsEpochInfoRecord { + epoch: 0, + total_stake: 2000, + actively_voting_stake: voted_stake, + actively_voting_for_this_epoch_stake: voted_stake, + }, + LastVotedForkSlotsEpochInfoRecord { + epoch: 1, + total_stake: 2000, + actively_voting_stake: voted_stake, + actively_voting_for_this_epoch_stake: voted_stake, + }, + ], }), }), my_heaviest_fork: Some(HeaviestForkRecord { @@ -1693,6 +1766,180 @@ std::fs::set_permissions(wen_restart_proto_path, perms).unwrap(); } + #[test] + fn test_wen_restart_divergence_across_epoch_boundary() { + solana_logger::setup(); + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let test_state = wen_restart_test_init(&ledger_path); + let last_vote_slot = test_state.last_voted_fork_slots[0]; + + let old_root_bank = test_state.bank_forks.read().unwrap().root_bank(); + + // Add bank last_vote + 1 linking directly to 0, tweak its epoch_stakes, and then add it to bank_forks. + let new_root_slot = last_vote_slot + 1; + let mut new_root_bank = + Bank::new_from_parent(old_root_bank.clone(), &Pubkey::default(), new_root_slot); + assert_eq!(new_root_bank.epoch(), 1); + + // For epoch 2, make validator 0 have 90% of the stake. + let vote_accounts_hash_map = test_state .validator_voting_keypairs .iter() .enumerate() .map(|(i, keypairs)| { let stake = if i == 0 { 900 * (TOTAL_VALIDATOR_COUNT - 1) as u64 } else { 100 }; let authorized_voter = keypairs.vote_keypair.pubkey(); let node_id = keypairs.node_keypair.pubkey(); ( authorized_voter, ( stake, VoteAccount::try_from(create_account_with_authorized( &node_id, &authorized_voter, &node_id, 0, 100, )) .unwrap(), ), ) }) .collect(); + let epoch2_epoch_stakes = EpochStakes::new_for_tests(vote_accounts_hash_map, 2); + new_root_bank.set_epoch_stakes_for_test(2, epoch2_epoch_stakes); + let _ = insert_slots_into_blockstore( test_state.blockstore.clone(), 0, &[new_root_slot], TICKS_PER_SLOT, old_root_bank.last_blockhash(), ); + let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new() .thread_name(|i| format!("solReplayTx{i:02}")) .build() .expect("new rayon threadpool"); + let recyclers = VerifyRecyclers::default(); + let mut timing = ExecuteTimings::default(); + let opts = ProcessOptions::default(); + let mut progress = ConfirmationProgress::new(old_root_bank.last_blockhash()); + let last_vote_bankhash = new_root_bank.hash(); + let bank_with_scheduler = test_state .bank_forks .write() .unwrap() .insert_from_ledger(new_root_bank); + if let Err(e) = process_single_slot( &test_state.blockstore, &bank_with_scheduler, &replay_tx_thread_pool, &opts, &recyclers, &mut progress, None, None, None, None, &mut timing, ) { panic!("process_single_slot failed: {:?}", e); } + + { let mut bank_forks = test_state.bank_forks.write().unwrap(); let _ = bank_forks.set_root( last_vote_slot + 1, &AbsRequestSender::default(), Some(last_vote_slot + 1), ); } + let new_root_bank = test_state .bank_forks .read() .unwrap() .get(last_vote_slot + 1) .unwrap(); + + // Add two more banks: old_epoch_bank (slot = last_vote_slot + 2) and + // new_epoch_bank (slot = first slot in epoch 2). They both link to last_vote_slot + 1. + // old_epoch_bank has everyone's votes except 0, so it has > 66% stake in the old epoch. + // new_epoch_bank has 0's vote, so it has > 66% stake in the new epoch.
+ let old_epoch_slot = new_root_slot + 1; + let _ = insert_slots_into_blockstore( test_state.blockstore.clone(), new_root_bank.slot(), &[old_epoch_slot], TICKS_PER_SLOT, new_root_bank.last_blockhash(), ); + let new_epoch_slot = new_root_bank.epoch_schedule().get_first_slot_in_epoch(2); + let _ = insert_slots_into_blockstore( test_state.blockstore.clone(), new_root_slot, &[new_epoch_slot], TICKS_PER_SLOT, new_root_bank.last_blockhash(), ); + let mut rng = rand::thread_rng(); + // Everyone except 0 votes for old_epoch_bank. + for (index, keypairs) in test_state .validator_voting_keypairs .iter() .take(TOTAL_VALIDATOR_COUNT as usize - 1) .enumerate() { + let node_pubkey = keypairs.node_keypair.pubkey(); + let node = ContactInfo::new_rand(&mut rng, Some(node_pubkey)); + let last_vote_hash = Hash::new_unique(); + let now = timestamp(); + // Validator 0 votes for the new_epoch_bank while everyone else votes for old_epoch_bank. + let last_voted_fork_slots = if index == 0 { vec![new_epoch_slot, new_root_slot, 0] } else { vec![old_epoch_slot, new_root_slot, 0] }; + push_restart_last_voted_fork_slots( test_state.cluster_info.clone(), &node, &last_voted_fork_slots, &last_vote_hash, &keypairs.node_keypair, now, ); + } + + assert_eq!( wait_for_wen_restart(WenRestartConfig { wen_restart_path: test_state.wen_restart_proto_path, last_vote: VoteTransaction::from(Vote::new( vec![new_root_slot], last_vote_bankhash )), blockstore: test_state.blockstore, cluster_info: test_state.cluster_info, bank_forks: test_state.bank_forks, wen_restart_repair_slots: Some(Arc::new(RwLock::new(Vec::new()))), wait_for_supermajority_threshold_percent: 80, snapshot_config: SnapshotConfig::default(), accounts_background_request_sender: AbsRequestSender::default(), genesis_config_hash: test_state.genesis_config_hash, exit: Arc::new(AtomicBool::new(false)), }) .unwrap_err() .downcast::<WenRestartError>() .unwrap(), WenRestartError::BlockNotLinkedToExpectedParent( new_epoch_slot, Some(new_root_slot), old_epoch_slot ) ); } #[test] fn test_wen_restart_initialize() { solana_logger::setup(); @@ -1889,7 +2136,20 @@ received: HashMap::new(), final_result: Some(LastVotedForkSlotsAggregateFinal { slots_stake_map: HashMap::new(), - total_active_stake: 1000, + epoch_infos: vec![ + LastVotedForkSlotsEpochInfoRecord { + epoch: 1, + total_stake: 1000, + actively_voting_stake: 800, + actively_voting_for_this_epoch_stake: 800, + }, + LastVotedForkSlotsEpochInfoRecord { + epoch: 2, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }, + ], }), }), ..Default::default() @@ -1906,7 +2166,20 @@ WenRestartProgressInternalState::FindHeaviestFork { aggregate_final_result: LastVotedForkSlotsFinalResult { slots_stake_map: HashMap::new(), - total_active_stake: 1000, + epoch_info_vec: vec![ + LastVotedForkSlotsEpochInfo { + epoch: 1, + total_stake: 1000, + actively_voting_stake: 800, + actively_voting_for_this_epoch_stake: 800, + }, + LastVotedForkSlotsEpochInfo { + epoch: 2, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + } + ], }, my_heaviest_fork: progress.my_heaviest_fork.clone(), }, @@ -2212,7 +2485,12 @@ received: HashMap::new(), final_result: Some(LastVotedForkSlotsAggregateFinal { slots_stake_map: vec![(0, 900), (1, 800)].into_iter().collect(), - total_active_stake: 900, + epoch_infos: vec![LastVotedForkSlotsEpochInfoRecord {
epoch: 0, + total_stake: 2000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }], }), }); let my_heaviest_fork = Some(HeaviestForkRecord { @@ -2264,13 +2542,23 @@ last_voted_fork_slots: vec![0, 1], aggregate_final_result: Some(LastVotedForkSlotsFinalResult { slots_stake_map: expected_slots_stake_map.clone(), - total_active_stake: 900, + epoch_info_vec: vec![LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 2000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }], }), }, WenRestartProgressInternalState::FindHeaviestFork { aggregate_final_result: LastVotedForkSlotsFinalResult { slots_stake_map: expected_slots_stake_map.clone(), - total_active_stake: 900, + epoch_info_vec: vec![LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 2000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }], }, my_heaviest_fork: None, }, @@ -2291,7 +2579,12 @@ WenRestartProgressInternalState::FindHeaviestFork { aggregate_final_result: LastVotedForkSlotsFinalResult { slots_stake_map: expected_slots_stake_map, - total_active_stake: 900, + epoch_info_vec: vec![LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 2000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }], }, my_heaviest_fork: Some(HeaviestForkRecord { slot: 1, @@ -2399,7 +2692,7 @@ let exit = Arc::new(AtomicBool::new(false)); let test_state = wen_restart_test_init(&ledger_path); let last_vote_slot = test_state.last_voted_fork_slots[0]; - let slot_with_no_block = last_vote_slot + 5; + let slot_with_no_block = 1; // This fails because corresponding block is not found, which is wrong, we should have // repaired all eligible blocks when we exit LastVotedForkSlots state. assert_eq!( find_heaviest_fork( LastVotedForkSlotsFinalResult { slots_stake_map: vec![(0, 900), (slot_with_no_block, 800)] .into_iter() .collect(), - total_active_stake: 900, + epoch_info_vec: vec![LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }], }, test_state.bank_forks.clone(), test_state.blockstore.clone(), @@ -2423,8 +2721,13 @@ assert_eq!( find_heaviest_fork( LastVotedForkSlotsFinalResult { - slots_stake_map: vec![(last_vote_slot, 900)].into_iter().collect(), - total_active_stake: 900, + slots_stake_map: vec![(3, 900)].into_iter().collect(), + epoch_info_vec: vec![LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }], }, test_state.bank_forks.clone(), test_state.blockstore.clone(), &exit, ) .unwrap_err() .downcast::<WenRestartError>() .unwrap(), - WenRestartError::BlockNotLinkedToExpectedParent( - last_vote_slot, - Some(last_vote_slot - 1), - 0 - ), + WenRestartError::BlockNotLinkedToExpectedParent(3, Some(2), 0), ); // The following fails because some slot in slots_stake_map doesn't chain to the // one before it.
assert_eq!( find_heaviest_fork( LastVotedForkSlotsFinalResult { - slots_stake_map: vec![(2, 900), (last_vote_slot, 900)].into_iter().collect(), - total_active_stake: 900, + slots_stake_map: vec![(2, 900), (5, 900)].into_iter().collect(), + epoch_info_vec: vec![LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }], }, test_state.bank_forks.clone(), test_state.blockstore.clone(), &exit, ) .unwrap_err() .downcast::<WenRestartError>() .unwrap(), - WenRestartError::BlockNotLinkedToExpectedParent( - last_vote_slot, - Some(last_vote_slot - 1), - 2 - ), + WenRestartError::BlockNotLinkedToExpectedParent(5, Some(4), 2), ); // The following fails because the new slot is not full. let not_full_slot = last_vote_slot + 5; @@ -2489,7 +2789,20 @@ find_heaviest_fork( LastVotedForkSlotsFinalResult { slots_stake_map, - total_active_stake: 900, + epoch_info_vec: vec![ + LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }, + LastVotedForkSlotsEpochInfo { + epoch: 1, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }, + ], }, test_state.bank_forks.clone(), test_state.blockstore.clone(), @@ -2529,7 +2842,20 @@ find_heaviest_fork( LastVotedForkSlotsFinalResult { slots_stake_map, - total_active_stake: 900, + epoch_info_vec: vec![ + LastVotedForkSlotsEpochInfo { + epoch: 0, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }, + LastVotedForkSlotsEpochInfo { + epoch: 1, + total_stake: 1000, + actively_voting_stake: 900, + actively_voting_for_this_epoch_stake: 900, + }, + ], }, test_state.bank_forks.clone(), test_state.blockstore.clone(),
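Closing note: the exit-rule arithmetic behind the across-epoch scenario exercised above, using the figures from test_aggregate_init_across_epoch (a sketch; assumes the patch's LastVotedForkSlotsEpochInfo type and the min_active_percent() semantics introduced in this patch):

    fn across_epoch_exit_example() -> Vec<LastVotedForkSlotsEpochInfo> {
        vec![
            // Epoch 0: 100/1000 = 10% voted for an epoch-0 slot, under the
            // 1/3 bar, so epoch 0 does not gate the exit.
            LastVotedForkSlotsEpochInfo {
                epoch: 0,
                total_stake: 1000,
                actively_voting_stake: 100,
                actively_voting_for_this_epoch_stake: 100,
            },
            // Epoch 1: 8100/9000 = 90% voted for an epoch-1 slot, over the
            // bar, so epoch 1 is considered and contributes 90% active stake;
            // min_active_percent() returns 90.0, clearing the 80% exit bar
            // used in these tests.
            LastVotedForkSlotsEpochInfo {
                epoch: 1,
                total_stake: 9000,
                actively_voting_stake: 8100,
                actively_voting_for_this_epoch_stake: 8100,
            },
        ]
    }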