diff --git a/chain/api/src/chain.rs b/chain/api/src/chain.rs
index b34310e2d3..fb8e8038c1 100644
--- a/chain/api/src/chain.rs
+++ b/chain/api/src/chain.rs
@@ -114,6 +114,11 @@ pub trait ChainReader {
         uncles: &[BlockHeader],
         header: &BlockHeader,
     ) -> Result<GhostdagData>;
+    fn merge_check_and_ghostdata(
+        &self,
+        uncles: &[BlockHeader],
+        header: &BlockHeader,
+    ) -> Result<GhostdagData>;
     fn is_dag_ancestor_of(&self, ancestor: HashValue, descendants: Vec<HashValue>) -> Result<bool>;
     fn get_pruning_height(&self) -> BlockNumber;
     fn get_pruning_config(&self) -> (u64, u64);
diff --git a/chain/mock/src/mock_chain.rs b/chain/mock/src/mock_chain.rs
index c11e8c9766..f2354e12ef 100644
--- a/chain/mock/src/mock_chain.rs
+++ b/chain/mock/src/mock_chain.rs
@@ -259,7 +259,7 @@ impl MockChain {
 
         let MineNewDagBlockInfo {
             tips: pruned_tips,
-            blue_blocks,
+            ghostdata,
             pruning_point,
         } = self.head.dag().calc_mergeset_and_tips(
             previous_pruning,
@@ -270,14 +270,15 @@ impl MockChain {
 
         debug!(
            "tips: {:?}, blue_blocks: {:?}, pruning_point: {:?}",
-            pruned_tips, blue_blocks, pruning_point
+            pruned_tips, ghostdata.mergeset_blues, pruning_point
         );
 
         let (template, _) = self.head.create_block_template_by_header(
             *self.miner.address(),
             selected_header,
             vec![],
-            blue_blocks
+            ghostdata
+                .mergeset_blues
                 .get(1..)
                 .unwrap_or(&[])
                 .iter()
diff --git a/chain/src/chain.rs b/chain/src/chain.rs
index 43ed856e7c..9272008f5e 100644
--- a/chain/src/chain.rs
+++ b/chain/src/chain.rs
@@ -1,7 +1,7 @@
 // Copyright (c) The Starcoin Core Contributors
 // SPDX-License-Identifier: Apache-2.0
 
-use crate::verifier::{BlockVerifier, DagVerifier, DagVerifierWithGhostData, FullVerifier};
+use crate::verifier::{BlockVerifier, DagVerifierForMining, DagVerifierForSync, FullVerifier};
 use anyhow::{bail, ensure, format_err, Ok, Result};
 use sp_utils::stop_watch::{watch, CHAIN_WATCH_NAME};
 use starcoin_accumulator::inmemory::InMemoryAccumulator;
@@ -13,13 +13,15 @@ use starcoin_chain_api::{
     ExcludedTxns, ExecutedBlock, MintedUncleNumber, TransactionInfoWithProof, VerifiedBlock,
     VerifyBlockField,
 };
+use starcoin_config::genesis_config::{G_MERGE_DEPTH, G_PRUNING_FINALITY};
 use starcoin_consensus::Consensus;
 use starcoin_crypto::hash::PlainCryptoHash;
 use starcoin_crypto::HashValue;
-use starcoin_dag::blockdag::{BlockDAG, MineNewDagBlockInfo};
+use starcoin_dag::blockdag::BlockDAG;
 use starcoin_dag::consensusdb::consenses_state::DagState;
 use starcoin_dag::consensusdb::prelude::StoreError;
 use starcoin_dag::consensusdb::schemadb::GhostdagStoreReader;
+use starcoin_dag::types::ghostdata::GhostdagData;
 use starcoin_executor::VMMetrics;
 #[cfg(feature = "force-deploy")]
 use starcoin_force_upgrade::force_upgrade_management::get_force_upgrade_block_number;
@@ -30,6 +32,7 @@ use starcoin_statedb::ChainStateDB;
 use starcoin_storage::Store;
 use starcoin_time_service::TimeService;
 use starcoin_types::block::BlockIdAndNumber;
+use starcoin_types::consensus_header::ConsensusHeader;
 use starcoin_types::contract_event::ContractEventInfo;
 use starcoin_types::filter::Filter;
 use starcoin_types::startup_info::{ChainInfo, ChainStatus};
@@ -48,6 +51,7 @@ use starcoin_vm_types::genesis_config::{ChainId, ConsensusStrategy};
 use starcoin_vm_types::on_chain_config::FlexiDagConfigV2;
 use starcoin_vm_types::on_chain_resource::Epoch;
 use std::cmp::min;
+use std::collections::HashSet;
 use std::iter::Extend;
 use std::option::Option::{None, Some};
 use std::sync::atomic::{AtomicBool, Ordering};
@@ -316,25 +320,16 @@ impl BlockChain {
             (self.dag().ghostdata(&tips)?, tips)
         };
 
-        let MineNewDagBlockInfo {
-            tips,
-            blue_blocks,
-            pruning_point: _,
-        } = {
-            let blue_blocks = ghostdata.mergeset_blues.clone()[1..].to_vec();
-            MineNewDagBlockInfo {
-                tips,
-                blue_blocks,
-                pruning_point, // TODO: new test cases will need pass this field if they have some special requirements.
-            }
-        };
-
         debug!(
             "Blue blocks:{:?} in chain/create_block_template_by_header",
-            blue_blocks
+            ghostdata.mergeset_blues,
         );
-        let blue_blocks = blue_blocks
+        let blue_blocks = ghostdata
+            .mergeset_blues
+            .as_ref()
+            .clone()
             .into_iter()
+            .skip(1)
             .map(|block| self.storage.get_block_by_hash(block))
             .collect::<Result<Vec<Option<Block>>>>()?
             .into_iter()
@@ -1240,7 +1235,7 @@ impl ChainReader for BlockChain {
     }
 
     fn verify(&self, block: Block) -> Result<VerifiedBlock> {
-        DagVerifier::verify_block(self, block)
+        DagVerifierForMining::verify_block(self, block)
     }
 
     fn execute(&mut self, verified_block: VerifiedBlock) -> Result<ExecutedBlock> {
@@ -1390,34 +1385,70 @@ impl ChainReader for BlockChain {
             .get_block_header_by_hash(header.parent_hash())?
             .ok_or_else(|| format_err!("cannot find parent block header"))?;
         let next_ghostdata = self.dag().verify_and_ghostdata(uncles, header)?;
-        let (pruning_depth, pruning_finality) = self.get_pruning_config();
         if self.status().head().pruning_point() != HashValue::zero() {
-            let previous_ghostdata = if previous_header.pruning_point() == HashValue::zero() {
-                let genesis = self
-                    .storage
-                    .get_genesis()?
-                    .ok_or_else(|| format_err!("the genesis id is none!"))?;
-                self.dag().storage.ghost_dag_store.get_data(genesis)?
-            } else {
-                self.dag()
-                    .storage
-                    .ghost_dag_store
-                    .get_data(previous_header.pruning_point())?
-            };
-
-            self.dag().verify_pruning_point(
+            self.verify_pruning_point(
                 previous_header.pruning_point(),
-                previous_ghostdata.as_ref(),
                 header.pruning_point(),
                 &next_ghostdata,
-                pruning_depth,
-                pruning_finality,
             )?;
         }
 
         Ok(next_ghostdata)
     }
 
+    fn merge_check_and_ghostdata(
+        &self,
+        uncles: &[BlockHeader],
+        header: &BlockHeader,
+    ) -> Result<GhostdagData> {
+        let next_ghostdata = self.dag().ghostdata(&header.parents())?;
+        if next_ghostdata
+            .mergeset_blues
+            .iter()
+            .skip(1)
+            .cloned()
+            .collect::<HashSet<_>>()
+            != uncles
+                .iter()
+                .map(|header| header.id())
+                .collect::<HashSet<_>>()
+        {
+            bail!(
+                "Uncle verification failed: Local mergeset blues ({:?}) do not match miner's uncles ({:?}).",
+                next_ghostdata.mergeset_blues.iter().skip(1).collect::<Vec<_>>(),
+                uncles.iter().map(|header| header.id()).collect::<Vec<_>>()
+            );
+        }
+        let previous_header = self
+            .storage
+            .get_block_header_by_hash(header.parent_hash())?
+            .ok_or_else(|| format_err!("cannot find parent block header"))?;
+        if self.status().head().pruning_point() != HashValue::zero() {
+            self.verify_pruning_point(
+                previous_header.pruning_point(),
+                header.pruning_point(),
+                &next_ghostdata,
+            )?;
+        }
+        let previous_pruning_point = if header.pruning_point() == HashValue::zero() {
+            self.storage
+                .get_genesis()?
+                .ok_or_else(|| format_err!("cannot find genesis block"))?
+        } else {
+            header.pruning_point()
+        };
+
+        let _ = self.dag().check_bounded_merge_depth(
+            header.parent_hash(),
+            previous_pruning_point,
+            &next_ghostdata,
+            G_MERGE_DEPTH,
+            G_PRUNING_FINALITY,
+        )?;
+
+        anyhow::Ok(next_ghostdata)
+    }
+
     fn is_dag_ancestor_of(&self, ancestor: HashValue, descendants: Vec<HashValue>) -> Result<bool> {
         self.dag().check_ancestor_of(ancestor, descendants)
     }
@@ -1649,6 +1680,38 @@ impl BlockChain {
             1
         }
     }
+
+    pub fn verify_pruning_point(
+        &self,
+        previous_pruning_point: HashValue,
+        next_pruning_point: HashValue,
+        next_ghostdata: &GhostdagData,
+    ) -> anyhow::Result<()> {
+        let previous_ghostdata = if previous_pruning_point == HashValue::zero() {
+            let genesis = self
+                .storage
+                .get_genesis()?
+                .ok_or_else(|| format_err!("the genesis id is none!"))?;
+            self.dag().storage.ghost_dag_store.get_data(genesis)?
+        } else {
+            self.dag()
+                .storage
+                .ghost_dag_store
+                .get_data(previous_pruning_point)?
+        };
+
+        let (pruning_depth, pruning_finality) = self.get_pruning_config();
+
+        self.dag().verify_pruning_point(
+            previous_pruning_point,
+            previous_ghostdata.as_ref(),
+            next_pruning_point,
+            next_ghostdata,
+            pruning_depth,
+            pruning_finality,
+        )?;
+        anyhow::Ok(())
+    }
 }
 
 impl ChainWriter for BlockChain {
@@ -1666,14 +1729,14 @@
     }
 
     fn apply(&mut self, block: Block) -> Result<ExecutedBlock> {
-        self.apply_with_verifier::<DagVerifier>(block)
+        self.apply_with_verifier::<DagVerifierForMining>(block)
     }
 
     fn chain_state(&mut self) -> &ChainStateDB {
         &self.statedb
     }
 
     fn apply_for_sync(&mut self, block: Block) -> Result<ExecutedBlock> {
-        self.apply_with_verifier::<DagVerifierWithGhostData>(block)
+        self.apply_with_verifier::<DagVerifierForSync>(block)
     }
 }
diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs
index a4381b2522..3ec774e9c4 100644
--- a/chain/src/verifier/mod.rs
+++ b/chain/src/verifier/mod.rs
@@ -426,8 +426,9 @@ impl BasicDagVerifier {
     }
 }
 
-pub struct DagVerifier;
-impl BlockVerifier for DagVerifier {
+// for mining
+pub struct DagVerifierForMining;
+impl BlockVerifier for DagVerifierForMining {
     fn verify_header<R>(current_chain: &R, new_block_header: &BlockHeader) -> Result<()>
     where
         R: ChainReader,
@@ -443,16 +444,15 @@
     where
         R: ChainReader,
     {
-        Ok(Some(BasicDagVerifier::verify_blue_blocks(
-            current_chain,
-            uncles,
-            header,
-        )?))
+        Ok(Some(
+            current_chain.merge_check_and_ghostdata(uncles, header)?,
+        ))
     }
 }
 
-pub struct DagVerifierWithGhostData;
-impl BlockVerifier for DagVerifierWithGhostData {
+// for sync
+pub struct DagVerifierForSync;
+impl BlockVerifier for DagVerifierForSync {
     fn verify_header<R>(current_chain: &R, new_block_header: &BlockHeader) -> Result<()>
     where
         R: ChainReader,
diff --git a/config/src/genesis_config.rs b/config/src/genesis_config.rs
index 69483e1f29..4baed3dfa6 100644
--- a/config/src/genesis_config.rs
+++ b/config/src/genesis_config.rs
@@ -756,6 +756,7 @@ pub static G_BASE_BLOCK_GAS_LIMIT: u64 = 50_000_000; //must big than maximum_num
 
 pub static G_PRUNING_DEPTH: u64 = 17280;
 pub static G_PRUNING_FINALITY: u64 = 8640;
+pub static G_MERGE_DEPTH: u64 = 3600; // the merge depth should be smaller than the pruning finality
 static G_EMPTY_BOOT_NODES: Lazy<Vec<MultiaddrWithPeerId>> = Lazy::new(Vec::new);
 const ONE_DAY: u64 = 86400;
 
diff --git a/flexidag/src/block_depth/block_depth_info.rs b/flexidag/src/block_depth/block_depth_info.rs
new file mode 100644
index 0000000000..13c4eb0db5
--- /dev/null
+++ b/flexidag/src/block_depth/block_depth_info.rs
@@ -0,0 +1,127 @@
+use std::sync::Arc;
+
+use starcoin_crypto::HashValue;
+
+use crate::{
+    consensusdb::{
+        consensus_block_depth::BlockDepthInfoReader,
+        schemadb::{GhostdagStoreReader, ReachabilityStoreReader},
+    },
+    reachability::reachability_service::{MTReachabilityService, ReachabilityService},
+    types::ghostdata::GhostdagData,
+};
+
+#[derive(Clone)]
+pub struct BlockDepthManagerT<
+    S: BlockDepthInfoReader,
+    U: ReachabilityStoreReader,
+    V: GhostdagStoreReader,
+> {
+    depth_store: Arc<S>,
+    reachability_service: MTReachabilityService<U>,
+    ghostdag_store: V,
+}
+
+impl<S: BlockDepthInfoReader, U: ReachabilityStoreReader, V: GhostdagStoreReader>
+    BlockDepthManagerT<S, U, V>
+{
+    pub fn new(
+        depth_store: Arc<S>,
+        reachability_service: MTReachabilityService<U>,
+        ghostdag_store: V,
+    ) -> Self {
+        Self {
+            depth_store,
+            reachability_service,
+            ghostdag_store,
+        }
+    }
+
+    pub fn calc_merge_depth_root(
+        &self,
+        ghostdag_data: &GhostdagData,
+        pruning_point: HashValue,
+        merge_depth: u64,
+    ) -> anyhow::Result<HashValue> {
+        self.calculate_block_at_depth(ghostdag_data, merge_depth, pruning_point)
+    }
+
+    pub fn calc_finality_point(
+        &self,
+        ghostdag_data: &GhostdagData,
+        pruning_point: HashValue,
+        finality_depth: u64,
+    ) -> anyhow::Result<HashValue> {
+        self.calculate_block_at_depth(ghostdag_data, finality_depth, pruning_point)
+    }
+
+    // returns the zero hash if no merge depth root is required yet
+    fn calculate_block_at_depth(
+        &self,
+        ghostdag_data: &GhostdagData,
+        depth: u64,
+        pruning_point: HashValue,
+    ) -> anyhow::Result<HashValue> {
+        if ghostdag_data.blue_score < depth {
+            return anyhow::Ok(HashValue::zero());
+        }
+
+        let pp_bs = self.ghostdag_store.get_blue_score(pruning_point)?;
+
+        if ghostdag_data.blue_score < pp_bs + depth {
+            return anyhow::Ok(HashValue::zero());
+        }
+
+        if !self
+            .reachability_service
+            .is_chain_ancestor_of(pruning_point, ghostdag_data.selected_parent)
+        {
+            return anyhow::Ok(HashValue::zero());
+        }
+
+        let mut current = match self
+            .depth_store
+            .get_block_depth_info(ghostdag_data.selected_parent)?
+        {
+            Some(block_depth_info) => block_depth_info.merge_depth_root,
+            None => HashValue::zero(),
+        };
+
+        if current == HashValue::zero() {
+            current = pruning_point;
+        }
+
+        let required_blue_score = ghostdag_data.blue_score - depth;
+
+        for chain_block in self.reachability_service.forward_chain_iterator(
+            current,
+            ghostdag_data.selected_parent,
+            true,
+        ) {
+            if self.ghostdag_store.get_blue_score(chain_block)? >= required_blue_score {
+                break;
+            }
+
+            current = chain_block;
+        }
+
+        anyhow::Ok(current)
+    }
+
+    /// Returns the set of blues which are eligible for "kosherizing" merge bound violating blocks.
+    /// By pruning rules, these blocks must have `merge_depth_root` on their selected chain.
+    pub fn kosherizing_blues<'a>(
+        &'a self,
+        ghostdag_data: &'a GhostdagData,
+        merge_depth_root: HashValue,
+    ) -> impl DoubleEndedIterator<Item = HashValue> + 'a {
+        ghostdag_data
+            .mergeset_blues
+            .iter()
+            .copied()
+            .filter(move |blue| {
+                self.reachability_service
+                    .is_chain_ancestor_of(merge_depth_root, *blue)
+            })
+    }
+}
diff --git a/flexidag/src/block_depth/mod.rs b/flexidag/src/block_depth/mod.rs
new file mode 100644
index 0000000000..ab5cf73f40
--- /dev/null
+++ b/flexidag/src/block_depth/mod.rs
@@ -0,0 +1 @@
+pub mod block_depth_info;
diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs
index 8b5e2def2f..876ebb92c2 100644
--- a/flexidag/src/blockdag.rs
+++ b/flexidag/src/blockdag.rs
@@ -1,8 +1,12 @@
 use super::reachability::{inquirer, reachability_service::MTReachabilityService};
 use super::types::ghostdata::GhostdagData;
+use crate::block_depth::block_depth_info::BlockDepthManagerT;
 use crate::consensusdb::consenses_state::{
     DagState, DagStateReader, DagStateStore, ReachabilityView,
 };
+use crate::consensusdb::consensus_block_depth::{
+    BlockDepthInfo, BlockDepthInfoStore, DbBlockDepthInfoStore,
+};
 use crate::consensusdb::prelude::{FlexiDagStorageConfig, StoreError};
 use crate::consensusdb::schemadb::{GhostdagStoreReader, ReachabilityStore, REINDEX_ROOT_KEY};
 use crate::consensusdb::{
@@ -14,6 +18,7 @@ use crate::consensusdb::{
 };
 use crate::ghostdag::protocol::GhostdagManager;
 use crate::prune::pruning_point_manager::PruningPointManagerT;
+use crate::reachability::reachability_service::ReachabilityService;
 use crate::{process_key_already_error, reachability};
 use anyhow::{bail, ensure, Ok};
 use starcoin_config::temp_dir;
@@ -38,10 +43,12 @@ pub type DbGhostdagManager = GhostdagManager<
 >;
 
 pub type PruningPointManager = PruningPointManagerT<DbReachabilityStore>;
+pub type BlockDepthManager =
+    BlockDepthManagerT<DbBlockDepthInfoStore, DbReachabilityStore, DbGhostdagStore>;
 
 pub struct MineNewDagBlockInfo {
     pub tips: Vec<HashValue>,
-    pub blue_blocks: Vec<HashValue>,
+    pub ghostdata: GhostdagData,
     pub pruning_point: HashValue,
 }
 
@@ -50,6 +57,7 @@ pub struct BlockDAG {
     pub storage: FlexiDagStorage,
     ghostdag_manager: DbGhostdagManager,
     pruning_point_manager: PruningPointManager,
+    block_depth_manager: BlockDepthManager,
 }
 
 impl BlockDAG {
@@ -70,12 +78,19 @@ impl BlockDAG {
             header_store,
             reachability_service.clone(),
         );
-        let pruning_point_manager = PruningPointManager::new(reachability_service, ghostdag_store);
+        let pruning_point_manager =
+            PruningPointManager::new(reachability_service.clone(), ghostdag_store.clone());
+        let block_depth_manager = BlockDepthManager::new(
+            db.block_depth_info_store.clone(),
+            reachability_service,
+            ghostdag_store,
+        );
 
         Self {
             ghostdag_manager,
             storage: db,
             pruning_point_manager,
+            block_depth_manager,
         }
     }
 
@@ -499,7 +514,7 @@ impl BlockDAG {
         if next_pruning_point == Hash::zero() || next_pruning_point == previous_pruning_point {
             anyhow::Ok(MineNewDagBlockInfo {
                 tips: dag_state.tips,
-                blue_blocks: (*next_ghostdata.mergeset_blues).clone(),
+                ghostdata: next_ghostdata,
                 pruning_point: next_pruning_point,
             })
         } else {
@@ -508,19 +523,15 @@ impl BlockDAG {
                 previous_pruning_point,
                 next_pruning_point,
             )?;
-            let mergeset_blues = (*self
-                .ghost_dag_manager()
-                .ghostdag(&pruned_tips)?
-                .mergeset_blues)
-                .clone();
+            let ghostdata = self.ghost_dag_manager().ghostdag(&pruned_tips)?;
             info!(
-                "previous tips are: {:?}, the pruned tips are: {:?}, the mergeset blues are: {:?}, the next pruning point is: {:?}",
+                "previous tips are: {:?}, the pruned tips are: {:?}, the ghost data are: {:?}, the next pruning point is: {:?}",
                 dag_state.tips,
-                pruned_tips, mergeset_blues, next_pruning_point
+                pruned_tips, ghostdata, next_pruning_point
             );
             anyhow::Ok(MineNewDagBlockInfo {
                 tips: pruned_tips,
-                blue_blocks: mergeset_blues,
+                ghostdata,
                 pruning_point: next_pruning_point,
             })
         }
@@ -549,6 +560,118 @@ impl BlockDAG {
         anyhow::Ok(())
     }
 
+    pub fn remove_bounded_merge_breaking_parents(
+        &self,
+        mut parents: Vec<Hash>,
+        mut ghostdata: GhostdagData,
+        pruning_point: Hash,
+        merge_depth: u64,
+    ) -> anyhow::Result<(Vec<Hash>, GhostdagData)> {
+        let merge_depth_root = self
+            .block_depth_manager
+            .calc_merge_depth_root(&ghostdata, pruning_point, merge_depth)
+            .map_err(|e| anyhow::anyhow!("Failed to calculate merge depth root: {}", e))?;
+        if merge_depth_root == Hash::zero() {
+            return anyhow::Ok((parents, ghostdata));
+        }
+        debug!("merge depth root: {:?}", merge_depth_root);
+        let mut kosherizing_blues: Option<Vec<Hash>> = None;
+        let mut bad_reds = Vec::new();
+
+        // Find red blocks violating the merge bound and which are not kosherized by any blue
+        for red in ghostdata.mergeset_reds.iter().copied() {
+            if self
+                .reachability_service()
+                .is_dag_ancestor_of(merge_depth_root, red)
+            {
+                continue;
+            }
+            // Lazy load the kosherizing blocks since this case is extremely rare
+            if kosherizing_blues.is_none() {
+                kosherizing_blues = Some(
+                    self.block_depth_manager
+                        .kosherizing_blues(&ghostdata, merge_depth_root)
+                        .collect(),
+                );
+            }
+            if !self.reachability_service().is_dag_ancestor_of_any(
+                red,
+                &mut kosherizing_blues.as_ref().unwrap().iter().copied(),
+            ) {
+                bad_reds.push(red);
+            }
+        }
+
+        if !bad_reds.is_empty() {
+            // Remove all parents which lead to merging a bad red
+            parents.retain(|&h| {
+                !self
+                    .reachability_service()
+                    .is_any_dag_ancestor(&mut bad_reds.iter().copied(), h)
+            });
+            // Recompute ghostdag data since parents changed
+            ghostdata = self.ghostdag_manager.ghostdag(&parents)?;
+        }
+
+        anyhow::Ok((parents, ghostdata))
+    }
+
+    pub fn check_bounded_merge_depth(
+        &self,
+        selected_parent: Hash,
+        pruning_point: Hash,
+        ghostdata: &GhostdagData,
+        merge_depth: u64,
+        finality_depth: u64,
+    ) -> anyhow::Result<(Hash, Hash)> {
+        let merge_depth_root = self.block_depth_manager.calc_merge_depth_root(
+            ghostdata,
+            pruning_point,
+            merge_depth,
+        )?;
+        if merge_depth_root == Hash::zero() {
+            return anyhow::Ok((Hash::zero(), Hash::zero()));
+        }
+        let finality_point = self.block_depth_manager.calc_finality_point(
+            ghostdata,
+            pruning_point,
+            finality_depth,
+        )?;
+        let mut kosherizing_blues: Option<Vec<Hash>> = None;
+
+        for red in ghostdata.mergeset_reds.iter().copied() {
+            if self
+                .reachability_service()
+                .is_dag_ancestor_of(merge_depth_root, red)
+            {
+                continue;
+            }
+            // Lazy load the kosherizing blocks since this case is extremely rare
+            if kosherizing_blues.is_none() {
+                kosherizing_blues = Some(
+                    self.block_depth_manager
+                        .kosherizing_blues(ghostdata, merge_depth_root)
+                        .collect(),
+                );
+            }
+            if !self.reachability_service().is_dag_ancestor_of_any(
+                red,
+                &mut kosherizing_blues.as_ref().unwrap().iter().copied(),
+            ) {
+                warn!("failed to verify the bounded merge depth");
+            }
+        }
+
+        self.storage.block_depth_info_store.insert(
+            selected_parent,
+            BlockDepthInfo {
+                merge_depth_root,
+                finality_point,
+            },
+        )?;
+        Ok((merge_depth_root, finality_point))
+    }
+
     pub fn reachability_store(
         &self,
     ) -> Arc<RwLock<DbReachabilityStore>> {
diff --git a/flexidag/src/consensusdb/consensus_block_depth.rs b/flexidag/src/consensusdb/consensus_block_depth.rs
new file mode 100644
index 0000000000..74e2386624
--- /dev/null
+++ b/flexidag/src/consensusdb/consensus_block_depth.rs
@@ -0,0 +1,99 @@
+use std::sync::Arc;
+
+use serde::{Deserialize, Serialize};
+use starcoin_logger::prelude::error;
+
+use crate::define_schema;
+use starcoin_crypto::HashValue as Hash;
+use starcoin_storage::db_storage::DBStorage;
+
+use super::{
+    access::CachedDbAccess,
+    error::StoreError,
+    schema::{KeyCodec, ValueCodec},
+    writer::DirectDbWriter,
+};
+
+#[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug, Default)]
+pub struct BlockDepthInfo {
+    pub merge_depth_root: Hash,
+    pub finality_point: Hash,
+}
+
+pub(crate) const DAG_BLOCK_DEPTH_INFO_STORE_CF: &str = "dag-block-depth-info-store";
+define_schema!(
+    BlockDepthInfoData,
+    Hash,
+    BlockDepthInfo,
+    DAG_BLOCK_DEPTH_INFO_STORE_CF
+);
+
+impl KeyCodec<BlockDepthInfoData> for Hash {
+    fn encode_key(&self) -> Result<Vec<u8>, StoreError> {
+        Ok(self.to_vec())
+    }
+
+    fn decode_key(data: &[u8]) -> Result<Self, StoreError> {
+        Self::from_slice(data).map_err(|e| {
+            StoreError::DecodeError(format!(
+                "failed to decode the key for BlockDepthInfoData for error: {}",
+                e
+            ))
+        })
+    }
+}
+impl ValueCodec<BlockDepthInfoData> for BlockDepthInfo {
+    fn encode_value(&self) -> Result<Vec<u8>, StoreError> {
+        bcs_ext::to_bytes(&self).map_err(|e| StoreError::EncodeError(e.to_string()))
+    }
+
+    fn decode_value(data: &[u8]) -> Result<Self, StoreError> {
+        bcs_ext::from_bytes(data).map_err(|e| StoreError::DecodeError(e.to_string()))
+    }
+}
+
+pub trait BlockDepthInfoReader {
+    fn get_block_depth_info(&self, hash: Hash) -> Result<Option<BlockDepthInfo>, StoreError>;
+}
+
+pub trait BlockDepthInfoStore: BlockDepthInfoReader {
+    // This is append only
+    fn insert(&self, hash: Hash, info: BlockDepthInfo) -> Result<(), StoreError>;
+}
+
+/// A DB + cache implementation of the `BlockDepthInfoStore` trait, with concurrency support.
+#[derive(Clone)]
+pub struct DbBlockDepthInfoStore {
+    db: Arc<DBStorage>,
+    block_depth_info_access: CachedDbAccess<BlockDepthInfoData>,
+}
+
+impl DbBlockDepthInfoStore {
+    pub fn new(db: Arc<DBStorage>, cache_size: usize) -> Self {
+        Self {
+            db: Arc::clone(&db),
+            block_depth_info_access: CachedDbAccess::new(db.clone(), cache_size),
+        }
+    }
+}
+
+impl BlockDepthInfoReader for DbBlockDepthInfoStore {
+    fn get_block_depth_info(&self, hash: Hash) -> Result<Option<BlockDepthInfo>, StoreError> {
+        let result = match self.block_depth_info_access.read(hash) {
+            Ok(info) => Some(info),
+            Err(e) => {
+                error!("get_block_depth_info error: {:?} for id: {:?}", e, hash);
+                None
+            }
+        };
+        Ok(result)
+    }
+}
+
+impl BlockDepthInfoStore for DbBlockDepthInfoStore {
+    fn insert(&self, hash: Hash, info: BlockDepthInfo) -> Result<(), StoreError> {
+        self.block_depth_info_access
+            .write(DirectDbWriter::new(&self.db), hash, info)?;
+        Ok(())
+    }
+}
diff --git a/flexidag/src/consensusdb/db.rs b/flexidag/src/consensusdb/db.rs
index 0590c99706..cbce908f83 100644
--- a/flexidag/src/consensusdb/db.rs
+++ b/flexidag/src/consensusdb/db.rs
@@ -1,5 +1,6 @@
 use super::{
     consenses_state::{DbDagStateStore, DAG_STATE_STORE_CF},
+    consensus_block_depth::{DbBlockDepthInfoStore, DAG_BLOCK_DEPTH_INFO_STORE_CF},
     error::StoreError,
     schemadb::{
         DbGhostdagStore, DbHeadersStore, DbReachabilityStore, DbRelationsStore, CHILDREN_CF,
@@ -19,6 +20,7 @@ pub struct FlexiDagStorage {
     pub reachability_store: Arc<RwLock<DbReachabilityStore>>,
     pub relations_store: Arc<RwLock<DbRelationsStore>>,
     pub state_store: Arc<RwLock<DbDagStateStore>>,
+    pub block_depth_info_store: Arc<DbBlockDepthInfoStore>,
 }
 
 #[derive(Clone)]
@@ -71,6 +73,7 @@ impl FlexiDagStorage {
                 GHOST_DAG_STORE_CF,
                 COMPACT_GHOST_DAG_STORE_CF,
                 DAG_STATE_STORE_CF,
+                DAG_BLOCK_DEPTH_INFO_STORE_CF,
             ],
             false,
             config.rocksdb_config,
@@ -92,7 +95,11 @@ impl FlexiDagStorage {
                 1,
                 config.cache_size,
             ))),
-            state_store: Arc::new(RwLock::new(DbDagStateStore::new(db, config.cache_size))),
+            state_store: Arc::new(RwLock::new(DbDagStateStore::new(
+                db.clone(),
+                config.cache_size,
+            ))),
+            block_depth_info_store: Arc::new(DbBlockDepthInfoStore::new(db, config.cache_size)),
         })
     }
 }
diff --git a/flexidag/src/consensusdb/mod.rs b/flexidag/src/consensusdb/mod.rs
index 331f288847..9faae8748b 100644
--- a/flexidag/src/consensusdb/mod.rs
+++ b/flexidag/src/consensusdb/mod.rs
@@ -1,6 +1,7 @@
 mod access;
 mod cache;
 pub mod consenses_state;
+pub mod consensus_block_depth;
 mod consensus_ghostdag;
 mod consensus_header;
 mod consensus_reachability;
diff --git a/flexidag/src/lib.rs b/flexidag/src/lib.rs
index 0d1c4e8886..a969bc67c9 100644
--- a/flexidag/src/lib.rs
+++ b/flexidag/src/lib.rs
@@ -1,5 +1,6 @@
 use consensusdb::prelude::StoreError;
 
+pub mod block_depth;
 pub mod blockdag;
 pub mod consensusdb;
 pub mod ghostdag;
diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs
index d3002c213c..ffaf7ba2b5 100644
--- a/flexidag/tests/tests.rs
+++ b/flexidag/tests/tests.rs
@@ -19,6 +19,7 @@ use starcoin_logger::prelude::debug;
 use starcoin_types::{
     block::{BlockHeader, BlockHeaderBuilder, BlockNumber},
     blockhash::{BlockHashMap, HashKTypeMap, KType},
+    U256,
 };
 
 use std::{
@@ -737,6 +738,41 @@ fn add_and_print_with_ghostdata(
     Ok(header)
 }
 
+fn add_and_print_with_pruning_point_and_difficulty(
+    number: BlockNumber,
+    parent: Hash,
+    parents: Vec<Hash>,
+    origin: Hash,
+    pruning_point: Hash,
+    difficulty: U256,
+    dag: &mut BlockDAG,
+) -> anyhow::Result<BlockHeader> {
+    let header_builder = BlockHeaderBuilder::random();
+    let header = header_builder
+        .with_parent_hash(parent)
+        .with_parents_hash(parents.clone())
+        .with_number(number)
+        .with_pruning_point(pruning_point)
+        .with_difficulty(difficulty)
+        .build();
+    let start = Instant::now();
+    dag.commit(header.to_owned(), origin)?;
+    let duration = start.elapsed();
+    println!(
+        "commit header: {:?}, number: {:?}, duration: {:?}",
+        header.id(),
+        header.number(),
+        duration
+    );
+    // let ghostdata = dag.ghostdata(&parents)?;
+    // dag.storage.ghost_dag_store.insert(header.id(), Arc::new(ghostdata))?;
+    // println!(
+    //     "add a header: {:?}, blue set: {:?}, red set: {:?}, blue anticone size: {:?}",
+    //     header, ghostdata.mergeset_blues, ghostdata.mergeset_reds, ghostdata.blues_anticone_sizes
+    // );
+    Ok(header)
+}
+
 fn add_and_print_with_pruning_point(
     number: BlockNumber,
     parent: Hash,
@@ -748,9 +784,10 @@ fn add_and_print_with_pruning_point(
     let header_builder = BlockHeaderBuilder::random();
     let header = header_builder
         .with_parent_hash(parent)
-        .with_parents_hash(parents)
+        .with_parents_hash(parents.clone())
         .with_number(number)
         .with_pruning_point(pruning_point)
+        .with_difficulty(100.into())
         .build();
     let start = Instant::now();
     dag.commit(header.to_owned(), origin)?;
@@ -761,7 +798,8 @@ fn add_and_print_with_pruning_point(
         header.number(),
         duration
     );
-    let _ghostdata = dag.ghostdata(&[header.id()])?;
+    // let ghostdata = dag.ghostdata(&parents)?;
+    // dag.storage.ghost_dag_store.insert(header.id(), Arc::new(ghostdata))?;
     // println!(
     //     "add a header: {:?}, blue set: {:?}, red set: {:?}, blue anticone size: {:?}",
     //     header, ghostdata.mergeset_blues, ghostdata.mergeset_reds, ghostdata.blues_anticone_sizes
     // );
@@ -1009,7 +1047,7 @@ fn test_prune() -> anyhow::Result<()> {
     // test the pruning point calculation
     let MineNewDagBlockInfo {
         tips,
-        blue_blocks: _,
+        ghostdata: _,
         pruning_point,
     } = dag.calc_mergeset_and_tips(
         previous_pruning_point,
@@ -1050,7 +1088,7 @@ fn test_prune() -> anyhow::Result<()> {
 
     let MineNewDagBlockInfo {
         tips,
-        blue_blocks: _,
+        ghostdata: _,
         pruning_point,
     } = dag.calc_mergeset_and_tips(
         previous_pruning_point,
@@ -1309,3 +1347,275 @@ fn test_verification_blue_block() -> anyhow::Result<()> {
 
     anyhow::Result::Ok(())
 }
+
+#[test]
+fn test_merge_bounded() -> anyhow::Result<()> {
+    // initialize the DAG first
+    let k = 3;
+    let pruning_depth = 4;
+    let pruning_finality = 3;
+
+    let mut dag = BlockDAG::create_for_testing_with_parameters(k).unwrap();
+
+    let origin = BlockHeaderBuilder::random().with_number(0).build();
+    let genesis = BlockHeader::dag_genesis_random_with_parent(origin)?;
+
+    dag.init_with_genesis(genesis.clone()).unwrap();
+
+    println!(
+        "origin: {:?}, genesis: {:?}",
+        genesis.parent_hash(),
+        genesis.id()
+    );
+
+    let block1 = add_and_print(
+        1,
+        genesis.id(),
+        vec![genesis.id()],
+        genesis.parent_hash(),
+        &mut dag,
+    )?;
+
+    let block_main_2 = add_and_print_with_pruning_point_and_difficulty(
+        2,
+        block1.id(),
+        vec![block1.id()],
+        genesis.parent_hash(),
+        Hash::zero(),
+        3000.into(),
+        &mut dag,
+    )?;
+    let block_main_3 = add_and_print(
+        3,
+        block_main_2.id(),
+        vec![block_main_2.id()],
+        genesis.parent_hash(),
+        &mut dag,
+    )?;
+    let block_main_3_1 = add_and_print(
+        3,
+        block_main_2.id(),
+        vec![block_main_2.id()],
+        genesis.parent_hash(),
+        &mut dag,
+    )?;
+    let block_main_4 = add_and_print(
+        4,
+        block_main_3.id(),
+        vec![block_main_3.id(), block_main_3_1.id()],
+        genesis.parent_hash(),
+        &mut dag,
+    )?;
+    let block_main_5 = add_and_print(
+        5,
+        block_main_4.id(),
+        vec![block_main_4.id()],
+        genesis.parent_hash(),
+        &mut dag,
+    )?;
+
+    let block_red_2 = add_and_print(
+        2,
+        block1.id(),
+        vec![block1.id()],
+        genesis.parent_hash(),
+        &mut dag,
+    )?;
+    let block_red_2_1 = add_and_print(
+        2,
+        block1.id(),
+        vec![block1.id()],
+        genesis.parent_hash(),
+        &mut dag,
+    )?;
+    let block_red_3 = add_and_print(
+        3,
+        block_red_2.id(),
+        vec![block_red_2.id(), block_red_2_1.id()],
+        genesis.parent_hash(),
+        &mut dag,
+    )?;
+
+    dag.save_dag_state(
+        genesis.id(),
+        DagState {
+            tips: vec![block_red_3.id(), block_main_5.id()],
+        },
+    )?;
+
+    // the pruning process begins
+    let (previous_ghostdata, previous_pruning_point) =
+        if block_main_5.pruning_point() == Hash::zero() {
+            (
+                dag.ghostdata_by_hash(genesis.id())?.ok_or_else(|| {
+                    format_err!("failed to get the ghostdata by genesis: {:?}", genesis.id())
+                })?,
+                genesis.id(),
+            )
+        } else {
+            (
+                dag.ghostdata_by_hash(block_main_5.pruning_point())?
+                    .ok_or_else(|| {
+                        format_err!(
+                            "failed to get the ghostdata by pruning point: {:?}",
+                            block_main_5.pruning_point()
+                        )
+                    })?,
+                block_main_5.pruning_point(),
+            )
+        };
+
+    // test the pruning point calculation
+    println!("**************** test the pruning point calculation and checking, generating the first merge depth point ****************");
+    let MineNewDagBlockInfo {
+        tips,
+        ghostdata,
+        pruning_point,
+    } = dag.calc_mergeset_and_tips(
+        previous_pruning_point,
+        previous_ghostdata.as_ref(),
+        pruning_depth,
+        pruning_finality,
+    )?;
+
+    assert_eq!(pruning_point, block_main_2.id());
+    assert_eq!(tips.len(), 1);
+    assert_eq!(*tips.last().unwrap(), block_main_5.id());
+
+    let merge_depth = 3;
+
+    // to test the algorithm
+    let (tips, ghostdata) =
+        dag.remove_bounded_merge_breaking_parents(tips, ghostdata, pruning_point, merge_depth)?;
+    assert_eq!(tips, vec![block_main_5.id()]);
+    assert_eq!(ghostdata, dag.ghostdata(&[block_main_5.id()])?);
+    dag.storage
+        .state_store
+        .write()
+        .insert(pruning_point, DagState { tips })?;
+
+    let (merge_depth_root, _finality_point) = dag.check_bounded_merge_depth(
+        ghostdata.selected_parent,
+        pruning_point,
+        &ghostdata,
+        merge_depth,
+        pruning_finality,
+    )?;
+    assert_eq!(
+        dag.ghost_dag_manager()
+            .find_selected_parent(vec![block_main_3.id(), block_main_3_1.id()])?,
+        merge_depth_root
+    );
+
+    // to test the calculation
+    println!("**************** test the pruning point calculation and checking, generating the second merge depth point ****************");
+    let block_main_6 = add_and_print(
+        6,
+        block_main_5.id(),
+        vec![block_main_5.id()],
+        genesis.parent_hash(),
+        &mut dag,
+    )?;
+    dag.storage.state_store.write().insert(
+        pruning_point,
+        DagState {
+            tips: vec![block_main_6.id()],
+        },
+    )?;
+    let MineNewDagBlockInfo {
+        tips,
+        ghostdata,
+        pruning_point,
+    } = dag.calc_mergeset_and_tips(
+        pruning_point,
+        dag.storage
+            .ghost_dag_store
+            .get_data(pruning_point)?
+            .as_ref(),
+        pruning_depth,
+        pruning_finality,
+    )?;
+
+    assert_eq!(pruning_point, block_main_2.id());
+    assert_eq!(tips.len(), 1);
+    assert_eq!(*tips.last().unwrap(), block_main_6.id());
+
+    let (tips, ghostdata) =
+        dag.remove_bounded_merge_breaking_parents(tips, ghostdata, pruning_point, merge_depth)?;
+
+    assert_eq!(tips.len(), 1);
+    assert_eq!(*tips.last().unwrap(), block_main_6.id());
+
+    let (merge_depth_root, _finality_point) = dag.check_bounded_merge_depth(
+        ghostdata.selected_parent,
+        pruning_point,
+        &ghostdata,
+        merge_depth,
+        pruning_finality,
+    )?;
+    let mut fork = dag
+        .ghost_dag_manager()
+        .find_selected_parent(vec![block_main_3.id(), block_main_3_1.id()])?;
+    assert_eq!(fork, merge_depth_root);
+
+    fork = if block_main_3.id() == fork {
+        block_main_3_1.id()
+    } else {
+        block_main_3.id()
+    };
+
+    // to test the filter
+    let block_red_4 = add_and_print(4, fork, vec![fork], genesis.parent_hash(), &mut dag)?;
+    let block_red_5 = add_and_print(
+        5,
+        block_red_4.id(),
+        vec![block_red_4.id()],
+        genesis.parent_hash(),
+        &mut dag,
+    )?;
+
+    let ghostdata = dag.ghostdata(&[block_main_6.id(), block_red_5.id()])?;
+    assert_eq!(
+        HashSet::from_iter(vec![block_red_4.id(), block_red_5.id()]),
+        ghostdata
+            .mergeset_reds
+            .as_ref()
+            .iter()
+            .cloned()
+            .collect::<HashSet<_>>()
+    );
+
+    dag.storage.state_store.write().insert(
+        pruning_point,
+        DagState {
+            tips: vec![block_main_6.id(), block_red_5.id()],
+        },
+    )?;
+    let MineNewDagBlockInfo {
+        tips,
+        ghostdata,
+        pruning_point,
+    } = dag.calc_mergeset_and_tips(
+        pruning_point,
+        dag.storage
+            .ghost_dag_store
+            .get_data(pruning_point)?
+            .as_ref(),
+        pruning_depth,
+        pruning_finality,
+    )?;
+
+    assert_eq!(pruning_point, block_main_2.id());
+    assert_eq!(tips.len(), 2);
+    assert_eq!(
+        tips.iter().cloned().collect::<HashSet<_>>(),
+        HashSet::from_iter(vec![block_main_6.id(), block_red_5.id()])
+    );
+
+    let (tips, _ghostdata) =
+        dag.remove_bounded_merge_breaking_parents(tips, ghostdata, pruning_point, merge_depth)?;
+    assert_eq!(tips.len(), 1);
+    assert_eq!(tips, vec![block_main_6.id()]);
+
+    anyhow::Result::Ok(())
+}
diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs
index 600c1dd885..421b6c0512 100644
--- a/sync/src/block_connector/block_connector_service.rs
+++ b/sync/src/block_connector/block_connector_service.rs
@@ -16,6 +16,7 @@ use anyhow::{bail, format_err, Ok, Result};
 use network_api::PeerProvider;
 use starcoin_chain::BlockChain;
 use starcoin_chain_api::{ChainReader, ConnectBlockError, WriteableChainService};
+use starcoin_config::genesis_config::G_MERGE_DEPTH;
 use starcoin_config::{NodeConfig, G_CRATE_VERSION};
 use starcoin_consensus::Consensus;
 use starcoin_crypto::HashValue;
@@ -384,8 +385,8 @@ where
         let dag = self.chain_service.get_dag();
 
         let MineNewDagBlockInfo {
-            tips,
-            blue_blocks,
+            mut tips,
+            mut ghostdata,
             pruning_point,
         } = if main_header.number() >= self.chain_service.get_main().get_pruning_height() {
             let (previous_ghostdata, pruning_point) = if main_header.pruning_point()
@@ -422,18 +423,28 @@ where
             let tips = dag.get_dag_state(genesis.block().id())?.tips;
             MineNewDagBlockInfo {
                 tips: tips.clone(),
-                blue_blocks: dag.ghostdata(&tips)?.mergeset_blues.as_ref().clone(),
+                ghostdata: dag.ghostdata(&tips)?,
                 pruning_point: HashValue::zero(),
             }
         };
 
-        if blue_blocks.is_empty() {
+        if ghostdata.mergeset_blues.is_empty() {
             bail!("failed to get the blue blocks from the DAG");
         }
 
-        let selected_parent = *blue_blocks
-            .first()
-            .ok_or_else(|| format_err!("the blue blocks must be not be 0!"))?;
+        info!("try to remove the red blocks when mining, tips: {:?} and ghostdata: {:?}, pruning point: {:?}", tips, ghostdata, pruning_point);
+        (tips, ghostdata) = dag.remove_bounded_merge_breaking_parents(
+            tips,
+            ghostdata,
+            pruning_point,
+            G_MERGE_DEPTH,
+        )?;
+        info!(
+            "after removing the bounded merge breaking parents, tips: {:?} and ghostdata: {:?}",
+            tips, ghostdata
+        );
+
+        let selected_parent = ghostdata.selected_parent;
         let time_service = self.config.net().time_service();
         let storage = ctx.get_shared::<Arc<Storage>>()?;
         let vm_metrics = ctx.get_shared_opt::<VMMetrics>()?;
@@ -453,7 +464,11 @@ where
             previous_header,
             on_chain_block_gas_limit,
             tips_hash: tips,
-            blues_hash: blue_blocks[1..].to_vec(),
+            blues_hash: if ghostdata.mergeset_blues.len() > 1 {
+                ghostdata.mergeset_blues[1..].to_vec()
+            } else {
+                Vec::new()
+            },
             strategy,
             next_difficulty,
             now_milliseconds,
diff --git a/sync/src/block_connector/write_block_chain.rs b/sync/src/block_connector/write_block_chain.rs
index c9ab7e207b..a8249b28fc 100644
--- a/sync/src/block_connector/write_block_chain.rs
+++ b/sync/src/block_connector/write_block_chain.rs
@@ -248,7 +248,7 @@ where
     #[cfg(test)]
     pub fn apply_failed(&mut self, block: Block) -> Result<()> {
         use anyhow::bail;
-        use starcoin_chain::verifier::{DagVerifier, FullVerifier};
+        use starcoin_chain::verifier::{DagVerifierForMining, FullVerifier};
 
         let verified_block = match self.main.check_chain_type()? {
             ChainType::Single => {
@@ -257,7 +257,8 @@ where
             }
             ChainType::Dag => {
                 // apply but no connection
-                self.main.verify_with_verifier::<DagVerifier>(block)?
+                self.main
+                    .verify_with_verifier::<DagVerifierForMining>(block)?
             }
         };
         let _executed_block = self.main.execute(verified_block)?;
@@ -416,6 +417,7 @@ where
                     descendant
                 )
             })?;
+            deleted_chain.extend(descendant_header.parents_hash());
 
             ready_to_delete.insert(descendant);
 
@@ -457,6 +459,33 @@ where
             )?;
         }
 
+        if new_head_block.header().pruning_point() == HashValue::zero() {
+            let genesis = self
+                .main
+                .get_storage()
+                .get_genesis()?
+                .ok_or_else(|| format_err!("Cannot get the genesis in storage!"))?;
+            self.main.dag().save_dag_state_directly(
+                genesis,
+                DagState {
+                    tips: vec![new_head_block.header().id()],
+                },
+            )?;
+            self.main.dag().save_dag_state_directly(
+                HashValue::zero(),
+                DagState {
+                    tips: vec![new_head_block.header().id()],
+                },
+            )?;
+        } else {
+            self.main.dag().save_dag_state_directly(
+                new_head_block.header().pruning_point(),
+                DagState {
+                    tips: vec![new_head_block.header().id()],
+                },
+            )?;
+        }
+
         let executed_block = new_branch.head_block();
         self.main = new_branch;
 
diff --git a/sync/src/parallel/executor.rs b/sync/src/parallel/executor.rs
index 4b61ff3980..5cd3a6366e 100644
--- a/sync/src/parallel/executor.rs
+++ b/sync/src/parallel/executor.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use starcoin_chain::{verifier::DagVerifierWithGhostData, BlockChain, ChainReader};
+use starcoin_chain::{verifier::DagVerifierForSync, BlockChain, ChainReader};
 use starcoin_chain_api::ExecutedBlock;
 use starcoin_config::TimeService;
 use starcoin_crypto::HashValue;
@@ -172,7 +172,7 @@ impl DagBlockExecutor {
                     match chain
                         .as_mut()
                         .expect("it cannot be none!")
-                        .apply_with_verifier::<DagVerifierWithGhostData>(block)
+                        .apply_with_verifier::<DagVerifierForSync>(block)
                     {
                         Ok(executed_block) => {
                             info!(
diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs
index b218b70d2f..a247df4564 100644
--- a/sync/src/tasks/block_sync_task.rs
+++ b/sync/src/tasks/block_sync_task.rs
@@ -423,15 +423,13 @@ where
         Ok(())
     }
 
-    async fn find_absent_ancestor(&self, mut block_headers: Vec<BlockHeader>) -> Result<u64> {
-        let mut count: u64 = 0;
+    async fn find_absent_ancestor(&self, mut block_headers: Vec<BlockHeader>) -> Result<()> {
         loop {
             let mut absent_blocks = vec![];
             self.find_absent_parent_dag_blocks_for_blocks(block_headers, &mut absent_blocks)?;
             if absent_blocks.is_empty() {
-                return Ok(count);
+                return Ok(());
             }
-            count = count.saturating_add(absent_blocks.len() as u64);
             block_headers = self.fetch_blocks(absent_blocks).await?;
         }
     }
@@ -453,17 +451,12 @@ where
             block_header.parents_hash()
         );
         let fut = async {
-            let count = self
-                .find_absent_ancestor(vec![block_header.clone()])
-                .await?;
-
-            if count == 0 {
-                return anyhow::Ok(ParallelSign::Continue);
-            }
-
             if block_header.number() % ASYNC_BLOCK_COUNT == 0
                 || block_header.number() >= self.target.target_id.number()
             {
+                self.find_absent_ancestor(vec![block_header.clone()])
+                    .await?;
+
                 let parallel_execute = DagBlockSender::new(
                     self.sync_dag_store.clone(),
                     100000,
@@ -613,7 +606,9 @@ where
         // if it is not, we must pull the dag parent blocks from the peer.
         info!("now sync dag block -- ensure_dag_parent_blocks_exist");
         match self.ensure_dag_parent_blocks_exist(block.clone())? {
-            ParallelSign::NeedMoreBlocks => return Ok(CollectorState::Need),
+            ParallelSign::NeedMoreBlocks => {
+                return Ok(CollectorState::Need);
+            }
             ParallelSign::Continue => (),
         }
         let state = self.check_enough();
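
A note on the invariant this patch enforces from both directions: `remove_bounded_merge_breaking_parents` (mining side) and `check_bounded_merge_depth` (verification side) share one predicate — a mergeset red is acceptable only if it lies in the past of the merge depth root, or is merged ("kosherized") by a blue that has that root on its selected chain. The sketch below restates that predicate over a toy DAG; `ToyDag`, its fields, and the use of plain DAG ancestry in place of selected-chain ancestry are illustrative assumptions, not the starcoin API.

```rust
use std::collections::{HashMap, HashSet};

/// Hypothetical stand-in for the reachability/ghostdag stores; it only
/// illustrates the merge-depth predicate, not the real starcoin types.
struct ToyDag {
    /// block id -> transitive past (set of ancestors), kept tiny on purpose
    past: HashMap<&'static str, HashSet<&'static str>>,
}

impl ToyDag {
    fn is_ancestor(&self, a: &str, b: &str) -> bool {
        a == b || self.past.get(b).map_or(false, |p| p.contains(a))
    }

    /// Mirrors the loop shared by remove_bounded_merge_breaking_parents and
    /// check_bounded_merge_depth: a red violates the merge bound when it is
    /// outside the past of the merge depth root and no kosherizing blue
    /// (a blue with the root among its ancestors) merges it. The real code
    /// uses selected-chain ancestry for the kosherizing filter; plain
    /// ancestry stands in for it here.
    fn is_bad_red(&self, red: &str, merge_depth_root: &str, blues: &[&'static str]) -> bool {
        if self.is_ancestor(merge_depth_root, red) {
            return false; // the red is already bounded by the root
        }
        !blues
            .iter()
            .copied()
            .filter(|&blue| self.is_ancestor(merge_depth_root, blue)) // kosherizing blues
            .any(|blue| self.is_ancestor(red, blue))
    }
}

fn main() {
    // genesis -> root -> b1 on the selected chain; "red" forks off genesis and
    // bypasses the merge depth root; b2 is a blue that merges the red.
    let mut past: HashMap<&'static str, HashSet<&'static str>> = HashMap::new();
    past.insert("root", HashSet::from(["genesis"]));
    past.insert("b1", HashSet::from(["genesis", "root"]));
    past.insert("red", HashSet::from(["genesis"]));
    past.insert("b2", HashSet::from(["genesis", "root", "red"]));
    let dag = ToyDag { past };

    // No blue merges the red: at mining time the parents leading to it are
    // dropped (parents.retain in remove_bounded_merge_breaking_parents), and
    // at verification time check_bounded_merge_depth flags it.
    assert!(dag.is_bad_red("red", "root", &["b1"]));
    // A kosherizing blue (b2) merges the red, so the merge is acceptable.
    assert!(!dag.is_bad_red("red", "root", &["b1", "b2"]));
    println!("merge-depth predicate behaves as expected");
}
```

Under these assumptions, dropping every parent whose merge would pull in a bad red keeps each mined block's mergeset within `G_MERGE_DEPTH` blue-score units of its selected chain, which is exactly what `test_merge_bounded` exercises above with `merge_depth = 3`.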