Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Reorg Support (WIP) #296

Draft
wants to merge 12 commits into
base: main
Choose a base branch
from
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

## Next release

- fix(tests): Add testing feature to mc-db dev dependency (#295)
- feat(config): added chain config template and fgw example
- feat(v0.8.0-rc0): starknet_subscribeNewHeads
- fix(rocksdb): update max open files opt
Expand Down
7 changes: 7 additions & 0 deletions crates/client/block_import/src/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -214,3 +214,10 @@ pub struct BlockImportResult {

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PendingBlockImportResult {}

/// Output of a [`crate::reorg`] operation.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ReorgResult {
    // Block hash of the chain tip the reorg moved away from.
    // NOTE(review): `reorg` currently fills this with `Default::default()` —
    // confirm the intended value once result plumbing is finished.
    pub from: Felt,
    // Block hash of the chain tip after the reorg.
    // NOTE(review): also currently a placeholder default — see `reorg`'s TODOs.
    pub to: Felt,
}
188 changes: 178 additions & 10 deletions crates/client/block_import/src/verify_apply.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
use crate::{
global_spawn_rayon_task, BlockImportError, BlockImportResult, BlockValidationContext, PendingBlockImportResult,
PreValidatedBlock, PreValidatedPendingBlock, UnverifiedHeader, ValidatedCommitments,
global_spawn_rayon_task, BlockImportError, BlockImportResult, BlockValidationContext, PendingBlockImportResult, PreValidatedBlock, PreValidatedPendingBlock, ReorgResult, UnverifiedHeader, ValidatedCommitments
};
use bonsai_trie::id::BasicId;
use itertools::Itertools;
use mc_db::{MadaraBackend, MadaraStorageError};
use mp_block::{
Expand Down Expand Up @@ -75,8 +75,28 @@ pub fn verify_apply_inner(
validation: BlockValidationContext,
) -> Result<BlockImportResult, BlockImportError> {
// Check block number and block hash against db
let (block_number, parent_block_hash) =
check_parent_hash_and_num(backend, block.header.parent_block_hash, block.unverified_block_number, &validation)?;
let (block_number, parent_block_hash) = match check_parent_hash_and_num(
backend,
block.header.parent_block_hash,
block.unverified_block_number,
&validation,
) {
Ok((block_number, parent_block_hash)) => (block_number, parent_block_hash),
/*
Err(BlockImportError::ParentHash { got: _, expected: _ }) => {
let reorg_result = reorg(backend, &block)?;
log::warn!("Reorg from 0x{:x} to 0x{:x}", reorg_result.from, reorg_result.to);
// attempt check again
check_parent_hash_and_num(
backend,
block.header.parent_block_hash,
block.unverified_block_number,
&validation,
)?
}
*/
Err(err) => return Err(err),
};

// Update contract and its storage tries
let global_state_root = update_tries(backend, &block, &validation, block_number)?;
Expand Down Expand Up @@ -171,6 +191,14 @@ fn check_parent_hash_and_num(
(0, Felt::ZERO)
};

// TODO: this originally was checked after block number, but we need to check this first to detect a reorg.
// TODO: this fn shouldn't be responsible for reorg detection anyway...
if let Some(parent_block_hash) = parent_block_hash {
if parent_block_hash != expected_parent_block_hash && !validation.ignore_block_order {
return Err(BlockImportError::ParentHash { expected: expected_parent_block_hash, got: parent_block_hash });
}
}

let block_number = if let Some(block_n) = unverified_block_number {
if block_n != expected_block_number && !validation.ignore_block_order {
return Err(BlockImportError::LatestBlockN { expected: expected_block_number, got: block_n });
Expand All @@ -180,12 +208,6 @@ fn check_parent_hash_and_num(
expected_block_number
};

if let Some(parent_block_hash) = parent_block_hash {
if parent_block_hash != expected_parent_block_hash && !validation.ignore_block_order {
return Err(BlockImportError::ParentHash { expected: expected_parent_block_hash, got: parent_block_hash });
}
}

Ok((block_number, expected_parent_block_hash))
}

Expand Down Expand Up @@ -320,6 +342,47 @@ fn block_hash(
Ok((block_hash, header))
}

// TODO: document
fn reorg(
backend: &MadaraBackend,
block: &PreValidatedBlock,
) -> Result<ReorgResult, BlockImportError> {

// TODO: return proper error
let block_number = block.unverified_block_number.expect("Can't reorg without block number");
let block_id = BasicId::new(block_number);

// TODO: the error type returned from revert_to is slightly different from others, so
// make_db_error() doesn't work

backend
.contract_trie()
.revert_to(block_id)
.map_err(|error| BlockImportError::Internal(Cow::Owned(format!("error reverting contract trie: {}", error))))?;

backend
.contract_storage_trie()
.revert_to(block_id)
.map_err(|error| BlockImportError::Internal(Cow::Owned(format!("error reverting contract storage trie: {}", error))))?;

backend
.class_trie()
.revert_to(block_id)
.map_err(|error| BlockImportError::Internal(Cow::Owned(format!("error reverting class trie: {}", error))))?;

backend.revert_to(block_number.checked_sub(1).unwrap())
.map_err(|error| BlockImportError::Internal(Cow::Owned(format!("error reverting block db: {}", error))))?;

// TODO: should reorg actually append new blocks? if not, it should be renamed to `revert_to`

// TODO: proper results
Ok(ReorgResult {
from: Default::default(),
to: Default::default(),
})

}

#[cfg(test)]
mod verify_apply_tests {
use super::*;
Expand Down Expand Up @@ -807,4 +870,109 @@ mod verify_apply_tests {
);
}
}

mod reorg_tests {
    use super::*;

    // other ideas for testing:
    // * basic tests
    // * deeper reorgs
    // * reorgs not from block 0
    // * multiple reorgs
    // * reorg back-and-forth
    // * show that we can build blocks (grow) on top of reorg
    // * actually change some state in blocks, reorg, show that state changes
    // * show that parent height / hash work properly after reorgs
    // * verify various storage has been erased
    // * test MPT
    //   * can include some of the basic test mentioned above
    //   * verify proofs after reorg
    //   * introduce patricia trie shape changes, verify proof
    //   * verify non-inclusion proofs
    // * negative tests
    //   * reject reorgs from finalized blocks
    //   * reject reorgs with no known parent
    //   * reject reorgs for wrong height
    //   * reject long distance reorgs (perhaps when L1 hasn't been processed recently?)

    /// Parameters for a single reorg scenario.
    struct ReorgTestArgs {
        /// original chain depth (including block 0)
        original_chain_depth: u64,
        /// the parent (in block height) whose child will be reorged away from
        reorg_parent_height: u64,
        /// number of blocks of the reorg (0 == no reorg)
        num_reorg_blocks: u64,
    }

    /// Builds a chain of `original_chain_depth` empty blocks, reorgs away from
    /// the child of `reorg_parent_height`, applies `num_reorg_blocks` new
    /// blocks on the reorged chain, and checks the resulting tip height/hash.
    #[rstest]
    #[case::reorg_from_genesis(
        ReorgTestArgs{
            original_chain_depth: 2,
            reorg_parent_height: 0,
            num_reorg_blocks: 1,
        },
    )]
    #[case::success(
        ReorgTestArgs{
            original_chain_depth: 6,
            reorg_parent_height: 3,
            num_reorg_blocks: 2,
        },
    )]
    #[tokio::test]
    async fn test_reorg(
        setup_test_backend: Arc<MadaraBackend>,
        #[case] args: ReorgTestArgs,
    ) {
        let backend = setup_test_backend;
        let validation = create_validation_context(false);

        // appends an empty block to the given parent block, returning the new block's hash
        let append_empty_block = |new_block_height: u64, parent_hash: Option<Felt>| -> Felt {
            println!("attempting to add block {}", new_block_height);
            let mut block = create_dummy_block();
            block.unverified_block_number = Some(new_block_height);
            block.unverified_global_state_root = Some(felt!("0x0"));
            block.header.parent_block_hash = parent_hash;
            let block_import = verify_apply_inner(&backend, block, validation.clone()).expect("verify_apply_inner failed");
            println!("added block {} (0x{:x})", new_block_height, block_import.block_hash);

            block_import.block_hash
        };

        // create the original chain
        let mut parent_hash = None;
        let mut reorg_parent_hash = None;
        for i in 0..args.original_chain_depth {
            let new_block_hash = append_empty_block(i, parent_hash);
            if i == args.reorg_parent_height {
                reorg_parent_hash = Some(new_block_hash);
            }
            parent_hash = Some(new_block_hash);
        }

        assert!(reorg_parent_hash.is_some());
        println!("Reorging from parent {} ({:?})", args.reorg_parent_height, reorg_parent_hash);

        // first reorg block: revert the backend, then apply it on the new parent
        let mut reorg_block = create_dummy_block();
        reorg_block.unverified_block_number = Some(args.reorg_parent_height + 1);
        reorg_block.unverified_global_state_root = Some(felt!("0x0"));
        reorg_block.header.parent_block_hash = reorg_parent_hash;
        let _ = reorg(&backend, &reorg_block).expect("reorg failed");
        let block_import = verify_apply_inner(&backend, reorg_block, validation.clone()).expect("verify_apply_inner failed on reorg block");

        // subsequent blocks must chain off the reorg block, not the old tip
        parent_hash = Some(block_import.block_hash);

        // grow the reorged chain (the reorg block at height `reorg_parent_height + 1`
        // is already applied, so remaining blocks start at `+ 2`)
        for i in 2..=args.num_reorg_blocks {
            let block_height = args.reorg_parent_height + i;
            let new_block_hash = append_empty_block(block_height, parent_hash);
            parent_hash = Some(new_block_hash);
        }

        let latest_block_n = backend.get_latest_block_n().expect("get_latest_block_n() failed").expect("latest_block_n is None");
        assert_eq!(args.reorg_parent_height + args.num_reorg_blocks, latest_block_n);
        let latest_block_hash = backend.get_block_hash(&BlockId::Number(latest_block_n)).expect("get_block_hash failed after reorg");
        assert_eq!(latest_block_hash, parent_hash);
    }
}
}
53 changes: 53 additions & 0 deletions crates/client/db/src/block_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -281,6 +281,59 @@ impl MadaraBackend {
Ok(())
}

/// Revert the block database to `revert_to`, deleting every block row above
/// that height in a single atomic write batch.
///
/// Clears the pending block (it was built on the old tip), removes the
/// per-block info/inner/state-diff rows and the block-hash / tx-hash indexes
/// for every block in `(revert_to, latest]`, then rewrites the sync tip.
///
/// # Errors
/// Propagates serialization and database errors. An empty chain (no latest
/// block) is treated as a no-op.
pub(crate) fn block_db_revert(&self, revert_to: u64) -> Result<()> {
    let mut tx = WriteBatchWithTransaction::default();

    let tx_hash_to_block_n = self.db.get_column(Column::TxHashToBlockN);
    let block_hash_to_block_n = self.db.get_column(Column::BlockHashToBlockN);
    let block_n_to_block = self.db.get_column(Column::BlockNToBlockInfo);
    let block_n_to_block_inner = self.db.get_column(Column::BlockNToBlockInner);
    let block_n_to_state_diff = self.db.get_column(Column::BlockNToStateDiff);
    let meta = self.db.get_column(Column::BlockStorageMeta);

    // clear pending: it was built on top of a block that may be reverted
    tx.delete_cf(&meta, ROW_PENDING_INFO);
    tx.delete_cf(&meta, ROW_PENDING_INNER);
    tx.delete_cf(&meta, ROW_PENDING_STATE_UPDATE);

    // Nothing stored yet: nothing to revert.
    let Some(latest_block_n) = self.get_latest_block_n()? else {
        return Ok(());
    };

    // Walk from the current tip down to (but excluding) `revert_to`.
    for block_n in (revert_to + 1..=latest_block_n).rev() {
        let block_n_encoded = bincode::serialize(&block_n)?;

        // If the info row is already gone there is nothing indexed for this
        // block; skip it rather than fail the whole revert.
        let Some(raw_info) = self.db.get_cf(&block_n_to_block, &block_n_encoded)? else {
            continue;
        };
        let block_info: MadaraBlockInfo = bincode::deserialize(&raw_info)?;

        // clear all txns from this block
        for txn_hash in block_info.tx_hashes {
            let txn_hash_encoded = bincode::serialize(&txn_hash)?;
            tx.delete_cf(&tx_hash_to_block_n, &txn_hash_encoded);
        }

        let block_hash_encoded = bincode::serialize(&block_info.block_hash)?;

        tx.delete_cf(&block_n_to_block, &block_n_encoded);
        tx.delete_cf(&block_hash_to_block_n, &block_hash_encoded);
        tx.delete_cf(&block_n_to_block_inner, &block_n_encoded);
        tx.delete_cf(&block_n_to_state_diff, &block_n_encoded);
    }

    // `revert_to` is the new chain tip.
    let latest_block_n_encoded = bincode::serialize(&revert_to)?;
    tx.put_cf(&meta, ROW_SYNC_TIP, latest_block_n_encoded);

    // WAL is disabled here to match the other block-db write paths.
    let mut writeopts = WriteOptions::new();
    writeopts.disable_wal(true);
    self.db.write_opt(tx, &writeopts)?;

    Ok(())
}

// Convenience functions

pub(crate) fn id_to_storage_type(&self, id: &BlockId) -> Result<Option<DbBlockId>> {
Expand Down
37 changes: 29 additions & 8 deletions crates/client/db/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -481,14 +481,35 @@ impl MadaraBackend {
&self,
map: DatabaseKeyMapping,
) -> BonsaiStorage<BasicId, BonsaiDb<'_>, H> {
let bonsai = BonsaiStorage::new(
BonsaiDb::new(&self.db, map),
BonsaiStorageConfig {
max_saved_trie_logs: Some(0),
max_saved_snapshots: Some(0),
snapshot_interval: u64::MAX,
},
)
// TODO: Bonsai doesn't load the its id_queue on new. Instead, it requires the caller
// to pass the highest value in. In our case, it should be equal to the latest block,
// so we pass that in if we can obtain it.
// Bonsai does, however, store the id_queue in its db, so I believe it should be able
// to load this...
// TODO: unwrap
let block_n = self.get_latest_block_n().unwrap();
let identifiers: Vec<Vec<u8>> = Default::default();
let bonsai = if let Some(block_n) = block_n {
BonsaiStorage::new_from_transactional_state(
BonsaiDb::new(&self.db, map),
BonsaiStorageConfig {
max_saved_trie_logs: Some(128),
max_saved_snapshots: Some(128),
snapshot_interval: u64::MAX,
},
BasicId::new(block_n),
identifiers,
)
} else {
BonsaiStorage::new(
BonsaiDb::new(&self.db, map),
BonsaiStorageConfig {
max_saved_trie_logs: Some(128),
max_saved_snapshots: Some(128),
snapshot_interval: u64::MAX,
}
)
}
// TODO(bonsai-trie): change upstream to reflect that.
.expect("New bonsai storage can never error");

Expand Down
4 changes: 4 additions & 0 deletions crates/client/db/src/storage_updates.rs
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,10 @@ impl MadaraBackend {
r1.and(r2).and(r3)
}

/// Revert the chain to block number `revert_to`, deleting every block above it.
///
/// Currently this only reverts the block database (info/inner/state-diff rows
/// and hash/tx indexes). NOTE(review): trie state is reverted separately by the
/// caller (see `reorg` in the block_import crate) — confirm whether trie
/// reverts should eventually live here too.
pub fn revert_to(&self, revert_to: u64) -> Result<(), MadaraStorageError> {
    self.block_db_revert(revert_to)
}

pub fn clear_pending_block(&self) -> Result<(), MadaraStorageError> {
self.block_db_clear_pending()?;
self.contract_db_clear_pending()?;
Expand Down