From 6905d52f72b5e80f17d23e5b42210e9f85c91daf Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Wed, 8 Jan 2025 13:20:56 +0545 Subject: [PATCH 01/17] Some cleanups --- crates/btcio/src/reader/query.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/btcio/src/reader/query.rs b/crates/btcio/src/reader/query.rs index 3e0436257..ce13bea1d 100644 --- a/crates/btcio/src/reader/query.rs +++ b/crates/btcio/src/reader/query.rs @@ -441,7 +441,11 @@ mod test { let l1status: L1Status = gen.generate(); let status_channel = StatusChannel::new(cls, l1status, Some(chs)); let params = Arc::new(gen_params()); +<<<<<<< HEAD let config = Arc::new(ReaderConfig::default()); +======= + let config = Arc::new(BtcioConfig::default()); +>>>>>>> bb9df24b (Some cleanups) let client = Arc::new(TestBitcoinClient::new(1)); ReaderContext { event_tx, From 2d08c81e6eeccbed811b3c6550da6872365e222f Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Thu, 9 Jan 2025 17:22:51 +0545 Subject: [PATCH 02/17] l1tx: Use tags to parse and build l1 payloads --- crates/btcio/src/reader/query.rs | 3 +- crates/btcio/src/test_utils.rs | 8 +- crates/btcio/src/writer/builder.rs | 10 +- crates/l1tx/src/envelope/builder.rs | 42 +++--- crates/l1tx/src/envelope/parser.rs | 122 +++++++++--------- crates/l1tx/src/filter.rs | 66 ++++++---- crates/l1tx/src/utils.rs | 4 +- .../proof-impl/btc-blockspace/src/filter.rs | 2 +- 8 files changed, 141 insertions(+), 116 deletions(-) diff --git a/crates/btcio/src/reader/query.rs b/crates/btcio/src/reader/query.rs index ce13bea1d..b78031bf0 100644 --- a/crates/btcio/src/reader/query.rs +++ b/crates/btcio/src/reader/query.rs @@ -324,7 +324,8 @@ async fn process_block( let txs = block.txdata.len(); let params = ctx.params.clone(); - let filtered_txs = filter_protocol_op_tx_refs(&block, state.filter_config()); + let filtered_txs = + filter_protocol_op_tx_refs(&block, ctx.params.rollup(), state.filter_config()); let block_data = BlockData::new(height, block, filtered_txs); let l1blkid = block_data.block().block_hash(); trace!(%height, %l1blkid, %txs, "fetched block from client"); diff --git a/crates/btcio/src/test_utils.rs b/crates/btcio/src/test_utils.rs index 467d5e487..90a621e9b 100644 --- a/crates/btcio/src/test_utils.rs +++ b/crates/btcio/src/test_utils.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use async_trait::async_trait; use bitcoin::{ bip32::Xpriv, @@ -7,7 +9,7 @@ use bitcoin::{ Address, Amount, Block, BlockHash, Network, ScriptBuf, SignedAmount, Transaction, Txid, Work, }; use strata_l1tx::envelope::builder::build_envelope_script; -use strata_primitives::l1::payload::L1Payload; +use strata_primitives::{l1::payload::L1Payload, params::Params}; use crate::{ rpc::{ @@ -214,10 +216,10 @@ impl SignerRpc for TestBitcoinClient { pub fn generate_envelope_script_test( envelope_data: L1Payload, - rollup_name: &str, + params: Arc, version: u8, ) -> anyhow::Result { - build_envelope_script(&envelope_data, rollup_name, version) + build_envelope_script(params.as_ref(), &envelope_data, version) } pub fn build_reveal_transaction_test( diff --git a/crates/btcio/src/writer/builder.rs b/crates/btcio/src/writer/builder.rs index 7d9be94ae..80ad21c3d 100644 --- a/crates/btcio/src/writer/builder.rs +++ b/crates/btcio/src/writer/builder.rs @@ -22,7 +22,7 @@ use bitcoin::{ use rand::{rngs::OsRng, RngCore}; use strata_config::btcio::FeePolicy; use strata_l1tx::envelope::builder::build_envelope_script; -use strata_primitives::l1::payload::L1Payload; +use strata_primitives::{l1::payload::L1Payload, params::Params}; use 
thiserror::Error; use super::context::WriterContext; @@ -77,10 +77,10 @@ pub fn create_envelope_transactions( // Create commit key let key_pair = generate_key_pair()?; let public_key = XOnlyPublicKey::from_keypair(&key_pair).0; - let rollup_name = ctx.params.rollup().rollup_name.clone(); // Start creating envelope content - let reveal_script = build_reveal_script(&rollup_name, &public_key, payload, ENVELOPE_VERSION)?; + let reveal_script = + build_reveal_script(ctx.params.as_ref(), &public_key, payload, ENVELOPE_VERSION)?; // Create spend info for tapscript let taproot_spend_info = TaprootBuilder::new() @@ -379,7 +379,7 @@ pub fn generate_key_pair() -> Result { /// Builds reveal script such that it contains opcodes for verifying the internal key as well as the /// envelope block fn build_reveal_script( - rollup_name: &str, + params: &Params, taproot_public_key: &XOnlyPublicKey, envelope_data: &L1Payload, version: u8, @@ -389,7 +389,7 @@ fn build_reveal_script( .push_opcode(OP_CHECKSIG) .into_script() .into_bytes(); - let script = build_envelope_script(envelope_data, rollup_name, version)?; + let script = build_envelope_script(params, envelope_data, version)?; script_bytes.extend(script.into_bytes()); Ok(ScriptBuf::from(script_bytes)) } diff --git a/crates/l1tx/src/envelope/builder.rs b/crates/l1tx/src/envelope/builder.rs index 99fe378fc..2fbeb8207 100644 --- a/crates/l1tx/src/envelope/builder.rs +++ b/crates/l1tx/src/envelope/builder.rs @@ -7,28 +7,31 @@ use bitcoin::{ script::PushBytesBuf, ScriptBuf, }; -use strata_primitives::l1::payload::{L1Payload, L1PayloadType}; +use strata_primitives::{ + l1::payload::{L1Payload, L1PayloadType}, + params::Params, +}; use tracing::*; -use crate::envelope::parser::{BATCH_DATA_TAG, ROLLUP_NAME_TAG, VERSION_TAG}; - // Generates a [`ScriptBuf`] that consists of `OP_IF .. OP_ENDIF` block pub fn build_envelope_script( + params: &Params, envelope_data: &L1Payload, - // TODO: get tagnames from config - rollup_name: &str, version: u8, ) -> anyhow::Result { + let tag = get_payload_type_tag(envelope_data.payload_type(), params)?; let mut builder = script::Builder::new() .push_opcode(OP_FALSE) .push_opcode(OP_IF) - .push_slice(PushBytesBuf::try_from(ROLLUP_NAME_TAG.to_vec())?) - .push_slice(PushBytesBuf::try_from(rollup_name.as_bytes().to_vec())?) - .push_slice(PushBytesBuf::try_from(VERSION_TAG.to_vec())?) - .push_slice(PushBytesBuf::from([version])) - .push_slice(PushBytesBuf::try_from(BATCH_DATA_TAG.to_vec())?) 
- .push_int(envelope_data.data().len() as i64); + .push_slice(tag) + // Insert version + .push_slice(PushBytesBuf::from(version.to_be_bytes())) + // Insert size + .push_slice(PushBytesBuf::from( + (envelope_data.data().len() as u32).to_be_bytes(), + )); + // Insert actual data trace!(batchdata_size = %envelope_data.data().len(), "Inserting batch data"); for chunk in envelope_data.data().chunks(520) { trace!(size=%chunk.len(), "inserting chunk"); @@ -39,11 +42,14 @@ pub fn build_envelope_script( Ok(builder.into_script()) } -#[allow(dead_code)] -fn get_payload_type_tag(payload_type: &L1PayloadType) -> anyhow::Result { - let ret = match *payload_type { - L1PayloadType::Checkpoint => PushBytesBuf::try_from("checkpoint".as_bytes().to_vec())?, - L1PayloadType::Da => PushBytesBuf::try_from("da".as_bytes().to_vec())?, - }; - Ok(ret) +fn get_payload_type_tag( + payload_type: &L1PayloadType, + params: &Params, +) -> anyhow::Result { + Ok(match *payload_type { + L1PayloadType::Checkpoint => { + PushBytesBuf::try_from(params.rollup().checkpoint_tag.as_bytes().to_vec())? + } + L1PayloadType::Da => PushBytesBuf::try_from(params.rollup().da_tag.as_bytes().to_vec())?, + }) } diff --git a/crates/l1tx/src/envelope/parser.rs b/crates/l1tx/src/envelope/parser.rs index e2876de06..b196d174b 100644 --- a/crates/l1tx/src/envelope/parser.rs +++ b/crates/l1tx/src/envelope/parser.rs @@ -1,13 +1,18 @@ +use std::str::from_utf8; + use bitcoin::{ opcodes::all::OP_IF, script::{Instruction, Instructions}, ScriptBuf, }; -use strata_primitives::l1::payload::L1Payload; +use strata_primitives::{ + l1::payload::{L1Payload, L1PayloadType}, + params::RollupParams, +}; use thiserror::Error; -use tracing::debug; +use tracing::{debug, warn}; -use crate::utils::{next_bytes, next_int, next_op}; +use crate::utils::{next_bytes, next_op, next_u32}; pub const ROLLUP_NAME_TAG: &[u8] = &[1]; pub const VERSION_TAG: &[u8] = &[2]; @@ -19,27 +24,21 @@ pub enum EnvelopeParseError { /// Does not have an `OP_IF..OP_ENDIF` block #[error("Invalid/Missing envelope(NO OP_IF..OP_ENDIF): ")] InvalidEnvelope, - /// Does not have a valid name tag - #[error("Invalid/Missing name tag")] - InvalidNameTag, - /// Does not have a valid name value - #[error("Invalid/Missing value")] - InvalidNameValue, - // Does not have a valid version tag - #[error("Invalid/Missing version tag")] - InvalidVersionTag, + /// Does not have a valid tag + #[error("Invalid/Missing tag")] + InvalidTag, // Does not have a valid version #[error("Invalid/Missing version")] InvalidVersion, - /// Does not have a valid blob tag - #[error("Invalid/Missing blob tag")] - InvalidBlobTag, - /// Does not have a valid blob - #[error("Invalid/Missing blob tag")] - InvalidBlob, + // Does not have a valid size + #[error("Invalid/Missing size")] + InvalidSize, /// Does not have a valid format #[error("Invalid Format")] InvalidFormat, + /// Does not have a payload data of expected size + #[error("Invalid Payload")] + InvalidPayload, } /// Parse [`L1Payload`] @@ -49,45 +48,50 @@ pub enum EnvelopeParseError { /// This function errors if it cannot parse the [`L1Payload`] pub fn parse_envelope_data( script: &ScriptBuf, - rollup_name: &str, + params: &RollupParams, ) -> Result { let mut instructions = script.instructions(); enter_envelope(&mut instructions)?; - // Parse name - let (tag, name) = parse_bytes_pair(&mut instructions)?; - let extracted_rollup_name = match (tag, name) { - (ROLLUP_NAME_TAG, namebytes) => { - String::from_utf8(namebytes.to_vec()).map_err(|_| EnvelopeParseError::InvalidNameValue) 
- } - _ => Err(EnvelopeParseError::InvalidNameTag), - }?; + // Parse tag + let tag = next_bytes(&mut instructions) + .and_then(|bytes| parse_payload_type(bytes, params)) + .ok_or(EnvelopeParseError::InvalidTag)?; + + // Parse version + let _version = next_bytes(&mut instructions) + .and_then(validate_version) + .ok_or(EnvelopeParseError::InvalidVersion)?; + + // Parse size + let size = next_u32(&mut instructions).ok_or(EnvelopeParseError::InvalidSize)?; + // Parse payload + let payload = extract_n_bytes(size, &mut instructions)?; + Ok(L1Payload::new(payload, tag)) +} - if extracted_rollup_name != rollup_name { - return Err(EnvelopeParseError::InvalidNameTag); +fn parse_payload_type(bytes: &[u8], params: &RollupParams) -> Option { + let str = from_utf8(bytes).ok()?; + if params.checkpoint_tag == str { + Some(L1PayloadType::Checkpoint) + } else if params.da_tag == str { + Some(L1PayloadType::Da) + } else { + None } +} - // Parse version - let (tag, ver) = parse_bytes_pair(&mut instructions)?; - let _version = match (tag, ver) { - (VERSION_TAG, [v]) => Ok(v), - (VERSION_TAG, _) => Err(EnvelopeParseError::InvalidVersion), - _ => Err(EnvelopeParseError::InvalidVersionTag), - }?; - - // Parse bytes - let tag = next_bytes(&mut instructions).ok_or(EnvelopeParseError::InvalidBlobTag)?; - let size = next_int(&mut instructions); - match (tag, size) { - (BATCH_DATA_TAG, Some(size)) => { - let batch_data = extract_n_bytes(size, &mut instructions)?; - Ok(L1Payload::new_checkpoint(batch_data)) // TODO: later this will discern checkpoint - // and da and any other payload types - } - (BATCH_DATA_TAG, None) => Err(EnvelopeParseError::InvalidBlob), - _ => Err(EnvelopeParseError::InvalidBlobTag), +fn validate_version(bytes: &[u8]) -> Option { + if bytes.len() != 8 { + warn!("Invalid version bytes length"); + return None; } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&bytes[0..8]); + let version = u64::from_be_bytes(buf); + // TODO: add version validation logic, i.e which particular versions are supported + Some(version) } /// Check for consecutive `OP_FALSE` and `OP_IF` that marks the beginning of an envelope @@ -119,14 +123,6 @@ fn enter_envelope(instructions: &mut Instructions) -> Result<(), EnvelopeParseEr Ok(()) } -fn parse_bytes_pair<'a>( - instructions: &mut Instructions<'a>, -) -> Result<(&'a [u8], &'a [u8]), EnvelopeParseError> { - let tag = next_bytes(instructions).ok_or(EnvelopeParseError::InvalidFormat)?; - let name = next_bytes(instructions).ok_or(EnvelopeParseError::InvalidFormat)?; - Ok((tag, name)) -} - /// Extract bytes of `size` from the remaining instructions fn extract_n_bytes( size: u32, @@ -143,7 +139,7 @@ fn extract_n_bytes( Ok(data) } else { debug!("Extracting {} bytes from instructions", size); - Err(EnvelopeParseError::InvalidBlob) + Err(EnvelopeParseError::InvalidPayload) } } @@ -152,17 +148,20 @@ mod tests { use strata_btcio::test_utils::generate_envelope_script_test; use strata_primitives::l1::payload::L1Payload; + use strata_test_utils::l2::gen_params; use super::*; #[test] fn test_parse_envelope_data() { let bytes = vec![0, 1, 2, 3]; + let params = gen_params(); let envelope_data = L1Payload::new_checkpoint(bytes.clone()); - let script = generate_envelope_script_test(envelope_data.clone(), "TestRollup", 1).unwrap(); + let script = + generate_envelope_script_test(envelope_data.clone(), params.clone().into(), 1).unwrap(); // Parse the rollup name - let result = parse_envelope_data(&script, "TestRollup").unwrap(); + let result = parse_envelope_data(&script, 
params.rollup()).unwrap(); // Assert the rollup name was parsed correctly assert_eq!(result, envelope_data); @@ -170,10 +169,11 @@ mod tests { // Try with larger size let bytes = vec![1; 2000]; let envelope_data = L1Payload::new_checkpoint(bytes.clone()); - let script = generate_envelope_script_test(envelope_data.clone(), "TestRollup", 1).unwrap(); + let script = + generate_envelope_script_test(envelope_data.clone(), params.clone().into(), 1).unwrap(); // Parse the rollup name - let result = parse_envelope_data(&script, "TestRollup").unwrap(); + let result = parse_envelope_data(&script, params.rollup()).unwrap(); // Assert the rollup name was parsed correctly assert_eq!(result, envelope_data); diff --git a/crates/l1tx/src/filter.rs b/crates/l1tx/src/filter.rs index 003c9c536..d0f3fd0e0 100644 --- a/crates/l1tx/src/filter.rs +++ b/crates/l1tx/src/filter.rs @@ -1,4 +1,5 @@ use bitcoin::{Block, Transaction}; +use strata_primitives::params::RollupParams; use strata_state::{ batch::SignedBatchCheckpoint, tx::{DepositInfo, DepositRequestInfo, ProtocolOperation}, @@ -15,6 +16,7 @@ use crate::{ /// [`TxFilterConfig`]s pub fn filter_protocol_op_tx_refs( block: &Block, + params: &RollupParams, filter_config: &TxFilterConfig, ) -> Vec { block @@ -22,7 +24,7 @@ pub fn filter_protocol_op_tx_refs( .iter() .enumerate() .flat_map(|(i, tx)| { - extract_protocol_ops(tx, filter_config) + extract_protocol_ops(tx, params, filter_config) .into_iter() .map(move |relevant_tx| ProtocolOpTxRef::new(i as u32, relevant_tx)) }) @@ -33,9 +35,13 @@ pub fn filter_protocol_op_tx_refs( /// info. // TODO: make this function return multiple ops as a single tx can have multiple outpoints that's // relevant -fn extract_protocol_ops(tx: &Transaction, filter_conf: &TxFilterConfig) -> Vec { +fn extract_protocol_ops( + tx: &Transaction, + params: &RollupParams, + filter_conf: &TxFilterConfig, +) -> Vec { // Currently all we have are envelope txs, deposits and deposit requests - parse_envelope_checkpoints(tx, filter_conf) + parse_envelope_checkpoints(tx, params) .map(ProtocolOperation::Checkpoint) .chain(parse_deposits(tx, filter_conf).map(ProtocolOperation::Deposit)) .chain(parse_deposit_requests(tx, filter_conf).map(ProtocolOperation::DepositRequest)) @@ -64,19 +70,20 @@ fn parse_deposits( // DA separately fn parse_envelope_checkpoints<'a>( tx: &'a Transaction, - filter_conf: &'a TxFilterConfig, + params: &'a RollupParams, ) -> impl Iterator + 'a { tx.input.iter().filter_map(|inp| { inp.witness .tapscript() - .and_then(|scr| parse_envelope_data(&scr.into(), &filter_conf.rollup_name.clone()).ok()) + .and_then(|scr| parse_envelope_data(&scr.into(), params).ok()) + // TODO: get checkpoint or da .and_then(|data| borsh::from_slice::(data.data()).ok()) }) } #[cfg(test)] mod test { - use std::str::FromStr; + use std::{str::FromStr, sync::Arc}; use bitcoin::{ absolute::{Height, LockTime}, @@ -91,7 +98,10 @@ mod test { }; use rand::{rngs::OsRng, RngCore}; use strata_btcio::test_utils::{build_reveal_transaction_test, generate_envelope_script_test}; - use strata_primitives::l1::{payload::L1Payload, BitcoinAmount}; + use strata_primitives::{ + l1::{payload::L1Payload, BitcoinAmount}, + params::Params, + }; use strata_state::{batch::SignedBatchCheckpoint, tx::ProtocolOperation}; use strata_test_utils::{l2::gen_params, ArbitraryGenerator}; @@ -155,13 +165,13 @@ mod test { // Create an envelope transaction. 
The focus here is to create a tapscript, rather than a // completely valid control block - fn create_envelope_tx(rollup_name: String) -> Transaction { + fn create_checkpoint_envelope_tx(params: Arc) -> Transaction { let address = parse_addr(OTHER_ADDR); let inp_tx = create_test_tx(vec![create_test_txout(100000000, &address)]); let signed_checkpoint: SignedBatchCheckpoint = ArbitraryGenerator::new().generate(); let envelope_data = L1Payload::new_checkpoint(borsh::to_vec(&signed_checkpoint).unwrap()); - let script = generate_envelope_script_test(envelope_data, &rollup_name, 1).unwrap(); + let script = generate_envelope_script_test(envelope_data, params, 1).unwrap(); // Create controlblock let mut rand_bytes = [0; 32]; @@ -189,22 +199,23 @@ mod test { // Test with valid name let filter_config = create_tx_filter_config(); - let rollup_name = filter_config.rollup_name.clone(); - let tx = create_envelope_tx(rollup_name.clone()); + let params: Params = gen_params(); + let tx = create_checkpoint_envelope_tx(params.clone().into()); let block = create_test_block(vec![tx]); - let txids: Vec = filter_protocol_op_tx_refs(&block, &filter_config) + let txids: Vec = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config) .iter() .map(|op_refs| op_refs.index()) .collect(); assert_eq!(txids[0], 0, "Should filter valid rollup name"); - // Test with invalid name - let rollup_name = "invalidRollupName".to_string(); - let tx = create_envelope_tx(rollup_name.clone()); + // Test with invalid checkpoint tag + let mut new_params = params.clone(); + new_params.rollup.checkpoint_tag = "invalid_checkpoint_tag".to_string(); + let tx = create_checkpoint_envelope_tx(new_params.into()); let block = create_test_block(vec![tx]); - let result = filter_protocol_op_tx_refs(&block, &filter_config); + let result = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config); assert!(result.is_empty(), "Should filter out invalid name"); } @@ -212,10 +223,11 @@ mod test { fn test_filter_relevant_txs_no_match() { let tx1 = create_test_tx(vec![create_test_txout(1000, &parse_addr(OTHER_ADDR))]); let tx2 = create_test_tx(vec![create_test_txout(10000, &parse_addr(OTHER_ADDR))]); + let params = gen_params(); let block = create_test_block(vec![tx1, tx2]); let filter_config = create_tx_filter_config(); - let txids: Vec = filter_protocol_op_tx_refs(&block, &filter_config) + let txids: Vec = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config) .iter() .map(|op_refs| op_refs.index()) .collect(); @@ -225,13 +237,13 @@ mod test { #[test] fn test_filter_relevant_txs_multiple_matches() { let filter_config = create_tx_filter_config(); - let rollup_name = filter_config.rollup_name.clone(); - let tx1 = create_envelope_tx(rollup_name.clone()); + let params: Params = gen_params(); + let tx1 = create_checkpoint_envelope_tx(params.clone().into()); let tx2 = create_test_tx(vec![create_test_txout(100, &parse_addr(OTHER_ADDR))]); - let tx3 = create_envelope_tx(rollup_name); + let tx3 = create_checkpoint_envelope_tx(params.clone().into()); let block = create_test_block(vec![tx1, tx2, tx3]); - let txids: Vec = filter_protocol_op_tx_refs(&block, &filter_config) + let txids: Vec = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config) .iter() .map(|op_refs| op_refs.index()) .collect(); @@ -245,6 +257,7 @@ mod test { let filter_config = create_tx_filter_config(); let deposit_config = filter_config.deposit_config.clone(); let ee_addr = vec![1u8; 20]; // Example EVM address + let params = gen_params(); let 
deposit_script = build_test_deposit_script(deposit_config.magic_bytes.clone(), ee_addr.clone()); @@ -256,7 +269,7 @@ mod test { let block = create_test_block(vec![tx]); - let result = filter_protocol_op_tx_refs(&block, &filter_config); + let result = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config); assert_eq!(result.len(), 1, "Should find one relevant transaction"); assert_eq!( @@ -281,6 +294,7 @@ mod test { fn test_filter_relevant_txs_deposit_request() { let filter_config = create_tx_filter_config(); let mut deposit_config = filter_config.deposit_config.clone(); + let params = gen_params(); let extra_amt = 10000; deposit_config.deposit_amount += extra_amt; let dest_addr = vec![2u8; 20]; // Example EVM address @@ -299,7 +313,7 @@ mod test { let block = create_test_block(vec![tx]); - let result = filter_protocol_op_tx_refs(&block, &filter_config); + let result = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config); assert_eq!(result.len(), 1, "Should find one relevant transaction"); assert_eq!( @@ -326,6 +340,7 @@ mod test { fn test_filter_relevant_txs_no_deposit() { let filter_config = create_tx_filter_config(); let deposit_config = filter_config.deposit_config.clone(); + let params = gen_params(); let irrelevant_tx = create_test_deposit_tx( Amount::from_sat(deposit_config.deposit_amount), &test_taproot_addr().address().script_pubkey(), @@ -334,7 +349,7 @@ mod test { let block = create_test_block(vec![irrelevant_tx]); - let result = filter_protocol_op_tx_refs(&block, &filter_config); + let result = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config); assert!( result.is_empty(), @@ -346,6 +361,7 @@ mod test { fn test_filter_relevant_txs_multiple_deposits() { let filter_config = create_tx_filter_config(); let deposit_config = filter_config.deposit_config.clone(); + let params = gen_params(); let dest_addr1 = vec![3u8; 20]; let dest_addr2 = vec![4u8; 20]; @@ -367,7 +383,7 @@ mod test { let block = create_test_block(vec![tx1, tx2]); - let result = filter_protocol_op_tx_refs(&block, &filter_config); + let result = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config); assert_eq!(result.len(), 2, "Should find two relevant transactions"); assert_eq!( diff --git a/crates/l1tx/src/utils.rs b/crates/l1tx/src/utils.rs index 4e55b4b0c..ef9c332ad 100644 --- a/crates/l1tx/src/utils.rs +++ b/crates/l1tx/src/utils.rs @@ -31,12 +31,12 @@ pub fn next_bytes<'a>(instructions: &mut Instructions<'a>) -> Option<&'a [u8]> { } /// Extract next integer value(unsigned) -pub fn next_int(instructions: &mut Instructions<'_>) -> Option { +pub fn next_u32(instructions: &mut Instructions<'_>) -> Option { let n = instructions.next(); match n { Some(Ok(Instruction::PushBytes(bytes))) => { // Convert the bytes to an integer - if bytes.len() > 4 { + if bytes.len() != 4 { return None; } let mut buf = [0; 4]; diff --git a/crates/proof-impl/btc-blockspace/src/filter.rs b/crates/proof-impl/btc-blockspace/src/filter.rs index 05413d7b7..6b8dd31e8 100644 --- a/crates/proof-impl/btc-blockspace/src/filter.rs +++ b/crates/proof-impl/btc-blockspace/src/filter.rs @@ -17,7 +17,7 @@ pub fn extract_relevant_info( let mut deposits = Vec::new(); let mut prev_checkpoint = None; - let relevant_txs = filter_protocol_op_tx_refs(block, filter_config); + let relevant_txs = filter_protocol_op_tx_refs(block, rollup_params, &filter_config); for tx in relevant_txs { match tx.proto_op() { From 3930c7d308d5eec7772808178e308a8f81669cc3 Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Mon, 
13 Jan 2025 18:08:32 +0545
Subject: [PATCH 03/17] db: Add IntentEntry and corresponding db operations, and tests

---
 crates/db/src/traits.rs                       |  9 +++-
 crates/db/src/types.rs                        | 38 ++++++++++++-
 crates/primitives/src/l1/payload.rs           |  1 +
 crates/rocksdb-store/src/sequencer/db.rs      | 53 +++++++++++++++++--
 crates/rocksdb-store/src/sequencer/schemas.rs |  7 ++-
 crates/storage/src/ops/envelope.rs            | 22 +++++++-
 6 files changed, 123 insertions(+), 7 deletions(-)

diff --git a/crates/db/src/traits.rs b/crates/db/src/traits.rs
index 37bf7ff02..3cfce12bf 100644
--- a/crates/db/src/traits.rs
+++ b/crates/db/src/traits.rs
@@ -19,7 +19,7 @@ use strata_zkvm::ProofReceipt;
 
 use crate::{
     entities::bridge_tx_state::BridgeTxState,
-    types::{CheckpointEntry, L1TxEntry, PayloadEntry},
+    types::{CheckpointEntry, IntentEntry, L1TxEntry, PayloadEntry},
     DbResult,
 };
 
@@ -248,6 +248,7 @@ pub trait CheckpointDatabase {
 /// A trait encapsulating provider and store traits to interact with the underlying database for
 /// [`PayloadEntry`]
 pub trait SequencerDatabase {
+    // TODO: remove this, and possibly separate out/rename to writer db
     type L1PayloadDB: L1PayloadDatabase;
 
     fn payload_db(&self) -> &Arc<Self::L1PayloadDB>;
@@ -267,6 +268,12 @@ pub trait L1PayloadDatabase {
 
     /// Get the last payload index
     fn get_last_payload_idx(&self) -> DbResult<Option<u64>>;
+
+    /// Store the [`IntentEntry`].
+    fn put_intent_entry(&self, payloadid: Buf32, payloadentry: IntentEntry) -> DbResult<()>;
+
+    /// Get an [`IntentEntry`] by its hash
+    fn get_intent_by_id(&self, id: Buf32) -> DbResult<Option<IntentEntry>>;
 }
 
 pub trait ProofDatabase {
diff --git a/crates/db/src/types.rs b/crates/db/src/types.rs
index 32a7f74cd..1e7386cac 100644
--- a/crates/db/src/types.rs
+++ b/crates/db/src/types.rs
@@ -7,10 +7,46 @@ use bitcoin::{
 };
 use borsh::{BorshDeserialize, BorshSerialize};
 use serde::{Deserialize, Serialize};
-use strata_primitives::{buf::Buf32, l1::payload::L1Payload};
+use strata_primitives::{
+    buf::Buf32,
+    l1::payload::{L1Payload, PayloadIntent},
+};
 use strata_state::batch::{BatchCheckpoint, BatchInfo, BootstrapState, CommitmentInfo};
 use strata_zkvm::ProofReceipt;
 
+/// Represents an intent to publish to some DA, which will be bundled for efficiency.
+#[derive(Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, Arbitrary)]
+pub struct IntentEntry {
+    pub intent: PayloadIntent,
+    pub status: IntentStatus,
+}
+
+impl IntentEntry {
+    pub fn new_unbundled(intent: PayloadIntent) -> Self {
+        Self {
+            intent,
+            status: IntentStatus::Unbundled,
+        }
+    }
+
+    pub fn new_bundled(intent: PayloadIntent, bundle_idx: u64) -> Self {
+        Self {
+            intent,
+            status: IntentStatus::Bundled(bundle_idx),
+        }
+    }
+}
+
+/// Status of an intent, indicating the various stages of being bundled into an L1 transaction.
+/// Unbundled intents are collected and bundled to create a [`PayloadEntry`].
+#[derive(Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, Arbitrary)]
+pub enum IntentStatus {
+    // It is not bundled yet, and thus will be collected and processed by the bundler.
+    Unbundled,
+    // It has been bundled into the [`PayloadEntry`] with the given bundle idx.
+    Bundled(u64),
+}
+
 /// Represents data for a payload we're still planning to post to L1.
 #[derive(Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, Arbitrary)]
 pub struct PayloadEntry {
diff --git a/crates/primitives/src/l1/payload.rs b/crates/primitives/src/l1/payload.rs
index 1824ebaa3..3e15e4e52 100644
--- a/crates/primitives/src/l1/payload.rs
+++ b/crates/primitives/src/l1/payload.rs
@@ -171,6 +171,7 @@ pub enum L1PayloadType {
 ///
 /// These are never stored on-chain.
 #[derive(Clone, Debug, Eq, PartialEq, Arbitrary, BorshDeserialize, BorshSerialize)]
+// TODO: rename this to L1PayloadIntent and remove the dest field
 pub struct PayloadIntent {
     /// The destination for this payload.
     dest: PayloadDest,
diff --git a/crates/rocksdb-store/src/sequencer/db.rs b/crates/rocksdb-store/src/sequencer/db.rs
index 7743bb278..31c0eda32 100644
--- a/crates/rocksdb-store/src/sequencer/db.rs
+++ b/crates/rocksdb-store/src/sequencer/db.rs
@@ -4,12 +4,12 @@ use rockbound::{OptimisticTransactionDB, SchemaDBOperationsExt};
 use strata_db::{
     errors::DbError,
     traits::{L1PayloadDatabase, SequencerDatabase},
-    types::PayloadEntry,
+    types::{IntentEntry, PayloadEntry},
     DbResult,
 };
 use strata_primitives::buf::Buf32;
 
-use super::schemas::{SeqBlobIdSchema, SeqBlobSchema};
+use super::schemas::{SeqBlobIdSchema, SeqBlobSchema, SeqIntentSchema};
 use crate::{sequence::get_next_id, DbOpsConfig};
 
 pub struct RBSeqBlobDb {
     db: Arc<OptimisticTransactionDB>,
@@ -59,6 +59,23 @@ impl L1PayloadDatabase for RBSeqBlobDb {
     fn get_payload_id(&self, blobidx: u64) -> DbResult<Option<Buf32>> {
         Ok(self.db.get::<SeqBlobIdSchema>(&blobidx)?)
     }
+
+    fn put_intent_entry(&self, intent_id: Buf32, intent_entry: IntentEntry) -> DbResult<()> {
+        self.db
+            .with_optimistic_txn(
+                rockbound::TransactionRetry::Count(self.ops.retry_count),
+                |tx| -> Result<(), DbError> {
+                    tx.put::<SeqIntentSchema>(&intent_id, &intent_entry)?;
+
+                    Ok(())
+                },
+            )
+            .map_err(|e| DbError::TransactionError(e.to_string()))
+    }
+
+    fn get_intent_by_id(&self, id: Buf32) -> DbResult<Option<IntentEntry>> {
+        Ok(self.db.get::<SeqIntentSchema>(&id)?)
+ } } pub struct SequencerDB { @@ -123,7 +140,7 @@ mod tests { } #[test] - fn test_update_blob_() { + fn test_update_blob() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); let seq_db = RBSeqBlobDb::new(db, db_ops); @@ -183,4 +200,34 @@ mod tests { let last_blob_idx = seq_db.get_last_payload_idx().unwrap(); assert_eq!(last_blob_idx, Some(1)); } + + // Intent related tests + + #[test] + fn test_put_intent_new_entry() { + let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); + let seq_db = RBSeqBlobDb::new(db, db_ops); + + let intent: IntentEntry = ArbitraryGenerator::new().generate(); + let intent_id: Buf32 = [0; 32].into(); + + seq_db.put_intent_entry(intent_id, intent.clone()).unwrap(); + + let stored_intent = seq_db.get_intent_by_id(intent_id).unwrap(); + assert_eq!(stored_intent, Some(intent)); + } + + #[test] + fn test_put_intent_entry() { + let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); + let seq_db = RBSeqBlobDb::new(db, db_ops); + let intent: IntentEntry = ArbitraryGenerator::new().generate(); + let intent_id: Buf32 = [0; 32].into(); + + let result = seq_db.put_intent_entry(intent_id, intent.clone()); + assert!(result.is_ok()); + + let retrieved = seq_db.get_intent_by_id(intent_id).unwrap().unwrap(); + assert_eq!(retrieved, intent); + } } diff --git a/crates/rocksdb-store/src/sequencer/schemas.rs b/crates/rocksdb-store/src/sequencer/schemas.rs index 42918bc50..f42e6cb73 100644 --- a/crates/rocksdb-store/src/sequencer/schemas.rs +++ b/crates/rocksdb-store/src/sequencer/schemas.rs @@ -1,4 +1,4 @@ -use strata_db::types::PayloadEntry; +use strata_db::types::{IntentEntry, PayloadEntry}; use strata_primitives::buf::Buf32; use crate::{ @@ -15,3 +15,8 @@ define_table_with_default_codec!( /// A table to store blobid -> blob mapping (SeqBlobSchema) Buf32 => PayloadEntry ); + +define_table_with_default_codec!( + /// A table to store intentid -> intent mapping + (SeqIntentSchema) Buf32 => IntentEntry +); diff --git a/crates/storage/src/ops/envelope.rs b/crates/storage/src/ops/envelope.rs index 6d23e5790..9ae041a1d 100644 --- a/crates/storage/src/ops/envelope.rs +++ b/crates/storage/src/ops/envelope.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use strata_db::{ traits::{L1PayloadDatabase, SequencerDatabase}, - types::PayloadEntry, + types::{IntentEntry, PayloadEntry}, DbResult, }; use strata_primitives::buf::Buf32; @@ -36,6 +36,9 @@ inst_ops! 
{ get_payload_entry_id(idx: u64) => Option; get_next_payload_idx() => u64; put_payload_entry(id: Buf32, entry: PayloadEntry) => (); + // Intent related methods + get_intent_by_id(id: Buf32) => Option; + put_intent_entry(id: Buf32, entry: IntentEntry) => (); } } @@ -47,6 +50,14 @@ fn get_payload_entry( payload_db.get_payload_by_id(id) } +fn get_intent_by_id( + ctx: &Context, + id: Buf32, +) -> DbResult> { + let payload_db = ctx.db.payload_db(); + payload_db.get_intent_by_id(id) +} + fn get_payload_entry_id( ctx: &Context, idx: u64, @@ -84,3 +95,12 @@ fn put_payload_entry( let payload_db = ctx.db.payload_db(); payload_db.put_payload_entry(id, entry) } + +fn put_intent_entry( + ctx: &Context, + id: Buf32, + entry: IntentEntry, +) -> DbResult<()> { + let payload_db = ctx.db.payload_db(); + payload_db.put_intent_entry(id, entry) +} From cc8ae5120b069e5410b9bdfe307a56e1e4ef67be Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Tue, 14 Jan 2025 17:03:14 +0545 Subject: [PATCH 04/17] Support multiple payloads as envelopes --- crates/btcio/src/test_utils.rs | 2 +- crates/btcio/src/writer/builder.rs | 12 ++++---- crates/btcio/src/writer/signer.rs | 4 +-- crates/btcio/src/writer/task.rs | 36 ++++++++--------------- crates/consensus-logic/src/duty/worker.rs | 2 +- crates/l1tx/src/envelope/builder.rs | 26 +++++++++++----- crates/l1tx/src/filter.rs | 27 ++++++++--------- 7 files changed, 54 insertions(+), 55 deletions(-) diff --git a/crates/btcio/src/test_utils.rs b/crates/btcio/src/test_utils.rs index 90a621e9b..409cb774f 100644 --- a/crates/btcio/src/test_utils.rs +++ b/crates/btcio/src/test_utils.rs @@ -219,7 +219,7 @@ pub fn generate_envelope_script_test( params: Arc, version: u8, ) -> anyhow::Result { - build_envelope_script(params.as_ref(), &envelope_data, version) + build_envelope_script(params.as_ref(), &[envelope_data], version) } pub fn build_reveal_transaction_test( diff --git a/crates/btcio/src/writer/builder.rs b/crates/btcio/src/writer/builder.rs index 80ad21c3d..541fbf474 100644 --- a/crates/btcio/src/writer/builder.rs +++ b/crates/btcio/src/writer/builder.rs @@ -52,7 +52,7 @@ pub enum EnvelopeError { // dependencies on `tx-parser`, we include {btcio, feature="strata_test_utils"} , so cyclic // dependency doesn't happen pub async fn build_envelope_txs( - payload: &L1Payload, + payloads: &[L1Payload], ctx: &WriterContext, ) -> anyhow::Result<(Transaction, Transaction)> { let network = ctx.client.network().await?; @@ -62,14 +62,14 @@ pub async fn build_envelope_txs( FeePolicy::Smart => ctx.client.estimate_smart_fee(1).await? 
* 2, FeePolicy::Fixed(val) => val, }; - create_envelope_transactions(ctx, payload, utxos, fee_rate, network) + create_envelope_transactions(ctx, payloads, utxos, fee_rate, network) .map_err(|e| anyhow::anyhow!(e.to_string())) } #[allow(clippy::too_many_arguments)] pub fn create_envelope_transactions( ctx: &WriterContext, - payload: &L1Payload, + payloads: &[L1Payload], utxos: Vec, fee_rate: u64, network: Network, @@ -80,7 +80,7 @@ pub fn create_envelope_transactions( // Start creating envelope content let reveal_script = - build_reveal_script(ctx.params.as_ref(), &public_key, payload, ENVELOPE_VERSION)?; + build_reveal_script(ctx.params.as_ref(), &public_key, payloads, ENVELOPE_VERSION)?; // Create spend info for tapscript let taproot_spend_info = TaprootBuilder::new() @@ -381,7 +381,7 @@ pub fn generate_key_pair() -> Result { fn build_reveal_script( params: &Params, taproot_public_key: &XOnlyPublicKey, - envelope_data: &L1Payload, + payloads: &[L1Payload], version: u8, ) -> Result { let mut script_bytes = script::Builder::new() @@ -389,7 +389,7 @@ fn build_reveal_script( .push_opcode(OP_CHECKSIG) .into_script() .into_bytes(); - let script = build_envelope_script(params, envelope_data, version)?; + let script = build_envelope_script(params, payloads, version)?; script_bytes.extend(script.into_bytes()); Ok(ScriptBuf::from(script_bytes)) } diff --git a/crates/btcio/src/writer/signer.rs b/crates/btcio/src/writer/signer.rs index e79d157cd..16932b1b3 100644 --- a/crates/btcio/src/writer/signer.rs +++ b/crates/btcio/src/writer/signer.rs @@ -25,7 +25,7 @@ pub async fn create_and_sign_payload_envelopes( ctx: Arc>, ) -> Result<(Buf32, Buf32), EnvelopeError> { trace!("Creating and signing payload envelopes"); - let (commit, reveal) = build_envelope_txs(&payloadentry.payload, ctx.as_ref()).await?; + let (commit, reveal) = build_envelope_txs(&payloadentry.payloads, ctx.as_ref()).await?; let ctxid = commit.compute_txid(); debug!(commit_txid = ?ctxid, "Signing commit transaction"); @@ -81,7 +81,7 @@ mod test { assert_eq!(entry.commit_txid, Buf32::zero()); assert_eq!(entry.reveal_txid, Buf32::zero()); - let intent_hash = hash::raw(entry.payload.data()); + let intent_hash = hash::raw(entry.payloads.data()); iops.put_payload_entry_async(intent_hash, entry.clone()) .await .unwrap(); diff --git a/crates/btcio/src/writer/task.rs b/crates/btcio/src/writer/task.rs index 47ffb1719..a12769066 100644 --- a/crates/btcio/src/writer/task.rs +++ b/crates/btcio/src/writer/task.rs @@ -4,7 +4,7 @@ use bitcoin::Address; use strata_config::btcio::WriterConfig; use strata_db::{ traits::SequencerDatabase, - types::{L1TxStatus, PayloadEntry, PayloadL1Status}, + types::{IntentEntry, L1TxStatus, PayloadEntry, PayloadL1Status}, }; use strata_primitives::{ l1::payload::{PayloadDest, PayloadIntent}, @@ -40,20 +40,15 @@ impl EnvelopeHandle { return Ok(()); } - let entry = PayloadEntry::new_unsigned(intent.payload().clone()); + let id = *intent.commitment(); debug!(commitment = %intent.commitment(), "Received intent"); - if self - .ops - .get_payload_entry_blocking(*intent.commitment())? - .is_some() - { - warn!(commitment = %intent.commitment(), "Received duplicate intent"); + if self.ops.get_intent_by_id_blocking(id)?.is_some() { + warn!(commitment = %id, "Received duplicate intent"); return Ok(()); } + let entry = IntentEntry::new_unbundled(intent); - Ok(self - .ops - .put_payload_entry_blocking(*intent.commitment(), entry)?) + Ok(self.ops.put_intent_entry_blocking(id, entry)?) 
} pub async fn submit_intent_async(&self, intent: PayloadIntent) -> anyhow::Result<()> { @@ -62,22 +57,15 @@ impl EnvelopeHandle { return Ok(()); } - let entry = PayloadEntry::new_unsigned(intent.payload().clone()); + let id = *intent.commitment(); debug!(commitment = %intent.commitment(), "Received intent"); - - if self - .ops - .get_payload_entry_async(*intent.commitment()) - .await? - .is_some() - { - warn!(commitment = %intent.commitment(), "Received duplicate intent"); + if self.ops.get_intent_by_id_async(id).await?.is_some() { + warn!(commitment = %id, "Received duplicate intent"); return Ok(()); } - Ok(self - .ops - .put_payload_entry_async(*intent.commitment(), entry) - .await?) + let entry = IntentEntry::new_unbundled(intent); + + Ok(self.ops.put_intent_entry_async(id, entry).await?) } } diff --git a/crates/consensus-logic/src/duty/worker.rs b/crates/consensus-logic/src/duty/worker.rs index 78c289356..4b567bbc3 100644 --- a/crates/consensus-logic/src/duty/worker.rs +++ b/crates/consensus-logic/src/duty/worker.rs @@ -410,7 +410,7 @@ fn perform_duty( Ok(()) } Duty::CommitBatch(data) => { - info!(data = ?data, "commit batch"); + info!(data = ?data, "commit batch duty"); let checkpoint = check_and_get_batch_checkpoint(data, checkpoint_handle, pool, params.as_ref())?; diff --git a/crates/l1tx/src/envelope/builder.rs b/crates/l1tx/src/envelope/builder.rs index 2fbeb8207..1dae598b4 100644 --- a/crates/l1tx/src/envelope/builder.rs +++ b/crates/l1tx/src/envelope/builder.rs @@ -16,10 +16,23 @@ use tracing::*; // Generates a [`ScriptBuf`] that consists of `OP_IF .. OP_ENDIF` block pub fn build_envelope_script( params: &Params, - envelope_data: &L1Payload, + payloads: &[L1Payload], version: u8, ) -> anyhow::Result { - let tag = get_payload_type_tag(envelope_data.payload_type(), params)?; + let mut bytes = Vec::new(); + for payload in payloads { + let script_bytes = build_payload_envelope(params, payload, version)?; + bytes.extend(script_bytes); + } + Ok(ScriptBuf::from_bytes(bytes)) +} + +fn build_payload_envelope( + params: &Params, + payload: &L1Payload, + version: u8, +) -> anyhow::Result> { + let tag = get_payload_type_tag(payload.payload_type(), params)?; let mut builder = script::Builder::new() .push_opcode(OP_FALSE) .push_opcode(OP_IF) @@ -28,18 +41,17 @@ pub fn build_envelope_script( .push_slice(PushBytesBuf::from(version.to_be_bytes())) // Insert size .push_slice(PushBytesBuf::from( - (envelope_data.data().len() as u32).to_be_bytes(), + (payload.data().len() as u32).to_be_bytes(), )); // Insert actual data - trace!(batchdata_size = %envelope_data.data().len(), "Inserting batch data"); - for chunk in envelope_data.data().chunks(520) { + trace!(batchdata_size = %payload.data().len(), "Inserting batch data"); + for chunk in payload.data().chunks(520) { trace!(size=%chunk.len(), "inserting chunk"); builder = builder.push_slice(PushBytesBuf::try_from(chunk.to_vec())?); } builder = builder.push_opcode(OP_ENDIF); - - Ok(builder.into_script()) + Ok(builder.as_bytes().to_vec()) } fn get_payload_type_tag( diff --git a/crates/l1tx/src/filter.rs b/crates/l1tx/src/filter.rs index d0f3fd0e0..919ae5d56 100644 --- a/crates/l1tx/src/filter.rs +++ b/crates/l1tx/src/filter.rs @@ -117,8 +117,7 @@ mod test { const OTHER_ADDR: &str = "bcrt1q6u6qyya3sryhh42lahtnz2m7zuufe7dlt8j0j5"; /// Helper function to create filter config - fn create_tx_filter_config() -> TxFilterConfig { - let params = gen_params(); + fn create_tx_filter_config(params: &Params) -> TxFilterConfig { 
TxFilterConfig::derive_from(params.rollup()).expect("can't get filter config") } @@ -197,9 +196,9 @@ mod test { #[test] fn test_filter_relevant_txs_with_rollup_envelope() { // Test with valid name - let filter_config = create_tx_filter_config(); - let params: Params = gen_params(); + let filter_config = create_tx_filter_config(¶ms); + let tx = create_checkpoint_envelope_tx(params.clone().into()); let block = create_test_block(vec![tx]); @@ -225,7 +224,7 @@ mod test { let tx2 = create_test_tx(vec![create_test_txout(10000, &parse_addr(OTHER_ADDR))]); let params = gen_params(); let block = create_test_block(vec![tx1, tx2]); - let filter_config = create_tx_filter_config(); + let filter_config = create_tx_filter_config(¶ms); let txids: Vec = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config) .iter() @@ -236,8 +235,8 @@ mod test { #[test] fn test_filter_relevant_txs_multiple_matches() { - let filter_config = create_tx_filter_config(); let params: Params = gen_params(); + let filter_config = create_tx_filter_config(¶ms); let tx1 = create_checkpoint_envelope_tx(params.clone().into()); let tx2 = create_test_tx(vec![create_test_txout(100, &parse_addr(OTHER_ADDR))]); let tx3 = create_checkpoint_envelope_tx(params.clone().into()); @@ -254,10 +253,10 @@ mod test { #[test] fn test_filter_relevant_txs_deposit() { - let filter_config = create_tx_filter_config(); + let params = gen_params(); + let filter_config = create_tx_filter_config(¶ms); let deposit_config = filter_config.deposit_config.clone(); let ee_addr = vec![1u8; 20]; // Example EVM address - let params = gen_params(); let deposit_script = build_test_deposit_script(deposit_config.magic_bytes.clone(), ee_addr.clone()); @@ -292,9 +291,9 @@ mod test { #[test] fn test_filter_relevant_txs_deposit_request() { - let filter_config = create_tx_filter_config(); - let mut deposit_config = filter_config.deposit_config.clone(); let params = gen_params(); + let filter_config = create_tx_filter_config(¶ms); + let mut deposit_config = filter_config.deposit_config.clone(); let extra_amt = 10000; deposit_config.deposit_amount += extra_amt; let dest_addr = vec![2u8; 20]; // Example EVM address @@ -338,9 +337,9 @@ mod test { #[test] fn test_filter_relevant_txs_no_deposit() { - let filter_config = create_tx_filter_config(); - let deposit_config = filter_config.deposit_config.clone(); let params = gen_params(); + let filter_config = create_tx_filter_config(¶ms); + let deposit_config = filter_config.deposit_config.clone(); let irrelevant_tx = create_test_deposit_tx( Amount::from_sat(deposit_config.deposit_amount), &test_taproot_addr().address().script_pubkey(), @@ -359,9 +358,9 @@ mod test { #[test] fn test_filter_relevant_txs_multiple_deposits() { - let filter_config = create_tx_filter_config(); - let deposit_config = filter_config.deposit_config.clone(); let params = gen_params(); + let filter_config = create_tx_filter_config(¶ms); + let deposit_config = filter_config.deposit_config.clone(); let dest_addr1 = vec![3u8; 20]; let dest_addr2 = vec![4u8; 20]; From f9dd1593242b766cf16f98d551c09f645d5b0247 Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Tue, 14 Jan 2025 17:03:34 +0545 Subject: [PATCH 05/17] DB changes for payload entries --- bin/strata-client/src/main.rs | 4 +- crates/btcio/src/reader/query.rs | 4 - crates/btcio/src/writer/task.rs | 27 ++---- crates/db/src/traits.rs | 13 +-- crates/db/src/types.rs | 10 +- .../proof-impl/btc-blockspace/src/filter.rs | 2 +- crates/rocksdb-store/src/lib.rs | 6 +- crates/rocksdb-store/src/sequencer/db.rs | 
88 ++++++----------- crates/rocksdb-store/src/sequencer/schemas.rs | 9 +- crates/storage/src/ops/envelope.rs | 94 +------------------ 10 files changed, 64 insertions(+), 193 deletions(-) diff --git a/bin/strata-client/src/main.rs b/bin/strata-client/src/main.rs index 08e4d621e..10632c88a 100644 --- a/bin/strata-client/src/main.rs +++ b/bin/strata-client/src/main.rs @@ -18,7 +18,7 @@ use strata_consensus_logic::{ sync_manager::{self, SyncManager}, }; use strata_db::{ - traits::{BroadcastDatabase, ChainstateDatabase, Database}, + traits::{BroadcastDatabase, ChainstateDatabase, Database, SequencerDatabase}, DbError, }; use strata_eectl::engine::ExecEngineCtl; @@ -417,7 +417,7 @@ fn start_sequencer_tasks( Arc::new(btcio_config.writer.clone()), params.clone(), sequencer_bitcoin_address, - seq_db, + SequencerDatabase::payload_db(seq_db.as_ref()).clone(), status_channel.clone(), pool.clone(), broadcast_handle.clone(), diff --git a/crates/btcio/src/reader/query.rs b/crates/btcio/src/reader/query.rs index b78031bf0..1c35c719a 100644 --- a/crates/btcio/src/reader/query.rs +++ b/crates/btcio/src/reader/query.rs @@ -442,11 +442,7 @@ mod test { let l1status: L1Status = gen.generate(); let status_channel = StatusChannel::new(cls, l1status, Some(chs)); let params = Arc::new(gen_params()); -<<<<<<< HEAD let config = Arc::new(ReaderConfig::default()); -======= - let config = Arc::new(BtcioConfig::default()); ->>>>>>> bb9df24b (Some cleanups) let client = Arc::new(TestBitcoinClient::new(1)); ReaderContext { event_tx, diff --git a/crates/btcio/src/writer/task.rs b/crates/btcio/src/writer/task.rs index a12769066..1971dcd4f 100644 --- a/crates/btcio/src/writer/task.rs +++ b/crates/btcio/src/writer/task.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration}; use bitcoin::Address; use strata_config::btcio::WriterConfig; use strata_db::{ - traits::SequencerDatabase, + traits::L1PayloadDatabase, types::{IntentEntry, L1TxStatus, PayloadEntry, PayloadL1Status}, }; use strata_primitives::{ @@ -78,7 +78,7 @@ impl EnvelopeHandle { /// /// [`Result`](anyhow::Result) #[allow(clippy::too_many_arguments)] -pub fn start_envelope_task( +pub fn start_envelope_task( executor: &TaskExecutor, bitcoin_client: Arc, config: Arc, @@ -140,7 +140,7 @@ fn get_next_payloadidx_to_watch(insc_ops: &EnvelopeDataOps) -> anyhow::Result( - next_blbidx_to_watch: u64, + next_watch_payload_idx: u64, context: Arc>, insc_ops: Arc, broadcast_handle: Arc, @@ -149,7 +149,7 @@ pub async fn watcher_task( let interval = tokio::time::interval(Duration::from_millis(context.config.write_poll_dur_ms)); tokio::pin!(interval); - let mut curr_payloadidx = next_blbidx_to_watch; + let mut curr_payloadidx = next_watch_payload_idx; loop { interval.as_mut().tick().await; @@ -174,7 +174,8 @@ pub async fn watcher_task( updated_entry.status = PayloadL1Status::Unpublished; updated_entry.commit_txid = cid; updated_entry.reveal_txid = rid; - update_existing_entry(curr_payloadidx, updated_entry, &insc_ops) + insc_ops + .put_payload_entry_async(curr_payloadidx, updated_entry) .await?; debug!(%curr_payloadidx, "Signed payload"); @@ -218,7 +219,8 @@ pub async fn watcher_task( // Update payloadentry with new status let mut updated_entry = payloadentry.clone(); updated_entry.status = new_status.clone(); - update_existing_entry(curr_payloadidx, updated_entry, &insc_ops) + insc_ops + .put_payload_entry_async(curr_payloadidx, updated_entry) .await?; if new_status == PayloadL1Status::Finalized { @@ -229,7 +231,8 @@ pub async fn watcher_task( warn!(%curr_payloadidx, "Corresponding 
commit/reveal entry for payloadentry not found in broadcast db. Sign and create transactions again.");
                 let mut updated_entry = payloadentry.clone();
                 updated_entry.status = PayloadL1Status::Unsigned;
-                update_existing_entry(curr_payloadidx, updated_entry, &insc_ops)
+                insc_ops
+                    .put_payload_entry_async(curr_payloadidx, updated_entry)
                     .await?;
             }
         }
@@ -261,16 +264,6 @@ async fn update_l1_status(
     }
 }
 
-async fn update_existing_entry(
-    idx: u64,
-    updated_entry: PayloadEntry,
-    insc_ops: &EnvelopeDataOps,
-) -> anyhow::Result<()> {
-    let msg = format!("Expect to find payloadentry {idx} in db");
-    let id = insc_ops.get_payload_entry_id_async(idx).await?.expect(&msg);
-    Ok(insc_ops.put_payload_entry_async(id, updated_entry).await?)
-}
-
 /// Determine the status of the `PayloadEntry` based on the status of its commit and reveal
 /// transactions in bitcoin.
 fn determine_payload_next_status(
diff --git a/crates/db/src/traits.rs b/crates/db/src/traits.rs
index 3cfce12bf..211154a6d 100644
--- a/crates/db/src/traits.rs
+++ b/crates/db/src/traits.rs
@@ -258,16 +258,13 @@ pub trait SequencerDatabase {
 /// database and to fetch [`PayloadEntry`] and indices from the database
 pub trait L1PayloadDatabase {
     /// Store the [`PayloadEntry`].
-    fn put_payload_entry(&self, payloadid: Buf32, payloadentry: PayloadEntry) -> DbResult<()>;
+    fn put_payload_entry(&self, idx: u64, payloadentry: PayloadEntry) -> DbResult<()>;
 
-    /// Get a [`PayloadEntry`] by its hash
-    fn get_payload_by_id(&self, id: Buf32) -> DbResult<Option<PayloadEntry>>;
+    /// Get a [`PayloadEntry`] by its index.
+    fn get_payload_entry_by_idx(&self, idx: u64) -> DbResult<Option<PayloadEntry>>;
 
-    /// Get the payload ID corresponding to the index
-    fn get_payload_id(&self, payloadidx: u64) -> DbResult<Option<Buf32>>;
-
-    /// Get the last payload index
-    fn get_last_payload_idx(&self) -> DbResult<Option<u64>>;
+    /// Get the next payload index
+    fn get_next_payload_idx(&self) -> DbResult<u64>;
 
     /// Store the [`IntentEntry`].
     fn put_intent_entry(&self, payloadid: Buf32, payloadentry: IntentEntry) -> DbResult<()>;
diff --git a/crates/db/src/types.rs b/crates/db/src/types.rs
index 1e7386cac..728bf86d6 100644
--- a/crates/db/src/types.rs
+++ b/crates/db/src/types.rs
@@ -50,7 +50,7 @@ pub enum IntentStatus {
 /// Represents data for a payload we're still planning to post to L1.
 #[derive(Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, Arbitrary)]
 pub struct PayloadEntry {
-    pub payload: L1Payload,
+    pub payloads: Vec<L1Payload>,
     pub commit_txid: Buf32,
     pub reveal_txid: Buf32,
     pub status: PayloadL1Status,
@@ -58,13 +58,13 @@ impl PayloadEntry {
     pub fn new(
-        payload: L1Payload,
+        payloads: Vec<L1Payload>,
         commit_txid: Buf32,
         reveal_txid: Buf32,
         status: PayloadL1Status,
     ) -> Self {
         Self {
-            payload,
+            payloads,
             commit_txid,
             reveal_txid,
             status,
@@ -76,10 +76,10 @@ impl PayloadEntry {
     /// NOTE: This won't have commit - reveal pairs associated with it.
     /// Because it is better to defer gathering utxos as late as possible to prevent being spent
     /// by others. Those will be created and signed in a single step.
-    pub fn new_unsigned(payload: L1Payload) -> Self {
+    pub fn new_unsigned(payloads: Vec<L1Payload>) -> Self {
         let cid = Buf32::zero();
         let rid = Buf32::zero();
-        Self::new(payload, cid, rid, PayloadL1Status::Unsigned)
+        Self::new(payloads, cid, rid, PayloadL1Status::Unsigned)
     }
 }
 
diff --git a/crates/proof-impl/btc-blockspace/src/filter.rs b/crates/proof-impl/btc-blockspace/src/filter.rs
index 6b8dd31e8..499a22659 100644
--- a/crates/proof-impl/btc-blockspace/src/filter.rs
+++ b/crates/proof-impl/btc-blockspace/src/filter.rs
@@ -17,7 +17,7 @@ pub fn extract_relevant_info(
     let mut deposits = Vec::new();
     let mut prev_checkpoint = None;
 
-    let relevant_txs = filter_protocol_op_tx_refs(block, rollup_params, &filter_config);
+    let relevant_txs = filter_protocol_op_tx_refs(block, rollup_params, filter_config);
 
     for tx in relevant_txs {
         match tx.proto_op() {
diff --git a/crates/rocksdb-store/src/lib.rs b/crates/rocksdb-store/src/lib.rs
index 7cd6766da..ddc14e6ea 100644
--- a/crates/rocksdb-store/src/lib.rs
+++ b/crates/rocksdb-store/src/lib.rs
@@ -36,8 +36,8 @@ pub const STORE_COLUMN_FAMILIES: &[ColumnFamilyName] = &[
     L2BlockHeightSchema::COLUMN_FAMILY_NAME,
     WriteBatchSchema::COLUMN_FAMILY_NAME,
     // Seqdb schemas
-    SeqBlobIdSchema::COLUMN_FAMILY_NAME,
-    SeqBlobSchema::COLUMN_FAMILY_NAME,
+    SeqPayloadSchema::COLUMN_FAMILY_NAME,
+    SeqIntentSchema::COLUMN_FAMILY_NAME,
     // Bcast schemas
     BcastL1TxIdSchema::COLUMN_FAMILY_NAME,
     BcastL1TxSchema::COLUMN_FAMILY_NAME,
@@ -90,7 +90,7 @@ use rockbound::{schema::ColumnFamilyName, Schema};
 pub use sequencer::db::RBSeqBlobDb;
 use sequencer::{
     db::SequencerDB,
-    schemas::{SeqBlobIdSchema, SeqBlobSchema},
+    schemas::{SeqIntentSchema, SeqPayloadSchema},
 };
 pub use sync_event::db::SyncEventDb;
 
diff --git a/crates/rocksdb-store/src/sequencer/db.rs b/crates/rocksdb-store/src/sequencer/db.rs
index 31c0eda32..28dd568b6 100644
--- a/crates/rocksdb-store/src/sequencer/db.rs
+++ b/crates/rocksdb-store/src/sequencer/db.rs
@@ -9,8 +9,8 @@ use strata_db::{
 };
 use strata_primitives::buf::Buf32;
 
-use super::schemas::{SeqBlobIdSchema, SeqBlobSchema, SeqIntentSchema};
-use crate::{sequence::get_next_id, DbOpsConfig};
+use super::schemas::{SeqIntentSchema, SeqPayloadSchema};
+use crate::DbOpsConfig;
 
 pub struct RBSeqBlobDb {
     db: Arc<OptimisticTransactionDB>,
@@ -28,36 +28,26 @@ impl RBSeqBlobDb {
     }
 }
 
 impl L1PayloadDatabase for RBSeqBlobDb {
-    fn put_payload_entry(&self, payload_id: Buf32, entry: PayloadEntry) -> DbResult<()> {
+    fn put_payload_entry(&self, idx: u64, entry: PayloadEntry) -> DbResult<()> {
         self.db
             .with_optimistic_txn(
                 rockbound::TransactionRetry::Count(self.ops.retry_count),
                 |tx| -> Result<(), DbError> {
-                    // If new, increment idx
-                    if tx.get::<SeqBlobSchema>(&payload_id)?.is_none() {
-                        let idx = get_next_id::<SeqBlobIdSchema>(tx)?;
-
-                        tx.put::<SeqBlobIdSchema>(&idx, &payload_id)?;
-                    }
-
-                    tx.put::<SeqBlobSchema>(&payload_id, &entry)?;
-
+                    tx.put::<SeqPayloadSchema>(&idx, &entry)?;
                     Ok(())
                 },
             )
             .map_err(|e| DbError::TransactionError(e.to_string()))
     }
 
-    fn get_payload_by_id(&self, id: Buf32) -> DbResult<Option<PayloadEntry>> {
-        Ok(self.db.get::<SeqBlobSchema>(&id)?)
+    fn get_payload_entry_by_idx(&self, idx: u64) -> DbResult<Option<PayloadEntry>> {
+        Ok(self.db.get::<SeqPayloadSchema>(&idx)?)
     }
 
-    fn get_last_payload_idx(&self) -> DbResult<Option<u64>> {
-        Ok(rockbound::utils::get_last::<SeqBlobIdSchema>(&*self.db)?.map(|(x, _)| x))
-    }
-
-    fn get_payload_id(&self, blobidx: u64) -> DbResult<Option<Buf32>> {
-        Ok(self.db.get::<SeqBlobIdSchema>(&blobidx)?)
+    fn get_next_payload_idx(&self) -> DbResult<u64> {
+        Ok(rockbound::utils::get_last::<SeqPayloadSchema>(&*self.db)?
+ .map(|(x, _)| x + 1) + .unwrap_or(0)) } fn put_intent_entry(&self, intent_id: Buf32, intent_entry: IntentEntry) -> DbResult<()> { @@ -116,11 +106,11 @@ mod tests { let blob_hash: Buf32 = [0; 32].into(); seq_db.put_payload_entry(blob_hash, blob.clone()).unwrap(); - let idx = seq_db.get_last_payload_idx().unwrap().unwrap(); + let idx = seq_db.get_next_payload_idx().unwrap().unwrap(); assert_eq!(seq_db.get_payload_id(idx).unwrap(), Some(blob_hash)); - let stored_blob = seq_db.get_payload_by_id(blob_hash).unwrap(); + let stored_blob = seq_db.get_payload_entry_by_idx(blob_hash).unwrap(); assert_eq!(stored_blob, Some(blob)); } @@ -140,65 +130,49 @@ mod tests { } #[test] - fn test_update_blob() { + fn test_update_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); let seq_db = RBSeqBlobDb::new(db, db_ops); - let blob: PayloadEntry = ArbitraryGenerator::new().generate(); - let blob_hash: Buf32 = [0; 32].into(); + let entry: PayloadEntry = ArbitraryGenerator::new().generate(); // Insert - seq_db.put_payload_entry(blob_hash, blob.clone()).unwrap(); + seq_db.put_payload_entry(0, entry.clone()).unwrap(); - let updated_blob: PayloadEntry = ArbitraryGenerator::new().generate(); + let updated_entry: PayloadEntry = ArbitraryGenerator::new().generate(); // Update existing idx - seq_db - .put_payload_entry(blob_hash, updated_blob.clone()) - .unwrap(); - let retrieved_blob = seq_db.get_payload_by_id(blob_hash).unwrap().unwrap(); - assert_eq!(updated_blob, retrieved_blob); - } - - #[test] - fn test_get_blob_by_id() { - let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBSeqBlobDb::new(db, db_ops); - - let blob: PayloadEntry = ArbitraryGenerator::new().generate(); - let blob_hash: Buf32 = [0; 32].into(); - - seq_db.put_payload_entry(blob_hash, blob.clone()).unwrap(); - - let retrieved = seq_db.get_payload_by_id(blob_hash).unwrap().unwrap(); - assert_eq!(retrieved, blob); + seq_db.put_payload_entry(0, updated_entry.clone()).unwrap(); + let retrieved_entry = seq_db.get_payload_entry_by_idx(0).unwrap().unwrap(); + assert_eq!(updated_entry, retrieved_entry); } #[test] - fn test_get_last_blob_idx() { + fn test_get_last_entry_idx() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); let seq_db = RBSeqBlobDb::new(db, db_ops); let blob: PayloadEntry = ArbitraryGenerator::new().generate(); - let blob_hash: Buf32 = [0; 32].into(); - let last_blob_idx = seq_db.get_last_payload_idx().unwrap(); + let next_blob_idx = seq_db.get_next_payload_idx().unwrap(); assert_eq!( - last_blob_idx, None, + next_blob_idx, 0, "There is no last blobidx in the beginning" ); - seq_db.put_payload_entry(blob_hash, blob.clone()).unwrap(); - // Now the last idx is 0 + seq_db + .put_payload_entry(next_blob_idx, blob.clone()) + .unwrap(); + let next_blob_idx = seq_db.get_next_payload_idx().unwrap(); + // Now the next idx is 1 let blob: PayloadEntry = ArbitraryGenerator::new().generate(); - let blob_hash: Buf32 = [1; 32].into(); - seq_db.put_payload_entry(blob_hash, blob.clone()).unwrap(); - // Now the last idx is 1 + seq_db.put_payload_entry(1, blob.clone()).unwrap(); + let next_blob_idx = seq_db.get_next_payload_idx().unwrap(); + // Now the last idx is 2 - let last_blob_idx = seq_db.get_last_payload_idx().unwrap(); - assert_eq!(last_blob_idx, Some(1)); + assert_eq!(next_blob_idx, 2); } // Intent related tests diff --git a/crates/rocksdb-store/src/sequencer/schemas.rs b/crates/rocksdb-store/src/sequencer/schemas.rs index f42e6cb73..30a8559f2 100644 --- a/crates/rocksdb-store/src/sequencer/schemas.rs +++ 
b/crates/rocksdb-store/src/sequencer/schemas.rs @@ -7,13 +7,8 @@ use crate::{ }; define_table_with_seek_key_codec!( - /// A table to store idx-> blobid mapping - (SeqBlobIdSchema) u64 => Buf32 -); - -define_table_with_default_codec!( - /// A table to store blobid -> blob mapping - (SeqBlobSchema) Buf32 => PayloadEntry + /// A table to store idx-> payload entry mapping + (SeqPayloadSchema) u64 => PayloadEntry ); define_table_with_default_codec!( diff --git a/crates/storage/src/ops/envelope.rs b/crates/storage/src/ops/envelope.rs index 9ae041a1d..7832e154b 100644 --- a/crates/storage/src/ops/envelope.rs +++ b/crates/storage/src/ops/envelope.rs @@ -3,104 +3,20 @@ use std::sync::Arc; use strata_db::{ - traits::{L1PayloadDatabase, SequencerDatabase}, + traits::L1PayloadDatabase, types::{IntentEntry, PayloadEntry}, DbResult, }; use strata_primitives::buf::Buf32; -use threadpool::ThreadPool; use crate::exec::*; -/// Database context for an database operation interface. -pub struct Context { - db: Arc, -} - -impl Context { - /// Create a `Context` for [`EnvelopeDataOps`] - pub fn new(db: Arc) -> Self { - Self { db } - } - - /// Convert to [`EnvelopeDataOps`] using a [`ThreadPool`] - pub fn into_ops(self, pool: ThreadPool) -> EnvelopeDataOps { - EnvelopeDataOps::new(pool, Arc::new(self)) - } -} - -inst_ops! { - (EnvelopeDataOps, Context) { - get_payload_entry(id: Buf32) => Option; +inst_ops_simple! { + ( => EnvelopeDataOps) { + put_payload_entry(idx: u64, payloadentry: PayloadEntry) => (); get_payload_entry_by_idx(idx: u64) => Option; - get_payload_entry_id(idx: u64) => Option; get_next_payload_idx() => u64; - put_payload_entry(id: Buf32, entry: PayloadEntry) => (); - // Intent related methods - get_intent_by_id(id: Buf32) => Option; put_intent_entry(id: Buf32, entry: IntentEntry) => (); + get_intent_by_id(id: Buf32) => Option; } } - -fn get_payload_entry( - ctx: &Context, - id: Buf32, -) -> DbResult> { - let payload_db = ctx.db.payload_db(); - payload_db.get_payload_by_id(id) -} - -fn get_intent_by_id( - ctx: &Context, - id: Buf32, -) -> DbResult> { - let payload_db = ctx.db.payload_db(); - payload_db.get_intent_by_id(id) -} - -fn get_payload_entry_id( - ctx: &Context, - idx: u64, -) -> DbResult> { - let payload_db = ctx.db.payload_db(); - payload_db.get_payload_id(idx) -} - -fn get_payload_entry_by_idx( - ctx: &Context, - idx: u64, -) -> DbResult> { - let payload_db = ctx.db.payload_db(); - let id_res = payload_db.get_payload_id(idx)?; - match id_res { - Some(id) => payload_db.get_payload_by_id(id), - None => Ok(None), - } -} - -/// Returns zero if there are no elements else last index incremented by 1. -fn get_next_payload_idx(ctx: &Context) -> DbResult { - let payload_db = ctx.db.payload_db(); - Ok(payload_db - .get_last_payload_idx()? 
- .map(|i| i + 1) - .unwrap_or_default()) -} - -fn put_payload_entry( - ctx: &Context, - id: Buf32, - entry: PayloadEntry, -) -> DbResult<()> { - let payload_db = ctx.db.payload_db(); - payload_db.put_payload_entry(id, entry) -} - -fn put_intent_entry( - ctx: &Context, - id: Buf32, - entry: IntentEntry, -) -> DbResult<()> { - let payload_db = ctx.db.payload_db(); - payload_db.put_intent_entry(id, entry) -} From 012b28c8c88929a129c20b280cd6d974b83e8c45 Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Wed, 15 Jan 2025 16:51:11 +0545 Subject: [PATCH 06/17] Remove sequencerdb --- bin/strata-client/src/main.rs | 16 +++--- crates/btcio/src/writer/test_utils.rs | 6 +-- crates/db/src/traits.rs | 10 ---- crates/rocksdb-store/src/lib.rs | 20 +++----- crates/rocksdb-store/src/sequencer/db.rs | 50 ++++++------------- crates/rocksdb-store/src/sequencer/schemas.rs | 4 +- 6 files changed, 37 insertions(+), 69 deletions(-) diff --git a/bin/strata-client/src/main.rs b/bin/strata-client/src/main.rs index 10632c88a..52e421e65 100644 --- a/bin/strata-client/src/main.rs +++ b/bin/strata-client/src/main.rs @@ -18,16 +18,15 @@ use strata_consensus_logic::{ sync_manager::{self, SyncManager}, }; use strata_db::{ - traits::{BroadcastDatabase, ChainstateDatabase, Database, SequencerDatabase}, + traits::{BroadcastDatabase, ChainstateDatabase, Database}, DbError, }; use strata_eectl::engine::ExecEngineCtl; use strata_evmexec::{engine::RpcExecEngineCtl, EngineRpcClient}; use strata_primitives::params::Params; use strata_rocksdb::{ - broadcaster::db::BroadcastDb, init_broadcaster_database, init_core_dbs, - init_sequencer_database, open_rocksdb_database, sequencer::db::SequencerDB, CommonDb, - DbOpsConfig, RBSeqBlobDb, ROCKSDB_NAME, + broadcaster::db::BroadcastDb, init_broadcaster_database, init_core_dbs, init_writer_database, + open_rocksdb_database, CommonDb, DbOpsConfig, RBPayloadDb, ROCKSDB_NAME, }; use strata_rpc_api::{ StrataAdminApiServer, StrataApiServer, StrataDebugApiServer, StrataSequencerApiServer, @@ -143,14 +142,15 @@ fn main_inner(args: Args) -> anyhow::Result<()> { ctx.bitcoin_client.clone(), params.clone(), ); - let seq_db = init_sequencer_database(rbdb.clone(), ops_config); + let writer_db = init_writer_database(rbdb.clone(), ops_config); + // TODO: split writer tasks from this start_sequencer_tasks( ctx.clone(), &config, sequencer_config, &executor, - seq_db, + writer_db, checkpoint_handle.clone(), broadcast_handle, &mut methods, @@ -373,7 +373,7 @@ fn start_sequencer_tasks( config: &Config, sequencer_config: &SequencerConfig, executor: &TaskExecutor, - seq_db: Arc>, + writer_db: Arc, checkpoint_handle: Arc, broadcast_handle: Arc, methods: &mut Methods, @@ -417,7 +417,7 @@ fn start_sequencer_tasks( Arc::new(btcio_config.writer.clone()), params.clone(), sequencer_bitcoin_address, - SequencerDatabase::payload_db(seq_db.as_ref()).clone(), + writer_db, status_channel.clone(), pool.clone(), broadcast_handle.clone(), diff --git a/crates/btcio/src/writer/test_utils.rs b/crates/btcio/src/writer/test_utils.rs index 7840dc6a1..e6a90f84d 100644 --- a/crates/btcio/src/writer/test_utils.rs +++ b/crates/btcio/src/writer/test_utils.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use strata_db::{traits::BroadcastDatabase, types::L1TxEntry}; use strata_rocksdb::{ broadcaster::db::BroadcastDb, sequencer::db::SequencerDB, test_utils::get_rocksdb_tmp_instance, - L1BroadcastDb, RBSeqBlobDb, + L1BroadcastDb, RBPayloadDb, }; use strata_storage::ops::{ envelope::{Context, EnvelopeDataOps}, @@ -13,9 +13,9 @@ use strata_storage::ops::{ 
use crate::broadcaster::L1BroadcastHandle; /// Returns [`Arc`] of [`SequencerDB`] for testing -pub fn get_db() -> Arc> { +pub fn get_db() -> Arc> { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seqdb = Arc::new(RBSeqBlobDb::new(db, db_ops)); + let seqdb = Arc::new(RBPayloadDb::new(db, db_ops)); Arc::new(SequencerDB::new(seqdb)) } diff --git a/crates/db/src/traits.rs b/crates/db/src/traits.rs index 211154a6d..224bd4053 100644 --- a/crates/db/src/traits.rs +++ b/crates/db/src/traits.rs @@ -244,16 +244,6 @@ pub trait CheckpointDatabase { fn put_batch_checkpoint(&self, batchidx: u64, entry: CheckpointEntry) -> DbResult<()>; } -/// NOTE: We might have to merge this with the [`Database`] -/// A trait encapsulating provider and store traits to interact with the underlying database for -/// [`PayloadEntry`] -pub trait SequencerDatabase { - // TODO: remove this, and possibly separate out/rename to writer db - type L1PayloadDB: L1PayloadDatabase; - - fn payload_db(&self) -> &Arc; -} - /// A trait encapsulating provider and store traits to create/update [`PayloadEntry`] in the /// database and to fetch [`PayloadEntry`] and indices from the database pub trait L1PayloadDatabase { diff --git a/crates/rocksdb-store/src/lib.rs b/crates/rocksdb-store/src/lib.rs index ddc14e6ea..2558494f4 100644 --- a/crates/rocksdb-store/src/lib.rs +++ b/crates/rocksdb-store/src/lib.rs @@ -35,9 +35,9 @@ pub const STORE_COLUMN_FAMILIES: &[ColumnFamilyName] = &[ L2BlockStatusSchema::COLUMN_FAMILY_NAME, L2BlockHeightSchema::COLUMN_FAMILY_NAME, WriteBatchSchema::COLUMN_FAMILY_NAME, - // Seqdb schemas - SeqPayloadSchema::COLUMN_FAMILY_NAME, - SeqIntentSchema::COLUMN_FAMILY_NAME, + // Payload/intent schemas + PayloadSchema::COLUMN_FAMILY_NAME, + IntentSchema::COLUMN_FAMILY_NAME, // Bcast schemas BcastL1TxIdSchema::COLUMN_FAMILY_NAME, BcastL1TxSchema::COLUMN_FAMILY_NAME, @@ -87,11 +87,8 @@ use l2::{ schemas::{L2BlockHeightSchema, L2BlockSchema, L2BlockStatusSchema}, }; use rockbound::{schema::ColumnFamilyName, Schema}; -pub use sequencer::db::RBSeqBlobDb; -use sequencer::{ - db::SequencerDB, - schemas::{SeqIntentSchema, SeqPayloadSchema}, -}; +pub use sequencer::db::RBPayloadDb; +use sequencer::schemas::{IntentSchema, PayloadSchema}; pub use sync_event::db::SyncEventDb; use crate::{ @@ -175,10 +172,9 @@ pub fn init_broadcaster_database( BroadcastDb::new(l1_broadcast_db.into()).into() } -pub fn init_sequencer_database( +pub fn init_writer_database( rbdb: Arc, ops_config: DbOpsConfig, -) -> Arc> { - let seqdb = RBSeqBlobDb::new(rbdb, ops_config).into(); - SequencerDB::new(seqdb).into() +) -> Arc { + RBPayloadDb::new(rbdb, ops_config).into() } diff --git a/crates/rocksdb-store/src/sequencer/db.rs b/crates/rocksdb-store/src/sequencer/db.rs index 28dd568b6..5ea506457 100644 --- a/crates/rocksdb-store/src/sequencer/db.rs +++ b/crates/rocksdb-store/src/sequencer/db.rs @@ -3,21 +3,21 @@ use std::sync::Arc; use rockbound::{OptimisticTransactionDB, SchemaDBOperationsExt}; use strata_db::{ errors::DbError, - traits::{L1PayloadDatabase, SequencerDatabase}, + traits::L1PayloadDatabase, types::{IntentEntry, PayloadEntry}, DbResult, }; use strata_primitives::buf::Buf32; -use super::schemas::{SeqIntentSchema, SeqPayloadSchema}; +use super::schemas::{IntentSchema, PayloadSchema}; use crate::DbOpsConfig; -pub struct RBSeqBlobDb { +pub struct RBPayloadDb { db: Arc, ops: DbOpsConfig, } -impl RBSeqBlobDb { +impl RBPayloadDb { /// Wraps an existing database handle. 
/// /// Assumes it was opened with column families as defined in `STORE_COLUMN_FAMILIES`. @@ -27,13 +27,13 @@ impl RBSeqBlobDb { } } -impl L1PayloadDatabase for RBSeqBlobDb { +impl L1PayloadDatabase for RBPayloadDb { fn put_payload_entry(&self, idx: u64, entry: PayloadEntry) -> DbResult<()> { self.db .with_optimistic_txn( rockbound::TransactionRetry::Count(self.ops.retry_count), |tx| -> Result<(), DbError> { - tx.put::(&idx, &entry)?; + tx.put::(&idx, &entry)?; Ok(()) }, ) @@ -41,11 +41,11 @@ impl L1PayloadDatabase for RBSeqBlobDb { } fn get_payload_entry_by_idx(&self, idx: u64) -> DbResult> { - Ok(self.db.get::(&idx)?) + Ok(self.db.get::(&idx)?) } fn get_next_payload_idx(&self) -> DbResult { - Ok(rockbound::utils::get_last::(&*self.db)? + Ok(rockbound::utils::get_last::(&*self.db)? .map(|(x, _)| x + 1) .unwrap_or(0)) } @@ -55,7 +55,7 @@ impl L1PayloadDatabase for RBSeqBlobDb { .with_optimistic_txn( rockbound::TransactionRetry::Count(self.ops.retry_count), |tx| -> Result<(), DbError> { - tx.put::(&intent_id, &intent_entry)?; + tx.put::(&intent_id, &intent_entry)?; Ok(()) }, @@ -64,25 +64,7 @@ impl L1PayloadDatabase for RBSeqBlobDb { } fn get_intent_by_id(&self, id: Buf32) -> DbResult> { - Ok(self.db.get::(&id)?) - } -} - -pub struct SequencerDB { - db: Arc, -} - -impl SequencerDB { - pub fn new(db: Arc) -> Self { - Self { db } - } -} - -impl SequencerDatabase for SequencerDB { - type L1PayloadDB = B; - - fn payload_db(&self) -> &Arc { - &self.db + Ok(self.db.get::(&id)?) } } @@ -100,7 +82,7 @@ mod tests { #[test] fn test_put_blob_new_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBSeqBlobDb::new(db, db_ops); + let seq_db = RBPayloadDb::new(db, db_ops); let blob: PayloadEntry = ArbitraryGenerator::new().generate(); let blob_hash: Buf32 = [0; 32].into(); @@ -117,7 +99,7 @@ mod tests { #[test] fn test_put_blob_existing_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBSeqBlobDb::new(db, db_ops); + let seq_db = RBPayloadDb::new(db, db_ops); let blob: PayloadEntry = ArbitraryGenerator::new().generate(); let blob_hash: Buf32 = [0; 32].into(); @@ -132,7 +114,7 @@ mod tests { #[test] fn test_update_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBSeqBlobDb::new(db, db_ops); + let seq_db = RBPayloadDb::new(db, db_ops); let entry: PayloadEntry = ArbitraryGenerator::new().generate(); @@ -150,7 +132,7 @@ mod tests { #[test] fn test_get_last_entry_idx() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBSeqBlobDb::new(db, db_ops); + let seq_db = RBPayloadDb::new(db, db_ops); let blob: PayloadEntry = ArbitraryGenerator::new().generate(); @@ -180,7 +162,7 @@ mod tests { #[test] fn test_put_intent_new_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBSeqBlobDb::new(db, db_ops); + let seq_db = RBPayloadDb::new(db, db_ops); let intent: IntentEntry = ArbitraryGenerator::new().generate(); let intent_id: Buf32 = [0; 32].into(); @@ -194,7 +176,7 @@ mod tests { #[test] fn test_put_intent_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBSeqBlobDb::new(db, db_ops); + let seq_db = RBPayloadDb::new(db, db_ops); let intent: IntentEntry = ArbitraryGenerator::new().generate(); let intent_id: Buf32 = [0; 32].into(); diff --git a/crates/rocksdb-store/src/sequencer/schemas.rs b/crates/rocksdb-store/src/sequencer/schemas.rs index 30a8559f2..0fac7412d 100644 --- a/crates/rocksdb-store/src/sequencer/schemas.rs +++ 
b/crates/rocksdb-store/src/sequencer/schemas.rs @@ -8,10 +8,10 @@ use crate::{ define_table_with_seek_key_codec!( /// A table to store idx-> payload entry mapping - (SeqPayloadSchema) u64 => PayloadEntry + (PayloadSchema) u64 => PayloadEntry ); define_table_with_default_codec!( /// A table to store intentid -> intent mapping - (SeqIntentSchema) Buf32 => IntentEntry + (IntentSchema) Buf32 => IntentEntry ); From b8c95dcbc5d3beb9390828bcfe7aa670c2d6b5ff Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Wed, 15 Jan 2025 17:51:42 +0545 Subject: [PATCH 07/17] btcio: Add payload bundler --- crates/btcio/src/writer/bundler.rs | 76 ++++++++++++++++++++++++++++++ crates/btcio/src/writer/mod.rs | 1 + crates/btcio/src/writer/task.rs | 20 ++++---- 3 files changed, 87 insertions(+), 10 deletions(-) create mode 100644 crates/btcio/src/writer/bundler.rs diff --git a/crates/btcio/src/writer/bundler.rs b/crates/btcio/src/writer/bundler.rs new file mode 100644 index 000000000..1454e78ee --- /dev/null +++ b/crates/btcio/src/writer/bundler.rs @@ -0,0 +1,76 @@ +use std::{sync::Arc, time::Duration}; + +use strata_db::types::{IntentEntry, IntentStatus, PayloadEntry}; +use strata_storage::ops::envelope::EnvelopeDataOps; +use tokio::time::sleep; +use tracing::warn; + +const BUNDLE_INTERVAL: u64 = 10; + +/// Periodically bundles unbundled intents into payload entries. +pub(crate) async fn bundler_task(ops: Arc) -> anyhow::Result<()> { + let mut last_idx = 0; + loop { + let (unbundled, new_idx) = get_unbundled_intents_after(last_idx, ops.as_ref()).await?; + process_unbundled_entries(ops.as_ref(), unbundled).await?; + last_idx = new_idx; + + let _ = sleep(Duration::from_secs(BUNDLE_INTERVAL)).await; + } +} + +/// Processes and bundles a list of unbundled intents into payload entries. +/// NOTE: The logic current is simply 1-1 mapping between intents and payloads, in future it can +/// be sophisticated. +async fn process_unbundled_entries( + ops: &EnvelopeDataOps, + unbundled: Vec, +) -> anyhow::Result<()> { + for mut entry in unbundled { + // NOTE: In future, the logic to create payload will be different. We need to group + // intents and create payload entries accordingly + let payload_entry = PayloadEntry::new_unsigned(vec![entry.payload().clone()]); + + // TODO: the following block till "Atomic Ends" should be atomic. + let idx = ops.get_next_payload_idx_async().await?; + ops.put_payload_entry_async(idx, payload_entry).await?; + + // Set the entry to be bundled so that it won't be processed next time. + entry.status = IntentStatus::Bundled(idx); + ops.put_intent_entry_async(*entry.intent.commitment(), entry) + .await?; + // Atomic Ends. + } + Ok(()) +} + +/// Retrieves unbundled intents after a given index in ascending order along with the latest +/// unbundled entry idx. +async fn get_unbundled_intents_after( + idx: u64, + ops: &EnvelopeDataOps, +) -> anyhow::Result<(Vec, u64)> { + let latest_idx = ops.get_next_payload_idx_async().await?.saturating_sub(1); + let mut curr_intent_idx = latest_idx; + let mut unbundled_intents = Vec::new(); + while curr_intent_idx > idx { + if let Some(intent_entry) = ops.get_intent_by_idx_async(curr_intent_idx).await? 
{ + match intent_entry.status { + IntentStatus::Unbundled => unbundled_intents.push(intent_entry), + IntentStatus::Bundled(_) => { + // Bundled intent found, no more to scan + break; + } + } + } else { + warn!(%curr_intent_idx, "Could not find expected intent in db"); + break; + } + curr_intent_idx -= 1; + } + + // Reverse the items so that they are in ascending order of index + unbundled_intents.reverse(); + + Ok((unbundled_intents, latest_idx)) +} diff --git a/crates/btcio/src/writer/mod.rs b/crates/btcio/src/writer/mod.rs index 6f43204b4..dff1c47d7 100644 --- a/crates/btcio/src/writer/mod.rs +++ b/crates/btcio/src/writer/mod.rs @@ -1,4 +1,5 @@ pub mod builder; +mod bundler; pub(crate) mod context; mod signer; mod task; diff --git a/crates/btcio/src/writer/task.rs b/crates/btcio/src/writer/task.rs index 1971dcd4f..99a2766e4 100644 --- a/crates/btcio/src/writer/task.rs +++ b/crates/btcio/src/writer/task.rs @@ -15,6 +15,7 @@ use strata_storage::ops::envelope::{Context, EnvelopeDataOps}; use strata_tasks::TaskExecutor; use tracing::*; +use super::bundler::bundler_task; use crate::{ broadcaster::L1BroadcastHandle, rpc::{traits::WriterRpc, BitcoinClient}, @@ -89,10 +90,10 @@ pub fn start_envelope_task( pool: threadpool::ThreadPool, broadcast_handle: Arc, ) -> anyhow::Result> { - let envelope_data_ops = Arc::new(Context::new(db).into_ops(pool)); - let next_watch_payload_idx = get_next_payloadidx_to_watch(envelope_data_ops.as_ref())?; + let writer_ops = Arc::new(Context::new(db).into_ops(pool)); + let next_watch_payload_idx = get_next_payloadidx_to_watch(writer_ops.as_ref())?; - let envelope_handle = Arc::new(EnvelopeHandle::new(envelope_data_ops.clone())); + let envelope_handle = Arc::new(EnvelopeHandle::new(writer_ops.clone())); let ctx = Arc::new(WriterContext::new( params, config, @@ -101,14 +102,13 @@ pub fn start_envelope_task( status_channel, )); + let wops = writer_ops.clone(); executor.spawn_critical_async("btcio::watcher_task", async move { - watcher_task( - next_watch_payload_idx, - ctx, - envelope_data_ops, - broadcast_handle, - ) - .await + watcher_task(next_watch_payload_idx, ctx, wops.clone(), broadcast_handle).await + }); + + executor.spawn_critical_async("btcio::bundler_task", async move { + bundler_task(writer_ops).await }); Ok(envelope_handle) From 3a94a67d6bf8f1b4190efbbfab1ed1c2c80395c4 Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Wed, 15 Jan 2025 18:12:02 +0545 Subject: [PATCH 08/17] db: rename envelope_db to writer, Add/edit more methods for writer db --- bin/strata-client/src/main.rs | 4 +- crates/btcio/src/writer/bundler.rs | 2 +- crates/btcio/src/writer/task.rs | 6 +- crates/btcio/src/writer/test_utils.rs | 8 +-- crates/db/src/traits.rs | 8 ++- crates/db/src/types.rs | 4 ++ crates/rocksdb-store/src/lib.rs | 6 +- crates/rocksdb-store/src/sequencer/db.rs | 57 +++++++++++++------ crates/rocksdb-store/src/sequencer/schemas.rs | 5 ++ crates/storage/src/ops/mod.rs | 2 +- .../src/ops/{envelope.rs => writer.rs} | 6 +- 11 files changed, 74 insertions(+), 34 deletions(-) rename crates/storage/src/ops/{envelope.rs => writer.rs} (75%) diff --git a/bin/strata-client/src/main.rs b/bin/strata-client/src/main.rs index 52e421e65..6594e1259 100644 --- a/bin/strata-client/src/main.rs +++ b/bin/strata-client/src/main.rs @@ -26,7 +26,7 @@ use strata_evmexec::{engine::RpcExecEngineCtl, EngineRpcClient}; use strata_primitives::params::Params; use strata_rocksdb::{ broadcaster::db::BroadcastDb, init_broadcaster_database, init_core_dbs, init_writer_database, - open_rocksdb_database, 
CommonDb, DbOpsConfig, RBPayloadDb, ROCKSDB_NAME, + open_rocksdb_database, CommonDb, DbOpsConfig, RBL1WriterDb, ROCKSDB_NAME, }; use strata_rpc_api::{ StrataAdminApiServer, StrataApiServer, StrataDebugApiServer, StrataSequencerApiServer, @@ -373,7 +373,7 @@ fn start_sequencer_tasks( config: &Config, sequencer_config: &SequencerConfig, executor: &TaskExecutor, - writer_db: Arc, + writer_db: Arc, checkpoint_handle: Arc, broadcast_handle: Arc, methods: &mut Methods, diff --git a/crates/btcio/src/writer/bundler.rs b/crates/btcio/src/writer/bundler.rs index 1454e78ee..375520246 100644 --- a/crates/btcio/src/writer/bundler.rs +++ b/crates/btcio/src/writer/bundler.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, time::Duration}; use strata_db::types::{IntentEntry, IntentStatus, PayloadEntry}; -use strata_storage::ops::envelope::EnvelopeDataOps; +use strata_storage::ops::writer::EnvelopeDataOps; use tokio::time::sleep; use tracing::warn; diff --git a/crates/btcio/src/writer/task.rs b/crates/btcio/src/writer/task.rs index 99a2766e4..305174490 100644 --- a/crates/btcio/src/writer/task.rs +++ b/crates/btcio/src/writer/task.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration}; use bitcoin::Address; use strata_config::btcio::WriterConfig; use strata_db::{ - traits::L1PayloadDatabase, + traits::L1WriterDatabase, types::{IntentEntry, L1TxStatus, PayloadEntry, PayloadL1Status}, }; use strata_primitives::{ @@ -11,7 +11,7 @@ use strata_primitives::{ params::Params, }; use strata_status::StatusChannel; -use strata_storage::ops::envelope::{Context, EnvelopeDataOps}; +use strata_storage::ops::writer::{Context, EnvelopeDataOps}; use strata_tasks::TaskExecutor; use tracing::*; @@ -79,7 +79,7 @@ impl EnvelopeHandle { /// /// [`Result`](anyhow::Result) #[allow(clippy::too_many_arguments)] -pub fn start_envelope_task( +pub fn start_envelope_task( executor: &TaskExecutor, bitcoin_client: Arc, config: Arc, diff --git a/crates/btcio/src/writer/test_utils.rs b/crates/btcio/src/writer/test_utils.rs index e6a90f84d..28415ca22 100644 --- a/crates/btcio/src/writer/test_utils.rs +++ b/crates/btcio/src/writer/test_utils.rs @@ -3,19 +3,19 @@ use std::sync::Arc; use strata_db::{traits::BroadcastDatabase, types::L1TxEntry}; use strata_rocksdb::{ broadcaster::db::BroadcastDb, sequencer::db::SequencerDB, test_utils::get_rocksdb_tmp_instance, - L1BroadcastDb, RBPayloadDb, + L1BroadcastDb, RBL1WriterDb, }; use strata_storage::ops::{ - envelope::{Context, EnvelopeDataOps}, + writer::{Context, EnvelopeDataOps}, l1tx_broadcast::Context as BContext, }; use crate::broadcaster::L1BroadcastHandle; /// Returns [`Arc`] of [`SequencerDB`] for testing -pub fn get_db() -> Arc> { +pub fn get_db() -> Arc> { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seqdb = Arc::new(RBPayloadDb::new(db, db_ops)); + let seqdb = Arc::new(RBL1WriterDb::new(db, db_ops)); Arc::new(SequencerDB::new(seqdb)) } diff --git a/crates/db/src/traits.rs b/crates/db/src/traits.rs index 224bd4053..d31ca6465 100644 --- a/crates/db/src/traits.rs +++ b/crates/db/src/traits.rs @@ -246,7 +246,7 @@ pub trait CheckpointDatabase { /// A trait encapsulating provider and store traits to create/update [`PayloadEntry`] in the /// database and to fetch [`PayloadEntry`] and indices from the database -pub trait L1PayloadDatabase { +pub trait L1WriterDatabase { /// Store the [`PayloadEntry`]. 
fn put_payload_entry(&self, idx: u64, payloadentry: PayloadEntry) -> DbResult<()>; @@ -261,6 +261,12 @@ pub trait L1PayloadDatabase { /// Get a [`IntentEntry`] by its hash fn get_intent_by_id(&self, id: Buf32) -> DbResult>; + + /// Get a [`IntentEntry`] by its idx + fn get_intent_by_idx(&self, idx: u64) -> DbResult>; + + /// Get the next intent index + fn get_next_intent_idx(&self) -> DbResult; } pub trait ProofDatabase { diff --git a/crates/db/src/types.rs b/crates/db/src/types.rs index 728bf86d6..6f83b8102 100644 --- a/crates/db/src/types.rs +++ b/crates/db/src/types.rs @@ -35,6 +35,10 @@ impl IntentEntry { status: IntentStatus::Bundled(bundle_idx), } } + + pub fn payload(&self) -> &L1Payload { + self.intent.payload() + } } /// Status of Intent indicating various stages of being bundled to L1 transaction. diff --git a/crates/rocksdb-store/src/lib.rs b/crates/rocksdb-store/src/lib.rs index 2558494f4..13ed2c359 100644 --- a/crates/rocksdb-store/src/lib.rs +++ b/crates/rocksdb-store/src/lib.rs @@ -87,7 +87,7 @@ use l2::{ schemas::{L2BlockHeightSchema, L2BlockSchema, L2BlockStatusSchema}, }; use rockbound::{schema::ColumnFamilyName, Schema}; -pub use sequencer::db::RBPayloadDb; +pub use sequencer::db::RBL1WriterDb; use sequencer::schemas::{IntentSchema, PayloadSchema}; pub use sync_event::db::SyncEventDb; @@ -175,6 +175,6 @@ pub fn init_broadcaster_database( pub fn init_writer_database( rbdb: Arc, ops_config: DbOpsConfig, -) -> Arc { - RBPayloadDb::new(rbdb, ops_config).into() +) -> Arc { + RBL1WriterDb::new(rbdb, ops_config).into() } diff --git a/crates/rocksdb-store/src/sequencer/db.rs b/crates/rocksdb-store/src/sequencer/db.rs index 5ea506457..95a06e526 100644 --- a/crates/rocksdb-store/src/sequencer/db.rs +++ b/crates/rocksdb-store/src/sequencer/db.rs @@ -1,33 +1,33 @@ use std::sync::Arc; -use rockbound::{OptimisticTransactionDB, SchemaDBOperationsExt}; +use rockbound::{utils::get_last, OptimisticTransactionDB as DB, SchemaDBOperationsExt}; use strata_db::{ errors::DbError, - traits::L1PayloadDatabase, + traits::L1WriterDatabase, types::{IntentEntry, PayloadEntry}, DbResult, }; use strata_primitives::buf::Buf32; -use super::schemas::{IntentSchema, PayloadSchema}; -use crate::DbOpsConfig; +use super::schemas::{IntentIdxSchema, IntentSchema, PayloadSchema}; +use crate::{sequence::get_next_id, DbOpsConfig}; -pub struct RBPayloadDb { - db: Arc, +pub struct RBL1WriterDb { + db: Arc, ops: DbOpsConfig, } -impl RBPayloadDb { +impl RBL1WriterDb { /// Wraps an existing database handle. /// /// Assumes it was opened with column families as defined in `STORE_COLUMN_FAMILIES`. // FIXME Make it better/generic. - pub fn new(db: Arc, ops: DbOpsConfig) -> Self { + pub fn new(db: Arc, ops: DbOpsConfig) -> Self { Self { db, ops } } } -impl L1PayloadDatabase for RBPayloadDb { +impl L1WriterDatabase for RBL1WriterDb { fn put_payload_entry(&self, idx: u64, entry: PayloadEntry) -> DbResult<()> { self.db .with_optimistic_txn( @@ -45,7 +45,7 @@ impl L1PayloadDatabase for RBPayloadDb { } fn get_next_payload_idx(&self) -> DbResult { - Ok(rockbound::utils::get_last::(&*self.db)? + Ok(get_last::(&*self.db)? 
.map(|(x, _)| x + 1) .unwrap_or(0)) } @@ -55,6 +55,8 @@ impl L1PayloadDatabase for RBPayloadDb { .with_optimistic_txn( rockbound::TransactionRetry::Count(self.ops.retry_count), |tx| -> Result<(), DbError> { + let idx = get_next_id::(tx)?; + tx.put::(&idx, &intent_id)?; tx.put::(&intent_id, &intent_entry)?; Ok(()) @@ -66,12 +68,33 @@ impl L1PayloadDatabase for RBPayloadDb { fn get_intent_by_id(&self, id: Buf32) -> DbResult> { Ok(self.db.get::(&id)?) } + + fn get_intent_by_idx(&self, idx: u64) -> DbResult> { + match self.db.get::(&idx)? { + Some(id) => self + .db + .get::(&id)? + .ok_or_else(|| { + DbError::Other(format!( + "Intent index({idx}) exists but corresponding id does not exist in writer db" + )) + }) + .map(Some), + None => Ok(None), + } + } + + fn get_next_intent_idx(&self) -> DbResult { + Ok(get_last::(&*self.db)? + .map(|(x, _)| x + 1) + .unwrap_or(0)) + } } #[cfg(feature = "test_utils")] #[cfg(test)] mod tests { - use strata_db::traits::L1PayloadDatabase; + use strata_db::traits::L1WriterDatabase; use strata_primitives::buf::Buf32; use strata_test_utils::ArbitraryGenerator; use test; @@ -82,7 +105,7 @@ mod tests { #[test] fn test_put_blob_new_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBPayloadDb::new(db, db_ops); + let seq_db = RBL1WriterDb::new(db, db_ops); let blob: PayloadEntry = ArbitraryGenerator::new().generate(); let blob_hash: Buf32 = [0; 32].into(); @@ -99,7 +122,7 @@ mod tests { #[test] fn test_put_blob_existing_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBPayloadDb::new(db, db_ops); + let seq_db = RBL1WriterDb::new(db, db_ops); let blob: PayloadEntry = ArbitraryGenerator::new().generate(); let blob_hash: Buf32 = [0; 32].into(); @@ -114,7 +137,7 @@ mod tests { #[test] fn test_update_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBPayloadDb::new(db, db_ops); + let seq_db = RBL1WriterDb::new(db, db_ops); let entry: PayloadEntry = ArbitraryGenerator::new().generate(); @@ -132,7 +155,7 @@ mod tests { #[test] fn test_get_last_entry_idx() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBPayloadDb::new(db, db_ops); + let seq_db = RBL1WriterDb::new(db, db_ops); let blob: PayloadEntry = ArbitraryGenerator::new().generate(); @@ -162,7 +185,7 @@ mod tests { #[test] fn test_put_intent_new_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBPayloadDb::new(db, db_ops); + let seq_db = RBL1WriterDb::new(db, db_ops); let intent: IntentEntry = ArbitraryGenerator::new().generate(); let intent_id: Buf32 = [0; 32].into(); @@ -176,7 +199,7 @@ mod tests { #[test] fn test_put_intent_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seq_db = RBPayloadDb::new(db, db_ops); + let seq_db = RBL1WriterDb::new(db, db_ops); let intent: IntentEntry = ArbitraryGenerator::new().generate(); let intent_id: Buf32 = [0; 32].into(); diff --git a/crates/rocksdb-store/src/sequencer/schemas.rs b/crates/rocksdb-store/src/sequencer/schemas.rs index 0fac7412d..c3000ecd1 100644 --- a/crates/rocksdb-store/src/sequencer/schemas.rs +++ b/crates/rocksdb-store/src/sequencer/schemas.rs @@ -15,3 +15,8 @@ define_table_with_default_codec!( /// A table to store intentid -> intent mapping (IntentSchema) Buf32 => IntentEntry ); + +define_table_with_seek_key_codec!( + /// A table to store idx-> intent id mapping + (IntentIdxSchema) u64 => Buf32 +); diff --git a/crates/storage/src/ops/mod.rs b/crates/storage/src/ops/mod.rs index 
72c6283f3..be59ede31 100644 --- a/crates/storage/src/ops/mod.rs +++ b/crates/storage/src/ops/mod.rs @@ -3,7 +3,7 @@ pub mod bridge_duty; pub mod bridge_duty_index; pub mod bridge_relay; pub mod checkpoint; -pub mod envelope; +pub mod writer; pub mod l1; pub mod l1tx_broadcast; pub mod l2; diff --git a/crates/storage/src/ops/envelope.rs b/crates/storage/src/ops/writer.rs similarity index 75% rename from crates/storage/src/ops/envelope.rs rename to crates/storage/src/ops/writer.rs index 7832e154b..70febaca4 100644 --- a/crates/storage/src/ops/envelope.rs +++ b/crates/storage/src/ops/writer.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use strata_db::{ - traits::L1PayloadDatabase, + traits::L1WriterDatabase, types::{IntentEntry, PayloadEntry}, DbResult, }; @@ -12,11 +12,13 @@ use strata_primitives::buf::Buf32; use crate::exec::*; inst_ops_simple! { - ( => EnvelopeDataOps) { + ( => EnvelopeDataOps) { put_payload_entry(idx: u64, payloadentry: PayloadEntry) => (); get_payload_entry_by_idx(idx: u64) => Option; get_next_payload_idx() => u64; put_intent_entry(id: Buf32, entry: IntentEntry) => (); get_intent_by_id(id: Buf32) => Option; + get_intent_by_idx(idx: u64) => Option; + get_next_intent_idx() => u64; } } From 0e381118d0dc0acc231f02b368c54a68f36b4224 Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Thu, 16 Jan 2025 17:31:38 +0545 Subject: [PATCH 09/17] Fix bug around intent not being processed --- crates/btcio/src/writer/bundler.rs | 14 +++++++---- crates/btcio/src/writer/task.rs | 30 +++++++++++++++++------- crates/btcio/src/writer/test_utils.rs | 2 +- crates/rocksdb-store/src/lib.rs | 3 ++- crates/rocksdb-store/src/sequencer/db.rs | 10 ++++++-- 5 files changed, 43 insertions(+), 16 deletions(-) diff --git a/crates/btcio/src/writer/bundler.rs b/crates/btcio/src/writer/bundler.rs index 375520246..89cb61090 100644 --- a/crates/btcio/src/writer/bundler.rs +++ b/crates/btcio/src/writer/bundler.rs @@ -3,19 +3,20 @@ use std::{sync::Arc, time::Duration}; use strata_db::types::{IntentEntry, IntentStatus, PayloadEntry}; use strata_storage::ops::writer::EnvelopeDataOps; use tokio::time::sleep; -use tracing::warn; +use tracing::*; -const BUNDLE_INTERVAL: u64 = 10; +const BUNDLE_INTERVAL: u64 = 200; // millis /// Periodically bundles unbundled intents into payload entries. pub(crate) async fn bundler_task(ops: Arc) -> anyhow::Result<()> { let mut last_idx = 0; loop { let (unbundled, new_idx) = get_unbundled_intents_after(last_idx, ops.as_ref()).await?; + debug!(len=%unbundled.len(), "found unbundled intents"); process_unbundled_entries(ops.as_ref(), unbundled).await?; last_idx = new_idx; - let _ = sleep(Duration::from_secs(BUNDLE_INTERVAL)).await; + let _ = sleep(Duration::from_millis(BUNDLE_INTERVAL)).await; } } @@ -51,9 +52,10 @@ async fn get_unbundled_intents_after( ops: &EnvelopeDataOps, ) -> anyhow::Result<(Vec, u64)> { let latest_idx = ops.get_next_payload_idx_async().await?.saturating_sub(1); + debug!(%idx, "Latest intent idx"); let mut curr_intent_idx = latest_idx; let mut unbundled_intents = Vec::new(); - while curr_intent_idx > idx { + while curr_intent_idx >= idx { if let Some(intent_entry) = ops.get_intent_by_idx_async(curr_intent_idx).await? 
{ match intent_entry.status { IntentStatus::Unbundled => unbundled_intents.push(intent_entry), @@ -66,6 +68,10 @@ async fn get_unbundled_intents_after( warn!(%curr_intent_idx, "Could not find expected intent in db"); break; } + + if curr_intent_idx == 0 { + break; + } curr_intent_idx -= 1; } diff --git a/crates/btcio/src/writer/task.rs b/crates/btcio/src/writer/task.rs index 305174490..e5d8aa321 100644 --- a/crates/btcio/src/writer/task.rs +++ b/crates/btcio/src/writer/task.rs @@ -35,37 +35,51 @@ impl EnvelopeHandle { Self { ops } } + /// Checks if it is duplicate, if not creates a new [`IntentEntry`] from `intent` and puts it in + /// the database. pub fn submit_intent(&self, intent: PayloadIntent) -> anyhow::Result<()> { + let id = *intent.commitment(); + + // Check if the intent is meant for L1 if intent.dest() != PayloadDest::L1 { - warn!(commitment = %intent.commitment(), "Received intent not meant for L1"); + warn!(commitment = %id, "Received intent not meant for L1"); return Ok(()); } - let id = *intent.commitment(); - debug!(commitment = %intent.commitment(), "Received intent"); + debug!(commitment = %id, "Received intent for processing"); + + // Check if it is duplicate if self.ops.get_intent_by_id_blocking(id)?.is_some() { warn!(commitment = %id, "Received duplicate intent"); return Ok(()); } - let entry = IntentEntry::new_unbundled(intent); + // Create and store IntentEntry + let entry = IntentEntry::new_unbundled(intent); Ok(self.ops.put_intent_entry_blocking(id, entry)?) } + /// Checks if it is duplicate, if not creates a new [`IntentEntry`] from `intent` and puts it in + /// the database pub async fn submit_intent_async(&self, intent: PayloadIntent) -> anyhow::Result<()> { + let id = *intent.commitment(); + + // Check if the intent is meant for L1 if intent.dest() != PayloadDest::L1 { - warn!(commitment = %intent.commitment(), "Received intent not meant for L1"); + warn!(commitment = %id, "Received intent not meant for L1"); return Ok(()); } - let id = *intent.commitment(); - debug!(commitment = %intent.commitment(), "Received intent"); + debug!(commitment = %id, "Received intent for processing"); + + // Check if it is duplicate if self.ops.get_intent_by_id_async(id).await?.is_some() { warn!(commitment = %id, "Received duplicate intent"); return Ok(()); } - let entry = IntentEntry::new_unbundled(intent); + // Create and store IntentEntry + let entry = IntentEntry::new_unbundled(intent); Ok(self.ops.put_intent_entry_async(id, entry).await?) 
} } diff --git a/crates/btcio/src/writer/test_utils.rs b/crates/btcio/src/writer/test_utils.rs index 28415ca22..4a690d7a9 100644 --- a/crates/btcio/src/writer/test_utils.rs +++ b/crates/btcio/src/writer/test_utils.rs @@ -6,8 +6,8 @@ use strata_rocksdb::{ L1BroadcastDb, RBL1WriterDb, }; use strata_storage::ops::{ - writer::{Context, EnvelopeDataOps}, l1tx_broadcast::Context as BContext, + writer::{Context, EnvelopeDataOps}, }; use crate::broadcaster::L1BroadcastHandle; diff --git a/crates/rocksdb-store/src/lib.rs b/crates/rocksdb-store/src/lib.rs index 13ed2c359..bc6e4c7bf 100644 --- a/crates/rocksdb-store/src/lib.rs +++ b/crates/rocksdb-store/src/lib.rs @@ -38,6 +38,7 @@ pub const STORE_COLUMN_FAMILIES: &[ColumnFamilyName] = &[ // Payload/intent schemas PayloadSchema::COLUMN_FAMILY_NAME, IntentSchema::COLUMN_FAMILY_NAME, + IntentIdxSchema::COLUMN_FAMILY_NAME, // Bcast schemas BcastL1TxIdSchema::COLUMN_FAMILY_NAME, BcastL1TxSchema::COLUMN_FAMILY_NAME, @@ -88,7 +89,7 @@ use l2::{ }; use rockbound::{schema::ColumnFamilyName, Schema}; pub use sequencer::db::RBL1WriterDb; -use sequencer::schemas::{IntentSchema, PayloadSchema}; +use sequencer::schemas::{IntentIdxSchema, IntentSchema, PayloadSchema}; pub use sync_event::db::SyncEventDb; use crate::{ diff --git a/crates/rocksdb-store/src/sequencer/db.rs b/crates/rocksdb-store/src/sequencer/db.rs index 95a06e526..434fbffcb 100644 --- a/crates/rocksdb-store/src/sequencer/db.rs +++ b/crates/rocksdb-store/src/sequencer/db.rs @@ -51,18 +51,24 @@ impl L1WriterDatabase for RBL1WriterDb { } fn put_intent_entry(&self, intent_id: Buf32, intent_entry: IntentEntry) -> DbResult<()> { - self.db + let res = self + .db .with_optimistic_txn( rockbound::TransactionRetry::Count(self.ops.retry_count), |tx| -> Result<(), DbError> { + tracing::debug!(%intent_id, "putting intent"); let idx = get_next_id::(tx)?; + tracing::debug!(%idx, "next intent idx..."); tx.put::(&idx, &intent_id)?; tx.put::(&intent_id, &intent_entry)?; Ok(()) }, ) - .map_err(|e| DbError::TransactionError(e.to_string())) + .map_err(|e| DbError::TransactionError(e.to_string())); + let next = self.get_next_intent_idx()?; + tracing::debug!(%next, "next intent idx after put"); + res } fn get_intent_by_id(&self, id: Buf32) -> DbResult> { From 36cf4b0dd3425d4689a2a79ed57144845ac27882 Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Fri, 17 Jan 2025 15:35:39 +0545 Subject: [PATCH 10/17] Make unit tests compile --- crates/btcio/src/writer/builder.rs | 2 +- crates/btcio/src/writer/signer.rs | 6 +++--- crates/btcio/src/writer/task.rs | 15 +++++---------- crates/btcio/src/writer/test_utils.rs | 10 ++++------ crates/rocksdb-store/src/sequencer/db.rs | 13 ++++--------- 5 files changed, 17 insertions(+), 29 deletions(-) diff --git a/crates/btcio/src/writer/builder.rs b/crates/btcio/src/writer/builder.rs index 541fbf474..db812ea95 100644 --- a/crates/btcio/src/writer/builder.rs +++ b/crates/btcio/src/writer/builder.rs @@ -674,7 +674,7 @@ mod tests { let payload = L1Payload::new_da(vec![0u8; 100]); let (commit, reveal) = super::create_envelope_transactions( &ctx, - &payload, + &[payload], utxos.to_vec(), 10, bitcoin::Network::Bitcoin, diff --git a/crates/btcio/src/writer/signer.rs b/crates/btcio/src/writer/signer.rs index 16932b1b3..c15f589ed 100644 --- a/crates/btcio/src/writer/signer.rs +++ b/crates/btcio/src/writer/signer.rs @@ -75,14 +75,14 @@ mod test { let ctx = get_writer_context(); // First insert an unsigned blob - let entry = PayloadEntry::new_unsigned(L1Payload::new_da([1; 100].to_vec())); + let payload 
= L1Payload::new_da([1; 100].to_vec()); + let entry = PayloadEntry::new_unsigned(vec![payload]); assert_eq!(entry.status, PayloadL1Status::Unsigned); assert_eq!(entry.commit_txid, Buf32::zero()); assert_eq!(entry.reveal_txid, Buf32::zero()); - let intent_hash = hash::raw(entry.payloads.data()); - iops.put_payload_entry_async(intent_hash, entry.clone()) + iops.put_payload_entry_async(0, entry.clone()) .await .unwrap(); diff --git a/crates/btcio/src/writer/task.rs b/crates/btcio/src/writer/task.rs index e5d8aa321..ab5a63135 100644 --- a/crates/btcio/src/writer/task.rs +++ b/crates/btcio/src/writer/task.rs @@ -305,7 +305,6 @@ fn determine_payload_next_status( #[cfg(test)] mod test { - use strata_primitives::buf::Buf32; use strata_test_utils::ArbitraryGenerator; use super::*; @@ -329,24 +328,20 @@ mod test { let mut e1: PayloadEntry = ArbitraryGenerator::new().generate(); e1.status = PayloadL1Status::Finalized; - let payload_hash: Buf32 = [1; 32].into(); - iops.put_payload_entry_blocking(payload_hash, e1).unwrap(); - let expected_idx = iops.get_next_payload_idx_blocking().unwrap(); + iops.put_payload_entry_blocking(0, e1).unwrap(); let mut e2: PayloadEntry = ArbitraryGenerator::new().generate(); e2.status = PayloadL1Status::Published; - let payload_hash: Buf32 = [2; 32].into(); - iops.put_payload_entry_blocking(payload_hash, e2).unwrap(); + iops.put_payload_entry_blocking(1, e2).unwrap(); + let expected_idx = 1; // All entries before this do not need to be watched. let mut e3: PayloadEntry = ArbitraryGenerator::new().generate(); e3.status = PayloadL1Status::Unsigned; - let payload_hash: Buf32 = [3; 32].into(); - iops.put_payload_entry_blocking(payload_hash, e3).unwrap(); + iops.put_payload_entry_blocking(2, e3).unwrap(); let mut e4: PayloadEntry = ArbitraryGenerator::new().generate(); e4.status = PayloadL1Status::Unsigned; - let payload_hash: Buf32 = [4; 32].into(); - iops.put_payload_entry_blocking(payload_hash, e4).unwrap(); + iops.put_payload_entry_blocking(3, e4).unwrap(); let idx = get_next_payloadidx_to_watch(&iops).unwrap(); diff --git a/crates/btcio/src/writer/test_utils.rs b/crates/btcio/src/writer/test_utils.rs index 4a690d7a9..581e1cf12 100644 --- a/crates/btcio/src/writer/test_utils.rs +++ b/crates/btcio/src/writer/test_utils.rs @@ -2,8 +2,7 @@ use std::sync::Arc; use strata_db::{traits::BroadcastDatabase, types::L1TxEntry}; use strata_rocksdb::{ - broadcaster::db::BroadcastDb, sequencer::db::SequencerDB, test_utils::get_rocksdb_tmp_instance, - L1BroadcastDb, RBL1WriterDb, + broadcaster::db::BroadcastDb, test_utils::get_rocksdb_tmp_instance, L1BroadcastDb, RBL1WriterDb, }; use strata_storage::ops::{ l1tx_broadcast::Context as BContext, @@ -12,11 +11,10 @@ use strata_storage::ops::{ use crate::broadcaster::L1BroadcastHandle; -/// Returns [`Arc`] of [`SequencerDB`] for testing -pub fn get_db() -> Arc> { +/// Returns [`Arc`] of [`RBL1WriterDb`] for testing +pub fn get_db() -> Arc { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); - let seqdb = Arc::new(RBL1WriterDb::new(db, db_ops)); - Arc::new(SequencerDB::new(seqdb)) + Arc::new(RBL1WriterDb::new(db, db_ops)) } /// Returns [`Arc`] of [`EnvelopeDataOps`] for testing diff --git a/crates/rocksdb-store/src/sequencer/db.rs b/crates/rocksdb-store/src/sequencer/db.rs index 434fbffcb..e41e81be9 100644 --- a/crates/rocksdb-store/src/sequencer/db.rs +++ b/crates/rocksdb-store/src/sequencer/db.rs @@ -114,14 +114,10 @@ mod tests { let seq_db = RBL1WriterDb::new(db, db_ops); let blob: PayloadEntry = ArbitraryGenerator::new().generate(); - 
let blob_hash: Buf32 = [0; 32].into(); - seq_db.put_payload_entry(blob_hash, blob.clone()).unwrap(); - let idx = seq_db.get_next_payload_idx().unwrap().unwrap(); + seq_db.put_payload_entry(0, blob.clone()).unwrap(); - assert_eq!(seq_db.get_payload_id(idx).unwrap(), Some(blob_hash)); - - let stored_blob = seq_db.get_payload_entry_by_idx(blob_hash).unwrap(); + let stored_blob = seq_db.get_payload_entry_by_idx(0).unwrap(); assert_eq!(stored_blob, Some(blob)); } @@ -130,11 +126,10 @@ mod tests { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); let seq_db = RBL1WriterDb::new(db, db_ops); let blob: PayloadEntry = ArbitraryGenerator::new().generate(); - let blob_hash: Buf32 = [0; 32].into(); - seq_db.put_payload_entry(blob_hash, blob.clone()).unwrap(); + seq_db.put_payload_entry(0, blob.clone()).unwrap(); - let result = seq_db.put_payload_entry(blob_hash, blob); + let result = seq_db.put_payload_entry(0, blob); // Should be ok to put to existing key assert!(result.is_ok()); From 08e41c9f8234c3b4f2849bfe0fcfa1755946f5ce Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Fri, 17 Jan 2025 17:07:28 +0545 Subject: [PATCH 11/17] Get tests to pass --- crates/btcio/src/writer/bundler.rs | 5 ++--- crates/l1tx/src/envelope/builder.rs | 2 +- crates/l1tx/src/envelope/parser.rs | 12 ++++-------- crates/l1tx/src/utils.rs | 5 +++-- 4 files changed, 10 insertions(+), 14 deletions(-) diff --git a/crates/btcio/src/writer/bundler.rs b/crates/btcio/src/writer/bundler.rs index 89cb61090..72df42a0a 100644 --- a/crates/btcio/src/writer/bundler.rs +++ b/crates/btcio/src/writer/bundler.rs @@ -5,6 +5,7 @@ use strata_storage::ops::writer::EnvelopeDataOps; use tokio::time::sleep; use tracing::*; +// TODO: get this from config const BUNDLE_INTERVAL: u64 = 200; // millis /// Periodically bundles unbundled intents into payload entries. 
@@ -12,7 +13,6 @@ pub(crate) async fn bundler_task(ops: Arc) -> anyhow::Result<() let mut last_idx = 0; loop { let (unbundled, new_idx) = get_unbundled_intents_after(last_idx, ops.as_ref()).await?; - debug!(len=%unbundled.len(), "found unbundled intents"); process_unbundled_entries(ops.as_ref(), unbundled).await?; last_idx = new_idx; @@ -51,8 +51,7 @@ async fn get_unbundled_intents_after( idx: u64, ops: &EnvelopeDataOps, ) -> anyhow::Result<(Vec, u64)> { - let latest_idx = ops.get_next_payload_idx_async().await?.saturating_sub(1); - debug!(%idx, "Latest intent idx"); + let latest_idx = ops.get_next_intent_idx_async().await?.saturating_sub(1); let mut curr_intent_idx = latest_idx; let mut unbundled_intents = Vec::new(); while curr_intent_idx >= idx { diff --git a/crates/l1tx/src/envelope/builder.rs b/crates/l1tx/src/envelope/builder.rs index 1dae598b4..ef8fe7b6b 100644 --- a/crates/l1tx/src/envelope/builder.rs +++ b/crates/l1tx/src/envelope/builder.rs @@ -38,7 +38,7 @@ fn build_payload_envelope( .push_opcode(OP_IF) .push_slice(tag) // Insert version - .push_slice(PushBytesBuf::from(version.to_be_bytes())) + .push_slice(PushBytesBuf::from([version])) // Insert size .push_slice(PushBytesBuf::from( (payload.data().len() as u32).to_be_bytes(), diff --git a/crates/l1tx/src/envelope/parser.rs b/crates/l1tx/src/envelope/parser.rs index b196d174b..e4902b557 100644 --- a/crates/l1tx/src/envelope/parser.rs +++ b/crates/l1tx/src/envelope/parser.rs @@ -82,14 +82,12 @@ fn parse_payload_type(bytes: &[u8], params: &RollupParams) -> Option Option { - if bytes.len() != 8 { - warn!("Invalid version bytes length"); +fn validate_version(bytes: &[u8]) -> Option { + if bytes.len() != 1 { + warn!("Invalid version bytes length, should be 1"); return None; } - let mut buf: [u8; 8] = [0; 8]; - buf.copy_from_slice(&bytes[0..8]); - let version = u64::from_be_bytes(buf); + let version = bytes[0]; // TODO: add version validation logic, i.e which particular versions are supported Some(version) } @@ -160,10 +158,8 @@ mod tests { let script = generate_envelope_script_test(envelope_data.clone(), params.clone().into(), 1).unwrap(); - // Parse the rollup name let result = parse_envelope_data(&script, params.rollup()).unwrap(); - // Assert the rollup name was parsed correctly assert_eq!(result, envelope_data); // Try with larger size diff --git a/crates/l1tx/src/utils.rs b/crates/l1tx/src/utils.rs index ef9c332ad..c9a99a49d 100644 --- a/crates/l1tx/src/utils.rs +++ b/crates/l1tx/src/utils.rs @@ -24,7 +24,8 @@ pub fn next_op(instructions: &mut Instructions<'_>) -> Option { /// Extract next instruction and try to parse it as a byte slice pub fn next_bytes<'a>(instructions: &mut Instructions<'a>) -> Option<&'a [u8]> { - match instructions.next() { + let ins = instructions.next(); + match ins { Some(Ok(Instruction::PushBytes(bytes))) => Some(bytes.as_bytes()), _ => None, } @@ -41,7 +42,7 @@ pub fn next_u32(instructions: &mut Instructions<'_>) -> Option { } let mut buf = [0; 4]; buf[..bytes.len()].copy_from_slice(bytes.as_bytes()); - Some(u32::from_le_bytes(buf)) + Some(u32::from_be_bytes(buf)) } Some(Ok(Instruction::Op(op))) => { // Handle small integers pushed by OP_1 to OP_16 From f5ca3510eafb6bfc22df7c265cdf877e391f4667 Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Fri, 17 Jan 2025 17:48:51 +0545 Subject: [PATCH 12/17] Rename sequencer to writer in db modules --- crates/btcio/src/writer/bundler.rs | 26 ++++++++++--------- crates/btcio/src/writer/signer.rs | 2 +- crates/rocksdb-store/src/lib.rs | 6 ++--- .../src/{sequencer 
=> writer}/db.rs | 1 - .../src/{sequencer => writer}/mod.rs | 0 .../src/{sequencer => writer}/schemas.rs | 0 crates/storage/src/ops/mod.rs | 2 +- 7 files changed, 19 insertions(+), 18 deletions(-) rename crates/rocksdb-store/src/{sequencer => writer}/db.rs (99%) rename crates/rocksdb-store/src/{sequencer => writer}/mod.rs (100%) rename crates/rocksdb-store/src/{sequencer => writer}/schemas.rs (100%) diff --git a/crates/btcio/src/writer/bundler.rs b/crates/btcio/src/writer/bundler.rs index 72df42a0a..73e07f173 100644 --- a/crates/btcio/src/writer/bundler.rs +++ b/crates/btcio/src/writer/bundler.rs @@ -21,7 +21,7 @@ pub(crate) async fn bundler_task(ops: Arc) -> anyhow::Result<() } /// Processes and bundles a list of unbundled intents into payload entries. -/// NOTE: The logic current is simply 1-1 mapping between intents and payloads, in future it can +/// NOTE: The current logic is simply 1-1 mapping between intents and payloads, in future it can /// be sophisticated. async fn process_unbundled_entries( ops: &EnvelopeDataOps, @@ -52,30 +52,32 @@ async fn get_unbundled_intents_after( ops: &EnvelopeDataOps, ) -> anyhow::Result<(Vec, u64)> { let latest_idx = ops.get_next_intent_idx_async().await?.saturating_sub(1); - let mut curr_intent_idx = latest_idx; - let mut unbundled_intents = Vec::new(); - while curr_intent_idx >= idx { - if let Some(intent_entry) = ops.get_intent_by_idx_async(curr_intent_idx).await? { - match intent_entry.status { - IntentStatus::Unbundled => unbundled_intents.push(intent_entry), + let mut curr_idx = latest_idx; + + let mut unbundled = Vec::new(); + + while curr_idx >= idx { + if let Some(intent) = ops.get_intent_by_idx_async(curr_idx).await? { + match intent.status { + IntentStatus::Unbundled => unbundled.push(intent), IntentStatus::Bundled(_) => { // Bundled intent found, no more to scan break; } } } else { - warn!(%curr_intent_idx, "Could not find expected intent in db"); + warn!(%curr_idx, "Could not find expected intent in db"); break; } - if curr_intent_idx == 0 { + if curr_idx == 0 { break; } - curr_intent_idx -= 1; + curr_idx -= 1; } // Reverse the items so that they are in ascending order of index - unbundled_intents.reverse(); + unbundled.reverse(); - Ok((unbundled_intents, latest_idx)) + Ok((unbundled, latest_idx)) } diff --git a/crates/btcio/src/writer/signer.rs b/crates/btcio/src/writer/signer.rs index c15f589ed..081318a45 100644 --- a/crates/btcio/src/writer/signer.rs +++ b/crates/btcio/src/writer/signer.rs @@ -60,7 +60,7 @@ pub async fn create_and_sign_payload_envelopes( #[cfg(test)] mod test { use strata_db::types::{PayloadEntry, PayloadL1Status}; - use strata_primitives::{hash, l1::payload::L1Payload}; + use strata_primitives::l1::payload::L1Payload; use super::*; use crate::{ diff --git a/crates/rocksdb-store/src/lib.rs b/crates/rocksdb-store/src/lib.rs index bc6e4c7bf..d4cad9ef0 100644 --- a/crates/rocksdb-store/src/lib.rs +++ b/crates/rocksdb-store/src/lib.rs @@ -7,8 +7,8 @@ pub mod client_state; pub mod l1; pub mod l2; pub mod prover; -pub mod sequencer; pub mod sync_event; +pub mod writer; pub mod macros; mod sequence; @@ -88,9 +88,9 @@ use l2::{ schemas::{L2BlockHeightSchema, L2BlockSchema, L2BlockStatusSchema}, }; use rockbound::{schema::ColumnFamilyName, Schema}; -pub use sequencer::db::RBL1WriterDb; -use sequencer::schemas::{IntentIdxSchema, IntentSchema, PayloadSchema}; pub use sync_event::db::SyncEventDb; +pub use writer::db::RBL1WriterDb; +use writer::schemas::{IntentIdxSchema, IntentSchema, PayloadSchema}; use crate::{ 
chain_state::schemas::{ChainstateSchema, WriteBatchSchema}, diff --git a/crates/rocksdb-store/src/sequencer/db.rs b/crates/rocksdb-store/src/writer/db.rs similarity index 99% rename from crates/rocksdb-store/src/sequencer/db.rs rename to crates/rocksdb-store/src/writer/db.rs index e41e81be9..aa714cf77 100644 --- a/crates/rocksdb-store/src/sequencer/db.rs +++ b/crates/rocksdb-store/src/writer/db.rs @@ -169,7 +169,6 @@ mod tests { seq_db .put_payload_entry(next_blob_idx, blob.clone()) .unwrap(); - let next_blob_idx = seq_db.get_next_payload_idx().unwrap(); // Now the next idx is 1 let blob: PayloadEntry = ArbitraryGenerator::new().generate(); diff --git a/crates/rocksdb-store/src/sequencer/mod.rs b/crates/rocksdb-store/src/writer/mod.rs similarity index 100% rename from crates/rocksdb-store/src/sequencer/mod.rs rename to crates/rocksdb-store/src/writer/mod.rs diff --git a/crates/rocksdb-store/src/sequencer/schemas.rs b/crates/rocksdb-store/src/writer/schemas.rs similarity index 100% rename from crates/rocksdb-store/src/sequencer/schemas.rs rename to crates/rocksdb-store/src/writer/schemas.rs diff --git a/crates/storage/src/ops/mod.rs b/crates/storage/src/ops/mod.rs index be59ede31..3c907e16c 100644 --- a/crates/storage/src/ops/mod.rs +++ b/crates/storage/src/ops/mod.rs @@ -3,7 +3,7 @@ pub mod bridge_duty; pub mod bridge_duty_index; pub mod bridge_relay; pub mod checkpoint; -pub mod writer; pub mod l1; pub mod l1tx_broadcast; pub mod l2; +pub mod writer; From 87eb1115b5a8ba87919ec2ac41ce0cbd17663dc3 Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Fri, 17 Jan 2025 18:04:17 +0545 Subject: [PATCH 13/17] Update tests --- crates/btcio/src/test_utils.rs | 4 ++-- crates/l1tx/src/envelope/parser.rs | 7 ++++-- crates/l1tx/src/filter.rs | 37 +++++++++++++++++++----------- 3 files changed, 30 insertions(+), 18 deletions(-) diff --git a/crates/btcio/src/test_utils.rs b/crates/btcio/src/test_utils.rs index 409cb774f..df0c67584 100644 --- a/crates/btcio/src/test_utils.rs +++ b/crates/btcio/src/test_utils.rs @@ -215,11 +215,11 @@ impl SignerRpc for TestBitcoinClient { } pub fn generate_envelope_script_test( - envelope_data: L1Payload, + payloads: &[L1Payload], params: Arc, version: u8, ) -> anyhow::Result { - build_envelope_script(params.as_ref(), &[envelope_data], version) + build_envelope_script(params.as_ref(), payloads, version) } pub fn build_reveal_transaction_test( diff --git a/crates/l1tx/src/envelope/parser.rs b/crates/l1tx/src/envelope/parser.rs index e4902b557..7c6d2d520 100644 --- a/crates/l1tx/src/envelope/parser.rs +++ b/crates/l1tx/src/envelope/parser.rs @@ -154,9 +154,11 @@ mod tests { fn test_parse_envelope_data() { let bytes = vec![0, 1, 2, 3]; let params = gen_params(); + let version = 1; let envelope_data = L1Payload::new_checkpoint(bytes.clone()); let script = - generate_envelope_script_test(envelope_data.clone(), params.clone().into(), 1).unwrap(); + generate_envelope_script_test(&[envelope_data.clone()], params.clone().into(), version) + .unwrap(); let result = parse_envelope_data(&script, params.rollup()).unwrap(); @@ -166,7 +168,8 @@ mod tests { let bytes = vec![1; 2000]; let envelope_data = L1Payload::new_checkpoint(bytes.clone()); let script = - generate_envelope_script_test(envelope_data.clone(), params.clone().into(), 1).unwrap(); + generate_envelope_script_test(&[envelope_data.clone()], params.clone().into(), version) + .unwrap(); // Parse the rollup name let result = parse_envelope_data(&script, params.rollup()).unwrap(); diff --git a/crates/l1tx/src/filter.rs 
b/crates/l1tx/src/filter.rs index 919ae5d56..30bb9727a 100644 --- a/crates/l1tx/src/filter.rs +++ b/crates/l1tx/src/filter.rs @@ -163,14 +163,17 @@ mod test { } // Create an envelope transaction. The focus here is to create a tapscript, rather than a - // completely valid control block - fn create_checkpoint_envelope_tx(params: Arc) -> Transaction { + // completely valid control block. Includes `n_envelopes` envelopes in the tapscript. + fn create_checkpoint_envelope_tx(params: Arc, n_envelopes: u32) -> Transaction { let address = parse_addr(OTHER_ADDR); let inp_tx = create_test_tx(vec![create_test_txout(100000000, &address)]); - let signed_checkpoint: SignedBatchCheckpoint = ArbitraryGenerator::new().generate(); - let envelope_data = L1Payload::new_checkpoint(borsh::to_vec(&signed_checkpoint).unwrap()); - - let script = generate_envelope_script_test(envelope_data, params, 1).unwrap(); + let payloads: Vec<_> = (0..n_envelopes) + .map(|_| { + let signed_checkpoint: SignedBatchCheckpoint = ArbitraryGenerator::new().generate(); + L1Payload::new_checkpoint(borsh::to_vec(&signed_checkpoint).unwrap()) + }) + .collect(); + let script = generate_envelope_script_test(&payloads, params, 1).unwrap(); // Create controlblock let mut rand_bytes = [0; 32]; @@ -199,20 +202,26 @@ mod test { let params: Params = gen_params(); let filter_config = create_tx_filter_config(¶ms); - let tx = create_checkpoint_envelope_tx(params.clone().into()); + // Testing multiple envelopes are parsed + let num_envelopes = 2; + let tx = create_checkpoint_envelope_tx(params.clone().into(), num_envelopes); let block = create_test_block(vec![tx]); - let txids: Vec = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config) - .iter() - .map(|op_refs| op_refs.index()) - .collect(); + let ops = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config); + let txids: Vec = ops.iter().map(|op_refs| op_refs.index()).collect(); + assert_eq!( + ops.len(), + num_envelopes as usize, + "All the envelopes should be identified" + ); assert_eq!(txids[0], 0, "Should filter valid rollup name"); // Test with invalid checkpoint tag let mut new_params = params.clone(); new_params.rollup.checkpoint_tag = "invalid_checkpoint_tag".to_string(); - let tx = create_checkpoint_envelope_tx(new_params.into()); + + let tx = create_checkpoint_envelope_tx(new_params.into(), 2); let block = create_test_block(vec![tx]); let result = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config); assert!(result.is_empty(), "Should filter out invalid name"); @@ -237,9 +246,9 @@ mod test { fn test_filter_relevant_txs_multiple_matches() { let params: Params = gen_params(); let filter_config = create_tx_filter_config(¶ms); - let tx1 = create_checkpoint_envelope_tx(params.clone().into()); + let tx1 = create_checkpoint_envelope_tx(params.clone().into(), 1); let tx2 = create_test_tx(vec![create_test_txout(100, &parse_addr(OTHER_ADDR))]); - let tx3 = create_checkpoint_envelope_tx(params.clone().into()); + let tx3 = create_checkpoint_envelope_tx(params.clone().into(), 1); let block = create_test_block(vec![tx1, tx2, tx3]); let txids: Vec = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config) From 4cfd6ede7365b62156c576d5b7dfdbcd77eadd0f Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Fri, 17 Jan 2025 18:43:26 +0545 Subject: [PATCH 14/17] Actually parse multiple envelopes --- crates/l1tx/src/envelope/parser.rs | 51 ++++++++++++++++++++---------- crates/l1tx/src/filter.rs | 26 +++++++++++---- 2 files changed, 54 insertions(+), 23 
deletions(-) diff --git a/crates/l1tx/src/envelope/parser.rs b/crates/l1tx/src/envelope/parser.rs index 7c6d2d520..5812d28e2 100644 --- a/crates/l1tx/src/envelope/parser.rs +++ b/crates/l1tx/src/envelope/parser.rs @@ -46,29 +46,42 @@ pub enum EnvelopeParseError { /// # Errors /// /// This function errors if it cannot parse the [`L1Payload`] -pub fn parse_envelope_data( +pub fn parse_envelope_payloads( script: &ScriptBuf, params: &RollupParams, -) -> Result { +) -> Result, EnvelopeParseError> { let mut instructions = script.instructions(); - enter_envelope(&mut instructions)?; + let mut payloads = Vec::new(); + // TODO: make this sophisticated, i.e. even if one payload parsing fails, continue finding other + // envelopes and extracting payloads. Or is that really necessary? + while let Ok(payload) = parse_l1_payload(&mut instructions, params) { + payloads.push(payload); + } + Ok(payloads) +} - // Parse tag - let tag = next_bytes(&mut instructions) +fn parse_l1_payload( + instructions: &mut Instructions, + params: &RollupParams, +) -> Result { + enter_envelope(instructions)?; + + // Parse type + let ptype = next_bytes(instructions) .and_then(|bytes| parse_payload_type(bytes, params)) .ok_or(EnvelopeParseError::InvalidTag)?; // Parse version - let _version = next_bytes(&mut instructions) + let _version = next_bytes(instructions) .and_then(validate_version) .ok_or(EnvelopeParseError::InvalidVersion)?; // Parse size - let size = next_u32(&mut instructions).ok_or(EnvelopeParseError::InvalidSize)?; + let size = next_u32(instructions).ok_or(EnvelopeParseError::InvalidSize)?; // Parse payload - let payload = extract_n_bytes(size, &mut instructions)?; - Ok(L1Payload::new(payload, tag)) + let payload = extract_n_bytes(size, instructions)?; + Ok(L1Payload::new(payload, ptype)) } fn parse_payload_type(bytes: &[u8], params: &RollupParams) -> Option { @@ -155,14 +168,18 @@ mod tests { let bytes = vec![0, 1, 2, 3]; let params = gen_params(); let version = 1; - let envelope_data = L1Payload::new_checkpoint(bytes.clone()); - let script = - generate_envelope_script_test(&[envelope_data.clone()], params.clone().into(), version) - .unwrap(); + let envelope1 = L1Payload::new_checkpoint(bytes.clone()); + let envelope2 = L1Payload::new_checkpoint(bytes.clone()); + let script = generate_envelope_script_test( + &[envelope1.clone(), envelope2.clone()], + params.clone().into(), + version, + ) + .unwrap(); - let result = parse_envelope_data(&script, params.rollup()).unwrap(); + let result = parse_envelope_payloads(&script, params.rollup()).unwrap(); - assert_eq!(result, envelope_data); + assert_eq!(result, vec![envelope1, envelope2]); // Try with larger size let bytes = vec![1; 2000]; @@ -172,9 +189,9 @@ mod tests { .unwrap(); // Parse the rollup name - let result = parse_envelope_data(&script, params.rollup()).unwrap(); + let result = parse_envelope_payloads(&script, params.rollup()).unwrap(); // Assert the rollup name was parsed correctly - assert_eq!(result, envelope_data); + assert_eq!(result, vec![envelope_data]); } } diff --git a/crates/l1tx/src/filter.rs b/crates/l1tx/src/filter.rs index 30bb9727a..a3d93fc19 100644 --- a/crates/l1tx/src/filter.rs +++ b/crates/l1tx/src/filter.rs @@ -1,15 +1,16 @@ use bitcoin::{Block, Transaction}; -use strata_primitives::params::RollupParams; +use strata_primitives::{l1::payload::L1PayloadType, params::RollupParams}; use strata_state::{ batch::SignedBatchCheckpoint, tx::{DepositInfo, DepositRequestInfo, ProtocolOperation}, }; +use tracing::warn; use 
super::messages::ProtocolOpTxRef; pub use crate::filter_types::TxFilterConfig; use crate::{ deposit::{deposit_request::extract_deposit_request_info, deposit_tx::extract_deposit_info}, - envelope::parser::parse_envelope_data, + envelope::parser::parse_envelope_payloads, }; /// Filter protocol operations as refs from relevant [`Transaction`]s in a block based on given @@ -72,12 +73,25 @@ fn parse_envelope_checkpoints<'a>( tx: &'a Transaction, params: &'a RollupParams, ) -> impl Iterator + 'a { - tx.input.iter().filter_map(|inp| { + tx.input.iter().flat_map(|inp| { inp.witness .tapscript() - .and_then(|scr| parse_envelope_data(&scr.into(), params).ok()) - // TODO: get checkpoint or da - .and_then(|data| borsh::from_slice::(data.data()).ok()) + .and_then(|scr| parse_envelope_payloads(&scr.into(), params).ok()) + .map(|items| { + items + .into_iter() + .filter_map(|item| match *item.payload_type() { + L1PayloadType::Checkpoint => { + borsh::from_slice::(item.data()).ok() + } + L1PayloadType::Da => { + warn!("Da parsing is not supported yet"); + None + } + }) + .collect::>() + }) + .unwrap_or_default() }) } From c175668ca391273cd25587e480997c83c3ce0918 Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Fri, 17 Jan 2025 18:51:00 +0545 Subject: [PATCH 15/17] Some renamings --- crates/btcio/src/writer/bundler.rs | 4 +- crates/btcio/src/writer/signer.rs | 10 +-- crates/btcio/src/writer/task.rs | 74 +++++++++++----------- crates/consensus-logic/src/duty/worker.rs | 2 +- crates/db/src/traits.rs | 14 ++-- crates/db/src/types.rs | 18 +++--- crates/l1tx/src/envelope/builder.rs | 3 - crates/l1tx/src/envelope/parser.rs | 9 +-- crates/rocksdb-store/src/writer/db.rs | 18 +++--- crates/rocksdb-store/src/writer/schemas.rs | 4 +- crates/storage/src/ops/writer.rs | 6 +- 11 files changed, 78 insertions(+), 84 deletions(-) diff --git a/crates/btcio/src/writer/bundler.rs b/crates/btcio/src/writer/bundler.rs index 73e07f173..9fc17d849 100644 --- a/crates/btcio/src/writer/bundler.rs +++ b/crates/btcio/src/writer/bundler.rs @@ -1,6 +1,6 @@ use std::{sync::Arc, time::Duration}; -use strata_db::types::{IntentEntry, IntentStatus, PayloadEntry}; +use strata_db::types::{BundledPayloadEntry, IntentEntry, IntentStatus}; use strata_storage::ops::writer::EnvelopeDataOps; use tokio::time::sleep; use tracing::*; @@ -30,7 +30,7 @@ async fn process_unbundled_entries( for mut entry in unbundled { // NOTE: In future, the logic to create payload will be different. We need to group // intents and create payload entries accordingly - let payload_entry = PayloadEntry::new_unsigned(vec![entry.payload().clone()]); + let payload_entry = BundledPayloadEntry::new_unsigned(vec![entry.payload().clone()]); // TODO: the following block till "Atomic Ends" should be atomic. let idx = ops.get_next_payload_idx_async().await?; diff --git a/crates/btcio/src/writer/signer.rs b/crates/btcio/src/writer/signer.rs index 081318a45..a606b28b4 100644 --- a/crates/btcio/src/writer/signer.rs +++ b/crates/btcio/src/writer/signer.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use bitcoin::{consensus, Transaction}; -use strata_db::types::{L1TxEntry, PayloadEntry}; +use strata_db::types::{BundledPayloadEntry, L1TxEntry}; use strata_primitives::buf::Buf32; use tracing::*; @@ -20,7 +20,7 @@ type BlobIdx = u64; /// 2. A signed intent needs to be resigned because somehow its inputs were spent/missing /// 3. 
A confirmed block that includes the tx gets reorged pub async fn create_and_sign_payload_envelopes( - payloadentry: &PayloadEntry, + payloadentry: &BundledPayloadEntry, broadcast_handle: &L1BroadcastHandle, ctx: Arc>, ) -> Result<(Buf32, Buf32), EnvelopeError> { @@ -59,7 +59,7 @@ pub async fn create_and_sign_payload_envelopes( #[cfg(test)] mod test { - use strata_db::types::{PayloadEntry, PayloadL1Status}; + use strata_db::types::{BundledPayloadEntry, L1BundleStatus}; use strata_primitives::l1::payload::L1Payload; use super::*; @@ -76,9 +76,9 @@ mod test { // First insert an unsigned blob let payload = L1Payload::new_da([1; 100].to_vec()); - let entry = PayloadEntry::new_unsigned(vec![payload]); + let entry = BundledPayloadEntry::new_unsigned(vec![payload]); - assert_eq!(entry.status, PayloadL1Status::Unsigned); + assert_eq!(entry.status, L1BundleStatus::Unsigned); assert_eq!(entry.commit_txid, Buf32::zero()); assert_eq!(entry.reveal_txid, Buf32::zero()); diff --git a/crates/btcio/src/writer/task.rs b/crates/btcio/src/writer/task.rs index ab5a63135..f71272f9d 100644 --- a/crates/btcio/src/writer/task.rs +++ b/crates/btcio/src/writer/task.rs @@ -4,7 +4,7 @@ use bitcoin::Address; use strata_config::btcio::WriterConfig; use strata_db::{ traits::L1WriterDatabase, - types::{IntentEntry, L1TxStatus, PayloadEntry, PayloadL1Status}, + types::{BundledPayloadEntry, IntentEntry, L1BundleStatus, L1TxStatus}, }; use strata_primitives::{ l1::payload::{PayloadDest, PayloadIntent}, @@ -137,7 +137,7 @@ fn get_next_payloadidx_to_watch(insc_ops: &EnvelopeDataOps) -> anyhow::Result( match payloadentry.status { // If unsigned or needs resign, create new signed commit/reveal txs and update the // entry - PayloadL1Status::Unsigned | PayloadL1Status::NeedsResign => { + L1BundleStatus::Unsigned | L1BundleStatus::NeedsResign => { debug!(?payloadentry.status, %curr_payloadidx, "Processing unsigned payloadentry"); match create_and_sign_payload_envelopes( &payloadentry, @@ -185,7 +185,7 @@ pub async fn watcher_task( { Ok((cid, rid)) => { let mut updated_entry = payloadentry.clone(); - updated_entry.status = PayloadL1Status::Unpublished; + updated_entry.status = L1BundleStatus::Unpublished; updated_entry.commit_txid = cid; updated_entry.reveal_txid = rid; insc_ops @@ -206,13 +206,13 @@ pub async fn watcher_task( } } // If finalized, nothing to do, move on to process next entry - PayloadL1Status::Finalized => { + L1BundleStatus::Finalized => { curr_payloadidx += 1; } // If entry is signed but not finalized or excluded yet, check broadcast txs status - PayloadL1Status::Published - | PayloadL1Status::Confirmed - | PayloadL1Status::Unpublished => { + L1BundleStatus::Published + | L1BundleStatus::Confirmed + | L1BundleStatus::Unpublished => { debug!(%curr_payloadidx, "Checking payloadentry's broadcast status"); let commit_tx = broadcast_handle .get_tx_entry_by_id_async(payloadentry.commit_txid) @@ -237,14 +237,14 @@ pub async fn watcher_task( .put_payload_entry_async(curr_payloadidx, updated_entry) .await?; - if new_status == PayloadL1Status::Finalized { + if new_status == L1BundleStatus::Finalized { curr_payloadidx += 1; } } _ => { warn!(%curr_payloadidx, "Corresponding commit/reveal entry for payloadentry not found in broadcast db. 
Sign and create transactions again."); let mut updated_entry = payloadentry.clone(); - updated_entry.status = PayloadL1Status::Unsigned; + updated_entry.status = L1BundleStatus::Unsigned; insc_ops .put_payload_entry_async(curr_payloadidx, updated_entry) .await?; @@ -260,15 +260,15 @@ pub async fn watcher_task( } async fn update_l1_status( - payloadentry: &PayloadEntry, - new_status: &PayloadL1Status, + payloadentry: &BundledPayloadEntry, + new_status: &L1BundleStatus, status_channel: &StatusChannel, ) { // Update L1 status. Since we are processing one payloadentry at a time, if the entry is // finalized/confirmed, then it means it is published as well - if *new_status == PayloadL1Status::Published - || *new_status == PayloadL1Status::Confirmed - || *new_status == PayloadL1Status::Finalized + if *new_status == L1BundleStatus::Published + || *new_status == L1BundleStatus::Confirmed + || *new_status == L1BundleStatus::Finalized { let status_updates = [ L1StatusUpdate::LastPublishedTxid(payloadentry.reveal_txid.into()), @@ -283,23 +283,23 @@ async fn update_l1_status( fn determine_payload_next_status( commit_status: &L1TxStatus, reveal_status: &L1TxStatus, -) -> PayloadL1Status { +) -> L1BundleStatus { match (&commit_status, &reveal_status) { // If reveal is finalized, both are finalized - (_, L1TxStatus::Finalized { .. }) => PayloadL1Status::Finalized, + (_, L1TxStatus::Finalized { .. }) => L1BundleStatus::Finalized, // If reveal is confirmed, both are confirmed - (_, L1TxStatus::Confirmed { .. }) => PayloadL1Status::Confirmed, + (_, L1TxStatus::Confirmed { .. }) => L1BundleStatus::Confirmed, // If reveal is published regardless of commit, the payload is published - (_, L1TxStatus::Published) => PayloadL1Status::Published, + (_, L1TxStatus::Published) => L1BundleStatus::Published, // if commit has invalid inputs, needs resign - (L1TxStatus::InvalidInputs, _) => PayloadL1Status::NeedsResign, + (L1TxStatus::InvalidInputs, _) => L1BundleStatus::NeedsResign, // If commit is unpublished, both are upublished - (L1TxStatus::Unpublished, _) => PayloadL1Status::Unpublished, + (L1TxStatus::Unpublished, _) => L1BundleStatus::Unpublished, // If commit is published but not reveal, the payload is unpublished - (_, L1TxStatus::Unpublished) => PayloadL1Status::Unpublished, + (_, L1TxStatus::Unpublished) => L1BundleStatus::Unpublished, // If reveal has invalid inputs, these need resign because we can do nothing with just // commit tx confirmed. This should not occur in practice - (_, L1TxStatus::InvalidInputs) => PayloadL1Status::NeedsResign, + (_, L1TxStatus::InvalidInputs) => L1BundleStatus::NeedsResign, } } @@ -326,21 +326,21 @@ mod test { fn test_initialize_writer_state_with_existing_payloads() { let iops = get_envelope_ops(); - let mut e1: PayloadEntry = ArbitraryGenerator::new().generate(); - e1.status = PayloadL1Status::Finalized; + let mut e1: BundledPayloadEntry = ArbitraryGenerator::new().generate(); + e1.status = L1BundleStatus::Finalized; iops.put_payload_entry_blocking(0, e1).unwrap(); - let mut e2: PayloadEntry = ArbitraryGenerator::new().generate(); - e2.status = PayloadL1Status::Published; + let mut e2: BundledPayloadEntry = ArbitraryGenerator::new().generate(); + e2.status = L1BundleStatus::Published; iops.put_payload_entry_blocking(1, e2).unwrap(); let expected_idx = 1; // All entries before this do not need to be watched. 
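
The expected_idx of 1 in this test encodes the rule that get_next_payloadidx_to_watch implements: every entry strictly before the first one that is not yet Finalized no longer needs watching. A minimal, self-contained sketch of that rule over a plain slice (the Status enum and the function name are stand-ins; the real code works against EnvelopeDataOps and L1BundleStatus):

    /// Stand-in for the relevant `L1BundleStatus` variants; illustrative only.
    #[derive(PartialEq)]
    enum Status {
        Finalized,
        Published,
        Unsigned,
    }

    /// Index of the first entry that still needs watching: every entry before the
    /// first non-finalized one is already done and can be skipped.
    fn next_idx_to_watch(statuses: &[Status]) -> usize {
        statuses
            .iter()
            .position(|s| *s != Status::Finalized)
            .unwrap_or(statuses.len())
    }

    fn main() {
        // Mirrors the test above: entry 0 is Finalized, entry 1 is Published.
        let statuses = [Status::Finalized, Status::Published, Status::Unsigned, Status::Unsigned];
        assert_eq!(next_idx_to_watch(&statuses), 1);
    }
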
- let mut e3: PayloadEntry = ArbitraryGenerator::new().generate(); - e3.status = PayloadL1Status::Unsigned; + let mut e3: BundledPayloadEntry = ArbitraryGenerator::new().generate(); + e3.status = L1BundleStatus::Unsigned; iops.put_payload_entry_blocking(2, e3).unwrap(); - let mut e4: PayloadEntry = ArbitraryGenerator::new().generate(); - e4.status = PayloadL1Status::Unsigned; + let mut e4: BundledPayloadEntry = ArbitraryGenerator::new().generate(); + e4.status = L1BundleStatus::Unsigned; iops.put_payload_entry_blocking(3, e4).unwrap(); let idx = get_next_payloadidx_to_watch(&iops).unwrap(); @@ -353,30 +353,30 @@ mod test { // When both are unpublished let (commit_status, reveal_status) = (L1TxStatus::Unpublished, L1TxStatus::Unpublished); let next = determine_payload_next_status(&commit_status, &reveal_status); - assert_eq!(next, PayloadL1Status::Unpublished); + assert_eq!(next, L1BundleStatus::Unpublished); // When both are Finalized let fin = L1TxStatus::Finalized { confirmations: 5 }; let (commit_status, reveal_status) = (fin.clone(), fin); let next = determine_payload_next_status(&commit_status, &reveal_status); - assert_eq!(next, PayloadL1Status::Finalized); + assert_eq!(next, L1BundleStatus::Finalized); // When both are Confirmed let conf = L1TxStatus::Confirmed { confirmations: 5 }; let (commit_status, reveal_status) = (conf.clone(), conf.clone()); let next = determine_payload_next_status(&commit_status, &reveal_status); - assert_eq!(next, PayloadL1Status::Confirmed); + assert_eq!(next, L1BundleStatus::Confirmed); // When both are Published let publ = L1TxStatus::Published; let (commit_status, reveal_status) = (publ.clone(), publ.clone()); let next = determine_payload_next_status(&commit_status, &reveal_status); - assert_eq!(next, PayloadL1Status::Published); + assert_eq!(next, L1BundleStatus::Published); // When both have invalid let (commit_status, reveal_status) = (L1TxStatus::InvalidInputs, L1TxStatus::InvalidInputs); let next = determine_payload_next_status(&commit_status, &reveal_status); - assert_eq!(next, PayloadL1Status::NeedsResign); + assert_eq!(next, L1BundleStatus::NeedsResign); // When reveal has invalid inputs but commit is confirmed. I doubt this would happen in // practice for our case. @@ -384,6 +384,6 @@ mod test { // published. 
let (commit_status, reveal_status) = (conf.clone(), L1TxStatus::InvalidInputs); let next = determine_payload_next_status(&commit_status, &reveal_status); - assert_eq!(next, PayloadL1Status::NeedsResign); + assert_eq!(next, L1BundleStatus::NeedsResign); } } diff --git a/crates/consensus-logic/src/duty/worker.rs b/crates/consensus-logic/src/duty/worker.rs index 4b567bbc3..0f85cdd3b 100644 --- a/crates/consensus-logic/src/duty/worker.rs +++ b/crates/consensus-logic/src/duty/worker.rs @@ -410,7 +410,7 @@ fn perform_duty( Ok(()) } Duty::CommitBatch(data) => { - info!(data = ?data, "commit batch duty"); + info!(epoch_idx = ?data.idx(), "commit batch duty"); let checkpoint = check_and_get_batch_checkpoint(data, checkpoint_handle, pool, params.as_ref())?; diff --git a/crates/db/src/traits.rs b/crates/db/src/traits.rs index d31ca6465..e48dee61a 100644 --- a/crates/db/src/traits.rs +++ b/crates/db/src/traits.rs @@ -19,7 +19,7 @@ use strata_zkvm::ProofReceipt; use crate::{ entities::bridge_tx_state::BridgeTxState, - types::{CheckpointEntry, IntentEntry, L1TxEntry, PayloadEntry}, + types::{BundledPayloadEntry, CheckpointEntry, IntentEntry, L1TxEntry}, DbResult, }; @@ -244,14 +244,14 @@ pub trait CheckpointDatabase { fn put_batch_checkpoint(&self, batchidx: u64, entry: CheckpointEntry) -> DbResult<()>; } -/// A trait encapsulating provider and store traits to create/update [`PayloadEntry`] in the -/// database and to fetch [`PayloadEntry`] and indices from the database +/// A trait encapsulating provider and store traits to create/update [`BundledPayloadEntry`] in the +/// database and to fetch [`BundledPayloadEntry`] and indices from the database pub trait L1WriterDatabase { - /// Store the [`PayloadEntry`]. - fn put_payload_entry(&self, idx: u64, payloadentry: PayloadEntry) -> DbResult<()>; + /// Store the [`BundledPayloadEntry`]. + fn put_payload_entry(&self, idx: u64, payloadentry: BundledPayloadEntry) -> DbResult<()>; - /// Get a [`PayloadEntry`] by its index. - fn get_payload_entry_by_idx(&self, idx: u64) -> DbResult>; + /// Get a [`BundledPayloadEntry`] by its index. + fn get_payload_entry_by_idx(&self, idx: u64) -> DbResult>; /// Get the next payload index fn get_next_payload_idx(&self) -> DbResult; diff --git a/crates/db/src/types.rs b/crates/db/src/types.rs index 6f83b8102..6b1942029 100644 --- a/crates/db/src/types.rs +++ b/crates/db/src/types.rs @@ -42,30 +42,30 @@ impl IntentEntry { } /// Status of Intent indicating various stages of being bundled to L1 transaction. -/// Unbundled Intents are collected and bundled to create [`PayloadEntry]. +/// Unbundled Intents are collected and bundled to create [`BundledPayloadEntry]. #[derive(Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, Arbitrary)] pub enum IntentStatus { // It is not bundled yet, and thus will be collected and processed by bundler. Unbundled, - // It has been bundled to [`PayloadEntry`] with given bundle idx. + // It has been bundled to [`BundledPayloadEntry`] with given bundle idx. Bundled(u64), } /// Represents data for a payload we're still planning to post to L1. 
#[derive(Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, Arbitrary)] -pub struct PayloadEntry { +pub struct BundledPayloadEntry { pub payloads: Vec, pub commit_txid: Buf32, pub reveal_txid: Buf32, - pub status: PayloadL1Status, + pub status: L1BundleStatus, } -impl PayloadEntry { +impl BundledPayloadEntry { pub fn new( payloads: Vec, commit_txid: Buf32, reveal_txid: Buf32, - status: PayloadL1Status, + status: L1BundleStatus, ) -> Self { Self { payloads, @@ -75,7 +75,7 @@ impl PayloadEntry { } } - /// Create new unsigned [`PayloadEntry`]. + /// Create new unsigned [`BundledPayloadEntry`]. /// /// NOTE: This won't have commit - reveal pairs associated with it. /// Because it is better to defer gathering utxos as late as possible to prevent being spent @@ -83,13 +83,13 @@ impl PayloadEntry { pub fn new_unsigned(payloads: Vec) -> Self { let cid = Buf32::zero(); let rid = Buf32::zero(); - Self::new(payloads, cid, rid, PayloadL1Status::Unsigned) + Self::new(payloads, cid, rid, L1BundleStatus::Unsigned) } } /// Various status that transactions corresponding to a payload can be in L1 #[derive(Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, Arbitrary)] -pub enum PayloadL1Status { +pub enum L1BundleStatus { /// The payload has not been signed yet, i.e commit-reveal transactions have not been created /// yet. Unsigned, diff --git a/crates/l1tx/src/envelope/builder.rs b/crates/l1tx/src/envelope/builder.rs index ef8fe7b6b..914469b46 100644 --- a/crates/l1tx/src/envelope/builder.rs +++ b/crates/l1tx/src/envelope/builder.rs @@ -11,7 +11,6 @@ use strata_primitives::{ l1::payload::{L1Payload, L1PayloadType}, params::Params, }; -use tracing::*; // Generates a [`ScriptBuf`] that consists of `OP_IF .. OP_ENDIF` block pub fn build_envelope_script( @@ -45,9 +44,7 @@ fn build_payload_envelope( )); // Insert actual data - trace!(batchdata_size = %payload.data().len(), "Inserting batch data"); for chunk in payload.data().chunks(520) { - trace!(size=%chunk.len(), "inserting chunk"); builder = builder.push_slice(PushBytesBuf::try_from(chunk.to_vec())?); } builder = builder.push_opcode(OP_ENDIF); diff --git a/crates/l1tx/src/envelope/parser.rs b/crates/l1tx/src/envelope/parser.rs index 5812d28e2..afaf72b53 100644 --- a/crates/l1tx/src/envelope/parser.rs +++ b/crates/l1tx/src/envelope/parser.rs @@ -1,5 +1,3 @@ -use std::str::from_utf8; - use bitcoin::{ opcodes::all::OP_IF, script::{Instruction, Instructions}, @@ -84,11 +82,10 @@ fn parse_l1_payload( Ok(L1Payload::new(payload, ptype)) } -fn parse_payload_type(bytes: &[u8], params: &RollupParams) -> Option { - let str = from_utf8(bytes).ok()?; - if params.checkpoint_tag == str { +fn parse_payload_type(tag_bytes: &[u8], params: &RollupParams) -> Option { + if params.checkpoint_tag.as_bytes() == tag_bytes { Some(L1PayloadType::Checkpoint) - } else if params.da_tag == str { + } else if params.da_tag.as_bytes() == tag_bytes { Some(L1PayloadType::Da) } else { None diff --git a/crates/rocksdb-store/src/writer/db.rs b/crates/rocksdb-store/src/writer/db.rs index aa714cf77..94e7a3505 100644 --- a/crates/rocksdb-store/src/writer/db.rs +++ b/crates/rocksdb-store/src/writer/db.rs @@ -4,7 +4,7 @@ use rockbound::{utils::get_last, OptimisticTransactionDB as DB, SchemaDBOperatio use strata_db::{ errors::DbError, traits::L1WriterDatabase, - types::{IntentEntry, PayloadEntry}, + types::{BundledPayloadEntry, IntentEntry}, DbResult, }; use strata_primitives::buf::Buf32; @@ -28,7 +28,7 @@ impl RBL1WriterDb { } impl L1WriterDatabase for RBL1WriterDb { - fn 
put_payload_entry(&self, idx: u64, entry: PayloadEntry) -> DbResult<()> { + fn put_payload_entry(&self, idx: u64, entry: BundledPayloadEntry) -> DbResult<()> { self.db .with_optimistic_txn( rockbound::TransactionRetry::Count(self.ops.retry_count), @@ -40,7 +40,7 @@ impl L1WriterDatabase for RBL1WriterDb { .map_err(|e| DbError::TransactionError(e.to_string())) } - fn get_payload_entry_by_idx(&self, idx: u64) -> DbResult> { + fn get_payload_entry_by_idx(&self, idx: u64) -> DbResult> { Ok(self.db.get::(&idx)?) } @@ -113,7 +113,7 @@ mod tests { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); let seq_db = RBL1WriterDb::new(db, db_ops); - let blob: PayloadEntry = ArbitraryGenerator::new().generate(); + let blob: BundledPayloadEntry = ArbitraryGenerator::new().generate(); seq_db.put_payload_entry(0, blob.clone()).unwrap(); @@ -125,7 +125,7 @@ mod tests { fn test_put_blob_existing_entry() { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); let seq_db = RBL1WriterDb::new(db, db_ops); - let blob: PayloadEntry = ArbitraryGenerator::new().generate(); + let blob: BundledPayloadEntry = ArbitraryGenerator::new().generate(); seq_db.put_payload_entry(0, blob.clone()).unwrap(); @@ -140,12 +140,12 @@ mod tests { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); let seq_db = RBL1WriterDb::new(db, db_ops); - let entry: PayloadEntry = ArbitraryGenerator::new().generate(); + let entry: BundledPayloadEntry = ArbitraryGenerator::new().generate(); // Insert seq_db.put_payload_entry(0, entry.clone()).unwrap(); - let updated_entry: PayloadEntry = ArbitraryGenerator::new().generate(); + let updated_entry: BundledPayloadEntry = ArbitraryGenerator::new().generate(); // Update existing idx seq_db.put_payload_entry(0, updated_entry.clone()).unwrap(); @@ -158,7 +158,7 @@ mod tests { let (db, db_ops) = get_rocksdb_tmp_instance().unwrap(); let seq_db = RBL1WriterDb::new(db, db_ops); - let blob: PayloadEntry = ArbitraryGenerator::new().generate(); + let blob: BundledPayloadEntry = ArbitraryGenerator::new().generate(); let next_blob_idx = seq_db.get_next_payload_idx().unwrap(); assert_eq!( @@ -171,7 +171,7 @@ mod tests { .unwrap(); // Now the next idx is 1 - let blob: PayloadEntry = ArbitraryGenerator::new().generate(); + let blob: BundledPayloadEntry = ArbitraryGenerator::new().generate(); seq_db.put_payload_entry(1, blob.clone()).unwrap(); let next_blob_idx = seq_db.get_next_payload_idx().unwrap(); diff --git a/crates/rocksdb-store/src/writer/schemas.rs b/crates/rocksdb-store/src/writer/schemas.rs index c3000ecd1..02a5d05d5 100644 --- a/crates/rocksdb-store/src/writer/schemas.rs +++ b/crates/rocksdb-store/src/writer/schemas.rs @@ -1,4 +1,4 @@ -use strata_db::types::{IntentEntry, PayloadEntry}; +use strata_db::types::{BundledPayloadEntry, IntentEntry}; use strata_primitives::buf::Buf32; use crate::{ @@ -8,7 +8,7 @@ use crate::{ define_table_with_seek_key_codec!( /// A table to store idx-> payload entry mapping - (PayloadSchema) u64 => PayloadEntry + (PayloadSchema) u64 => BundledPayloadEntry ); define_table_with_default_codec!( diff --git a/crates/storage/src/ops/writer.rs b/crates/storage/src/ops/writer.rs index 70febaca4..0b49e01a4 100644 --- a/crates/storage/src/ops/writer.rs +++ b/crates/storage/src/ops/writer.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use strata_db::{ traits::L1WriterDatabase, - types::{IntentEntry, PayloadEntry}, + types::{BundledPayloadEntry, IntentEntry}, DbResult, }; use strata_primitives::buf::Buf32; @@ -13,8 +13,8 @@ use crate::exec::*; inst_ops_simple! 
{ ( => EnvelopeDataOps) { - put_payload_entry(idx: u64, payloadentry: PayloadEntry) => (); - get_payload_entry_by_idx(idx: u64) => Option; + put_payload_entry(idx: u64, payloadentry: BundledPayloadEntry) => (); + get_payload_entry_by_idx(idx: u64) => Option; get_next_payload_idx() => u64; put_intent_entry(id: Buf32, entry: IntentEntry) => (); get_intent_by_id(id: Buf32) => Option; From 9cb200f4ad8d1b922f2a711aafdb78fef227f78b Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Tue, 21 Jan 2025 13:59:16 +0545 Subject: [PATCH 16/17] Enhance bundler task --- crates/btcio/src/writer/bundler.rs | 82 +++++++++++++++++---------- crates/btcio/src/writer/task.rs | 35 +++++++++--- crates/config/src/btcio.rs | 7 ++- crates/db/src/traits.rs | 2 +- crates/l1tx/src/filter.rs | 10 ++-- crates/rocksdb-store/src/writer/db.rs | 10 +--- 6 files changed, 92 insertions(+), 54 deletions(-) diff --git a/crates/btcio/src/writer/bundler.rs b/crates/btcio/src/writer/bundler.rs index 9fc17d849..2f8a8fe45 100644 --- a/crates/btcio/src/writer/bundler.rs +++ b/crates/btcio/src/writer/bundler.rs @@ -1,32 +1,61 @@ use std::{sync::Arc, time::Duration}; -use strata_db::types::{BundledPayloadEntry, IntentEntry, IntentStatus}; +use strata_config::btcio::WriterConfig; +use strata_db::{ + types::{BundledPayloadEntry, IntentEntry, IntentStatus}, + DbResult, +}; use strata_storage::ops::writer::EnvelopeDataOps; -use tokio::time::sleep; +use strata_tasks::ShutdownGuard; +use tokio::{select, sync::mpsc::Receiver}; use tracing::*; -// TODO: get this from config -const BUNDLE_INTERVAL: u64 = 200; // millis - /// Periodically bundles unbundled intents into payload entries. -pub(crate) async fn bundler_task(ops: Arc) -> anyhow::Result<()> { - let mut last_idx = 0; +pub(crate) async fn bundler_task( + mut unbundled: Vec, + ops: Arc, + config: Arc, + mut intent_rx: Receiver, + shutdown: ShutdownGuard, +) -> anyhow::Result<()> { + let interval = tokio::time::interval(Duration::from_millis(config.bundle_interval_ms)); + tokio::pin!(interval); loop { - let (unbundled, new_idx) = get_unbundled_intents_after(last_idx, ops.as_ref()).await?; - process_unbundled_entries(ops.as_ref(), unbundled).await?; - last_idx = new_idx; + select! { + maybe_intent = intent_rx.recv() => { + if shutdown.should_shutdown() { + info!("Bundler received shutdown. Stopping."); + break; + } + if let Some(intent) = maybe_intent { + unbundled.push(intent); + } else { + warn!("Intent receiver closed, stopping bundler task"); + break; + } + } - let _ = sleep(Duration::from_millis(BUNDLE_INTERVAL)).await; + _ = interval.tick() => { + if shutdown.should_shutdown() { + info!("Bundler received shutdown. Stopping."); + break; + } + // Process unbundled, returning entries which are unprocessed for some reason. + unbundled = process_unbundled_entries(ops.as_ref(), unbundled).await?; + } + } } + Ok(()) } -/// Processes and bundles a list of unbundled intents into payload entries. +/// Processes and bundles a list of unbundled intents into payload entries. Returns a vector of +/// entries which are unbundled for some reason. /// NOTE: The current logic is simply 1-1 mapping between intents and payloads, in future it can /// be sophisticated. async fn process_unbundled_entries( ops: &EnvelopeDataOps, unbundled: Vec, -) -> anyhow::Result<()> { +) -> DbResult> { for mut entry in unbundled { // NOTE: In future, the logic to create payload will be different. 
We need to group // intents and create payload entries accordingly @@ -42,22 +71,22 @@ async fn process_unbundled_entries( .await?; // Atomic Ends. } - Ok(()) + // Return empty Vec because each entry is being bundled right now. This might be different in + // future. + Ok(vec![]) } /// Retrieves unbundled intents after a given index in ascending order along with the latest -/// unbundled entry idx. -async fn get_unbundled_intents_after( - idx: u64, +/// entry idx. +pub(crate) fn get_initial_unbundled_entries( ops: &EnvelopeDataOps, -) -> anyhow::Result<(Vec, u64)> { - let latest_idx = ops.get_next_intent_idx_async().await?.saturating_sub(1); - let mut curr_idx = latest_idx; - +) -> anyhow::Result> { + let mut curr_idx = ops.get_next_intent_idx_blocking()?; let mut unbundled = Vec::new(); - while curr_idx >= idx { - if let Some(intent) = ops.get_intent_by_idx_async(curr_idx).await? { + while curr_idx > 0 { + curr_idx -= 1; + if let Some(intent) = ops.get_intent_by_idx_blocking(curr_idx)? { match intent.status { IntentStatus::Unbundled => unbundled.push(intent), IntentStatus::Bundled(_) => { @@ -69,15 +98,10 @@ async fn get_unbundled_intents_after( warn!(%curr_idx, "Could not find expected intent in db"); break; } - - if curr_idx == 0 { - break; - } - curr_idx -= 1; } // Reverse the items so that they are in ascending order of index unbundled.reverse(); - Ok((unbundled, latest_idx)) + Ok(unbundled) } diff --git a/crates/btcio/src/writer/task.rs b/crates/btcio/src/writer/task.rs index f71272f9d..4cdc65509 100644 --- a/crates/btcio/src/writer/task.rs +++ b/crates/btcio/src/writer/task.rs @@ -13,9 +13,10 @@ use strata_primitives::{ use strata_status::StatusChannel; use strata_storage::ops::writer::{Context, EnvelopeDataOps}; use strata_tasks::TaskExecutor; +use tokio::sync::mpsc::{self, Sender}; use tracing::*; -use super::bundler::bundler_task; +use super::bundler::{bundler_task, get_initial_unbundled_entries}; use crate::{ broadcaster::L1BroadcastHandle, rpc::{traits::WriterRpc, BitcoinClient}, @@ -28,11 +29,12 @@ use crate::{ /// A handle to the Envelope task. pub struct EnvelopeHandle { ops: Arc, + intent_tx: Sender, } impl EnvelopeHandle { - pub fn new(ops: Arc) -> Self { - Self { ops } + pub fn new(ops: Arc, intent_tx: Sender) -> Self { + Self { ops, intent_tx } } /// Checks if it is duplicate, if not creates a new [`IntentEntry`] from `intent` and puts it in @@ -56,7 +58,13 @@ impl EnvelopeHandle { // Create and store IntentEntry let entry = IntentEntry::new_unbundled(intent); - Ok(self.ops.put_intent_entry_blocking(id, entry)?) + self.ops.put_intent_entry_blocking(id, entry.clone())?; + + // Send to bundler + if let Err(e) = self.intent_tx.blocking_send(entry) { + warn!("Could not send intent entry to bundler: {:?}", e); + } + Ok(()) } /// Checks if it is duplicate, if not creates a new [`IntentEntry`] from `intent` and puts it in @@ -80,7 +88,14 @@ impl EnvelopeHandle { // Create and store IntentEntry let entry = IntentEntry::new_unbundled(intent); - Ok(self.ops.put_intent_entry_async(id, entry).await?) 
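
In the new flow the handle persists the IntentEntry and then forwards it to the bundler over an mpsc channel, while the bundler drains that channel and also bundles on a timer driven by the new bundle_interval_ms setting. Stripped of the database and shutdown plumbing, the loop reduces to the pattern below; this is a simplified sketch in which Intent is a placeholder type, whereas the real task additionally threads through EnvelopeDataOps, WriterConfig and the ShutdownGuard:

    use std::time::Duration;

    use tokio::{select, sync::mpsc, time};

    /// Placeholder for `IntentEntry`; illustrative only.
    type Intent = String;

    /// Drain intents from the handle and bundle on a timer, mirroring the shape of
    /// `bundler_task` without the database and shutdown plumbing.
    async fn bundler_loop(mut intent_rx: mpsc::Receiver<Intent>, bundle_interval_ms: u64) {
        let mut pending: Vec<Intent> = Vec::new();
        let mut tick = time::interval(Duration::from_millis(bundle_interval_ms));
        loop {
            select! {
                maybe_intent = intent_rx.recv() => match maybe_intent {
                    // A new intent arrived from the handle: queue it for the next pass.
                    Some(intent) => pending.push(intent),
                    // All senders dropped: nothing more will arrive, stop the task.
                    None => break,
                },
                _ = tick.tick() => {
                    // The real task calls `process_unbundled_entries` here, which writes
                    // `BundledPayloadEntry` rows and keeps back anything left unbundled.
                    pending.clear();
                }
            }
        }
    }

    #[tokio::main]
    async fn main() {
        let (intent_tx, intent_rx) = mpsc::channel::<Intent>(64);
        let bundler = tokio::spawn(bundler_loop(intent_rx, 500));
        intent_tx.send("intent".to_string()).await.unwrap();
        drop(intent_tx); // closing the channel lets the loop exit cleanly
        bundler.await.unwrap();
    }

Because the channel is bounded, a slow bundler applies backpressure: send/blocking_send in the handle waits rather than dropping intents.
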
+ self.ops.put_intent_entry_blocking(id, entry.clone())?; + + // Send to bundler + if let Err(e) = self.intent_tx.send(entry).await { + warn!("Could not send intent entry to bundler: {:?}", e); + } + + Ok(()) } } @@ -106,11 +121,12 @@ pub fn start_envelope_task( ) -> anyhow::Result> { let writer_ops = Arc::new(Context::new(db).into_ops(pool)); let next_watch_payload_idx = get_next_payloadidx_to_watch(writer_ops.as_ref())?; + let (intent_tx, intent_rx) = mpsc::channel::(64); - let envelope_handle = Arc::new(EnvelopeHandle::new(writer_ops.clone())); + let envelope_handle = Arc::new(EnvelopeHandle::new(writer_ops.clone(), intent_tx)); let ctx = Arc::new(WriterContext::new( params, - config, + config.clone(), sequencer_address, bitcoin_client, status_channel, @@ -121,8 +137,9 @@ pub fn start_envelope_task( watcher_task(next_watch_payload_idx, ctx, wops.clone(), broadcast_handle).await }); - executor.spawn_critical_async("btcio::bundler_task", async move { - bundler_task(writer_ops).await + let unbundled = get_initial_unbundled_entries(writer_ops.as_ref())?; + executor.spawn_critical_async_with_shutdown("btcio::bundler_task", |shutdown| async move { + bundler_task(unbundled, writer_ops, config.clone(), intent_rx, shutdown).await }); Ok(envelope_handle) diff --git a/crates/config/src/btcio.rs b/crates/config/src/btcio.rs index 9a29cba84..b7ca3a51d 100644 --- a/crates/config/src/btcio.rs +++ b/crates/config/src/btcio.rs @@ -17,13 +17,15 @@ pub struct ReaderConfig { /// Configuration for btcio writer/signer. #[derive(Debug, Clone, Deserialize)] pub struct WriterConfig { - /// How often to invoke the writer + /// How often to invoke the writer. pub write_poll_dur_ms: u64, /// How the fees for are determined. // FIXME: This should actually be a part of signer. pub fee_policy: FeePolicy, - /// How much amount(in sats) to send to reveal address + /// How much amount(in sats) to send to reveal address. pub reveal_amount: u64, + /// How often to bundle write intents. + pub bundle_interval_ms: u64, } /// Definition of how fees are determined while creating l1 transactions. @@ -42,6 +44,7 @@ impl Default for WriterConfig { write_poll_dur_ms: 1_000, fee_policy: FeePolicy::Smart, reveal_amount: 1_000, + bundle_interval_ms: 500, } } } diff --git a/crates/db/src/traits.rs b/crates/db/src/traits.rs index e48dee61a..8113df232 100644 --- a/crates/db/src/traits.rs +++ b/crates/db/src/traits.rs @@ -244,7 +244,7 @@ pub trait CheckpointDatabase { fn put_batch_checkpoint(&self, batchidx: u64, entry: CheckpointEntry) -> DbResult<()>; } -/// A trait encapsulating provider and store traits to create/update [`BundledPayloadEntry`] in the +/// Encapsulates provider and store traits to create/update [`BundledPayloadEntry`] in the /// database and to fetch [`BundledPayloadEntry`] and indices from the database pub trait L1WriterDatabase { /// Store the [`BundledPayloadEntry`]. diff --git a/crates/l1tx/src/filter.rs b/crates/l1tx/src/filter.rs index a3d93fc19..2cb23348c 100644 --- a/crates/l1tx/src/filter.rs +++ b/crates/l1tx/src/filter.rs @@ -178,7 +178,7 @@ mod test { // Create an envelope transaction. The focus here is to create a tapscript, rather than a // completely valid control block. Includes `n_envelopes` envelopes in the tapscript. 
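
Concretely, the tapscript this helper builds is just the per-payload envelope repeated back to back, which is what lets parse_envelope_payloads keep pulling payloads out until it runs out of OP_FALSE OP_IF .. OP_ENDIF blocks. A rough sketch of that layout, using placeholder (tag, version, data) tuples; the real builder takes &[L1Payload], resolves the tag from Params, and also pushes a length field whose exact encoding is defined by the builder/parser pair and elided here:

    use bitcoin::{
        opcodes::{all::{OP_ENDIF, OP_IF}, OP_FALSE},
        script::{Builder, PushBytesBuf},
        ScriptBuf,
    };

    /// Illustrative layout only: one `OP_FALSE OP_IF .. OP_ENDIF` envelope per payload,
    /// carrying a tag, a version byte and the payload data in <=520-byte chunks.
    fn envelope_script_sketch(payloads: &[(Vec<u8>, u8, Vec<u8>)]) -> anyhow::Result<ScriptBuf> {
        let mut builder = Builder::new();
        for (tag, version, data) in payloads {
            builder = builder
                .push_opcode(OP_FALSE)
                .push_opcode(OP_IF)
                .push_slice(PushBytesBuf::try_from(tag.clone())?)
                .push_slice(PushBytesBuf::from([*version]));
            // NOTE: the real envelope also encodes the payload size between the version
            // and the data; that detail is omitted in this sketch.
            for chunk in data.chunks(520) {
                builder = builder.push_slice(PushBytesBuf::try_from(chunk.to_vec())?);
            }
            builder = builder.push_opcode(OP_ENDIF);
        }
        Ok(builder.into_script())
    }

Since each payload sits in its own envelope block, the parser can simply call parse_l1_payload in a loop until it fails, typically because no further OP_FALSE OP_IF block is found.
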
- fn create_checkpoint_envelope_tx(params: Arc, n_envelopes: u32) -> Transaction { + fn create_checkpoint_envelope_tx(params: &Params, n_envelopes: u32) -> Transaction { let address = parse_addr(OTHER_ADDR); let inp_tx = create_test_tx(vec![create_test_txout(100000000, &address)]); let payloads: Vec<_> = (0..n_envelopes) @@ -218,7 +218,7 @@ mod test { // Testing multiple envelopes are parsed let num_envelopes = 2; - let tx = create_checkpoint_envelope_tx(params.clone().into(), num_envelopes); + let tx = create_checkpoint_envelope_tx(¶ms, num_envelopes); let block = create_test_block(vec![tx]); let ops = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config); @@ -235,7 +235,7 @@ mod test { let mut new_params = params.clone(); new_params.rollup.checkpoint_tag = "invalid_checkpoint_tag".to_string(); - let tx = create_checkpoint_envelope_tx(new_params.into(), 2); + let tx = create_checkpoint_envelope_tx(&new_params, 2); let block = create_test_block(vec![tx]); let result = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config); assert!(result.is_empty(), "Should filter out invalid name"); @@ -260,9 +260,9 @@ mod test { fn test_filter_relevant_txs_multiple_matches() { let params: Params = gen_params(); let filter_config = create_tx_filter_config(¶ms); - let tx1 = create_checkpoint_envelope_tx(params.clone().into(), 1); + let tx1 = create_checkpoint_envelope_tx(¶ms, 1); let tx2 = create_test_tx(vec![create_test_txout(100, &parse_addr(OTHER_ADDR))]); - let tx3 = create_checkpoint_envelope_tx(params.clone().into(), 1); + let tx3 = create_checkpoint_envelope_tx(¶ms, 1); let block = create_test_block(vec![tx1, tx2, tx3]); let txids: Vec = filter_protocol_op_tx_refs(&block, params.rollup(), &filter_config) diff --git a/crates/rocksdb-store/src/writer/db.rs b/crates/rocksdb-store/src/writer/db.rs index 94e7a3505..43016f3e1 100644 --- a/crates/rocksdb-store/src/writer/db.rs +++ b/crates/rocksdb-store/src/writer/db.rs @@ -51,24 +51,18 @@ impl L1WriterDatabase for RBL1WriterDb { } fn put_intent_entry(&self, intent_id: Buf32, intent_entry: IntentEntry) -> DbResult<()> { - let res = self - .db + self.db .with_optimistic_txn( rockbound::TransactionRetry::Count(self.ops.retry_count), |tx| -> Result<(), DbError> { - tracing::debug!(%intent_id, "putting intent"); let idx = get_next_id::(tx)?; - tracing::debug!(%idx, "next intent idx..."); tx.put::(&idx, &intent_id)?; tx.put::(&intent_id, &intent_entry)?; Ok(()) }, ) - .map_err(|e| DbError::TransactionError(e.to_string())); - let next = self.get_next_intent_idx()?; - tracing::debug!(%next, "next intent idx after put"); - res + .map_err(|e| DbError::TransactionError(e.to_string())) } fn get_intent_by_id(&self, id: Buf32) -> DbResult> { From 0a66e86770919611b171a9dea755a8a1e39367eb Mon Sep 17 00:00:00 2001 From: Bibek Pandey Date: Tue, 21 Jan 2025 14:46:15 +0545 Subject: [PATCH 17/17] Review fixes --- crates/btcio/src/test_utils.rs | 6 ++---- crates/btcio/src/writer/builder.rs | 2 +- crates/btcio/src/writer/bundler.rs | 12 +++++++++--- crates/l1tx/src/envelope/parser.rs | 5 ++--- crates/l1tx/src/filter.rs | 2 +- 5 files changed, 15 insertions(+), 12 deletions(-) diff --git a/crates/btcio/src/test_utils.rs b/crates/btcio/src/test_utils.rs index df0c67584..b22fc6482 100644 --- a/crates/btcio/src/test_utils.rs +++ b/crates/btcio/src/test_utils.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use async_trait::async_trait; use bitcoin::{ bip32::Xpriv, @@ -216,10 +214,10 @@ impl SignerRpc for TestBitcoinClient { pub fn generate_envelope_script_test( 
payloads: &[L1Payload], - params: Arc, + params: &Params, version: u8, ) -> anyhow::Result { - build_envelope_script(params.as_ref(), payloads, version) + build_envelope_script(params, payloads, version) } pub fn build_reveal_transaction_test( diff --git a/crates/btcio/src/writer/builder.rs b/crates/btcio/src/writer/builder.rs index db812ea95..07e060758 100644 --- a/crates/btcio/src/writer/builder.rs +++ b/crates/btcio/src/writer/builder.rs @@ -51,7 +51,7 @@ pub enum EnvelopeError { // Btcio depends on `tx-parser`. So this file is behind a feature flag 'test-utils' and on dev // dependencies on `tx-parser`, we include {btcio, feature="strata_test_utils"} , so cyclic // dependency doesn't happen -pub async fn build_envelope_txs( +pub(crate) async fn build_envelope_txs( payloads: &[L1Payload], ctx: &WriterContext, ) -> anyhow::Result<(Transaction, Transaction)> { diff --git a/crates/btcio/src/writer/bundler.rs b/crates/btcio/src/writer/bundler.rs index 2f8a8fe45..ce732c51c 100644 --- a/crates/btcio/src/writer/bundler.rs +++ b/crates/btcio/src/writer/bundler.rs @@ -40,7 +40,7 @@ pub(crate) async fn bundler_task( info!("Bundler received shutdown. Stopping."); break; } - // Process unbundled, returning entries which are unprocessed for some reason. + // Process unbundled entries, returning entries which are unprocessed for some reason. unbundled = process_unbundled_entries(ops.as_ref(), unbundled).await?; } } @@ -50,6 +50,8 @@ pub(crate) async fn bundler_task( /// Processes and bundles a list of unbundled intents into payload entries. Returns a vector of /// entries which are unbundled for some reason. +/// The reason could be the entries is too small in size to be included in an envelope and thus +/// makes sense to include once a bunch of entries are collected. /// NOTE: The current logic is simply 1-1 mapping between intents and payloads, in future it can /// be sophisticated. async fn process_unbundled_entries( @@ -57,6 +59,10 @@ async fn process_unbundled_entries( unbundled: Vec, ) -> DbResult> { for mut entry in unbundled { + // Check it is actually unbundled, omit if bundled + if entry.status != IntentStatus::Unbundled { + continue; + } // NOTE: In future, the logic to create payload will be different. We need to group // intents and create payload entries accordingly let payload_entry = BundledPayloadEntry::new_unsigned(vec![entry.payload().clone()]); @@ -76,8 +82,8 @@ async fn process_unbundled_entries( Ok(vec![]) } -/// Retrieves unbundled intents after a given index in ascending order along with the latest -/// entry idx. +/// Retrieves unbundled intents since the beginning in ascending order along with the latest +/// entry idx. This traverses backwards from latest index and breaks once it founds a bundled entry. 
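
The early break described in this comment is sound only if bundled intents form a contiguous prefix of the index space, so that everything after the last bundled entry is exactly the unbundled backlog. A slice-based sketch of the same scan under that assumption (the Status enum and the traversal over a slice are stand-ins for IntentStatus and the DB-backed lookup):

    /// Stand-in for `IntentStatus`; illustrative only.
    #[derive(PartialEq)]
    enum Status {
        Unbundled,
        Bundled(u64),
    }

    /// Walk backwards from the newest intent, collecting unbundled ones, and stop at
    /// the first bundled entry (everything before it is assumed bundled too). The
    /// result is returned in ascending index order, matching the bundler's backlog.
    fn initial_unbundled(statuses: &[Status]) -> Vec<usize> {
        let mut backlog: Vec<usize> = statuses
            .iter()
            .enumerate()
            .rev()
            .take_while(|(_, s)| **s == Status::Unbundled)
            .map(|(idx, _)| idx)
            .collect();
        backlog.reverse();
        backlog
    }

    fn main() {
        let statuses = [Status::Bundled(0), Status::Unbundled, Status::Unbundled];
        assert_eq!(initial_unbundled(&statuses), vec![1, 2]);
    }
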
pub(crate) fn get_initial_unbundled_entries( ops: &EnvelopeDataOps, ) -> anyhow::Result> { diff --git a/crates/l1tx/src/envelope/parser.rs b/crates/l1tx/src/envelope/parser.rs index afaf72b53..8c18c12e9 100644 --- a/crates/l1tx/src/envelope/parser.rs +++ b/crates/l1tx/src/envelope/parser.rs @@ -169,7 +169,7 @@ mod tests { let envelope2 = L1Payload::new_checkpoint(bytes.clone()); let script = generate_envelope_script_test( &[envelope1.clone(), envelope2.clone()], - params.clone().into(), + ¶ms, version, ) .unwrap(); @@ -182,8 +182,7 @@ mod tests { let bytes = vec![1; 2000]; let envelope_data = L1Payload::new_checkpoint(bytes.clone()); let script = - generate_envelope_script_test(&[envelope_data.clone()], params.clone().into(), version) - .unwrap(); + generate_envelope_script_test(&[envelope_data.clone()], ¶ms, version).unwrap(); // Parse the rollup name let result = parse_envelope_payloads(&script, params.rollup()).unwrap(); diff --git a/crates/l1tx/src/filter.rs b/crates/l1tx/src/filter.rs index 2cb23348c..9d502762f 100644 --- a/crates/l1tx/src/filter.rs +++ b/crates/l1tx/src/filter.rs @@ -97,7 +97,7 @@ fn parse_envelope_checkpoints<'a>( #[cfg(test)] mod test { - use std::{str::FromStr, sync::Arc}; + use std::str::FromStr; use bitcoin::{ absolute::{Height, LockTime},