From 5a1024bfa308059b6997f580b18dd27fc3a1f537 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 3 Jan 2025 12:53:24 -0500 Subject: [PATCH] Fix clippy::redundant_closure throughout stackslib Signed-off-by: Jacinta Ferrant --- stackslib/src/blockstack_cli.rs | 2 +- stackslib/src/burnchains/db.rs | 4 +-- stackslib/src/burnchains/tests/affirmation.rs | 4 +-- stackslib/src/chainstate/burn/db/sortdb.rs | 26 +++++++------- .../burn/operations/delegate_stx.rs | 16 ++++----- .../chainstate/burn/operations/stack_stx.rs | 6 ++-- .../burn/operations/transfer_stx.rs | 5 ++- .../burn/operations/vote_for_aggregate_key.rs | 8 ++--- stackslib/src/chainstate/coordinator/mod.rs | 10 ++---- .../src/chainstate/nakamoto/signer_set.rs | 2 +- stackslib/src/chainstate/nakamoto/tenure.rs | 2 +- .../src/chainstate/nakamoto/tests/mod.rs | 2 +- stackslib/src/chainstate/stacks/boot/mod.rs | 6 ++-- .../src/chainstate/stacks/boot/pox_4_tests.rs | 8 ++--- .../src/chainstate/stacks/db/accounts.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 10 +++--- stackslib/src/chainstate/stacks/db/mod.rs | 2 +- .../src/chainstate/stacks/index/storage.rs | 5 ++- .../src/chainstate/stacks/index/test/marf.rs | 2 +- stackslib/src/clarity_cli.rs | 2 +- stackslib/src/clarity_vm/database/mod.rs | 4 +-- stackslib/src/core/mempool.rs | 34 ++++++------------- stackslib/src/net/api/getpoxinfo.rs | 2 +- stackslib/src/net/api/tests/mod.rs | 2 +- stackslib/src/net/atlas/db.rs | 2 +- stackslib/src/net/atlas/download.rs | 4 +-- stackslib/src/net/chat.rs | 7 ++-- stackslib/src/net/db.rs | 2 +- stackslib/src/net/relay.rs | 4 +-- stackslib/src/net/stackerdb/db.rs | 2 +- stackslib/src/net/tests/mempool/mod.rs | 10 +++--- stackslib/src/util_lib/db.rs | 6 ++-- 32 files changed, 91 insertions(+), 112 deletions(-) diff --git a/stackslib/src/blockstack_cli.rs b/stackslib/src/blockstack_cli.rs index aadec8a519..b51d20d8ad 100644 --- a/stackslib/src/blockstack_cli.rs +++ b/stackslib/src/blockstack_cli.rs @@ -864,7 +864,7 @@ fn main_handler(mut argv: Vec) -> Result { if let Some(custom_chain_id) = flag.split('=').nth(1) { // Attempt to parse the custom chain ID from hex chain_id = u32::from_str_radix(custom_chain_id.trim_start_matches("0x"), 16) - .map_err(|err| CliError::InvalidChainId(err))?; + .map_err(CliError::InvalidChainId)?; } else { // Use the default testnet chain ID chain_id = CHAIN_ID_TESTNET; diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 8116ea4143..685724716a 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -393,7 +393,7 @@ impl BurnchainDBTransaction<'_> { let args = params![u64_to_sql(target_reward_cycle)?]; self.sql_tx .execute(sql, args) - .map_err(|e| DBError::SqliteError(e))?; + .map_err(DBError::SqliteError)?; let sql = "UPDATE block_commit_metadata SET anchor_block = ?1 WHERE burn_block_hash = ?2 AND txid = ?3"; let args = params![ @@ -424,7 +424,7 @@ impl BurnchainDBTransaction<'_> { self.sql_tx .execute(sql, args) .map(|_| ()) - .map_err(|e| DBError::SqliteError(e)) + .map_err(DBError::SqliteError) } /// Calculate a burnchain block's block-commits' descendancy information. 
diff --git a/stackslib/src/burnchains/tests/affirmation.rs b/stackslib/src/burnchains/tests/affirmation.rs index 095fe940d1..81c5174421 100644 --- a/stackslib/src/burnchains/tests/affirmation.rs +++ b/stackslib/src/burnchains/tests/affirmation.rs @@ -419,7 +419,7 @@ pub fn make_reward_cycle_with_vote( commits .into_iter() .filter_map(|cmt| cmt) - .map(|cmt| BlockstackOperationType::LeaderBlockCommit(cmt)) + .map(BlockstackOperationType::LeaderBlockCommit) .collect() }; @@ -1617,7 +1617,7 @@ fn test_update_pox_affirmation_maps_unique_anchor_block() { let cmt_ops: Vec = cmts .iter() .filter_map(|op| op.clone()) - .map(|op| BlockstackOperationType::LeaderBlockCommit(op)) + .map(BlockstackOperationType::LeaderBlockCommit) .collect(); burnchain_db diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 0d032bf1f2..e8ec00526f 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -117,8 +117,7 @@ impl FromRow for MissedBlockCommit { fn from_row(row: &Row) -> Result { let intended_sortition = SortitionId::from_column(row, "intended_sortition_id")?; let input_json: String = row.get_unwrap("input"); - let input = - serde_json::from_str(&input_json).map_err(|e| db_error::SerializationError(e))?; + let input = serde_json::from_str(&input_json).map_err(db_error::SerializationError)?; let txid = Txid::from_column(row, "txid")?; Ok(MissedBlockCommit { @@ -264,11 +263,10 @@ impl FromRow for LeaderBlockCommitOp { let memo = memo_bytes.to_vec(); - let input = - serde_json::from_str(&input_json).map_err(|e| db_error::SerializationError(e))?; + let input = serde_json::from_str(&input_json).map_err(db_error::SerializationError)?; - let apparent_sender = serde_json::from_str(&apparent_sender_json) - .map_err(|e| db_error::SerializationError(e))?; + let apparent_sender = + serde_json::from_str(&apparent_sender_json).map_err(db_error::SerializationError)?; let burn_fee = burn_fee_str .parse::() @@ -285,8 +283,8 @@ impl FromRow for LeaderBlockCommitOp { .as_deref() .map(serde_json::from_str) .transpose() - .map_err(|e| db_error::SerializationError(e))? - .unwrap_or_else(|| vec![]); + .map_err(db_error::SerializationError)? + .unwrap_or_default(); let block_commit = LeaderBlockCommitOp { block_header_hash, @@ -4446,7 +4444,7 @@ impl SortitionDB { sortition_id: &SortitionId, ) -> Result { let db_handle = self.index_handle(sortition_id); - SortitionDB::get_max_arrival_index(&db_handle).map_err(|e| BurnchainError::from(e)) + SortitionDB::get_max_arrival_index(&db_handle).map_err(BurnchainError::from) } /// Get a burn blockchain snapshot, given a burnchain configuration struct. @@ -5761,12 +5759,12 @@ impl SortitionHandleTx<'_> { assert!(block_commit.block_height < BLOCK_HEIGHT_MAX); // serialize tx input to JSON - let tx_input_str = serde_json::to_string(&block_commit.input) - .map_err(|e| db_error::SerializationError(e))?; + let tx_input_str = + serde_json::to_string(&block_commit.input).map_err(db_error::SerializationError)?; // serialize apparent sender to JSON let apparent_sender_str = serde_json::to_string(&block_commit.apparent_sender) - .map_err(|e| db_error::SerializationError(e))?; + .map_err(db_error::SerializationError)?; // find parent block commit's snapshot's sortition ID. 
// If the parent_block_ptr doesn't point to a valid snapshot, then store an empty @@ -5833,7 +5831,7 @@ impl SortitionHandleTx<'_> { fn insert_missed_block_commit(&mut self, op: &MissedBlockCommit) -> Result<(), db_error> { // serialize tx input to JSON let tx_input_str = - serde_json::to_string(&op.input).map_err(|e| db_error::SerializationError(e))?; + serde_json::to_string(&op.input).map_err(db_error::SerializationError)?; let args = params![op.txid, op.intended_sortition, tx_input_str]; @@ -6921,7 +6919,7 @@ pub mod tests { sender: &BurnchainSigner, ) -> Result, db_error> { let apparent_sender_str = - serde_json::to_string(sender).map_err(|e| db_error::SerializationError(e))?; + serde_json::to_string(sender).map_err(db_error::SerializationError)?; let sql = "SELECT * FROM block_commits WHERE apparent_sender = ?1 ORDER BY block_height DESC LIMIT 1"; let args = params![apparent_sender_str]; query_row(conn, sql, args) diff --git a/stackslib/src/chainstate/burn/operations/delegate_stx.rs b/stackslib/src/chainstate/burn/operations/delegate_stx.rs index 130a42784b..ef95c284b6 100644 --- a/stackslib/src/chainstate/burn/operations/delegate_stx.rs +++ b/stackslib/src/chainstate/burn/operations/delegate_stx.rs @@ -227,28 +227,28 @@ impl StacksMessageCodec for DelegateStxOp { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &(Opcodes::DelegateStx as u8))?; fd.write_all(&self.delegated_ustx.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; if let Some((index, _)) = self.reward_addr { fd.write_all(&(1 as u8).to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; fd.write_all(&index.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; } else { fd.write_all(&(0 as u8).to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; fd.write_all(&(0 as u32).to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; } if let Some(height) = self.until_burn_height { fd.write_all(&(1 as u8).to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; fd.write_all(&height.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; } else { fd.write_all(&(0 as u8).to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; } Ok(()) } diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs index 67de678659..afa9375079 100644 --- a/stackslib/src/chainstate/burn/operations/stack_stx.rs +++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs @@ -374,7 +374,7 @@ impl StacksMessageCodec for StackStxOp { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &(Opcodes::StackStx as u8))?; fd.write_all(&self.stacked_ustx.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; write_next(fd, &self.num_cycles)?; if let Some(signer_key) = &self.signer_key { @@ -383,11 +383,11 @@ impl StacksMessageCodec for StackStxOp { } if let Some(max_amount) = &self.max_amount { fd.write_all(&max_amount.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; } if let Some(auth_id) = &self.auth_id { fd.write_all(&auth_id.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + 
.map_err(codec_error::WriteError)?; } Ok(()) } diff --git a/stackslib/src/chainstate/burn/operations/transfer_stx.rs b/stackslib/src/chainstate/burn/operations/transfer_stx.rs index 9d1d562d9c..0da3024bbd 100644 --- a/stackslib/src/chainstate/burn/operations/transfer_stx.rs +++ b/stackslib/src/chainstate/burn/operations/transfer_stx.rs @@ -213,9 +213,8 @@ impl StacksMessageCodec for TransferStxOp { } write_next(fd, &(Opcodes::TransferStx as u8))?; fd.write_all(&self.transfered_ustx.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; - fd.write_all(&self.memo) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; + fd.write_all(&self.memo).map_err(codec_error::WriteError)?; Ok(()) } diff --git a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs index 648859abc6..db429fec81 100644 --- a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs +++ b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs @@ -202,13 +202,13 @@ impl StacksMessageCodec for VoteForAggregateKeyOp { write_next(fd, &(Opcodes::VoteForAggregateKey as u8))?; fd.write_all(&self.signer_index.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; fd.write_all(self.aggregate_key.as_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; fd.write_all(&self.round.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; fd.write_all(&self.reward_cycle.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; + .map_err(codec_error::WriteError)?; Ok(()) } diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 7250257531..cd0aa9373d 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -742,7 +742,7 @@ pub fn get_next_recipients( )?; sort_db .get_next_block_recipients(burnchain, sortition_tip, reward_cycle_info.as_ref()) - .map_err(|e| Error::from(e)) + .map_err(Error::from) } /// returns None if this burnchain block is _not_ the start of a reward cycle @@ -2097,9 +2097,7 @@ impl< // by holding this lock as long as we do, we ensure that the sortition DB's // view of the canonical stacks chain tip can't get changed (since no // Stacks blocks can be processed). 
- chainstate_db_tx - .commit() - .map_err(|e| DBError::SqliteError(e))?; + chainstate_db_tx.commit().map_err(DBError::SqliteError)?; let highest_valid_snapshot = SortitionDB::get_block_snapshot( &self.sortition_db.conn(), @@ -2786,9 +2784,7 @@ impl< invalidation_height, )?; } - chainstate_db_tx - .commit() - .map_err(|e| DBError::SqliteError(e))?; + chainstate_db_tx.commit().map_err(DBError::SqliteError)?; } let sortition_id = next_snapshot.sortition_id; diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index 38e76f7e51..6da5ac70f8 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -441,7 +441,7 @@ impl NakamotoSigners { coinbase_height, ) }) - .map(|calculation| Some(calculation)) + .map(Some) } /// Make the contract name for a signers DB contract diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index a0e516f283..58eb7ef8aa 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -756,7 +756,7 @@ impl NakamotoChainState { headers_conn.sqlite(), &block_header.parent_block_id, )? - .map(|parent_version| NakamotoBlockHeader::is_shadow_block_version(parent_version)) + .map(NakamotoBlockHeader::is_shadow_block_version) .unwrap_or(false); if !is_parent_shadow_block && !prev_sn.sortition { diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index bd415b68b0..6ddb82ebc6 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -2172,7 +2172,7 @@ fn test_make_miners_stackerdb_config() { miners .clone() .into_iter() - .map(|miner| BlockstackOperationType::LeaderKeyRegister(miner)) + .map(BlockstackOperationType::LeaderKeyRegister) .collect() } else { // subsequent ones include block-commits diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index b941bed938..f5ecc3e558 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1675,7 +1675,7 @@ pub mod test { .unwrap(), ]; - let addrs: Vec = keys.iter().map(|pk| key_to_stacks_addr(pk)).collect(); + let addrs: Vec = keys.iter().map(key_to_stacks_addr).collect(); let balances: Vec<(PrincipalData, u64)> = addrs .clone() @@ -2341,7 +2341,7 @@ pub mod test { let addr_tuple = Value::Tuple(pox_addr.as_clarity_tuple().unwrap()); let signature = signature_opt .map(|sig| Value::some(Value::buff_from(sig).unwrap()).unwrap()) - .unwrap_or_else(|| Value::none()); + .unwrap_or_else(Value::none); let payload = TransactionPayload::new_contract_call( boot_code_test_addr(), POX_4_NAME, @@ -2372,7 +2372,7 @@ pub mod test { ) -> StacksTransaction { let signature = signature_opt .map(|sig| Value::some(Value::buff_from(sig).unwrap()).unwrap()) - .unwrap_or_else(|| Value::none()); + .unwrap_or_else(Value::none); let payload = TransactionPayload::new_contract_call( boot_code_test_addr(), POX_4_NAME, diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 14dc9e75ab..809a7aa901 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -5179,8 +5179,8 @@ fn balances_from_keys( keys: &[Secp256k1PrivateKey], ) -> Vec { keys.iter() - .map(|key| key_to_stacks_addr(key)) - .map(|addr| 
PrincipalData::from(addr)) + .map(key_to_stacks_addr) + .map(PrincipalData::from) .map(|principal| get_stx_account_at(peer, tip, &principal)) .collect() } @@ -6766,7 +6766,7 @@ pub fn pox_4_scenario_test_setup_nakamoto<'a>( let private_key = StacksPrivateKey::from_seed(&[2]); let test_signers = TestSigners::new(test_keys.clone()); - let addrs: Vec = test_keys.iter().map(|pk| key_to_stacks_addr(pk)).collect(); + let addrs: Vec = test_keys.iter().map(key_to_stacks_addr).collect(); let initial_stacker_balance = initial_balances .get(0) .expect("Expected at least 1 initial balance") @@ -8843,7 +8843,7 @@ pub fn prepare_pox4_test<'a>( .with_test_signers(test_signers.clone()) .with_private_key(private_key); boot_plan.add_default_balance = false; - let addrs: Vec = keys.iter().map(|pk| key_to_stacks_addr(pk)).collect(); + let addrs: Vec = keys.iter().map(key_to_stacks_addr).collect(); let balances: Vec<(PrincipalData, u64)> = addrs .clone() diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 977069ceb6..7be97339df 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -654,7 +654,7 @@ impl StacksChainState { ) -> Result, Error> { let sql = "SELECT * FROM matured_rewards WHERE parent_index_block_hash = ?1 AND child_index_block_hash = ?2 AND vtxindex = 0"; let args = params![parent_block_id.0, child_block_id.0]; - let ret: Vec = query_rows(conn, sql, args).map_err(|e| Error::DBError(e))?; + let ret: Vec = query_rows(conn, sql, args).map_err(Error::DBError)?; Ok(ret) } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 96a07dcb83..35246ddc82 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -916,7 +916,7 @@ impl StacksChainState { // gather let mut blobs = vec![]; - while let Some(row) = rows.next().map_err(|e| db_error::SqliteError(e))? { + while let Some(row) = rows.next().map_err(db_error::SqliteError)? { let next_blob: Vec = row.get_unwrap(0); blobs.push(next_blob); } @@ -1733,7 +1733,7 @@ impl StacksChainState { // gather let mut row_data: Vec = vec![]; - while let Some(row) = rows.next().map_err(|e| db_error::SqliteError(e))? { + while let Some(row) = rows.next().map_err(db_error::SqliteError)? { let val_opt: Option = row.get_unwrap(0); if let Some(val) = val_opt { row_data.push(val); @@ -3863,7 +3863,7 @@ impl StacksChainState { .query(NO_PARAMS) .map_err(|e| Error::DBError(db_error::SqliteError(e)))?; - while let Some(row) = rows.next().map_err(|e| db_error::SqliteError(e))? { + while let Some(row) = rows.next().map_err(db_error::SqliteError)? 
{ let mut candidate = StagingBlock::from_row(&row).map_err(Error::DBError)?; // block must correspond to a valid PoX snapshot @@ -6675,7 +6675,7 @@ impl StacksChainState { let epoch = clarity_connection.get_epoch().clone(); StacksChainState::process_transaction_precheck(&chainstate_config, &tx, epoch) - .map_err(|e| MemPoolRejection::FailedToValidate(e))?; + .map_err(MemPoolRejection::FailedToValidate)?; // 3: it must pay a tx fee let fee = tx.get_tx_fee(); @@ -6867,7 +6867,7 @@ impl StacksChainState { epoch, clarity_version, ) - .map_err(|e| MemPoolRejection::BadFunctionArgument(e)) + .map_err(MemPoolRejection::BadFunctionArgument) })?; } TransactionPayload::SmartContract( diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index be29197f83..cb0f615191 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1202,7 +1202,7 @@ impl StacksChainState { test_debug!("Open MARF index at {}", marf_path); let mut open_opts = MARFOpenOpts::default(); open_opts.external_blobs = true; - let marf = MARF::from_path(marf_path, open_opts).map_err(|e| db_error::IndexError(e))?; + let marf = MARF::from_path(marf_path, open_opts).map_err(db_error::IndexError)?; Ok(marf) } diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 4927365882..dcbe035e93 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -584,12 +584,11 @@ impl TrieRAM { // write parent block ptr f.seek(SeekFrom::Start(0))?; f.write_all(parent_hash.as_bytes()) - .map_err(|e| Error::IOError(e))?; + .map_err(Error::IOError)?; // write zero-identifier (TODO: this is a convenience hack for now, we should remove the // identifier from the trie data blob) f.seek(SeekFrom::Start(BLOCK_HEADER_HASH_ENCODED_SIZE as u64))?; - f.write_all(&0u32.to_le_bytes()) - .map_err(|e| Error::IOError(e))?; + f.write_all(&0u32.to_le_bytes()).map_err(Error::IOError)?; for (ix, indirect) in node_data_order.iter().enumerate() { // dump the node to storage diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 50efc260ab..c70b1fb85d 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -518,7 +518,7 @@ where marf_walk_cow_test( |s| make_node_path(s, node_id.to_u8(), &path_segments, [31u8; 40].to_vec()), - |x, y| path_gen(x, y), + &path_gen, ); } } diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 9f37569b35..c77090538d 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -163,7 +163,7 @@ fn parse( DEFAULT_CLI_EPOCH, ASTRules::PrecheckSize, ) - .map_err(|e| RuntimeErrorType::ASTError(e))?; + .map_err(RuntimeErrorType::ASTError)?; Ok(ast.expressions) } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index e901e8d908..34bfa0322e 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -72,7 +72,7 @@ impl GetTenureStartId for StacksDBConn<'_> { )? .map(|id_str| nakamoto_keys::parse_block_id(&id_str)) .flatten() - .map(|block_id| TenureBlockId::from(block_id))) + .map(TenureBlockId::from)) } fn get_tenure_block_id_at_cb_height( @@ -108,7 +108,7 @@ impl GetTenureStartId for StacksDBTx<'_> { )? 
.map(|id_str| nakamoto_keys::parse_block_id(&id_str)) .flatten() - .map(|block_id| TenureBlockId::from(block_id))) + .map(TenureBlockId::from)) } fn get_tenure_block_id_at_cb_height( diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 569b47300c..e09fbe2865 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1144,10 +1144,8 @@ fn db_get_nonce(conn: &DBConn, address: &StacksAddress) -> Result, d #[cfg(test)] pub fn db_get_all_nonces(conn: &DBConn) -> Result, db_error> { let sql = "SELECT * FROM nonces"; - let mut stmt = conn.prepare(&sql).map_err(|e| db_error::SqliteError(e))?; - let mut iter = stmt - .query(NO_PARAMS) - .map_err(|e| db_error::SqliteError(e))?; + let mut stmt = conn.prepare(&sql).map_err(db_error::SqliteError)?; + let mut iter = stmt.query(NO_PARAMS).map_err(db_error::SqliteError)?; let mut ret = vec![]; while let Ok(Some(row)) = iter.next() { let addr = StacksAddress::from_column(row, "address")?; @@ -1664,13 +1662,10 @@ impl MemPoolDB { FROM mempool WHERE fee_rate IS NULL "; - let mut query_stmt_null = self - .db - .prepare(&sql) - .map_err(|err| Error::SqliteError(err))?; + let mut query_stmt_null = self.db.prepare(&sql).map_err(Error::SqliteError)?; let mut null_iterator = query_stmt_null .query(NO_PARAMS) - .map_err(|err| Error::SqliteError(err))?; + .map_err(Error::SqliteError)?; let sql = " SELECT txid, origin_nonce, origin_address, sponsor_nonce, sponsor_address, fee_rate @@ -1678,13 +1673,10 @@ impl MemPoolDB { WHERE fee_rate IS NOT NULL ORDER BY fee_rate DESC "; - let mut query_stmt_fee = self - .db - .prepare(&sql) - .map_err(|err| Error::SqliteError(err))?; + let mut query_stmt_fee = self.db.prepare(&sql).map_err(Error::SqliteError)?; let mut fee_iterator = query_stmt_fee .query(NO_PARAMS) - .map_err(|err| Error::SqliteError(err))?; + .map_err(Error::SqliteError)?; let stop_reason = loop { if start_time.elapsed().as_millis() > settings.max_walk_time_ms as u128 { @@ -1707,22 +1699,18 @@ impl MemPoolDB { // randomly selecting from either the null fee-rate transactions // or those with fee-rate estimates. let opt_tx = if start_with_no_estimate { - null_iterator - .next() - .map_err(|err| Error::SqliteError(err))? + null_iterator.next().map_err(Error::SqliteError)? } else { - fee_iterator.next().map_err(|err| Error::SqliteError(err))? + fee_iterator.next().map_err(Error::SqliteError)? }; match opt_tx { Some(row) => (MemPoolTxInfoPartial::from_row(row)?, start_with_no_estimate), None => { // If the selected iterator is empty, check the other match if start_with_no_estimate { - fee_iterator.next().map_err(|err| Error::SqliteError(err))? + fee_iterator.next().map_err(Error::SqliteError)? } else { - null_iterator - .next() - .map_err(|err| Error::SqliteError(err))? + null_iterator.next().map_err(Error::SqliteError)? } { Some(row) => ( MemPoolTxInfoPartial::from_row(row)?, @@ -2117,7 +2105,7 @@ impl MemPoolDB { &StacksBlockId::new(tip_consensus_hash, tip_block_header_hash), tip_consensus_hash, ) - .map_err(|e| MemPoolRejection::FailedToValidate(e))? + .map_err(MemPoolRejection::FailedToValidate)? 
.ok_or(MemPoolRejection::NoSuchChainTip( tip_consensus_hash.clone(), tip_block_header_hash.clone(), diff --git a/stackslib/src/net/api/getpoxinfo.rs b/stackslib/src/net/api/getpoxinfo.rs index 81868c81f8..2499f49c7c 100644 --- a/stackslib/src/net/api/getpoxinfo.rs +++ b/stackslib/src/net/api/getpoxinfo.rs @@ -364,7 +364,7 @@ impl RPCPoxInfoData { let cur_cycle_pox_active = sortdb.is_pox_active(burnchain, &burnchain_tip)?; let epochs: Vec<_> = SortitionDB::get_stacks_epochs(sortdb.conn())? .into_iter() - .map(|epoch| RPCPoxEpoch::from(epoch)) + .map(RPCPoxEpoch::from) .collect(); Ok(RPCPoxInfoData { diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index 35e12b5593..85505f498b 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -1152,7 +1152,7 @@ fn prefixed_opt_hex_serialization() { ]; for test in tests_32b.iter() { - let inp = test.clone().map(|bytes| BurnchainHeaderHash(bytes)); + let inp = test.clone().map(BurnchainHeaderHash); let mut out_buff = Vec::new(); let mut serializer = serde_json::Serializer::new(&mut out_buff); prefix_opt_hex::serialize(&inp, &mut serializer).unwrap(); diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index d11dd9995d..cdad26ea4d 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -376,7 +376,7 @@ impl AtlasDB { // Open an atlas database in memory (used for testing) #[cfg(test)] pub fn connect_memory(atlas_config: AtlasConfig) -> Result { - let conn = Connection::open_in_memory().map_err(|e| db_error::SqliteError(e))?; + let conn = Connection::open_in_memory().map_err(db_error::SqliteError)?; let mut db = AtlasDB { atlas_config, conn, diff --git a/stackslib/src/net/atlas/download.rs b/stackslib/src/net/atlas/download.rs index a9dad242a5..ba0829c291 100644 --- a/stackslib/src/net/atlas/download.rs +++ b/stackslib/src/net/atlas/download.rs @@ -158,11 +158,11 @@ impl AttachmentsDownloader { let attachments_instances = network .atlasdb .find_all_attachment_instances(&attachment.hash()) - .map_err(|e| net_error::DBError(e))?; + .map_err(net_error::DBError)?; network .atlasdb .insert_instantiated_attachment(&attachment) - .map_err(|e| net_error::DBError(e))?; + .map_err(net_error::DBError)?; for attachment_instance in attachments_instances.into_iter() { resolved_attachments.push((attachment_instance, attachment.clone())); } diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index d1c2185d10..966644ee2f 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -676,8 +676,7 @@ impl ConversationP2P { } pub fn get_public_key_hash(&self) -> Option { - self.ref_public_key() - .map(|pubk| Hash160::from_node_public_key(pubk)) + self.ref_public_key().map(Hash160::from_node_public_key) } pub fn ref_public_key(&self) -> Option<&StacksPublicKey> { @@ -1461,7 +1460,7 @@ impl ConversationP2P { let neighbor_addrs: Vec = neighbors .iter() - .map(|n| NeighborAddress::from_neighbor(n)) + .map(NeighborAddress::from_neighbor) .collect(); debug!( @@ -1642,7 +1641,7 @@ impl ConversationP2P { reward_cycle, &block_hashes, ) - .map_err(|e| net_error::from(e))?; + .map_err(net_error::from)?; if cfg!(test) { // make *sure* the behavior stays the same in epoch 2 diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 34ee6441a6..37bdb19d4e 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -782,7 +782,7 @@ impl PeerDB { asn4_entries: &[ASEntry4], initial_neighbors: &[Neighbor], ) -> Result { - let 
conn = Connection::open_in_memory().map_err(|e| db_error::SqliteError(e))?; + let conn = Connection::open_in_memory().map_err(db_error::SqliteError)?; let mut db = PeerDB { conn, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 86358e7be2..e412472bd5 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -3268,7 +3268,7 @@ impl PeerNetwork { network.advertize_to_peer( recipient, &[((*ch).clone(), (*bhh).clone())], - |payload| StacksMessageType::BlocksAvailable(payload), + StacksMessageType::BlocksAvailable, ); } } @@ -3310,7 +3310,7 @@ impl PeerNetwork { network.advertize_to_peer( recipient, &[((*ch).clone(), (*bhh).clone())], - |payload| StacksMessageType::MicroblocksAvailable(payload), + StacksMessageType::MicroblocksAvailable, ); } } diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 0faf5bbe03..534af40c04 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -476,7 +476,7 @@ impl StackerDBs { let pparent_path = ppath .parent() .unwrap_or_else(|| panic!("BUG: no parent of '{}'", path)); - fs::create_dir_all(&pparent_path).map_err(|e| db_error::IOError(e))?; + fs::create_dir_all(&pparent_path).map_err(db_error::IOError)?; OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE } else { diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index d3f30aca19..8211eaeb3b 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -54,7 +54,7 @@ fn test_mempool_sync_2_peers() { let num_txs = 10; let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let addrs: Vec<_> = pks.iter().map(to_addr).collect(); let initial_balances: Vec<_> = addrs .iter() .map(|a| (a.to_account_principal(), 1000000000)) @@ -322,7 +322,7 @@ fn test_mempool_sync_2_peers_paginated() { let num_txs = 1024; let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let addrs: Vec<_> = pks.iter().map(to_addr).collect(); let initial_balances: Vec<_> = addrs .iter() .map(|a| (a.to_account_principal(), 1000000000)) @@ -513,7 +513,7 @@ fn test_mempool_sync_2_peers_blacklisted() { let num_txs = 1024; let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let addrs: Vec<_> = pks.iter().map(to_addr).collect(); let initial_balances: Vec<_> = addrs .iter() .map(|a| (a.to_account_principal(), 1000000000)) @@ -724,7 +724,7 @@ fn test_mempool_sync_2_peers_problematic() { let num_txs = 128; let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let addrs: Vec<_> = pks.iter().map(to_addr).collect(); let initial_balances: Vec<_> = addrs .iter() .map(|a| (a.to_account_principal(), 1000000000)) @@ -1098,7 +1098,7 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() { ]; let num_txs = 1024; let pks: Vec<_> = (0..num_txs).map(|_| StacksPrivateKey::new()).collect(); - let addrs: Vec<_> = pks.iter().map(|pk| to_addr(pk)).collect(); + let addrs: Vec<_> = pks.iter().map(to_addr).collect(); let initial_balances: Vec<_> = addrs .iter() .map(|a| (a.to_account_principal(), 1000000000)) diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index cdc4b587b1..4ac28a4a4d 100644 --- 
a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -489,7 +489,7 @@ where // gather let mut row_data = vec![]; - while let Some(row) = rows.next().map_err(|e| Error::SqliteError(e))? { + while let Some(row) = rows.next().map_err(Error::SqliteError)? { let next_row = T::from_column(&row, column_name)?; row_data.push(next_row); } @@ -506,7 +506,7 @@ where let mut stmt = conn.prepare(sql_query)?; let mut rows = stmt.query(sql_args)?; let mut row_data = vec![]; - while let Some(row) = rows.next().map_err(|e| Error::SqliteError(e))? { + while let Some(row) = rows.next().map_err(Error::SqliteError)? { if row_data.len() > 0 { return Err(Error::Overflow); } @@ -535,7 +535,7 @@ pub fn sql_pragma( pragma_name: &str, pragma_value: &dyn ToSql, ) -> Result<(), Error> { - inner_sql_pragma(conn, pragma_name, pragma_value).map_err(|e| Error::SqliteError(e)) + inner_sql_pragma(conn, pragma_name, pragma_value).map_err(Error::SqliteError) } fn inner_sql_pragma(
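Note for reviewers (appended commentary, not part of the patch to apply): clippy::redundant_closure fires when a closure does nothing but forward its arguments to another callable, so the closure can be replaced by the callable's name. Every hunk above is an instance of that rewrite, e.g. .map_err(|e| db_error::SqliteError(e)) becoming .map_err(db_error::SqliteError). The self-contained sketch below shows the recurring before/after shapes; DbError and parse_height are made-up stand-ins for illustration only, not stackslib types.

// Illustrative sketch only -- not part of the patch. DbError and parse_height
// are hypothetical stand-ins for types like db_error::SqliteError or
// codec_error::WriteError used throughout stackslib.

#[derive(Debug)]
enum DbError {
    Serialization(String),
}

fn parse_height(s: &str) -> Result<u64, String> {
    s.parse::<u64>().map_err(|e| e.to_string())
}

fn main() {
    // Before: the closure only forwards its argument to the enum constructor,
    // which is exactly what clippy::redundant_closure flags.
    #[allow(clippy::redundant_closure)]
    let before: Result<u64, DbError> =
        parse_height("42").map_err(|e| DbError::Serialization(e));

    // After: tuple-struct and enum-variant constructors are ordinary functions,
    // so they can be passed by name, as the patch does with
    // .map_err(db_error::SqliteError), .map(Some), .map(PrincipalData::from), etc.
    let after: Result<u64, DbError> =
        parse_height("42").map_err(DbError::Serialization);

    // The same applies to plain functions and Some:
    let lengths: Vec<Option<usize>> = ["a", "bb"]
        .iter()
        .map(|s| s.len())
        .map(Some) // instead of .map(|n| Some(n))
        .collect();

    // Related cleanup seen in the diff: unwrap_or_else(|| vec![])
    // collapses further to unwrap_or_default().
    let fallback: Vec<u8> = None.unwrap_or_default();

    println!("{:?} {:?} {:?} {:?}", before, after, lengths, fallback);
}

Two variants in the diff deviate slightly from the bare-name form: in stackslib/src/chainstate/stacks/index/test/marf.rs the wrapped callable is itself a closure argument, so |x, y| path_gen(x, y) becomes &path_gen (passing a reference, presumably to avoid moving the captured closure), and in sortdb.rs unwrap_or_else(|| vec![]) collapses all the way to unwrap_or_default(), while zero-argument constructors such as Value::none are passed directly to unwrap_or_else.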