From 7fe537f7a48675b1d25542bee6f390d665547580 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Aur=C3=A8le=20Oul=C3=A8s?=
Date: Tue, 18 Oct 2022 11:20:06 +0200
Subject: [PATCH 001/172] Implement CCoinsViewErrorCatcher::HaveCoin

---
 src/coins.cpp | 16 +++++++++++++---
 src/coins.h   |  1 +
 2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/src/coins.cpp b/src/coins.cpp
index 5983a8a39fc75..37dd71874be07 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -292,11 +292,13 @@ const Coin& AccessByTxid(const CCoinsViewCache& view, const uint256& txid)
     return coinEmpty;
 }
 
-bool CCoinsViewErrorCatcher::GetCoin(const COutPoint &outpoint, Coin &coin) const {
+template <typename Func>
+static bool ExecuteBackedWrapper(Func func, const std::vector<std::function<void()>>& err_callbacks)
+{
     try {
-        return CCoinsViewBacked::GetCoin(outpoint, coin);
+        return func();
     } catch(const std::runtime_error& e) {
-        for (const auto& f : m_err_callbacks) {
+        for (const auto& f : err_callbacks) {
             f();
         }
         LogPrintf("Error reading from database: %s\n", e.what());
@@ -307,3 +309,11 @@ bool CCoinsViewErrorCatcher::GetCoin(const COutPoint &outpoint, Coin &coin) cons
         std::abort();
     }
 }
+
+bool CCoinsViewErrorCatcher::GetCoin(const COutPoint &outpoint, Coin &coin) const {
+    return ExecuteBackedWrapper([&]() { return CCoinsViewBacked::GetCoin(outpoint, coin); }, m_err_callbacks);
+}
+
+bool CCoinsViewErrorCatcher::HaveCoin(const COutPoint &outpoint) const {
+    return ExecuteBackedWrapper([&]() { return CCoinsViewBacked::HaveCoin(outpoint); }, m_err_callbacks);
+}
diff --git a/src/coins.h b/src/coins.h
index 67fecc9785ddc..a4f477638635f 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -349,6 +349,7 @@ class CCoinsViewErrorCatcher final : public CCoinsViewBacked
     }
 
     bool GetCoin(const COutPoint &outpoint, Coin &coin) const override;
+    bool HaveCoin(const COutPoint &outpoint) const override;
 
 private:
     /** A list of callbacks to execute upon leveldb read error. */

From ed52e71176fc97c6ed01e3eebd85acdec54b4448 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Aur=C3=A8le=20Oul=C3=A8s?=
Date: Tue, 18 Oct 2022 10:59:37 +0200
Subject: [PATCH 002/172] Periodically check disk space to avoid corruption

---
 src/init.cpp | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/src/init.cpp b/src/init.cpp
index 8ffab64622673..57646c3eebaba 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1159,6 +1159,15 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
         RandAddPeriodic();
     }, std::chrono::minutes{1});
 
+    // Check disk space every 5 minutes to avoid db corruption.
+    node.scheduler->scheduleEvery([&args]{
+        constexpr uint64_t min_disk_space = 50 << 20; // 50 MB
+        if (!CheckDiskSpace(args.GetBlocksDirPath(), min_disk_space)) {
+            LogPrintf("Shutting down due to lack of disk space!\n");
+            StartShutdown();
+        }
+    }, std::chrono::minutes{5});
+
     GetMainSignals().RegisterBackgroundSignalScheduler(*node.scheduler);
 
     // Create client interfaces for wallets that are supposed to be loaded

From 5d8469362acfb7a03e0f767dbb7166830355bead Mon Sep 17 00:00:00 2001
From: Andrew Chow
Date: Fri, 14 Jul 2023 16:51:31 -0400
Subject: [PATCH 003/172] test: Add helper functions for checking node
 versions

---
 .../wallet_backwards_compatibility.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py
index 49e36b21c5d6d..b4443fd90ef4a 100755
--- a/test/functional/wallet_backwards_compatibility.py
+++ b/test/functional/wallet_backwards_compatibility.py
@@ -77,6 +77,24 @@ def nodes_wallet_dir(self, node):
             return node.chain_path
         return node.wallets_path
 
+    def split_version(self, node):
+        major = node.version // 10000
+        minor = (node.version % 10000) // 100
+        patch = (node.version % 100)
+        return (major, minor, patch)
+
+    def major_version_equals(self, node, major):
+        node_major, _, _ = self.split_version(node)
+        return node_major == major
+
+    def major_version_less_than(self, node, major):
+        node_major, _, _ = self.split_version(node)
+        return node_major < major
+
+    def major_version_at_least(self, node, major):
+        node_major, _, _ = self.split_version(node)
+        return node_major >= major
+
     def run_test(self):
         node_miner = self.nodes[0]
         node_master = self.nodes[1]

From ad66ca1e475d2546dbbda206465307613108a15d Mon Sep 17 00:00:00 2001
From: Martin Zumsande
Date: Fri, 2 Jun 2023 16:36:01 -0400
Subject: [PATCH 004/172] init: abort loading of blockindex in case of missing
 height.

If a height is missing, we are facing a non-contiguous block index db
and could previously hit an assert in GetAncestor(), called from
BuildSkip(), instead of returning an error.
---
 src/node/blockstorage.cpp | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 78416ec5765e9..a292ffe8698d2 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -259,8 +259,13 @@ bool BlockManager::LoadBlockIndex()
 
     std::sort(vSortedByHeight.begin(), vSortedByHeight.end(), CBlockIndexHeightOnlyComparator());
 
+    CBlockIndex* previous_index{nullptr};
     for (CBlockIndex* pindex : vSortedByHeight) {
         if (m_interrupt) return false;
+        if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
+            return error("%s: block index is non-contiguous, index of height %d missing", __func__, previous_index->nHeight + 1);
+        }
+        previous_index = pindex;
         pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
         pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);

From d27b9a2248476439ddab7700327f074005a810d5 Mon Sep 17 00:00:00 2001
From: Martin Zumsande
Date: Fri, 2 Jun 2023 13:45:02 -0400
Subject: [PATCH 005/172] test: fix feature_init.py file perturbation

Opening the file simultaneously in read and write mode truncated it to
an empty file, so the test perturbed nothing instead of corrupting the
existing contents. Also, revert to the previous state after each
perturbation so that each perturbation is applied in isolation.
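For context, Python's open(path, "wb") truncates the file at open time, so
combining the read and write opens in a single `with` statement wipes the
file before the read happens. A minimal standalone sketch of the bug (the
file name is illustrative, not part of the patch):

    with open("demo.bin", "wb") as f:
        f.write(b"original contents")
    # Buggy pattern from the old test: the "wb" open truncates the file
    # before tf_read ever reads it.
    with open("demo.bin", "rb") as tf_read, open("demo.bin", "wb") as tf_write:
        assert tf_read.read() == b""  # nothing left to perturb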
---
 test/functional/feature_init.py | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/test/functional/feature_init.py b/test/functional/feature_init.py
index 64ca312b842df..94f5116f9b00f 100755
--- a/test/functional/feature_init.py
+++ b/test/functional/feature_init.py
@@ -5,6 +5,7 @@
 """Stress tests related to node initialization."""
 import os
 from pathlib import Path
+import shutil
 
 from test_framework.test_framework import BitcoinTestFramework, SkipTest
 from test_framework.test_node import ErrorMatch
@@ -47,7 +48,7 @@ def sigterm_node():
 
         def start_expecting_error(err_fragment):
             node.assert_start_raises_init_error(
-                extra_args=['-txindex=1', '-blockfilterindex=1', '-coinstatsindex=1'],
+                extra_args=['-txindex=1', '-blockfilterindex=1', '-coinstatsindex=1', '-checkblocks=200', '-checklevel=4'],
                 expected_msg=err_fragment,
                 match=ErrorMatch.PARTIAL_REGEX,
            )
@@ -101,9 +102,9 @@ def check_clean_start():
         }
 
         files_to_perturb = {
-            'blocks/index/*.ldb': 'Error opening block database.',
+            'blocks/index/*.ldb': 'Error loading block database.',
             'chainstate/*.ldb': 'Error opening block database.',
-            'blocks/blk*.dat': 'Error opening block database.',
+            'blocks/blk*.dat': 'Corrupted block database detected.',
         }
 
         for file_patt, err_fragment in files_to_delete.items():
@@ -124,18 +125,31 @@ def check_clean_start():
         check_clean_start()
         self.stop_node(0)
 
+        self.log.info("Test startup errors after perturbing certain essential files")
         for file_patt, err_fragment in files_to_perturb.items():
+            shutil.copytree(node.chain_path / "blocks", node.chain_path / "blocks_bak")
+            shutil.copytree(node.chain_path / "chainstate", node.chain_path / "chainstate_bak")
             target_files = list(node.chain_path.glob(file_patt))
 
             for target_file in target_files:
                 self.log.info(f"Perturbing file to ensure failure {target_file}")
-                with open(target_file, "rb") as tf_read, open(target_file, "wb") as tf_write:
+                with open(target_file, "rb") as tf_read:
                     contents = tf_read.read()
                     tweaked_contents = bytearray(contents)
-                    tweaked_contents[50:250] = b'1' * 200
+                    # Since the genesis block is not checked by -checkblocks, the
+                    # perturbation window must be chosen such that a higher block
+                    # in blk*.dat is affected.
+                    tweaked_contents[150:350] = b'1' * 200
+                with open(target_file, "wb") as tf_write:
                     tf_write.write(bytes(tweaked_contents))
 
             start_expecting_error(err_fragment)
 
+            shutil.rmtree(node.chain_path / "blocks")
+            shutil.rmtree(node.chain_path / "chainstate")
+            shutil.move(node.chain_path / "blocks_bak", node.chain_path / "blocks")
+            shutil.move(node.chain_path / "chainstate_bak", node.chain_path / "chainstate")
+
 
 if __name__ == '__main__':
     InitStressTest().main()

From 63e90e1d3f5ed08f9871f07667d389ec66aa621c Mon Sep 17 00:00:00 2001
From: Sebastian Falbesoner
Date: Sun, 6 Aug 2023 19:09:04 +0200
Subject: [PATCH 006/172] test: check for specific disconnect reasons in
 p2p_blockfilters.py

This ensures that the disconnect happens for the expected reason and also
makes it easier to navigate between implementation and test code, i.e. both
the questions "do we have test coverage for this disconnect?" (from an
implementation reader's perspective) and "where is the code handling this
disconnect?" (from a test reader's perspective) can be answered simply by
grep-ping the corresponding debug message.

This is easiest to review with `-w` (to ignore whitespace changes).
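The mechanism used throughout is the functional test framework's
`assert_debug_log` context manager, which fails unless the node's debug.log
contains the expected fragment while the block executes. The shape of each
check below is roughly (peer/request names are illustrative):

    peer = node.add_p2p_connection(P2PInterface())
    # Tie the disconnect to the exact reason the node logs.
    with node.assert_debug_log(expected_msgs=["requested unsupported block filter type"]):
        peer.send_message(request)
        peer.wait_for_disconnect()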
--- test/functional/p2p_blockfilters.py | 48 +++++++++++++++++------------ 1 file changed, 29 insertions(+), 19 deletions(-) diff --git a/test/functional/p2p_blockfilters.py b/test/functional/p2p_blockfilters.py index e4908735c9a22..abe2f574e6e1a 100755 --- a/test/functional/p2p_blockfilters.py +++ b/test/functional/p2p_blockfilters.py @@ -211,38 +211,48 @@ def run_test(self): ] for request in requests: peer_1 = self.nodes[1].add_p2p_connection(P2PInterface()) - peer_1.send_message(request) - peer_1.wait_for_disconnect() + with self.nodes[1].assert_debug_log(expected_msgs=["requested unsupported block filter type"]): + peer_1.send_message(request) + peer_1.wait_for_disconnect() self.log.info("Check that invalid requests result in disconnection.") requests = [ # Requesting too many filters results in disconnection. - msg_getcfilters( - filter_type=FILTER_TYPE_BASIC, - start_height=0, - stop_hash=int(main_block_hash, 16), + ( + msg_getcfilters( + filter_type=FILTER_TYPE_BASIC, + start_height=0, + stop_hash=int(main_block_hash, 16), + ), "requested too many cfilters/cfheaders" ), # Requesting too many filter headers results in disconnection. - msg_getcfheaders( - filter_type=FILTER_TYPE_BASIC, - start_height=0, - stop_hash=int(tip_hash, 16), + ( + msg_getcfheaders( + filter_type=FILTER_TYPE_BASIC, + start_height=0, + stop_hash=int(tip_hash, 16), + ), "requested too many cfilters/cfheaders" ), # Requesting unknown filter type results in disconnection. - msg_getcfcheckpt( - filter_type=255, - stop_hash=int(main_block_hash, 16), + ( + msg_getcfcheckpt( + filter_type=255, + stop_hash=int(main_block_hash, 16), + ), "requested unsupported block filter type" ), # Requesting unknown hash results in disconnection. - msg_getcfcheckpt( - filter_type=FILTER_TYPE_BASIC, - stop_hash=123456789, + ( + msg_getcfcheckpt( + filter_type=FILTER_TYPE_BASIC, + stop_hash=123456789, + ), "requested invalid block hash" ), ] - for request in requests: + for request, expected_log_msg in requests: peer_0 = self.nodes[0].add_p2p_connection(P2PInterface()) - peer_0.send_message(request) - peer_0.wait_for_disconnect() + with self.nodes[0].assert_debug_log(expected_msgs=[expected_log_msg]): + peer_0.send_message(request) + peer_0.wait_for_disconnect() self.log.info("Test -peerblockfilters without -blockfilterindex raises an error") self.stop_node(0) From 2ab7952bda8d15e91b03f8307839030cbb55614e Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Sun, 6 Aug 2023 19:23:21 +0200 Subject: [PATCH 007/172] test: add bip157 coverage for (start height > stop height) disconnect --- test/functional/p2p_blockfilters.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/functional/p2p_blockfilters.py b/test/functional/p2p_blockfilters.py index abe2f574e6e1a..680fa9c7faf11 100755 --- a/test/functional/p2p_blockfilters.py +++ b/test/functional/p2p_blockfilters.py @@ -247,6 +247,14 @@ def run_test(self): stop_hash=123456789, ), "requested invalid block hash" ), + ( + # Request with (start block height > stop block height) results in disconnection. 
+                msg_getcfheaders(
+                    filter_type=FILTER_TYPE_BASIC,
+                    start_height=1000,
+                    stop_hash=int(self.nodes[0].getblockhash(999), 16),
+                ), "sent invalid getcfilters/getcfheaders with start height 1000 and stop height 999"
+            ),
         ]
         for request, expected_log_msg in requests:
             peer_0 = self.nodes[0].add_p2p_connection(P2PInterface())

From 313d665437079ce8426916a41a11972e97c73d6d Mon Sep 17 00:00:00 2001
From: Andrew Chow
Date: Mon, 3 Jul 2023 20:53:16 -0400
Subject: [PATCH 008/172] test: Fix 0.16 wallet paths and downgrade test

The test for 0.16 wallet downgrading was using the wrong wallet path and
thus incorrectly finding that 0.16 could open wallets created in master.
---
 .../wallet_backwards_compatibility.py | 39 +++++++++----------
 1 file changed, 19 insertions(+), 20 deletions(-)

diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py
index b4443fd90ef4a..d8dfd36970212 100755
--- a/test/functional/wallet_backwards_compatibility.py
+++ b/test/functional/wallet_backwards_compatibility.py
@@ -72,11 +72,6 @@ def setup_nodes(self):
         self.start_nodes()
         self.import_deterministic_coinbase_privkeys()
 
-    def nodes_wallet_dir(self, node):
-        if node.version < 170000:
-            return node.chain_path
-        return node.wallets_path
-
     def split_version(self, node):
         major = node.version // 10000
         minor = (node.version % 10000) // 100
@@ -178,7 +173,7 @@ def run_test(self):
         node_master_wallets_dir = node_master.wallets_path
         node_v19_wallets_dir = node_v19.wallets_path
         node_v17_wallets_dir = node_v17.wallets_path
-        node_v16_wallets_dir = node_v16.chain_path
+        node_v16_wallets_dir = node_v16.wallets_path
         node_master.unloadwallet("w1")
         node_master.unloadwallet("w2")
         node_master.unloadwallet("w3")
@@ -186,10 +181,13 @@ def run_test(self):
         for node in legacy_nodes:
             # Copy wallets to previous version
             for wallet in os.listdir(node_master_wallets_dir):
-                shutil.copytree(
-                    os.path.join(node_master_wallets_dir, wallet),
-                    os.path.join(self.nodes_wallet_dir(node), wallet)
-                )
+                dest = node.wallets_path / wallet
+                source = node_master_wallets_dir / wallet
+                if self.major_version_equals(node, 16):
+                    # 0.16 nodes expect the wallet to be in the wallet dir but as a plain file rather than in directories
+                    shutil.copyfile(source / "wallet.dat", dest)
+                else:
+                    shutil.copytree(source, dest)
 
         if not self.options.descriptors:
             # Descriptor wallets break compatibility, only run this test for legacy wallet
@@ -249,13 +247,14 @@ def run_test(self):
             node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Bitcoin Core")
             self.start_node(node_v17.index)
 
-        if not self.options.descriptors:
-            # Descriptor wallets break compatibility, only run this test for legacy wallets
-            # Open most recent wallet in v0.16 (no loadwallet RPC)
-            self.restart_node(node_v16.index, extra_args=["-wallet=w2"])
-            wallet = node_v16.get_wallet_rpc("w2")
-            info = wallet.getwalletinfo()
-            assert info['keypoolsize'] == 1
+        # No wallet created in master can be opened in 0.16
+        self.log.info("Test that wallets created in master are too new for 0.16")
+        self.stop_node(node_v16.index)
+        for wallet_name in ["w1", "w2", "w3"]:
+            if self.options.descriptors:
+                node_v16.assert_start_raises_init_error([f"-wallet={wallet_name}"], f"Error: {wallet_name} corrupt, salvage failed")
+            else:
+                node_v16.assert_start_raises_init_error([f"-wallet={wallet_name}"], f"Error: Error loading {wallet_name}: Wallet requires newer version of Bitcoin Core")
 
         # Create upgrade wallet in v0.16
self.restart_node(node_v16.index, extra_args=["-wallet=u1_v16"]) @@ -278,7 +277,7 @@ def run_test(self): # Old wallets are BDB and will only work if BDB is compiled # Copy the 0.16 wallet to the last Bitcoin Core version and open it: shutil.copyfile( - os.path.join(node_v16_wallets_dir, "wallets/u1_v16"), + os.path.join(node_v16_wallets_dir, "u1_v16"), os.path.join(node_master_wallets_dir, "u1_v16") ) load_res = node_master.loadwallet("u1_v16") @@ -297,10 +296,10 @@ def run_test(self): # Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it node_master.unloadwallet("u1_v16") - os.remove(os.path.join(node_v16_wallets_dir, "wallets/u1_v16")) + os.remove(os.path.join(node_v16_wallets_dir, "u1_v16")) shutil.copyfile( os.path.join(node_master_wallets_dir, "u1_v16"), - os.path.join(node_v16_wallets_dir, "wallets/u1_v16") + os.path.join(node_v16_wallets_dir, "u1_v16") ) self.start_node(node_v16.index, extra_args=["-wallet=u1_v16"]) wallet = node_v16.get_wallet_rpc("u1_v16") From 53f35d02cb7b67ddecc9514559083f85093b6ce5 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Mon, 3 Jul 2023 21:06:23 -0400 Subject: [PATCH 009/172] test: Remove w1_v18 from wallet backwards compatibility This wallet is no longer used in the test --- test/functional/wallet_backwards_compatibility.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py index d8dfd36970212..ce47f30c38374 100755 --- a/test/functional/wallet_backwards_compatibility.py +++ b/test/functional/wallet_backwards_compatibility.py @@ -94,7 +94,6 @@ def run_test(self): node_miner = self.nodes[0] node_master = self.nodes[1] node_v19 = self.nodes[self.num_nodes - 4] - node_v18 = self.nodes[self.num_nodes - 3] node_v17 = self.nodes[self.num_nodes - 2] node_v16 = self.nodes[self.num_nodes - 1] @@ -145,13 +144,6 @@ def run_test(self): assert wallet.getaddressinfo(address_18075)["solvable"] node_v19.unloadwallet("w1_v19") - # w1_v18: regular wallet, created with v0.18 - node_v18.rpc.createwallet(wallet_name="w1_v18") - wallet = node_v18.get_wallet_rpc("w1_v18") - info = wallet.getwalletinfo() - assert info['private_keys_enabled'] - assert info['keypoolsize'] > 0 - # w2: wallet with private keys disabled, created on master: update this # test when default wallets private keys disabled can no longer be # opened by older versions. From 71c03aeff7e1c63c21fa72d119311230f0b30e73 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Mon, 3 Jul 2023 21:14:43 -0400 Subject: [PATCH 010/172] test: Refactor v19 addmultisigaddress test to be distinct This specific test is distinct from the rest of the backwards compatibility tests as it is checking a specific failure. 
--- .../wallet_backwards_compatibility.py | 73 ++++++++++--------- 1 file changed, 40 insertions(+), 33 deletions(-) diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py index ce47f30c38374..526672e78614f 100755 --- a/test/functional/wallet_backwards_compatibility.py +++ b/test/functional/wallet_backwards_compatibility.py @@ -90,10 +90,47 @@ def major_version_at_least(self, node, major): node_major, _, _ = self.split_version(node) return node_major >= major + def test_v19_addmultisigaddress(self): + if not self.is_bdb_compiled(): + return + # Specific test for addmultisigaddress using v19 + # See #18075 + self.log.info("Testing 0.19 addmultisigaddress case (#18075)") + node_master = self.nodes[1] + node_v19 = self.nodes[self.num_nodes - 4] + node_v19.rpc.createwallet(wallet_name="w1_v19") + wallet = node_v19.get_wallet_rpc("w1_v19") + info = wallet.getwalletinfo() + assert info['private_keys_enabled'] + assert info['keypoolsize'] > 0 + # Use addmultisigaddress (see #18075) + address_18075 = wallet.rpc.addmultisigaddress(1, ["0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52", "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"], "", "legacy")["address"] + assert wallet.getaddressinfo(address_18075)["solvable"] + node_v19.unloadwallet("w1_v19") + + # Copy the 0.19 wallet to the last Bitcoin Core version and open it: + shutil.copytree( + os.path.join(node_v19.wallets_path, "w1_v19"), + os.path.join(node_master.wallets_path, "w1_v19") + ) + node_master.loadwallet("w1_v19") + wallet = node_master.get_wallet_rpc("w1_v19") + assert wallet.getaddressinfo(address_18075)["solvable"] + + # Now copy that same wallet back to 0.19 to make sure no automatic upgrade breaks it + node_master.unloadwallet("w1_v19") + shutil.rmtree(os.path.join(node_v19.wallets_path, "w1_v19")) + shutil.copytree( + os.path.join(node_master.wallets_path, "w1_v19"), + os.path.join(node_v19.wallets_path, "w1_v19") + ) + node_v19.loadwallet("w1_v19") + wallet = node_v19.get_wallet_rpc("w1_v19") + assert wallet.getaddressinfo(address_18075)["solvable"] + def run_test(self): node_miner = self.nodes[0] node_master = self.nodes[1] - node_v19 = self.nodes[self.num_nodes - 4] node_v17 = self.nodes[self.num_nodes - 2] node_v16 = self.nodes[self.num_nodes - 1] @@ -133,17 +170,6 @@ def run_test(self): # Abandon transaction, but don't confirm node_master.abandontransaction(tx3_id) - # w1_v19: regular wallet, created with v0.19 - node_v19.rpc.createwallet(wallet_name="w1_v19") - wallet = node_v19.get_wallet_rpc("w1_v19") - info = wallet.getwalletinfo() - assert info['private_keys_enabled'] - assert info['keypoolsize'] > 0 - # Use addmultisigaddress (see #18075) - address_18075 = wallet.rpc.addmultisigaddress(1, ["0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52", "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"], "", "legacy")["address"] - assert wallet.getaddressinfo(address_18075)["solvable"] - node_v19.unloadwallet("w1_v19") - # w2: wallet with private keys disabled, created on master: update this # test when default wallets private keys disabled can no longer be # opened by older versions. 
@@ -163,7 +189,6 @@ def run_test(self): # Unload wallets and copy to older nodes: node_master_wallets_dir = node_master.wallets_path - node_v19_wallets_dir = node_v19.wallets_path node_v17_wallets_dir = node_v17.wallets_path node_v16_wallets_dir = node_v16.wallets_path node_master.unloadwallet("w1") @@ -181,6 +206,8 @@ def run_test(self): else: shutil.copytree(source, dest) + self.test_v19_addmultisigaddress() + if not self.options.descriptors: # Descriptor wallets break compatibility, only run this test for legacy wallet # Load modern wallet with older nodes @@ -322,25 +349,5 @@ def run_test(self): info = wallet.getaddressinfo(address) assert_equal(info, v17_info) - # Copy the 0.19 wallet to the last Bitcoin Core version and open it: - shutil.copytree( - os.path.join(node_v19_wallets_dir, "w1_v19"), - os.path.join(node_master_wallets_dir, "w1_v19") - ) - node_master.loadwallet("w1_v19") - wallet = node_master.get_wallet_rpc("w1_v19") - assert wallet.getaddressinfo(address_18075)["solvable"] - - # Now copy that same wallet back to 0.19 to make sure no automatic upgrade breaks it - node_master.unloadwallet("w1_v19") - shutil.rmtree(os.path.join(node_v19_wallets_dir, "w1_v19")) - shutil.copytree( - os.path.join(node_master_wallets_dir, "w1_v19"), - os.path.join(node_v19_wallets_dir, "w1_v19") - ) - node_v19.loadwallet("w1_v19") - wallet = node_v19.get_wallet_rpc("w1_v19") - assert wallet.getaddressinfo(address_18075)["solvable"] - if __name__ == '__main__': BackwardsCompatibilityTest().main() From f41215c3f08f99d1bfa524f2da8055b6a4458bbb Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Mon, 3 Jul 2023 21:23:24 -0400 Subject: [PATCH 011/172] test: add logging 0.17 incompatibilities in wallet back compat --- test/functional/wallet_backwards_compatibility.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py index 526672e78614f..658c0e7480985 100755 --- a/test/functional/wallet_backwards_compatibility.py +++ b/test/functional/wallet_backwards_compatibility.py @@ -258,11 +258,13 @@ def run_test(self): # Instead, we stop node and try to launch it with the wallet: self.stop_node(node_v17.index) if self.options.descriptors: + self.log.info("Test descriptor wallet incompatibility with 0.17") # Descriptor wallets appear to be corrupted wallets to old software node_v17.assert_start_raises_init_error(["-wallet=w1"], "Error: wallet.dat corrupt, salvage failed") node_v17.assert_start_raises_init_error(["-wallet=w2"], "Error: wallet.dat corrupt, salvage failed") node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: wallet.dat corrupt, salvage failed") else: + self.log.info("Test blank wallet incompatibility with v17") node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Bitcoin Core") self.start_node(node_v17.index) From f158573be12746991b75587cc9e41a74a5e986eb Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Mon, 3 Jul 2023 21:50:20 -0400 Subject: [PATCH 012/172] test: Add 0.21 tr() incompatibility test --- test/functional/wallet_backwards_compatibility.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py index 658c0e7480985..0512fe2924000 100755 --- a/test/functional/wallet_backwards_compatibility.py +++ b/test/functional/wallet_backwards_compatibility.py @@ -131,6 +131,7 @@ def test_v19_addmultisigaddress(self): def 
run_test(self): node_miner = self.nodes[0] node_master = self.nodes[1] + node_v21 = self.nodes[self.num_nodes - 6] node_v17 = self.nodes[self.num_nodes - 2] node_v16 = self.nodes[self.num_nodes - 1] @@ -277,6 +278,11 @@ def run_test(self): else: node_v16.assert_start_raises_init_error([f"-wallet={wallet_name}"], f"Error: Error loading {wallet_name}: Wallet requires newer version of Bitcoin Core") + # When descriptors are enabled, w1 cannot be opened by 0.21 since it contains a taproot descriptor + if self.options.descriptors: + self.log.info("Test that 0.21 cannot open wallet containing tr() descriptors") + assert_raises_rpc_error(-1, "map::at", node_v21.loadwallet, "w1") + # Create upgrade wallet in v0.16 self.restart_node(node_v16.index, extra_args=["-wallet=u1_v16"]) wallet = node_v16.get_wallet_rpc("u1_v16") From 6d4699028b17cb33953f7d11764e06069dd58915 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Mon, 3 Jul 2023 19:28:45 -0400 Subject: [PATCH 013/172] test: Run downgrade test on descriptor wallets --- .../wallet_backwards_compatibility.py | 102 ++++++++++-------- 1 file changed, 56 insertions(+), 46 deletions(-) diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py index 0512fe2924000..e114e05867376 100755 --- a/test/functional/wallet_backwards_compatibility.py +++ b/test/functional/wallet_backwards_compatibility.py @@ -135,7 +135,9 @@ def run_test(self): node_v17 = self.nodes[self.num_nodes - 2] node_v16 = self.nodes[self.num_nodes - 1] - legacy_nodes = self.nodes[2:] + legacy_nodes = self.nodes[2:] # Nodes that support legacy wallets + legacy_only_nodes = self.nodes[-5:] # Nodes that only support legacy wallets + descriptors_nodes = self.nodes[2:-5] # Nodes that support descriptor wallets self.generatetoaddress(node_miner, COINBASE_MATURITY + 1, node_miner.getnewaddress()) @@ -209,52 +211,60 @@ def run_test(self): self.test_v19_addmultisigaddress() - if not self.options.descriptors: - # Descriptor wallets break compatibility, only run this test for legacy wallet - # Load modern wallet with older nodes - for node in legacy_nodes: - for wallet_name in ["w1", "w2", "w3"]: - if node.version < 170000: - # loadwallet was introduced in v0.17.0 - continue - if node.version < 180000 and wallet_name == "w3": - # Blank wallets were introduced in v0.18.0. We test the loading error below. 
- continue - node.loadwallet(wallet_name) - wallet = node.get_wallet_rpc(wallet_name) - info = wallet.getwalletinfo() - if wallet_name == "w1": - assert info['private_keys_enabled'] == True - assert info['keypoolsize'] > 0 - txs = wallet.listtransactions() - assert_equal(len(txs), 5) - assert_equal(txs[1]["txid"], tx1_id) - assert_equal(txs[2]["walletconflicts"], [tx1_id]) - assert_equal(txs[1]["replaced_by_txid"], tx2_id) - assert not txs[1]["abandoned"] - assert_equal(txs[1]["confirmations"], -1) - assert_equal(txs[2]["blockindex"], 1) - assert txs[3]["abandoned"] - assert_equal(txs[4]["walletconflicts"], [tx3_id]) - assert_equal(txs[3]["replaced_by_txid"], tx4_id) - assert not hasattr(txs[3], "blockindex") - elif wallet_name == "w2": - assert info['private_keys_enabled'] == False - assert info['keypoolsize'] == 0 - else: - assert info['private_keys_enabled'] == True - assert info['keypoolsize'] == 0 - else: - for node in legacy_nodes: + self.log.info("Test that a wallet made on master can be opened on:") + # In descriptors wallet mode, run this test on the nodes that support descriptor wallets + # In legacy wallets mode, run this test on the nodes that support legacy wallets + for node in descriptors_nodes if self.options.descriptors else legacy_nodes: + if self.major_version_less_than(node, 17): + # loadwallet was introduced in v0.17.0 + continue + self.log.info(f"- {node.version}") + for wallet_name in ["w1", "w2", "w3"]: + if self.major_version_less_than(node, 18) and wallet_name == "w3": + # Blank wallets were introduced in v0.18.0. We test the loading error below. + continue + if self.major_version_less_than(node, 22) and wallet_name == "w1" and self.options.descriptors: + # Descriptor wallets created after 0.21 have taproot descriptors which 0.21 does not support, tested below + continue + node.loadwallet(wallet_name) + wallet = node.get_wallet_rpc(wallet_name) + info = wallet.getwalletinfo() + if wallet_name == "w1": + assert info['private_keys_enabled'] == True + assert info['keypoolsize'] > 0 + txs = wallet.listtransactions() + assert_equal(len(txs), 5) + assert_equal(txs[1]["txid"], tx1_id) + assert_equal(txs[2]["walletconflicts"], [tx1_id]) + assert_equal(txs[1]["replaced_by_txid"], tx2_id) + assert not txs[1]["abandoned"] + assert_equal(txs[1]["confirmations"], -1) + assert_equal(txs[2]["blockindex"], 1) + assert txs[3]["abandoned"] + assert_equal(txs[4]["walletconflicts"], [tx3_id]) + assert_equal(txs[3]["replaced_by_txid"], tx4_id) + assert not hasattr(txs[3], "blockindex") + elif wallet_name == "w2": + assert info['private_keys_enabled'] == False + assert info['keypoolsize'] == 0 + else: + assert info['private_keys_enabled'] == True + assert info['keypoolsize'] == 0 + + # Check that descriptor wallets don't work on legacy only nodes + if self.options.descriptors: + self.log.info("Test descriptor wallet incompatibility on:") + for node in legacy_only_nodes: + # RPC loadwallet failure causes bitcoind to exit in <= 0.17, in addition to the RPC + # call failure, so the following test won't work: + # assert_raises_rpc_error(-4, "Wallet loading failed.", node_v17.loadwallet, 'w3') + if self.major_version_less_than(node, 18): + continue + self.log.info(f"- {node.version}") # Descriptor wallets appear to be corrupted wallets to old software - # and loadwallet is introduced in v0.17.0 - if node.version >= 170000 and node.version < 210000: - for wallet_name in ["w1", "w2", "w3"]: - assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", 
node.loadwallet, wallet_name) - - # RPC loadwallet failure causes bitcoind to exit, in addition to the RPC - # call failure, so the following test won't work: - # assert_raises_rpc_error(-4, "Wallet loading failed.", node_v17.loadwallet, 'w3') + assert self.major_version_at_least(node, 18) and self.major_version_less_than(node, 21) + for wallet_name in ["w1", "w2", "w3"]: + assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node.loadwallet, wallet_name) # Instead, we stop node and try to launch it with the wallet: self.stop_node(node_v17.index) From 538939ec39e146bedffb80cf84849a450ea8fead Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Mon, 3 Jul 2023 19:28:58 -0400 Subject: [PATCH 014/172] test: Run upgrade test on all nodes --- .../wallet_backwards_compatibility.py | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py index e114e05867376..03da63f9ece38 100755 --- a/test/functional/wallet_backwards_compatibility.py +++ b/test/functional/wallet_backwards_compatibility.py @@ -192,8 +192,6 @@ def run_test(self): # Unload wallets and copy to older nodes: node_master_wallets_dir = node_master.wallets_path - node_v17_wallets_dir = node_v17.wallets_path - node_v16_wallets_dir = node_v16.wallets_path node_master.unloadwallet("w1") node_master.unloadwallet("w2") node_master.unloadwallet("w3") @@ -293,79 +291,81 @@ def run_test(self): self.log.info("Test that 0.21 cannot open wallet containing tr() descriptors") assert_raises_rpc_error(-1, "map::at", node_v21.loadwallet, "w1") - # Create upgrade wallet in v0.16 - self.restart_node(node_v16.index, extra_args=["-wallet=u1_v16"]) - wallet = node_v16.get_wallet_rpc("u1_v16") - v16_addr = wallet.getnewaddress('', "bech32") - v16_info = wallet.validateaddress(v16_addr) - v16_pubkey = v16_info['pubkey'] - self.stop_node(node_v16.index) + self.log.info("Test that a wallet can upgrade to and downgrade from master, from:") + for node in descriptors_nodes if self.options.descriptors else legacy_nodes: + self.log.info(f"- {node.version}") + wallet_name = f"up_{node.version}" + if self.major_version_less_than(node, 17): + # createwallet is only available in 0.17+ + self.restart_node(node.index, extra_args=[f"-wallet={wallet_name}"]) + wallet_prev = node.get_wallet_rpc(wallet_name) + address = wallet_prev.getnewaddress('', "bech32") + addr_info = wallet_prev.validateaddress(address) + else: + if self.major_version_at_least(node, 21): + node.rpc.createwallet(wallet_name=wallet_name, descriptors=self.options.descriptors) + else: + node.rpc.createwallet(wallet_name=wallet_name) + wallet_prev = node.get_wallet_rpc(wallet_name) + address = wallet_prev.getnewaddress('', "bech32") + addr_info = wallet_prev.getaddressinfo(address) + + hdkeypath = addr_info["hdkeypath"].replace("'", "h") + pubkey = addr_info["pubkey"] + + # Make a backup of the wallet file + backup_path = os.path.join(self.options.tmpdir, f"{wallet_name}.dat") + wallet_prev.backupwallet(backup_path) + + # Remove the wallet from old node + if self.major_version_at_least(node, 17): + wallet_prev.unloadwallet() + else: + self.stop_node(node.index) + + # Restore the wallet to master + load_res = node_master.restorewallet(wallet_name, backup_path) - self.log.info("Test wallet upgrade path...") - # u1: regular wallet, created with v0.17 - node_v17.rpc.createwallet(wallet_name="u1_v17") - wallet = 
node_v17.get_wallet_rpc("u1_v17") - address = wallet.getnewaddress("bech32") - v17_info = wallet.getaddressinfo(address) - hdkeypath = v17_info["hdkeypath"].replace("'", "h") - pubkey = v17_info["pubkey"] - - if self.is_bdb_compiled(): - # Old wallets are BDB and will only work if BDB is compiled - # Copy the 0.16 wallet to the last Bitcoin Core version and open it: - shutil.copyfile( - os.path.join(node_v16_wallets_dir, "u1_v16"), - os.path.join(node_master_wallets_dir, "u1_v16") - ) - load_res = node_master.loadwallet("u1_v16") # Make sure this wallet opens with only the migration warning. See https://github.com/bitcoin/bitcoin/pull/19054 - if int(node_master.getnetworkinfo()["version"]) >= 249900: - # loadwallet#warnings (added in v25) -- only present if there is a warning + if not self.options.descriptors: # Legacy wallets will have only a deprecation warning assert_equal(load_res["warnings"], ["Wallet loaded successfully. The legacy wallet type is being deprecated and support for creating and opening legacy wallets will be removed in the future. Legacy wallets can be migrated to a descriptor wallet with migratewallet."]) else: - # loadwallet#warning (deprecated in v25) -- always present, but empty string if no warning - assert_equal(load_res["warning"], '') - wallet = node_master.get_wallet_rpc("u1_v16") - info = wallet.getaddressinfo(v16_addr) - descriptor = f"wpkh([{info['hdmasterfingerprint']}{hdkeypath[1:]}]{v16_pubkey})" - assert_equal(info["desc"], descsum_create(descriptor)) + assert "warnings" not in load_res - # Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it - node_master.unloadwallet("u1_v16") - os.remove(os.path.join(node_v16_wallets_dir, "u1_v16")) - shutil.copyfile( - os.path.join(node_master_wallets_dir, "u1_v16"), - os.path.join(node_v16_wallets_dir, "u1_v16") - ) - self.start_node(node_v16.index, extra_args=["-wallet=u1_v16"]) - wallet = node_v16.get_wallet_rpc("u1_v16") - info = wallet.validateaddress(v16_addr) - assert_equal(info, v16_info) - - # Copy the 0.17 wallet to the last Bitcoin Core version and open it: - node_v17.unloadwallet("u1_v17") - shutil.copytree( - os.path.join(node_v17_wallets_dir, "u1_v17"), - os.path.join(node_master_wallets_dir, "u1_v17") - ) - node_master.loadwallet("u1_v17") - wallet = node_master.get_wallet_rpc("u1_v17") + wallet = node_master.get_wallet_rpc(wallet_name) info = wallet.getaddressinfo(address) descriptor = f"wpkh([{info['hdmasterfingerprint']}{hdkeypath[1:]}]{pubkey})" assert_equal(info["desc"], descsum_create(descriptor)) - # Now copy that same wallet back to 0.17 to make sure no automatic upgrade breaks it - node_master.unloadwallet("u1_v17") - shutil.rmtree(os.path.join(node_v17_wallets_dir, "u1_v17")) - shutil.copytree( - os.path.join(node_master_wallets_dir, "u1_v17"), - os.path.join(node_v17_wallets_dir, "u1_v17") - ) - node_v17.loadwallet("u1_v17") - wallet = node_v17.get_wallet_rpc("u1_v17") - info = wallet.getaddressinfo(address) - assert_equal(info, v17_info) + # Make backup so the wallet can be copied back to old node + down_wallet_name = f"re_down_{node.version}" + down_backup_path = os.path.join(self.options.tmpdir, f"{down_wallet_name}.dat") + wallet.backupwallet(down_backup_path) + wallet.unloadwallet() + + # Check that no automatic upgrade broke the downgrading the wallet + if self.major_version_less_than(node, 17): + # loadwallet is only available in 0.17+ + shutil.copyfile( + down_backup_path, + node.wallets_path / down_wallet_name + ) + self.start_node(node.index, 
extra_args=[f"-wallet={down_wallet_name}"])
+                wallet_res = node.get_wallet_rpc(down_wallet_name)
+                info = wallet_res.validateaddress(address)
+                assert_equal(info, addr_info)
+            else:
+                target_dir = node.wallets_path / down_wallet_name
+                os.makedirs(target_dir, exist_ok=True)
+                shutil.copyfile(
+                    down_backup_path,
+                    target_dir / "wallet.dat"
+                )
+                node.loadwallet(down_wallet_name)
+                wallet_res = node.get_wallet_rpc(down_wallet_name)
+                info = wallet_res.getaddressinfo(address)
+                assert_equal(info, addr_info)
 
 if __name__ == '__main__':
     BackwardsCompatibilityTest().main()

From bbf43c63b9472a79462e625a1f0592973c22b47c Mon Sep 17 00:00:00 2001
From: Andrew Chow
Date: Mon, 3 Jul 2023 21:33:20 -0400
Subject: [PATCH 015/172] test: Add 25.0 to wallet backwards compatibility
 test

---
 test/functional/wallet_backwards_compatibility.py | 4 +++-
 test/get_previous_releases.py                     | 9 +++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py
index 03da63f9ece38..29c4f72607e35 100755
--- a/test/functional/wallet_backwards_compatibility.py
+++ b/test/functional/wallet_backwards_compatibility.py
@@ -33,11 +33,12 @@ def add_options(self, parser):
 
     def set_test_params(self):
         self.setup_clean_chain = True
-        self.num_nodes = 11
+        self.num_nodes = 12
         # Add new version after each release:
         self.extra_args = [
             ["-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # Pre-release: use to mine blocks. noban for immediate tx relay
             ["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # Pre-release: use to receive coins, swap wallets, etc
+            ["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v25.0
             ["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v24.0.1
             ["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v23.0
             ["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v22.0
@@ -58,6 +59,7 @@ def setup_nodes(self):
         self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
             None,
             None,
+            250000,
             240001,
             230000,
             220000,
diff --git a/test/get_previous_releases.py b/test/get_previous_releases.py
index 60c868ca04595..4a27454ff28e7 100755
--- a/test/get_previous_releases.py
+++ b/test/get_previous_releases.py
@@ -89,6 +89,15 @@
     "6b163cef7de4beb07b8cb3347095e0d76a584019b1891135cd1268a1f05b9d88": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-riscv64-linux-gnu.tar.gz"},
     "e2f751512f3c0f00eb68ba946d9c829e6cf99422a61e8f5e0a7c109c318674d0": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-x86_64-apple-darwin.tar.gz"},
     "49df6e444515d457ea0b885d66f521f2a26ca92ccf73d5296082e633544253bf": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-x86_64-linux-gnu.tar.gz"},
+
+    "3a7bdd959a0b426624f63f394f25e5b7769a5a2f96f8126dcc2ea53f3fa5212b": {"tag": "v25.0", "tarball": "bitcoin-25.0-aarch64-linux-gnu.tar.gz"},
+    "e537c8630b05e63242d979c3004f851fd73c2a10b5b4fdbb161788427c7b3c0f": {"tag": "v25.0", "tarball": "bitcoin-25.0-arm-linux-gnueabihf.tar.gz"},
+    "3b35075d6c1209743611c705a13575be2668bc069bc6301ce78a2e1e53ebe7cc": {"tag": "v25.0", "tarball": "bitcoin-25.0-arm64-apple-darwin.tar.gz"},
+    "0c8e135a6fd297270d3b65196042d761453493a022b5ff7fb847fc911e938214": {"tag": "v25.0", "tarball": "bitcoin-25.0-powerpc64-linux-gnu.tar.gz"},
+    "fa8af160782f5adfcea570f72b947073c1663b3e9c3cd0f82b216b609fe47573": {"tag": "v25.0", "tarball": "bitcoin-25.0-powerpc64le-linux-gnu.tar.gz"},
"fe6e347a66043946920c72c9c4afca301968101e6b82fb90a63d7885ebcceb32": {"tag": "v25.0", "tarball": "bitcoin-25.0-riscv64-linux-gnu.tar.gz"}, + "5708fc639cdfc27347cccfd50db9b73b53647b36fb5f3a4a93537cbe8828c27f": {"tag": "v25.0", "tarball": "bitcoin-25.0-x86_64-apple-darwin.tar.gz"}, + "33930d432593e49d58a9bff4c30078823e9af5d98594d2935862788ce8a20aec": {"tag": "v25.0", "tarball": "bitcoin-25.0-x86_64-linux-gnu.tar.gz"}, } From afd9a673c458e97305da49a70a1ddbf60e651876 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Mon, 3 Jul 2023 22:51:09 -0400 Subject: [PATCH 016/172] test: roundtrip wallet backwards compat downgrade Test that old nodes don't mess up new wallets by loading a downgraded wallet in master again. --- .../wallet_backwards_compatibility.py | 59 +++++++++++-------- 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py index 29c4f72607e35..4d6e6024c5efe 100755 --- a/test/functional/wallet_backwards_compatibility.py +++ b/test/functional/wallet_backwards_compatibility.py @@ -226,30 +226,41 @@ def run_test(self): if self.major_version_less_than(node, 22) and wallet_name == "w1" and self.options.descriptors: # Descriptor wallets created after 0.21 have taproot descriptors which 0.21 does not support, tested below continue - node.loadwallet(wallet_name) - wallet = node.get_wallet_rpc(wallet_name) - info = wallet.getwalletinfo() - if wallet_name == "w1": - assert info['private_keys_enabled'] == True - assert info['keypoolsize'] > 0 - txs = wallet.listtransactions() - assert_equal(len(txs), 5) - assert_equal(txs[1]["txid"], tx1_id) - assert_equal(txs[2]["walletconflicts"], [tx1_id]) - assert_equal(txs[1]["replaced_by_txid"], tx2_id) - assert not txs[1]["abandoned"] - assert_equal(txs[1]["confirmations"], -1) - assert_equal(txs[2]["blockindex"], 1) - assert txs[3]["abandoned"] - assert_equal(txs[4]["walletconflicts"], [tx3_id]) - assert_equal(txs[3]["replaced_by_txid"], tx4_id) - assert not hasattr(txs[3], "blockindex") - elif wallet_name == "w2": - assert info['private_keys_enabled'] == False - assert info['keypoolsize'] == 0 - else: - assert info['private_keys_enabled'] == True - assert info['keypoolsize'] == 0 + # Also try to reopen on master after opening on old + for n in [node, node_master]: + n.loadwallet(wallet_name) + wallet = n.get_wallet_rpc(wallet_name) + info = wallet.getwalletinfo() + if wallet_name == "w1": + assert info['private_keys_enabled'] == True + assert info['keypoolsize'] > 0 + txs = wallet.listtransactions() + assert_equal(len(txs), 5) + assert_equal(txs[1]["txid"], tx1_id) + assert_equal(txs[2]["walletconflicts"], [tx1_id]) + assert_equal(txs[1]["replaced_by_txid"], tx2_id) + assert not txs[1]["abandoned"] + assert_equal(txs[1]["confirmations"], -1) + assert_equal(txs[2]["blockindex"], 1) + assert txs[3]["abandoned"] + assert_equal(txs[4]["walletconflicts"], [tx3_id]) + assert_equal(txs[3]["replaced_by_txid"], tx4_id) + assert not hasattr(txs[3], "blockindex") + elif wallet_name == "w2": + assert info['private_keys_enabled'] == False + assert info['keypoolsize'] == 0 + else: + assert info['private_keys_enabled'] == True + assert info['keypoolsize'] == 0 + + # Copy back to master + wallet.unloadwallet() + if n == node: + shutil.rmtree(node_master.wallets_path / wallet_name) + shutil.copytree( + n.wallets_path / wallet_name, + node_master.wallets_path / wallet_name, + ) # Check that descriptor wallets don't work on legacy only nodes if 
self.options.descriptors:

From 5ac1a51ee5a57da59f1ff1986b7d9054484d3c80 Mon Sep 17 00:00:00 2001
From: Vasil Dimov
Date: Tue, 25 May 2021 15:01:53 +0200
Subject: [PATCH 017/172] i2p: avoid using Sock::Get() for checking for a
 valid socket

Peeking at the underlying socket file descriptor of `Sock` and checking
if it is `INVALID_SOCKET` is bad encapsulation and stands in the way of
testing/mocking/fuzzing. Instead use an empty unique_ptr to denote that
there is no valid socket.
---
 src/i2p.cpp | 10 ++++------
 src/i2p.h   |  1 +
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/src/i2p.cpp b/src/i2p.cpp
index f03e375adfae9..5a3dde54ced59 100644
--- a/src/i2p.cpp
+++ b/src/i2p.cpp
@@ -119,7 +119,6 @@ Session::Session(const fs::path& private_key_file,
     : m_private_key_file{private_key_file},
       m_control_host{control_host},
       m_interrupt{interrupt},
-      m_control_sock{std::make_unique<Sock>(INVALID_SOCKET)},
       m_transient{false}
 {
 }
@@ -127,7 +126,6 @@ Session::Session(const fs::path& private_key_file,
 Session::Session(const CService& control_host, CThreadInterrupt* interrupt)
     : m_control_host{control_host},
       m_interrupt{interrupt},
-      m_control_sock{std::make_unique<Sock>(INVALID_SOCKET)},
       m_transient{true}
 {
 }
@@ -315,7 +313,7 @@ void Session::CheckControlSock()
     LOCK(m_mutex);
 
     std::string errmsg;
-    if (!m_control_sock->IsConnected(errmsg)) {
+    if (m_control_sock && !m_control_sock->IsConnected(errmsg)) {
         Log("Control socket error: %s", errmsg);
         Disconnect();
     }
@@ -364,7 +362,7 @@ Binary Session::MyDestination() const
 void Session::CreateIfNotCreatedAlready()
 {
     std::string errmsg;
-    if (m_control_sock->IsConnected(errmsg)) {
+    if (m_control_sock && m_control_sock->IsConnected(errmsg)) {
         return;
     }
@@ -437,14 +435,14 @@ std::unique_ptr<Sock> Session::StreamAccept()
 
 void Session::Disconnect()
 {
-    if (m_control_sock->Get() != INVALID_SOCKET) {
+    if (m_control_sock) {
         if (m_session_id.empty()) {
             Log("Destroying incomplete SAM session");
         } else {
             Log("Destroying SAM session %s", m_session_id);
         }
+        m_control_sock.reset();
     }
-    m_control_sock = std::make_unique<Sock>(INVALID_SOCKET);
     m_session_id.clear();
 }
 } // namespace sam

diff --git a/src/i2p.h b/src/i2p.h
index c9c99292d97dd..cb9da64816f5f 100644
--- a/src/i2p.h
+++ b/src/i2p.h
@@ -261,6 +261,7 @@ class Session
      * ("SESSION CREATE"). With the established session id we later open
      * other connections to the SAM service to accept incoming I2P
      * connections and make outgoing ones.
+     * If not connected then this unique_ptr will be empty.
      * See https://geti2p.net/en/docs/api/samv3
      */
    std::unique_ptr<Sock> m_control_sock GUARDED_BY(m_mutex);

From aeac68d036e3cff57ce155f1a904d77f98b357d4 Mon Sep 17 00:00:00 2001
From: Vasil Dimov
Date: Tue, 25 May 2021 15:05:26 +0200
Subject: [PATCH 018/172] net: don't check if the socket is valid in
 GetBindAddress()

The socket is always valid (the underlying file descriptor is not
`INVALID_SOCKET`) when `GetBindAddress()` is called.
--- src/net.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 53a2dcf12582e..cf19274e34956 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -427,12 +427,10 @@ static CAddress GetBindAddress(const Sock& sock) CAddress addr_bind; struct sockaddr_storage sockaddr_bind; socklen_t sockaddr_bind_len = sizeof(sockaddr_bind); - if (sock.Get() != INVALID_SOCKET) { - if (!sock.GetSockName((struct sockaddr*)&sockaddr_bind, &sockaddr_bind_len)) { - addr_bind.SetSockAddr((const struct sockaddr*)&sockaddr_bind); - } else { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "getsockname failed\n"); - } + if (!sock.GetSockName((struct sockaddr*)&sockaddr_bind, &sockaddr_bind_len)) { + addr_bind.SetSockAddr((const struct sockaddr*)&sockaddr_bind); + } else { + LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "getsockname failed\n"); } return addr_bind; } From 944b21b70ae490a5a746bcc1810a5074d74e9d34 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Tue, 25 May 2021 15:25:40 +0200 Subject: [PATCH 019/172] net: don't check if the socket is valid in ConnectSocketDirectly() The socket is always valid (the underlying file descriptor is not `INVALID_SOCKET`) when `ConnectSocketDirectly()` is called. --- src/netbase.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/netbase.cpp b/src/netbase.cpp index a8419217f46ff..ca1a80d72f7fd 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -514,10 +514,6 @@ bool ConnectSocketDirectly(const CService &addrConnect, const Sock& sock, int nT // Create a sockaddr from the specified service. struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); - if (sock.Get() == INVALID_SOCKET) { - LogPrintf("Cannot connect to %s: invalid socket\n", addrConnect.ToStringAddrPort()); - return false; - } if (!addrConnect.GetSockAddr((struct sockaddr*)&sockaddr, &len)) { LogPrintf("Cannot connect to %s: unsupported network\n", addrConnect.ToStringAddrPort()); return false; From 7829272f7826511241defd34954e6040ea963f07 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Thu, 29 Apr 2021 18:01:03 +0200 Subject: [PATCH 020/172] net: remove now unnecessary Sock::Get() `Sock::Get()` was used only in `sock.{cpp,h}`. Remove it and access `Sock::m_socket` directly. Unit tests that used `Get()` to test for equality still verify that the behavior is correct by using the added `operator==()`. 
--- src/test/sock_tests.cpp | 6 +++--- src/util/sock.cpp | 7 +++++-- src/util/sock.h | 32 +++++++++++++++----------------- 3 files changed, 23 insertions(+), 22 deletions(-) diff --git a/src/test/sock_tests.cpp b/src/test/sock_tests.cpp index 26ee724bf806d..38a4804fcf495 100644 --- a/src/test/sock_tests.cpp +++ b/src/test/sock_tests.cpp @@ -38,7 +38,7 @@ BOOST_AUTO_TEST_CASE(constructor_and_destructor) { const SOCKET s = CreateSocket(); Sock* sock = new Sock(s); - BOOST_CHECK_EQUAL(sock->Get(), s); + BOOST_CHECK(*sock == s); BOOST_CHECK(!SocketIsClosed(s)); delete sock; BOOST_CHECK(SocketIsClosed(s)); @@ -51,7 +51,7 @@ BOOST_AUTO_TEST_CASE(move_constructor) Sock* sock2 = new Sock(std::move(*sock1)); delete sock1; BOOST_CHECK(!SocketIsClosed(s)); - BOOST_CHECK_EQUAL(sock2->Get(), s); + BOOST_CHECK(*sock2 == s); delete sock2; BOOST_CHECK(SocketIsClosed(s)); } @@ -64,7 +64,7 @@ BOOST_AUTO_TEST_CASE(move_assignment) *sock2 = std::move(*sock1); delete sock1; BOOST_CHECK(!SocketIsClosed(s)); - BOOST_CHECK_EQUAL(sock2->Get(), s); + BOOST_CHECK(*sock2 == s); delete sock2; BOOST_CHECK(SocketIsClosed(s)); } diff --git a/src/util/sock.cpp b/src/util/sock.cpp index fd64cae404b5a..e08edd42b74b5 100644 --- a/src/util/sock.cpp +++ b/src/util/sock.cpp @@ -44,8 +44,6 @@ Sock& Sock::operator=(Sock&& other) return *this; } -SOCKET Sock::Get() const { return m_socket; } - ssize_t Sock::Send(const void* data, size_t len, int flags) const { return send(m_socket, static_cast(data), len, flags); @@ -411,6 +409,11 @@ void Sock::Close() m_socket = INVALID_SOCKET; } +bool Sock::operator==(SOCKET s) const +{ + return m_socket == s; +}; + std::string NetworkErrorString(int err) { #if defined(WIN32) diff --git a/src/util/sock.h b/src/util/sock.h index 6bac2dfd346f1..a3d8dfe3b63d0 100644 --- a/src/util/sock.h +++ b/src/util/sock.h @@ -21,8 +21,7 @@ static constexpr auto MAX_WAIT_FOR_IO = 1s; /** - * RAII helper class that manages a socket. Mimics `std::unique_ptr`, but instead of a pointer it - * contains a socket and closes it automatically when it goes out of scope. + * RAII helper class that manages a socket and closes it automatically when it goes out of scope. */ class Sock { @@ -63,43 +62,37 @@ class Sock virtual Sock& operator=(Sock&& other); /** - * Get the value of the contained socket. - * @return socket or INVALID_SOCKET if empty - */ - [[nodiscard]] virtual SOCKET Get() const; - - /** - * send(2) wrapper. Equivalent to `send(this->Get(), data, len, flags);`. Code that uses this + * send(2) wrapper. Equivalent to `send(m_socket, data, len, flags);`. Code that uses this * wrapper can be unit tested if this method is overridden by a mock Sock implementation. */ [[nodiscard]] virtual ssize_t Send(const void* data, size_t len, int flags) const; /** - * recv(2) wrapper. Equivalent to `recv(this->Get(), buf, len, flags);`. Code that uses this + * recv(2) wrapper. Equivalent to `recv(m_socket, buf, len, flags);`. Code that uses this * wrapper can be unit tested if this method is overridden by a mock Sock implementation. */ [[nodiscard]] virtual ssize_t Recv(void* buf, size_t len, int flags) const; /** - * connect(2) wrapper. Equivalent to `connect(this->Get(), addr, addrlen)`. Code that uses this + * connect(2) wrapper. Equivalent to `connect(m_socket, addr, addrlen)`. Code that uses this * wrapper can be unit tested if this method is overridden by a mock Sock implementation. */ [[nodiscard]] virtual int Connect(const sockaddr* addr, socklen_t addr_len) const; /** - * bind(2) wrapper. 
Equivalent to `bind(this->Get(), addr, addr_len)`. Code that uses this
+     * bind(2) wrapper. Equivalent to `bind(m_socket, addr, addr_len)`. Code that uses this
      * wrapper can be unit tested if this method is overridden by a mock Sock implementation.
      */
     [[nodiscard]] virtual int Bind(const sockaddr* addr, socklen_t addr_len) const;
 
     /**
-     * listen(2) wrapper. Equivalent to `listen(this->Get(), backlog)`. Code that uses this
+     * listen(2) wrapper. Equivalent to `listen(m_socket, backlog)`. Code that uses this
      * wrapper can be unit tested if this method is overridden by a mock Sock implementation.
      */
     [[nodiscard]] virtual int Listen(int backlog) const;
 
     /**
-     * accept(2) wrapper. Equivalent to `std::make_unique<Sock>(accept(this->Get(), addr, addr_len))`.
+     * accept(2) wrapper. Equivalent to `std::make_unique<Sock>(accept(m_socket, addr, addr_len))`.
      * Code that uses this wrapper can be unit tested if this method is overridden by a mock Sock
      * implementation.
      * The returned unique_ptr is empty if `accept()` failed in which case errno will be set.
@@ -108,7 +101,7 @@ class Sock
 
     /**
      * getsockopt(2) wrapper. Equivalent to
-     * `getsockopt(this->Get(), level, opt_name, opt_val, opt_len)`. Code that uses this
+     * `getsockopt(m_socket, level, opt_name, opt_val, opt_len)`. Code that uses this
      * wrapper can be unit tested if this method is overridden by a mock Sock implementation.
      */
     [[nodiscard]] virtual int GetSockOpt(int level,
@@ -118,7 +111,7 @@ class Sock
 
     /**
      * setsockopt(2) wrapper. Equivalent to
-     * `setsockopt(this->Get(), level, opt_name, opt_val, opt_len)`. Code that uses this
+     * `setsockopt(m_socket, level, opt_name, opt_val, opt_len)`. Code that uses this
      * wrapper can be unit tested if this method is overridden by a mock Sock implementation.
      */
     [[nodiscard]] virtual int SetSockOpt(int level,
@@ -128,7 +121,7 @@ class Sock
 
     /**
      * getsockname(2) wrapper. Equivalent to
-     * `getsockname(this->Get(), name, name_len)`. Code that uses this
+     * `getsockname(m_socket, name, name_len)`. Code that uses this
      * wrapper can be unit tested if this method is overridden by a mock Sock implementation.
      */
     [[nodiscard]] virtual int GetSockName(sockaddr* name, socklen_t* name_len) const;
@@ -266,6 +259,11 @@ class Sock
      */
     [[nodiscard]] virtual bool IsConnected(std::string& errmsg) const;
 
+    /**
+     * Check if the internal socket is equal to `s`. Use only in tests.
+     */
+    bool operator==(SOCKET s) const;
+
 protected:
     /**
      * Contained socket. `INVALID_SOCKET` designates the object is empty.
From 5086a99b84367a45706af7197da1016dd966e6d9 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Fri, 28 May 2021 15:37:43 +0200 Subject: [PATCH 021/172] net: remove Sock default constructor, it's not necessary --- src/test/fuzz/util/net.cpp | 5 +++-- src/test/sock_tests.cpp | 4 ++-- src/test/util/net.h | 11 ++++++++--- src/util/sock.cpp | 2 -- src/util/sock.h | 5 +---- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/test/fuzz/util/net.cpp b/src/test/fuzz/util/net.cpp index 65bc336297b2a..72e61b7e22b70 100644 --- a/src/test/fuzz/util/net.cpp +++ b/src/test/fuzz/util/net.cpp @@ -56,9 +56,10 @@ CAddress ConsumeAddress(FuzzedDataProvider& fuzzed_data_provider) noexcept } FuzzedSock::FuzzedSock(FuzzedDataProvider& fuzzed_data_provider) - : m_fuzzed_data_provider{fuzzed_data_provider}, m_selectable{fuzzed_data_provider.ConsumeBool()} + : Sock{fuzzed_data_provider.ConsumeIntegralInRange(INVALID_SOCKET - 1, INVALID_SOCKET)}, + m_fuzzed_data_provider{fuzzed_data_provider}, + m_selectable{fuzzed_data_provider.ConsumeBool()} { - m_socket = fuzzed_data_provider.ConsumeIntegralInRange(INVALID_SOCKET - 1, INVALID_SOCKET); } FuzzedSock::~FuzzedSock() diff --git a/src/test/sock_tests.cpp b/src/test/sock_tests.cpp index 38a4804fcf495..9641831e1961b 100644 --- a/src/test/sock_tests.cpp +++ b/src/test/sock_tests.cpp @@ -60,7 +60,7 @@ BOOST_AUTO_TEST_CASE(move_assignment) { const SOCKET s = CreateSocket(); Sock* sock1 = new Sock(s); - Sock* sock2 = new Sock(); + Sock* sock2 = new Sock(INVALID_SOCKET); *sock2 = std::move(*sock1); delete sock1; BOOST_CHECK(!SocketIsClosed(s)); @@ -98,7 +98,7 @@ BOOST_AUTO_TEST_CASE(send_and_receive) SendAndRecvMessage(*sock0, *sock1); Sock* sock0moved = new Sock(std::move(*sock0)); - Sock* sock1moved = new Sock(); + Sock* sock1moved = new Sock(INVALID_SOCKET); *sock1moved = std::move(*sock1); delete sock0; diff --git a/src/test/util/net.h b/src/test/util/net.h index b2f6ebb1637dc..403d0ed0eaeb1 100644 --- a/src/test/util/net.h +++ b/src/test/util/net.h @@ -106,10 +106,10 @@ constexpr auto ALL_NETWORKS = std::array{ class StaticContentsSock : public Sock { public: - explicit StaticContentsSock(const std::string& contents) : m_contents{contents} + explicit StaticContentsSock(const std::string& contents) + : Sock{INVALID_SOCKET}, + m_contents{contents} { - // Just a dummy number that is not INVALID_SOCKET. - m_socket = INVALID_SOCKET - 1; } ~StaticContentsSock() override { m_socket = INVALID_SOCKET; } @@ -192,6 +192,11 @@ class StaticContentsSock : public Sock return true; } + bool IsConnected(std::string&) const override + { + return true; + } + private: const std::string m_contents; mutable size_t m_consumed{0}; diff --git a/src/util/sock.cpp b/src/util/sock.cpp index e08edd42b74b5..d16dc56aa3dfa 100644 --- a/src/util/sock.cpp +++ b/src/util/sock.cpp @@ -24,8 +24,6 @@ static inline bool IOErrorIsPermanent(int err) return err != WSAEAGAIN && err != WSAEINTR && err != WSAEWOULDBLOCK && err != WSAEINPROGRESS; } -Sock::Sock() : m_socket(INVALID_SOCKET) {} - Sock::Sock(SOCKET s) : m_socket(s) {} Sock::Sock(Sock&& other) diff --git a/src/util/sock.h b/src/util/sock.h index a3d8dfe3b63d0..d78e01929bc96 100644 --- a/src/util/sock.h +++ b/src/util/sock.h @@ -26,10 +26,7 @@ static constexpr auto MAX_WAIT_FOR_IO = 1s; class Sock { public: - /** - * Default constructor, creates an empty object that does nothing when destroyed. - */ - Sock(); + Sock() = delete; /** * Take ownership of an existent socket. 
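With the default constructor deleted, an "empty" Sock must now be spelled out explicitly at every call site. A short sketch of the resulting idiom, using only constructs from the diff above:

    Sock empty{INVALID_SOCKET};  // owns nothing
    // Sock implicit;            // no longer compiles: Sock() = delete
    Sock owned{CreateSocket()};
    empty = std::move(owned);    // empty now owns the descriptor
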
From 7df450836969b81e98322c9a09c08b35d1095a25 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Thu, 24 Aug 2023 15:20:53 +0200 Subject: [PATCH 022/172] test: improve sock_tests/move_assignment Use also a socket for the moved-to object and check which one is closed when. --- src/test/sock_tests.cpp | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/src/test/sock_tests.cpp b/src/test/sock_tests.cpp index 9641831e1961b..5dd73dc101610 100644 --- a/src/test/sock_tests.cpp +++ b/src/test/sock_tests.cpp @@ -58,15 +58,27 @@ BOOST_AUTO_TEST_CASE(move_constructor) BOOST_AUTO_TEST_CASE(move_assignment) { - const SOCKET s = CreateSocket(); - Sock* sock1 = new Sock(s); - Sock* sock2 = new Sock(INVALID_SOCKET); + const SOCKET s1 = CreateSocket(); + const SOCKET s2 = CreateSocket(); + Sock* sock1 = new Sock(s1); + Sock* sock2 = new Sock(s2); + + BOOST_CHECK(!SocketIsClosed(s1)); + BOOST_CHECK(!SocketIsClosed(s2)); + *sock2 = std::move(*sock1); + BOOST_CHECK(!SocketIsClosed(s1)); + BOOST_CHECK(SocketIsClosed(s2)); + BOOST_CHECK(*sock2 == s1); + delete sock1; - BOOST_CHECK(!SocketIsClosed(s)); - BOOST_CHECK(*sock2 == s); + BOOST_CHECK(!SocketIsClosed(s1)); + BOOST_CHECK(SocketIsClosed(s2)); + BOOST_CHECK(*sock2 == s1); + delete sock2; - BOOST_CHECK(SocketIsClosed(s)); + BOOST_CHECK(SocketIsClosed(s1)); + BOOST_CHECK(SocketIsClosed(s2)); } #ifndef WIN32 // Windows does not have socketpair(2). From e14cc8fc69cb3e3a98076fbb23a94eba7873368a Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 8 Sep 2023 10:58:37 -0300 Subject: [PATCH 023/172] gui: macOS, do not process dock icon actions during shutdown As the 'QMenuBar' is created without a parent window in MacOS, the app crashes when the user presses the shutdown button and, right after it, triggers any action in the menu bar. This happens because the QMenuBar is manually deleted in the BitcoinGUI destructor but the events attached to it children actions are not disconnected, so QActions events such us the 'QMenu::aboutToShow' could try to access null pointers. Instead of guarding every single QAction pointer inside the QMenu::aboutToShow slot, or manually disconnecting all registered events in the destructor, we can check if a shutdown was requested and discard the event. The 'node' field is a ref whose memory is held by the main application class, so it is safe to use here. Events are disconnected prior destructing the main application object. Furthermore, the 'MacDockIconHandler::dockIconClicked' signal can make the app crash during shutdown for the very same reason. The 'show()' call triggers the 'QApplication::focusWindowChanged' event, which is connected to the 'minimize_action' QAction, which is also part of the app menu bar, which could no longer exist. --- src/qt/bitcoingui.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index b84cd02bdade9..73e09630ded6e 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -864,6 +864,7 @@ void BitcoinGUI::createTrayIconMenu() // Note: On macOS, the Dock icon is used to provide the tray's functionality. MacDockIconHandler* dockIconHandler = MacDockIconHandler::instance(); connect(dockIconHandler, &MacDockIconHandler::dockIconClicked, [this] { + if (m_node.shutdownRequested()) return; // nothing to show, node is shutting down. 
show(); activateWindow(); }); @@ -875,6 +876,8 @@ void BitcoinGUI::createTrayIconMenu() // See https://bugreports.qt.io/browse/QTBUG-91697 trayIconMenu.get(), &QMenu::aboutToShow, [this, show_hide_action, send_action, receive_action, sign_action, verify_action, options_action, node_window_action, quit_action] { + if (m_node.shutdownRequested()) return; // nothing to do, node is shutting down. + if (show_hide_action) show_hide_action->setText( (!isHidden() && !isMinimized() && !GUIUtil::isObscured(this)) ? tr("&Hide") : From bae209e3879fa099302d3b211362c49bbbfbdd14 Mon Sep 17 00:00:00 2001 From: furszy Date: Tue, 12 Sep 2023 11:15:41 -0300 Subject: [PATCH 024/172] gui: macOS, make appMenuBar part of the main app window By moving the appMenuBar destruction responsibility to the QT framework, we ensure the disconnection of the submenus signals prior to the destruction of the main app window. The standalone menu bar may have served a purpose in earlier versions when it didn't contain actions that directly open specific screens within the main application window. However, at present, all the actions within the appMenuBar lead to the opening of screens within the main app window. So, the absence of a main app window makes these actions essentially pointless. --- src/qt/bitcoingui.cpp | 7 ------- 1 file changed, 7 deletions(-) diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index 73e09630ded6e..3375573677040 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -239,7 +239,6 @@ BitcoinGUI::~BitcoinGUI() trayIcon->hide(); #ifdef Q_OS_MACOS delete m_app_nap_inhibitor; - delete appMenuBar; MacDockIconHandler::cleanup(); #endif @@ -470,13 +469,7 @@ void BitcoinGUI::createActions() void BitcoinGUI::createMenuBar() { -#ifdef Q_OS_MACOS - // Create a decoupled menu bar on Mac which stays even if the window is closed - appMenuBar = new QMenuBar(); -#else - // Get the main window's menu bar on other platforms appMenuBar = menuBar(); -#endif // Configure the menus QMenu *file = appMenuBar->addMenu(tr("&File")); From fa6e6a3f03a38f8b431bf694268ed344d1815b3b Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Mon, 21 Aug 2023 14:51:16 +0200 Subject: [PATCH 025/172] doc: Remove confusing assert linter --- doc/developer-notes.md | 6 ------ test/lint/lint-assertions.py | 12 +----------- 2 files changed, 1 insertion(+), 17 deletions(-) diff --git a/doc/developer-notes.md b/doc/developer-notes.md index 80353bcdd264d..3c3f612053dcd 100644 --- a/doc/developer-notes.md +++ b/doc/developer-notes.md @@ -739,12 +739,6 @@ Common misconceptions are clarified in those sections: - Passing (non-)fundamental types in the [C++ Core Guideline](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#Rf-conventional). -- Assertions should not have side-effects. - - - *Rationale*: Even though the source code is set to refuse to compile - with assertions disabled, having side-effects in assertions is unexpected and - makes the code harder to understand. - - If you use the `.h`, you must link the `.cpp`. - *Rationale*: Include files define the interface for the code in implementation files. 
Including one but diff --git a/test/lint/lint-assertions.py b/test/lint/lint-assertions.py index 6da59b0d48d31..d9f86b22b8f79 100755 --- a/test/lint/lint-assertions.py +++ b/test/lint/lint-assertions.py @@ -23,20 +23,10 @@ def git_grep(params: [], error_msg: ""): def main(): - # PRE31-C (SEI CERT C Coding Standard): - # "Assertions should not contain assignments, increment, or decrement operators." - exit_code = git_grep([ - "-E", - r"[^_]assert\(.*(\+\+|\-\-|[^=!<>]=[^=!<>]).*\);", - "--", - "*.cpp", - "*.h", - ], "Assertions should not have side effects:") - # Aborting the whole process is undesirable for RPC code. So nonfatal # checks should be used over assert. See: src/util/check.h # src/rpc/server.cpp is excluded from this check since it's mostly meta-code. - exit_code |= git_grep([ + exit_code = git_grep([ "-nE", r"\<(A|a)ss(ume|ert) *\(.*\);", "--", From 360b917674e63c1e95119040463b3f50976bf331 Mon Sep 17 00:00:00 2001 From: Erik Arvstedt Date: Tue, 19 Sep 2023 13:42:51 +0200 Subject: [PATCH 026/172] contrib/bash-completions: use package naming conventions This naming scheme supports auto-detection and on-demand loading of completions. See https://github.com/scop/bash-completion/blob/ba109693ee2284f6a82f8f0e1563baf071252df9/README.md#faq, section "Where should I put it to be sure that interactive bash shells will find it and source it". Previously, distro package maintainers had to rename these files manually. --- .../bash/{bitcoin-cli.bash-completion => bitcoin-cli.bash} | 0 .../bash/{bitcoin-tx.bash-completion => bitcoin-tx.bash} | 0 .../completions/bash/{bitcoind.bash-completion => bitcoind.bash} | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename contrib/completions/bash/{bitcoin-cli.bash-completion => bitcoin-cli.bash} (100%) rename contrib/completions/bash/{bitcoin-tx.bash-completion => bitcoin-tx.bash} (100%) rename contrib/completions/bash/{bitcoind.bash-completion => bitcoind.bash} (100%) diff --git a/contrib/completions/bash/bitcoin-cli.bash-completion b/contrib/completions/bash/bitcoin-cli.bash similarity index 100% rename from contrib/completions/bash/bitcoin-cli.bash-completion rename to contrib/completions/bash/bitcoin-cli.bash diff --git a/contrib/completions/bash/bitcoin-tx.bash-completion b/contrib/completions/bash/bitcoin-tx.bash similarity index 100% rename from contrib/completions/bash/bitcoin-tx.bash-completion rename to contrib/completions/bash/bitcoin-tx.bash diff --git a/contrib/completions/bash/bitcoind.bash-completion b/contrib/completions/bash/bitcoind.bash similarity index 100% rename from contrib/completions/bash/bitcoind.bash-completion rename to contrib/completions/bash/bitcoind.bash From fa4a9c0f4334678fb80358ead667807bf2a0a153 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Mon, 11 Sep 2023 14:58:22 +0000 Subject: [PATCH 027/172] Remove unused GetType() from OverrideStream, CVectorWriter, SpanReader GetType() is never called, so it is completely unused and can be removed. 
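In practice every call site simply drops the first (serialization type) argument and keeps the version. A before/after sketch of the constructor shapes, matching the call-site churn in the diff below:

    std::vector<unsigned char> bytes;
    // Before: CVectorWriter w(SER_NETWORK, INIT_PROTO_VERSION, bytes, 0);
    //         SpanReader    r{SER_NETWORK, INIT_PROTO_VERSION, bytes};
    CVectorWriter w{INIT_PROTO_VERSION, bytes, 0};
    SpanReader r{INIT_PROTO_VERSION, bytes};
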
--- src/blockfilter.cpp | 9 ++--- src/net.cpp | 2 +- src/netmessagemaker.h | 2 +- src/psbt.h | 26 +++++++------- src/signet.cpp | 4 +-- src/streams.h | 23 ++++--------- src/test/fuzz/golomb_rice.cpp | 6 ++-- .../fuzz/script_assets_test_minimizer.cpp | 4 +-- src/test/script_tests.cpp | 6 ++-- src/test/streams_tests.cpp | 34 +++++++++---------- src/wallet/test/wallet_tests.cpp | 4 +-- 11 files changed, 53 insertions(+), 67 deletions(-) diff --git a/src/blockfilter.cpp b/src/blockfilter.cpp index 985a81f52284a..dd3824fb1c597 100644 --- a/src/blockfilter.cpp +++ b/src/blockfilter.cpp @@ -16,9 +16,6 @@ #include #include -/// SerType used to serialize parameters in GCS filter encoding. -static constexpr int GCS_SER_TYPE = SER_NETWORK; - /// Protocol version used to serialize parameters in GCS filter encoding. static constexpr int GCS_SER_VERSION = 0; @@ -52,7 +49,7 @@ GCSFilter::GCSFilter(const Params& params) GCSFilter::GCSFilter(const Params& params, std::vector encoded_filter, bool skip_decode_check) : m_params(params), m_encoded(std::move(encoded_filter)) { - SpanReader stream{GCS_SER_TYPE, GCS_SER_VERSION, m_encoded}; + SpanReader stream{GCS_SER_VERSION, m_encoded}; uint64_t N = ReadCompactSize(stream); m_N = static_cast(N); @@ -84,7 +81,7 @@ GCSFilter::GCSFilter(const Params& params, const ElementSet& elements) } m_F = static_cast(m_N) * static_cast(m_params.m_M); - CVectorWriter stream(GCS_SER_TYPE, GCS_SER_VERSION, m_encoded, 0); + CVectorWriter stream(GCS_SER_VERSION, m_encoded, 0); WriteCompactSize(stream, m_N); @@ -106,7 +103,7 @@ GCSFilter::GCSFilter(const Params& params, const ElementSet& elements) bool GCSFilter::MatchInternal(const uint64_t* element_hashes, size_t size) const { - SpanReader stream{GCS_SER_TYPE, GCS_SER_VERSION, m_encoded}; + SpanReader stream{GCS_SER_VERSION, m_encoded}; // Seek forward by size of N uint64_t N = ReadCompactSize(stream); diff --git a/src/net.cpp b/src/net.cpp index ef1135bb8c5a8..29d66a1d1ae6c 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -859,7 +859,7 @@ bool V1Transport::SetMessageToSend(CSerializedNetMsg& msg) noexcept // serialize header m_header_to_send.clear(); - CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, m_header_to_send, 0, hdr}; + CVectorWriter{INIT_PROTO_VERSION, m_header_to_send, 0, hdr}; // update state m_message_to_send = std::move(msg); diff --git a/src/netmessagemaker.h b/src/netmessagemaker.h index 89fb4758f9e07..a121183aabe24 100644 --- a/src/netmessagemaker.h +++ b/src/netmessagemaker.h @@ -19,7 +19,7 @@ class CNetMsgMaker { CSerializedNetMsg msg; msg.m_type = std::move(msg_type); - CVectorWriter{ SER_NETWORK, nFlags | nVersion, msg.data, 0, std::forward(args)... 
}; + CVectorWriter{nFlags | nVersion, msg.data, 0, std::forward(args)...}; return msg; } diff --git a/src/psbt.h b/src/psbt.h index 9464b10268a5f..48e045308481f 100644 --- a/src/psbt.h +++ b/src/psbt.h @@ -226,7 +226,7 @@ struct PSBTInput // Write the utxo if (non_witness_utxo) { SerializeToVector(s, CompactSizeWriter(PSBT_IN_NON_WITNESS_UTXO)); - OverrideStream os(&s, s.GetType(), s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS); + OverrideStream os{&s, s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS}; SerializeToVector(os, non_witness_utxo); } if (!witness_utxo.IsNull()) { @@ -315,7 +315,7 @@ struct PSBTInput const auto& [leaf_hashes, origin] = leaf_origin; SerializeToVector(s, PSBT_IN_TAP_BIP32_DERIVATION, xonly); std::vector value; - CVectorWriter s_value(s.GetType(), s.GetVersion(), value, 0); + CVectorWriter s_value{s.GetVersion(), value, 0}; s_value << leaf_hashes; SerializeKeyOrigin(s_value, origin); s << value; @@ -381,7 +381,7 @@ struct PSBTInput } // Type is compact size uint at beginning of key - SpanReader skey(s.GetType(), s.GetVersion(), key); + SpanReader skey{s.GetVersion(), key}; uint64_t type = ReadCompactSize(skey); // Do stuff based on type @@ -394,7 +394,7 @@ struct PSBTInput throw std::ios_base::failure("Non-witness utxo key is more than one byte type"); } // Set the stream to unserialize with witness since this is always a valid network transaction - OverrideStream os(&s, s.GetType(), s.GetVersion() & ~SERIALIZE_TRANSACTION_NO_WITNESS); + OverrideStream os{&s, s.GetVersion() & ~SERIALIZE_TRANSACTION_NO_WITNESS}; UnserializeFromVector(os, non_witness_utxo); break; } @@ -590,7 +590,7 @@ struct PSBTInput } else if (key.size() != 65) { throw std::ios_base::failure("Input Taproot script signature key is not 65 bytes"); } - SpanReader s_key(s.GetType(), s.GetVersion(), Span{key}.subspan(1)); + SpanReader s_key{s.GetVersion(), Span{key}.subspan(1)}; XOnlyPubKey xonly; uint256 hash; s_key >> xonly; @@ -632,7 +632,7 @@ struct PSBTInput } else if (key.size() != 33) { throw std::ios_base::failure("Input Taproot BIP32 keypath key is not at 33 bytes"); } - SpanReader s_key(s.GetType(), s.GetVersion(), Span{key}.subspan(1)); + SpanReader s_key{s.GetVersion(), Span{key}.subspan(1)}; XOnlyPubKey xonly; s_key >> xonly; std::set leaf_hashes; @@ -757,7 +757,7 @@ struct PSBTOutput if (!m_tap_tree.empty()) { SerializeToVector(s, PSBT_OUT_TAP_TREE); std::vector value; - CVectorWriter s_value(s.GetType(), s.GetVersion(), value, 0); + CVectorWriter s_value{s.GetVersion(), value, 0}; for (const auto& [depth, leaf_ver, script] : m_tap_tree) { s_value << depth; s_value << leaf_ver; @@ -771,7 +771,7 @@ struct PSBTOutput const auto& [leaf_hashes, origin] = leaf; SerializeToVector(s, PSBT_OUT_TAP_BIP32_DERIVATION, xonly); std::vector value; - CVectorWriter s_value(s.GetType(), s.GetVersion(), value, 0); + CVectorWriter s_value{s.GetVersion(), value, 0}; s_value << leaf_hashes; SerializeKeyOrigin(s_value, origin); s << value; @@ -807,7 +807,7 @@ struct PSBTOutput } // Type is compact size uint at beginning of key - SpanReader skey(s.GetType(), s.GetVersion(), key); + SpanReader skey{s.GetVersion(), key}; uint64_t type = ReadCompactSize(skey); // Do stuff based on type @@ -856,7 +856,7 @@ struct PSBTOutput } std::vector tree_v; s >> tree_v; - SpanReader s_tree(s.GetType(), s.GetVersion(), tree_v); + SpanReader s_tree{s.GetVersion(), tree_v}; if (s_tree.empty()) { throw std::ios_base::failure("Output Taproot tree must not be empty"); } @@ -984,7 +984,7 @@ struct PartiallySignedTransaction 
SerializeToVector(s, CompactSizeWriter(PSBT_GLOBAL_UNSIGNED_TX)); // Write serialized tx to a stream - OverrideStream os(&s, s.GetType(), s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS); + OverrideStream os{&s, s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS}; SerializeToVector(os, *tx); // Write xpubs @@ -1061,7 +1061,7 @@ struct PartiallySignedTransaction } // Type is compact size uint at beginning of key - SpanReader skey(s.GetType(), s.GetVersion(), key); + SpanReader skey{s.GetVersion(), key}; uint64_t type = ReadCompactSize(skey); // Do stuff based on type @@ -1075,7 +1075,7 @@ struct PartiallySignedTransaction } CMutableTransaction mtx; // Set the stream to serialize with non-witness since this should always be non-witness - OverrideStream os(&s, s.GetType(), s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS); + OverrideStream os{&s, s.GetVersion() | SERIALIZE_TRANSACTION_NO_WITNESS}; UnserializeFromVector(os, mtx); tx = std::move(mtx); // Make sure that all scriptSigs and scriptWitnesses are empty diff --git a/src/signet.cpp b/src/signet.cpp index 21b289b637b29..ef0faaa5f8232 100644 --- a/src/signet.cpp +++ b/src/signet.cpp @@ -98,7 +98,7 @@ std::optional SignetTxs::Create(const CBlock& block, const CScript& c // no signet solution -- allow this to support OP_TRUE as trivial block challenge } else { try { - SpanReader v{SER_NETWORK, INIT_PROTO_VERSION, signet_solution}; + SpanReader v{INIT_PROTO_VERSION, signet_solution}; v >> tx_spending.vin[0].scriptSig; v >> tx_spending.vin[0].scriptWitness.stack; if (!v.empty()) return std::nullopt; // extraneous data encountered @@ -109,7 +109,7 @@ std::optional SignetTxs::Create(const CBlock& block, const CScript& c uint256 signet_merkle = ComputeModifiedMerkleRoot(modified_cb, block); std::vector block_data; - CVectorWriter writer(SER_NETWORK, INIT_PROTO_VERSION, block_data, 0); + CVectorWriter writer{INIT_PROTO_VERSION, block_data, 0}; writer << block.nVersion; writer << block.hashPrevBlock; writer << signet_merkle; diff --git a/src/streams.h b/src/streams.h index f9a817c9b646f..2b9a5023bf76b 100644 --- a/src/streams.h +++ b/src/streams.h @@ -50,11 +50,10 @@ class OverrideStream { Stream* stream; - const int nType; const int nVersion; public: - OverrideStream(Stream* stream_, int nType_, int nVersion_) : stream(stream_), nType(nType_), nVersion(nVersion_) {} + OverrideStream(Stream* stream_, int nVersion_) : stream{stream_}, nVersion{nVersion_} {} template OverrideStream& operator<<(const T& obj) @@ -81,7 +80,6 @@ class OverrideStream } int GetVersion() const { return nVersion; } - int GetType() const { return nType; } size_t size() const { return stream->size(); } void ignore(size_t size) { return stream->ignore(size); } }; @@ -95,13 +93,12 @@ class CVectorWriter public: /* - * @param[in] nTypeIn Serialization Type * @param[in] nVersionIn Serialization Version (including any flags) * @param[in] vchDataIn Referenced byte vector to overwrite/append * @param[in] nPosIn Starting position. Vector index where writes should start. The vector will initially * grow as necessary to max(nPosIn, vec.size()). So to append, use vec.size(). 
*/ - CVectorWriter(int nTypeIn, int nVersionIn, std::vector& vchDataIn, size_t nPosIn) : nType(nTypeIn), nVersion(nVersionIn), vchData(vchDataIn), nPos(nPosIn) + CVectorWriter(int nVersionIn, std::vector& vchDataIn, size_t nPosIn) : nVersion{nVersionIn}, vchData{vchDataIn}, nPos{nPosIn} { if(nPos > vchData.size()) vchData.resize(nPos); @@ -111,7 +108,7 @@ class CVectorWriter * @param[in] args A list of items to serialize starting at nPosIn. */ template - CVectorWriter(int nTypeIn, int nVersionIn, std::vector& vchDataIn, size_t nPosIn, Args&&... args) : CVectorWriter(nTypeIn, nVersionIn, vchDataIn, nPosIn) + CVectorWriter(int nVersionIn, std::vector& vchDataIn, size_t nPosIn, Args&&... args) : CVectorWriter{nVersionIn, vchDataIn, nPosIn} { ::SerializeMany(*this, std::forward(args)...); } @@ -137,12 +134,8 @@ class CVectorWriter { return nVersion; } - int GetType() const - { - return nType; - } + private: - const int nType; const int nVersion; std::vector& vchData; size_t nPos; @@ -153,19 +146,16 @@ class CVectorWriter class SpanReader { private: - const int m_type; const int m_version; Span m_data; public: - /** - * @param[in] type Serialization Type * @param[in] version Serialization Version (including any flags) * @param[in] data Referenced byte vector to overwrite/append */ - SpanReader(int type, int version, Span data) - : m_type(type), m_version(version), m_data(data) {} + SpanReader(int version, Span data) + : m_version{version}, m_data{data} {} template SpanReader& operator>>(T&& obj) @@ -175,7 +165,6 @@ class SpanReader } int GetVersion() const { return m_version; } - int GetType() const { return m_type; } size_t size() const { return m_data.size(); } bool empty() const { return m_data.empty(); } diff --git a/src/test/fuzz/golomb_rice.cpp b/src/test/fuzz/golomb_rice.cpp index e006653ca94ce..f3073c5c973b8 100644 --- a/src/test/fuzz/golomb_rice.cpp +++ b/src/test/fuzz/golomb_rice.cpp @@ -51,7 +51,7 @@ FUZZ_TARGET(golomb_rice) for (int i = 0; i < n; ++i) { elements.insert(ConsumeRandomLengthByteVector(fuzzed_data_provider, 16)); } - CVectorWriter stream(SER_NETWORK, 0, golomb_rice_data, 0); + CVectorWriter stream{0, golomb_rice_data, 0}; WriteCompactSize(stream, static_cast(elements.size())); BitStreamWriter bitwriter(stream); if (!elements.empty()) { @@ -68,7 +68,7 @@ FUZZ_TARGET(golomb_rice) std::vector decoded_deltas; { - SpanReader stream{SER_NETWORK, 0, golomb_rice_data}; + SpanReader stream{0, golomb_rice_data}; BitStreamReader bitreader{stream}; const uint32_t n = static_cast(ReadCompactSize(stream)); for (uint32_t i = 0; i < n; ++i) { @@ -80,7 +80,7 @@ FUZZ_TARGET(golomb_rice) { const std::vector random_bytes = ConsumeRandomLengthByteVector(fuzzed_data_provider, 1024); - SpanReader stream{SER_NETWORK, 0, random_bytes}; + SpanReader stream{0, random_bytes}; uint32_t n; try { n = static_cast(ReadCompactSize(stream)); diff --git a/src/test/fuzz/script_assets_test_minimizer.cpp b/src/test/fuzz/script_assets_test_minimizer.cpp index 7862be2f21353..66c862a6f9084 100644 --- a/src/test/fuzz/script_assets_test_minimizer.cpp +++ b/src/test/fuzz/script_assets_test_minimizer.cpp @@ -54,7 +54,7 @@ CMutableTransaction TxFromHex(const std::string& str) { CMutableTransaction tx; try { - SpanReader{SER_DISK, SERIALIZE_TRANSACTION_NO_WITNESS, CheckedParseHex(str)} >> tx; + SpanReader{SERIALIZE_TRANSACTION_NO_WITNESS, CheckedParseHex(str)} >> tx; } catch (const std::ios_base::failure&) { throw std::runtime_error("Tx deserialization failure"); } @@ -68,7 +68,7 @@ std::vector TxOutsFromJSON(const 
UniValue& univalue) for (size_t i = 0; i < univalue.size(); ++i) { CTxOut txout; try { - SpanReader{SER_DISK, 0, CheckedParseHex(univalue[i].get_str())} >> txout; + SpanReader{0, CheckedParseHex(univalue[i].get_str())} >> txout; } catch (const std::ios_base::failure&) { throw std::runtime_error("Prevout invalid format"); } diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index d63bfb9603292..94656b229ed30 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -1470,7 +1470,7 @@ BOOST_AUTO_TEST_CASE(script_HasValidOps) static CMutableTransaction TxFromHex(const std::string& str) { CMutableTransaction tx; - SpanReader{SER_DISK, SERIALIZE_TRANSACTION_NO_WITNESS, ParseHex(str)} >> tx; + SpanReader{SERIALIZE_TRANSACTION_NO_WITNESS, ParseHex(str)} >> tx; return tx; } @@ -1480,7 +1480,7 @@ static std::vector TxOutsFromJSON(const UniValue& univalue) std::vector prevouts; for (size_t i = 0; i < univalue.size(); ++i) { CTxOut txout; - SpanReader{SER_DISK, 0, ParseHex(univalue[i].get_str())} >> txout; + SpanReader{0, ParseHex(univalue[i].get_str())} >> txout; prevouts.push_back(std::move(txout)); } return prevouts; @@ -1751,7 +1751,7 @@ BOOST_AUTO_TEST_CASE(bip341_keypath_test_vectors) for (const auto& vec : vectors.getValues()) { auto txhex = ParseHex(vec["given"]["rawUnsignedTx"].get_str()); CMutableTransaction tx; - SpanReader{SER_NETWORK, PROTOCOL_VERSION, txhex} >> tx; + SpanReader{PROTOCOL_VERSION, txhex} >> tx; std::vector utxos; for (const auto& utxo_spent : vec["given"]["utxosSpent"].getValues()) { auto script_bytes = ParseHex(utxo_spent["scriptPubKey"].get_str()); diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 99740ee7792f3..aca38c747f6fc 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -74,49 +74,49 @@ BOOST_AUTO_TEST_CASE(streams_vector_writer) // point should yield the same results, even if the first test grew the // vector. 
- CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 0, a, b}; BOOST_CHECK((vch == std::vector{{1, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 0, a, b}; BOOST_CHECK((vch == std::vector{{1, 2}})); vch.clear(); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, b}; BOOST_CHECK((vch == std::vector{{0, 0, 1, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, b}; BOOST_CHECK((vch == std::vector{{0, 0, 1, 2}})); vch.clear(); vch.resize(5, 0); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, b}; BOOST_CHECK((vch == std::vector{{0, 0, 1, 2, 0}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, b}; BOOST_CHECK((vch == std::vector{{0, 0, 1, 2, 0}})); vch.clear(); vch.resize(4, 0); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 3, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 3, a, b}; BOOST_CHECK((vch == std::vector{{0, 0, 0, 1, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 3, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 3, a, b}; BOOST_CHECK((vch == std::vector{{0, 0, 0, 1, 2}})); vch.clear(); vch.resize(4, 0); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 4, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 4, a, b}; BOOST_CHECK((vch == std::vector{{0, 0, 0, 0, 1, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 4, a, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 4, a, b}; BOOST_CHECK((vch == std::vector{{0, 0, 0, 0, 1, 2}})); vch.clear(); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, bytes); + CVectorWriter{INIT_PROTO_VERSION, vch, 0, bytes}; BOOST_CHECK((vch == std::vector{{3, 4, 5, 6}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 0, bytes); + CVectorWriter{INIT_PROTO_VERSION, vch, 0, bytes}; BOOST_CHECK((vch == std::vector{{3, 4, 5, 6}})); vch.clear(); vch.resize(4, 8); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, bytes, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, bytes, b}; BOOST_CHECK((vch == std::vector{{8, 8, 1, 3, 4, 5, 6, 2}})); - CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, 2, a, bytes, b); + CVectorWriter{INIT_PROTO_VERSION, vch, 2, a, bytes, b}; BOOST_CHECK((vch == std::vector{{8, 8, 1, 3, 4, 5, 6, 2}})); vch.clear(); } @@ -125,7 +125,7 @@ BOOST_AUTO_TEST_CASE(streams_vector_reader) { std::vector vch = {1, 255, 3, 4, 5, 6}; - SpanReader reader{SER_NETWORK, INIT_PROTO_VERSION, vch}; + SpanReader reader{INIT_PROTO_VERSION, vch}; BOOST_CHECK_EQUAL(reader.size(), 6U); BOOST_CHECK(!reader.empty()); @@ -155,7 +155,7 @@ BOOST_AUTO_TEST_CASE(streams_vector_reader) BOOST_CHECK_THROW(reader >> d, std::ios_base::failure); // Read a 4 bytes as a signed int from the beginning of the buffer. 
- SpanReader new_reader{SER_NETWORK, INIT_PROTO_VERSION, vch}; + SpanReader new_reader{INIT_PROTO_VERSION, vch}; new_reader >> d; BOOST_CHECK_EQUAL(d, 67370753); // 1,255,3,4 in little-endian base-256 BOOST_CHECK_EQUAL(new_reader.size(), 2U); @@ -169,7 +169,7 @@ BOOST_AUTO_TEST_CASE(streams_vector_reader) BOOST_AUTO_TEST_CASE(streams_vector_reader_rvalue) { std::vector data{0x82, 0xa7, 0x31}; - SpanReader reader{SER_NETWORK, INIT_PROTO_VERSION, data}; + SpanReader reader{INIT_PROTO_VERSION, data}; uint32_t varint = 0; // Deserialize into r-value reader >> VARINT(varint); diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index 5c297d76e4881..ab17f51f8e1cf 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -752,14 +752,14 @@ bool malformed_descriptor(std::ios_base::failure e) BOOST_FIXTURE_TEST_CASE(wallet_descriptor_test, BasicTestingSetup) { std::vector malformed_record; - CVectorWriter vw(0, 0, malformed_record, 0); + CVectorWriter vw{0, malformed_record, 0}; vw << std::string("notadescriptor"); vw << uint64_t{0}; vw << int32_t{0}; vw << int32_t{0}; vw << int32_t{1}; - SpanReader vr{0, 0, malformed_record}; + SpanReader vr{0, malformed_record}; WalletDescriptor w_desc; BOOST_CHECK_EXCEPTION(vr >> w_desc, std::ios_base::failure, malformed_descriptor); } From fa72f09d6ff8ee204f331a69d3f5e825223c9e11 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Mon, 11 Sep 2023 15:47:52 +0200 Subject: [PATCH 028/172] Remove CHashWriter type The type is only ever set, but never read via GetType(), so remove it. Also, remove SerializeHash to avoid silent merge conflicts and use the already existing GetHash() boilerplate consistently. --- src/hash.h | 13 +------------ src/primitives/block.cpp | 2 +- src/primitives/transaction.cpp | 6 +++--- src/test/hash_tests.cpp | 2 +- src/test/serialize_tests.cpp | 2 +- src/test/sighash_tests.cpp | 2 +- 6 files changed, 8 insertions(+), 19 deletions(-) diff --git a/src/hash.h b/src/hash.h index f2b627ff4f77a..d355b703ff4a3 100644 --- a/src/hash.h +++ b/src/hash.h @@ -149,13 +149,11 @@ class HashWriter class CHashWriter : public HashWriter { private: - const int nType; const int nVersion; public: - CHashWriter(int nTypeIn, int nVersionIn) : nType(nTypeIn), nVersion(nVersionIn) {} + CHashWriter(int nVersionIn) : nVersion{nVersionIn} {} - int GetType() const { return nType; } int GetVersion() const { return nVersion; } template @@ -223,15 +221,6 @@ class HashedSourceWriter : public HashWriter } }; -/** Compute the 256-bit hash of an object's serialization. */ -template -uint256 SerializeHash(const T& obj, int nType=SER_GETHASH, int nVersion=PROTOCOL_VERSION) -{ - CHashWriter ss(nType, nVersion); - ss << obj; - return ss.GetHash(); -} - /** Single-SHA256 a 32-byte input (represented as uint256). 
*/ [[nodiscard]] uint256 SHA256Uint256(const uint256& input); diff --git a/src/primitives/block.cpp b/src/primitives/block.cpp index 50a30cb511db2..3d21708820371 100644 --- a/src/primitives/block.cpp +++ b/src/primitives/block.cpp @@ -10,7 +10,7 @@ uint256 CBlockHeader::GetHash() const { - return SerializeHash(*this); + return (CHashWriter{PROTOCOL_VERSION} << *this).GetHash(); } std::string CBlock::ToString() const diff --git a/src/primitives/transaction.cpp b/src/primitives/transaction.cpp index 3060746909e2d..2c913bf4327df 100644 --- a/src/primitives/transaction.cpp +++ b/src/primitives/transaction.cpp @@ -67,12 +67,12 @@ CMutableTransaction::CMutableTransaction(const CTransaction& tx) : vin(tx.vin), uint256 CMutableTransaction::GetHash() const { - return SerializeHash(*this, SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS); + return (CHashWriter{SERIALIZE_TRANSACTION_NO_WITNESS} << *this).GetHash(); } uint256 CTransaction::ComputeHash() const { - return SerializeHash(*this, SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS); + return (CHashWriter{SERIALIZE_TRANSACTION_NO_WITNESS} << *this).GetHash(); } uint256 CTransaction::ComputeWitnessHash() const @@ -80,7 +80,7 @@ uint256 CTransaction::ComputeWitnessHash() const if (!HasWitness()) { return hash; } - return SerializeHash(*this, SER_GETHASH, 0); + return (CHashWriter{0} << *this).GetHash(); } CTransaction::CTransaction(const CMutableTransaction& tx) : vin(tx.vin), vout(tx.vout), nVersion(tx.nVersion), nLockTime(tx.nLockTime), hash{ComputeHash()}, m_witness_hash{ComputeWitnessHash()} {} diff --git a/src/test/hash_tests.cpp b/src/test/hash_tests.cpp index a990797ca7c42..54afcef98974b 100644 --- a/src/test/hash_tests.cpp +++ b/src/test/hash_tests.cpp @@ -122,7 +122,7 @@ BOOST_AUTO_TEST_CASE(siphash) (uint64_t(x+4)<<32)|(uint64_t(x+5)<<40)|(uint64_t(x+6)<<48)|(uint64_t(x+7)<<56)); } - CHashWriter ss(SER_DISK, CLIENT_VERSION); + CHashWriter ss{CLIENT_VERSION}; CMutableTransaction tx; // Note these tests were originally written with tx.nVersion=1 // and the test would be affected by default tx version bumps if not fixed. diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index 2f2bb6698c2ff..d18d2623b11c2 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -176,7 +176,7 @@ BOOST_AUTO_TEST_CASE(vector_bool) std::vector vec2{1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1}; BOOST_CHECK(vec1 == std::vector(vec2.begin(), vec2.end())); - BOOST_CHECK(SerializeHash(vec1) == SerializeHash(vec2)); + BOOST_CHECK((HashWriter{} << vec1).GetHash() == (HashWriter{} << vec2).GetHash()); } BOOST_AUTO_TEST_CASE(noncanonical) diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index d1c0e1349e871..178b16772b7ef 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -78,7 +78,7 @@ uint256 static SignatureHashOld(CScript scriptCode, const CTransaction& txTo, un } // Serialize and hash - CHashWriter ss(SER_GETHASH, SERIALIZE_TRANSACTION_NO_WITNESS); + CHashWriter ss{SERIALIZE_TRANSACTION_NO_WITNESS}; ss << txTmp << nHashType; return ss.GetHash(); } From fac29a0ab19fda457b55d7a0a37c5cd3d9680f82 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Mon, 11 Sep 2023 16:12:34 +0000 Subject: [PATCH 029/172] Remove SER_GETHASH, hard-code client version in CKeyPool serialize It was never set, so it can be removed along with any code reading it. 
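Because no stream ever set SER_GETHASH, the flag check in CKeyPool always took the same branch. A condensed sketch of the old dead pattern and its replacement, per the diff below:

    // Old: the branch condition was always true, so the field was always written.
    //   if (!(s.GetType() & SER_GETHASH)) s << nVersion;
    // New: write the (unused) field unconditionally.
    s << int{259900}; // highest client version ever written
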
--- src/serialize.h | 1 - src/wallet/scriptpubkeyman.h | 10 ++-------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/src/serialize.h b/src/serialize.h index 1ad8ac4373500..e53ff9fa4c98c 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -131,7 +131,6 @@ enum // primary actions SER_NETWORK = (1 << 0), SER_DISK = (1 << 1), - SER_GETHASH = (1 << 2), }; /** diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index ec7b017720f81..3722c1ae1fb35 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -123,20 +123,14 @@ class CKeyPool template void Serialize(Stream& s) const { - int nVersion = s.GetVersion(); - if (!(s.GetType() & SER_GETHASH)) { - s << nVersion; - } + s << int{259900}; // Unused field, writes the highest client version ever written s << nTime << vchPubKey << fInternal << m_pre_split; } template void Unserialize(Stream& s) { - int nVersion = s.GetVersion(); - if (!(s.GetType() & SER_GETHASH)) { - s >> nVersion; - } + s >> int{}; // Discard unused field s >> nTime >> vchPubKey; try { s >> fInternal; From 5f72417176cfffece9a5aa11e97d5a6599c51e7a Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Tue, 16 May 2023 16:29:58 +0100 Subject: [PATCH 030/172] Add ability to specify SHA256 implementation for benchmark purposes --- src/crypto/sha256.cpp | 45 ++++++++++++++++++++++++++++--------------- src/crypto/sha256.h | 14 +++++++++++++- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp index a4eef36480dcd..5eacaa44e18aa 100644 --- a/src/crypto/sha256.cpp +++ b/src/crypto/sha256.cpp @@ -579,9 +579,15 @@ bool AVXEnabled() } // namespace -std::string SHA256AutoDetect() +std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implementation) { std::string ret = "standard"; + Transform = sha256::Transform; + TransformD64 = sha256::TransformD64; + TransformD64_2way = nullptr; + TransformD64_4way = nullptr; + TransformD64_8way = nullptr; + #if defined(USE_ASM) && defined(HAVE_GETCPUID) bool have_sse4 = false; bool have_xsave = false; @@ -592,7 +598,9 @@ std::string SHA256AutoDetect() uint32_t eax, ebx, ecx, edx; GetCPUID(1, 0, eax, ebx, ecx, edx); - have_sse4 = (ecx >> 19) & 1; + if (use_implementation & sha256_implementation::USE_SSE4) { + have_sse4 = (ecx >> 19) & 1; + } have_xsave = (ecx >> 27) & 1; have_avx = (ecx >> 28) & 1; if (have_xsave && have_avx) { @@ -600,8 +608,12 @@ std::string SHA256AutoDetect() } if (have_sse4) { GetCPUID(7, 0, eax, ebx, ecx, edx); - have_avx2 = (ebx >> 5) & 1; - have_x86_shani = (ebx >> 29) & 1; + if (use_implementation & sha256_implementation::USE_AVX2) { + have_avx2 = (ebx >> 5) & 1; + } + if (use_implementation & sha256_implementation::USE_SHANI) { + have_x86_shani = (ebx >> 29) & 1; + } } #if defined(ENABLE_X86_SHANI) && !defined(BUILD_BITCOIN_INTERNAL) @@ -637,27 +649,28 @@ std::string SHA256AutoDetect() #if defined(ENABLE_ARM_SHANI) && !defined(BUILD_BITCOIN_INTERNAL) bool have_arm_shani = false; - + if (use_implementation & sha256_implementation::USE_SHANI) { #if defined(__linux__) #if defined(__arm__) // 32-bit - if (getauxval(AT_HWCAP2) & HWCAP2_SHA2) { - have_arm_shani = true; - } + if (getauxval(AT_HWCAP2) & HWCAP2_SHA2) { + have_arm_shani = true; + } #endif #if defined(__aarch64__) // 64-bit - if (getauxval(AT_HWCAP) & HWCAP_SHA2) { - have_arm_shani = true; - } + if (getauxval(AT_HWCAP) & HWCAP_SHA2) { + have_arm_shani = true; + } #endif #endif #if 
defined(MAC_OSX) - int val = 0; - size_t len = sizeof(val); - if (sysctlbyname("hw.optional.arm.FEAT_SHA256", &val, &len, nullptr, 0) == 0) { - have_arm_shani = val != 0; - } + int val = 0; + size_t len = sizeof(val); + if (sysctlbyname("hw.optional.arm.FEAT_SHA256", &val, &len, nullptr, 0) == 0) { + have_arm_shani = val != 0; + } #endif + } if (have_arm_shani) { Transform = sha256_arm_shani::Transform; diff --git a/src/crypto/sha256.h b/src/crypto/sha256.h index 76255086654d0..b1348631d32e8 100644 --- a/src/crypto/sha256.h +++ b/src/crypto/sha256.h @@ -26,10 +26,22 @@ class CSHA256 CSHA256& Reset(); }; +namespace sha256_implementation { +enum UseImplementation : uint8_t { + STANDARD = 0, + USE_SSE4 = 1 << 0, + USE_AVX2 = 1 << 1, + USE_SHANI = 1 << 2, + USE_SSE4_AND_AVX2 = USE_SSE4 | USE_AVX2, + USE_SSE4_AND_SHANI = USE_SSE4 | USE_SHANI, + USE_ALL = USE_SSE4 | USE_AVX2 | USE_SHANI, +}; +} + /** Autodetect the best available SHA256 implementation. * Returns the name of the implementation. */ -std::string SHA256AutoDetect(); +std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implementation = sha256_implementation::USE_ALL); /** Compute multiple double-SHA256's of 64-byte blobs. * output: pointer to a blocks*32 byte output buffer From ce6df7df9bab2405cfe7d6e382f5682cf30de476 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Tue, 16 May 2023 17:25:58 +0100 Subject: [PATCH 031/172] bench: Add SHA256 implementation specific benchmarks --- src/bench/crypto_hash.cpp | 127 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 121 insertions(+), 6 deletions(-) diff --git a/src/bench/crypto_hash.cpp b/src/bench/crypto_hash.cpp index cf8d807d7b13f..1685a120b454c 100644 --- a/src/bench/crypto_hash.cpp +++ b/src/bench/crypto_hash.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include /* Number of bytes to hash per iteration */ @@ -36,13 +37,48 @@ static void SHA1(benchmark::Bench& bench) }); } -static void SHA256(benchmark::Bench& bench) +static void SHA256_STANDARD(benchmark::Bench& bench) { + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::STANDARD))); uint8_t hash[CSHA256::OUTPUT_SIZE]; std::vector in(BUFFER_SIZE,0); bench.batch(in.size()).unit("byte").run([&] { CSHA256().Write(in.data(), in.size()).Finalize(hash); }); + SHA256AutoDetect(); +} + +static void SHA256_SSE4(benchmark::Bench& bench) +{ + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4))); + uint8_t hash[CSHA256::OUTPUT_SIZE]; + std::vector in(BUFFER_SIZE,0); + bench.batch(in.size()).unit("byte").run([&] { + CSHA256().Write(in.data(), in.size()).Finalize(hash); + }); + SHA256AutoDetect(); +} + +static void SHA256_AVX2(benchmark::Bench& bench) +{ + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_AVX2))); + uint8_t hash[CSHA256::OUTPUT_SIZE]; + std::vector in(BUFFER_SIZE,0); + bench.batch(in.size()).unit("byte").run([&] { + CSHA256().Write(in.data(), in.size()).Finalize(hash); + }); + SHA256AutoDetect(); +} + +static void SHA256_SHANI(benchmark::Bench& bench) +{ + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_SHANI))); + uint8_t hash[CSHA256::OUTPUT_SIZE]; + std::vector in(BUFFER_SIZE,0); + bench.batch(in.size()).unit("byte").run([&] { + 
CSHA256().Write(in.data(), in.size()).Finalize(hash); + }); + SHA256AutoDetect(); } static void SHA3_256_1M(benchmark::Bench& bench) @@ -54,22 +90,92 @@ static void SHA3_256_1M(benchmark::Bench& bench) }); } -static void SHA256_32b(benchmark::Bench& bench) +static void SHA256_32b_STANDARD(benchmark::Bench& bench) +{ + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::STANDARD))); + std::vector in(32,0); + bench.batch(in.size()).unit("byte").run([&] { + CSHA256() + .Write(in.data(), in.size()) + .Finalize(in.data()); + }); + SHA256AutoDetect(); +} + +static void SHA256_32b_SSE4(benchmark::Bench& bench) +{ + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4))); + std::vector in(32,0); + bench.batch(in.size()).unit("byte").run([&] { + CSHA256() + .Write(in.data(), in.size()) + .Finalize(in.data()); + }); + SHA256AutoDetect(); +} + +static void SHA256_32b_AVX2(benchmark::Bench& bench) +{ + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_AVX2))); + std::vector in(32,0); + bench.batch(in.size()).unit("byte").run([&] { + CSHA256() + .Write(in.data(), in.size()) + .Finalize(in.data()); + }); + SHA256AutoDetect(); +} + +static void SHA256_32b_SHANI(benchmark::Bench& bench) { + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_SHANI))); std::vector in(32,0); bench.batch(in.size()).unit("byte").run([&] { CSHA256() .Write(in.data(), in.size()) .Finalize(in.data()); }); + SHA256AutoDetect(); +} + +static void SHA256D64_1024_STANDARD(benchmark::Bench& bench) +{ + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::STANDARD))); + std::vector in(64 * 1024, 0); + bench.batch(in.size()).unit("byte").run([&] { + SHA256D64(in.data(), in.data(), 1024); + }); + SHA256AutoDetect(); +} + +static void SHA256D64_1024_SSE4(benchmark::Bench& bench) +{ + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4))); + std::vector in(64 * 1024, 0); + bench.batch(in.size()).unit("byte").run([&] { + SHA256D64(in.data(), in.data(), 1024); + }); + SHA256AutoDetect(); +} + +static void SHA256D64_1024_AVX2(benchmark::Bench& bench) +{ + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_AVX2))); + std::vector in(64 * 1024, 0); + bench.batch(in.size()).unit("byte").run([&] { + SHA256D64(in.data(), in.data(), 1024); + }); + SHA256AutoDetect(); } -static void SHA256D64_1024(benchmark::Bench& bench) +static void SHA256D64_1024_SHANI(benchmark::Bench& bench) { + bench.name(strprintf("%s using the '%s' SHA256 implementation", __func__, SHA256AutoDetect(sha256_implementation::USE_SSE4_AND_SHANI))); std::vector in(64 * 1024, 0); bench.batch(in.size()).unit("byte").run([&] { SHA256D64(in.data(), in.data(), 1024); }); + SHA256AutoDetect(); } static void SHA512(benchmark::Bench& bench) @@ -152,13 +258,22 @@ static void MuHashPrecompute(benchmark::Bench& bench) BENCHMARK(BenchRIPEMD160, benchmark::PriorityLevel::HIGH); BENCHMARK(SHA1, benchmark::PriorityLevel::HIGH); -BENCHMARK(SHA256, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256_STANDARD, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256_SSE4, benchmark::PriorityLevel::HIGH); 
+BENCHMARK(SHA256_AVX2, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256_SHANI, benchmark::PriorityLevel::HIGH); BENCHMARK(SHA512, benchmark::PriorityLevel::HIGH); BENCHMARK(SHA3_256_1M, benchmark::PriorityLevel::HIGH); -BENCHMARK(SHA256_32b, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256_32b_STANDARD, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256_32b_SSE4, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256_32b_AVX2, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256_32b_SHANI, benchmark::PriorityLevel::HIGH); BENCHMARK(SipHash_32b, benchmark::PriorityLevel::HIGH); -BENCHMARK(SHA256D64_1024, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256D64_1024_STANDARD, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256D64_1024_SSE4, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256D64_1024_AVX2, benchmark::PriorityLevel::HIGH); +BENCHMARK(SHA256D64_1024_SHANI, benchmark::PriorityLevel::HIGH); BENCHMARK(FastRandom_32bit, benchmark::PriorityLevel::HIGH); BENCHMARK(FastRandom_1bit, benchmark::PriorityLevel::HIGH); From 4660fc82a1f5cf6eb6404d5268beef5919581661 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Tue, 26 Sep 2023 21:28:20 -0400 Subject: [PATCH 032/172] wallet: Check last block and conflict height are valid in MarkConflicted MarkConflicted calculates conflict confirmations incorrectly when both the last block processed height and the conflicting height are negative (i.e. uninitialized). If either are negative, we should not be marking conflicts and should exit early. --- src/wallet/wallet.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 245990841950c..d00f8de85f5bf 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -1339,11 +1339,14 @@ void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c { LOCK(cs_wallet); - int conflictconfirms = (m_last_block_processed_height - conflicting_height + 1) * -1; // If number of conflict confirms cannot be determined, this means // that the block is still unknown or not yet part of the main chain, // for example when loading the wallet during a reindex. Do nothing in that // case. + if (m_last_block_processed_height < 0 || conflicting_height < 0) { + return; + } + int conflictconfirms = (m_last_block_processed_height - conflicting_height + 1) * -1; if (conflictconfirms >= 0) return; From 782701ce7d31919dba2241ee43b582d8ae5a2541 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Tue, 26 Sep 2023 21:23:18 -0400 Subject: [PATCH 033/172] test: Test loading wallets with conflicts without a chain Loading a wallet with conflicts without a chain (e.g. wallet tool and migration) would previously result in an assertion due to -1 being both a valid number of conflict confirmations, and the indicator that that member has not been set yet. 
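Concretely, with no chain attached both heights sit at their uninitialized value of -1, so the old computation in MarkConflicted produced (-1 - (-1) + 1) * -1 = -1, a value indistinguishable from the "not set" sentinel, matching the failure described above. A condensed sketch of the guard from the wallet.cpp hunk in the previous patch:

    if (m_last_block_processed_height < 0 || conflicting_height < 0) {
        return; // chain state unknown (e.g. wallet tool, migration): do nothing
    }
    int conflictconfirms = (m_last_block_processed_height - conflicting_height + 1) * -1;
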
--- test/functional/tool_wallet.py | 57 +++++++++++++++++++++++++++++ test/functional/wallet_migration.py | 44 ++++++++++++++++++++++ 2 files changed, 101 insertions(+) diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py index 9d381a2cd2d8b..8b0c8ce4057c0 100755 --- a/test/functional/tool_wallet.py +++ b/test/functional/tool_wallet.py @@ -394,6 +394,62 @@ def test_dump_createfromdump(self): self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') assert not (self.nodes[0].wallets_path / "badload").is_dir() + def test_chainless_conflicts(self): + self.log.info("Test wallet tool when wallet contains conflicting transactions") + self.restart_node(0) + self.generate(self.nodes[0], 101) + + def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) + + self.nodes[0].createwallet("conflicts") + wallet = self.nodes[0].get_wallet_rpc("conflicts") + def_wallet.sendtoaddress(wallet.getnewaddress(), 10) + self.generate(self.nodes[0], 1) + + # parent tx + parent_txid = wallet.sendtoaddress(wallet.getnewaddress(), 9) + parent_txid_bytes = bytes.fromhex(parent_txid)[::-1] + conflict_utxo = wallet.gettransaction(txid=parent_txid, verbose=True)["decoded"]["vin"][0] + + # The specific assertion in MarkConflicted being tested requires that the parent tx is already loaded + # by the time the child tx is loaded. Since transactions end up being loaded in txid order due to how both + # and sqlite store things, we can just grind the child tx until it has a txid that is greater than the parent's. + locktime = 500000000 # Use locktime as nonce, starting at unix timestamp minimum + addr = wallet.getnewaddress() + while True: + child_send_res = wallet.send(outputs=[{addr: 8}], add_to_wallet=False, locktime=locktime) + child_txid = child_send_res["txid"] + child_txid_bytes = bytes.fromhex(child_txid)[::-1] + if (child_txid_bytes > parent_txid_bytes): + wallet.sendrawtransaction(child_send_res["hex"]) + break + locktime += 1 + + # conflict with parent + conflict_unsigned = self.nodes[0].createrawtransaction(inputs=[conflict_utxo], outputs=[{wallet.getnewaddress(): 9.9999}]) + conflict_signed = wallet.signrawtransactionwithwallet(conflict_unsigned)["hex"] + conflict_txid = self.nodes[0].sendrawtransaction(conflict_signed) + self.generate(self.nodes[0], 1) + assert_equal(wallet.gettransaction(txid=parent_txid)["confirmations"], -1) + assert_equal(wallet.gettransaction(txid=child_txid)["confirmations"], -1) + assert_equal(wallet.gettransaction(txid=conflict_txid)["confirmations"], 1) + + self.stop_node(0) + + # Wallet tool should successfully give info for this wallet + expected_output = textwrap.dedent(f'''\ + Wallet info + =========== + Name: conflicts + Format: {"sqlite" if self.options.descriptors else "bdb"} + Descriptors: {"yes" if self.options.descriptors else "no"} + Encrypted: no + HD (hd seed available): yes + Keypool Size: {"8" if self.options.descriptors else "1"} + Transactions: 4 + Address Book: 4 + ''') + self.assert_tool_output(expected_output, "-wallet=conflicts", "info") def run_test(self): self.wallet_path = os.path.join(self.nodes[0].wallets_path, self.default_wallet_name, self.wallet_data_filename) @@ -407,6 +463,7 @@ def run_test(self): # Salvage is a legacy wallet only thing self.test_salvage() self.test_dump_createfromdump() + self.test_chainless_conflicts() if __name__ == '__main__': ToolWalletTest().main() diff --git a/test/functional/wallet_migration.py 
b/test/functional/wallet_migration.py index 395044c8b2e17..bcd71197bf138 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -727,6 +727,49 @@ def send_to_script(script, amount): self.nodes[0].loadwallet(info_migration["watchonly_name"]) assert_equal(wallet_wo.getbalances()['mine']['trusted'], 5) + def test_conflict_txs(self): + self.log.info("Test migration when wallet contains conflicting transactions") + def_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) + + wallet = self.create_legacy_wallet("conflicts") + def_wallet.sendtoaddress(wallet.getnewaddress(), 10) + self.generate(self.nodes[0], 1) + + # parent tx + parent_txid = wallet.sendtoaddress(wallet.getnewaddress(), 9) + parent_txid_bytes = bytes.fromhex(parent_txid)[::-1] + conflict_utxo = wallet.gettransaction(txid=parent_txid, verbose=True)["decoded"]["vin"][0] + + # The specific assertion in MarkConflicted being tested requires that the parent tx is already loaded + # by the time the child tx is loaded. Since transactions end up being loaded in txid order due to how both + # and sqlite store things, we can just grind the child tx until it has a txid that is greater than the parent's. + locktime = 500000000 # Use locktime as nonce, starting at unix timestamp minimum + addr = wallet.getnewaddress() + while True: + child_send_res = wallet.send(outputs=[{addr: 8}], add_to_wallet=False, locktime=locktime) + child_txid = child_send_res["txid"] + child_txid_bytes = bytes.fromhex(child_txid)[::-1] + if (child_txid_bytes > parent_txid_bytes): + wallet.sendrawtransaction(child_send_res["hex"]) + break + locktime += 1 + + # conflict with parent + conflict_unsigned = self.nodes[0].createrawtransaction(inputs=[conflict_utxo], outputs=[{wallet.getnewaddress(): 9.9999}]) + conflict_signed = wallet.signrawtransactionwithwallet(conflict_unsigned)["hex"] + conflict_txid = self.nodes[0].sendrawtransaction(conflict_signed) + self.generate(self.nodes[0], 1) + assert_equal(wallet.gettransaction(txid=parent_txid)["confirmations"], -1) + assert_equal(wallet.gettransaction(txid=child_txid)["confirmations"], -1) + assert_equal(wallet.gettransaction(txid=conflict_txid)["confirmations"], 1) + + wallet.migratewallet() + assert_equal(wallet.gettransaction(txid=parent_txid)["confirmations"], -1) + assert_equal(wallet.gettransaction(txid=child_txid)["confirmations"], -1) + assert_equal(wallet.gettransaction(txid=conflict_txid)["confirmations"], 1) + + wallet.unloadwallet() + def run_test(self): self.generate(self.nodes[0], 101) @@ -743,6 +786,7 @@ def run_test(self): self.test_direct_file() self.test_addressbook() self.test_migrate_raw_p2sh() + self.test_conflict_txs() if __name__ == '__main__': WalletMigrationTest().main() From 79ef528511f0cbbe0a7097ef031f2964aaccfe5c Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Wed, 27 Sep 2023 12:19:57 +0100 Subject: [PATCH 034/172] build, macos: Fix `qt` package build with new Xcode 15 linker --- depends/packages/qt.mk | 2 + depends/patches/qt/fix-macos-linker.patch | 55 +++++++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 depends/patches/qt/fix-macos-linker.patch diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 136ce325798a7..b898bf2713b5c 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -22,6 +22,7 @@ $(package)_patches += rcc_hardcode_timestamp.patch $(package)_patches += duplicate_lcqpafonts.patch $(package)_patches += 
fast_fixed_dtoa_no_optimize.patch $(package)_patches += guix_cross_lib_path.patch +$(package)_patches += fix-macos-linker.patch $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) $(package)_qttranslations_sha256_hash=c92af4171397a0ed272330b4fa0669790fcac8d050b07c8b8cc565ebeba6735e @@ -238,6 +239,7 @@ endef define $(package)_preprocess_cmds cp $($(package)_patch_dir)/qt.pro qt.pro && \ cp $($(package)_patch_dir)/qttools_src.pro qttools/src/src.pro && \ + patch -p1 -i $($(package)_patch_dir)/fix-macos-linker.patch && \ patch -p1 -i $($(package)_patch_dir)/dont_hardcode_pwd.patch && \ patch -p1 -i $($(package)_patch_dir)/fix_qt_pkgconfig.patch && \ patch -p1 -i $($(package)_patch_dir)/fix_android_jni_static.patch && \ diff --git a/depends/patches/qt/fix-macos-linker.patch b/depends/patches/qt/fix-macos-linker.patch new file mode 100644 index 0000000000000..db056de4d9b25 --- /dev/null +++ b/depends/patches/qt/fix-macos-linker.patch @@ -0,0 +1,55 @@ +qmake: Don't error out if QMAKE_DEFAULT_LIBDIRS is empty on macOS + +The new linker in Xcode 15 doesn't provide any default linker or +framework paths when requested via -v, but still seems to use the +default paths documented in the ld man page. + +We trust that linker will do the right thing, even if we don't +know of its default linker paths. + +We also need to opt out of the default fallback logic to +set the libdirs to /lib and /usr/lib. + +This may result in UnixMakefileGenerator::findLibraries finding +different libraries than expected, if additional paths are +passed with -L, which will then take precedence for qmake, +even if the linker itself will use the library from the +SDK's default paths. This should hopefully not be an issue +in practice, as we don't turn -lFoo into absolute paths in +qmake, so the only risk is that we're picking up the wrong +prl files and adding additional dependencies that the lib +in the SDK doesn't have. + +Upstream commits: + - Qt 5.15.16: Not yet publicly available. 
+ - Qt dev: cdf64b0e47115cc473e1afd1472b4b09e130b2a5
+
+For other Qt branches see
+https://codereview.qt-project.org/q/I2347b26e2df0828471373b0e15b8c9089274c65d
+
+--- old/qtbase/mkspecs/features/toolchain.prf
++++ new/qtbase/mkspecs/features/toolchain.prf
+@@ -283,9 +283,12 @@ isEmpty($${target_prefix}.INCDIRS) {
+                 }
+             }
+         }
+-        isEmpty(QMAKE_DEFAULT_LIBDIRS)|isEmpty(QMAKE_DEFAULT_INCDIRS): \
++        isEmpty(QMAKE_DEFAULT_INCDIRS): \
+             !integrity: \
+-                error("failed to parse default search paths from compiler output")
++                error("failed to parse default include paths from compiler output")
++        isEmpty(QMAKE_DEFAULT_LIBDIRS): \
++            !integrity:!darwin: \
++                error("failed to parse default library paths from compiler output")
+         QMAKE_DEFAULT_LIBDIRS = $$unique(QMAKE_DEFAULT_LIBDIRS)
+     } else: ghs {
+         cmd = $$QMAKE_CXX $$QMAKE_CXXFLAGS -$${LITERAL_HASH} -o /tmp/fake_output /tmp/fake_input.cpp
+@@ -407,7 +410,7 @@ isEmpty($${target_prefix}.INCDIRS) {
+         QMAKE_DEFAULT_INCDIRS = $$split(INCLUDE, $$QMAKE_DIRLIST_SEP)
+     }
+ 
+-    unix:if(!cross_compile|host_build) {
++    unix:!darwin:if(!cross_compile|host_build) {
+         isEmpty(QMAKE_DEFAULT_INCDIRS): QMAKE_DEFAULT_INCDIRS = /usr/include /usr/local/include
+         isEmpty(QMAKE_DEFAULT_LIBDIRS): QMAKE_DEFAULT_LIBDIRS = /lib /usr/lib
+     }

From fa40b3ee22e78f58d7426dbc4343472ba40081e3 Mon Sep 17 00:00:00 2001
From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz>
Date: Wed, 27 Sep 2023 16:50:16 +0200
Subject: [PATCH 035/172] test: Avoid test failure on Linux root without cap-add LINUX_IMMUTABLE

---
 test/functional/feature_reindex_readonly.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/test/functional/feature_reindex_readonly.py b/test/functional/feature_reindex_readonly.py
index 9f1bb30023dd3..26531f472bf8d 100755
--- a/test/functional/feature_reindex_readonly.py
+++ b/test/functional/feature_reindex_readonly.py
@@ -6,6 +6,7 @@
 - Start a node, generate blocks, then restart with -reindex after setting blk files to read-only
 """
+import os
 import platform
 import stat
 import subprocess
@@ -45,6 +46,11 @@ def reindex_readonly(self):
                 self.log.warning(f"stdout: {e.stdout}")
             if e.stderr:
                 self.log.warning(f"stderr: {e.stderr}")
+            if os.getuid() == 0:
+                self.log.warning("Return early on Linux under root, because chattr failed.")
+                self.log.warning("This should only happen due to missing capabilities in a container.")
+                self.log.warning("Make sure to --cap-add LINUX_IMMUTABLE if you want to run this test.")
+                return
 
         self.log.debug("Attempt to restart and reindex the node with the unwritable block file")
         with self.nodes[0].assert_debug_log(expected_msgs=['FlushStateToDisk', 'failed to open file'], unexpected_msgs=[]):

From d9841a7ac634472c1a9105f81f8e7b55e4bd1a4a Mon Sep 17 00:00:00 2001
From: Anthony Towns
Date: Fri, 22 Sep 2023 09:40:45 -0400
Subject: [PATCH 036/172] Add make_secure_unique helper

Co-authored-by: Pieter Wuille
---
 src/support/allocators/secure.h | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h
index 558f835f11628..4395567722e6e 100644
--- a/src/support/allocators/secure.h
+++ b/src/support/allocators/secure.h
@@ -57,4 +57,28 @@ struct secure_allocator {
 // TODO: Consider finding a way to make incoming RPC request.params[i] mlock()ed as well
 typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;
 
+template<typename T>
+struct SecureUniqueDeleter {
+    void operator()(T* t) noexcept {
+        secure_allocator<T>().deallocate(t, 1);
+    }
+};
+
+template<typename T>
+using secure_unique_ptr = std::unique_ptr<T, SecureUniqueDeleter<T>>;
+
+template<typename T, typename... Args>
+secure_unique_ptr<T> make_secure_unique(Args&&... as)
+{
+    T* p = secure_allocator<T>().allocate(1);
+
+    // initialize in place, and return as secure_unique_ptr
+    try {
+        return secure_unique_ptr<T>(new (p) T(std::forward<Args>(as)...));
+    } catch (...) {
+        secure_allocator<T>().deallocate(p, 1);
+        throw;
+    }
+}
+
 #endif // BITCOIN_SUPPORT_ALLOCATORS_SECURE_H

From 6ef405ddb195bbf1b28a906d8c8bb877f0c17d7b Mon Sep 17 00:00:00 2001
From: Pieter Wuille
Date: Mon, 18 Sep 2023 12:10:19 -0400
Subject: [PATCH 037/172] key: don't allocate secure mem for null (invalid) key

Instead of storing the key material as an std::vector (with secure
allocator), use a secure_unique_ptr to a 32-byte array, and use nullptr
for invalid keys. This means a smaller CKey type, and no secure/dynamic
memory usage for invalid keys.
---
 src/key.cpp | 37 +++++++++++++++++----------------
 src/key.h   | 60 +++++++++++++++++++++++++++++++++------------------
 2 files changed, 59 insertions(+), 38 deletions(-)

diff --git a/src/key.cpp b/src/key.cpp
index efaea5b1b36f1..0f283ca3e3839 100644
--- a/src/key.cpp
+++ b/src/key.cpp
@@ -159,21 +159,21 @@ bool CKey::Check(const unsigned char *vch) {
 }
 
 void CKey::MakeNewKey(bool fCompressedIn) {
+    MakeKeyData();
     do {
-        GetStrongRandBytes(keydata);
-    } while (!Check(keydata.data()));
-    fValid = true;
+        GetStrongRandBytes(*keydata);
+    } while (!Check(keydata->data()));
     fCompressed = fCompressedIn;
 }
 
 bool CKey::Negate()
 {
-    assert(fValid);
-    return secp256k1_ec_seckey_negate(secp256k1_context_sign, keydata.data());
+    assert(keydata);
+    return secp256k1_ec_seckey_negate(secp256k1_context_sign, keydata->data());
 }
 
 CPrivKey CKey::GetPrivKey() const {
-    assert(fValid);
+    assert(keydata);
     CPrivKey seckey;
     int ret;
     size_t seckeylen;
@@ -186,7 +186,7 @@ CPrivKey CKey::GetPrivKey() const {
 }
 
 CPubKey CKey::GetPubKey() const {
-    assert(fValid);
+    assert(keydata);
     secp256k1_pubkey pubkey;
     size_t clen = CPubKey::SIZE;
     CPubKey result;
@@ -212,7 +212,7 @@ bool SigHasLowR(const secp256k1_ecdsa_signature* sig)
 }
 
 bool CKey::Sign(const uint256 &hash, std::vector<unsigned char>& vchSig, bool grind, uint32_t test_case) const {
-    if (!fValid)
+    if (!keydata)
         return false;
     vchSig.resize(CPubKey::SIGNATURE_SIZE);
     size_t nSigLen = CPubKey::SIGNATURE_SIZE;
@@ -253,7 +253,7 @@ bool CKey::VerifyPubKey(const CPubKey& pubkey) const {
 }
 
 bool CKey::SignCompact(const uint256 &hash, std::vector<unsigned char>& vchSig) const {
-    if (!fValid)
+    if (!keydata)
         return false;
     vchSig.resize(CPubKey::COMPACT_SIGNATURE_SIZE);
     int rec = -1;
@@ -301,10 +301,12 @@ bool CKey::SignSchnorr(const uint256& hash, Span<unsigned char> sig, const uint2
 }
 
 bool CKey::Load(const CPrivKey &seckey, const CPubKey &vchPubKey, bool fSkipCheck=false) {
-    if (!ec_seckey_import_der(secp256k1_context_sign, (unsigned char*)begin(), seckey.data(), seckey.size()))
+    MakeKeyData();
+    if (!ec_seckey_import_der(secp256k1_context_sign, (unsigned char*)begin(), seckey.data(), seckey.size())) {
+        ClearKeyData();
         return false;
+    }
     fCompressed = vchPubKey.IsCompressed();
-    fValid = true;
 
     if (fSkipCheck)
         return true;
@@ -325,22 +327,21 @@ bool CKey::Derive(CKey& keyChild, ChainCode &ccChild, unsigned int nChild, const
         BIP32Hash(cc, nChild, 0, begin(), vout.data());
     }
     memcpy(ccChild.begin(), vout.data()+32, 32);
-    memcpy((unsigned char*)keyChild.begin(), begin(), 32);
+    keyChild.Set(begin(), begin() + 32, true);
     bool ret = secp256k1_ec_seckey_tweak_add(secp256k1_context_sign, (unsigned char*)keyChild.begin(), vout.data());
-    keyChild.fCompressed = true;
-    keyChild.fValid = ret;
+    if (!ret) keyChild.ClearKeyData();
     return ret;
 }
 
 EllSwiftPubKey CKey::EllSwiftCreate(Span<const std::byte> ent32) const
 {
-    assert(fValid);
+    assert(keydata);
     assert(ent32.size() == 32);
 
     std::array<std::byte, 64> encoded_pubkey;
     auto success = secp256k1_ellswift_create(secp256k1_context_sign,
                                              UCharCast(encoded_pubkey.data()),
-                                             keydata.data(),
+                                             keydata->data(),
                                              UCharCast(ent32.data()));
 
     // Should always succeed for valid keys (asserted above).
@@ -350,7 +351,7 @@ EllSwiftPubKey CKey::EllSwiftCreate(Span<const std::byte> ent32) const
 
 ECDHSecret CKey::ComputeBIP324ECDHSecret(const EllSwiftPubKey& their_ellswift, const EllSwiftPubKey& our_ellswift, bool initiating) const
 {
-    assert(fValid);
+    assert(keydata);
     ECDHSecret output;
     // BIP324 uses the initiator as party A, and the responder as party B. Remap the inputs
@@ -359,7 +360,7 @@ ECDHSecret CKey::ComputeBIP324ECDHSecret(const EllSwiftPubKey& their_ellswift, c
                                    UCharCast(output.data()),
                                    UCharCast(initiating ? our_ellswift.data() : their_ellswift.data()),
                                    UCharCast(initiating ? their_ellswift.data() : our_ellswift.data()),
-                                   keydata.data(),
+                                   keydata->data(),
                                    initiating ? 0 : 1,
                                    secp256k1_ellswift_xdh_hash_function_bip324,
                                    nullptr);
diff --git a/src/key.h b/src/key.h
index 8382b0a6703b9..785059da0218c 100644
--- a/src/key.h
+++ b/src/key.h
@@ -46,57 +46,77 @@ class CKey
                   "COMPRESSED_SIZE is larger than SIZE");
 
 private:
-    //! Whether this private key is valid. We check for correctness when modifying the key
-    //! data, so fValid should always correspond to the actual state.
-    bool fValid{false};
+    /** Internal data container for private key material. */
+    using KeyType = std::array<unsigned char, 32>;
 
     //! Whether the public key corresponding to this private key is (to be) compressed.
     bool fCompressed{false};
 
-    //! The actual byte data
-    std::vector<unsigned char, secure_allocator<unsigned char> > keydata;
+    //! The actual byte data. nullptr for invalid keys.
+    secure_unique_ptr<KeyType> keydata;
 
     //! Check whether the 32-byte array pointed to by vch is valid keydata.
     bool static Check(const unsigned char* vch);
 
+    void MakeKeyData()
+    {
+        if (!keydata) keydata = make_secure_unique<KeyType>();
+    }
+
+    void ClearKeyData()
+    {
+        keydata.reset();
+    }
+
 public:
-    //! Construct an invalid private key.
-    CKey()
+    CKey() noexcept = default;
+    CKey(CKey&&) noexcept = default;
+    CKey& operator=(CKey&&) noexcept = default;
+
+    CKey& operator=(const CKey& other)
     {
-        // Important: vch must be 32 bytes in length to not break serialization
-        keydata.resize(32);
+        if (other.keydata) {
+            MakeKeyData();
+            *keydata = *other.keydata;
+        } else {
+            ClearKeyData();
+        }
+        fCompressed = other.fCompressed;
+        return *this;
     }
 
+    CKey(const CKey& other) { *this = other; }
+
     friend bool operator==(const CKey& a, const CKey& b)
     {
         return a.fCompressed == b.fCompressed &&
             a.size() == b.size() &&
-            memcmp(a.keydata.data(), b.keydata.data(), a.size()) == 0;
+            memcmp(a.data(), b.data(), a.size()) == 0;
     }
 
     //! Initialize using begin and end iterators to byte data.
     template <typename T>
     void Set(const T pbegin, const T pend, bool fCompressedIn)
     {
-        if (size_t(pend - pbegin) != keydata.size()) {
-            fValid = false;
+        if (size_t(pend - pbegin) != std::tuple_size_v<KeyType>) {
+            ClearKeyData();
         } else if (Check(&pbegin[0])) {
-            memcpy(keydata.data(), (unsigned char*)&pbegin[0], keydata.size());
-            fValid = true;
+            MakeKeyData();
+            memcpy(keydata->data(), (unsigned char*)&pbegin[0], keydata->size());
             fCompressed = fCompressedIn;
         } else {
-            fValid = false;
+            ClearKeyData();
         }
     }
 
     //! Simple read-only vector-like interface.
-    unsigned int size() const { return (fValid ? keydata.size() : 0); }
-    const std::byte* data() const { return reinterpret_cast<const std::byte*>(keydata.data()); }
-    const unsigned char* begin() const { return keydata.data(); }
-    const unsigned char* end() const { return keydata.data() + size(); }
+    unsigned int size() const { return keydata ? keydata->size() : 0; }
+    const std::byte* data() const { return keydata ? reinterpret_cast<const std::byte*>(keydata->data()) : nullptr; }
+    const unsigned char* begin() const { return keydata ? keydata->data() : nullptr; }
+    const unsigned char* end() const { return begin() + size(); }
 
     //! Check whether this private key is valid.
-    bool IsValid() const { return fValid; }
+    bool IsValid() const { return !!keydata; }
 
     //! Check whether the public key corresponding to this private key is (to be) compressed.
     bool IsCompressed() const { return fCompressed; }

From f9047771d642c5887c752872b6ffbbd974603b35 Mon Sep 17 00:00:00 2001
From: Fabian Jahr
Date: Sun, 30 Jul 2023 11:21:41 +0200
Subject: [PATCH 038/172] lint: fix custom mypy cache dir setting

---
 test/lint/lint-python.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/test/lint/lint-python.py b/test/lint/lint-python.py
index 6010c787cb9cd..eabd13322e1e5 100755
--- a/test/lint/lint-python.py
+++ b/test/lint/lint-python.py
@@ -9,14 +9,17 @@
 """
 
 import os
+from pathlib import Path
 import subprocess
 import sys
 
 from importlib.metadata import metadata, PackageNotFoundError
 
+# Customize mypy cache dir via environment variable
+cache_dir = Path(__file__).parent.parent / ".mypy_cache"
+os.environ["MYPY_CACHE_DIR"] = str(cache_dir)
+
 DEPS = ['flake8', 'lief', 'mypy', 'pyzmq']
-MYPY_CACHE_DIR = f"{os.getenv('BASE_ROOT_DIR', '')}/test/.mypy_cache"
 
 # All .py files, except those in src/ (to exclude subtrees there)
 FLAKE_FILES_ARGS = ['git', 'ls-files', '*.py', ':!:src/*.py']

From 7899402cff708319b1c5181242a97557eefe1ae7 Mon Sep 17 00:00:00 2001
From: Pieter Wuille
Date: Wed, 31 Aug 2022 18:15:24 -0400
Subject: [PATCH 039/172] Add headerssync-params.py script to the repository

---
 contrib/devtools/README.md             |  10 +
 contrib/devtools/headerssync-params.py | 357 +++++++++++++++++++++++++
 2 files changed, 367 insertions(+)
 create mode 100644 contrib/devtools/headerssync-params.py

diff --git a/contrib/devtools/README.md b/contrib/devtools/README.md
index 54b1a8558818a..8bbf39b67fcc0 100644
--- a/contrib/devtools/README.md
+++ b/contrib/devtools/README.md
@@ -90,6 +90,16 @@ example:
 BUILDDIR=$PWD/build contrib/devtools/gen-manpages.py
 ```
 
+headerssync-params.py
+=====================
+
+A script to generate optimal parameters for the headerssync module (src/headerssync.cpp). It takes no command-line
+options, as all its configuration is set at the top of the file. It runs many times faster inside PyPy. Invocation:
+
+```bash
+pypy3 contrib/devtools/headerssync-params.py
+```
+
 gen-bitcoin-conf.sh
 ===================
 
diff --git a/contrib/devtools/headerssync-params.py b/contrib/devtools/headerssync-params.py
new file mode 100644
index 0000000000000..f0088d6cb9a36
--- /dev/null
+++ b/contrib/devtools/headerssync-params.py
@@ -0,0 +1,357 @@
+#!/usr/bin/env python3
+# Copyright (c) 2022 Pieter Wuille
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+ +"""Script to find the optimal parameters for the headerssync module through simulation.""" + +from math import log, exp, sqrt +from datetime import datetime, timedelta +import random + +# Parameters: + +# Aim for still working fine at some point in the future. [datetime] +TIME = datetime(2026, 5, 25) + +# Expected block interval. [timedelta] +BLOCK_INTERVAL = timedelta(seconds=600) + +# The number of headers corresponding to the minchainwork parameter. [headers] +MINCHAINWORK_HEADERS = 784000 + +# Combined processing bandwidth from all attackers to one victim. [bit/s] +# 6 Gbit/s is approximately the speed at which a single thread of a Ryzen 5950X CPU thread can hash +# headers. In practice, the victim's network bandwidth and network processing overheads probably +# impose a far lower number, but it's a useful upper bound. +ATTACK_BANDWIDTH = 6000000000 + +# How much additional permanent memory usage are attackers (jointly) allowed to cause in the victim, +# expressed as fraction of the normal memory usage due to mainchain growth, for the duration the +# attack is sustained. [unitless] +# 0.2 means that attackers, while they keep up the attack, can cause permanent memory usage due to +# headers storage to grow at 1.2 header per BLOCK_INTERVAL. +ATTACK_FRACTION = 0.2 + +# When this is set, the mapping from period size to memory usage (at optimal buffer size for that +# period) is assumed to be convex. This greatly speeds up the computation, and does not appear +# to influence the outcome. Set to False for a stronger guarantee to get the optimal result. +ASSUME_CONVEX = True + +# Explanation: +# +# The headerssync module implements a DoS protection against low-difficulty header spam which does +# not rely on checkpoints. In short it works as follows: +# +# - (initial) header synchronization is split into two phases: +# - A commitment phase, in which headers are downloaded from the peer, and a very compact +# commitment to them is remembered in per-peer memory. The commitment phase ends when the +# received chain's combined work reaches a predetermined threshold. +# - A redownload phase, during which the headers are downloaded a second time from the same peer, +# and compared against the commitment constructed in the first phase. If there is a match, the +# redownloaded headers are fed to validation and accepted into permanent storage. +# +# This separation guarantees that no headers are accepted into permanent storage without +# requiring the peer to first prove the chain actually has sufficient work. +# +# - To actually implement this commitment mechanism, the following approach is used: +# - Keep a *1 bit* commitment (constructed using a salted hash function), for every block whose +# height is a multiple of {period} plus an offset value. If RANDOMIZE_OFFSET, the offset, +# like the salt, is chosen randomly when the synchronization starts and kept fixed afterwards. +# - When redownloading, headers are fed through a per-peer queue that holds {bufsize} headers, +# before passing them to validation. All the headers in this queue are verified against the +# commitment bits created in the first phase before any header is released from it. This means +# {bufsize/period} bits are checked "on top of" each header before actually processing it, +# which results in a commitment structure with roughly {bufsize/period} bits of security, as +# once a header is modified, due to the prevhash inclusion, all future headers necessarily +# change as well. 
+#
+# The question is what these {period} and {bufsize} parameters need to be set to. This program
+# exhaustively tests a range of values to find the optimal choice, taking into account:
+#
+# - Minimizing the (maximum of) two scenarios that trigger per-peer memory usage:
+#
+#   - When downloading a (likely honest) chain that reaches the chainwork threshold after {n}
+#     blocks, and then redownloads them, we will consume per-peer memory that is sufficient to
+#     store {n/period} commitment bits and {bufsize} headers. We only consider attackers without
+#     sufficient hashpower (as otherwise they are from a PoW perspective not attackers), which
+#     means {n} is restricted to the honest chain's length before reaching minchainwork.
+#
+#   - When downloading a (likely false) chain of {n} headers that never reaches the chainwork
+#     threshold, we will consume per-peer memory that is sufficient to store {n/period}
+#     commitment bits. Such a chain may be very long, by exploiting the timewarp bug to avoid
+#     ramping up difficulty. There is however an absolute limit on how long such a chain can be: 6
+#     blocks per second since genesis, due to the increasing MTP consensus rule.
+#
+# - Not gratuitously preventing synchronizing any valid chain, however difficult such a chain may
+#   be to construct. In particular, the above scenario with an enormous timewarp-exploiting chain
+#   cannot simply be ignored, as it is legal that the honest main chain is like that. We however
+#   do not bother minimizing the memory usage in that case (because a billion-header long honest
+#   chain will inevitably use far larger amounts of memory than designed for).
+#
+# - Keep the rate at which attackers can get low-difficulty headers accepted to the block index
+#   negligible. Specifically, the possibility exists for an attacker to send the honest main
+#   chain's headers during the commitment phase, but then start deviating at an attacker-chosen
+#   point by sending novel low-difficulty headers instead. Depending on how high we set the
+#   {bufsize/period} ratio, we can make the probability that such a header makes it in
+#   arbitrarily small, but at the cost of higher memory during the redownload phase. It turns out,
+#   some rate of memory usage growth is expected anyway due to chain growth, so permitting the
+#   attacker to increase that rate by a small factor isn't concerning. The attacker may start
+#   somewhat later than genesis, as long as the difficulty doesn't get too high. This reduces
+#   the attacker bandwidth required at the cost of higher PoW needed for constructing the
+#   alternate chain. This trade-off is ignored here, as it results in at most a small constant
+#   factor in attack rate.
+
+
+# System properties:
+
+# Headers in the redownload buffer are stored without prevhash. [bits]
+COMPACT_HEADER_SIZE = 48 * 8
+
+# How many bits a header uses in P2P protocol. [bits]
+NET_HEADER_SIZE = 81 * 8
+
+# How many headers are sent at once. [headers]
+HEADER_BATCH_COUNT = 2000
+
+# Whether or not the offset of which block heights get checksummed is randomized.
+RANDOMIZE_OFFSET = True
+
+# Timestamp of the genesis block
+GENESIS_TIME = datetime(2009, 1, 3)
+
+# Derived values:
+
+# What rate of headers worth of RAM attackers are allowed to cause in the victim. [headers/s]
+LIMIT_HEADERRATE = ATTACK_FRACTION / BLOCK_INTERVAL.total_seconds()
+
+# How many headers can attackers (jointly) send a victim per second. [headers/s]
+NET_HEADERRATE = ATTACK_BANDWIDTH / NET_HEADER_SIZE
+
+# What fraction of headers sent by attackers can at most be accepted by a victim [unitless]
+LIMIT_FRACTION = LIMIT_HEADERRATE / NET_HEADERRATE
+
+# How many headers we permit attackers to cause being accepted per attack. [headers/attack]
+ATTACK_HEADERS = LIMIT_FRACTION * MINCHAINWORK_HEADERS
+
+
+def find_max_headers(when):
+    """Compute the maximum number of headers a valid Bitcoin chain can have at given time."""
+    # When exploiting the timewarp attack, this can be up to 6 per second since genesis.
+    return 6 * ((when - GENESIS_TIME) // timedelta(seconds=1))
+
+
+def lambert_w(value):
+    """Solve the equation x*exp(x)=value (x > 0, value > 0)."""
+    # Initial approximation.
+    approx = max(log(value), 0.0)
+    for _ in range(10):
+        # Newton-Raphson iteration steps.
+        approx += (value * exp(-approx) - approx) / (approx + 1.0)
+    return approx
+
+
+def attack_rate(period, bufsize, limit=None):
+    """Compute maximal accepted headers per attack in (period, bufsize) configuration.
+
+    If limit is provided, the computation is stopped early when the result is known to exceed the
+    value in limit.
+    """
+
+    max_rate = None
+    max_honest = None
+    # Let the current batch 0 being received be the first one in which the attacker starts lying.
+    # They will only ever start doing so right after a commitment block, but where that is can be
+    # in a number of places. Let honest be the number of honest headers in this current batch,
+    # preceding the forged ones.
+    for honest in range(HEADER_BATCH_COUNT):
+        # The number of headers the attack under consideration will on average get accepted.
+        # This is the number being computed.
+        rate = 0
+
+        # Iterate over the possible alignments of commitments w.r.t. the first batch. In case
+        # the alignments are randomized, try all values. If not, the attacker can know/choose
+        # the alignment, and will always start forging right after a commitment.
+        if RANDOMIZE_OFFSET:
+            align_choices = list(range(period))
+        else:
+            align_choices = [(honest - 1) % period]
+        # Now loop over those possible alignment values, computing the average attack rate
+        # over them by dividing each contribution by len(align_choices).
+        for align in align_choices:
+            # These state variables capture the situation after receiving the first batch.
+            # - The number of headers received after the last commitment for an honest block:
+            after_good_commit = HEADER_BATCH_COUNT - honest + ((honest - align - 1) % period)
+            # - The number of forged headers in the redownload buffer:
+            forged_in_buf = HEADER_BATCH_COUNT - honest
+
+            # Now iterate over the next batches of headers received, adding contributions to the
+            # rate variable.
+            while True:
+                # Process the first HEADER_BATCH_COUNT headers in the buffer:
+                accept_forged_headers = max(forged_in_buf - bufsize, 0)
+                forged_in_buf -= accept_forged_headers
+                if accept_forged_headers:
+                    # The probability the attack has not been detected yet at this point:
+                    prob = 0.5 ** (after_good_commit // period)
+                    # Update attack rate, divided by align_choices to average over the alignments.
+                    rate += accept_forged_headers * prob / len(align_choices)
+                    # If this means we exceed limit, bail out early (performance optimization).
+                    if limit is not None and rate >= limit:
+                        return rate, None
+                    # If the maximal term being added is negligible compared to rate, stop
+                    # iterating.
+ if HEADER_BATCH_COUNT * prob < 1.0e-16 * rate * len(align_choices): + break + # Update state from a new incoming batch (which is all forged) + after_good_commit += HEADER_BATCH_COUNT + forged_in_buf += HEADER_BATCH_COUNT + + if max_rate is None or rate > max_rate: + max_rate = rate + max_honest = honest + + return max_rate, max_honest + + +def memory_usage(period, bufsize, when): + """How much memory (max,mainchain,timewarp) does the (period,bufsize) configuration need?""" + + # Per-peer memory usage for a timewarp chain that never meets minchainwork + mem_timewarp = find_max_headers(when) // period + # Per-peer memory usage for being fed the main chain + mem_mainchain = (MINCHAINWORK_HEADERS // period) + bufsize * COMPACT_HEADER_SIZE + # Maximum per-peer memory usage + max_mem = max(mem_timewarp, mem_mainchain) + + return max_mem, mem_mainchain, mem_timewarp + +def find_bufsize(period, attack_headers, when, max_mem=None, min_bufsize=1): + """Determine how big bufsize needs to be given a specific period length. + + Given a period, find the smallest value of bufsize such that the attack rate against the + (period, bufsize) configuration is below attack_headers. If max_mem is provided, and no + such bufsize exists that needs less than max_mem bits of memory, None is returned. + min_bufsize is the minimal result to be considered.""" + + if max_mem is None: + succ_buf = min_bufsize - 1 + fail_buf = min_bufsize + # First double iteratively until an upper bound for failure is found. + while True: + if attack_rate(period, fail_buf, attack_headers)[0] < attack_headers: + break + succ_buf, fail_buf = fail_buf, 3 * fail_buf - 2 * succ_buf + else: + # If a long low-work header chain exists that exceeds max_mem already, give up. + if find_max_headers(when) // period > max_mem: + return None + # Otherwise, verify that the maximal buffer size that permits a mainchain sync with less + # than max_mem memory is sufficient to get the attack rate below attack_headers. If not, + # also give up. + max_buf = (max_mem - (MINCHAINWORK_HEADERS // period)) // COMPACT_HEADER_SIZE + if max_buf < min_bufsize: + return None + if attack_rate(period, max_buf, attack_headers)[0] >= attack_headers: + return None + # If it is sufficient, that's an upper bound to start our search. + succ_buf = min_bufsize - 1 + fail_buf = max_buf + + # Then perform a bisection search to narrow it down. + while fail_buf > succ_buf + 1: + try_buf = (succ_buf + fail_buf) // 2 + if attack_rate(period, try_buf, attack_headers)[0] >= attack_headers: + succ_buf = try_buf + else: + fail_buf = try_buf + return fail_buf + + +def optimize(when): + """Find the best (period, bufsize) configuration.""" + + # When period*bufsize = memory_scale, the per-peer memory for a mainchain sync and a maximally + # long low-difficulty header sync are equal. + memory_scale = (find_max_headers(when) - MINCHAINWORK_HEADERS) / COMPACT_HEADER_SIZE + # Compute approximation for {bufsize/period}, using a formula for a simplified problem. + approx_ratio = lambert_w(log(4) * memory_scale / ATTACK_HEADERS**2) / log(4) + # Use those for a first attempt. 
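+    # (For reference: lambert_w solves x*exp(x) = v; with v = log(4) * memory_scale / ATTACK_HEADERS**2
+    # and x = approx_ratio * log(4), this chooses the ratio bufsize/period such that
+    # approx_ratio * 4**approx_ratio ~= memory_scale / ATTACK_HEADERS**2 in the simplified model.)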
+ print("Searching configurations:") + period = int(sqrt(memory_scale / approx_ratio) + 0.5) + bufsize = find_bufsize(period, ATTACK_HEADERS, when) + mem = memory_usage(period, bufsize, when) + best = (period, bufsize, mem) + maps = [(period, bufsize), (MINCHAINWORK_HEADERS + 1, None)] + print(f"- Initial: period={period}, buffer={bufsize}, mem={mem[0] / 8192:.3f} KiB") + + # Consider all period values between 1 and MINCHAINWORK_HEADERS, except the one just tried. + periods = [iv for iv in range(1, MINCHAINWORK_HEADERS + 1) if iv != period] + # Iterate, picking a random element from periods, computing its corresponding bufsize, and + # then using the result to shrink the period. + while True: + # Remove all periods whose memory usage for low-work long chain sync exceed the best + # memory usage we've found so far. + periods = [p for p in periods if find_max_headers(when) // p < best[2][0]] + # Stop if there is nothing left to try. + if len(periods) == 0: + break + # Pick a random remaining option for period size, and compute corresponding bufsize. + period = periods.pop(random.randrange(len(periods))) + # The buffer size (at a given attack level) cannot shrink as the period grows. Find the + # largest period smaller than the selected one we know the buffer size for, and use that + # as a lower bound to find_bufsize. + min_bufsize = max([(p, b) for p, b in maps if p < period] + [(0,0)])[1] + bufsize = find_bufsize(period, ATTACK_HEADERS, when, best[2][0], min_bufsize) + if bufsize is not None: + # We found a (period, bufsize) configuration with better memory usage than our best + # so far. Remember it for future lower bounds. + maps.append((period, bufsize)) + mem = memory_usage(period, bufsize, when) + assert mem[0] <= best[2][0] + if ASSUME_CONVEX: + # Remove all periods that are on the other side of the former best as the new + # best. + periods = [p for p in periods if (p < best[0]) == (period < best[0])] + best = (period, bufsize, mem) + print(f"- New best: period={period}, buffer={bufsize}, mem={mem[0] / 8192:.3f} KiB") + else: + # The (period, bufsize) configuration we found is worse than what we already had. + if ASSUME_CONVEX: + # Remove all periods that are on the other side of the tried configuration as the + # best one. + periods = [p for p in periods if (p < period) == (best[0] < period)] + + # Return the result. + period, bufsize, _ = best + return period, bufsize + + +def analyze(when): + """Find the best configuration and print it out.""" + + period, bufsize = optimize(when) + # Compute accurate statistics for the best found configuration. + _, mem_mainchain, mem_timewarp = memory_usage(period, bufsize, when) + headers_per_attack, _ = attack_rate(period, bufsize) + attack_volume = NET_HEADER_SIZE * MINCHAINWORK_HEADERS + # And report them. + print() + print("Optimal configuration:") + print() + print("//! Store one header commitment per HEADER_COMMITMENT_PERIOD blocks.") + print(f"constexpr size_t HEADER_COMMITMENT_PERIOD{{{period}}};") + print() + print("//! Only feed headers to validation once this many headers on top have been") + print("//! 
received and validated against commitments.") + print(f"constexpr size_t REDOWNLOAD_BUFFER_SIZE{{{bufsize}}};" + f" // {bufsize}/{period} = ~{bufsize/period:.1f} commitments") + print() + print("Properties:") + print(f"- Per-peer memory for mainchain sync: {mem_mainchain / 8192:.3f} KiB") + print(f"- Per-peer memory for timewarp attack: {mem_timewarp / 8192:.3f} KiB") + print(f"- Attack rate: {1/headers_per_attack:.1f} attacks for 1 header of memory growth") + print(f" (where each attack costs {attack_volume / 8388608:.3f} MiB bandwidth)") + + +analyze(TIME) From 53d7d35b5899685cd1577156250068e0cab502f4 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Wed, 31 Aug 2022 18:19:48 -0400 Subject: [PATCH 040/172] Update parameters in headerssync.cpp --- src/headerssync.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/headerssync.cpp b/src/headerssync.cpp index f891063cd23b1..1b5d7305e84fa 100644 --- a/src/headerssync.cpp +++ b/src/headerssync.cpp @@ -9,15 +9,15 @@ #include #include -// The two constants below are computed using the simulation script on -// https://gist.github.com/sipa/016ae445c132cdf65a2791534dfb7ae1 +// The two constants below are computed using the simulation script in +// contrib/devtools/headerssync-params.py. -//! Store a commitment to a header every HEADER_COMMITMENT_PERIOD blocks. -constexpr size_t HEADER_COMMITMENT_PERIOD{584}; +//! Store one header commitment per HEADER_COMMITMENT_PERIOD blocks. +constexpr size_t HEADER_COMMITMENT_PERIOD{600}; //! Only feed headers to validation once this many headers on top have been //! received and validated against commitments. -constexpr size_t REDOWNLOAD_BUFFER_SIZE{13959}; // 13959/584 = ~23.9 commitments +constexpr size_t REDOWNLOAD_BUFFER_SIZE{14308}; // 14308/600 = ~23.8 commitments // Our memory analysis assumes 48 bytes for a CompressedHeader (so we should // re-calculate parameters if we compress further) From 3d420d8f28f2d351abf8b0afe90848110e15d50c Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Wed, 31 Aug 2022 18:44:38 -0400 Subject: [PATCH 041/172] Add instructions for headerssync-params.py to release-process.md --- doc/release-process.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/doc/release-process.md b/doc/release-process.md index bdef57243bffa..468efeb7e135a 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -43,6 +43,14 @@ Release Process - On mainnet, the selected value must not be orphaned, so it may be useful to set the height two blocks back from the tip. - Testnet should be set with a height some tens of thousands back from the tip, due to reorgs there. - `nMinimumChainWork` with the "chainwork" value of RPC `getblockheader` using the same height as that selected for the previous step. +* Consider updating the headers synchronization tuning parameters to account for the chainparams updates. + The optimal values change very slowly, so this isn't strictly necessary every release, but doing so doesn't hurt. + - Update configuration variables in [`contrib/devtools/headerssync-params.py`](contrib/devtools/headerssync-params.py): + - Set `TIME` to the software's expected supported lifetime -- after this time, its ability to defend against a high bandwidth timewarp attacker will begin to degrade. + - Set `MINCHAINWORK_HEADERS` to the height used for the `nMinimumChainWork` calculation above. + - Check that the other variables still look reasonable. + - Run the script. 
It works fine in CPython, but PyPy is much faster (seconds instead of minutes): `pypy3 contrib/devtools/headerssync-params.py`. + - Paste the output defining `HEADER_COMMITMENT_PERIOD` and `REDOWNLOAD_BUFFER_SIZE` into the top of [`src/headerssync.cpp`](/src/headerssync.cpp). - Clear the release notes and move them to the wiki (see "Write the release notes" below). - Translations on Transifex: - Pull translations from Transifex into the master branch. From b73d3bbd23220857bf17cbb6401275bf58013b72 Mon Sep 17 00:00:00 2001 From: Suhas Daftuar Date: Thu, 4 May 2023 14:08:37 -0400 Subject: [PATCH 042/172] net_processing: Request assumeutxo background chain blocks Add new PeerManagerImpl::TryDownloadingHistoricalBlocks method and use it to request background chain blocks in addition to blocks normally requested by FindNextBlocksToDownload. Co-authored-by: Ryan Ofsky Co-authored-by: James O'Beirne --- src/net_processing.cpp | 91 +++++++++++++++++++++++++++++++++++++++--- src/validation.cpp | 6 +++ src/validation.h | 16 ++++++-- 3 files changed, 105 insertions(+), 8 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index b046b3ac168c7..46759423663d5 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -892,6 +892,38 @@ class PeerManagerImpl final : public PeerManager */ void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /** Request blocks for the background chainstate, if one is in use. */ + void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** + * \brief Find next blocks to download from a peer after a starting block. + * + * \param vBlocks Vector of blocks to download which will be appended to. + * \param peer Peer which blocks will be downloaded from. + * \param state Pointer to the state of the peer. + * \param pindexWalk Pointer to the starting block to add to vBlocks. + * \param count Maximum number of blocks to allow in vBlocks. No more + * blocks will be added if it reaches this size. + * \param nWindowEnd Maximum height of blocks to allow in vBlocks. No + * blocks will be added above this height. + * \param activeChain Optional pointer to a chain to compare against. If + * provided, any next blocks which are already contained + * in this chain will not be appended to vBlocks, but + * instead will be used to update the + * state->pindexLastCommonBlock pointer. + * \param nodeStaller Optional pointer to a NodeId variable that will receive + * the ID of another peer that might be causing this peer + * to stall. This is set to the ID of the peer which + * first requested the first in-flight block in the + * download window. It is only set if vBlocks is empty at + * the end of this function call and if increasing + * nWindowEnd by 1 would cause it to be non-empty (which + * indicates the download might be stalled because every + * block in the window is in flight and no other peer is + * trying to download the next block). 
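+     *
+     * For illustration only (hypothetical values): a call such as
+     *   FindNextBlocks(vBlocks, peer, state, pindexWalk, 16, pindexWalk->nHeight + 1024)
+     * appends up to 16 successors of pindexWalk with height at most
+     * nWindowEnd + 1 that are neither stored nor in flight; because no
+     * activeChain is passed, state->pindexLastCommonBlock is left untouched.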
+ */ + void FindNextBlocks(std::vector& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /* Multimap used to preserve insertion order */ typedef std::multimap::iterator>> BlockDownloadMap; BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main); @@ -1312,6 +1344,7 @@ void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash } } +// Logic for calculating which blocks to download from a given peer, given our current tip. void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector& vBlocks, NodeId& nodeStaller) { if (count == 0) @@ -1341,12 +1374,47 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) return; - std::vector vToFetch; const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to // download that next block if the window were 1 larger. int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; + + FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller); +} + +void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block) +{ + Assert(from_tip); + Assert(target_block); + + if (vBlocks.size() >= count) { + return; + } + + vBlocks.reserve(count); + CNodeState *state = Assert(State(peer.m_id)); + + if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) { + // This peer can't provide us the complete series of blocks leading up to the + // assumeutxo snapshot base. + // + // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we + // will eventually crash when we try to reorg to it. Let other logic + // deal with whether we disconnect this peer. + // + // TODO at some point in the future, we might choose to request what blocks + // this peer does have from the historical chain, despite it not having a + // complete history beneath the snapshot base. + return; + } + + FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight)); +} + +void PeerManagerImpl::FindNextBlocks(std::vector& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller) +{ + std::vector vToFetch; int nMaxHeight = std::min(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); NodeId waitingfor = -1; while (pindexWalk->nHeight < nMaxHeight) { @@ -1374,8 +1442,8 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co // We wouldn't download this block or its descendants from this peer. 
return; } - if (pindex->nStatus & BLOCK_HAVE_DATA || m_chainman.ActiveChain().Contains(pindex)) { - if (pindex->HaveTxsDownloaded()) + if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) { + if (activeChain && pindex->HaveTxsDownloaded()) state->pindexLastCommonBlock = pindex; } else if (!IsBlockRequested(pindex->GetBlockHash())) { // The block is not already downloaded, and not yet in flight. @@ -1383,7 +1451,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co // We reached the end of the window. if (vBlocks.size() == 0 && waitingfor != peer.m_id) { // We aren't able to fetch anything, but we would be if the download window was one larger. - nodeStaller = waitingfor; + if (nodeStaller) *nodeStaller = waitingfor; } return; } @@ -5847,7 +5915,20 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector vToDownload; NodeId staller = -1; - FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.vBlocksInFlight.size(), vToDownload, staller); + auto get_inflight_budget = [&state]() { + return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast(state.vBlocksInFlight.size())); + }; + + // If a snapshot chainstate is in use, we want to find its next blocks + // before the background chainstate to prioritize getting to network tip. + FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller); + if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) { + TryDownloadingHistoricalBlocks( + *peer, + get_inflight_budget(), + vToDownload, m_chainman.GetBackgroundSyncTip(), + Assert(m_chainman.GetSnapshotBaseBlock())); + } for (const CBlockIndex *pindex : vToDownload) { uint32_t nFetchFlags = GetFetchFlags(*peer); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); diff --git a/src/validation.cpp b/src/validation.cpp index 357b4d422d23a..a12f121dc329a 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -4148,6 +4148,12 @@ bool ChainstateManager::ProcessNewBlock(const std::shared_ptr& blo return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString()); } + Chainstate* bg_chain{WITH_LOCK(cs_main, return BackgroundSyncInProgress() ? m_ibd_chainstate.get() : nullptr)}; + BlockValidationState bg_state; + if (bg_chain && !bg_chain->ActivateBestChain(bg_state, block)) { + return error("%s: [background] ActivateBestChain failed (%s)", __func__, bg_state.ToString()); + } + return true; } diff --git a/src/validation.h b/src/validation.h index 3f0a2312b52ee..319e40447b48d 100644 --- a/src/validation.h +++ b/src/validation.h @@ -881,9 +881,6 @@ class ChainstateManager /** Most recent headers presync progress update, for rate-limiting. */ std::chrono::time_point m_last_presync_update GUARDED_BY(::cs_main) {}; - //! Returns nullptr if no snapshot has been loaded. - const CBlockIndex* GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - //! Return the height of the base block of the snapshot in use, if one exists, else //! nullopt. std::optional GetSnapshotBaseHeight() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); @@ -1034,12 +1031,25 @@ class ChainstateManager //! Otherwise, revert to using the ibd chainstate and shutdown. SnapshotCompletionResult MaybeCompleteSnapshotValidation() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! 
Returns nullptr if no snapshot has been loaded. + const CBlockIndex* GetSnapshotBaseBlock() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! The most-work chain. Chainstate& ActiveChainstate() const; CChain& ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { return ActiveChainstate().m_chain; } int ActiveHeight() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { return ActiveChain().Height(); } CBlockIndex* ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { return ActiveChain().Tip(); } + //! The state of a background sync (for net processing) + bool BackgroundSyncInProgress() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { + return IsUsable(m_snapshot_chainstate.get()) && IsUsable(m_ibd_chainstate.get()); + } + + //! The tip of the background sync chain + const CBlockIndex* GetBackgroundSyncTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) { + return BackgroundSyncInProgress() ? m_ibd_chainstate->m_chain.Tip() : nullptr; + } + node::BlockMap& BlockIndex() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); From c93ef43e4fd4fbc1263cdc9e98ae5856830fe89e Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Wed, 24 May 2023 21:10:12 -0400 Subject: [PATCH 043/172] bugfix: correct is_snapshot_cs in VerifyDB --- src/validation.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/validation.cpp b/src/validation.cpp index a12f121dc329a..a57a66478690d 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -4281,7 +4281,7 @@ VerifyDBResult CVerifyDB::VerifyDB( bool skipped_l3_checks{false}; LogPrintf("Verification progress: 0%%\n"); - const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash}; + const bool is_snapshot_cs{chainstate.m_from_snapshot_blockhash}; for (pindex = chainstate.m_chain.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) { const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))); From c711ca186f8d8a28810be0beedcb615ddcf93163 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Wed, 3 May 2023 15:39:51 -0400 Subject: [PATCH 044/172] assumeutxo: remove snapshot during -reindex{-chainstate} Removing a snapshot chainstate from disk (and memory) is consistent with existing reindex operations. --- src/node/chainstate.cpp | 9 ++++++++- src/validation.cpp | 32 +++++++++++++++++++++++++++----- src/validation.h | 7 ++++--- 3 files changed, 39 insertions(+), 9 deletions(-) diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp index ae1457a87ea27..16ca1d9156ba8 100644 --- a/src/node/chainstate.cpp +++ b/src/node/chainstate.cpp @@ -185,7 +185,14 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize chainman.InitializeChainstate(options.mempool); // Load a chain created from a UTXO snapshot, if any exist. 
- chainman.DetectSnapshotChainstate(options.mempool); + bool has_snapshot = chainman.DetectSnapshotChainstate(options.mempool); + + if (has_snapshot && (options.reindex || options.reindex_chainstate)) { + LogPrintf("[snapshot] deleting snapshot chainstate due to reindexing\n"); + if (!chainman.DeleteSnapshotChainstate()) { + return {ChainstateLoadStatus::FAILURE_FATAL, Untranslated("Couldn't remove snapshot chainstate.")}; + } + } auto [init_status, init_error] = CompleteChainstateInitialization(chainman, cache_sizes, options); if (init_status != ChainstateLoadStatus::SUCCESS) { diff --git a/src/validation.cpp b/src/validation.cpp index a57a66478690d..240543e6ebf59 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5111,7 +5111,7 @@ const AssumeutxoData* ExpectedAssumeutxo( return nullptr; } -static bool DeleteCoinsDBFromDisk(const fs::path db_path, bool is_snapshot) +[[nodiscard]] static bool DeleteCoinsDBFromDisk(const fs::path db_path, bool is_snapshot) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); @@ -5750,15 +5750,20 @@ bool IsBIP30Unspendable(const CBlockIndex& block_index) (block_index.nHeight==91812 && block_index.GetBlockHash() == uint256S("0x00000000000af0aed4792b1acee3d966af36cf5def14935db8de83d6f9306f2f")); } -util::Result Chainstate::InvalidateCoinsDBOnDisk() +static fs::path GetSnapshotCoinsDBPath(Chainstate& cs) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); // Should never be called on a non-snapshot chainstate. - assert(m_from_snapshot_blockhash); - auto storage_path_maybe = this->CoinsDB().StoragePath(); + assert(cs.m_from_snapshot_blockhash); + auto storage_path_maybe = cs.CoinsDB().StoragePath(); // Should never be called with a non-existent storage path. assert(storage_path_maybe); - fs::path snapshot_datadir = *storage_path_maybe; + return *storage_path_maybe; +} + +util::Result Chainstate::InvalidateCoinsDBOnDisk() +{ + fs::path snapshot_datadir = GetSnapshotCoinsDBPath(*this); // Coins views no longer usable. m_coins_views.reset(); @@ -5789,6 +5794,23 @@ util::Result Chainstate::InvalidateCoinsDBOnDisk() return {}; } +bool ChainstateManager::DeleteSnapshotChainstate() +{ + AssertLockHeld(::cs_main); + Assert(m_snapshot_chainstate); + Assert(m_ibd_chainstate); + + fs::path snapshot_datadir = GetSnapshotCoinsDBPath(*m_snapshot_chainstate); + if (!DeleteCoinsDBFromDisk(snapshot_datadir, /*is_snapshot=*/ true)) { + LogPrintf("Deletion of %s failed. Please remove it manually to continue reindexing.\n", + fs::PathToString(snapshot_datadir)); + return false; + } + m_active_chainstate = m_ibd_chainstate.get(); + m_snapshot_chainstate.reset(); + return true; +} + const CBlockIndex* ChainstateManager::GetSnapshotBaseBlock() const { return m_active_chainstate ? m_active_chainstate->SnapshotBase() : nullptr; diff --git a/src/validation.h b/src/validation.h index 319e40447b48d..4e9e91c299ad9 100644 --- a/src/validation.h +++ b/src/validation.h @@ -848,9 +848,6 @@ class ChainstateManager //! Points to either the ibd or snapshot chainstate; indicates our //! most-work chain. //! - //! Once this pointer is set to a corresponding chainstate, it will not - //! be reset until init.cpp:Shutdown(). - //! //! This is especially important when, e.g., calling ActivateBestChain() //! on all chainstates because we are not able to hold ::cs_main going into //! that call. @@ -1203,6 +1200,10 @@ class ChainstateManager void ResetChainstates() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! Remove the snapshot-based chainstate and all on-disk artifacts. 
+ //! Used when reindex{-chainstate} is called during snapshot use. + [[nodiscard]] bool DeleteSnapshotChainstate() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! Switch the active chainstate to one based on a UTXO snapshot that was loaded //! previously. Chainstate& ActivateExistingSnapshot(CTxMemPool* mempool, uint256 base_blockhash) From 434495a8c1496ca23fe35b84499f3daf668d76b8 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Thu, 25 May 2023 13:05:27 -0400 Subject: [PATCH 045/172] chainparams: add blockhash to AssumeutxoData This allows us to reference assumeutxo configuration by blockhash as well as height; this is helpful in future changes when we want to reference assumeutxo configurations before the block index is loaded. --- src/kernel/chainparams.cpp | 18 ++++++------- src/kernel/chainparams.h | 26 +++++++++++++------ .../validation_chainstatemanager_tests.cpp | 4 +-- src/test/validation_tests.cpp | 10 +++---- src/util/vector.h | 13 ++++++++++ src/validation.cpp | 18 +++---------- src/validation.h | 9 ------- 7 files changed, 49 insertions(+), 49 deletions(-) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 7e69c097a6af5..ca418fc6abc07 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -172,8 +172,8 @@ class CMainParams : public CChainParams { } }; - m_assumeutxo_data = MapAssumeutxo{ - // TODO to be specified in a future patch. + m_assumeutxo_data = { + // TODO to be specified in a future patch. }; chainTxData = ChainTxData{ @@ -266,7 +266,7 @@ class CTestNetParams : public CChainParams { } }; - m_assumeutxo_data = MapAssumeutxo{ + m_assumeutxo_data = { // TODO to be specified in a future patch. }; @@ -477,14 +477,12 @@ class CRegTestParams : public CChainParams } }; - m_assumeutxo_data = MapAssumeutxo{ - { - 110, - {AssumeutxoHash{uint256S("0x1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618")}, 110}, - }, + m_assumeutxo_data = { { - 200, - {AssumeutxoHash{uint256S("0x51c8d11d8b5c1de51543c579736e786aa2736206d1e11e627568029ce092cf62")}, 200}, + .height = 110, + .hash_serialized = AssumeutxoHash{uint256S("0x1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618")}, + .nChainTx = 110, + .blockhash = uint256S("0x696e92821f65549c7ee134edceeeeaaa4105647a3c4fd9f298c0aec0ab50425c") }, }; diff --git a/src/kernel/chainparams.h b/src/kernel/chainparams.h index ec1697493c96e..7a5539bc71c54 100644 --- a/src/kernel/chainparams.h +++ b/src/kernel/chainparams.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -44,17 +45,21 @@ struct AssumeutxoHash : public BaseHash { * as valid. */ struct AssumeutxoData { + int height; + //! The expected hash of the deserialized UTXO set. - const AssumeutxoHash hash_serialized; + AssumeutxoHash hash_serialized; //! Used to populate the nChainTx value, which is used during BlockManager::LoadBlockIndex(). //! //! We need to hardcode the value here because this is computed cumulatively using block data, //! which we do not necessarily have at the time of snapshot load. - const unsigned int nChainTx; -}; + unsigned int nChainTx; -using MapAssumeutxo = std::map; + //! The hash of the base block for this snapshot. Used to refer to assumeutxo data + //! prior to having a loaded blockindex. + uint256 blockhash; +}; /** * Holds various statistics on transactions within a chain. Used to estimate @@ -114,9 +119,14 @@ class CChainParams const std::vector& FixedSeeds() const { return vFixedSeeds; } const CCheckpointData& Checkpoints() const { return checkpointData; } - //! 
Get allowed assumeutxo configuration. - //! @see ChainstateManager - const MapAssumeutxo& Assumeutxo() const { return m_assumeutxo_data; } + std::optional AssumeutxoForHeight(int height) const + { + return FindFirst(m_assumeutxo_data, [&](const auto& d) { return d.height == height; }); + } + std::optional AssumeutxoForBlockhash(const uint256& blockhash) const + { + return FindFirst(m_assumeutxo_data, [&](const auto& d) { return d.blockhash == blockhash; }); + } const ChainTxData& TxData() const { return chainTxData; } @@ -169,7 +179,7 @@ class CChainParams bool fDefaultConsistencyChecks; bool m_is_mockable_chain; CCheckpointData checkpointData; - MapAssumeutxo m_assumeutxo_data; + std::vector m_assumeutxo_data; ChainTxData chainTxData; }; diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 7b7be4be9eeb0..74e577c17cbc2 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -289,10 +289,10 @@ struct SnapshotTestSetup : TestChain100Setup { BOOST_CHECK(!chainman.ActiveChain().Genesis()->IsAssumedValid()); } - const AssumeutxoData& au_data = *ExpectedAssumeutxo(snapshot_height, ::Params()); + const auto& au_data = ::Params().AssumeutxoForHeight(snapshot_height); const CBlockIndex* tip = WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()); - BOOST_CHECK_EQUAL(tip->nChainTx, au_data.nChainTx); + BOOST_CHECK_EQUAL(tip->nChainTx, au_data->nChainTx); // To be checked against later when we try loading a subsequent snapshot. uint256 loaded_snapshot_blockhash{*chainman.SnapshotBlockhash()}; diff --git a/src/test/validation_tests.cpp b/src/test/validation_tests.cpp index d00f2ff4d1cac..d34d98c219a83 100644 --- a/src/test/validation_tests.cpp +++ b/src/test/validation_tests.cpp @@ -132,17 +132,17 @@ BOOST_AUTO_TEST_CASE(test_assumeutxo) std::vector bad_heights{0, 100, 111, 115, 209, 211}; for (auto empty : bad_heights) { - const auto out = ExpectedAssumeutxo(empty, *params); + const auto out = params->AssumeutxoForHeight(empty); BOOST_CHECK(!out); } - const auto out110 = *ExpectedAssumeutxo(110, *params); + const auto out110 = *params->AssumeutxoForHeight(110); BOOST_CHECK_EQUAL(out110.hash_serialized.ToString(), "1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618"); BOOST_CHECK_EQUAL(out110.nChainTx, 110U); - const auto out210 = *ExpectedAssumeutxo(200, *params); - BOOST_CHECK_EQUAL(out210.hash_serialized.ToString(), "51c8d11d8b5c1de51543c579736e786aa2736206d1e11e627568029ce092cf62"); - BOOST_CHECK_EQUAL(out210.nChainTx, 200U); + const auto out110_2 = *params->AssumeutxoForBlockhash(uint256S("0x696e92821f65549c7ee134edceeeeaaa4105647a3c4fd9f298c0aec0ab50425c")); + BOOST_CHECK_EQUAL(out110_2.hash_serialized.ToString(), "1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618"); + BOOST_CHECK_EQUAL(out110_2.nChainTx, 110U); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/util/vector.h b/src/util/vector.h index 40ff73c293c17..1513562f1bee7 100644 --- a/src/util/vector.h +++ b/src/util/vector.h @@ -5,7 +5,9 @@ #ifndef BITCOIN_UTIL_VECTOR_H #define BITCOIN_UTIL_VECTOR_H +#include #include +#include #include #include #include @@ -67,4 +69,15 @@ inline void ClearShrink(V& v) noexcept V{}.swap(v); } +template +inline std::optional FindFirst(const std::vector& vec, const L fnc) +{ + for (const auto& el : vec) { + if (fnc(el)) { + return el; + } + } + return std::nullopt; +} + #endif // BITCOIN_UTIL_VECTOR_H diff --git a/src/validation.cpp 
b/src/validation.cpp index 240543e6ebf59..8afd37726547e 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5099,18 +5099,6 @@ Chainstate& ChainstateManager::InitializeChainstate(CTxMemPool* mempool) return *m_active_chainstate; } -const AssumeutxoData* ExpectedAssumeutxo( - const int height, const CChainParams& chainparams) -{ - const MapAssumeutxo& valid_assumeutxos_map = chainparams.Assumeutxo(); - const auto assumeutxo_found = valid_assumeutxos_map.find(height); - - if (assumeutxo_found != valid_assumeutxos_map.end()) { - return &assumeutxo_found->second; - } - return nullptr; -} - [[nodiscard]] static bool DeleteCoinsDBFromDisk(const fs::path db_path, bool is_snapshot) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { @@ -5295,7 +5283,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot( CBlockIndex* snapshot_start_block = WITH_LOCK(::cs_main, return m_blockman.LookupBlockIndex(base_blockhash)); if (!snapshot_start_block) { - // Needed for ComputeUTXOStats and ExpectedAssumeutxo to determine the + // Needed for ComputeUTXOStats to determine the // height and to avoid a crash when base_blockhash.IsNull() LogPrintf("[snapshot] Did not find snapshot start blockheader %s\n", base_blockhash.ToString()); @@ -5303,7 +5291,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot( } int base_height = snapshot_start_block->nHeight; - auto maybe_au_data = ExpectedAssumeutxo(base_height, GetParams()); + const auto& maybe_au_data = GetParams().AssumeutxoForHeight(base_height); if (!maybe_au_data) { LogPrintf("[snapshot] assumeutxo height in snapshot metadata not recognized " @@ -5572,7 +5560,7 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation() CCoinsViewDB& ibd_coins_db = m_ibd_chainstate->CoinsDB(); m_ibd_chainstate->ForceFlushStateToDisk(); - auto maybe_au_data = ExpectedAssumeutxo(curr_height, m_options.chainparams); + const auto& maybe_au_data = m_options.chainparams.AssumeutxoForHeight(curr_height); if (!maybe_au_data) { LogPrintf("[snapshot] assumeutxo data not found for height " "(%d) - refusing to validate snapshot\n", curr_height); diff --git a/src/validation.h b/src/validation.h index 4e9e91c299ad9..c2434264d6731 100644 --- a/src/validation.h +++ b/src/validation.h @@ -1242,15 +1242,6 @@ bool DeploymentEnabled(const ChainstateManager& chainman, DEP dep) return DeploymentEnabled(chainman.GetConsensus(), dep); } -/** - * Return the expected assumeutxo value for a given height, if one exists. - * - * @param[in] height Get the assumeutxo value for this height. - * - * @returns empty if no assumeutxo configuration exists for the given height. - */ -const AssumeutxoData* ExpectedAssumeutxo(const int height, const CChainParams& params); - /** Identifies blocks that overwrote an existing coinbase output in the UTXO set (see BIP30) */ bool IsBIP30Repeat(const CBlockIndex& block_index); From 9f2318c76cc6986d48e13831cf5bd8dab194fdf4 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Mon, 1 May 2023 17:39:28 -0400 Subject: [PATCH 046/172] validation: MaybeRebalanceCaches when chain leaves IBD Check to see if we need to rebalance caches across chainstates when a chain leaves IBD. 
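The mechanism here is a simple edge trigger: sample the IBD state before connecting blocks, sample it again afterwards, and rebalance only on the transition out of IBD, so the cache rebalance runs once rather than on every tip update. A minimal, self-contained sketch of that pattern follows; the class and the height-based IBD heuristic are illustrative stand-ins, not Bitcoin Core's actual code.

#include <iostream>

// Hypothetical stand-in for ChainstateManager; names and the height-based
// IBD heuristic are assumptions for illustration only.
struct DemoChainstateManager {
    int tip_height{0};
    bool IsInitialBlockDownload() const { return tip_height < 100; }
    void MaybeRebalanceCaches() { std::cout << "rebalancing caches across chainstates\n"; }
};

void ConnectBlocks(DemoChainstateManager& chainman, int count)
{
    const bool was_in_ibd = chainman.IsInitialBlockDownload();
    chainman.tip_height += count; // stand-in for actually connecting blocks
    const bool still_in_ibd = chainman.IsInitialBlockDownload();

    // Rebalance exactly once, at the moment the node leaves IBD.
    if (was_in_ibd && !still_in_ibd) {
        chainman.MaybeRebalanceCaches();
    }
}

int main()
{
    DemoChainstateManager chainman;
    ConnectBlocks(chainman, 50);  // still in IBD: no rebalance
    ConnectBlocks(chainman, 60);  // crosses the threshold: rebalance fires once
    ConnectBlocks(chainman, 10);  // already out of IBD: no rebalance
}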
--- src/validation.cpp | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 8afd37726547e..e2091a2c9a85e 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -3192,6 +3192,7 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< CBlockIndex *pindexMostWork = nullptr; CBlockIndex *pindexNewTip = nullptr; + bool exited_ibd{false}; do { // Block until the validation queue drains. This should largely // never happen in normal operation, however may happen during @@ -3205,6 +3206,7 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< LOCK(cs_main); // Lock transaction pool for at least as long as it takes for connectTrace to be consumed LOCK(MempoolMutex()); + const bool was_in_ibd = m_chainman.IsInitialBlockDownload(); CBlockIndex* starting_tip = m_chain.Tip(); bool blocks_connected = false; do { @@ -3252,16 +3254,21 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< if (!blocks_connected) return true; const CBlockIndex* pindexFork = m_chain.FindFork(starting_tip); - bool fInitialDownload = m_chainman.IsInitialBlockDownload(); + bool still_in_ibd = m_chainman.IsInitialBlockDownload(); + + if (was_in_ibd && !still_in_ibd) { + // Active chainstate has exited IBD. + exited_ibd = true; + } // Notify external listeners about the new tip. // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected if (pindexFork != pindexNewTip) { // Notify ValidationInterface subscribers - GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, fInitialDownload); + GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, still_in_ibd); // Always notify the UI if a new block tip was connected - if (kernel::IsInterrupted(m_chainman.GetNotifications().blockTip(GetSynchronizationState(fInitialDownload), *pindexNewTip))) { + if (kernel::IsInterrupted(m_chainman.GetNotifications().blockTip(GetSynchronizationState(still_in_ibd), *pindexNewTip))) { // Just breaking and returning success for now. This could // be changed to bubble up the kernel::Interrupted value to // the caller so the caller could distinguish between @@ -3272,6 +3279,13 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< } // When we reach this point, we switched to a new tip (stored in pindexNewTip). + if (exited_ibd) { + // If a background chainstate is in use, we may need to rebalance our + // allocation of caches once a chainstate exits initial block download. + LOCK(::cs_main); + m_chainman.MaybeRebalanceCaches(); + } + if (WITH_LOCK(::cs_main, return m_disabled)) { // Background chainstate has reached the snapshot base block, so exit. 
break; From c6af23c5179cc383f8e6c275373af8d11e6a989f Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Thu, 10 Nov 2022 12:03:39 -0500 Subject: [PATCH 047/172] validation: add ChainstateRole --- src/kernel/chain.cpp | 11 +++++++++++ src/kernel/chain.h | 20 ++++++++++++++++++++ src/validation.cpp | 10 ++++++++++ src/validation.h | 7 +++++++ 4 files changed, 48 insertions(+) diff --git a/src/kernel/chain.cpp b/src/kernel/chain.cpp index 1c877866d0a28..318c956b386dc 100644 --- a/src/kernel/chain.cpp +++ b/src/kernel/chain.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -25,3 +26,13 @@ interfaces::BlockInfo MakeBlockInfo(const CBlockIndex* index, const CBlock* data return info; } } // namespace kernel + +std::ostream& operator<<(std::ostream& os, const ChainstateRole& role) { + switch(role) { + case ChainstateRole::NORMAL: os << "normal"; break; + case ChainstateRole::ASSUMEDVALID: os << "assumedvalid"; break; + case ChainstateRole::BACKGROUND: os << "background"; break; + default: os.setstate(std::ios_base::failbit); + } + return os; +} diff --git a/src/kernel/chain.h b/src/kernel/chain.h index f0750f82663f7..feba24a557e69 100644 --- a/src/kernel/chain.h +++ b/src/kernel/chain.h @@ -5,6 +5,8 @@ #ifndef BITCOIN_KERNEL_CHAIN_H #define BITCOIN_KERNEL_CHAIN_H +#include + class CBlock; class CBlockIndex; namespace interfaces { @@ -14,6 +16,24 @@ struct BlockInfo; namespace kernel { //! Return data from block index. interfaces::BlockInfo MakeBlockInfo(const CBlockIndex* block_index, const CBlock* data = nullptr); + } // namespace kernel +//! This enum describes the various roles a specific Chainstate instance can take. +//! Other parts of the system sometimes need to vary in behavior depending on the +//! existence of a background validation chainstate, e.g. when building indexes. +enum class ChainstateRole { + // Single chainstate in use, "normal" IBD mode. + NORMAL, + + // Doing IBD-style validation in the background. Implies use of an assumed-valid + // chainstate. + BACKGROUND, + + // Active assumed-valid chainstate. Implies use of a background IBD chainstate. + ASSUMEDVALID, +}; + +std::ostream& operator<<(std::ostream& os, const ChainstateRole& role); + #endif // BITCOIN_KERNEL_CHAIN_H diff --git a/src/validation.cpp b/src/validation.cpp index e2091a2c9a85e..9a543d3a389a3 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5813,6 +5813,16 @@ bool ChainstateManager::DeleteSnapshotChainstate() return true; } +ChainstateRole Chainstate::GetRole() const +{ + if (m_chainman.GetAll().size() <= 1) { + return ChainstateRole::NORMAL; + } + return (this != &m_chainman.ActiveChainstate()) ? + ChainstateRole::BACKGROUND : + ChainstateRole::ASSUMEDVALID; +} + const CBlockIndex* ChainstateManager::GetSnapshotBaseBlock() const { return m_active_chainstate ? m_active_chainstate->SnapshotBase() : nullptr; diff --git a/src/validation.h b/src/validation.h index c2434264d6731..38f57ed1b597a 100644 --- a/src/validation.h +++ b/src/validation.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -511,6 +512,12 @@ class Chainstate ChainstateManager& chainman, std::optional from_snapshot_blockhash = std::nullopt); + //! Return the current role of the chainstate. See `ChainstateManager` + //! documentation for a description of the different types of chainstates. + //! + //! @sa ChainstateRole + ChainstateRole GetRole() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + /** * Initialize the CoinsViews UTXO set database management data structures. 
The in-memory
 * cache is initialized separately.

From 1e59acdf17309f567c370885f0cf02605e2baa58 Mon Sep 17 00:00:00 2001
From: James O'Beirne
Date: Thu, 24 Aug 2023 16:51:16 -0400
Subject: [PATCH 048/172] validation: only call UpdatedBlockTip for active chainstate

This notification isn't needed for background chainstates.
`kernel::Notifications::blockTip` is also skipped.
---
 src/validation.cpp        | 2 +-
 src/validationinterface.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/validation.cpp b/src/validation.cpp
index 9a543d3a389a3..4873eb964ca3e 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -3263,7 +3263,7 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr<
         // Notify external listeners about the new tip.
         // Enqueue while holding cs_main to ensure that UpdatedBlockTip is called in the order in which blocks are connected
-        if (pindexFork != pindexNewTip) {
+        if (this == &m_chainman.ActiveChainstate() && pindexFork != pindexNewTip) {
             // Notify ValidationInterface subscribers
             GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork, still_in_ibd);

diff --git a/src/validationinterface.h b/src/validationinterface.h
index 8c20cc8ffbf73..5bdd7e0123892 100644
--- a/src/validationinterface.h
+++ b/src/validationinterface.h
@@ -87,7 +87,7 @@ class CValidationInterface {
      * but may not be called on every intermediate tip. If the latter behavior is desired,
      * subscribe to BlockConnected() instead.
      *
-     * Called on a background thread.
+     * Called on a background thread. Only called for the active chainstate.
      */
     virtual void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {}
     /**

From 4d8f4dcb450d31e4847804e62bf91545b949fa14 Mon Sep 17 00:00:00 2001
From: James O'Beirne
Date: Mon, 23 Sep 2019 13:54:21 -0400
Subject: [PATCH 049/172] validation: pass ChainstateRole for validationinterface calls

This allows consumers to decide how to handle events from background or
assumedvalid chainstates.
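To show what the role parameter enables for consumers, here is a minimal sketch of a listener that filters events by role, the same pattern the zmq, wallet, and net_processing patches below apply. The listener class and callback shape are simplified stand-ins, not the real CValidationInterface API.

#include <iostream>

// Mirrors the ChainstateRole enum introduced in kernel/chain.h.
enum class ChainstateRole { NORMAL, BACKGROUND, ASSUMEDVALID };

struct DemoListener {
    // Hypothetical callback shaped like BlockConnected(role, block, pindex).
    void BlockConnected(ChainstateRole role, int height)
    {
        if (role == ChainstateRole::BACKGROUND) {
            // e.g. a wallet: blocks replayed by the background validation
            // chainstate are historical and not relevant to current balances.
            return;
        }
        std::cout << "processing connected block at height " << height << "\n";
    }
};

int main()
{
    DemoListener listener;
    listener.BlockConnected(ChainstateRole::BACKGROUND, 1000);     // ignored
    listener.BlockConnected(ChainstateRole::ASSUMEDVALID, 800000); // processed
}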
--- src/bench/wallet_create_tx.cpp | 2 +- src/index/base.cpp | 4 ++-- src/index/base.h | 4 ++-- src/interfaces/chain.h | 5 +++-- src/net_processing.cpp | 8 ++++++-- src/node/interfaces.cpp | 8 +++++--- src/test/coinstatsindex_tests.cpp | 2 +- src/test/util/validation.cpp | 8 ++++++-- src/test/util/validation.h | 6 +++++- src/test/validation_block_tests.cpp | 2 +- src/test/validationinterface_tests.cpp | 1 + src/validation.cpp | 5 +++-- src/validationinterface.cpp | 13 +++++++------ src/validationinterface.h | 17 ++++++++++------- src/wallet/test/fuzz/notifications.cpp | 5 +++-- src/wallet/wallet.cpp | 9 +++++---- src/wallet/wallet.h | 4 ++-- src/zmq/zmqnotificationinterface.cpp | 3 ++- src/zmq/zmqnotificationinterface.h | 2 +- 19 files changed, 66 insertions(+), 42 deletions(-) diff --git a/src/bench/wallet_create_tx.cpp b/src/bench/wallet_create_tx.cpp index 5e5bc76fd21c5..160534b63ca41 100644 --- a/src/bench/wallet_create_tx.cpp +++ b/src/bench/wallet_create_tx.cpp @@ -70,7 +70,7 @@ void generateFakeBlock(const CChainParams& params, // notify wallet const auto& pindex = WITH_LOCK(::cs_main, return context.chainman->ActiveChain().Tip()); - wallet.blockConnected(kernel::MakeBlockInfo(pindex, &block)); + wallet.blockConnected(ChainstateRole::NORMAL, kernel::MakeBlockInfo(pindex, &block)); } struct PreSelectInputs { diff --git a/src/index/base.cpp b/src/index/base.cpp index f18205a76ffd0..98a8bad102db3 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -250,7 +250,7 @@ bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_ti return true; } -void BaseIndex::BlockConnected(const std::shared_ptr& block, const CBlockIndex* pindex) +void BaseIndex::BlockConnected(ChainstateRole role, const std::shared_ptr& block, const CBlockIndex* pindex) { if (!m_synced) { return; @@ -296,7 +296,7 @@ void BaseIndex::BlockConnected(const std::shared_ptr& block, const } } -void BaseIndex::ChainStateFlushed(const CBlockLocator& locator) +void BaseIndex::ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator) { if (!m_synced) { return; diff --git a/src/index/base.h b/src/index/base.h index 9b2a41dc92b13..b93103eb36606 100644 --- a/src/index/base.h +++ b/src/index/base.h @@ -102,9 +102,9 @@ class BaseIndex : public CValidationInterface Chainstate* m_chainstate{nullptr}; const std::string m_name; - void BlockConnected(const std::shared_ptr& block, const CBlockIndex* pindex) override; + void BlockConnected(ChainstateRole role, const std::shared_ptr& block, const CBlockIndex* pindex) override; - void ChainStateFlushed(const CBlockLocator& locator) override; + void ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator) override; /// Initialize internal state from the database and block index. 
[[nodiscard]] virtual bool CustomInit(const std::optional& block) { return true; } diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h index b5243725ad0e7..dea868f844da4 100644 --- a/src/interfaces/chain.h +++ b/src/interfaces/chain.h @@ -27,6 +27,7 @@ class Coin; class uint256; enum class MemPoolRemovalReason; enum class RBFTransactionState; +enum class ChainstateRole; struct bilingual_str; struct CBlockLocator; struct FeeCalculation; @@ -310,10 +311,10 @@ class Chain virtual ~Notifications() {} virtual void transactionAddedToMempool(const CTransactionRef& tx) {} virtual void transactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRemovalReason reason) {} - virtual void blockConnected(const BlockInfo& block) {} + virtual void blockConnected(ChainstateRole role, const BlockInfo& block) {} virtual void blockDisconnected(const BlockInfo& block) {} virtual void updatedBlockTip() {} - virtual void chainStateFlushed(const CBlockLocator& locator) {} + virtual void chainStateFlushed(ChainstateRole role, const CBlockLocator& locator) {} }; //! Register handler for notifications. diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 46759423663d5..12dca182c3800 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -483,7 +484,7 @@ class PeerManagerImpl final : public PeerManager CTxMemPool& pool, Options opts); /** Overridden from CValidationInterface. */ - void BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindexConnected) override + void BlockConnected(ChainstateRole role, const std::shared_ptr& pblock, const CBlockIndex* pindexConnected) override EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex); void BlockDisconnected(const std::shared_ptr &block, const CBlockIndex* pindex) override EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex); @@ -1911,7 +1912,10 @@ void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) * announcements for them. Also save the time of the last tip update and * possibly reduce dynamic block stalling timeout. 
*/ -void PeerManagerImpl::BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindex) +void PeerManagerImpl::BlockConnected( + ChainstateRole role, + const std::shared_ptr& pblock, + const CBlockIndex* pindex) { m_orphanage.EraseForBlock(*pblock); m_last_tip_update = GetTime(); diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index e0c40036d90d2..4baa0da67cd52 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -434,9 +434,9 @@ class NotificationsProxy : public CValidationInterface { m_notifications->transactionRemovedFromMempool(tx, reason); } - void BlockConnected(const std::shared_ptr& block, const CBlockIndex* index) override + void BlockConnected(ChainstateRole role, const std::shared_ptr& block, const CBlockIndex* index) override { - m_notifications->blockConnected(kernel::MakeBlockInfo(index, block.get())); + m_notifications->blockConnected(role, kernel::MakeBlockInfo(index, block.get())); } void BlockDisconnected(const std::shared_ptr& block, const CBlockIndex* index) override { @@ -446,7 +446,9 @@ class NotificationsProxy : public CValidationInterface { m_notifications->updatedBlockTip(); } - void ChainStateFlushed(const CBlockLocator& locator) override { m_notifications->chainStateFlushed(locator); } + void ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator) override { + m_notifications->chainStateFlushed(role, locator); + } std::shared_ptr m_notifications; }; diff --git a/src/test/coinstatsindex_tests.cpp b/src/test/coinstatsindex_tests.cpp index 787a196a0ca42..50f3f7d833682 100644 --- a/src/test/coinstatsindex_tests.cpp +++ b/src/test/coinstatsindex_tests.cpp @@ -105,7 +105,7 @@ BOOST_FIXTURE_TEST_CASE(coinstatsindex_unclean_shutdown, TestChain100Setup) // Send block connected notification, then stop the index without // sending a chainstate flushed notification. Prior to #24138, this // would cause the index to be corrupted and fail to reload. 
- ValidationInterfaceTest::BlockConnected(index, new_block, new_block_index); + ValidationInterfaceTest::BlockConnected(ChainstateRole::NORMAL, index, new_block, new_block_index); index.Stop(); } diff --git a/src/test/util/validation.cpp b/src/test/util/validation.cpp index 2d5562ae66c11..bcd6a7a7dc330 100644 --- a/src/test/util/validation.cpp +++ b/src/test/util/validation.cpp @@ -22,7 +22,11 @@ void TestChainstateManager::JumpOutOfIbd() Assert(!IsInitialBlockDownload()); } -void ValidationInterfaceTest::BlockConnected(CValidationInterface& obj, const std::shared_ptr& block, const CBlockIndex* pindex) +void ValidationInterfaceTest::BlockConnected( + ChainstateRole role, + CValidationInterface& obj, + const std::shared_ptr& block, + const CBlockIndex* pindex) { - obj.BlockConnected(block, pindex); + obj.BlockConnected(role, block, pindex); } diff --git a/src/test/util/validation.h b/src/test/util/validation.h index 64654f3fb6137..45ef773409a14 100644 --- a/src/test/util/validation.h +++ b/src/test/util/validation.h @@ -19,7 +19,11 @@ struct TestChainstateManager : public ChainstateManager { class ValidationInterfaceTest { public: - static void BlockConnected(CValidationInterface& obj, const std::shared_ptr& block, const CBlockIndex* pindex); + static void BlockConnected( + ChainstateRole role, + CValidationInterface& obj, + const std::shared_ptr& block, + const CBlockIndex* pindex); }; #endif // BITCOIN_TEST_UTIL_VALIDATION_H diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp index d1463634cc209..411371f7c16b0 100644 --- a/src/test/validation_block_tests.cpp +++ b/src/test/validation_block_tests.cpp @@ -43,7 +43,7 @@ struct TestSubscriber final : public CValidationInterface { BOOST_CHECK_EQUAL(m_expected_tip, pindexNew->GetBlockHash()); } - void BlockConnected(const std::shared_ptr& block, const CBlockIndex* pindex) override + void BlockConnected(ChainstateRole role, const std::shared_ptr& block, const CBlockIndex* pindex) override { BOOST_CHECK_EQUAL(m_expected_tip, block->hashPrevBlock); BOOST_CHECK_EQUAL(m_expected_tip, pindex->pprev->GetBlockHash()); diff --git a/src/test/validationinterface_tests.cpp b/src/test/validationinterface_tests.cpp index fcd0b25b3887c..5979441057cc8 100644 --- a/src/test/validationinterface_tests.cpp +++ b/src/test/validationinterface_tests.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/src/validation.cpp b/src/validation.cpp index 4873eb964ca3e..8c657839e8d3a 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5,6 +5,7 @@ #include +#include #include #include @@ -2645,7 +2646,7 @@ bool Chainstate::FlushStateToDisk( } if (full_flush_completed) { // Update best block in wallet (so we can detect restored wallets). 
- GetMainSignals().ChainStateFlushed(m_chain.GetLocator()); + GetMainSignals().ChainStateFlushed(this->GetRole(), m_chain.GetLocator()); } } catch (const std::runtime_error& e) { return FatalError(m_chainman.GetNotifications(), state, std::string("System error while flushing: ") + e.what()); @@ -3239,7 +3240,7 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< for (const PerBlockConnectTrace& trace : connectTrace.GetBlocksConnected()) { assert(trace.pblock && trace.pindex); - GetMainSignals().BlockConnected(trace.pblock, trace.pindex); + GetMainSignals().BlockConnected(this->GetRole(), trace.pblock, trace.pindex); } // This will have been toggled in diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp index d344c8bfbda04..9241395ad54ed 100644 --- a/src/validationinterface.cpp +++ b/src/validationinterface.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -223,9 +224,9 @@ void CMainSignals::TransactionRemovedFromMempool(const CTransactionRef& tx, MemP RemovalReasonToString(reason)); } -void CMainSignals::BlockConnected(const std::shared_ptr &pblock, const CBlockIndex *pindex) { - auto event = [pblock, pindex, this] { - m_internals->Iterate([&](CValidationInterface& callbacks) { callbacks.BlockConnected(pblock, pindex); }); +void CMainSignals::BlockConnected(ChainstateRole role, const std::shared_ptr &pblock, const CBlockIndex *pindex) { + auto event = [role, pblock, pindex, this] { + m_internals->Iterate([&](CValidationInterface& callbacks) { callbacks.BlockConnected(role, pblock, pindex); }); }; ENQUEUE_AND_LOG_EVENT(event, "%s: block hash=%s block height=%d", __func__, pblock->GetHash().ToString(), @@ -242,9 +243,9 @@ void CMainSignals::BlockDisconnected(const std::shared_ptr& pblock pindex->nHeight); } -void CMainSignals::ChainStateFlushed(const CBlockLocator &locator) { - auto event = [locator, this] { - m_internals->Iterate([&](CValidationInterface& callbacks) { callbacks.ChainStateFlushed(locator); }); +void CMainSignals::ChainStateFlushed(ChainstateRole role, const CBlockLocator &locator) { + auto event = [role, locator, this] { + m_internals->Iterate([&](CValidationInterface& callbacks) { callbacks.ChainStateFlushed(role, locator); }); }; ENQUEUE_AND_LOG_EVENT(event, "%s: block hash=%s", __func__, locator.IsNull() ? "null" : locator.vHave.front().ToString()); diff --git a/src/validationinterface.h b/src/validationinterface.h index 5bdd7e0123892..eb15aa4d5f707 100644 --- a/src/validationinterface.h +++ b/src/validationinterface.h @@ -7,6 +7,7 @@ #define BITCOIN_VALIDATIONINTERFACE_H #include +#include #include // CTransaction(Ref) #include @@ -136,11 +137,12 @@ class CValidationInterface { * * Called on a background thread. */ - virtual void BlockConnected(const std::shared_ptr &block, const CBlockIndex *pindex) {} + virtual void BlockConnected(ChainstateRole role, const std::shared_ptr &block, const CBlockIndex *pindex) {} /** * Notifies listeners of a block being disconnected * - * Called on a background thread. + * Called on a background thread. Only called for the active chainstate, since + * background chainstates should never disconnect blocks. */ virtual void BlockDisconnected(const std::shared_ptr &block, const CBlockIndex* pindex) {} /** @@ -159,17 +161,18 @@ class CValidationInterface { * * Called on a background thread. 
*/ - virtual void ChainStateFlushed(const CBlockLocator &locator) {} + virtual void ChainStateFlushed(ChainstateRole role, const CBlockLocator &locator) {} /** * Notifies listeners of a block validation result. * If the provided BlockValidationState IsValid, the provided block * is guaranteed to be the current best block at the time the - * callback was generated (not necessarily now) + * callback was generated (not necessarily now). */ virtual void BlockChecked(const CBlock&, const BlockValidationState&) {} /** * Notifies listeners that a block which builds directly on our current tip - * has been received and connected to the headers tree, though not validated yet */ + * has been received and connected to the headers tree, though not validated yet. + */ virtual void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr& block) {}; friend class CMainSignals; friend class ValidationInterfaceTest; @@ -199,9 +202,9 @@ class CMainSignals { void UpdatedBlockTip(const CBlockIndex *, const CBlockIndex *, bool fInitialDownload); void TransactionAddedToMempool(const CTransactionRef&, uint64_t mempool_sequence); void TransactionRemovedFromMempool(const CTransactionRef&, MemPoolRemovalReason, uint64_t mempool_sequence); - void BlockConnected(const std::shared_ptr &, const CBlockIndex *pindex); + void BlockConnected(ChainstateRole, const std::shared_ptr &, const CBlockIndex *pindex); void BlockDisconnected(const std::shared_ptr &, const CBlockIndex* pindex); - void ChainStateFlushed(const CBlockLocator &); + void ChainStateFlushed(ChainstateRole, const CBlockLocator &); void BlockChecked(const CBlock&, const BlockValidationState&); void NewPoWValidBlock(const CBlockIndex *, const std::shared_ptr&); }; diff --git a/src/wallet/test/fuzz/notifications.cpp b/src/wallet/test/fuzz/notifications.cpp index 42accafe5b0ae..abd788f96fbe9 100644 --- a/src/wallet/test/fuzz/notifications.cpp +++ b/src/wallet/test/fuzz/notifications.cpp @@ -2,6 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include #include #include #include @@ -145,8 +146,8 @@ FUZZ_TARGET(wallet_notifications, .init = initialize_setup) // time to the maximum value. This ensures that the wallet's birth time is always // earlier than this maximum time. info.chain_time_max = std::numeric_limits::max(); - a.wallet->blockConnected(info); - b.wallet->blockConnected(info); + a.wallet->blockConnected(ChainstateRole::NORMAL, info); + b.wallet->blockConnected(ChainstateRole::NORMAL, info); // Store the coins for the next block Coins coins_new; for (const auto& tx : block.vtx) { diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 245990841950c..c840c2ee1f3d8 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -626,7 +627,7 @@ bool CWallet::ChangeWalletPassphrase(const SecureString& strOldWalletPassphrase, return false; } -void CWallet::chainStateFlushed(const CBlockLocator& loc) +void CWallet::chainStateFlushed(ChainstateRole role, const CBlockLocator& loc) { // Don't update the best block until the chain is attached so that in case of a shutdown, // the rescan will be restarted at next startup. 
@@ -1462,7 +1463,7 @@ void CWallet::transactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRe } } -void CWallet::blockConnected(const interfaces::BlockInfo& block) +void CWallet::blockConnected(ChainstateRole role, const interfaces::BlockInfo& block) { assert(block.data); LOCK(cs_wallet); @@ -2941,7 +2942,7 @@ std::shared_ptr CWallet::Create(WalletContext& context, const std::stri } if (chain) { - walletInstance->chainStateFlushed(chain->getTipLocator()); + walletInstance->chainStateFlushed(ChainstateRole::NORMAL, chain->getTipLocator()); } } else if (wallet_creation_flags & WALLET_FLAG_DISABLE_PRIVATE_KEYS) { // Make it impossible to disable private keys after creation @@ -3227,7 +3228,7 @@ bool CWallet::AttachChain(const std::shared_ptr& walletInstance, interf } } walletInstance->m_attaching_chain = false; - walletInstance->chainStateFlushed(chain.getTipLocator()); + walletInstance->chainStateFlushed(ChainstateRole::NORMAL, chain.getTipLocator()); walletInstance->GetDatabase().IncrementUpdateCounter(); } walletInstance->m_attaching_chain = false; diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 5adb8b6e2703a..9333493a6eebc 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -599,7 +599,7 @@ class CWallet final : public WalletStorage, public interfaces::Chain::Notificati CWalletTx* AddToWallet(CTransactionRef tx, const TxState& state, const UpdateWalletTxFn& update_wtx=nullptr, bool fFlushOnClose=true, bool rescanning_old_block = false); bool LoadToWallet(const uint256& hash, const UpdateWalletTxFn& fill_wtx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); void transactionAddedToMempool(const CTransactionRef& tx) override; - void blockConnected(const interfaces::BlockInfo& block) override; + void blockConnected(ChainstateRole role, const interfaces::BlockInfo& block) override; void blockDisconnected(const interfaces::BlockInfo& block) override; void updatedBlockTip() override; int64_t RescanFromTime(int64_t startTime, const WalletRescanReserver& reserver, bool update); @@ -777,7 +777,7 @@ class CWallet final : public WalletStorage, public interfaces::Chain::Notificati /** should probably be renamed to IsRelevantToMe */ bool IsFromMe(const CTransaction& tx) const; CAmount GetDebit(const CTransaction& tx, const isminefilter& filter) const; - void chainStateFlushed(const CBlockLocator& loc) override; + void chainStateFlushed(ChainstateRole role, const CBlockLocator& loc) override; DBErrors LoadWallet(); DBErrors ZapSelectTx(std::vector& vHashIn, std::vector& vHashOut) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp index 6755368249f53..97355b45a7a14 100644 --- a/src/zmq/zmqnotificationinterface.cpp +++ b/src/zmq/zmqnotificationinterface.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -170,7 +171,7 @@ void CZMQNotificationInterface::TransactionRemovedFromMempool(const CTransaction }); } -void CZMQNotificationInterface::BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindexConnected) +void CZMQNotificationInterface::BlockConnected(ChainstateRole role, const std::shared_ptr& pblock, const CBlockIndex* pindexConnected) { for (const CTransactionRef& ptx : pblock->vtx) { const CTransaction& tx = *ptx; diff --git a/src/zmq/zmqnotificationinterface.h b/src/zmq/zmqnotificationinterface.h index ce67633b30f8b..4246c53bd3718 100644 --- a/src/zmq/zmqnotificationinterface.h +++ b/src/zmq/zmqnotificationinterface.h @@ -33,7 +33,7 @@ class CZMQNotificationInterface 
final : public CValidationInterface // CValidationInterface void TransactionAddedToMempool(const CTransactionRef& tx, uint64_t mempool_sequence) override; void TransactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRemovalReason reason, uint64_t mempool_sequence) override; - void BlockConnected(const std::shared_ptr& pblock, const CBlockIndex* pindexConnected) override; + void BlockConnected(ChainstateRole role, const std::shared_ptr& pblock, const CBlockIndex* pindexConnected) override; void BlockDisconnected(const std::shared_ptr& pblock, const CBlockIndex* pindexDisconnected) override; void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override; From f073917a9e7ba423643dcae0339776470b628f65 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Thu, 10 Nov 2022 16:09:25 -0500 Subject: [PATCH 050/172] validationinterface: only send zmq notifications for active --- doc/zmq.md | 4 ++-- src/zmq/zmqnotificationinterface.cpp | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/zmq.md b/doc/zmq.md index 4055505d7480b..07c340fb99e8f 100644 --- a/doc/zmq.md +++ b/doc/zmq.md @@ -113,11 +113,11 @@ Where the 8-byte uints correspond to the mempool sequence number. | hashtx | <32-byte transaction hash in Little Endian> | -`rawblock`: Notifies when the chain tip is updated. Messages are ZMQ multipart messages with three parts. The first part is the topic (`rawblock`), the second part is the serialized block, and the last part is a sequence number (representing the message count to detect lost messages). +`rawblock`: Notifies when the chain tip is updated. When assumeutxo is in use, this notification will not be issued for historical blocks connected to the background validation chainstate. Messages are ZMQ multipart messages with three parts. The first part is the topic (`rawblock`), the second part is the serialized block, and the last part is a sequence number (representing the message count to detect lost messages). | rawblock | | -`hashblock`: Notifies when the chain tip is updated. Messages are ZMQ multipart messages with three parts. The first part is the topic (`hashblock`), the second part is the 32-byte block hash, and the last part is a sequence number (representing the message count to detect lost messages). +`hashblock`: Notifies when the chain tip is updated. When assumeutxo is in use, this notification will not be issued for historical blocks connected to the background validation chainstate. Messages are ZMQ multipart messages with three parts. The first part is the topic (`hashblock`), the second part is the 32-byte block hash, and the last part is a sequence number (representing the message count to detect lost messages). 
| hashblock | <32-byte block hash in Little Endian> | diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp index 97355b45a7a14..03aae865776e8 100644 --- a/src/zmq/zmqnotificationinterface.cpp +++ b/src/zmq/zmqnotificationinterface.cpp @@ -173,6 +173,9 @@ void CZMQNotificationInterface::TransactionRemovedFromMempool(const CTransaction void CZMQNotificationInterface::BlockConnected(ChainstateRole role, const std::shared_ptr& pblock, const CBlockIndex* pindexConnected) { + if (role == ChainstateRole::BACKGROUND) { + return; + } for (const CTransactionRef& ptx : pblock->vtx) { const CTransaction& tx = *ptx; TryForEachAndRemoveFailed(notifiers, [&tx](CZMQAbstractNotifier* notifier) { From fbe0a7d7ca680358237b6c2369b3fd2b43221113 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Thu, 10 Nov 2022 16:09:41 -0500 Subject: [PATCH 051/172] wallet: validationinterface: only handle active chain notifications --- src/wallet/wallet.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index c840c2ee1f3d8..7b85cc36c449a 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -631,7 +631,7 @@ void CWallet::chainStateFlushed(ChainstateRole role, const CBlockLocator& loc) { // Don't update the best block until the chain is attached so that in case of a shutdown, // the rescan will be restarted at next startup. - if (m_attaching_chain) { + if (m_attaching_chain || role == ChainstateRole::BACKGROUND) { return; } WalletBatch batch(GetDatabase()); @@ -1465,6 +1465,9 @@ void CWallet::transactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRe void CWallet::blockConnected(ChainstateRole role, const interfaces::BlockInfo& block) { + if (role == ChainstateRole::BACKGROUND) { + return; + } assert(block.data); LOCK(cs_wallet); From 1fffdd76a1bca908f55d73b64983655b14cf7432 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Thu, 10 Nov 2022 16:10:35 -0500 Subject: [PATCH 052/172] net_processing: validationinterface: ignore some events for bg chain --- src/net_processing.cpp | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 12dca182c3800..03dee1351207c 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1917,9 +1917,25 @@ void PeerManagerImpl::BlockConnected( const std::shared_ptr& pblock, const CBlockIndex* pindex) { - m_orphanage.EraseForBlock(*pblock); + // Update this for all chainstate roles so that we don't mistakenly see peers + // helping us do background IBD as having a stale tip. 
m_last_tip_update = GetTime(); + // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value + auto stalling_timeout = m_block_stalling_timeout.load(); + Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT); + if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) { + const auto new_timeout = std::max(std::chrono::duration_cast(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT); + if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { + LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout)); + } + } + + if (role == ChainstateRole::BACKGROUND) { + return; + } + m_orphanage.EraseForBlock(*pblock); + { LOCK(m_recent_confirmed_transactions_mutex); for (const auto& ptx : pblock->vtx) { @@ -1936,16 +1952,6 @@ void PeerManagerImpl::BlockConnected( m_txrequest.ForgetTxHash(ptx->GetWitnessHash()); } } - - // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value - auto stalling_timeout = m_block_stalling_timeout.load(); - Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT); - if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) { - const auto new_timeout = std::max(std::chrono::duration_cast(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT); - if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { - LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout)); - } - } } void PeerManagerImpl::BlockDisconnected(const std::shared_ptr &block, const CBlockIndex* pindex) From 373cf91531b84bfdd06fdf8abf4dca228029ce6b Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Mon, 23 Sep 2019 14:44:54 -0400 Subject: [PATCH 053/172] validation: indexing changes for assumeutxo When using an assumedvalid chainstate, only process validationinterface callbacks from the background chainstate within indexes. This ensures that all indexes are built in-order. Later, we can possibly designate indexes which can be built out of order and continue their operation during snapshot use. Once the background sync has completed, restart the indexes so that they continue to index the now-validated snapshot chainstate. --- src/index/base.cpp | 32 +++++++++++++++++++++++++++++--- src/index/base.h | 12 +++++++++--- src/init.cpp | 30 +++++++++++++++++++++++++----- src/validation.cpp | 18 ++++++++++++++++++ src/validation.h | 14 ++++++++++++++ 5 files changed, 95 insertions(+), 11 deletions(-) diff --git a/src/index/base.cpp b/src/index/base.cpp index 98a8bad102db3..8474d01c41f8f 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -79,9 +79,15 @@ BaseIndex::~BaseIndex() bool BaseIndex::Init() { + AssertLockNotHeld(cs_main); + + // May need reset if index is being restarted. + m_interrupt.reset(); + // m_chainstate member gives indexing code access to node internals. It is // removed in followup https://github.com/bitcoin/bitcoin/pull/24230 - m_chainstate = &m_chain->context()->chainman->ActiveChainstate(); + m_chainstate = WITH_LOCK(::cs_main, + return &m_chain->context()->chainman->GetChainstateForIndexing()); // Register to validation interface before setting the 'm_synced' flag, so that // callbacks are not missed once m_synced is true. 
RegisterValidationInterface(this); @@ -92,7 +98,8 @@ bool BaseIndex::Init() } LOCK(cs_main); - CChain& active_chain = m_chainstate->m_chain; + CChain& index_chain = m_chainstate->m_chain; + if (locator.IsNull()) { SetBestBlockIndex(nullptr); } else { @@ -114,7 +121,7 @@ bool BaseIndex::Init() // Note: this will latch to true immediately if the user starts up with an empty // datadir and an index enabled. If this is the case, indexation will happen solely // via `BlockConnected` signals until, possibly, the next restart. - m_synced = start_block == active_chain.Tip(); + m_synced = start_block == index_chain.Tip(); m_init = true; return true; } @@ -143,6 +150,8 @@ void BaseIndex::ThreadSync() std::chrono::steady_clock::time_point last_locator_write_time{0s}; while (true) { if (m_interrupt) { + LogPrintf("%s: m_interrupt set; exiting ThreadSync\n", GetName()); + SetBestBlockIndex(pindex); // No need to handle errors in Commit. If it fails, the error will be already be // logged. The best way to recover is to continue, as index cannot be corrupted by @@ -252,6 +261,17 @@ bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_ti void BaseIndex::BlockConnected(ChainstateRole role, const std::shared_ptr& block, const CBlockIndex* pindex) { + // Ignore events from the assumed-valid chain; we will process its blocks + // (sequentially) after it is fully verified by the background chainstate. This + // is to avoid any out-of-order indexing. + // + // TODO at some point we could parameterize whether a particular index can be + // built out of order, but for now just do the conservative simple thing. + if (role == ChainstateRole::ASSUMEDVALID) { + return; + } + + // Ignore BlockConnected signals until we have fully indexed the chain. if (!m_synced) { return; } @@ -298,6 +318,12 @@ void BaseIndex::BlockConnected(ChainstateRole role, const std::shared_ptr(node.kernel->interrupt, chainman_opts, blockman_opts); ChainstateManager& chainman = *node.chainman; + // This is defined and set here instead of inline in validation.h to avoid a hard + // dependency between validation and index/base, since the latter is not in + // libbitcoinkernel. + chainman.restart_indexes = [&node]() { + LogPrintf("[snapshot] restarting indexes\n"); + + // Drain the validation interface queue to ensure that the old indexes + // don't have any pending work. + SyncWithValidationInterfaceQueue(); + + for (auto* index : node.indexes) { + index->Interrupt(); + index->Stop(); + if (!(index->Init() && index->StartBackgroundSync())) { + LogPrintf("[snapshot] WARNING failed to restart index %s on snapshot chain\n", index->GetName()); + } + } + }; + node::ChainstateLoadOptions options; options.mempool = Assert(node.mempool.get()); options.reindex = node::fReindex; @@ -1906,18 +1925,19 @@ bool StartIndexBackgroundSync(NodeContext& node) // indexes_start_block='nullptr' means "start from height 0". 
std::optional indexes_start_block; std::string older_index_name; - ChainstateManager& chainman = *Assert(node.chainman); + const Chainstate& chainstate = WITH_LOCK(::cs_main, return chainman.GetChainstateForIndexing()); + const CChain& index_chain = chainstate.m_chain; + for (auto index : node.indexes) { const IndexSummary& summary = index->GetSummary(); if (summary.synced) continue; // Get the last common block between the index best block and the active chain LOCK(::cs_main); - const CChain& active_chain = chainman.ActiveChain(); const CBlockIndex* pindex = chainman.m_blockman.LookupBlockIndex(summary.best_block_hash); - if (!active_chain.Contains(pindex)) { - pindex = active_chain.FindFork(pindex); + if (!index_chain.Contains(pindex)) { + pindex = index_chain.FindFork(pindex); } if (!indexes_start_block || !pindex || pindex->nHeight < indexes_start_block.value()->nHeight) { @@ -1932,7 +1952,7 @@ bool StartIndexBackgroundSync(NodeContext& node) LOCK(::cs_main); const CBlockIndex* start_block = *indexes_start_block; if (!start_block) start_block = chainman.ActiveChain().Genesis(); - if (!chainman.m_blockman.CheckBlockDataAvailability(*chainman.ActiveChain().Tip(), *Assert(start_block))) { + if (!chainman.m_blockman.CheckBlockDataAvailability(*index_chain.Tip(), *Assert(start_block))) { return InitError(strprintf(Untranslated("%s best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)"), older_index_name)); } } diff --git a/src/validation.cpp b/src/validation.cpp index 8c657839e8d3a..00da4a1c9ebf7 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -3289,6 +3289,16 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< if (WITH_LOCK(::cs_main, return m_disabled)) { // Background chainstate has reached the snapshot base block, so exit. + + // Restart indexes to resume indexing for all blocks unique to the snapshot + // chain. This resumes indexing "in order" from where the indexing on the + // background validation chain left off. + // + // This cannot be done while holding cs_main (within + // MaybeCompleteSnapshotValidation) or a cs_main deadlock will occur. + if (m_chainman.restart_indexes) { + m_chainman.restart_indexes(); + } break; } @@ -5921,3 +5931,11 @@ bool ChainstateManager::ValidatedSnapshotCleanup() } return true; } + +Chainstate& ChainstateManager::GetChainstateForIndexing() +{ + // We can't always return `m_ibd_chainstate` because after background validation + // has completed, `m_snapshot_chainstate == m_active_chainstate`, but it can be + // indexed. + return (this->GetAll().size() > 1) ? *m_ibd_chainstate : *m_active_chainstate; +} diff --git a/src/validation.h b/src/validation.h index 38f57ed1b597a..b46b55b65014c 100644 --- a/src/validation.h +++ b/src/validation.h @@ -905,6 +905,10 @@ class ChainstateManager explicit ChainstateManager(const util::SignalInterrupt& interrupt, Options options, node::BlockManager::Options blockman_options); + //! Function to restart active indexes; set dynamically to avoid a circular + //! dependency on `base/index.cpp`. + std::function restart_indexes = std::function(); + const CChainParams& GetParams() const { return m_options.chainparams; } const Consensus::Params& GetConsensus() const { return m_options.chainparams.GetConsensus(); } bool ShouldCheckBlockIndex() const { return *Assert(m_options.check_block_index); } @@ -1227,6 +1231,16 @@ class ChainstateManager //! 
@sa node/chainstate:LoadChainstate() bool ValidatedSnapshotCleanup() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! @returns the chainstate that indexes should consult when ensuring that an + //! index is synced with a chain where we can expect block index entries to have + //! BLOCK_HAVE_DATA beneath the tip. + //! + //! In other words, give us the chainstate for which we can reasonably expect + //! that all blocks beneath the tip have been indexed. In practice this means + //! when using an assumed-valid chainstate based upon a snapshot, return only the + //! fully validated chain. + Chainstate& GetChainstateForIndexing() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + ~ChainstateManager(); }; From 1019c399825b0d512c1fd751c376d46fed4992b9 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Mon, 16 Sep 2019 16:34:45 -0400 Subject: [PATCH 054/172] validation: pruning for multiple chainstates Introduces ChainstateManager::GetPruneRange(). The prune budget is split evenly between the number of chainstates, however the prune budget may be exceeded if the resulting shares are beneath `MIN_DISK_SPACE_FOR_BLOCK_FILES`. --- doc/release-notes-27596.md | 7 +++++ src/node/blockstorage.cpp | 62 ++++++++++++++++++++++++-------------- src/node/blockstorage.h | 14 +++++++-- src/validation.cpp | 35 +++++++++++++++++++-- src/validation.h | 14 ++++++--- 5 files changed, 102 insertions(+), 30 deletions(-) create mode 100644 doc/release-notes-27596.md diff --git a/doc/release-notes-27596.md b/doc/release-notes-27596.md new file mode 100644 index 0000000000000..4f96adb0f354f --- /dev/null +++ b/doc/release-notes-27596.md @@ -0,0 +1,7 @@ +Pruning +------- + +When using assumeutxo with `-prune`, the prune budget may be exceeded if it is set +lower than 1100MB (i.e. `MIN_DISK_SPACE_FOR_BLOCK_FILES * 2`). Prune budget is normally +split evenly across each chainstate, unless the resulting prune budget per chainstate +is beneath `MIN_DISK_SPACE_FOR_BLOCK_FILES` in which case that value will be used. 
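To make the budget arithmetic concrete, here is a small sketch of the per-chainstate target computation that FindFilesToPrune performs below, i.e. std::max(MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size()); the 550 MiB floor is an assumption inferred from the 1100MB figure in the release note above.

#include <algorithm>
#include <cstdint>
#include <iostream>

constexpr uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES{550ull * 1024 * 1024}; // assumed 550 MiB floor

uint64_t PerChainstateTarget(uint64_t prune_target, size_t num_chainstates)
{
    // Split the -prune budget evenly, but never let a chainstate's share fall
    // below the floor (which is how the total budget can end up exceeded).
    return std::max(MIN_DISK_SPACE_FOR_BLOCK_FILES,
                    prune_target / static_cast<uint64_t>(num_chainstates));
}

int main()
{
    constexpr uint64_t MiB{1024 * 1024};
    // -prune=2200 with two chainstates: 1100 MiB each, budget respected.
    std::cout << PerChainstateTarget(2200 * MiB, 2) / MiB << " MiB per chainstate\n";
    // -prune=700 with two chainstates: the 350 MiB share is below the floor,
    // so each chainstate uses 550 MiB and the 1100 MiB total exceeds the budget.
    std::cout << PerChainstateTarget(700 * MiB, 2) / MiB << " MiB per chainstate\n";
}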
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 5c3b7f958e0b3..9ae4ad67b4a51 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -257,40 +257,56 @@ void BlockManager::PruneOneBlockFile(const int fileNumber) m_dirty_fileinfo.insert(fileNumber); } -void BlockManager::FindFilesToPruneManual(std::set& setFilesToPrune, int nManualPruneHeight, int chain_tip_height) +void BlockManager::FindFilesToPruneManual( + std::set& setFilesToPrune, + int nManualPruneHeight, + const Chainstate& chain, + ChainstateManager& chainman) { assert(IsPruneMode() && nManualPruneHeight > 0); LOCK2(cs_main, cs_LastBlockFile); - if (chain_tip_height < 0) { + if (chain.m_chain.Height() < 0) { return; } - // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip) - unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP); + const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight); + int count = 0; for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) { - if (m_blockfile_info[fileNumber].nSize == 0 || m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) { + const auto& fileinfo = m_blockfile_info[fileNumber]; + if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) { continue; } + PruneOneBlockFile(fileNumber); setFilesToPrune.insert(fileNumber); count++; } - LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count); + LogPrintf("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", + chain.GetRole(), last_block_can_prune, count); } -void BlockManager::FindFilesToPrune(std::set& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd) +void BlockManager::FindFilesToPrune( + std::set& setFilesToPrune, + int last_prune, + const Chainstate& chain, + ChainstateManager& chainman) { LOCK2(cs_main, cs_LastBlockFile); - if (chain_tip_height < 0 || GetPruneTarget() == 0) { + // Distribute our -prune budget over all chainstates. + const auto target = std::max( + MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size()); + + if (chain.m_chain.Height() < 0 || target == 0) { return; } - if ((uint64_t)chain_tip_height <= nPruneAfterHeight) { + if (static_cast(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) { return; } - unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast(MIN_BLOCKS_TO_KEEP))}; + const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune); + uint64_t nCurrentUsage = CalculateCurrentUsage(); // We don't check to prune until after we've allocated new space for files // So we should leave a buffer under our target to account for another allocation @@ -299,29 +315,31 @@ void BlockManager::FindFilesToPrune(std::set& setFilesToPrune, uint64_t nPr uint64_t nBytesToPrune; int count = 0; - if (nCurrentUsage + nBuffer >= GetPruneTarget()) { + if (nCurrentUsage + nBuffer >= target) { // On a prune event, the chainstate DB is flushed. // To avoid excessive prune events negating the benefit of high dbcache // values, we should not prune too rapidly. // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon. 
- if (is_ibd) { + if (chainman.IsInitialBlockDownload()) { // Since this is only relevant during IBD, we use a fixed 10% - nBuffer += GetPruneTarget() / 10; + nBuffer += target / 10; } for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) { - nBytesToPrune = m_blockfile_info[fileNumber].nSize + m_blockfile_info[fileNumber].nUndoSize; + const auto& fileinfo = m_blockfile_info[fileNumber]; + nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize; - if (m_blockfile_info[fileNumber].nSize == 0) { + if (fileinfo.nSize == 0) { continue; } - if (nCurrentUsage + nBuffer < GetPruneTarget()) { // are we below our target? + if (nCurrentUsage + nBuffer < target) { // are we below our target? break; } - // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning - if (m_blockfile_info[fileNumber].nHeightLast > nLastBlockWeCanPrune) { + // don't prune files that could have a block that's not within the allowable + // prune range for the chain being pruned. + if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) { continue; } @@ -333,10 +351,10 @@ void BlockManager::FindFilesToPrune(std::set& setFilesToPrune, uint64_t nPr } } - LogPrint(BCLog::PRUNE, "target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n", - GetPruneTarget() / 1024 / 1024, nCurrentUsage / 1024 / 1024, - (int64_t(GetPruneTarget()) - int64_t(nCurrentUsage)) / 1024 / 1024, - nLastBlockWeCanPrune, count); + LogPrint(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n", + chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024, + (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024, + min_block_to_prune, last_block_can_prune, count); } void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) { diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 9a1d44cc7508d..64488584062f6 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -36,6 +36,7 @@ class CBlockUndo; class CChainParams; class Chainstate; class ChainstateManager; +enum class ChainstateRole; struct CCheckpointData; struct FlatFilePos; namespace Consensus { @@ -138,7 +139,11 @@ class BlockManager bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const; /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */ - void FindFilesToPruneManual(std::set& setFilesToPrune, int nManualPruneHeight, int chain_tip_height); + void FindFilesToPruneManual( + std::set& setFilesToPrune, + int nManualPruneHeight, + const Chainstate& chain, + ChainstateManager& chainman); /** * Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a user-defined target. @@ -154,8 +159,13 @@ class BlockManager * A db flag records the fact that at least some block files have been pruned. 
* * @param[out] setFilesToPrune The set of file indices that can be unlinked will be returned + * @param last_prune The last height we're able to prune, according to the prune locks */ - void FindFilesToPrune(std::set& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd); + void FindFilesToPrune( + std::set& setFilesToPrune, + int last_prune, + const Chainstate& chain, + ChainstateManager& chainman); RecursiveMutex cs_LastBlockFile; std::vector m_blockfile_info; diff --git a/src/validation.cpp b/src/validation.cpp index 00da4a1c9ebf7..ad135994ec7a9 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -69,6 +69,7 @@ #include #include #include +#include using kernel::CCoinsStats; using kernel::CoinStatsHashType; @@ -2552,11 +2553,14 @@ bool Chainstate::FlushStateToDisk( if (nManualPruneHeight > 0) { LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH); - m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height()); + m_blockman.FindFilesToPruneManual( + setFilesToPrune, + std::min(last_prune, nManualPruneHeight), + *this, m_chainman); } else { LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH); - m_blockman.FindFilesToPrune(setFilesToPrune, m_chainman.GetParams().PruneAfterHeight(), m_chain.Height(), last_prune, m_chainman.IsInitialBlockDownload()); + m_blockman.FindFilesToPrune(setFilesToPrune, last_prune, *this, m_chainman); m_blockman.m_check_for_pruning = false; } if (!setFilesToPrune.empty()) { @@ -5939,3 +5943,30 @@ Chainstate& ChainstateManager::GetChainstateForIndexing() // indexed. return (this->GetAll().size() > 1) ? *m_ibd_chainstate : *m_active_chainstate; } + +std::pair ChainstateManager::GetPruneRange(const Chainstate& chainstate, int last_height_can_prune) +{ + if (chainstate.m_chain.Height() <= 0) { + return {0, 0}; + } + int prune_start{0}; + + if (this->GetAll().size() > 1 && m_snapshot_chainstate.get() == &chainstate) { + // Leave the blocks in the background IBD chain alone if we're pruning + // the snapshot chain. + prune_start = *Assert(GetSnapshotBaseHeight()) + 1; + } + + int max_prune = std::max( + 0, chainstate.m_chain.Height() - static_cast(MIN_BLOCKS_TO_KEEP)); + + // last block to prune is the lesser of (caller-specified height, MIN_BLOCKS_TO_KEEP from the tip) + // + // While you might be tempted to prune the background chainstate more + // aggressively (i.e. fewer MIN_BLOCKS_TO_KEEP), this won't work with index + // building - specifically blockfilterindex requires undo data, and if + // we don't maintain this trailing window, we hit indexing failures. + int prune_end = std::min(last_height_can_prune, max_prune); + + return {prune_start, prune_end}; +} diff --git a/src/validation.h b/src/validation.h index b46b55b65014c..2aa4221102d22 100644 --- a/src/validation.h +++ b/src/validation.h @@ -885,10 +885,6 @@ class ChainstateManager /** Most recent headers presync progress update, for rate-limiting. */ std::chrono::time_point m_last_presync_update GUARDED_BY(::cs_main) {}; - //! Return the height of the base block of the snapshot in use, if one exists, else - //! nullopt. - std::optional GetSnapshotBaseHeight() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - std::array m_warningcache GUARDED_BY(::cs_main); //! Return true if a chainstate is considered usable. @@ -1241,6 +1237,16 @@ class ChainstateManager //! fully validated chain. Chainstate& GetChainstateForIndexing() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + //! 
+    //! Return the [start, end] (inclusive) of block heights we can prune.
+    //!
+    //! start > end is possible, meaning no blocks can be pruned.
+    std::pair<int, int> GetPruneRange(
+        const Chainstate& chainstate, int last_height_can_prune) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
+    //! Return the height of the base block of the snapshot in use, if one exists, else
+    //! nullopt.
+    std::optional<int> GetSnapshotBaseHeight() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
     ~ChainstateManager();
 };

From 49ef778158c43859946a592e11ec34fe1b93a5b6 Mon Sep 17 00:00:00 2001
From: James O'Beirne
Date: Fri, 25 Aug 2023 14:05:07 -0400
Subject: [PATCH 055/172] test: adjust chainstate tests to use recognized
 snapshot base

In future commits, loading the block index while making use of a
snapshot is contingent on the snapshot being recognized by chainparams.

Ensure all existing unittests that use snapshots use a recognized
snapshot (at height 110).

Co-authored-by: Ryan Ofsky
---
 .../validation_chainstatemanager_tests.cpp    | 103 +++++++++++++-----
 src/validation.cpp                            |   3 +-
 2 files changed, 77 insertions(+), 29 deletions(-)

diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index 74e577c17cbc2..f219d6bc4b793 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -30,12 +30,12 @@ using node::BlockManager;
 using node::KernelNotifications;
 using node::SnapshotMetadata;
 
-BOOST_FIXTURE_TEST_SUITE(validation_chainstatemanager_tests, ChainTestingSetup)
+BOOST_FIXTURE_TEST_SUITE(validation_chainstatemanager_tests, TestingSetup)
 
 //! Basic tests for ChainstateManager.
 //!
 //! First create a legacy (IBD) chainstate, then create a snapshot chainstate.
-BOOST_AUTO_TEST_CASE(chainstatemanager)
+BOOST_FIXTURE_TEST_CASE(chainstatemanager, TestChain100Setup)
 {
     ChainstateManager& manager = *m_node.chainman;
     CTxMemPool& mempool = *m_node.mempool;
@@ -46,14 +46,8 @@ BOOST_AUTO_TEST_CASE(chainstatemanager)
 
     // Create a legacy (IBD) chainstate.
// - Chainstate& c1 = WITH_LOCK(::cs_main, return manager.InitializeChainstate(&mempool)); + Chainstate& c1 = manager.ActiveChainstate(); chainstates.push_back(&c1); - c1.InitCoinsDB( - /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); - WITH_LOCK(::cs_main, c1.InitCoinsCache(1 << 23)); - c1.LoadGenesisBlock(); - BlockValidationState val_state; - BOOST_CHECK(c1.ActivateBestChain(val_state, nullptr)); BOOST_CHECK(!manager.IsSnapshotActive()); BOOST_CHECK(WITH_LOCK(::cs_main, return !manager.IsSnapshotValidated())); @@ -63,8 +57,9 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) auto& active_chain = WITH_LOCK(manager.GetMutex(), return manager.ActiveChain()); BOOST_CHECK_EQUAL(&active_chain, &c1.m_chain); - BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return manager.ActiveHeight()), 0); - + // Get to a valid assumeutxo tip (per chainparams); + mineBlocks(10); + BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return manager.ActiveHeight()), 110); auto active_tip = WITH_LOCK(manager.GetMutex(), return manager.ActiveTip()); auto exp_tip = c1.m_chain.Tip(); BOOST_CHECK_EQUAL(active_tip, exp_tip); @@ -77,16 +72,19 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) Chainstate& c2 = WITH_LOCK(::cs_main, return manager.ActivateExistingSnapshot( &mempool, snapshot_blockhash)); chainstates.push_back(&c2); - - BOOST_CHECK_EQUAL(manager.SnapshotBlockhash().value(), snapshot_blockhash); - c2.InitCoinsDB( /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); - WITH_LOCK(::cs_main, c2.InitCoinsCache(1 << 23)); - c2.m_chain.SetTip(*active_tip); + { + LOCK(::cs_main); + c2.InitCoinsCache(1 << 23); + c2.CoinsTip().SetBestBlock(active_tip->GetBlockHash()); + c2.setBlockIndexCandidates.insert(manager.m_blockman.LookupBlockIndex(active_tip->GetBlockHash())); + c2.LoadChainTip(); + } BlockValidationState _; BOOST_CHECK(c2.ActivateBestChain(_, nullptr)); + BOOST_CHECK_EQUAL(manager.SnapshotBlockhash().value(), snapshot_blockhash); BOOST_CHECK(manager.IsSnapshotActive()); BOOST_CHECK(WITH_LOCK(::cs_main, return !manager.IsSnapshotValidated())); BOOST_CHECK_EQUAL(&c2, &manager.ActiveChainstate()); @@ -97,13 +95,15 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) auto& active_chain2 = WITH_LOCK(manager.GetMutex(), return manager.ActiveChain()); BOOST_CHECK_EQUAL(&active_chain2, &c2.m_chain); - BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return manager.ActiveHeight()), 0); + BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return manager.ActiveHeight()), 110); + mineBlocks(1); + BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return manager.ActiveHeight()), 111); + BOOST_CHECK_EQUAL(WITH_LOCK(manager.GetMutex(), return c1.m_chain.Height()), 110); auto active_tip2 = WITH_LOCK(manager.GetMutex(), return manager.ActiveTip()); - auto exp_tip2 = c2.m_chain.Tip(); - BOOST_CHECK_EQUAL(active_tip2, exp_tip2); - - BOOST_CHECK_EQUAL(exp_tip, exp_tip2); + BOOST_CHECK_EQUAL(active_tip, active_tip2->pprev); + BOOST_CHECK_EQUAL(active_tip, c1.m_chain.Tip()); + BOOST_CHECK_EQUAL(active_tip2, c2.m_chain.Tip()); // Let scheduler events finish running to avoid accessing memory that is going to be unloaded SyncWithValidationInterfaceQueue(); @@ -125,9 +125,6 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_rebalance_caches, TestChain100Setup) // Chainstate& c1 = manager.ActiveChainstate(); chainstates.push_back(&c1); - c1.InitCoinsDB( - /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); - { LOCK(::cs_main); c1.InitCoinsCache(1 << 23); @@ -431,13 +428,20 @@ 
BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) int num_indexes{0}; int num_assumed_valid{0}; + // Blocks in range [assumed_valid_start_idx, last_assumed_valid_idx) will be + // marked as assumed-valid and not having data. const int expected_assumed_valid{20}; - const int last_assumed_valid_idx{40}; + const int last_assumed_valid_idx{111}; const int assumed_valid_start_idx = last_assumed_valid_idx - expected_assumed_valid; + // Mine to height 120, past the hardcoded regtest assumeutxo snapshot at + // height 110 + mineBlocks(20); + CBlockIndex* validated_tip{nullptr}; CBlockIndex* assumed_base{nullptr}; CBlockIndex* assumed_tip{WITH_LOCK(chainman.GetMutex(), return chainman.ActiveChain().Tip())}; + BOOST_CHECK_EQUAL(assumed_tip->nHeight, 120); auto reload_all_block_indexes = [&]() { // For completeness, we also reset the block sequence counters to @@ -463,7 +467,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) LOCK(::cs_main); auto index = cs1.m_chain[i]; - // Blocks with heights in range [20, 40) are marked ASSUMED_VALID + // Blocks with heights in range [91, 110] are marked ASSUMED_VALID if (i < last_assumed_valid_idx && i >= assumed_valid_start_idx) { index->nStatus = BlockStatus::BLOCK_VALID_TREE | BlockStatus::BLOCK_ASSUMED_VALID; } @@ -497,10 +501,36 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) // Set tip of the assume-valid-based chain to the assume-valid block cs2.m_chain.SetTip(*assumed_base); + // Sanity check test variables. + BOOST_CHECK_EQUAL(num_indexes, 121); // 121 total blocks, including genesis + BOOST_CHECK_EQUAL(assumed_tip->nHeight, 120); // original chain has height 120 + BOOST_CHECK_EQUAL(validated_tip->nHeight, 90); // current cs1 chain has height 90 + BOOST_CHECK_EQUAL(assumed_base->nHeight, 110); // current cs2 chain has height 110 + + // Regenerate cs1.setBlockIndexCandidates and cs2.setBlockIndexCandidate and + // check contents below. reload_all_block_indexes(); - // The fully validated chain should have the current validated tip - // and the assumed valid base as candidates. + // The fully validated chain should only have the current validated tip and + // the assumed valid base as candidates, blocks 90 and 110. Specifically: + // + // - It does not have blocks 0-89 because they contain less work than the + // chain tip. + // + // - It has block 90 because it has data and equal work to the chain tip, + // (since it is the chain tip). + // + // - It does not have blocks 91-109 because they do not contain data. + // + // - It has block 110 even though it does not have data, because + // LoadBlockIndex has a special case to always add the snapshot block as a + // candidate. The special case is only actually intended to apply to the + // snapshot chainstate cs2, not the background chainstate cs1, but it is + // written broadly and applies to both. + // + // - It does not have any blocks after height 110 because cs1 is a background + // chainstate, and only blocks where are ancestors of the snapshot block + // are added as candidates for the background chainstate. 
    BOOST_CHECK_EQUAL(cs1.setBlockIndexCandidates.size(), 2);
     BOOST_CHECK_EQUAL(cs1.setBlockIndexCandidates.count(validated_tip), 1);
     BOOST_CHECK_EQUAL(cs1.setBlockIndexCandidates.count(assumed_base), 1);
@@ -508,8 +538,25 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup)
     // The assumed-valid tolerant chain has the assumed valid base as a
     // candidate, but otherwise has none of the assumed-valid (which do not
     // HAVE_DATA) blocks as candidates.
+    //
+    // Specifically:
+    // - All blocks below height 110 are not candidates, because cs2 chain tip
+    // has height 110 and they have less work than it does.
+    //
+    // - Block 110 is a candidate even though it does not have data, because it
+    // is the snapshot block, which is assumed valid.
+    //
+    // - Blocks 111-120 are added because they have data.
+
+    // Check that block 90 is absent
     BOOST_CHECK_EQUAL(cs2.setBlockIndexCandidates.count(validated_tip), 0);
+    // Check that block 109 is absent
+    BOOST_CHECK_EQUAL(cs2.setBlockIndexCandidates.count(assumed_base->pprev), 0);
+    // Check that block 110 is present
+    BOOST_CHECK_EQUAL(cs2.setBlockIndexCandidates.count(assumed_base), 1);
+    // Check that block 120 is present
     BOOST_CHECK_EQUAL(cs2.setBlockIndexCandidates.count(assumed_tip), 1);
+    // Check that 11 blocks total are present.
     BOOST_CHECK_EQUAL(cs2.setBlockIndexCandidates.size(), num_indexes - last_assumed_valid_idx + 1);
 }

diff --git a/src/validation.cpp b/src/validation.cpp
index ad135994ec7a9..b5cff0a4fdbb2 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -3539,7 +3539,8 @@ void Chainstate::ResetBlockFailureFlags(CBlockIndex *pindex) {
 void Chainstate::TryAddBlockIndexCandidate(CBlockIndex* pindex)
 {
     AssertLockHeld(cs_main);
-    // The block only is a candidate for the most-work-chain if it has more work than our current tip.
+    // The block is only a candidate for the most-work-chain if it has the same
+    // or more work than our current tip.
     if (m_chain.Tip() != nullptr && setBlockIndexCandidates.value_comp()(pindex, m_chain.Tip())) {
         return;
     }

From 4c3b8ca35c2e4a441264749bb312df2bd054b5b8 Mon Sep 17 00:00:00 2001
From: James O'Beirne
Date: Fri, 5 May 2023 15:54:13 -0400
Subject: [PATCH 056/172] validation: populate nChainTx value for assumedvalid
 chainstates

Use the expected AssumeutxoData in order to bootstrap nChainTx values
for assumedvalid blockindex entries in the snapshot chainstate.

This is necessary because nChainTx is normally built up from nTx
values, which are populated using block data that the snapshot
chainstate does not yet have.
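
In outline, the bootstrap added below amounts to the following
(a simplified sketch; the no-snapshot path and the monotonicity
check in CheckBlockIndex are omitted):

    if (snapshot_blockhash) {
        const AssumeutxoData au_data = *Assert(
            GetParams().AssumeutxoForBlockhash(*snapshot_blockhash));
        CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};

        // nChainTx isn't persisted to disk, so seed the snapshot base
        // block's value from the hardcoded assumeutxo chainparams.
        base->nChainTx = au_data.nChainTx;
    }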
--- src/node/blockstorage.cpp | 25 +++++++++++++++++++++---- src/node/blockstorage.h | 5 +++-- src/validation.cpp | 6 +++++- 3 files changed, 29 insertions(+), 7 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 9ae4ad67b4a51..7ed2346ae4f8a 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -378,13 +378,26 @@ CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash) return pindex; } -bool BlockManager::LoadBlockIndex() +bool BlockManager::LoadBlockIndex(const std::optional& snapshot_blockhash) { if (!m_block_tree_db->LoadBlockIndexGuts( GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) { return false; } + int snapshot_height = -1; + if (snapshot_blockhash) { + const AssumeutxoData au_data = *Assert(GetParams().AssumeutxoForBlockhash(*snapshot_blockhash)); + snapshot_height = au_data.height; + CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)}; + + // Since nChainTx (responsible for estiamted progress) isn't persisted + // to disk, we must bootstrap the value for assumedvalid chainstates + // from the hardcoded assumeutxo chainparams. + base->nChainTx = au_data.nChainTx; + LogPrintf("[snapshot] set nChainTx=%d for %s\n", au_data.nChainTx, snapshot_blockhash->ToString()); + } + // Calculate nChainWork std::vector vSortedByHeight{GetAllBlockIndices()}; std::sort(vSortedByHeight.begin(), vSortedByHeight.end(), @@ -401,7 +414,11 @@ bool BlockManager::LoadBlockIndex() // Pruned nodes may have deleted the block. if (pindex->nTx > 0) { if (pindex->pprev) { - if (pindex->pprev->nChainTx > 0) { + if (snapshot_blockhash && pindex->nHeight == snapshot_height && + pindex->GetBlockHash() == *snapshot_blockhash) { + // Should have been set above; don't disturb it with code below. + Assert(pindex->nChainTx > 0); + } else if (pindex->pprev->nChainTx > 0) { pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx; } else { pindex->nChainTx = 0; @@ -444,9 +461,9 @@ bool BlockManager::WriteBlockIndexDB() return true; } -bool BlockManager::LoadBlockIndexDB() +bool BlockManager::LoadBlockIndexDB(const std::optional& snapshot_blockhash) { - if (!LoadBlockIndex()) { + if (!LoadBlockIndex(snapshot_blockhash)) { return false; } diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 64488584062f6..fcd9fb9f67aed 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -118,7 +118,7 @@ class BlockManager * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral * collections like m_dirty_blockindex. */ - bool LoadBlockIndex() + bool LoadBlockIndex(const std::optional& snapshot_blockhash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Return false if block file or undo file flushing fails. */ @@ -231,7 +231,8 @@ class BlockManager std::unique_ptr m_block_tree_db GUARDED_BY(::cs_main); bool WriteBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - bool LoadBlockIndexDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + bool LoadBlockIndexDB(const std::optional& snapshot_blockhash) + EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** * Remove any pruned block & undo files that are still on disk. 
diff --git a/src/validation.cpp b/src/validation.cpp index b5cff0a4fdbb2..9c783ece65176 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -4542,7 +4542,7 @@ bool ChainstateManager::LoadBlockIndex() // Load block index from databases bool needs_init = fReindex; if (!fReindex) { - bool ret{m_blockman.LoadBlockIndexDB()}; + bool ret{m_blockman.LoadBlockIndexDB(SnapshotBlockhash())}; if (!ret) return false; m_blockman.ScanAndUnlinkAlreadyPrunedFiles(); @@ -4838,6 +4838,10 @@ void ChainstateManager::CheckBlockIndex() CBlockIndex* pindexFirstAssumeValid = nullptr; // Oldest ancestor of pindex which has BLOCK_ASSUMED_VALID while (pindex != nullptr) { nNodes++; + if (pindex->pprev && pindex->nTx > 0) { + // nChainTx should increase monotonically + assert(pindex->pprev->nChainTx <= pindex->nChainTx); + } if (pindexFirstAssumeValid == nullptr && pindex->nStatus & BLOCK_ASSUMED_VALID) pindexFirstAssumeValid = pindex; if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex; if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) { From 7fcd21544a333ffdf1910b65c573579860be6a36 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Wed, 3 May 2023 14:55:03 -0400 Subject: [PATCH 057/172] blockstorage: segment normal/assumedvalid blockfiles When using an assumedvalid (snapshot) chainstate along with a background chainstate, we are syncing two very different regions of the chain simultaneously. If we use the same blockfile space for both of these syncs, wildly different height blocks will be stored alongside one another, making pruning ineffective. This change implements a separate blockfile cursor for the assumedvalid chainstate when one is in use. --- src/node/blockstorage.cpp | 139 +++++++++++++++++++++++++++++--------- src/node/blockstorage.h | 84 +++++++++++++++++++---- src/validation.cpp | 3 +- 3 files changed, 179 insertions(+), 47 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 7ed2346ae4f8a..5e61ed3100d4c 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -273,7 +274,7 @@ void BlockManager::FindFilesToPruneManual( const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight); int count = 0; - for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) { + for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) { const auto& fileinfo = m_blockfile_info[fileNumber]; if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) { continue; @@ -325,7 +326,7 @@ void BlockManager::FindFilesToPrune( nBuffer += target / 10; } - for (int fileNumber = 0; fileNumber < m_last_blockfile; fileNumber++) { + for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) { const auto& fileinfo = m_blockfile_info[fileNumber]; nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize; @@ -385,19 +386,25 @@ bool BlockManager::LoadBlockIndex(const std::optional& snapshot_blockha return false; } - int snapshot_height = -1; if (snapshot_blockhash) { const AssumeutxoData au_data = *Assert(GetParams().AssumeutxoForBlockhash(*snapshot_blockhash)); - snapshot_height = au_data.height; + m_snapshot_height = au_data.height; CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)}; - // Since nChainTx (responsible for estiamted progress) isn't persisted + // 
Since nChainTx (responsible for estimated progress) isn't persisted // to disk, we must bootstrap the value for assumedvalid chainstates // from the hardcoded assumeutxo chainparams. base->nChainTx = au_data.nChainTx; LogPrintf("[snapshot] set nChainTx=%d for %s\n", au_data.nChainTx, snapshot_blockhash->ToString()); + } else { + // If this isn't called with a snapshot blockhash, make sure the cached snapshot height + // is null. This is relevant during snapshot completion, when the blockman may be loaded + // with a height that then needs to be cleared after the snapshot is fully validated. + m_snapshot_height.reset(); } + Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value()); + // Calculate nChainWork std::vector vSortedByHeight{GetAllBlockIndices()}; std::sort(vSortedByHeight.begin(), vSortedByHeight.end(), @@ -414,7 +421,7 @@ bool BlockManager::LoadBlockIndex(const std::optional& snapshot_blockha // Pruned nodes may have deleted the block. if (pindex->nTx > 0) { if (pindex->pprev) { - if (snapshot_blockhash && pindex->nHeight == snapshot_height && + if (m_snapshot_height && pindex->nHeight == *m_snapshot_height && pindex->GetBlockHash() == *snapshot_blockhash) { // Should have been set above; don't disturb it with code below. Assert(pindex->nChainTx > 0); @@ -455,7 +462,8 @@ bool BlockManager::WriteBlockIndexDB() vBlocks.push_back(*it); m_dirty_blockindex.erase(it++); } - if (!m_block_tree_db->WriteBatchSync(vFiles, m_last_blockfile, vBlocks)) { + int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum()); + if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) { return false; } return true; @@ -466,16 +474,17 @@ bool BlockManager::LoadBlockIndexDB(const std::optional& snapshot_block if (!LoadBlockIndex(snapshot_blockhash)) { return false; } + int max_blockfile_num{0}; // Load block file info - m_block_tree_db->ReadLastBlockFile(m_last_blockfile); - m_blockfile_info.resize(m_last_blockfile + 1); - LogPrintf("%s: last block file = %i\n", __func__, m_last_blockfile); - for (int nFile = 0; nFile <= m_last_blockfile; nFile++) { + m_block_tree_db->ReadLastBlockFile(max_blockfile_num); + m_blockfile_info.resize(max_blockfile_num + 1); + LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num); + for (int nFile = 0; nFile <= max_blockfile_num; nFile++) { m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]); } - LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[m_last_blockfile].ToString()); - for (int nFile = m_last_blockfile + 1; true; nFile++) { + LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString()); + for (int nFile = max_blockfile_num + 1; true; nFile++) { CBlockFileInfo info; if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) { m_blockfile_info.push_back(info); @@ -499,6 +508,15 @@ bool BlockManager::LoadBlockIndexDB(const std::optional& snapshot_block } } + { + // Initialize the blockfile cursors. 
+ LOCK(cs_LastBlockFile); + for (size_t i = 0; i < m_blockfile_info.size(); ++i) { + const auto last_height_in_file = m_blockfile_info[i].nHeightLast; + m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast(i), 0}; + } + } + // Check whether we have ever pruned block & undo files m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned); if (m_have_pruned) { @@ -516,12 +534,13 @@ bool BlockManager::LoadBlockIndexDB(const std::optional& snapshot_block void BlockManager::ScanAndUnlinkAlreadyPrunedFiles() { AssertLockHeld(::cs_main); + int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum()); if (!m_have_pruned) { return; } std::set block_files_to_prune; - for (int file_number = 0; file_number < m_last_blockfile; file_number++) { + for (int file_number = 0; file_number < max_blockfile; file_number++) { if (m_blockfile_info[file_number].nSize == 0) { block_files_to_prune.insert(file_number); } @@ -696,7 +715,7 @@ bool BlockManager::FlushUndoFile(int block_file, bool finalize) return true; } -bool BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo) +bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo) { bool success = true; LOCK(cs_LastBlockFile); @@ -708,9 +727,9 @@ bool BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo) // have populated `m_blockfile_info` via LoadBlockIndexDB(). return true; } - assert(static_cast(m_blockfile_info.size()) > m_last_blockfile); + assert(static_cast(m_blockfile_info.size()) > blockfile_num); - FlatFilePos block_pos_old(m_last_blockfile, m_blockfile_info[m_last_blockfile].nSize); + FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize); if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) { m_opts.notifications.flushError("Flushing block file to disk failed. This is likely the result of an I/O error."); success = false; @@ -718,13 +737,33 @@ bool BlockManager::FlushBlockFile(bool fFinalize, bool finalize_undo) // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks, // e.g. during IBD or a sync after a node going offline if (!fFinalize || finalize_undo) { - if (!FlushUndoFile(m_last_blockfile, finalize_undo)) { + if (!FlushUndoFile(blockfile_num, finalize_undo)) { success = false; } } return success; } +BlockfileType BlockManager::BlockfileTypeForHeight(int height) +{ + if (!m_snapshot_height) { + return BlockfileType::NORMAL; + } + return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL; +} + +bool BlockManager::FlushChainstateBlockFile(int tip_height) +{ + LOCK(cs_LastBlockFile); + auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)]; + if (cursor) { + // The cursor may not exist after a snapshot has been loaded but before any + // blocks have been downloaded. + return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false); + } + return false; +} + uint64_t BlockManager::CalculateCurrentUsage() { LOCK(cs_LastBlockFile); @@ -779,8 +818,19 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne { LOCK(cs_LastBlockFile); - unsigned int nFile = fKnown ? pos.nFile : m_last_blockfile; - if (m_blockfile_info.size() <= nFile) { + const BlockfileType chain_type = BlockfileTypeForHeight(nHeight); + + if (!m_blockfile_cursors[chain_type]) { + // If a snapshot is loaded during runtime, we may not have initialized this cursor yet. 
+ assert(chain_type == BlockfileType::ASSUMED); + const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1}; + m_blockfile_cursors[chain_type] = new_cursor; + LogPrint(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor); + } + const int last_blockfile = m_blockfile_cursors[chain_type]->file_num; + + int nFile = fKnown ? pos.nFile : last_blockfile; + if (static_cast(m_blockfile_info.size()) <= nFile) { m_blockfile_info.resize(nFile + 1); } @@ -797,13 +847,20 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne } } assert(nAddSize < max_blockfile_size); + while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) { // when the undo file is keeping up with the block file, we want to flush it explicitly // when it is lagging behind (more blocks arrive than are being connected), we let the // undo block write case handle it - finalize_undo = (m_blockfile_info[nFile].nHeightLast == m_undo_height_in_last_blockfile); - nFile++; - if (m_blockfile_info.size() <= nFile) { + finalize_undo = (static_cast(m_blockfile_info[nFile].nHeightLast) == + Assert(m_blockfile_cursors[chain_type])->undo_height); + + // Try the next unclaimed blockfile number + nFile = this->MaxBlockfileNum() + 1; + // Set to increment MaxBlockfileNum() for next iteration + m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; + + if (static_cast(m_blockfile_info.size()) <= nFile) { m_blockfile_info.resize(nFile + 1); } } @@ -811,9 +868,10 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne pos.nPos = m_blockfile_info[nFile].nSize; } - if ((int)nFile != m_last_blockfile) { + if (nFile != last_blockfile) { if (!fKnown) { - LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s\n", m_last_blockfile, m_blockfile_info[m_last_blockfile].ToString()); + LogPrint(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n", + last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight); } // Do not propagate the return code. The flush concerns a previous block @@ -823,13 +881,13 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne // data may be inconsistent after a crash if the flush is called during // a reindex. A flush error might also leave some of the data files // untrimmed. - if (!FlushBlockFile(!fKnown, finalize_undo)) { + if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) { LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n", - m_last_blockfile, !fKnown, finalize_undo, nFile); + last_blockfile, !fKnown, finalize_undo, nFile); } - m_last_blockfile = nFile; - m_undo_height_in_last_blockfile = 0; // No undo data yet in the new file, so reset our undo-height tracking. + // No undo data yet in the new file, so reset our undo-height tracking. 
+ m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; } m_blockfile_info[nFile].AddBlock(nHeight, nTime); @@ -903,6 +961,9 @@ bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) { AssertLockHeld(::cs_main); + const BlockfileType type = BlockfileTypeForHeight(block.nHeight); + auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type])); + // Write undo information to disk if (block.GetUndoPos().IsNull()) { FlatFilePos _pos; @@ -917,7 +978,7 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid // in the block file info as below; note that this does not catch the case where the undo writes are keeping up // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in // the FindBlockPos function - if (_pos.nFile < m_last_blockfile && static_cast(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) { + if (_pos.nFile < cursor.file_num && static_cast(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) { // Do not propagate the return code, a failed flush here should not // be an indication for a failed write. If it were propagated here, // the caller would assume the undo data not to be written, when in @@ -926,8 +987,8 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid if (!FlushUndoFile(_pos.nFile, true)) { LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile); } - } else if (_pos.nFile == m_last_blockfile && static_cast(block.nHeight) > m_undo_height_in_last_blockfile) { - m_undo_height_in_last_blockfile = block.nHeight; + } else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) { + cursor.undo_height = block.nHeight; } // update nUndoPos in block index block.nUndoPos = _pos.nPos; @@ -1126,4 +1187,18 @@ void ImportBlocks(ChainstateManager& chainman, std::vector vImportFile } } // End scope of ImportingNow } + +std::ostream& operator<<(std::ostream& os, const BlockfileType& type) { + switch(type) { + case BlockfileType::NORMAL: os << "normal"; break; + case BlockfileType::ASSUMED: os << "assumed"; break; + default: os.setstate(std::ios_base::failbit); + } + return os; +} + +std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) { + os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height); + return os; +} } // namespace node diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index fcd9fb9f67aed..ac97728c0567e 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -36,7 +37,6 @@ class CBlockUndo; class CChainParams; class Chainstate; class ChainstateManager; -enum class ChainstateRole; struct CCheckpointData; struct FlatFilePos; namespace Consensus { @@ -98,6 +98,35 @@ struct PruneLockInfo { int height_first{std::numeric_limits::max()}; //! Height of earliest block that should be kept and not pruned }; +enum BlockfileType { + // Values used as array indexes - do not change carelessly. + NORMAL = 0, + ASSUMED = 1, + NUM_TYPES = 2, +}; + +std::ostream& operator<<(std::ostream& os, const BlockfileType& type); + +struct BlockfileCursor { + // The latest blockfile number. 
+ int file_num{0}; + + // Track the height of the highest block in file_num whose undo + // data has been written. Block data is written to block files in download + // order, but is written to undo files in validation order, which is + // usually in order by height. To avoid wasting disk space, undo files will + // be trimmed whenever the corresponding block file is finalized and + // the height of the highest block written to the block file equals the + // height of the highest block written to the undo file. This is a + // heuristic and can sometimes preemptively trim undo files that will write + // more data later, and sometimes fail to trim undo files that can't have + // more data written later. + int undo_height{0}; +}; + +std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor); + + /** * Maintains a tree of blocks (stored in `m_block_index`) which is consulted * to determine where the most-work tip is. @@ -122,12 +151,13 @@ class BlockManager EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Return false if block file or undo file flushing fails. */ - [[nodiscard]] bool FlushBlockFile(bool fFinalize = false, bool finalize_undo = false); + [[nodiscard]] bool FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo); /** Return false if undo file flushing fails. */ [[nodiscard]] bool FlushUndoFile(int block_file, bool finalize = false); [[nodiscard]] bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown); + [[nodiscard]] bool FlushChainstateBlockFile(int tip_height); bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize); FlatFileSeq BlockFileSeq() const; @@ -169,19 +199,29 @@ class BlockManager RecursiveMutex cs_LastBlockFile; std::vector m_blockfile_info; - int m_last_blockfile = 0; - // Track the height of the highest block in m_last_blockfile whose undo - // data has been written. Block data is written to block files in download - // order, but is written to undo files in validation order, which is - // usually in order by height. To avoid wasting disk space, undo files will - // be trimmed whenever the corresponding block file is finalized and - // the height of the highest block written to the block file equals the - // height of the highest block written to the undo file. This is a - // heuristic and can sometimes preemptively trim undo files that will write - // more data later, and sometimes fail to trim undo files that can't have - // more data written later. - unsigned int m_undo_height_in_last_blockfile = 0; + //! Since assumedvalid chainstates may be syncing a range of the chain that is very + //! far away from the normal/background validation process, we should segment blockfiles + //! for assumed chainstates. Otherwise, we might have wildly different height ranges + //! mixed into the same block files, which would impair our ability to prune + //! effectively. + //! + //! This data structure maintains separate blockfile number cursors for each + //! BlockfileType. The ASSUMED state is initialized, when necessary, in FindBlockPos(). + //! + //! The first element is the NORMAL cursor, second is ASSUMED. 
+    std::array<std::optional<BlockfileCursor>, BlockfileType::NUM_TYPES>
+        m_blockfile_cursors GUARDED_BY(cs_LastBlockFile) = {
+            BlockfileCursor{},
+            std::nullopt,
+    };
+
+    int MaxBlockfileNum() const EXCLUSIVE_LOCKS_REQUIRED(cs_LastBlockFile)
+    {
+        static const BlockfileCursor empty_cursor;
+        const auto& normal = m_blockfile_cursors[BlockfileType::NORMAL].value_or(empty_cursor);
+        const auto& assumed = m_blockfile_cursors[BlockfileType::ASSUMED].value_or(empty_cursor);
+        return std::max(normal.file_num, assumed.file_num);
+    }
 
     /** Global flag to indicate we should check to see if there are
      *  block/undo files that should be deleted. Set on startup
@@ -205,6 +245,8 @@ class BlockManager
      */
     std::unordered_map<std::string, PruneLockInfo> m_prune_locks GUARDED_BY(::cs_main);
 
+    BlockfileType BlockfileTypeForHeight(int height);
+
     const kernel::BlockManagerOpts m_opts;
 
 public:
@@ -220,6 +262,20 @@ class BlockManager
 
     BlockMap m_block_index GUARDED_BY(cs_main);
 
+    /**
+     * The height of the base block of an assumeutxo snapshot, if one is in use.
+     *
+     * This controls how blockfiles are segmented by chainstate type to avoid
+     * comingling different height regions of the chain when an assumedvalid chainstate
+     * is in use. If heights are drastically different in the same blockfile, pruning
+     * suffers.
+     *
+     * This is set during ActivateSnapshot() or upon LoadBlockIndex() if a snapshot
+     * had been previously loaded. After the snapshot is validated, this is unset to
+     * restore normal LoadBlockIndex behavior.
+     */
+    std::optional<int> m_snapshot_height;
+
     std::vector<CBlockIndex*> GetAllBlockIndices() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     /**
diff --git a/src/validation.cpp b/src/validation.cpp
index 9c783ece65176..01081011b085a 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -2601,7 +2601,7 @@ bool Chainstate::FlushStateToDisk(
             // First make sure all block and undo data is flushed to disk.
             // TODO: Handle return error, or add detailed comment why it is
             // safe to not return an error upon failure.
-            if (!m_blockman.FlushBlockFile()) {
+            if (!m_blockman.FlushChainstateBlockFile(m_chain.Height())) {
                 LogPrintLevel(BCLog::VALIDATION, BCLog::Level::Warning, "%s: Failed to flush block file.\n", __func__);
             }
         }
@@ -5269,6 +5269,7 @@ bool ChainstateManager::ActivateSnapshot(
     assert(chaintip_loaded);
 
     m_active_chainstate = m_snapshot_chainstate.get();
+    m_blockman.m_snapshot_height = this->GetSnapshotBaseHeight();
 
     LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString());
     LogPrintf("[snapshot] (%.2f MB)\n",

From 9511fb3616b7bbe1d0d2f54a45ea0a650ba0367b Mon Sep 17 00:00:00 2001
From: James O'Beirne
Date: Fri, 5 May 2023 18:27:56 -0400
Subject: [PATCH 058/172] validation: assumeutxo: swap m_mempool on snapshot
 activation

Otherwise we will not receive transactions during background sync until
restart.
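
In outline, activation now hands the mempool pointer over roughly like
so (a simplified excerpt of the change below):

    // Transfer possession of the mempool to the snapshot chainstate.
    // Mempool is empty at this point because we're still in IBD.
    Assert(m_active_chainstate->m_mempool->size() == 0);
    Assert(!m_snapshot_chainstate->m_mempool);
    m_snapshot_chainstate->m_mempool = m_active_chainstate->m_mempool;
    m_active_chainstate->m_mempool = nullptr;
    m_active_chainstate = m_snapshot_chainstate.get();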
--- .../validation_chainstatemanager_tests.cpp | 11 +++-------- src/validation.cpp | 18 +++++++++++++++--- src/validation.h | 3 +-- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index f219d6bc4b793..227d7d4633e18 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -38,8 +38,6 @@ BOOST_FIXTURE_TEST_SUITE(validation_chainstatemanager_tests, TestingSetup) BOOST_FIXTURE_TEST_CASE(chainstatemanager, TestChain100Setup) { ChainstateManager& manager = *m_node.chainman; - CTxMemPool& mempool = *m_node.mempool; - std::vector chainstates; BOOST_CHECK(!manager.SnapshotBlockhash().has_value()); @@ -69,8 +67,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager, TestChain100Setup) // Create a snapshot-based chainstate. // const uint256 snapshot_blockhash = active_tip->GetBlockHash(); - Chainstate& c2 = WITH_LOCK(::cs_main, return manager.ActivateExistingSnapshot( - &mempool, snapshot_blockhash)); + Chainstate& c2 = WITH_LOCK(::cs_main, return manager.ActivateExistingSnapshot(snapshot_blockhash)); chainstates.push_back(&c2); c2.InitCoinsDB( /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); @@ -113,7 +110,6 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager, TestChain100Setup) BOOST_FIXTURE_TEST_CASE(chainstatemanager_rebalance_caches, TestChain100Setup) { ChainstateManager& manager = *m_node.chainman; - CTxMemPool& mempool = *m_node.mempool; size_t max_cache = 10000; manager.m_total_coinsdb_cache = max_cache; @@ -137,7 +133,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_rebalance_caches, TestChain100Setup) // Create a snapshot-based chainstate. // CBlockIndex* snapshot_base{WITH_LOCK(manager.GetMutex(), return manager.ActiveChain()[manager.ActiveChain().Height() / 2])}; - Chainstate& c2 = WITH_LOCK(cs_main, return manager.ActivateExistingSnapshot(&mempool, *snapshot_base->phashBlock)); + Chainstate& c2 = WITH_LOCK(cs_main, return manager.ActivateExistingSnapshot(*snapshot_base->phashBlock)); chainstates.push_back(&c2); c2.InitCoinsDB( /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); @@ -423,7 +419,6 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_activate_snapshot, SnapshotTestSetup) BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) { ChainstateManager& chainman = *Assert(m_node.chainman); - CTxMemPool& mempool = *m_node.mempool; Chainstate& cs1 = chainman.ActiveChainstate(); int num_indexes{0}; @@ -493,7 +488,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) // Note: cs2's tip is not set when ActivateExistingSnapshot is called. Chainstate& cs2 = WITH_LOCK(::cs_main, - return chainman.ActivateExistingSnapshot(&mempool, *assumed_base->phashBlock)); + return chainman.ActivateExistingSnapshot(*assumed_base->phashBlock)); // Set tip of the fully validated chain to be the validated tip cs1.m_chain.SetTip(*validated_tip); diff --git a/src/validation.cpp b/src/validation.cpp index 01081011b085a..d72b017cc4a37 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5268,6 +5268,12 @@ bool ChainstateManager::ActivateSnapshot( const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip(); assert(chaintip_loaded); + // Transfer possession of the mempool to the snapshot chianstate. + // Mempool is empty at this point because we're still in IBD. 
+ Assert(m_active_chainstate->m_mempool->size() == 0); + Assert(!m_snapshot_chainstate->m_mempool); + m_snapshot_chainstate->m_mempool = m_active_chainstate->m_mempool; + m_active_chainstate->m_mempool = nullptr; m_active_chainstate = m_snapshot_chainstate.get(); m_blockman.m_snapshot_height = this->GetSnapshotBaseHeight(); @@ -5747,16 +5753,22 @@ bool ChainstateManager::DetectSnapshotChainstate(CTxMemPool* mempool) LogPrintf("[snapshot] detected active snapshot chainstate (%s) - loading\n", fs::PathToString(*path)); - this->ActivateExistingSnapshot(mempool, *base_blockhash); + this->ActivateExistingSnapshot(*base_blockhash); return true; } -Chainstate& ChainstateManager::ActivateExistingSnapshot(CTxMemPool* mempool, uint256 base_blockhash) +Chainstate& ChainstateManager::ActivateExistingSnapshot(uint256 base_blockhash) { assert(!m_snapshot_chainstate); m_snapshot_chainstate = - std::make_unique(mempool, m_blockman, *this, base_blockhash); + std::make_unique(nullptr, m_blockman, *this, base_blockhash); LogPrintf("[snapshot] switching active chainstate to %s\n", m_snapshot_chainstate->ToString()); + + // Mempool is empty at this point because we're still in IBD. + Assert(m_active_chainstate->m_mempool->size() == 0); + Assert(!m_snapshot_chainstate->m_mempool); + m_snapshot_chainstate->m_mempool = m_active_chainstate->m_mempool; + m_active_chainstate->m_mempool = nullptr; m_active_chainstate = m_snapshot_chainstate.get(); return *m_snapshot_chainstate; } diff --git a/src/validation.h b/src/validation.h index 2aa4221102d22..94a00e44a4eed 100644 --- a/src/validation.h +++ b/src/validation.h @@ -1213,8 +1213,7 @@ class ChainstateManager //! Switch the active chainstate to one based on a UTXO snapshot that was loaded //! previously. - Chainstate& ActivateExistingSnapshot(CTxMemPool* mempool, uint256 base_blockhash) - EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + Chainstate& ActivateExistingSnapshot(uint256 base_blockhash) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); //! If we have validated a snapshot chain during this runtime, copy its //! chainstate directory over to the main `chainstate` location, completing From 62ac519e718eb7a31dca1102a96ba219fbc7f95d Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Sun, 17 Sep 2023 13:56:12 -0400 Subject: [PATCH 059/172] validation: do not activate snapshot if behind active chain Most easily reviewed with git show --color-moved=dimmed-zebra --color-moved-ws=ignore-all-space Co-authored-by: Ryan Ofsky --- src/test/util/chainstate.h | 18 ++++++++- src/validation.cpp | 81 ++++++++++++++++++++++---------------- 2 files changed, 65 insertions(+), 34 deletions(-) diff --git a/src/test/util/chainstate.h b/src/test/util/chainstate.h index 7f559168703fe..e2a88eacddabf 100644 --- a/src/test/util/chainstate.h +++ b/src/test/util/chainstate.h @@ -109,7 +109,23 @@ CreateAndActivateUTXOSnapshot( 0 == WITH_LOCK(node.chainman->GetMutex(), return node.chainman->ActiveHeight())); } - return node.chainman->ActivateSnapshot(auto_infile, metadata, in_memory_chainstate); + auto& new_active = node.chainman->ActiveChainstate(); + auto* tip = new_active.m_chain.Tip(); + + // Disconnect a block so that the snapshot chainstate will be ahead, otherwise + // it will refuse to activate. + // + // TODO this is a unittest-specific hack, and we should probably rethink how to + // better generate/activate snapshots in unittests. 
+ if (tip->pprev) { + new_active.m_chain.SetTip(*(tip->pprev)); + } + + bool res = node.chainman->ActivateSnapshot(auto_infile, metadata, in_memory_chainstate); + + // Restore the old tip. + new_active.m_chain.SetTip(*tip); + return res; } diff --git a/src/validation.cpp b/src/validation.cpp index d72b017cc4a37..82aafd97f87bd 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5230,19 +5230,8 @@ bool ChainstateManager::ActivateSnapshot( static_cast(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC)); } - bool snapshot_ok = this->PopulateAndValidateSnapshot( - *snapshot_chainstate, coins_file, metadata); - - // If not in-memory, persist the base blockhash for use during subsequent - // initialization. - if (!in_memory) { - LOCK(::cs_main); - if (!node::WriteSnapshotBaseBlockhash(*snapshot_chainstate)) { - snapshot_ok = false; - } - } - if (!snapshot_ok) { - LOCK(::cs_main); + auto cleanup_bad_snapshot = [&](const char* reason) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { + LogPrintf("[snapshot] activation failed - %s\n", reason); this->MaybeRebalanceCaches(); // PopulateAndValidateSnapshot can return (in error) before the leveldb datadir @@ -5259,30 +5248,48 @@ bool ChainstateManager::ActivateSnapshot( } } return false; - } + }; - { + if (!this->PopulateAndValidateSnapshot(*snapshot_chainstate, coins_file, metadata)) { LOCK(::cs_main); - assert(!m_snapshot_chainstate); - m_snapshot_chainstate.swap(snapshot_chainstate); - const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip(); - assert(chaintip_loaded); - - // Transfer possession of the mempool to the snapshot chianstate. - // Mempool is empty at this point because we're still in IBD. - Assert(m_active_chainstate->m_mempool->size() == 0); - Assert(!m_snapshot_chainstate->m_mempool); - m_snapshot_chainstate->m_mempool = m_active_chainstate->m_mempool; - m_active_chainstate->m_mempool = nullptr; - m_active_chainstate = m_snapshot_chainstate.get(); - m_blockman.m_snapshot_height = this->GetSnapshotBaseHeight(); - - LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString()); - LogPrintf("[snapshot] (%.2f MB)\n", - m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000)); + return cleanup_bad_snapshot("population failed"); + } - this->MaybeRebalanceCaches(); + LOCK(::cs_main); // cs_main required for rest of snapshot activation. + + // Do a final check to ensure that the snapshot chainstate is actually a more + // work chain than the active chainstate; a user could have loaded a snapshot + // very late in the IBD process, and we wouldn't want to load a useless chainstate. + if (!CBlockIndexWorkComparator()(ActiveTip(), snapshot_chainstate->m_chain.Tip())) { + return cleanup_bad_snapshot("work does not exceed active chainstate"); + } + // If not in-memory, persist the base blockhash for use during subsequent + // initialization. + if (!in_memory) { + if (!node::WriteSnapshotBaseBlockhash(*snapshot_chainstate)) { + return cleanup_bad_snapshot("could not write base blockhash"); + } } + + assert(!m_snapshot_chainstate); + m_snapshot_chainstate.swap(snapshot_chainstate); + const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip(); + assert(chaintip_loaded); + + // Transfer possession of the mempool to the snapshot chainstate. + // Mempool is empty at this point because we're still in IBD. 
+ Assert(m_active_chainstate->m_mempool->size() == 0); + Assert(!m_snapshot_chainstate->m_mempool); + m_snapshot_chainstate->m_mempool = m_active_chainstate->m_mempool; + m_active_chainstate->m_mempool = nullptr; + m_active_chainstate = m_snapshot_chainstate.get(); + m_blockman.m_snapshot_height = this->GetSnapshotBaseHeight(); + + LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString()); + LogPrintf("[snapshot] (%.2f MB)\n", + m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000)); + + this->MaybeRebalanceCaches(); return true; } @@ -5342,6 +5349,14 @@ bool ChainstateManager::PopulateAndValidateSnapshot( const AssumeutxoData& au_data = *maybe_au_data; + // This work comparison is a duplicate check with the one performed later in + // ActivateSnapshot(), but is done so that we avoid doing the long work of staging + // a snapshot that isn't actually usable. + if (WITH_LOCK(::cs_main, return !CBlockIndexWorkComparator()(ActiveTip(), snapshot_start_block))) { + LogPrintf("[snapshot] activation failed - height does not exceed active chainstate\n"); + return false; + } + COutPoint outpoint; Coin coin; const uint64_t coins_count = metadata.m_coins_count; From ce585a9a158476b0ad3296477b922e79f308e795 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Fri, 29 Mar 2019 15:31:54 -0400 Subject: [PATCH 060/172] rpc: add loadtxoutset Co-authored-by: Sebastian Falbesoner --- doc/design/assumeutxo.md | 2 +- doc/release-notes-27596.md | 19 +++++++ src/rpc/blockchain.cpp | 103 ++++++++++++++++++++++++++++++++++++- src/test/fuzz/rpc.cpp | 1 + 4 files changed, 123 insertions(+), 2 deletions(-) diff --git a/doc/design/assumeutxo.md b/doc/design/assumeutxo.md index 1492877e62224..8068a93f27a7d 100644 --- a/doc/design/assumeutxo.md +++ b/doc/design/assumeutxo.md @@ -3,7 +3,7 @@ Assumeutxo is a feature that allows fast bootstrapping of a validating bitcoind instance with a very similar security model to assumevalid. -The RPC commands `dumptxoutset` and `loadtxoutset` (yet to be merged) are used to +The RPC commands `dumptxoutset` and `loadtxoutset` are used to respectively generate and load UTXO snapshots. The utility script `./contrib/devtools/utxo_snapshot.sh` may be of use. diff --git a/doc/release-notes-27596.md b/doc/release-notes-27596.md index 4f96adb0f354f..da96b36189c47 100644 --- a/doc/release-notes-27596.md +++ b/doc/release-notes-27596.md @@ -5,3 +5,22 @@ When using assumeutxo with `-prune`, the prune budget may be exceeded if it is s lower than 1100MB (i.e. `MIN_DISK_SPACE_FOR_BLOCK_FILES * 2`). Prune budget is normally split evenly across each chainstate, unless the resulting prune budget per chainstate is beneath `MIN_DISK_SPACE_FOR_BLOCK_FILES` in which case that value will be used. + +RPC +--- + +`loadtxoutset` has been added, which allows loading a UTXO snapshot of the format +generated by `dumptxoutset`. Once this snapshot is loaded, its contents will be +deserialized into a second chainstate data structure, which is then used to sync to +the network's tip under a security model very much like `assumevalid`. + +Meanwhile, the original chainstate will complete the initial block download process in +the background, eventually validating up to the block that the snapshot is based upon. + +The result is a usable bitcoind instance that is current with the network tip in a +matter of minutes rather than hours. UTXO snapshot are typically obtained via +third-party sources (HTTP, torrent, etc.) 
which is reasonable since their contents +are always checked by hash. + +You can find more information on this process in the `assumeutxo` design +document (). diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index f4d88e4209f6e..c7ffa0a0b2e98 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -2699,6 +2700,105 @@ UniValue CreateUTXOSnapshot( return result; } +static RPCHelpMan loadtxoutset() +{ + return RPCHelpMan{ + "loadtxoutset", + "Load the serialized UTXO set from disk.\n" + "Once this snapshot is loaded, its contents will be " + "deserialized into a second chainstate data structure, which is then used to sync to " + "the network's tip under a security model very much like `assumevalid`. " + "Meanwhile, the original chainstate will complete the initial block download process in " + "the background, eventually validating up to the block that the snapshot is based upon.\n\n" + + "The result is a usable bitcoind instance that is current with the network tip in a " + "matter of minutes rather than hours. UTXO snapshot are typically obtained from " + "third-party sources (HTTP, torrent, etc.) which is reasonable since their " + "contents are always checked by hash.\n\n" + + "You can find more information on this process in the `assumeutxo` design " + "document ().", + { + {"path", + RPCArg::Type::STR, + RPCArg::Optional::NO, + "path to the snapshot file. If relative, will be prefixed by datadir."}, + }, + RPCResult{ + RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::NUM, "coins_loaded", "the number of coins loaded from the snapshot"}, + {RPCResult::Type::STR_HEX, "tip_hash", "the hash of the base of the snapshot"}, + {RPCResult::Type::NUM, "base_height", "the height of the base of the snapshot"}, + {RPCResult::Type::STR, "path", "the absolute path that the snapshot was loaded from"}, + } + }, + RPCExamples{ + HelpExampleCli("loadtxoutset", "utxo.dat") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ + NodeContext& node = EnsureAnyNodeContext(request.context); + fs::path path{AbsPathForConfigVal(EnsureArgsman(node), fs::u8path(request.params[0].get_str()))}; + + FILE* file{fsbridge::fopen(path, "rb")}; + AutoFile afile{file}; + if (afile.IsNull()) { + throw JSONRPCError( + RPC_INVALID_PARAMETER, + "Couldn't open file " + path.u8string() + " for reading."); + } + + SnapshotMetadata metadata; + afile >> metadata; + + uint256 base_blockhash = metadata.m_base_blockhash; + int max_secs_to_wait_for_headers = 60 * 10; + CBlockIndex* snapshot_start_block = nullptr; + + LogPrintf("[snapshot] waiting to see blockheader %s in headers chain before snapshot activation\n", + base_blockhash.ToString()); + + ChainstateManager& chainman = *node.chainman; + + while (max_secs_to_wait_for_headers > 0) { + snapshot_start_block = WITH_LOCK(::cs_main, + return chainman.m_blockman.LookupBlockIndex(base_blockhash)); + max_secs_to_wait_for_headers -= 1; + + if (!IsRPCRunning()) { + throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down"); + } + + if (!snapshot_start_block) { + std::this_thread::sleep_for(std::chrono::seconds(1)); + } else { + break; + } + } + + if (!snapshot_start_block) { + LogPrintf("[snapshot] timed out waiting for snapshot start blockheader %s\n", + base_blockhash.ToString()); + throw JSONRPCError( + RPC_INTERNAL_ERROR, + "Timed out waiting for base block header to appear in headers chain"); + } + if (!chainman.ActivateSnapshot(afile, metadata, 
false)) { + throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to load UTXO snapshot " + fs::PathToString(path)); + } + CBlockIndex* new_tip{WITH_LOCK(::cs_main, return chainman.ActiveTip())}; + + UniValue result(UniValue::VOBJ); + result.pushKV("coins_loaded", metadata.m_coins_count); + result.pushKV("tip_hash", new_tip->GetBlockHash().ToString()); + result.pushKV("base_height", new_tip->nHeight); + result.pushKV("path", fs::PathToString(path)); + return result; +}, + }; +} + void RegisterBlockchainRPCCommands(CRPCTable& t) { static const CRPCCommand commands[]{ @@ -2722,13 +2822,14 @@ void RegisterBlockchainRPCCommands(CRPCTable& t) {"blockchain", &scantxoutset}, {"blockchain", &scanblocks}, {"blockchain", &getblockfilter}, + {"blockchain", &dumptxoutset}, + {"blockchain", &loadtxoutset}, {"hidden", &invalidateblock}, {"hidden", &reconsiderblock}, {"hidden", &waitfornewblock}, {"hidden", &waitforblock}, {"hidden", &waitforblockheight}, {"hidden", &syncwithvalidationinterfacequeue}, - {"hidden", &dumptxoutset}, }; for (const auto& c : commands) { t.appendCommand(c.name, &c); diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp index 7e9a18e1d0bbe..2ef3fe1b4779a 100644 --- a/src/test/fuzz/rpc.cpp +++ b/src/test/fuzz/rpc.cpp @@ -80,6 +80,7 @@ const std::vector RPC_COMMANDS_NOT_SAFE_FOR_FUZZING{ "gettxoutproof", // avoid prohibitively slow execution "importmempool", // avoid reading from disk "importwallet", // avoid reading from disk + "loadtxoutset", // avoid reading from disk "loadwallet", // avoid reading from disk "savemempool", // disabled as a precautionary measure: may take a file path argument in the future "setban", // avoid DNS lookups From bb0585779472962f40d9cdd9c6532132850d371c Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Fri, 8 Sep 2023 06:29:32 -0400 Subject: [PATCH 061/172] refuse to activate a UTXO snapshot if mempool not empty This ensures that we avoid any unexpected conditions inherent in transferring non-empty mempools across chainstates. Note that this should never happen in practice given that snapshot activation will not occur outside of IBD, based upon the height checks in `loadtxoutset`. --- src/validation.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/validation.cpp b/src/validation.cpp index 82aafd97f87bd..30b3dde74f010 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5185,6 +5185,14 @@ bool ChainstateManager::ActivateSnapshot( return false; } + { + LOCK(::cs_main); + if (Assert(m_active_chainstate->GetMempool())->size() > 0) { + LogPrintf("[snapshot] can't activate a snapshot when mempool not empty\n"); + return false; + } + } + int64_t current_coinsdb_cache_size{0}; int64_t current_coinstip_cache_size{0}; From 0f64bac6030334d798ae205cd7af4bf248feddd9 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Fri, 29 Mar 2019 17:55:08 -0400 Subject: [PATCH 062/172] rpc: add getchainstates Co-authored-by: Ryan Ofsky --- doc/release-notes-27596.md | 2 ++ src/rpc/blockchain.cpp | 74 ++++++++++++++++++++++++++++++++++++++ src/test/fuzz/rpc.cpp | 1 + 3 files changed, 77 insertions(+) diff --git a/doc/release-notes-27596.md b/doc/release-notes-27596.md index da96b36189c47..799b82643fec6 100644 --- a/doc/release-notes-27596.md +++ b/doc/release-notes-27596.md @@ -24,3 +24,5 @@ are always checked by hash. You can find more information on this process in the `assumeutxo` design document (). + +`getchainstates` has been added to aid in monitoring the assumeutxo sync process. 
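+
+For illustration, a typical sequence might be (the snapshot filename is
+an example; see the RPC help for details):
+
+    bitcoin-cli loadtxoutset utxo.dat
+    bitcoin-cli getchainstates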
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index c7ffa0a0b2e98..0f4941b40ccfd 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -2799,6 +2799,79 @@ static RPCHelpMan loadtxoutset() }; } +const std::vector RPCHelpForChainstate{ + {RPCResult::Type::NUM, "blocks", "number of blocks in this chainstate"}, + {RPCResult::Type::STR_HEX, "bestblockhash", "blockhash of the tip"}, + {RPCResult::Type::NUM, "difficulty", "difficulty of the tip"}, + {RPCResult::Type::NUM, "verificationprogress", "progress towards the network tip"}, + {RPCResult::Type::STR_HEX, "snapshot_blockhash", /*optional=*/true, "the base block of the snapshot this chainstate is based on, if any"}, + {RPCResult::Type::NUM, "coins_db_cache_bytes", "size of the coinsdb cache"}, + {RPCResult::Type::NUM, "coins_tip_cache_bytes", "size of the coinstip cache"}, +}; + +static RPCHelpMan getchainstates() +{ +return RPCHelpMan{ + "getchainstates", + "\nReturn information about chainstates.\n", + {}, + RPCResult{ + RPCResult::Type::OBJ, "", "", { + {RPCResult::Type::NUM, "headers", "the number of headers seen so far"}, + {RPCResult::Type::OBJ, "normal", /*optional=*/true, "fully validated chainstate containing blocks this node has validated starting from the genesis block", RPCHelpForChainstate}, + {RPCResult::Type::OBJ, "snapshot", /*optional=*/true, "only present if an assumeutxo snapshot is loaded. Partially validated chainstate containing blocks this node has validated starting from the snapshot. After the snapshot is validated (when the 'normal' chainstate advances far enough to validate it), this chainstate will replace and become the 'normal' chainstate.", RPCHelpForChainstate}, + } + }, + RPCExamples{ + HelpExampleCli("getchainstates", "") + + HelpExampleRpc("getchainstates", "") + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ + LOCK(cs_main); + UniValue obj(UniValue::VOBJ); + + NodeContext& node = EnsureAnyNodeContext(request.context); + ChainstateManager& chainman = *node.chainman; + + auto make_chain_data = [&](const Chainstate& cs) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { + AssertLockHeld(::cs_main); + UniValue data(UniValue::VOBJ); + if (!cs.m_chain.Tip()) { + return data; + } + const CChain& chain = cs.m_chain; + const CBlockIndex* tip = chain.Tip(); + + data.pushKV("blocks", (int)chain.Height()); + data.pushKV("bestblockhash", tip->GetBlockHash().GetHex()); + data.pushKV("difficulty", (double)GetDifficulty(tip)); + data.pushKV("verificationprogress", GuessVerificationProgress(Params().TxData(), tip)); + data.pushKV("coins_db_cache_bytes", cs.m_coinsdb_cache_size_bytes); + data.pushKV("coins_tip_cache_bytes", cs.m_coinstip_cache_size_bytes); + if (cs.m_from_snapshot_blockhash) { + data.pushKV("snapshot_blockhash", cs.m_from_snapshot_blockhash->ToString()); + } + return data; + }; + + if (chainman.GetAll().size() > 1) { + for (Chainstate* chainstate : chainman.GetAll()) { + obj.pushKV( + chainstate->m_from_snapshot_blockhash ? "snapshot" : "normal", + make_chain_data(*chainstate)); + } + } else { + obj.pushKV("normal", make_chain_data(chainman.ActiveChainstate())); + } + obj.pushKV("headers", chainman.m_best_header ? 
chainman.m_best_header->nHeight : -1);
+
+    return obj;
+}
+    };
+}
+
+
 void RegisterBlockchainRPCCommands(CRPCTable& t)
 {
     static const CRPCCommand commands[]{
@@ -2824,6 +2897,7 @@ void RegisterBlockchainRPCCommands(CRPCTable& t)
         {"blockchain", &getblockfilter},
         {"blockchain", &dumptxoutset},
         {"blockchain", &loadtxoutset},
+        {"blockchain", &getchainstates},
         {"hidden", &invalidateblock},
         {"hidden", &reconsiderblock},
         {"hidden", &waitfornewblock},
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index 2ef3fe1b4779a..27bb60d6b6125 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -123,6 +123,7 @@ const std::vector RPC_COMMANDS_SAFE_FOR_FUZZING{
     "getblockstats",
     "getblocktemplate",
     "getchaintips",
+    "getchainstates",
     "getchaintxstats",
     "getconnectioncount",
     "getdeploymentinfo",

From 42cae39356fd20d521aaf99aff1ed85856f3c9f3 Mon Sep 17 00:00:00 2001
From: James O'Beirne
Date: Thu, 17 Jun 2021 16:09:38 -0400
Subject: [PATCH 063/172] test: add feature_assumeutxo functional test

Most ideas for test improvements (TODOs) provided by Russ Yanofsky.

---
 src/kernel/chainparams.cpp            |   7 +
 test/functional/feature_assumeutxo.py | 246 ++++++++++++++++++
 .../test_framework/test_framework.py  |   4 +
 test/functional/test_runner.py        |   1 +
 4 files changed, 258 insertions(+)
 create mode 100755 test/functional/feature_assumeutxo.py

diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp
index ca418fc6abc07..3ae24d0eb50bd 100644
--- a/src/kernel/chainparams.cpp
+++ b/src/kernel/chainparams.cpp
@@ -484,6 +484,13 @@ class CRegTestParams : public CChainParams
             .nChainTx = 110,
             .blockhash = uint256S("0x696e92821f65549c7ee134edceeeeaaa4105647a3c4fd9f298c0aec0ab50425c")
         },
+        {
+            // For use by test/functional/feature_assumeutxo.py
+            .height = 299,
+            .hash_serialized = AssumeutxoHash{uint256S("0xef45ccdca5898b6c2145e4581d2b88c56564dd389e4bd75a1aaf6961d3edd3c0")},
+            .nChainTx = 300,
+            .blockhash = uint256S("0x7e0517ef3ea6ecbed9117858e42eedc8eb39e8698a38dcbd1b3962a283233f4c")
+        },
     };
 
     chainTxData = ChainTxData{
diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py
new file mode 100755
index 0000000000000..be1aa1899380a
--- /dev/null
+++ b/test/functional/feature_assumeutxo.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test for assumeutxo, a means of quickly bootstrapping a node using
+a serialized version of the UTXO set at a certain height, which corresponds
+to a hash that has been compiled into bitcoind.
+
+The assumeutxo value generated and used here is committed to in
+`CRegTestParams::m_assumeutxo_data` in `src/kernel/chainparams.cpp`.
+
+## Possible test improvements
+
+- TODO: test submitting a transaction and verifying it appears in mempool
+- TODO: test what happens with -reindex and -reindex-chainstate before the
+  snapshot is validated, and make sure it's deleted successfully.
+ +Interesting test cases could be loading an assumeutxo snapshot file with: + +- TODO: An invalid hash +- TODO: Valid hash but invalid snapshot file (bad coin height or truncated file or + bad other serialization) +- TODO: Valid snapshot file, but referencing an unknown block +- TODO: Valid snapshot file, but referencing a snapshot block that turns out to be + invalid, or has an invalid parent +- TODO: Valid snapshot file and snapshot block, but the block is not on the + most-work chain + +Interesting starting states could be loading a snapshot when the current chain tip is: + +- TODO: An ancestor of snapshot block +- TODO: Not an ancestor of the snapshot block but has less work +- TODO: The snapshot block +- TODO: A descendant of the snapshot block +- TODO: Not an ancestor or a descendant of the snapshot block and has more work + +""" +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, wait_until_helper + +START_HEIGHT = 199 +SNAPSHOT_BASE_HEIGHT = 299 +FINAL_HEIGHT = 399 +COMPLETE_IDX = {'synced': True, 'best_block_height': FINAL_HEIGHT} + + +class AssumeutxoTest(BitcoinTestFramework): + + def set_test_params(self): + """Use the pregenerated, deterministic chain up to height 199.""" + self.num_nodes = 3 + self.rpc_timeout = 120 + self.extra_args = [ + [], + ["-fastprune", "-prune=1", "-blockfilterindex=1", "-coinstatsindex=1"], + ["-txindex=1", "-blockfilterindex=1", "-coinstatsindex=1"], + ] + + def setup_network(self): + """Start with the nodes disconnected so that one can generate a snapshot + including blocks the other hasn't yet seen.""" + self.add_nodes(3) + self.start_nodes(extra_args=self.extra_args) + + def run_test(self): + """ + Bring up two (disconnected) nodes, mine some new blocks on the first, + and generate a UTXO snapshot. + + Load the snapshot into the second, ensure it syncs to tip and completes + background validation when connected to the first. + """ + n0 = self.nodes[0] + n1 = self.nodes[1] + n2 = self.nodes[2] + + # Mock time for a deterministic chain + for n in self.nodes: + n.setmocktime(n.getblockheader(n.getbestblockhash())['time']) + + self.sync_blocks() + + def no_sync(): + pass + + # Generate a series of blocks that `n0` will have in the snapshot, + # but that n1 doesn't yet see. In order for the snapshot to activate, + # though, we have to ferry over the new headers to n1 so that it + # isn't waiting forever to see the header of the snapshot's base block + # while disconnected from n0. + for i in range(100): + self.generate(n0, nblocks=1, sync_fun=no_sync) + newblock = n0.getblock(n0.getbestblockhash(), 0) + + # make n1 aware of the new header, but don't give it the block. + n1.submitheader(newblock) + n2.submitheader(newblock) + + # Ensure everyone is seeing the same headers. + for n in self.nodes: + assert_equal(n.getblockchaininfo()["headers"], SNAPSHOT_BASE_HEIGHT) + + self.log.info("-- Testing assumeutxo + some indexes + pruning") + + assert_equal(n0.getblockcount(), SNAPSHOT_BASE_HEIGHT) + assert_equal(n1.getblockcount(), START_HEIGHT) + + self.log.info(f"Creating a UTXO snapshot at height {SNAPSHOT_BASE_HEIGHT}") + dump_output = n0.dumptxoutset('utxos.dat') + + assert_equal( + dump_output['txoutset_hash'], + 'ef45ccdca5898b6c2145e4581d2b88c56564dd389e4bd75a1aaf6961d3edd3c0') + assert_equal(dump_output['nchaintx'], 300) + assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT) + + # Mine more blocks on top of the snapshot that n1 hasn't yet seen. 
This + # will allow us to test n1's sync-to-tip on top of a snapshot. + self.generate(n0, nblocks=100, sync_fun=no_sync) + + assert_equal(n0.getblockcount(), FINAL_HEIGHT) + assert_equal(n1.getblockcount(), START_HEIGHT) + + assert_equal(n0.getblockchaininfo()["blocks"], FINAL_HEIGHT) + + self.log.info(f"Loading snapshot into second node from {dump_output['path']}") + loaded = n1.loadtxoutset(dump_output['path']) + assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT) + assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT) + + monitor = n1.getchainstates() + assert_equal(monitor['normal']['blocks'], START_HEIGHT) + assert_equal(monitor['snapshot']['blocks'], SNAPSHOT_BASE_HEIGHT) + assert_equal(monitor['snapshot']['snapshot_blockhash'], dump_output['base_hash']) + + assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT) + + PAUSE_HEIGHT = FINAL_HEIGHT - 40 + + self.log.info("Restarting node to stop at height %d", PAUSE_HEIGHT) + self.restart_node(1, extra_args=[ + f"-stopatheight={PAUSE_HEIGHT}", *self.extra_args[1]]) + + # Finally connect the nodes and let them sync. + self.connect_nodes(0, 1) + + n1.wait_until_stopped(timeout=5) + + self.log.info("Checking that blocks are segmented on disk") + assert self.has_blockfile(n1, "00000"), "normal blockfile missing" + assert self.has_blockfile(n1, "00001"), "assumed blockfile missing" + assert not self.has_blockfile(n1, "00002"), "too many blockfiles" + + self.log.info("Restarted node before snapshot validation completed, reloading...") + self.restart_node(1, extra_args=self.extra_args[1]) + self.connect_nodes(0, 1) + + self.log.info(f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})") + wait_until_helper(lambda: n1.getchainstates()['snapshot']['blocks'] == FINAL_HEIGHT) + self.sync_blocks(nodes=(n0, n1)) + + self.log.info("Ensuring background validation completes") + # N.B.: the `snapshot` key disappears once the background validation is complete. + wait_until_helper(lambda: not n1.getchainstates().get('snapshot')) + + # Ensure indexes have synced. 
+ completed_idx_state = { + 'basic block filter index': COMPLETE_IDX, + 'coinstatsindex': COMPLETE_IDX, + } + self.wait_until(lambda: n1.getindexinfo() == completed_idx_state) + + + for i in (0, 1): + n = self.nodes[i] + self.log.info(f"Restarting node {i} to ensure (Check|Load)BlockIndex passes") + self.restart_node(i, extra_args=self.extra_args[i]) + + assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT) + + assert_equal(n.getchainstates()['normal']['blocks'], FINAL_HEIGHT) + assert_equal(n.getchainstates().get('snapshot'), None) + + if i != 0: + # Ensure indexes have synced for the assumeutxo node + self.wait_until(lambda: n.getindexinfo() == completed_idx_state) + + + # Node 2: all indexes + reindex + # ----------------------------- + + self.log.info("-- Testing all indexes + reindex") + assert_equal(n2.getblockcount(), START_HEIGHT) + + self.log.info(f"Loading snapshot into third node from {dump_output['path']}") + loaded = n2.loadtxoutset(dump_output['path']) + assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT) + assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT) + + monitor = n2.getchainstates() + assert_equal(monitor['normal']['blocks'], START_HEIGHT) + assert_equal(monitor['snapshot']['blocks'], SNAPSHOT_BASE_HEIGHT) + assert_equal(monitor['snapshot']['snapshot_blockhash'], dump_output['base_hash']) + + self.connect_nodes(0, 2) + wait_until_helper(lambda: n2.getchainstates()['snapshot']['blocks'] == FINAL_HEIGHT) + self.sync_blocks() + + self.log.info("Ensuring background validation completes") + wait_until_helper(lambda: not n2.getchainstates().get('snapshot')) + + completed_idx_state = { + 'basic block filter index': COMPLETE_IDX, + 'coinstatsindex': COMPLETE_IDX, + 'txindex': COMPLETE_IDX, + } + self.wait_until(lambda: n2.getindexinfo() == completed_idx_state) + + for i in (0, 2): + n = self.nodes[i] + self.log.info(f"Restarting node {i} to ensure (Check|Load)BlockIndex passes") + self.restart_node(i, extra_args=self.extra_args[i]) + + assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT) + + assert_equal(n.getchainstates()['normal']['blocks'], FINAL_HEIGHT) + assert_equal(n.getchainstates().get('snapshot'), None) + + if i != 0: + # Ensure indexes have synced for the assumeutxo node + self.wait_until(lambda: n.getindexinfo() == completed_idx_state) + + self.log.info("Test -reindex-chainstate of an assumeutxo-synced node") + self.restart_node(2, extra_args=[ + '-reindex-chainstate=1', *self.extra_args[2]]) + assert_equal(n2.getblockchaininfo()["blocks"], FINAL_HEIGHT) + wait_until_helper(lambda: n2.getblockcount() == FINAL_HEIGHT) + + self.log.info("Test -reindex of an assumeutxo-synced node") + self.restart_node(2, extra_args=['-reindex=1', *self.extra_args[2]]) + self.connect_nodes(0, 2) + wait_until_helper(lambda: n2.getblockcount() == FINAL_HEIGHT) + + +if __name__ == '__main__': + AssumeutxoTest().main() diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 73e7516ea7e48..73635b4397e1e 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -979,3 +979,7 @@ def is_sqlite_compiled(self): def is_bdb_compiled(self): """Checks whether the wallet module was compiled with BDB support.""" return self.config["components"].getboolean("USE_BDB") + + def has_blockfile(self, node, filenum: str): + blocksdir = os.path.join(node.datadir, self.chain, 'blocks', '') + return os.path.isfile(os.path.join(blocksdir, f"blk{filenum}.dat")) 
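The `has_blockfile` helper above only probes for `blk*.dat` files in the node's blocks directory; when checking the same segmentation by hand, an equivalent shell sketch (assuming the regtest datadir layout used by the framework) is:

    # The snapshot chainstate's blocks land in a separate block file from the
    # background-validation chain, hence blk00000.dat and blk00001.dat (but no
    # blk00002.dat) in the assertions above.
    ls "$DATADIR/regtest/blocks"/blk*.dat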
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 32aee3aa8000e..9a0b5c6f0a678 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -324,6 +324,7 @@ 'wallet_coinbase_category.py --descriptors', 'feature_filelock.py', 'feature_loadblock.py', + 'feature_assumeutxo.py', 'p2p_dos_header_tree.py', 'p2p_add_connections.py', 'feature_bind_port_discover.py', From 7ee46a755f1d57ce9d51975d3b54dc9ac3d08d52 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Wed, 16 Jun 2021 12:09:29 -0400 Subject: [PATCH 064/172] contrib: add script to demo/test assumeutxo Add the script to the shellcheck exception list since the quoted variables rule needs to be violated in order to get bitcoind to pick up on $CHAIN_HACK_FLAGS. --- contrib/devtools/test_utxo_snapshots.sh | 200 ++++++++++++++++++++++++ test/lint/lint-shell.py | 8 +- 2 files changed, 206 insertions(+), 2 deletions(-) create mode 100755 contrib/devtools/test_utxo_snapshots.sh diff --git a/contrib/devtools/test_utxo_snapshots.sh b/contrib/devtools/test_utxo_snapshots.sh new file mode 100755 index 0000000000000..d4c49bf098f28 --- /dev/null +++ b/contrib/devtools/test_utxo_snapshots.sh @@ -0,0 +1,200 @@ +#!/usr/bin/env bash +# Demonstrate the creation and usage of UTXO snapshots. +# +# A server node starts up, IBDs up to a certain height, then generates a UTXO +# snapshot at that point. +# +# The server then downloads more blocks (to create a diff from the snapshot). +# +# We bring a client up, load the UTXO snapshot, and we show the client sync to +# the "network tip" and then start a background validation of the snapshot it +# loaded. We see the background validation chainstate removed after validation +# completes. +# + +export LC_ALL=C +set -e + +BASE_HEIGHT=${1:-30000} +INCREMENTAL_HEIGHT=20000 +FINAL_HEIGHT=$(($BASE_HEIGHT + $INCREMENTAL_HEIGHT)) + +SERVER_DATADIR="$(pwd)/utxodemo-data-server-$BASE_HEIGHT" +CLIENT_DATADIR="$(pwd)/utxodemo-data-client-$BASE_HEIGHT" +UTXO_DAT_FILE="$(pwd)/utxo.$BASE_HEIGHT.dat" + +# Chosen to try to not interfere with any running bitcoind processes. +SERVER_PORT=8633 +SERVER_RPC_PORT=8632 + +CLIENT_PORT=8733 +CLIENT_RPC_PORT=8732 + +SERVER_PORTS="-port=${SERVER_PORT} -rpcport=${SERVER_RPC_PORT}" +CLIENT_PORTS="-port=${CLIENT_PORT} -rpcport=${CLIENT_RPC_PORT}" + +# Ensure the client exercises all indexes to test that snapshot use works +# properly with indexes. +ALL_INDEXES="-txindex -coinstatsindex -blockfilterindex=1" + +if ! command -v jq >/dev/null ; then + echo "This script requires jq to parse JSON RPC output. Please install it." + echo "(e.g. sudo apt install jq)" + exit 1 +fi + +DUMP_OUTPUT="dumptxoutset-output-$BASE_HEIGHT.json" + +finish() { + echo + echo "Killing server and client PIDs ($SERVER_PID, $CLIENT_PID) and cleaning up datadirs" + echo + rm -f "$UTXO_DAT_FILE" "$DUMP_OUTPUT" + rm -rf "$SERVER_DATADIR" "$CLIENT_DATADIR" + kill -9 "$SERVER_PID" "$CLIENT_PID" +} + +trap finish EXIT + +# Need to specify these to trick client into accepting server as a peer +# it can IBD from, otherwise the default values prevent IBD from the server node. +EARLY_IBD_FLAGS="-maxtipage=9223372036854775207 -minimumchainwork=0x00" + +server_rpc() { + ./src/bitcoin-cli -rpcport=$SERVER_RPC_PORT -datadir="$SERVER_DATADIR" "$@" +} +client_rpc() { + ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir="$CLIENT_DATADIR" "$@" +} +server_sleep_til_boot() { + while ! server_rpc ping >/dev/null 2>&1; do sleep 0.1; done +} +client_sleep_til_boot() { + while ! 
client_rpc ping >/dev/null 2>&1; do sleep 0.1; done +} + +mkdir -p "$SERVER_DATADIR" "$CLIENT_DATADIR" + +echo "Hi, welcome to the assumeutxo demo/test" +echo +echo "We're going to" +echo +echo " - start up a 'server' node, sync it via mainnet IBD to height ${BASE_HEIGHT}" +echo " - create a UTXO snapshot at that height" +echo " - IBD ${INCREMENTAL_HEIGHT} more blocks on top of that" +echo +echo "then we'll demonstrate assumeutxo by " +echo +echo " - starting another node (the 'client') and loading the snapshot in" +echo " * first you'll have to modify the code slightly (chainparams) and recompile" +echo " * don't worry, we'll make it easy" +echo " - observing the client sync ${INCREMENTAL_HEIGHT} blocks on top of the snapshot from the server" +echo " - observing the client validate the snapshot chain via background IBD" +echo +read -p "Press [enter] to continue" _ + +echo +echo "-- Starting the demo. You might want to run the two following commands in" +echo " separate terminal windows:" +echo +echo " watch -n0.1 tail -n 30 $SERVER_DATADIR/debug.log" +echo " watch -n0.1 tail -n 30 $CLIENT_DATADIR/debug.log" +echo +read -p "Press [enter] to continue" _ + +echo +echo "-- IBDing the blocks (height=$BASE_HEIGHT) required to the server node..." +./src/bitcoind -logthreadnames=1 $SERVER_PORTS \ + -datadir="$SERVER_DATADIR" $EARLY_IBD_FLAGS -stopatheight="$BASE_HEIGHT" >/dev/null + +echo +echo "-- Creating snapshot at ~ height $BASE_HEIGHT ($UTXO_DAT_FILE)..." +sleep 2 +./src/bitcoind -logthreadnames=1 $SERVER_PORTS \ + -datadir="$SERVER_DATADIR" $EARLY_IBD_FLAGS -connect=0 -listen=0 >/dev/null & +SERVER_PID="$!" + +server_sleep_til_boot +server_rpc dumptxoutset "$UTXO_DAT_FILE" > "$DUMP_OUTPUT" +cat "$DUMP_OUTPUT" +kill -9 "$SERVER_PID" + +RPC_BASE_HEIGHT=$(jq -r .base_height < "$DUMP_OUTPUT") +RPC_AU=$(jq -r .txoutset_hash < "$DUMP_OUTPUT") +RPC_NCHAINTX=$(jq -r .nchaintx < "$DUMP_OUTPUT") +RPC_BLOCKHASH=$(jq -r .base_hash < "$DUMP_OUTPUT") + +# Wait for server to shutdown... +while server_rpc ping >/dev/null 2>&1; do sleep 0.1; done + +echo +echo "-- Now: add the following to CMainParams::m_assumeutxo_data" +echo " in src/kernel/chainparams.cpp, and recompile:" +echo +echo " {${RPC_BASE_HEIGHT}, AssumeutxoHash{uint256S(\"0x${RPC_AU}\")}, ${RPC_NCHAINTX}, uint256S(\"0x${RPC_BLOCKHASH}\")}," +echo +echo +echo "-- IBDing more blocks to the server node (height=$FINAL_HEIGHT) so there is a diff between snapshot and tip..." +./src/bitcoind $SERVER_PORTS -logthreadnames=1 -datadir="$SERVER_DATADIR" \ + $EARLY_IBD_FLAGS -stopatheight="$FINAL_HEIGHT" >/dev/null + +echo +echo "-- Starting the server node to provide blocks to the client node..." +./src/bitcoind $SERVER_PORTS -logthreadnames=1 -debug=net -datadir="$SERVER_DATADIR" \ + $EARLY_IBD_FLAGS -connect=0 -listen=1 >/dev/null & +SERVER_PID="$!" +server_sleep_til_boot + +echo +echo "-- Okay, what you're about to see is the client starting up and activating the snapshot." +echo " I'm going to display the top 14 log lines from the client on top of an RPC called" +echo " getchainstates, which is like getblockchaininfo but for both the snapshot and " +echo " background validation chainstates." +echo +echo " You're going to first see the snapshot chainstate sync to the server's tip, then" +echo " the background IBD chain kicks in to validate up to the base of the snapshot." +echo +echo " Once validation of the snapshot is done, you should see log lines indicating" +echo " that we've deleted the background validation chainstate." 
+echo +echo " Once everything completes, exit the watch command with CTRL+C." +echo +read -p "When you're ready for all this, hit [enter]" _ + +echo +echo "-- Starting the client node to get headers from the server, then load the snapshot..." +./src/bitcoind $CLIENT_PORTS $ALL_INDEXES -logthreadnames=1 -datadir="$CLIENT_DATADIR" \ + -connect=0 -addnode=127.0.0.1:$SERVER_PORT -debug=net $EARLY_IBD_FLAGS >/dev/null & +CLIENT_PID="$!" +client_sleep_til_boot + +echo +echo "-- Initial state of the client:" +client_rpc getchainstates + +echo +echo "-- Loading UTXO snapshot into client..." +client_rpc loadtxoutset "$UTXO_DAT_FILE" + +watch -n 0.3 "( tail -n 14 $CLIENT_DATADIR/debug.log ; echo ; ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir=$CLIENT_DATADIR getchainstates) | cat" + +echo +echo "-- Okay, now I'm going to restart the client to make sure that the snapshot chain reloads " +echo " as the main chain properly..." +echo +echo " Press CTRL+C after you're satisfied to exit the demo" +echo +read -p "Press [enter] to continue" + +while kill -0 "$CLIENT_PID"; do + sleep 1 +done +./src/bitcoind $CLIENT_PORTS $ALL_INDEXES -logthreadnames=1 -datadir="$CLIENT_DATADIR" -connect=0 \ + -addnode=127.0.0.1:$SERVER_PORT "$EARLY_IBD_FLAGS" >/dev/null & +CLIENT_PID="$!" +client_sleep_til_boot + +watch -n 0.3 "( tail -n 14 $CLIENT_DATADIR/debug.log ; echo ; ./src/bitcoin-cli -rpcport=$CLIENT_RPC_PORT -datadir=$CLIENT_DATADIR getchainstates) | cat" + +echo +echo "-- Done!" diff --git a/test/lint/lint-shell.py b/test/lint/lint-shell.py index 1646bf0d3ed97..db84ca3d394e2 100755 --- a/test/lint/lint-shell.py +++ b/test/lint/lint-shell.py @@ -67,9 +67,13 @@ def main(): '*.sh', ] files = get_files(files_cmd) - # remove everything that doesn't match this regex reg = re.compile(r'src/[leveldb,secp256k1,minisketch]') - files[:] = [file for file in files if not reg.match(file)] + + def should_exclude(fname: str) -> bool: + return bool(reg.match(fname)) or 'test_utxo_snapshots.sh' in fname + + # remove everything that doesn't match this regex + files[:] = [file for file in files if not should_exclude(file)] # build the `shellcheck` command shellcheck_cmd = [ From 99839bbfa7110c7abf22e587ae2f72c9c57d3c85 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Mon, 11 Sep 2023 13:41:28 -0400 Subject: [PATCH 065/172] doc: add note about confusing HaveTxsDownloaded name --- src/chain.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/chain.h b/src/chain.h index 7806720ce9e8b..78b06719f43a4 100644 --- a/src/chain.h +++ b/src/chain.h @@ -276,6 +276,12 @@ class CBlockIndex * * Does not imply the transactions are consensus-valid (ConnectTip might fail) * Does not imply the transactions are still stored on disk. (IsBlockPruned might return true) + * + * Note that this will be true for the snapshot base block, if one is loaded (and + * all subsequent assumed-valid blocks) since its nChainTx value will have been set + * manually based on the related AssumeutxoData entry. + * + * TODO: potentially change the name of this based on the fact above. 
     */
     bool HaveTxsDownloaded() const { return nChainTx != 0; }

From 380130d9d70f8f8d395949a51f43806f6e600efa Mon Sep 17 00:00:00 2001
From: kevkevin
Date: Thu, 27 Jul 2023 23:32:20 -0500
Subject: [PATCH 066/172] test: add coverage to feature_addrman.py

Add two new tests covering the nNew and nTried bounds checks. They
extend coverage of the corresponding if blocks by checking values
larger than the allowed range, since previously only negative values
were checked.

Co-authored-by: ismaelsadeeq

---
 test/functional/feature_addrman.py        | 23 ++++++++++++++++++---
 test/functional/test_framework/netutil.py |  5 +++++
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/test/functional/feature_addrman.py b/test/functional/feature_addrman.py
index 7877f9d302bde..cb9cecaf9a91e 100755
--- a/test/functional/feature_addrman.py
+++ b/test/functional/feature_addrman.py
@@ -9,12 +9,12 @@
 import struct
 
 from test_framework.messages import ser_uint256, hash256
+from test_framework.netutil import ADDRMAN_NEW_BUCKET_COUNT, ADDRMAN_TRIED_BUCKET_COUNT, ADDRMAN_BUCKET_SIZE
 from test_framework.p2p import MAGIC_BYTES
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.test_node import ErrorMatch
 from test_framework.util import assert_equal
 
-
 def serialize_addrman(
     *,
     format=1,
@@ -117,17 +117,34 @@ def run_test(self):
 
         self.log.info("Check that corrupt addrman cannot be read (len_tried)")
         self.stop_node(0)
+        max_len_tried = ADDRMAN_TRIED_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE
         write_addrman(peers_dat, len_tried=-1)
         self.nodes[0].assert_start_raises_init_error(
-            expected_msg=init_error("Corrupt AddrMan serialization: nTried=-1, should be in \\[0, 16384\\]:.*"),
+            expected_msg=init_error(f"Corrupt AddrMan serialization: nTried=-1, should be in \\[0, {max_len_tried}\\]:.*"),
+            match=ErrorMatch.FULL_REGEX,
+        )
+
+        self.log.info("Check that corrupt addrman cannot be read (large len_tried)")
+        write_addrman(peers_dat, len_tried=max_len_tried + 1)
+        self.nodes[0].assert_start_raises_init_error(
+            expected_msg=init_error(f"Corrupt AddrMan serialization: nTried={max_len_tried + 1}, should be in \\[0, {max_len_tried}\\]:.*"),
             match=ErrorMatch.FULL_REGEX,
         )
 
         self.log.info("Check that corrupt addrman cannot be read (len_new)")
         self.stop_node(0)
+        max_len_new = ADDRMAN_NEW_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE
         write_addrman(peers_dat, len_new=-1)
         self.nodes[0].assert_start_raises_init_error(
-            expected_msg=init_error("Corrupt AddrMan serialization: nNew=-1, should be in \\[0, 65536\\]:.*"),
+            expected_msg=init_error(f"Corrupt AddrMan serialization: nNew=-1, should be in \\[0, {max_len_new}\\]:.*"),
+            match=ErrorMatch.FULL_REGEX,
+        )
+
+        self.log.info("Check that corrupt addrman cannot be read (large len_new)")
+        self.stop_node(0)
+        write_addrman(peers_dat, len_new=max_len_new + 1)
+        self.nodes[0].assert_start_raises_init_error(
+            expected_msg=init_error(f"Corrupt AddrMan serialization: nNew={max_len_new + 1}, should be in \\[0, {max_len_new}\\]:.*"),
             match=ErrorMatch.FULL_REGEX,
         )
 
diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py
index fcea4b2f68036..838f40fcaaa6c 100644
--- a/test/functional/test_framework/netutil.py
+++ b/test/functional/test_framework/netutil.py
@@ -25,6 +25,11 @@
 STATE_LISTEN = '0A'
 # STATE_CLOSING = '0B'
 
+# Address manager size constants as defined in addrman_impl.h
+ADDRMAN_NEW_BUCKET_COUNT = 1 << 10
+ADDRMAN_TRIED_BUCKET_COUNT = 1 << 8
+ADDRMAN_BUCKET_SIZE = 1 << 6
+
 def get_socket_inodes(pid):
     '''
    Get list of socket inodes for process pid.
From b4f28cc345ef9c5261c4a8d743654a44784c7802 Mon Sep 17 00:00:00 2001
From: glozow
Date: Mon, 2 Oct 2023 10:04:37 +0100
Subject: [PATCH 067/172] [doc] parent pay for child in aggregate CheckFeeRate

---
 src/validation.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/validation.cpp b/src/validation.cpp
index 357b4d422d23a..8166808c27b1e 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1285,6 +1285,12 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
     // Transactions must meet two minimum feerates: the mempool minimum fee and min relay fee.
     // For transactions consisting of exactly one child and its parents, it suffices to use the
     // package feerate (total modified fees / total virtual size) to check this requirement.
+    // Note that this is an aggregate feerate; this function has not checked that each individual
+    // transaction has a high enough feerate to pay for itself, or that the child transactions have
+    // a higher feerate than their parents. Using the aggregate feerate may allow "parents pay for
+    // child" behavior and permit a child that is below the mempool minimum feerate. To avoid these
+    // behaviors, callers of AcceptMultipleTransactions need to restrict the transactions' topology
+    // (e.g. to ancestor sets) and check the feerates of individual transactions and subsets.
     const auto m_total_vsize = std::accumulate(workspaces.cbegin(), workspaces.cend(), int64_t{0},
         [](int64_t sum, auto& ws) { return sum + ws.m_vsize; });
     const auto m_total_modified_fees = std::accumulate(workspaces.cbegin(), workspaces.cend(), CAmount{0},

From e32ba1599c599e75b1da3393f71f633de860505f Mon Sep 17 00:00:00 2001
From: glozow
Date: Thu, 11 May 2023 17:50:05 +0100
Subject: [PATCH 068/172] [txpackages] IsChildWithParentsTree()

Many edge cases exist when parents in a child-with-parents package can
spend each other. However, this pattern should also be uncommon in
normal use cases.

---
 src/policy/packages.cpp      | 15 +++++++++++++++
 src/policy/packages.h        |  4 ++++
 src/test/txpackage_tests.cpp |  3 +++
 3 files changed, 22 insertions(+)

diff --git a/src/policy/packages.cpp b/src/policy/packages.cpp
index fd272a2642e4c..47a9274a31107 100644
--- a/src/policy/packages.cpp
+++ b/src/policy/packages.cpp
@@ -88,3 +88,18 @@ bool IsChildWithParents(const Package& package)
     return std::all_of(package.cbegin(), package.cend() - 1,
                        [&input_txids](const auto& ptx) { return input_txids.count(ptx->GetHash()) > 0; });
 }
+
+bool IsChildWithParentsTree(const Package& package)
+{
+    if (!IsChildWithParents(package)) return false;
+    std::unordered_set parent_txids;
+    std::transform(package.cbegin(), package.cend() - 1, std::inserter(parent_txids, parent_txids.end()),
+                   [](const auto& ptx) { return ptx->GetHash(); });
+    // Each parent must not have an input that spends an output of one of the other parents.
+    return std::all_of(package.cbegin(), package.cend() - 1, [&](const auto& ptx) {
+        for (const auto& input : ptx->vin) {
+            if (parent_txids.count(input.prevout.hash) > 0) return false;
+        }
+        return true;
+    });
+}
diff --git a/src/policy/packages.h b/src/policy/packages.h
index 702667b8ade8d..cf37857e4bb65 100644
--- a/src/policy/packages.h
+++ b/src/policy/packages.h
@@ -63,4 +63,8 @@ bool CheckPackage(const Package& txns, PackageValidationState& state);
  */
 bool IsChildWithParents(const Package& package);
 
+/** Context-free check that a package IsChildWithParents() and none of the parents depend on each
+ * other (the package is a "tree").
+ */ +bool IsChildWithParentsTree(const Package& package); #endif // BITCOIN_POLICY_PACKAGES_H diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp index 571b58156f737..a8318e9fdbe09 100644 --- a/src/test/txpackage_tests.cpp +++ b/src/test/txpackage_tests.cpp @@ -162,6 +162,7 @@ BOOST_FIXTURE_TEST_CASE(noncontextual_package_tests, TestChain100Setup) BOOST_CHECK_EQUAL(state.GetResult(), PackageValidationResult::PCKG_POLICY); BOOST_CHECK_EQUAL(state.GetRejectReason(), "package-not-sorted"); BOOST_CHECK(IsChildWithParents({tx_parent, tx_child})); + BOOST_CHECK(IsChildWithParentsTree({tx_parent, tx_child})); } // 24 Parents and 1 Child @@ -187,6 +188,7 @@ BOOST_FIXTURE_TEST_CASE(noncontextual_package_tests, TestChain100Setup) PackageValidationState state; BOOST_CHECK(CheckPackage(package, state)); BOOST_CHECK(IsChildWithParents(package)); + BOOST_CHECK(IsChildWithParentsTree(package)); package.erase(package.begin()); BOOST_CHECK(IsChildWithParents(package)); @@ -219,6 +221,7 @@ BOOST_FIXTURE_TEST_CASE(noncontextual_package_tests, TestChain100Setup) BOOST_CHECK(IsChildWithParents({tx_parent, tx_parent_also_child})); BOOST_CHECK(IsChildWithParents({tx_parent, tx_child})); BOOST_CHECK(IsChildWithParents({tx_parent, tx_parent_also_child, tx_child})); + BOOST_CHECK(!IsChildWithParentsTree({tx_parent, tx_parent_also_child, tx_child})); // IsChildWithParents does not detect unsorted parents. BOOST_CHECK(IsChildWithParents({tx_parent_also_child, tx_parent, tx_child})); BOOST_CHECK(CheckPackage({tx_parent, tx_parent_also_child, tx_child}, state)); From 5b9087a9a7da2602485e85e0b163dc3cbd2daf31 Mon Sep 17 00:00:00 2001 From: glozow Date: Thu, 11 May 2023 17:54:39 +0100 Subject: [PATCH 069/172] [rpc] require package to be a tree in submitpackage --- src/rpc/mempool.cpp | 4 ++++ test/functional/rpc_packages.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 705608bd476a3..173127e01425d 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -820,6 +820,7 @@ static RPCHelpMan submitpackage() { return RPCHelpMan{"submitpackage", "Submit a package of raw transactions (serialized, hex-encoded) to local node (-regtest only).\n" + "The package must consist of a child with its parents, and none of the parents may depend on one another.\n" "The package will be validated according to consensus and mempool policy rules. If all transactions pass, they will be accepted to mempool.\n" "This RPC is experimental and the interface may be unstable. Refer to doc/policy/packages.md for documentation on package policies.\n" "Warning: until package relay is in use, successful submission does not mean the transaction will propagate to other nodes on the network.\n" @@ -881,6 +882,9 @@ static RPCHelpMan submitpackage() } txns.emplace_back(MakeTransactionRef(std::move(mtx))); } + if (!IsChildWithParentsTree(txns)) { + throw JSONRPCTransactionError(TransactionError::INVALID_PACKAGE, "package topology disallowed. 
not child-with-parents or parents depend on each other."); + } NodeContext& node = EnsureAnyNodeContext(request.context); CTxMemPool& mempool = EnsureMemPool(node); diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py index 9c4960aa1ea92..5644a9f5a8c92 100755 --- a/test/functional/rpc_packages.py +++ b/test/functional/rpc_packages.py @@ -335,7 +335,7 @@ def test_submitpackage(self): self.log.info("Submitpackage only allows packages of 1 child with its parents") # Chain of 3 transactions has too many generations chain_hex = [t["hex"] for t in self.wallet.create_self_transfer_chain(chain_length=25)] - assert_raises_rpc_error(-25, "not-child-with-parents", node.submitpackage, chain_hex) + assert_raises_rpc_error(-25, "package topology disallowed", node.submitpackage, chain_hex) if __name__ == "__main__": From 7a9bb2a2a59ba49f80519c8435229abec2432486 Mon Sep 17 00:00:00 2001 From: glozow Date: Tue, 11 Apr 2023 16:07:34 +0100 Subject: [PATCH 070/172] [rpc] allow submitpackage to be called outside of regtest --- src/rpc/mempool.cpp | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 173127e01425d..136969eb87419 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -819,12 +819,11 @@ static RPCHelpMan savemempool() static RPCHelpMan submitpackage() { return RPCHelpMan{"submitpackage", - "Submit a package of raw transactions (serialized, hex-encoded) to local node (-regtest only).\n" + "Submit a package of raw transactions (serialized, hex-encoded) to local node.\n" "The package must consist of a child with its parents, and none of the parents may depend on one another.\n" "The package will be validated according to consensus and mempool policy rules. If all transactions pass, they will be accepted to mempool.\n" "This RPC is experimental and the interface may be unstable. 
Refer to doc/policy/packages.md for documentation on package policies.\n" - "Warning: until package relay is in use, successful submission does not mean the transaction will propagate to other nodes on the network.\n" - "Currently, each transaction is broadcasted individually after submission, which means they must meet other nodes' feerate requirements alone.\n" + "Warning: successful submission does not mean the transactions will propagate throughout the network.\n" , { {"package", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of raw transactions.", @@ -863,9 +862,6 @@ static RPCHelpMan submitpackage() }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - if (Params().GetChainType() != ChainType::REGTEST) { - throw std::runtime_error("submitpackage is for regression testing (-regtest mode) only"); - } const UniValue raw_transactions = request.params[0].get_array(); if (raw_transactions.size() < 1 || raw_transactions.size() > MAX_PACKAGE_COUNT) { throw JSONRPCError(RPC_INVALID_PARAMETER, @@ -987,7 +983,7 @@ void RegisterMempoolRPCCommands(CRPCTable& t) {"blockchain", &getrawmempool}, {"blockchain", &importmempool}, {"blockchain", &savemempool}, - {"hidden", &submitpackage}, + {"rawtransactions", &submitpackage}, }; for (const auto& c : commands) { t.appendCommand(c.name, &c); From 5b878be742dbfcd232d949d2df1fff4743aec3d8 Mon Sep 17 00:00:00 2001 From: glozow Date: Wed, 10 May 2023 16:43:05 +0100 Subject: [PATCH 071/172] [doc] add release note for submitpackage --- doc/release-notes-27609.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 doc/release-notes-27609.md diff --git a/doc/release-notes-27609.md b/doc/release-notes-27609.md new file mode 100644 index 0000000000000..b8cecbd88252b --- /dev/null +++ b/doc/release-notes-27609.md @@ -0,0 +1,14 @@ +- A new RPC, `submitpackage`, has been added. It can be used to submit a list of raw hex + transactions to the mempool to be evaluated as a package using consensus and mempool policy rules. +These policies include package CPFP, allowing a child with high fees to bump a parent below the +mempool minimum feerate (but not minimum relay feerate). + + - Warning: successful submission does not mean the transactions will propagate throughout the + network, as package relay is not supported. + + - Not all features are available. The package is limited to a child with all of its + unconfirmed parents, and no parent may spend the output of another parent. Also, package + RBF is not supported. Refer to doc/policy/packages.md for more details on package policies + and limitations. + + - This RPC is experimental. Its interface may change. From d9b172cd00fc3a8de1308e4469b82f5da474ea33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20=C3=81lvarez=20Rosa?= Date: Mon, 2 Oct 2023 12:12:36 +0200 Subject: [PATCH 072/172] doc: fix link to developer-notes.md file in multiprocess.md --- doc/design/multiprocess.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/design/multiprocess.md b/doc/design/multiprocess.md index e3f389a6d3f89..e6a77dbbc0dd3 100644 --- a/doc/design/multiprocess.md +++ b/doc/design/multiprocess.md @@ -38,7 +38,7 @@ Alternately, you can install [Cap'n Proto](https://capnproto.org/) and [libmulti Cross process Node, Wallet, and Chain interfaces are defined in [`src/interfaces/`](../src/interfaces/). 
These are C++ classes which follow
-[conventions](developer-notes.md#internal-interface-guidelines), like passing
+[conventions](../developer-notes.md#internal-interface-guidelines), like passing
 serializable arguments so they can be called from different processes, and
 making methods pure virtual so they can have proxy implementations that
 forward calls between processes.

From d67aa25eb2f781b9edcfcf164a08401f9937a0c1 Mon Sep 17 00:00:00 2001
From: fanquake
Date: Mon, 2 Oct 2023 11:54:55 +0100
Subject: [PATCH 073/172] bench: drop NO_THREAD_SAFETY_ANALYSIS from disconnected_txs

---
 src/bench/disconnected_transactions.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/bench/disconnected_transactions.cpp b/src/bench/disconnected_transactions.cpp
index d6f15909505d5..0a7344b24897a 100644
--- a/src/bench/disconnected_transactions.cpp
+++ b/src/bench/disconnected_transactions.cpp
@@ -98,7 +98,7 @@ static void AddAndRemoveDisconnectedBlockTransactionsAll(benchmark::Bench& bench
     const auto chains{CreateBlocks(/*num_not_shared=*/1)};
     assert(chains.num_shared == BLOCK_VTX_COUNT - 1);
 
-    bench.minEpochIterations(10).run([&]() NO_THREAD_SAFETY_ANALYSIS {
+    bench.minEpochIterations(10).run([&]() {
         Reorg(chains);
     });
 }
@@ -109,7 +109,7 @@ static void AddAndRemoveDisconnectedBlockTransactions90(benchmark::Bench& bench)
     const auto chains{CreateBlocks(/*num_not_shared=*/BLOCK_VTX_COUNT_10PERCENT)};
     assert(chains.num_shared == BLOCK_VTX_COUNT - BLOCK_VTX_COUNT_10PERCENT);
 
-    bench.minEpochIterations(10).run([&]() NO_THREAD_SAFETY_ANALYSIS {
+    bench.minEpochIterations(10).run([&]() {
         Reorg(chains);
     });
 }
@@ -120,7 +120,7 @@ static void AddAndRemoveDisconnectedBlockTransactions10(benchmark::Bench& bench)
     const auto chains{CreateBlocks(/*num_not_shared=*/BLOCK_VTX_COUNT - BLOCK_VTX_COUNT_10PERCENT)};
     assert(chains.num_shared == BLOCK_VTX_COUNT_10PERCENT);
 
-    bench.minEpochIterations(10).run([&]() NO_THREAD_SAFETY_ANALYSIS {
+    bench.minEpochIterations(10).run([&]() {
         Reorg(chains);
     });
 }

From da384a286bd84a97e7ebe7a64654c5be20ab2df1 Mon Sep 17 00:00:00 2001
From: 0xb10c
Date: Wed, 20 Sep 2023 19:37:45 +0200
Subject: [PATCH 074/172] rpc: getrawaddrman for addrman entries

Exposing address manager table entries in a hidden RPC allows
introspection of the addrman tables in tests and during development.

As the response JSON object, the following FORMAT1 is chosen:

  {
    "table": {
        "bucket/position": { "address": "..", "port": .., ... },
        "bucket/position": { "address": "..", "port": .., ... },
        "bucket/position": { "address": "..", "port": .., ... },
        ...
    }
  }

An alternative would be FORMAT2

  {
    "table": {
        "bucket": {
            "position": { "address": "..", "port": .., ... },
            "position": { "address": "..", "port": .., ... },
            ..
        },
        "bucket": {
            "position": { "address": "..", "port": .., ... },
            ..
        },
    }
  }

FORMAT1 and FORMAT2 have different encodings for the location of the
address in the address manager. While FORMAT2 might be easier to process
for downstream tools, it also mimics internal addrman mappings, which
might change at some point. Users not interested in the address location
can ignore the location key. They don't have to adapt to a new RPC
response format when the internal addrman layout changes. Additionally,
FORMAT1 is also slightly easier to iterate over in downstream tools. The
RPC response-building implementation complexity is lower with FORMAT1,
as we can more easily build a "bucket/position" key than multiple
"bucket" objects with multiple "position" objects (FORMAT2).
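To make the iteration claim concrete, a downstream consumer can walk a FORMAT1 response with a single expression; a sketch using jq (key and field names as described above):

    # Print every entry of both tables as "bucket/position network address:port".
    bitcoin-cli getrawaddrman | jq -r '
        (.new, .tried)
        | to_entries[]
        | "\(.key) \(.value.network) \(.value.address):\(.value.port)"'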
---
 src/addrman.cpp       | 38 +++++++++++++++++++++++
 src/addrman.h         | 14 ++++++++-
 src/addrman_impl.h    |  5 ++++
 src/rpc/net.cpp       | 70 +++++++++++++++++++++++++++++++++++++++
 src/test/fuzz/rpc.cpp |  1 +
 5 files changed, 127 insertions(+), 1 deletion(-)

diff --git a/src/addrman.cpp b/src/addrman.cpp
index 212baab9d45c2..6ce9c81c63a0a 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -838,6 +838,30 @@ std::vector AddrManImpl::GetAddr_(size_t max_addresses, size_t max_pct
     return addresses;
 }
 
+std::vector> AddrManImpl::GetEntries_(bool from_tried) const
+{
+    AssertLockHeld(cs);
+
+    const int bucket_count = from_tried ? ADDRMAN_TRIED_BUCKET_COUNT : ADDRMAN_NEW_BUCKET_COUNT;
+    std::vector> infos;
+    for (int bucket = 0; bucket < bucket_count; ++bucket) {
+        for (int position = 0; position < ADDRMAN_BUCKET_SIZE; ++position) {
+            int id = GetEntry(from_tried, bucket, position);
+            if (id >= 0) {
+                AddrInfo info = mapInfo.at(id);
+                AddressPosition location = AddressPosition(
+                    from_tried,
+                    /*multiplicity_in=*/from_tried ? 1 : info.nRefCount,
+                    bucket,
+                    position);
+                infos.push_back(std::make_pair(info, location));
+            }
+        }
+    }
+
+    return infos;
+}
+
 void AddrManImpl::Connected_(const CService& addr, NodeSeconds time)
 {
     AssertLockHeld(cs);
@@ -1199,6 +1223,15 @@ std::vector AddrManImpl::GetAddr(size_t max_addresses, size_t max_pct,
     return addresses;
 }
 
+std::vector> AddrManImpl::GetEntries(bool from_tried) const
+{
+    LOCK(cs);
+    Check();
+    auto addrInfos = GetEntries_(from_tried);
+    Check();
+    return addrInfos;
+}
+
 void AddrManImpl::Connected(const CService& addr, NodeSeconds time)
 {
     LOCK(cs);
@@ -1289,6 +1322,11 @@ std::vector AddrMan::GetAddr(size_t max_addresses, size_t max_pct, std
     return m_impl->GetAddr(max_addresses, max_pct, network);
 }
 
+std::vector> AddrMan::GetEntries(bool use_tried) const
+{
+    return m_impl->GetEntries(use_tried);
+}
+
 void AddrMan::Connected(const CService& addr, NodeSeconds time)
 {
     m_impl->Connected(addr, time);
diff --git a/src/addrman.h b/src/addrman.h
index f41687dcffc83..4d44c943ac808 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -25,11 +25,12 @@ class InvalidAddrManVersionError : public std::ios_base::failure
 };
 
 class AddrManImpl;
+class AddrInfo;
 
 /** Default for -checkaddrman */
 static constexpr int32_t DEFAULT_ADDRMAN_CONSISTENCY_CHECKS{0};
 
-/** Test-only struct, capturing info about an address in AddrMan */
+/** Location information for an address in AddrMan */
 struct AddressPosition {
     // Whether the address is in the new or tried table
     const bool tried;
@@ -168,6 +169,17 @@ class AddrMan
      */
     std::vector GetAddr(size_t max_addresses, size_t max_pct, std::optional network) const;
 
+    /**
+     * Returns an information-location pair for all addresses in the selected addrman table.
+     * If an address appears multiple times in the new table, an information-location pair
+     * is returned for each occurrence. Addresses only ever appear once in the tried table.
+     *
+     * @param[in] from_tried Selects which table to return entries from.
+     *
+     * @return A vector consisting of pairs of AddrInfo and AddressPosition.
+     */
+    std::vector> GetEntries(bool from_tried) const;
+
     /** We have successfully connected to this peer. Calling this function
     * updates the CAddress's nTime, which is used in our IsTerrible()
     * decisions and gossiped to peers. Callers should be careful that updating
Callers should be careful that updating diff --git a/src/addrman_impl.h b/src/addrman_impl.h index 1cfaca04a3df0..512f085a21f4a 100644 --- a/src/addrman_impl.h +++ b/src/addrman_impl.h @@ -132,6 +132,9 @@ class AddrManImpl std::vector GetAddr(size_t max_addresses, size_t max_pct, std::optional network) const EXCLUSIVE_LOCKS_REQUIRED(!cs); + std::vector> GetEntries(bool from_tried) const + EXCLUSIVE_LOCKS_REQUIRED(!cs); + void Connected(const CService& addr, NodeSeconds time) EXCLUSIVE_LOCKS_REQUIRED(!cs); @@ -260,6 +263,8 @@ class AddrManImpl std::vector GetAddr_(size_t max_addresses, size_t max_pct, std::optional network) const EXCLUSIVE_LOCKS_REQUIRED(cs); + std::vector> GetEntries_(bool from_tried) const EXCLUSIVE_LOCKS_REQUIRED(cs); + void Connected_(const CService& addr, NodeSeconds time) EXCLUSIVE_LOCKS_REQUIRED(cs); void SetServices_(const CService& addr, ServiceFlags nServices) EXCLUSIVE_LOCKS_REQUIRED(cs); diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 6af62641bda5a..8ebc6a73cf771 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -1063,6 +1064,74 @@ static RPCHelpMan getaddrmaninfo() }; } +UniValue AddrmanEntryToJSON(const AddrInfo& info) +{ + UniValue ret(UniValue::VOBJ); + ret.pushKV("address", info.ToStringAddr()); + ret.pushKV("port", info.GetPort()); + ret.pushKV("services", (uint64_t)info.nServices); + ret.pushKV("time", int64_t{TicksSinceEpoch(info.nTime)}); + ret.pushKV("network", GetNetworkName(info.GetNetClass())); + ret.pushKV("source", info.source.ToStringAddr()); + ret.pushKV("source_network", GetNetworkName(info.source.GetNetClass())); + return ret; +} + +UniValue AddrmanTableToJSON(const std::vector>& tableInfos) +{ + UniValue table(UniValue::VOBJ); + for (const auto& e : tableInfos) { + AddrInfo info = e.first; + AddressPosition location = e.second; + std::ostringstream key; + key << location.bucket << "/" << location.position; + // Address manager tables have unique entries so there is no advantage + // in using UniValue::pushKV, which checks if the key already exists + // in O(N). UniValue::pushKVEnd is used instead which currently is O(1). 
+        table.pushKVEnd(key.str(), AddrmanEntryToJSON(info));
+    }
+    return table;
+}
+
+static RPCHelpMan getrawaddrman()
+{
+    return RPCHelpMan{"getrawaddrman",
+        "EXPERIMENTAL warning: this call may be changed in future releases.\n"
+        "\nReturns information on all address manager entries for the new and tried tables.\n",
+        {},
+        RPCResult{
+            RPCResult::Type::OBJ_DYN, "", "", {
+                {RPCResult::Type::OBJ_DYN, "table", "buckets with addresses in the address manager table ( new, tried )", {
+                    {RPCResult::Type::OBJ, "bucket/position", "the location in the address manager table (bucket/position)", {
+                        {RPCResult::Type::STR, "address", "The address of the node"},
+                        {RPCResult::Type::NUM, "port", "The port number of the node"},
+                        {RPCResult::Type::STR, "network", "The network (" + Join(GetNetworkNames(), ", ") + ") of the address"},
+                        {RPCResult::Type::NUM, "services", "The services offered by the node"},
+                        {RPCResult::Type::NUM_TIME, "time", "The " + UNIX_EPOCH_TIME + " when the node was last seen"},
+                        {RPCResult::Type::STR, "source", "The address that relayed the address to us"},
+                        {RPCResult::Type::STR, "source_network", "The network (" + Join(GetNetworkNames(), ", ") + ") of the source address"},
+                    }}
+                }}
+            }
+        },
+        RPCExamples{
+            HelpExampleCli("getrawaddrman", "")
+            + HelpExampleRpc("getrawaddrman", "")
+        },
+        [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue {
+            NodeContext& node = EnsureAnyNodeContext(request.context);
+            if (!node.addrman) {
+                throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Address manager functionality missing or disabled");
+            }
+
+            UniValue ret(UniValue::VOBJ);
+            ret.pushKV("new", AddrmanTableToJSON(node.addrman->GetEntries(false)));
+            ret.pushKV("tried", AddrmanTableToJSON(node.addrman->GetEntries(true)));
+            return ret;
+        },
+    };
+}
+
 void RegisterNetRPCCommands(CRPCTable& t)
 {
     static const CRPCCommand commands[]{
@@ -1083,6 +1152,7 @@ void RegisterNetRPCCommands(CRPCTable& t)
         {"hidden", &addpeeraddress},
         {"hidden", &sendmsgtopeer},
         {"hidden", &getaddrmaninfo},
+        {"hidden", &getrawaddrman},
     };
     for (const auto& c : commands) {
         t.appendCommand(c.name, &c);
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index 7e9a18e1d0bbe..cffda751df25e 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -140,6 +140,7 @@ const std::vector RPC_COMMANDS_SAFE_FOR_FUZZING{
     "getnodeaddresses",
     "getpeerinfo",
     "getprioritisedtransactions",
+    "getrawaddrman",
     "getrawmempool",
     "getrawtransaction",
     "getrpcinfo",

From 352d5eb2a9e89cff4a2815d94a9d81fcc20c4b2c Mon Sep 17 00:00:00 2001
From: 0xb10c
Date: Sat, 23 Sep 2023 16:54:37 +0200
Subject: [PATCH 075/172] test: getrawaddrman RPC

Test that getrawaddrman returns the addresses in the new and tried
tables. We can't check the buckets and positions as these are not
deterministic (yet).
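Since bucket and position depend on the addrman instance's secret key, only their ranges can be sanity-checked. A standalone sketch of that range check, using the table sizes defined in netutil.py above (tried table: 256 buckets of 64 slots; not part of the patch):

    # Exits non-zero if any tried-table key is outside the valid bucket/position range.
    bitcoin-cli getrawaddrman | jq -e '
        [.tried | keys[] | split("/") | map(tonumber)]
        | all(.[0] < 256 and .[1] < 64)'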
---
 test/functional/rpc_net.py | 112 +++++++++++++++++++++++++++++++++++++
 1 file changed, 112 insertions(+)

diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py
index 117802b812ceb..da68066fc5fb6 100755
--- a/test/functional/rpc_net.py
+++ b/test/functional/rpc_net.py
@@ -12,6 +12,7 @@
 import time
 
 import test_framework.messages
+from test_framework.netutil import ADDRMAN_NEW_BUCKET_COUNT, ADDRMAN_TRIED_BUCKET_COUNT, ADDRMAN_BUCKET_SIZE
 from test_framework.p2p import (
     P2PInterface,
     P2P_SERVICES,
@@ -67,6 +68,7 @@ def run_test(self):
         self.test_addpeeraddress()
         self.test_sendmsgtopeer()
         self.test_getaddrmaninfo()
+        self.test_getrawaddrman()
 
     def test_connection_count(self):
         self.log.info("Test getconnectioncount")
@@ -386,5 +388,115 @@ def test_getaddrmaninfo(self):
             assert_equal(res[net]["tried"], 0)
             assert_equal(res[net]["total"], 0)
 
+    def test_getrawaddrman(self):
+        self.log.info("Test getrawaddrman")
+        node = self.nodes[1]
+
+        self.log.debug("Test that getrawaddrman is a hidden RPC")
+        # It is hidden from general help, but its detailed help may be called directly.
+        assert "getrawaddrman" not in node.help()
+        assert "getrawaddrman" in node.help("getrawaddrman")
+
+        def check_addr_information(result, expected):
+            """Utility to compare a getrawaddrman result entry with an expected entry"""
+            assert_equal(result["address"], expected["address"])
+            assert_equal(result["port"], expected["port"])
+            assert_equal(result["services"], expected["services"])
+            assert_equal(result["network"], expected["network"])
+            assert_equal(result["source"], expected["source"])
+            assert_equal(result["source_network"], expected["source_network"])
+            # To avoid failing on slow test runners, use a 10s vspan here.
+            assert_approx(result["time"], time.time(), vspan=10)
+
+        def check_getrawaddrman_entries(expected):
+            """Utility to compare a getrawaddrman result with expected addrman contents"""
+            getrawaddrman = node.getrawaddrman()
+            getaddrmaninfo = node.getaddrmaninfo()
+            for (table_name, table_info) in expected.items():
+                assert_equal(len(getrawaddrman[table_name]), len(table_info["entries"]))
+                assert_equal(len(getrawaddrman[table_name]), getaddrmaninfo["all_networks"][table_name])
+
+                for bucket_position in getrawaddrman[table_name].keys():
+                    bucket = int(bucket_position.split("/")[0])
+                    position = int(bucket_position.split("/")[1])
+
+                    # bucket and position can only be sanity checked here as the
+                    # test-addrman isn't deterministic
+                    assert 0 <= int(bucket) < table_info["bucket_count"]
+                    assert 0 <= int(position) < ADDRMAN_BUCKET_SIZE
+
+                    entry = getrawaddrman[table_name][bucket_position]
+                    expected_entry = list(filter(lambda e: e["address"] == entry["address"], table_info["entries"]))[0]
+                    check_addr_information(entry, expected_entry)
+
+        # we expect one entry each in the addrman new and tried tables, added in a previous test
+        expected = {
+            "new": {
+                "bucket_count": ADDRMAN_NEW_BUCKET_COUNT,
+                "entries": [
+                    {
+                        "address": "2.0.0.0",
+                        "port": 8333,
+                        "services": 9,
+                        "network": "ipv4",
+                        "source": "2.0.0.0",
+                        "source_network": "ipv4",
+                    }
+                ]
+            },
+            "tried": {
+                "bucket_count": ADDRMAN_TRIED_BUCKET_COUNT,
+                "entries": [
+                    {
+                        "address": "1.2.3.4",
+                        "port": 8333,
+                        "services": 9,
+                        "network": "ipv4",
+                        "source": "1.2.3.4",
+                        "source_network": "ipv4",
+                    }
+                ]
+            }
+        }
+
+        self.log.debug("Test that the getrawaddrman contains information about the addresses added in a previous test")
+        check_getrawaddrman_entries(expected)
+
+        self.log.debug("Add one new address to each addrman table")
expected["new"]["entries"].append({ + "address": "2803:0:1234:abcd::1", + "services": 9, + "network": "ipv6", + "source": "2803:0:1234:abcd::1", + "source_network": "ipv6", + "port": -1, # set once addpeeraddress is successful + }) + expected["tried"]["entries"].append({ + "address": "nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion", + "services": 9, + "network": "onion", + "source": "nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion", + "source_network": "onion", + "port": -1, # set once addpeeraddress is successful + }) + + port = 0 + for (table_name, table_info) in expected.items(): + # There's a slight chance that the to-be-added address collides with an already + # present table entry. To avoid this, we increment the port until an address has been + # added. Incrementing the port changes the position in the new table bucket (bucket + # stays the same) and changes both the bucket and the position in the tried table. + while True: + if node.addpeeraddress(address=table_info["entries"][1]["address"], port=port, tried=table_name == "tried")["success"]: + table_info["entries"][1]["port"] = port + self.log.debug(f"Added {table_info['entries'][1]['address']} to {table_name} table") + break + else: + port += 1 + + self.log.debug("Test that the newly added addresses appear in getrawaddrman") + check_getrawaddrman_entries(expected) + + if __name__ == '__main__': NetTest().main() From b8cafe38713cbf10d15459042f7f911bcc1b1e4e Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Thu, 21 Sep 2023 10:52:00 +0000 Subject: [PATCH 076/172] chainparams: add testnet assumeutxo param at height 2_500_000 --- src/kernel/chainparams.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 3ae24d0eb50bd..e114a6336352c 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -267,7 +267,12 @@ class CTestNetParams : public CChainParams { }; m_assumeutxo_data = { - // TODO to be specified in a future patch. 
+ { + .height = 2'500'000, + .hash_serialized = AssumeutxoHash{uint256S("0x2a8fdefef3bf75fa00540ccaaaba4b5281bea94229327bdb0f7416ef1e7a645c")}, + .nChainTx = 66484552, + .blockhash = uint256S("0x0000000000000093bcb68c03a9a168ae252572d348a2eaeba2cdf9231d73206f") + } }; chainTxData = ChainTxData{ From edbed31066e3674ba52b8c093ab235625527f383 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Thu, 21 Sep 2023 11:00:23 +0000 Subject: [PATCH 077/172] chainparams: add signet assumeutxo param at height 160_000 --- src/kernel/chainparams.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index e114a6336352c..5e893a3f58c4d 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -375,6 +375,15 @@ class SigNetParams : public CChainParams { vFixedSeeds.clear(); + m_assumeutxo_data = { + { + .height = 160'000, + .hash_serialized = AssumeutxoHash{uint256S("0x5225141cb62dee63ab3be95f9b03d60801f264010b1816d4bd00618b2736e7be")}, + .nChainTx = 2289496, + .blockhash = uint256S("0x0000003ca3c99aff040f2563c2ad8f8ec88bd0fd6b8f0895cfaf1ef90353a62c") + } + }; + base58Prefixes[PUBKEY_ADDRESS] = std::vector(1,111); base58Prefixes[SCRIPT_ADDRESS] = std::vector(1,196); base58Prefixes[SECRET_KEY] = std::vector(1,239); From abf343b32026c3f8246f98c416e2c6cf5b66aa38 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Tue, 22 Aug 2023 17:50:59 -0400 Subject: [PATCH 078/172] net: advertise NODE_P2P_V2 if CLI arg -v2transport is on Co-authored-by: Dhruv Mehta <856960+dhruv@users.noreply.github.com> --- src/init.cpp | 6 ++++++ src/net.h | 2 ++ src/protocol.cpp | 1 + src/protocol.h | 3 +++ src/test/util/net.h | 1 + test/functional/test_framework/messages.py | 1 + 6 files changed, 14 insertions(+) diff --git a/src/init.cpp b/src/init.cpp index 8d954092ea089..a0b4425898177 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -498,6 +498,7 @@ void SetupServerArgs(ArgsManager& argsman) argsman.AddArg("-i2psam=", "I2P SAM proxy to reach I2P peers and accept I2P connections (default: none)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-i2pacceptincoming", strprintf("Whether to accept inbound I2P connections (default: %i). Ignored if -i2psam is not set. Listening for inbound I2P connections is done through the SAM proxy, not by binding to a local address and port.", DEFAULT_I2P_ACCEPT_INCOMING), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-onlynet=", "Make automatic outbound connections only to network (" + Join(GetNetworkNames(), ", ") + "). Inbound and manual connections are not affected by this option. 
It can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-v2transport", strprintf("Support v2 transport (default: %u)", DEFAULT_V2_TRANSPORT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-txreconciliation", strprintf("Enable transaction reconciliations per BIP 330 (default: %d)", DEFAULT_TXRECONCILIATION_ENABLE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); @@ -893,6 +894,11 @@ bool AppInitParameterInteraction(const ArgsManager& args) } } + // Signal NODE_P2P_V2 if BIP324 v2 transport is enabled. + if (args.GetBoolArg("-v2transport", DEFAULT_V2_TRANSPORT)) { + nLocalServices = ServiceFlags(nLocalServices | NODE_P2P_V2); + } + // Signal NODE_COMPACT_FILTERS if peerblockfilters and basic filters index are both enabled. if (args.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS)) { if (g_enabled_filter_types.count(BlockFilterType::BASIC) != 1) { diff --git a/src/net.h b/src/net.h index 035cca2b13d1e..c4bd89099a267 100644 --- a/src/net.h +++ b/src/net.h @@ -94,6 +94,8 @@ static constexpr bool DEFAULT_FIXEDSEEDS{true}; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000; +static constexpr bool DEFAULT_V2_TRANSPORT{false}; + typedef int64_t NodeId; struct AddedNodeInfo diff --git a/src/protocol.cpp b/src/protocol.cpp index cb956191e4dba..f956728af2fc6 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -199,6 +199,7 @@ static std::string serviceFlagToStr(size_t bit) case NODE_WITNESS: return "WITNESS"; case NODE_COMPACT_FILTERS: return "COMPACT_FILTERS"; case NODE_NETWORK_LIMITED: return "NETWORK_LIMITED"; + case NODE_P2P_V2: return "P2P_V2"; // Not using default, so we get warned when a case is missing } diff --git a/src/protocol.h b/src/protocol.h index 56668898e4943..a58d671a706cb 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -291,6 +291,9 @@ enum ServiceFlags : uint64_t { // See BIP159 for details on how this is implemented. NODE_NETWORK_LIMITED = (1 << 10), + // NODE_P2P_V2 means the node supports BIP324 transport + NODE_P2P_V2 = (1 << 11), + // Bits 24-31 are reserved for temporary experiments. Just pick a bit that // isn't getting used, or one not being used much, and notify the // bitcoin-development mailing list. 
Remember that service bits are just diff --git a/src/test/util/net.h b/src/test/util/net.h index 1684da777a287..0d41cf550e938 100644 --- a/src/test/util/net.h +++ b/src/test/util/net.h @@ -65,6 +65,7 @@ constexpr ServiceFlags ALL_SERVICE_FLAGS[]{ NODE_WITNESS, NODE_COMPACT_FILTERS, NODE_NETWORK_LIMITED, + NODE_P2P_V2, }; constexpr NetPermissionFlags ALL_NET_PERMISSION_FLAGS[]{ diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 4d635556f43b3..8f3aea8785e55 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -52,6 +52,7 @@ NODE_WITNESS = (1 << 3) NODE_COMPACT_FILTERS = (1 << 6) NODE_NETWORK_LIMITED = (1 << 10) +NODE_P2P_V2 = (1 << 11) MSG_TX = 1 MSG_BLOCK = 2 From a4706bc877504057e8522c929cc0704d3eaa7302 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Tue, 26 Sep 2023 18:07:36 -0400 Subject: [PATCH 079/172] rpc: don't report v2 handshake bytes in the per-type sent byte statistics This matches the behavior for per-type received bytes. --- src/net.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/net.cpp b/src/net.cpp index df8f3acfd1e61..72aef92e62b71 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1609,7 +1609,9 @@ std::pair CConnman::SocketSendData(CNode& node) const // Notify transport that bytes have been processed. node.m_transport->MarkBytesSent(nBytes); // Update statistics per message type. - node.AccountForSentBytes(msg_type, nBytes); + if (!msg_type.empty()) { // don't report v2 handshake bytes for now + node.AccountForSentBytes(msg_type, nBytes); + } nSentSize += nBytes; if ((size_t)nBytes != data.size()) { // could not send full message; stop sending more From 62d21ee0974b582a6a32aa97ee35ef51c977ea4b Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Mon, 21 Aug 2023 16:55:47 -0400 Subject: [PATCH 080/172] net: use V2Transport when NODE_P2P_V2 service flag is present Co-authored-by: Dhruv Mehta <856960+dhruv@users.noreply.github.com> --- src/net.cpp | 38 +++++++++++++++++++++-------- src/net.h | 5 ++-- src/rpc/net.cpp | 2 +- test/functional/p2p_v2_transport.py | 30 +++++++++++++++++++++++ test/functional/test_runner.py | 1 + 5 files changed, 63 insertions(+), 13 deletions(-) create mode 100755 test/functional/p2p_v2_transport.py diff --git a/src/net.cpp b/src/net.cpp index 72aef92e62b71..844a6158736e9 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -439,7 +439,7 @@ static CAddress GetBindAddress(const Sock& sock) return addr_bind; } -CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type) +CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) { AssertLockNotHeld(m_unused_i2p_sessions_mutex); assert(conn_type != ConnectionType::INBOUND); @@ -457,7 +457,8 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo } } - LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "trying connection %s lastseen=%.1fhrs\n", + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "trying %s connection %s lastseen=%.1fhrs\n", + use_v2transport ? "v2" : "v1", pszDest ? pszDest : addrConnect.ToStringAddrPort(), Ticks(pszDest ? 
0h : Now() - addrConnect.nTime));
@@ -580,6 +581,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
                          CNodeOptions{
                              .i2p_sam_session = std::move(i2p_transient_session),
                              .recv_flood_size = nReceiveFloodSize,
+                             .use_v2transport = use_v2transport,
                          });
     pnode->AddRef();
@@ -1794,6 +1796,10 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
     }
 
     const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end();
+
+    // The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is
+    // detected, so use it whenever we signal NODE_P2P_V2.
+    const bool use_v2transport(nodeServices & NODE_P2P_V2);
+
     CNode* pnode = new CNode(id,
                              std::move(sock),
                              addr,
@@ -1807,6 +1813,7 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
                                  .permission_flags = permission_flags,
                                  .prefer_evict = discouraged,
                                  .recv_flood_size = nReceiveFloodSize,
+                                 .use_v2transport = use_v2transport,
                              });
     pnode->AddRef();
     m_msgproc->InitializeNode(*pnode, nodeServices);
@@ -1855,7 +1862,7 @@ bool CConnman::AddConnection(const std::string& address, ConnectionType conn_typ
     CSemaphoreGrant grant(*semOutbound, true);
     if (!grant) return false;
 
-    OpenNetworkConnection(CAddress(), false, &grant, address.c_str(), conn_type);
+    OpenNetworkConnection(CAddress(), false, &grant, address.c_str(), conn_type, /*use_v2transport=*/false);
     return true;
 }
 
@@ -2289,7 +2296,7 @@ void CConnman::ProcessAddrFetch()
     CAddress addr;
     CSemaphoreGrant grant(*semOutbound, true);
     if (grant) {
-        OpenNetworkConnection(addr, false, &grant, strDest.c_str(), ConnectionType::ADDR_FETCH);
+        OpenNetworkConnection(addr, false, &grant, strDest.c_str(), ConnectionType::ADDR_FETCH, /*use_v2transport=*/false);
     }
 }
 
@@ -2391,7 +2398,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
         for (const std::string& strAddr : connect)
         {
             CAddress addr(CService(), NODE_NONE);
-            OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), ConnectionType::MANUAL);
+            OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), ConnectionType::MANUAL, /*use_v2transport=*/false);
 
             for (int i = 0; i < 10 && i < nLoop; i++)
             {
                 if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
@@ -2694,7 +2701,9 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
             // Don't record addrman failure attempts when node is offline. This can be identified since all local
             // network connections (if any) belong in the same netgroup, and the size of `outbound_ipv46_peer_netgroups` would only be 1.
             const bool count_failures{((int)outbound_ipv46_peer_netgroups.size() + outbound_privacy_network_peers) >= std::min(nMaxConnections - 1, 2)};
-            OpenNetworkConnection(addrConnect, count_failures, &grant, /*strDest=*/nullptr, conn_type);
+            // Use BIP324 transport when both us and them have NODE_P2P_V2 set.
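+            // (The three-way AND below is non-zero only when NODE_P2P_V2 appears
+            // both in our own local services and in the services advertised for
+            // this address, i.e. v2 is attempted only when both sides opted in.)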
+ const bool use_v2transport(addrConnect.nServices & GetLocalServices() & NODE_P2P_V2); + OpenNetworkConnection(addrConnect, count_failures, &grant, /*strDest=*/nullptr, conn_type, use_v2transport); } } } @@ -2783,7 +2792,7 @@ void CConnman::ThreadOpenAddedConnections() } tried = true; CAddress addr(CService(), NODE_NONE); - OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), ConnectionType::MANUAL); + OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), ConnectionType::MANUAL, /*use_v2transport=*/false); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return; } @@ -2795,7 +2804,7 @@ void CConnman::ThreadOpenAddedConnections() } // if successful, this moves the passed grant to the constructed node -void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, ConnectionType conn_type) +void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, ConnectionType conn_type, bool use_v2transport) { AssertLockNotHeld(m_unused_i2p_sessions_mutex); assert(conn_type != ConnectionType::INBOUND); @@ -2817,7 +2826,7 @@ void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai } else if (FindNode(std::string(pszDest))) return; - CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type); + CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type, use_v2transport); if (!pnode) return; @@ -3579,6 +3588,15 @@ ServiceFlags CConnman::GetLocalServices() const return nLocalServices; } +static std::unique_ptr MakeTransport(NodeId id, bool use_v2transport, bool inbound) noexcept +{ + if (use_v2transport) { + return std::make_unique(id, /*initiating=*/!inbound, SER_NETWORK, INIT_PROTO_VERSION); + } else { + return std::make_unique(id, SER_NETWORK, INIT_PROTO_VERSION); + } +} + CNode::CNode(NodeId idIn, std::shared_ptr sock, const CAddress& addrIn, @@ -3589,7 +3607,7 @@ CNode::CNode(NodeId idIn, ConnectionType conn_type_in, bool inbound_onion, CNodeOptions&& node_opts) - : m_transport{std::make_unique(idIn, SER_NETWORK, INIT_PROTO_VERSION)}, + : m_transport{MakeTransport(idIn, node_opts.use_v2transport, conn_type_in == ConnectionType::INBOUND)}, m_permission_flags{node_opts.permission_flags}, m_sock{sock}, m_connected{GetTime()}, diff --git a/src/net.h b/src/net.h index c4bd89099a267..8862efa76a9b9 100644 --- a/src/net.h +++ b/src/net.h @@ -657,6 +657,7 @@ struct CNodeOptions std::unique_ptr i2p_sam_session = nullptr; bool prefer_evict = false; size_t recv_flood_size{DEFAULT_MAXRECEIVEBUFFER * 1000}; + bool use_v2transport = false; }; /** Information about a peer */ @@ -1098,7 +1099,7 @@ class CConnman bool GetNetworkActive() const { return fNetworkActive; }; bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; }; void SetNetworkActive(bool active); - void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant* grantOutbound, const char* strDest, ConnectionType conn_type) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); + void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant* grantOutbound, const char* strDest, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); bool CheckIncomingNonce(uint64_t nonce); // alias for thread safety annotations only, not defined @@ -1314,7 +1315,7 @@ class CConnman bool 
AlreadyConnectedToAddress(const CAddress& addr); bool AttemptToEvictConnection(); - CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); + CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr) const; void DeleteNode(CNode* pnode); diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 6af62641bda5a..86d3eef43625e 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -311,7 +311,7 @@ static RPCHelpMan addnode() if (command == "onetry") { CAddress addr; - connman.OpenNetworkConnection(addr, /*fCountFailure=*/false, /*grantOutbound=*/nullptr, node_arg.c_str(), ConnectionType::MANUAL); + connman.OpenNetworkConnection(addr, /*fCountFailure=*/false, /*grantOutbound=*/nullptr, node_arg.c_str(), ConnectionType::MANUAL, /*use_v2transport=*/false); return UniValue::VNULL; } diff --git a/test/functional/p2p_v2_transport.py b/test/functional/p2p_v2_transport.py new file mode 100755 index 0000000000000..9df4c297e40c2 --- /dev/null +++ b/test/functional/p2p_v2_transport.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +Test v2 transport +""" + +from test_framework.messages import NODE_P2P_V2 +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class V2TransportTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain=True + self.num_nodes = 1 + self.extra_args = [["-v2transport=0"]] + + def run_test(self): + network_info = self.nodes[0].getnetworkinfo() + assert_equal(int(network_info["localservices"], 16) & NODE_P2P_V2, 0) + assert "P2P_V2" not in network_info["localservicesnames"] + + self.restart_node(0, ["-v2transport=1"]) + network_info = self.nodes[0].getnetworkinfo() + assert_equal(int(network_info["localservices"], 16) & NODE_P2P_V2, NODE_P2P_V2) + assert "P2P_V2" in network_info["localservicesnames"] + +if __name__ == '__main__': + V2TransportTest().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 9a0b5c6f0a678..4645557655106 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -246,6 +246,7 @@ 'p2p_invalid_locator.py', 'p2p_invalid_block.py', 'p2p_invalid_tx.py', + 'p2p_v2_transport.py', 'example_test.py', 'wallet_txn_doublespend.py --legacy-wallet', 'wallet_multisig_descriptor_psbt.py --descriptors', From c73cd423636e06df46742f573640ca773b281ffc Mon Sep 17 00:00:00 2001 From: dhruv <856960+dhruv@users.noreply.github.com> Date: Tue, 28 Dec 2021 13:26:20 -0800 Subject: [PATCH 081/172] rpc: addnode arg to use BIP324 v2 p2p Co-authored-by: Pieter Wuille --- src/net.cpp | 32 ++++++++++++++++---------------- src/net.h | 23 +++++++++++++++++------ src/rpc/client.cpp | 1 + src/rpc/net.cpp | 18 ++++++++++++------ src/rpc/util.cpp | 1 + src/test/fuzz/connman.cpp | 2 +- 6 files changed, 48 insertions(+), 29 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 844a6158736e9..bb4f9800fe7ef 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -2452,7 +2452,7 @@ void CConnman::ThreadOpenConnections(const std::vector connect) // Perform cheap 
checks before locking a mutex. else if (!dnsseed && !use_seednodes) { LOCK(m_added_nodes_mutex); - if (m_added_nodes.empty()) { + if (m_added_node_params.empty()) { add_fixed_seeds_now = true; LogPrintf("Adding fixed seeds as -dnsseed=0 (or IPv4/IPv6 connections are disabled via -onlynet) and neither -addnode nor -seednode are provided\n"); } @@ -2725,11 +2725,11 @@ std::vector CConnman::GetAddedNodeInfo() const { std::vector ret; - std::list lAddresses(0); + std::list lAddresses(0); { LOCK(m_added_nodes_mutex); - ret.reserve(m_added_nodes.size()); - std::copy(m_added_nodes.cbegin(), m_added_nodes.cend(), std::back_inserter(lAddresses)); + ret.reserve(m_added_node_params.size()); + std::copy(m_added_node_params.cbegin(), m_added_node_params.cend(), std::back_inserter(lAddresses)); } @@ -2749,9 +2749,9 @@ std::vector CConnman::GetAddedNodeInfo() const } } - for (const std::string& strAddNode : lAddresses) { - CService service(LookupNumeric(strAddNode, GetDefaultPort(strAddNode))); - AddedNodeInfo addedNode{strAddNode, CService(), false, false}; + for (const auto& addr : lAddresses) { + CService service(LookupNumeric(addr.m_added_node, GetDefaultPort(addr.m_added_node))); + AddedNodeInfo addedNode{addr, CService(), false, false}; if (service.IsValid()) { // strAddNode is an IP:port auto it = mapConnected.find(service); @@ -2762,7 +2762,7 @@ std::vector CConnman::GetAddedNodeInfo() const } } else { // strAddNode is a name - auto it = mapConnectedByName.find(strAddNode); + auto it = mapConnectedByName.find(addr.m_added_node); if (it != mapConnectedByName.end()) { addedNode.resolvedAddress = it->second.second; addedNode.fConnected = true; @@ -2792,7 +2792,7 @@ void CConnman::ThreadOpenAddedConnections() } tried = true; CAddress addr(CService(), NODE_NONE); - OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), ConnectionType::MANUAL, /*use_v2transport=*/false); + OpenNetworkConnection(addr, false, &grant, info.m_params.m_added_node.c_str(), ConnectionType::MANUAL, info.m_params.m_use_v2transport); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return; } @@ -3384,23 +3384,23 @@ std::vector CConnman::GetAddresses(CNode& requestor, size_t max_addres return cache_entry.m_addrs_response_cache; } -bool CConnman::AddNode(const std::string& strNode) +bool CConnman::AddNode(const AddedNodeParams& add) { LOCK(m_added_nodes_mutex); - for (const std::string& it : m_added_nodes) { - if (strNode == it) return false; + for (const auto& it : m_added_node_params) { + if (add.m_added_node == it.m_added_node) return false; } - m_added_nodes.push_back(strNode); + m_added_node_params.push_back(add); return true; } bool CConnman::RemoveAddedNode(const std::string& strNode) { LOCK(m_added_nodes_mutex); - for(std::vector::iterator it = m_added_nodes.begin(); it != m_added_nodes.end(); ++it) { - if (strNode == *it) { - m_added_nodes.erase(it); + for (auto it = m_added_node_params.begin(); it != m_added_node_params.end(); ++it) { + if (strNode == it->m_added_node) { + m_added_node_params.erase(it); return true; } } diff --git a/src/net.h b/src/net.h index 8862efa76a9b9..1e81bc76f5eaf 100644 --- a/src/net.h +++ b/src/net.h @@ -98,9 +98,13 @@ static constexpr bool DEFAULT_V2_TRANSPORT{false}; typedef int64_t NodeId; -struct AddedNodeInfo -{ - std::string strAddedNode; +struct AddedNodeParams { + std::string m_added_node; + bool m_use_v2transport; +}; + +struct AddedNodeInfo { + AddedNodeParams m_params; CService resolvedAddress; bool fConnected; bool fInbound; @@ -1075,7 +1079,11 @@ class 
CConnman vWhitelistedRange = connOptions.vWhitelistedRange; { LOCK(m_added_nodes_mutex); - m_added_nodes = connOptions.m_added_nodes; + + for (const std::string& added_node : connOptions.m_added_nodes) { + // -addnode cli arg does not currently have a way to signal BIP324 support + m_added_node_params.push_back({added_node, false}); + } } m_onion_binds = connOptions.onion_binds; } @@ -1162,7 +1170,7 @@ class CConnman // Count the number of block-relay-only peers we have over our limit. int GetExtraBlockRelayCount() const; - bool AddNode(const std::string& node) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex); + bool AddNode(const AddedNodeParams& add) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex); bool RemoveAddedNode(const std::string& node) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex); std::vector GetAddedNodeInfo() const EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex); @@ -1387,7 +1395,10 @@ class CConnman const NetGroupManager& m_netgroupman; std::deque m_addr_fetches GUARDED_BY(m_addr_fetches_mutex); Mutex m_addr_fetches_mutex; - std::vector m_added_nodes GUARDED_BY(m_added_nodes_mutex); + + // connection string and whether to use v2 p2p + std::vector m_added_node_params GUARDED_BY(m_added_nodes_mutex); + mutable Mutex m_added_nodes_mutex; std::vector m_nodes GUARDED_BY(m_nodes_mutex); std::list m_nodes_disconnected; diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 1e5e231cefa7b..49820f25a35a5 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -301,6 +301,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "addpeeraddress", 2, "tried"}, { "sendmsgtopeer", 0, "peer_id" }, { "stop", 0, "wait" }, + { "addnode", 2, "v2transport" }, }; // clang-format on diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 86d3eef43625e..a66f74242c211 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -289,11 +289,12 @@ static RPCHelpMan addnode() { {"node", RPCArg::Type::STR, RPCArg::Optional::NO, "The address of the peer to connect to"}, {"command", RPCArg::Type::STR, RPCArg::Optional::NO, "'add' to add a node to the list, 'remove' to remove a node from the list, 'onetry' to try a connection to the node once"}, + {"v2transport", RPCArg::Type::BOOL, RPCArg::Default{false}, "Attempt to connect using BIP324 v2 transport protocol (ignored for 'remove' command)"}, }, RPCResult{RPCResult::Type::NONE, "", ""}, RPCExamples{ - HelpExampleCli("addnode", "\"192.168.0.6:8333\" \"onetry\"") - + HelpExampleRpc("addnode", "\"192.168.0.6:8333\", \"onetry\"") + HelpExampleCli("addnode", "\"192.168.0.6:8333\" \"onetry\" true") + + HelpExampleRpc("addnode", "\"192.168.0.6:8333\", \"onetry\" true") }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { @@ -307,17 +308,22 @@ static RPCHelpMan addnode() CConnman& connman = EnsureConnman(node); const std::string node_arg{request.params[0].get_str()}; + bool use_v2transport = self.Arg(2); + + if (use_v2transport && !(node.connman->GetLocalServices() & NODE_P2P_V2)) { + throw JSONRPCError(RPC_INVALID_PARAMETER, "Error: v2transport requested but not enabled (see -v2transport)"); + } if (command == "onetry") { CAddress addr; - connman.OpenNetworkConnection(addr, /*fCountFailure=*/false, /*grantOutbound=*/nullptr, node_arg.c_str(), ConnectionType::MANUAL, /*use_v2transport=*/false); + connman.OpenNetworkConnection(addr, /*fCountFailure=*/false, /*grantOutbound=*/nullptr, node_arg.c_str(), ConnectionType::MANUAL, use_v2transport); return UniValue::VNULL; } if (command == "add") { - if (!connman.AddNode(node_arg)) { + if 
(!connman.AddNode({node_arg, use_v2transport})) { throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: Node already added"); } } @@ -475,7 +481,7 @@ static RPCHelpMan getaddednodeinfo() if (!request.params[0].isNull()) { bool found = false; for (const AddedNodeInfo& info : vInfo) { - if (info.strAddedNode == request.params[0].get_str()) { + if (info.m_params.m_added_node == request.params[0].get_str()) { vInfo.assign(1, info); found = true; break; @@ -490,7 +496,7 @@ static RPCHelpMan getaddednodeinfo() for (const AddedNodeInfo& info : vInfo) { UniValue obj(UniValue::VOBJ); - obj.pushKV("addednode", info.strAddedNode); + obj.pushKV("addednode", info.m_params.m_added_node); obj.pushKV("connected", info.fConnected); UniValue addresses(UniValue::VARR); if (info.fConnected) { diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index 9a941be181624..a11366bd47169 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -682,6 +682,7 @@ TMPL_INST(nullptr, std::optional, maybe_arg ? std::optional{maybe_arg->get TMPL_INST(nullptr, const std::string*, maybe_arg ? &maybe_arg->get_str() : nullptr;); // Required arg or optional arg with default value. +TMPL_INST(CheckRequiredOrDefault, bool, CHECK_NONFATAL(maybe_arg)->get_bool();); TMPL_INST(CheckRequiredOrDefault, int, CHECK_NONFATAL(maybe_arg)->getInt();); TMPL_INST(CheckRequiredOrDefault, uint64_t, CHECK_NONFATAL(maybe_arg)->getInt();); TMPL_INST(CheckRequiredOrDefault, const std::string&, CHECK_NONFATAL(maybe_arg)->get_str();); diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp index e46e085ee7788..0dab2a2e9747f 100644 --- a/src/test/fuzz/connman.cpp +++ b/src/test/fuzz/connman.cpp @@ -61,7 +61,7 @@ FUZZ_TARGET(connman, .init = initialize_connman) random_string = fuzzed_data_provider.ConsumeRandomLengthString(64); }, [&] { - connman.AddNode(random_string); + connman.AddNode({random_string, fuzzed_data_provider.ConsumeBool()}); }, [&] { connman.CheckIncomingNonce(fuzzed_data_provider.ConsumeIntegral()); From 4d265d0342ae7e92df07ba51e8355db57c44f811 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Mon, 21 Aug 2023 18:14:52 -0400 Subject: [PATCH 082/172] sync: modernize CSemaphore / CSemaphoreGrant --- src/net.cpp | 23 ++++++++-------- src/net.h | 2 +- src/rpc/net.cpp | 2 +- src/sync.h | 73 +++++++++++++++++++++++++++++++++++-------------- 4 files changed, 65 insertions(+), 35 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index bb4f9800fe7ef..9cfe0abcd9034 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1862,7 +1862,7 @@ bool CConnman::AddConnection(const std::string& address, ConnectionType conn_typ CSemaphoreGrant grant(*semOutbound, true); if (!grant) return false; - OpenNetworkConnection(CAddress(), false, &grant, address.c_str(), conn_type, /*use_v2transport=*/false); + OpenNetworkConnection(CAddress(), false, std::move(grant), address.c_str(), conn_type, /*use_v2transport=*/false); return true; } @@ -2294,9 +2294,9 @@ void CConnman::ProcessAddrFetch() m_addr_fetches.pop_front(); } CAddress addr; - CSemaphoreGrant grant(*semOutbound, true); + CSemaphoreGrant grant(*semOutbound, /*fTry=*/true); if (grant) { - OpenNetworkConnection(addr, false, &grant, strDest.c_str(), ConnectionType::ADDR_FETCH, /*use_v2transport=*/false); + OpenNetworkConnection(addr, false, std::move(grant), strDest.c_str(), ConnectionType::ADDR_FETCH, /*use_v2transport=*/false); } } @@ -2398,7 +2398,7 @@ void CConnman::ThreadOpenConnections(const std::vector connect) for (const std::string& strAddr : connect) { CAddress addr(CService(), NODE_NONE); - 
OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), ConnectionType::MANUAL, /*use_v2transport=*/false);
+            OpenNetworkConnection(addr, false, {}, strAddr.c_str(), ConnectionType::MANUAL, /*use_v2transport=*/false);
 
             for (int i = 0; i < 10 && i < nLoop; i++)
             {
                 if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
@@ -2703,7 +2703,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
             const bool count_failures{((int)outbound_ipv46_peer_netgroups.size() + outbound_privacy_network_peers) >= std::min(nMaxConnections - 1, 2)};
             // Use BIP324 transport when both us and them have NODE_P2P_V2 set.
             const bool use_v2transport(addrConnect.nServices & GetLocalServices() & NODE_P2P_V2);
-            OpenNetworkConnection(addrConnect, count_failures, &grant, /*strDest=*/nullptr, conn_type, use_v2transport);
+            OpenNetworkConnection(addrConnect, count_failures, std::move(grant), /*strDest=*/nullptr, conn_type, use_v2transport);
         }
     }
 }
@@ -2785,16 +2785,16 @@ void CConnman::ThreadOpenAddedConnections()
         bool tried = false;
         for (const AddedNodeInfo& info : vInfo) {
             if (!info.fConnected) {
-                if (!grant.TryAcquire()) {
+                if (!grant) {
                     // If we've used up our semaphore and need a new one, let's not wait here since while we are waiting
                     // the addednodeinfo state might change.
                     break;
                 }
                 tried = true;
                 CAddress addr(CService(), NODE_NONE);
-                OpenNetworkConnection(addr, false, &grant, info.m_params.m_added_node.c_str(), ConnectionType::MANUAL, info.m_params.m_use_v2transport);
-                if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
-                    return;
+                OpenNetworkConnection(addr, false, std::move(grant), info.m_params.m_added_node.c_str(), ConnectionType::MANUAL, info.m_params.m_use_v2transport);
+                if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return;
+                grant = CSemaphoreGrant(*semAddnode, /*fTry=*/true);
             }
         }
         // Retry every 60 seconds if a connection was attempted, otherwise two seconds
@@ -2804,7 +2804,7 @@
 }
 
 // if successful, this moves the passed grant to the constructed node
-void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, ConnectionType conn_type, bool use_v2transport)
+void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant&& grant_outbound, const char *pszDest, ConnectionType conn_type, bool use_v2transport)
 {
     AssertLockNotHeld(m_unused_i2p_sessions_mutex);
     assert(conn_type != ConnectionType::INBOUND);
@@ -2830,8 +2830,7 @@ void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai
     if (!pnode)
         return;
-    if (grantOutbound)
-        grantOutbound->MoveTo(pnode->grantOutbound);
+    pnode->grantOutbound = std::move(grant_outbound);
     m_msgproc->InitializeNode(*pnode, nLocalServices);
     {
diff --git a/src/net.h b/src/net.h
index 1e81bc76f5eaf..00c0bc05fa4d1 100644
--- a/src/net.h
+++ b/src/net.h
@@ -1107,7 +1107,7 @@ class CConnman
     bool GetNetworkActive() const { return fNetworkActive; };
     bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; };
     void SetNetworkActive(bool active);
-    void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant* grantOutbound, const char* strDest, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex);
+    void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant&& grant_outbound, const char* strDest, ConnectionType conn_type, bool use_v2transport)
EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); bool CheckIncomingNonce(uint64_t nonce); // alias for thread safety annotations only, not defined diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index a66f74242c211..3be91f292c8c7 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -317,7 +317,7 @@ static RPCHelpMan addnode() if (command == "onetry") { CAddress addr; - connman.OpenNetworkConnection(addr, /*fCountFailure=*/false, /*grantOutbound=*/nullptr, node_arg.c_str(), ConnectionType::MANUAL, use_v2transport); + connman.OpenNetworkConnection(addr, /*fCountFailure=*/false, /*grant_outbound=*/{}, node_arg.c_str(), ConnectionType::MANUAL, use_v2transport); return UniValue::VNULL; } diff --git a/src/sync.h b/src/sync.h index 7242a793abe46..45d40b5fdc377 100644 --- a/src/sync.h +++ b/src/sync.h @@ -301,6 +301,10 @@ inline MutexType* MaybeCheckNotHeld(MutexType* m) LOCKS_EXCLUDED(m) LOCK_RETURNE //! gcc and the -Wreturn-stack-address flag in clang, both enabled by default. #define WITH_LOCK(cs, code) (MaybeCheckNotHeld(cs), [&]() -> decltype(auto) { LOCK(cs); code; }()) +/** An implementation of a semaphore. + * + * See https://en.wikipedia.org/wiki/Semaphore_(programming) + */ class CSemaphore { private: @@ -309,25 +313,33 @@ class CSemaphore int value; public: - explicit CSemaphore(int init) : value(init) {} + explicit CSemaphore(int init) noexcept : value(init) {} - void wait() + // Disallow default construct, copy, move. + CSemaphore() = delete; + CSemaphore(const CSemaphore&) = delete; + CSemaphore(CSemaphore&&) = delete; + CSemaphore& operator=(const CSemaphore&) = delete; + CSemaphore& operator=(CSemaphore&&) = delete; + + void wait() noexcept { std::unique_lock lock(mutex); condition.wait(lock, [&]() { return value >= 1; }); value--; } - bool try_wait() + bool try_wait() noexcept { std::lock_guard lock(mutex); - if (value < 1) + if (value < 1) { return false; + } value--; return true; } - void post() + void post() noexcept { { std::lock_guard lock(mutex); @@ -345,45 +357,64 @@ class CSemaphoreGrant bool fHaveGrant; public: - void Acquire() + void Acquire() noexcept { - if (fHaveGrant) + if (fHaveGrant) { return; + } sem->wait(); fHaveGrant = true; } - void Release() + void Release() noexcept { - if (!fHaveGrant) + if (!fHaveGrant) { return; + } sem->post(); fHaveGrant = false; } - bool TryAcquire() + bool TryAcquire() noexcept { - if (!fHaveGrant && sem->try_wait()) + if (!fHaveGrant && sem->try_wait()) { fHaveGrant = true; + } return fHaveGrant; } - void MoveTo(CSemaphoreGrant& grant) + // Disallow copy. + CSemaphoreGrant(const CSemaphoreGrant&) = delete; + CSemaphoreGrant& operator=(const CSemaphoreGrant&) = delete; + + // Allow move. 
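+    // (A moved-from grant is left with no semaphore and no grant, so its
+    // destructor and Release() become harmless no-ops.)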
+ CSemaphoreGrant(CSemaphoreGrant&& other) noexcept + { + sem = other.sem; + fHaveGrant = other.fHaveGrant; + other.fHaveGrant = false; + other.sem = nullptr; + } + + CSemaphoreGrant& operator=(CSemaphoreGrant&& other) noexcept { - grant.Release(); - grant.sem = sem; - grant.fHaveGrant = fHaveGrant; - fHaveGrant = false; + Release(); + sem = other.sem; + fHaveGrant = other.fHaveGrant; + other.fHaveGrant = false; + other.sem = nullptr; + return *this; } - CSemaphoreGrant() : sem(nullptr), fHaveGrant(false) {} + CSemaphoreGrant() noexcept : sem(nullptr), fHaveGrant(false) {} - explicit CSemaphoreGrant(CSemaphore& sema, bool fTry = false) : sem(&sema), fHaveGrant(false) + explicit CSemaphoreGrant(CSemaphore& sema, bool fTry = false) noexcept : sem(&sema), fHaveGrant(false) { - if (fTry) + if (fTry) { TryAcquire(); - else + } else { Acquire(); + } } ~CSemaphoreGrant() @@ -391,7 +422,7 @@ class CSemaphoreGrant Release(); } - operator bool() const + explicit operator bool() const noexcept { return fHaveGrant; } From 432a62c4dce908729c62edcfaebc3da6387c3afe Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Tue, 22 Aug 2023 20:42:24 -0400 Subject: [PATCH 083/172] net: reconnect with V1Transport under certain conditions When an outbound v2 connection is disconnected without receiving anything, but at least 24 bytes of our pubkey were sent out (enough to constitute an invalid v1 header), add them to a queue of reconnections to be tried. The reconnections are in a queue rather than performed immediately, because we should not block the socket handler thread with connection creation (a blocking operation that can take multiple seconds). --- src/net.cpp | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/net.h | 44 ++++++++++++++++++++++++++--- 2 files changed, 119 insertions(+), 4 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 9cfe0abcd9034..a3f1a18fe79e6 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1546,6 +1546,9 @@ void V2Transport::MarkBytesSent(size_t bytes_sent) noexcept m_send_pos += bytes_sent; Assume(m_send_pos <= m_send_buffer.size()); + if (m_send_pos >= CMessageHeader::HEADER_SIZE) { + m_sent_v1_header_worth = true; + } // Wipe the buffer when everything is sent. if (m_send_pos == m_send_buffer.size()) { m_send_pos = 0; @@ -1553,6 +1556,23 @@ void V2Transport::MarkBytesSent(size_t bytes_sent) noexcept } } +bool V2Transport::ShouldReconnectV1() const noexcept +{ + AssertLockNotHeld(m_send_mutex); + AssertLockNotHeld(m_recv_mutex); + // Only outgoing connections need reconnection. + if (!m_initiating) return false; + + LOCK(m_recv_mutex); + // We only reconnect in the very first state and when the receive buffer is empty. Together + // these conditions imply nothing has been received so far. + if (m_recv_state != RecvState::KEY) return false; + if (!m_recv_buffer.empty()) return false; + // Check if we've sent enough for the other side to disconnect us (if it was V1). + LOCK(m_send_mutex); + return m_sent_v1_header_worth; +} + size_t V2Transport::GetSendMemoryUsage() const noexcept { AssertLockNotHeld(m_send_mutex); @@ -1868,6 +1888,13 @@ bool CConnman::AddConnection(const std::string& address, ConnectionType conn_typ void CConnman::DisconnectNodes() { + AssertLockNotHeld(m_nodes_mutex); + AssertLockNotHeld(m_reconnections_mutex); + + // Use a temporary variable to accumulate desired reconnections, so we don't need + // m_reconnections_mutex while holding m_nodes_mutex. 
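+    // (This also means the two mutexes are never held at the same time.)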
+ decltype(m_reconnections) reconnections_to_add; + { LOCK(m_nodes_mutex); @@ -1890,6 +1917,19 @@ void CConnman::DisconnectNodes() // remove from m_nodes m_nodes.erase(remove(m_nodes.begin(), m_nodes.end(), pnode), m_nodes.end()); + // Add to reconnection list if appropriate. We don't reconnect right here, because + // the creation of a connection is a blocking operation (up to several seconds), + // and we don't want to hold up the socket handler thread for that long. + if (pnode->m_transport->ShouldReconnectV1()) { + reconnections_to_add.push_back({ + .addr_connect = pnode->addr, + .grant = std::move(pnode->grantOutbound), + .destination = pnode->m_dest, + .conn_type = pnode->m_conn_type, + .use_v2transport = false}); + LogPrint(BCLog::NET, "retrying with v1 transport protocol for peer=%d\n", pnode->GetId()); + } + // release outbound grant (if any) pnode->grantOutbound.Release(); @@ -1917,6 +1957,11 @@ void CConnman::DisconnectNodes() } } } + { + // Move entries from reconnections_to_add to m_reconnections. + LOCK(m_reconnections_mutex); + m_reconnections.splice(m_reconnections.end(), std::move(reconnections_to_add)); + } } void CConnman::NotifyNumConnectionsChanged() @@ -2389,6 +2434,7 @@ bool CConnman::MaybePickPreferredNetwork(std::optional& network) void CConnman::ThreadOpenConnections(const std::vector connect) { AssertLockNotHeld(m_unused_i2p_sessions_mutex); + AssertLockNotHeld(m_reconnections_mutex); FastRandomContext rng; // Connect to specific addresses if (!connect.empty()) @@ -2432,6 +2478,8 @@ void CConnman::ThreadOpenConnections(const std::vector connect) if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return; + PerformReconnections(); + CSemaphoreGrant grant(*semOutbound); if (interruptNet) return; @@ -2778,6 +2826,7 @@ std::vector CConnman::GetAddedNodeInfo() const void CConnman::ThreadOpenAddedConnections() { AssertLockNotHeld(m_unused_i2p_sessions_mutex); + AssertLockNotHeld(m_reconnections_mutex); while (true) { CSemaphoreGrant grant(*semAddnode); @@ -2800,6 +2849,8 @@ void CConnman::ThreadOpenAddedConnections() // Retry every 60 seconds if a connection was attempted, otherwise two seconds if (!interruptNet.sleep_for(std::chrono::seconds(tried ? 60 : 2))) return; + // See if any reconnections are desired. + PerformReconnections(); } } @@ -3613,6 +3664,7 @@ CNode::CNode(NodeId idIn, addr{addrIn}, addrBind{addrBindIn}, m_addr_name{addrNameIn.empty() ? addr.ToStringAddrPort() : addrNameIn}, + m_dest(addrNameIn), m_inbound_onion{inbound_onion}, m_prefer_evict{node_opts.prefer_evict}, nKeyedNetGroup{nKeyedNetGroupIn}, @@ -3743,6 +3795,33 @@ uint64_t CConnman::CalculateKeyedNetGroup(const CAddress& address) const return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP).Write(vchNetGroup).Finalize(); } +void CConnman::PerformReconnections() +{ + AssertLockNotHeld(m_reconnections_mutex); + AssertLockNotHeld(m_unused_i2p_sessions_mutex); + while (true) { + // Move first element of m_reconnections to todo (avoiding an allocation inside the lock). + decltype(m_reconnections) todo; + { + LOCK(m_reconnections_mutex); + if (m_reconnections.empty()) break; + todo.splice(todo.end(), m_reconnections, m_reconnections.begin()); + } + + auto& item = *todo.begin(); + OpenNetworkConnection(item.addr_connect, + // We only reconnect if the first attempt to connect succeeded at + // connection time, but then failed after the CNode object was + // created. Since we already know connecting is possible, do not + // count failure to reconnect. 
+ /*fCountFailure=*/false, + std::move(item.grant), + item.destination.empty() ? nullptr : item.destination.c_str(), + item.conn_type, + item.use_v2transport); + } +} + // Dump binary message to file, with timestamp. static void CaptureMessageToFile(const CAddress& addr, const std::string& msg_type, diff --git a/src/net.h b/src/net.h index 00c0bc05fa4d1..297e408320096 100644 --- a/src/net.h +++ b/src/net.h @@ -361,6 +361,11 @@ class Transport { /** Return the memory usage of this transport attributable to buffered data to send. */ virtual size_t GetSendMemoryUsage() const noexcept = 0; + + // 3. Miscellaneous functions. + + /** Whether upon disconnections, a reconnect with V1 is warranted. */ + virtual bool ShouldReconnectV1() const noexcept = 0; }; class V1Transport final : public Transport @@ -440,6 +445,7 @@ class V1Transport final : public Transport BytesToSend GetBytesToSend(bool have_next_message) const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); void MarkBytesSent(size_t bytes_sent) noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); size_t GetSendMemoryUsage() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); + bool ShouldReconnectV1() const noexcept override { return false; } }; class V2Transport final : public Transport @@ -608,6 +614,8 @@ class V2Transport final : public Transport std::string m_send_type GUARDED_BY(m_send_mutex); /** Current sender state. */ SendState m_send_state GUARDED_BY(m_send_mutex); + /** Whether we've sent at least 24 bytes (which would trigger disconnect for V1 peers). */ + bool m_sent_v1_header_worth GUARDED_BY(m_send_mutex) {false}; /** Change the receive state. */ void SetReceiveState(RecvState recv_state) noexcept EXCLUSIVE_LOCKS_REQUIRED(m_recv_mutex); @@ -653,6 +661,9 @@ class V2Transport final : public Transport BytesToSend GetBytesToSend(bool have_next_message) const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); void MarkBytesSent(size_t bytes_sent) noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); size_t GetSendMemoryUsage() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_send_mutex); + + // Miscellaneous functions. + bool ShouldReconnectV1() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex, !m_send_mutex); }; struct CNodeOptions @@ -706,6 +717,8 @@ class CNode // Bind address of our side of the connection const CAddress addrBind; const std::string m_addr_name; + /** The pszDest argument provided to ConnectNode(). Only used for reconnections. */ + const std::string m_dest; //! Whether this peer is an inbound onion, i.e. connected via our Tor onion service. 
const bool m_inbound_onion; std::atomic nVersion{0}; @@ -1253,10 +1266,10 @@ class CConnman bool Bind(const CService& addr, unsigned int flags, NetPermissionFlags permissions); bool InitBinds(const Options& options); - void ThreadOpenAddedConnections() EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_unused_i2p_sessions_mutex); + void ThreadOpenAddedConnections() EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_unused_i2p_sessions_mutex, !m_reconnections_mutex); void AddAddrFetch(const std::string& strDest) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex); void ProcessAddrFetch() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_unused_i2p_sessions_mutex); - void ThreadOpenConnections(std::vector connect) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_added_nodes_mutex, !m_nodes_mutex, !m_unused_i2p_sessions_mutex); + void ThreadOpenConnections(std::vector connect) EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_added_nodes_mutex, !m_nodes_mutex, !m_unused_i2p_sessions_mutex, !m_reconnections_mutex); void ThreadMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc); void ThreadI2PAcceptIncoming(); void AcceptConnection(const ListenSocket& hListenSocket); @@ -1274,7 +1287,7 @@ class CConnman const CAddress& addr_bind, const CAddress& addr); - void DisconnectNodes(); + void DisconnectNodes() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex, !m_nodes_mutex); void NotifyNumConnectionsChanged(); /** Return true if the peer is inactive and should be disconnected. */ bool InactivityCheck(const CNode& node) const; @@ -1306,7 +1319,7 @@ class CConnman */ void SocketHandlerListening(const Sock::EventsPerSock& events_per_sock); - void ThreadSocketHandler() EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc); + void ThreadSocketHandler() EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !mutexMsgProc, !m_nodes_mutex, !m_reconnections_mutex); void ThreadDNSAddressSeed() EXCLUSIVE_LOCKS_REQUIRED(!m_addr_fetches_mutex, !m_nodes_mutex); uint64_t CalculateKeyedNetGroup(const CAddress& ad) const; @@ -1537,6 +1550,29 @@ class CConnman */ std::queue> m_unused_i2p_sessions GUARDED_BY(m_unused_i2p_sessions_mutex); + /** + * Mutex protecting m_reconnections. + */ + Mutex m_reconnections_mutex; + + /** Struct for entries in m_reconnections. */ + struct ReconnectionInfo + { + CAddress addr_connect; + CSemaphoreGrant grant; + std::string destination; + ConnectionType conn_type; + bool use_v2transport; + }; + + /** + * List of reconnections we have to make. + */ + std::list m_reconnections GUARDED_BY(m_reconnections_mutex); + + /** Attempt reconnections, if m_reconnections non-empty. */ + void PerformReconnections() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex, !m_unused_i2p_sessions_mutex); + /** * Cap on the size of `m_unused_i2p_sessions`, to ensure it does not * unexpectedly use too much memory. 
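A note on the pattern used by PerformReconnections() in the patch above: the blocking connection attempt is never performed while m_reconnections_mutex is held. Instead, one entry at a time is spliced off the shared list under the lock and processed after the lock is released. Below is a minimal self-contained sketch of that splice pattern, using only the standard library; the names g_queue, g_queue_mutex and ProcessAll are illustrative and do not appear in the patch.

    #include <iostream>
    #include <list>
    #include <mutex>
    #include <string>

    std::mutex g_queue_mutex;
    std::list<std::string> g_queue; // shared work queue, guarded by g_queue_mutex

    void ProcessAll()
    {
        while (true) {
            // Detach exactly one element while holding the lock. splice()
            // relinks the existing list node, so no allocation happens
            // under the mutex.
            std::list<std::string> todo;
            {
                std::lock_guard<std::mutex> lock{g_queue_mutex};
                if (g_queue.empty()) break;
                todo.splice(todo.end(), g_queue, g_queue.begin());
            }
            // The slow part (here just a print, in the patch a connection
            // attempt taking up to several seconds) runs with the mutex
            // released, so producers are never blocked by it.
            std::cout << "processing " << todo.front() << '\n';
        }
    }

    int main()
    {
        g_queue = {"reconnect-to-a", "reconnect-to-b"};
        ProcessAll();
    }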
From b815cce50e4bfa0efea8ea02659b7042c8fb18be Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Sun, 30 Jul 2023 23:26:04 -0400 Subject: [PATCH 084/172] net: expose transport types/session IDs of connections in RPC and logs Co-authored-by: Dhruv Mehta <856960+dhruv@users.noreply.github.com> --- src/net.cpp | 29 +++++++++++++++++++ src/net.h | 16 ++++++++++ src/net_processing.cpp | 11 ++++--- src/node/connection_types.cpp | 14 +++++++++ src/node/connection_types.h | 11 +++++++ src/rpc/net.cpp | 10 +++++++ src/test/fuzz/p2p_transport_serialization.cpp | 3 ++ src/test/net_tests.cpp | 12 ++++++++ test/functional/rpc_net.py | 2 ++ 9 files changed, 104 insertions(+), 4 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index a3f1a18fe79e6..a3792b7d49ab8 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -667,6 +667,9 @@ void CNode::CopyStats(CNodeStats& stats) LOCK(cs_vRecv); X(mapRecvBytesPerMsgType); X(nRecvBytes); + Transport::Info info = m_transport->GetInfo(); + stats.m_transport_type = info.transport_type; + if (info.session_id) stats.m_session_id = HexStr(*info.session_id); } X(m_permission_flags); @@ -734,6 +737,11 @@ V1Transport::V1Transport(const NodeId node_id, int nTypeIn, int nVersionIn) noex Reset(); } +Transport::Info V1Transport::GetInfo() const noexcept +{ + return {.transport_type = TransportProtocolType::V1, .session_id = {}}; +} + int V1Transport::readHeader(Span msg_bytes) { AssertLockHeld(m_recv_mutex); @@ -1582,6 +1590,27 @@ size_t V2Transport::GetSendMemoryUsage() const noexcept return sizeof(m_send_buffer) + memusage::DynamicUsage(m_send_buffer); } +Transport::Info V2Transport::GetInfo() const noexcept +{ + AssertLockNotHeld(m_recv_mutex); + LOCK(m_recv_mutex); + if (m_recv_state == RecvState::V1) return m_v1_fallback.GetInfo(); + + Transport::Info info; + + // Do not report v2 and session ID until the version packet has been received + // and verified (confirming that the other side very likely has the same keys as us). + if (m_recv_state != RecvState::KEY_MAYBE_V1 && m_recv_state != RecvState::KEY && + m_recv_state != RecvState::GARB_GARBTERM && m_recv_state != RecvState::VERSION) { + info.transport_type = TransportProtocolType::V2; + info.session_id = uint256(MakeUCharSpan(m_cipher.GetSessionID())); + } else { + info.transport_type = TransportProtocolType::DETECTING; + } + + return info; +} + std::pair CConnman::SocketSendData(CNode& node) const { auto it = node.vSendMsg.begin(); diff --git a/src/net.h b/src/net.h index 297e408320096..2f7b832fbaa66 100644 --- a/src/net.h +++ b/src/net.h @@ -232,6 +232,10 @@ class CNodeStats Network m_network; uint32_t m_mapped_as; ConnectionType m_conn_type; + /** Transport protocol type. */ + TransportProtocolType m_transport_type; + /** BIP324 session id string in hex, if any. */ + std::string m_session_id; }; @@ -268,6 +272,15 @@ class Transport { public: virtual ~Transport() {} + struct Info + { + TransportProtocolType transport_type; + std::optional session_id; + }; + + /** Retrieve information about this transport. */ + virtual Info GetInfo() const noexcept = 0; + // 1. Receiver side functions, for decoding bytes received on the wire into transport protocol // agnostic CNetMessage (message type & payload) objects. 
@@ -426,6 +439,8 @@ class V1Transport final : public Transport return WITH_LOCK(m_recv_mutex, return CompleteInternal()); } + Info GetInfo() const noexcept override; + bool ReceivedBytes(Span& msg_bytes) override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex) { AssertLockNotHeld(m_recv_mutex); @@ -664,6 +679,7 @@ class V2Transport final : public Transport // Miscellaneous functions. bool ShouldReconnectV1() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex, !m_send_mutex); + Info GetInfo() const noexcept override EXCLUSIVE_LOCKS_REQUIRED(!m_recv_mutex); }; struct CNodeOptions diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 03dee1351207c..06086d6804aca 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3585,13 +3585,16 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } - if (!pfrom.IsInboundConn()) { + // Log succesful connections unconditionally for outbound, but not for inbound as those + // can be triggered by an attacker at high rate. + if (!pfrom.IsInboundConn() || LogAcceptCategory(BCLog::NET, BCLog::Level::Debug)) { const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; - LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s%s (%s)\n", + LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n", + pfrom.ConnectionTypeAsString(), + TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type), pfrom.nVersion.load(), peer->m_starting_height, pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : ""), - (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : ""), - pfrom.ConnectionTypeAsString()); + (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); } if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) { diff --git a/src/node/connection_types.cpp b/src/node/connection_types.cpp index 904f4371aafd1..5e4dc5bf2ef94 100644 --- a/src/node/connection_types.cpp +++ b/src/node/connection_types.cpp @@ -24,3 +24,17 @@ std::string ConnectionTypeAsString(ConnectionType conn_type) assert(false); } + +std::string TransportTypeAsString(TransportProtocolType transport_type) +{ + switch (transport_type) { + case TransportProtocolType::DETECTING: + return "detecting"; + case TransportProtocolType::V1: + return "v1"; + case TransportProtocolType::V2: + return "v2"; + } // no default case, so the compiler can warn about missing cases + + assert(false); +} diff --git a/src/node/connection_types.h b/src/node/connection_types.h index 5e1abcace67d1..a911b95f7e917 100644 --- a/src/node/connection_types.h +++ b/src/node/connection_types.h @@ -6,6 +6,7 @@ #define BITCOIN_NODE_CONNECTION_TYPES_H #include +#include /** Different types of connections to a peer. 
This enum encapsulates the * information we have available at the time of opening or accepting the @@ -79,4 +80,14 @@ enum class ConnectionType { /** Convert ConnectionType enum to a string value */ std::string ConnectionTypeAsString(ConnectionType conn_type); +/** Transport layer version */ +enum class TransportProtocolType : uint8_t { + DETECTING, //!< Peer could be v1 or v2 + V1, //!< Unencrypted, plaintext protocol + V2, //!< BIP324 protocol +}; + +/** Convert TransportProtocolType enum to a string value */ +std::string TransportTypeAsString(TransportProtocolType transport_type); + #endif // BITCOIN_NODE_CONNECTION_TYPES_H diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 3be91f292c8c7..8d796b8e9b847 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -45,6 +45,12 @@ const std::vector CONNECTION_TYPE_DOC{ "feeler (short-lived automatic connection for testing addresses)" }; +const std::vector TRANSPORT_TYPE_DOC{ + "detecting (peer could be v1 or v2)", + "v1 (plaintext transport protocol)", + "v2 (BIP324 encrypted transport protocol)" +}; + static RPCHelpMan getconnectioncount() { return RPCHelpMan{"getconnectioncount", @@ -164,6 +170,8 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::STR, "connection_type", "Type of connection: \n" + Join(CONNECTION_TYPE_DOC, ",\n") + ".\n" "Please note this output is unlikely to be stable in upcoming releases as we iterate to\n" "best capture connection behaviors."}, + {RPCResult::Type::STR, "transport_protocol_type", "Type of transport protocol: \n" + Join(TRANSPORT_TYPE_DOC, ",\n") + ".\n"}, + {RPCResult::Type::STR, "session_id", "The session ID for this connection, or \"\" if there is none (\"v2\" transport protocol only).\n"}, }}, }}, }, @@ -268,6 +276,8 @@ static RPCHelpMan getpeerinfo() } obj.pushKV("bytesrecv_per_msg", recvPerMsgType); obj.pushKV("connection_type", ConnectionTypeAsString(stats.m_conn_type)); + obj.pushKV("transport_protocol_type", TransportTypeAsString(stats.m_transport_type)); + obj.pushKV("session_id", stats.m_session_id); ret.push_back(obj); } diff --git a/src/test/fuzz/p2p_transport_serialization.cpp b/src/test/fuzz/p2p_transport_serialization.cpp index 88d6e96eacbf6..21d8dab53605e 100644 --- a/src/test/fuzz/p2p_transport_serialization.cpp +++ b/src/test/fuzz/p2p_transport_serialization.cpp @@ -328,6 +328,9 @@ void SimulationTest(Transport& initiator, Transport& responder, R& rng, FuzzedDa // Make sure all expected messages were received. assert(expected[0].empty()); assert(expected[1].empty()); + + // Compare session IDs. + assert(transports[0]->GetInfo().session_id == transports[1]->GetInfo().session_id); } std::unique_ptr MakeV1Transport(NodeId nodeid) noexcept diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 1df08127ad782..5976aa37131d7 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -1321,6 +1321,14 @@ class V2TransportTester SendPacket(contents); } + /** Test whether the transport's session ID matches the session ID we expect. */ + void CompareSessionIDs() const + { + auto info = m_transport.GetInfo(); + BOOST_CHECK(info.session_id); + BOOST_CHECK(uint256(MakeUCharSpan(m_cipher.GetSessionID())) == *info.session_id); + } + /** Introduce a bit error in the data scheduled to be sent. 
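     *  (The tests below use this to check that a single flipped bit on the
     *  wire makes the handshake or packet authentication fail instead of
     *  being silently accepted.)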
*/ void Damage() { @@ -1346,6 +1354,7 @@ BOOST_AUTO_TEST_CASE(v2transport_test) BOOST_REQUIRE(ret && ret->empty()); tester.ReceiveGarbage(); tester.ReceiveVersion(); + tester.CompareSessionIDs(); auto msg_data_1 = g_insecure_rand_ctx.randbytes(InsecureRandRange(100000)); auto msg_data_2 = g_insecure_rand_ctx.randbytes(InsecureRandRange(1000)); tester.SendMessage(uint8_t(4), msg_data_1); // cmpctblock short id @@ -1386,6 +1395,7 @@ BOOST_AUTO_TEST_CASE(v2transport_test) BOOST_REQUIRE(ret && ret->empty()); tester.ReceiveGarbage(); tester.ReceiveVersion(); + tester.CompareSessionIDs(); auto msg_data_1 = g_insecure_rand_ctx.randbytes(InsecureRandRange(100000)); auto msg_data_2 = g_insecure_rand_ctx.randbytes(InsecureRandRange(1000)); tester.SendMessage(uint8_t(14), msg_data_1); // inv short id @@ -1439,6 +1449,7 @@ BOOST_AUTO_TEST_CASE(v2transport_test) BOOST_REQUIRE(ret && ret->empty()); tester.ReceiveGarbage(); tester.ReceiveVersion(); + tester.CompareSessionIDs(); for (unsigned d = 0; d < num_decoys_1; ++d) { auto decoy_data = g_insecure_rand_ctx.randbytes(InsecureRandRange(1000)); tester.SendPacket(/*content=*/decoy_data, /*aad=*/{}, /*ignore=*/true); @@ -1516,6 +1527,7 @@ BOOST_AUTO_TEST_CASE(v2transport_test) BOOST_REQUIRE(ret && ret->empty()); tester.ReceiveGarbage(); tester.ReceiveVersion(); + tester.CompareSessionIDs(); auto msg_data_1 = g_insecure_rand_ctx.randbytes(4000000); // test that receiving 4M payload works auto msg_data_2 = g_insecure_rand_ctx.randbytes(4000000); // test that sending 4M payload works tester.SendMessage(uint8_t(InsecureRandRange(223) + 33), {}); // unknown short id diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 117802b812ceb..a87944a0627ec 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -142,11 +142,13 @@ def test_getpeerinfo(self): "relaytxes": False, "services": "0000000000000000", "servicesnames": [], + "session_id": "", "startingheight": -1, "subver": "", "synced_blocks": -1, "synced_headers": -1, "timeoffset": 0, + "transport_protocol_type": "v1", "version": 0, }, ) From 05d19fbcc10f26c7f1e3a9afc660eb7fa71b1d8c Mon Sep 17 00:00:00 2001 From: dhruv <856960+dhruv@users.noreply.github.com> Date: Wed, 7 Sep 2022 15:59:49 -0700 Subject: [PATCH 085/172] test: Functional test for opportunistic encryption Co-authored-by: Pieter Wuille --- src/net.cpp | 4 + test/functional/p2p_v2_transport.py | 111 ++++++++++++++++-- .../test_framework/test_framework.py | 19 ++- 3 files changed, 121 insertions(+), 13 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index a3792b7d49ab8..6b2ef5f43d9fe 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1552,6 +1552,10 @@ void V2Transport::MarkBytesSent(size_t bytes_sent) noexcept LOCK(m_send_mutex); if (m_send_state == SendState::V1) return m_v1_fallback.MarkBytesSent(bytes_sent); + if (m_send_state == SendState::AWAITING_KEY && m_send_pos == 0 && bytes_sent > 0) { + LogPrint(BCLog::NET, "start sending v2 handshake to peer=%d\n", m_nodeid); + } + m_send_pos += bytes_sent; Assume(m_send_pos <= m_send_buffer.size()); if (m_send_pos >= CMessageHeader::HEADER_SIZE) { diff --git a/test/functional/p2p_v2_transport.py b/test/functional/p2p_v2_transport.py index 9df4c297e40c2..2455bf2e2da5b 100755 --- a/test/functional/p2p_v2_transport.py +++ b/test/functional/p2p_v2_transport.py @@ -12,19 +12,116 @@ class V2TransportTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain=True - self.num_nodes = 1 - self.extra_args = [["-v2transport=0"]] + self.setup_clean_chain 
= True
+        self.num_nodes = 5
+        self.extra_args = [["-v2transport=1"], ["-v2transport=1"], ["-v2transport=0"], ["-v2transport=0"], ["-v2transport=0"]]
 
     def run_test(self):
-        network_info = self.nodes[0].getnetworkinfo()
+        sending_handshake = "start sending v2 handshake to peer"
+        downgrading_to_v1 = "retrying with v1 transport protocol for peer"
+        self.disconnect_nodes(0, 1)
+        self.disconnect_nodes(1, 2)
+        self.disconnect_nodes(2, 3)
+        self.disconnect_nodes(3, 4)
+
+        # verify local services
+        network_info = self.nodes[2].getnetworkinfo()
         assert_equal(int(network_info["localservices"], 16) & NODE_P2P_V2, 0)
         assert "P2P_V2" not in network_info["localservicesnames"]
-
-        self.restart_node(0, ["-v2transport=1"])
-        network_info = self.nodes[0].getnetworkinfo()
+        network_info = self.nodes[1].getnetworkinfo()
         assert_equal(int(network_info["localservices"], 16) & NODE_P2P_V2, NODE_P2P_V2)
         assert "P2P_V2" in network_info["localservicesnames"]
 
+        # V2 nodes can sync with V2 nodes
+        assert_equal(self.nodes[0].getblockcount(), 0)
+        assert_equal(self.nodes[1].getblockcount(), 0)
+        with self.nodes[0].assert_debug_log(expected_msgs=[sending_handshake],
+                                            unexpected_msgs=[downgrading_to_v1]):
+            self.connect_nodes(0, 1, peer_advertises_v2=True)
+        self.generate(self.nodes[0], 5, sync_fun=lambda: self.sync_all(self.nodes[0:2]))
+        assert_equal(self.nodes[1].getblockcount(), 5)
+        # verify there is a v2 connection between node 0 and 1
+        node_0_info = self.nodes[0].getpeerinfo()
+        node_1_info = self.nodes[1].getpeerinfo()
+        assert_equal(len(node_0_info), 1)
+        assert_equal(len(node_1_info), 1)
+        assert_equal(node_0_info[0]["transport_protocol_type"], "v2")
+        assert_equal(node_1_info[0]["transport_protocol_type"], "v2")
+        assert_equal(len(node_0_info[0]["session_id"]), 64)
+        assert_equal(len(node_1_info[0]["session_id"]), 64)
+        assert_equal(node_0_info[0]["session_id"], node_1_info[0]["session_id"])
+
+        # V1 nodes can sync with each other
+        assert_equal(self.nodes[2].getblockcount(), 0)
+        assert_equal(self.nodes[3].getblockcount(), 0)
+        with self.nodes[2].assert_debug_log(expected_msgs=[],
+                                            unexpected_msgs=[sending_handshake, downgrading_to_v1]):
+            self.connect_nodes(2, 3, peer_advertises_v2=False)
+        self.generate(self.nodes[2], 8, sync_fun=lambda: self.sync_all(self.nodes[2:4]))
+        assert_equal(self.nodes[3].getblockcount(), 8)
+        assert self.nodes[0].getbestblockhash() != self.nodes[2].getbestblockhash()
+        # verify there is a v1 connection between node 2 and 3
+        node_2_info = self.nodes[2].getpeerinfo()
+        node_3_info = self.nodes[3].getpeerinfo()
+        assert_equal(len(node_2_info), 1)
+        assert_equal(len(node_3_info), 1)
+        assert_equal(node_2_info[0]["transport_protocol_type"], "v1")
+        assert_equal(node_3_info[0]["transport_protocol_type"], "v1")
+        assert_equal(len(node_2_info[0]["session_id"]), 0)
+        assert_equal(len(node_3_info[0]["session_id"]), 0)
+
+        # V1 nodes can sync with V2 nodes
+        self.disconnect_nodes(0, 1)
+        self.disconnect_nodes(2, 3)
+        with self.nodes[2].assert_debug_log(expected_msgs=[],
+                                            unexpected_msgs=[sending_handshake, downgrading_to_v1]):
+            self.connect_nodes(2, 1, peer_advertises_v2=False)  # cannot enable v2 on v1 node
+        self.sync_all(self.nodes[1:3])
+        assert_equal(self.nodes[1].getblockcount(), 8)
+        assert self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash()
+        # verify there is a v1 connection between node 1 and 2
+        node_1_info = self.nodes[1].getpeerinfo()
+        node_2_info = self.nodes[2].getpeerinfo()
+        assert_equal(len(node_1_info), 1)
+        assert_equal(len(node_2_info), 1)
+
assert_equal(node_1_info[0]["transport_protocol_type"], "v1") + assert_equal(node_2_info[0]["transport_protocol_type"], "v1") + assert_equal(len(node_1_info[0]["session_id"]), 0) + assert_equal(len(node_2_info[0]["session_id"]), 0) + + # V2 nodes can sync with V1 nodes + self.disconnect_nodes(1, 2) + with self.nodes[0].assert_debug_log(expected_msgs=[], + unexpected_msgs=[sending_handshake, downgrading_to_v1]): + self.connect_nodes(0, 3, peer_advertises_v2=False) + self.sync_all([self.nodes[0], self.nodes[3]]) + assert_equal(self.nodes[0].getblockcount(), 8) + # verify there is a v1 connection between node 0 and 3 + node_0_info = self.nodes[0].getpeerinfo() + node_3_info = self.nodes[3].getpeerinfo() + assert_equal(len(node_0_info), 1) + assert_equal(len(node_3_info), 1) + assert_equal(node_0_info[0]["transport_protocol_type"], "v1") + assert_equal(node_3_info[0]["transport_protocol_type"], "v1") + assert_equal(len(node_0_info[0]["session_id"]), 0) + assert_equal(len(node_3_info[0]["session_id"]), 0) + + # V2 node mines another block and everyone gets it + self.connect_nodes(0, 1, peer_advertises_v2=True) + self.connect_nodes(1, 2, peer_advertises_v2=False) + self.generate(self.nodes[1], 1, sync_fun=lambda: self.sync_all(self.nodes[0:4])) + assert_equal(self.nodes[0].getblockcount(), 9) # sync_all() verifies tip hashes match + + # V1 node mines another block and everyone gets it + self.generate(self.nodes[3], 2, sync_fun=lambda: self.sync_all(self.nodes[0:4])) + assert_equal(self.nodes[2].getblockcount(), 11) # sync_all() verifies tip hashes match + + assert_equal(self.nodes[4].getblockcount(), 0) + # Peer 4 is v1 p2p, but is falsely advertised as v2. + with self.nodes[1].assert_debug_log(expected_msgs=[sending_handshake, downgrading_to_v1]): + self.connect_nodes(1, 4, peer_advertises_v2=True) + self.sync_all() + assert_equal(self.nodes[4].getblockcount(), 11) + if __name__ == '__main__': V2TransportTest().main() diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 73635b4397e1e..669959dc8a89d 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -581,13 +581,20 @@ def restart_node(self, i, extra_args=None): def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) - def connect_nodes(self, a, b): + def connect_nodes(self, a, b, *, peer_advertises_v2=False): from_connection = self.nodes[a] to_connection = self.nodes[b] from_num_peers = 1 + len(from_connection.getpeerinfo()) to_num_peers = 1 + len(to_connection.getpeerinfo()) ip_port = "127.0.0.1:" + str(p2p_port(b)) - from_connection.addnode(ip_port, "onetry") + + if peer_advertises_v2: + from_connection.addnode(node=ip_port, command="onetry", v2transport=True) + else: + # skip the optional third argument (default false) for + # compatibility with older clients + from_connection.addnode(ip_port, "onetry") + # poll until version handshake complete to avoid race conditions # with transaction relaying # See comments in net_processing: @@ -595,12 +602,12 @@ def connect_nodes(self, a, b): # * Must have a verack message before anything else self.wait_until(lambda: sum(peer['version'] != 0 for peer in from_connection.getpeerinfo()) == from_num_peers) self.wait_until(lambda: sum(peer['version'] != 0 for peer in to_connection.getpeerinfo()) == to_num_peers) - self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()) == from_num_peers) - 
self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in to_connection.getpeerinfo()) == to_num_peers) + self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) >= 21 for peer in from_connection.getpeerinfo()) == from_num_peers) + self.wait_until(lambda: sum(peer['bytesrecv_per_msg'].pop('verack', 0) >= 21 for peer in to_connection.getpeerinfo()) == to_num_peers) # The message bytes are counted before processing the message, so make # sure it was fully processed by waiting for a ping. - self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 32 for peer in from_connection.getpeerinfo()) == from_num_peers) - self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 32 for peer in to_connection.getpeerinfo()) == to_num_peers) + self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 for peer in from_connection.getpeerinfo()) == from_num_peers) + self.wait_until(lambda: sum(peer["bytesrecv_per_msg"].pop("pong", 0) >= 29 for peer in to_connection.getpeerinfo()) == to_num_peers) def disconnect_nodes(self, a, b): def disconnect_nodes_helper(node_a, node_b): From 64ca7210f05c4003228f4cb0b160d869e15f47d2 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Sat, 9 Sep 2023 21:20:17 -0400 Subject: [PATCH 086/172] test: enable v2 transport between nodes in some functional tests --- test/functional/test_framework/test_framework.py | 12 ++++++++++-- test/functional/test_runner.py | 10 ++++++++++ 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 669959dc8a89d..ab7fed335c6b2 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -189,6 +189,8 @@ def parse_args(self): parser.add_argument("--randomseed", type=int, help="set a random seed for deterministically reproducing a previous test run") parser.add_argument("--timeout-factor", dest="timeout_factor", type=float, help="adjust test timeouts by a factor. 
Setting it to 0 disables all timeouts") + parser.add_argument("--v2transport", dest="v2transport", default=False, action="store_true", + help="use BIP324 v2 connections between all nodes by default") self.add_options(parser) # Running TestShell in a Jupyter notebook causes an additional -f argument @@ -504,6 +506,9 @@ def get_bin_from_version(version, bin_name, bin_default): assert_equal(len(binary), num_nodes) assert_equal(len(binary_cli), num_nodes) for i in range(num_nodes): + args = list(extra_args[i]) + if self.options.v2transport and ("-v2transport=0" not in args): + args.append("-v2transport=1") test_node_i = TestNode( i, get_datadir_path(self.options.tmpdir, i), @@ -517,7 +522,7 @@ def get_bin_from_version(version, bin_name, bin_default): coverage_dir=self.options.coveragedir, cwd=self.options.tmpdir, extra_conf=extra_confs[i], - extra_args=extra_args[i], + extra_args=args, use_cli=self.options.usecli, start_perf=self.options.perf, use_valgrind=self.options.valgrind, @@ -581,13 +586,16 @@ def restart_node(self, i, extra_args=None): def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) - def connect_nodes(self, a, b, *, peer_advertises_v2=False): + def connect_nodes(self, a, b, *, peer_advertises_v2=None): from_connection = self.nodes[a] to_connection = self.nodes[b] from_num_peers = 1 + len(from_connection.getpeerinfo()) to_num_peers = 1 + len(to_connection.getpeerinfo()) ip_port = "127.0.0.1:" + str(p2p_port(b)) + if peer_advertises_v2 is None: + peer_advertises_v2 = self.options.v2transport + if peer_advertises_v2: from_connection.addnode(node=ip_port, command="onetry", v2transport=True) else: diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 4645557655106..933ea276e7bfd 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -117,6 +117,7 @@ 'wallet_backup.py --descriptors', 'feature_segwit.py --legacy-wallet', 'feature_segwit.py --descriptors', + 'feature_segwit.py --descriptors --v2transport', 'p2p_tx_download.py', 'wallet_avoidreuse.py --legacy-wallet', 'wallet_avoidreuse.py --descriptors', @@ -195,6 +196,7 @@ 'wallet_avoid_mixing_output_types.py --descriptors', 'mempool_reorg.py', 'p2p_block_sync.py', + 'p2p_block_sync.py --v2transport', 'wallet_createwallet.py --legacy-wallet', 'wallet_createwallet.py --usecli', 'wallet_createwallet.py --descriptors', @@ -221,10 +223,13 @@ 'wallet_transactiontime_rescan.py --legacy-wallet', 'p2p_addrv2_relay.py', 'p2p_compactblocks_hb.py', + 'p2p_compactblocks_hb.py --v2transport', 'p2p_disconnect_ban.py', + 'p2p_disconnect_ban.py --v2transport', 'feature_posix_fs_permissions.py', 'rpc_decodescript.py', 'rpc_blockchain.py', + 'rpc_blockchain.py --v2transport', 'rpc_deprecated.py', 'wallet_disable.py', 'wallet_change_address.py --legacy-wallet', @@ -245,7 +250,9 @@ 'mining_prioritisetransaction.py', 'p2p_invalid_locator.py', 'p2p_invalid_block.py', + 'p2p_invalid_block.py --v2transport', 'p2p_invalid_tx.py', + 'p2p_invalid_tx.py --v2transport', 'p2p_v2_transport.py', 'example_test.py', 'wallet_txn_doublespend.py --legacy-wallet', @@ -268,9 +275,12 @@ 'wallet_importprunedfunds.py --legacy-wallet', 'wallet_importprunedfunds.py --descriptors', 'p2p_leak_tx.py', + 'p2p_leak_tx.py --v2transport', 'p2p_eviction.py', 'p2p_ibd_stalling.py', + 'p2p_ibd_stalling.py --v2transport', 'p2p_net_deadlock.py', + 'p2p_net_deadlock.py --v2transport', 'wallet_signmessagewithaddress.py', 'rpc_signmessagewithprivkey.py', 'rpc_generate.py', From 
75a329103505736acb9036224da2dfa8ab038c43 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Wed, 27 Sep 2023 08:47:16 -0400 Subject: [PATCH 087/172] doc: mention BIP324 support in bips.md --- doc/bips.md | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/bips.md b/doc/bips.md index 94213f80482ac..952d289daa6fe 100644 --- a/doc/bips.md +++ b/doc/bips.md @@ -49,6 +49,7 @@ BIPs that are implemented by Bitcoin Core: * [`BIP 173`](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki): Bech32 addresses for native Segregated Witness outputs are supported as of **v0.16.0** ([PR 11167](https://github.com/bitcoin/bitcoin/pull/11167)). Bech32 addresses are generated by default as of **v0.20.0** ([PR 16884](https://github.com/bitcoin/bitcoin/pull/16884)). * [`BIP 174`](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki): RPCs to operate on Partially Signed Bitcoin Transactions (PSBT) are present as of **v0.17.0** ([PR 13557](https://github.com/bitcoin/bitcoin/pull/13557)). * [`BIP 176`](https://github.com/bitcoin/bips/blob/master/bip-0176.mediawiki): Bits Denomination [QT only] is supported as of **v0.16.0** ([PR 12035](https://github.com/bitcoin/bitcoin/pull/12035)). +* [`BIP 324`](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki): The v2 transport protocol specified by BIP324 and the associated `NODE_P2P_V2` service bit are supported as of **v26.0**, but off by default ([PR 28331](https://github.com/bitcoin/bitcoin/pull/28331)). * [`BIP 325`](https://github.com/bitcoin/bips/blob/master/bip-0325.mediawiki): Signet test network is supported as of **v0.21.0** ([PR 18267](https://github.com/bitcoin/bitcoin/pull/18267)). * [`BIP 339`](https://github.com/bitcoin/bips/blob/master/bip-0339.mediawiki): Relay of transactions by wtxid is supported as of **v0.21.0** ([PR 18044](https://github.com/bitcoin/bitcoin/pull/18044)). * [`BIP 340`](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki) From f08adec886febbfe038cedc32970c27c6f72bd5f Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Tue, 3 Oct 2023 10:18:51 +0100 Subject: [PATCH 088/172] qt: Add "Transport" label to peer details --- src/qt/forms/debugwindow.ui | 122 ++++++++++++++++++++++-------------- src/qt/rpcconsole.cpp | 15 ++++- 2 files changed, 87 insertions(+), 50 deletions(-) diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui index f1b66341d1b00..5cdba64378589 100644 --- a/src/qt/forms/debugwindow.ui +++ b/src/qt/forms/debugwindow.ui @@ -1085,6 +1085,32 @@ + + + The transport layer version: %1 + + + Transport + + + + + + + IBeamCursor + + + N/A + + + Qt::PlainText + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse + + + + The network protocol this peer is connected through: IPv4, IPv6, Onion, I2P, or CJDNS. @@ -1094,7 +1120,7 @@ - + IBeamCursor @@ -1110,14 +1136,14 @@ - + Version - + IBeamCursor @@ -1133,14 +1159,14 @@ - + User Agent - + IBeamCursor @@ -1156,14 +1182,14 @@ - + Services - + IBeamCursor @@ -1182,7 +1208,7 @@ - + Whether we relay transactions to this peer. 
@@ -1192,7 +1218,7 @@ - + IBeamCursor @@ -1208,7 +1234,7 @@ - + High bandwidth BIP152 compact block relay: %1 @@ -1218,7 +1244,7 @@ - + IBeamCursor @@ -1234,14 +1260,14 @@ - + Starting Block - + IBeamCursor @@ -1257,14 +1283,14 @@ - + Synced Headers - + IBeamCursor @@ -1280,14 +1306,14 @@ - + Synced Blocks - + IBeamCursor @@ -1303,14 +1329,14 @@ - + Connection Time - + IBeamCursor @@ -1326,7 +1352,7 @@ - + Elapsed time since a novel block passing initial validity checks was received from this peer. @@ -1336,7 +1362,7 @@ - + IBeamCursor @@ -1352,7 +1378,7 @@ - + Elapsed time since a novel transaction accepted into our mempool was received from this peer. @@ -1362,7 +1388,7 @@ - + IBeamCursor @@ -1378,14 +1404,14 @@ - + Last Send - + IBeamCursor @@ -1401,14 +1427,14 @@ - + Last Receive - + IBeamCursor @@ -1424,14 +1450,14 @@ - + Sent - + IBeamCursor @@ -1447,14 +1473,14 @@ - + Received - + IBeamCursor @@ -1470,14 +1496,14 @@ - + Ping Time - + IBeamCursor @@ -1493,7 +1519,7 @@ - + The duration of a currently outstanding ping. @@ -1503,7 +1529,7 @@ - + IBeamCursor @@ -1519,14 +1545,14 @@ - + Min Ping - + IBeamCursor @@ -1542,14 +1568,14 @@ - + Time Offset - + IBeamCursor @@ -1565,7 +1591,7 @@ - + The mapped Autonomous System used for diversifying peer selection. @@ -1575,7 +1601,7 @@ - + IBeamCursor @@ -1591,7 +1617,7 @@ - + Whether we relay addresses to this peer. @@ -1601,7 +1627,7 @@ - + IBeamCursor @@ -1617,7 +1643,7 @@ - + The total number of addresses received from this peer that were processed (excludes addresses that were dropped due to rate-limiting). @@ -1627,7 +1653,7 @@ - + IBeamCursor @@ -1643,7 +1669,7 @@ - + The total number of addresses received from this peer that were dropped (not processed) due to rate-limiting. @@ -1653,7 +1679,7 @@ - + IBeamCursor @@ -1669,7 +1695,7 @@ - + Qt::Vertical diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index cb4ecfb6fbede..27d460f8e155f 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -514,8 +515,17 @@ RPCConsole::RPCConsole(interfaces::Node& node, const PlatformStyle *_platformSty /*: Explanatory text for a short-lived outbound peer connection that is used to request addresses from a peer. */ tr("Outbound Address Fetch: short-lived, for soliciting addresses")}; - const QString list{"
  • " + Join(CONNECTION_TYPE_DOC, QString("
  • ")) + "
"}; - ui->peerConnectionTypeLabel->setToolTip(ui->peerConnectionTypeLabel->toolTip().arg(list)); + const QString connection_types_list{"
  • " + Join(CONNECTION_TYPE_DOC, QString("
  • ")) + "
"}; + ui->peerConnectionTypeLabel->setToolTip(ui->peerConnectionTypeLabel->toolTip().arg(connection_types_list)); + const std::vector TRANSPORT_TYPE_DOC{ + //: Explanatory text for "detecting" transport type. + tr("detecting: peer could be v1 or v2"), + //: Explanatory text for v1 transport type. + tr("v1: unencrypted, plaintext transport protocol"), + //: Explanatory text for v2 transport type. + tr("v2: BIP324 encrypted transport protocol")}; + const QString transport_types_list{"
  • " + Join(TRANSPORT_TYPE_DOC, QString("
  • ")) + "
"}; + ui->peerTransportTypeLabel->setToolTip(ui->peerTransportTypeLabel->toolTip().arg(transport_types_list)); const QString hb_list{"
  • \"" + ts.to + "\" – " + tr("we selected the peer for high bandwidth relay") + "
  • \"" + ts.from + "\" – " + tr("the peer selected us for high bandwidth relay") + "
  • \"" @@ -1191,6 +1201,7 @@ void RPCConsole::updateDetailWidget() ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer)); } ui->peerConnectionType->setText(GUIUtil::ConnectionTypeToQString(stats->nodeStats.m_conn_type, /*prepend_direction=*/true)); + ui->peerTransportType->setText(QString::fromStdString(TransportTypeAsString(stats->nodeStats.m_transport_type))); ui->peerNetwork->setText(GUIUtil::NetworkToQString(stats->nodeStats.m_network)); if (stats->nodeStats.m_permission_flags == NetPermissionFlags::None) { ui->peerPermissions->setText(ts.na); From 87c706713e5d1c78bad943a42bf7c69047d28ea5 Mon Sep 17 00:00:00 2001 From: dergoegge Date: Mon, 2 Oct 2023 14:11:55 +0100 Subject: [PATCH 089/172] [net processing] PeerManager holds a FastRandomContext --- src/net_processing.cpp | 5 ++++- src/net_processing.h | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index b046b3ac168c7..8963424c9bea1 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -695,6 +695,8 @@ class PeerManagerImpl final : public PeerManager /** Send `feefilter` message. */ void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); + FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex); + const CChainParams& m_chainparams; CConnman& m_connman; AddrMan& m_addrman; @@ -1808,7 +1810,8 @@ std::unique_ptr PeerManager::make(CConnman& connman, AddrMan& addrm PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, Options opts) - : m_chainparams(chainman.GetParams()), + : m_rng{opts.deterministic_rng}, + m_chainparams(chainman.GetParams()), m_connman(connman), m_addrman(addrman), m_banman(banman), diff --git a/src/net_processing.h b/src/net_processing.h index 837e308617383..80d07648a48ef 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -58,6 +58,9 @@ class PeerManager : public CValidationInterface, public NetEventsInterface uint32_t max_extra_txs{DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN}; //! Whether all P2P messages are captured to disk bool capture_messages{false}; + //! Whether or not the internal RNG behaves deterministically (this is + //! a test-only option). 
+ bool deterministic_rng{false}; }; static std::unique_ptr make(CConnman& connman, AddrMan& addrman, From a648dd79e5ebfdb627d0221b1207862efb664dfc Mon Sep 17 00:00:00 2001 From: dergoegge Date: Mon, 2 Oct 2023 14:13:05 +0100 Subject: [PATCH 090/172] [net processing] PushAddress uses PeerManager's rng --- src/net_processing.cpp | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 8963424c9bea1..77d7f168bf9f8 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1022,7 +1022,7 @@ class PeerManagerImpl final : public PeerManager bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); - void PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); + void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); }; const CNodeState* PeerManagerImpl::State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main) @@ -1054,7 +1054,7 @@ void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr) peer.m_addr_known->insert(addr.GetKey()); } -void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr, FastRandomContext& insecure_rand) +void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr) { // Known checking here is only to save space from duplicates. // Before sending, we'll filter it again for known addresses that were @@ -1062,7 +1062,7 @@ void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr, FastRandomCo assert(peer.m_addr_known); if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) { if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) { - peer.m_addrs_to_send[insecure_rand.randrange(peer.m_addrs_to_send.size())] = addr; + peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr; } else { peer.m_addrs_to_send.push_back(addr); } @@ -2108,7 +2108,6 @@ void PeerManagerImpl::RelayAddress(NodeId originator, const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY) .Write(hash_addr) .Write(time_addr)}; - FastRandomContext insecure_rand; // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers. unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 
2 : 1; @@ -2132,7 +2131,7 @@ void PeerManagerImpl::RelayAddress(NodeId originator, }; for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) { - PushAddress(*best[i].second, addr, insecure_rand); + PushAddress(*best[i].second, addr); } } @@ -4657,9 +4656,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } else { vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND); } - FastRandomContext insecure_rand; for (const CAddress &addr : vAddr) { - PushAddress(*peer, addr, insecure_rand); + PushAddress(*peer, addr); } return; } @@ -5261,8 +5259,7 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros } if (std::optional local_service = GetLocalAddrForPeer(node)) { CAddress local_addr{*local_service, peer.m_our_services, Now()}; - FastRandomContext insecure_rand; - PushAddress(peer, local_addr, insecure_rand); + PushAddress(peer, local_addr); } peer.m_next_local_addr_send = GetExponentialRand(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } From 77506f4ac6b3a3d7396a3a6101345019e05b3b10 Mon Sep 17 00:00:00 2001 From: dergoegge Date: Mon, 2 Oct 2023 14:14:37 +0100 Subject: [PATCH 091/172] [net processing] Addr shuffle uses PeerManager's rng --- src/net_processing.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 77d7f168bf9f8..ca8bd48225282 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3714,7 +3714,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr); uint64_t num_proc = 0; uint64_t num_rate_limit = 0; - Shuffle(vAddr.begin(), vAddr.end(), FastRandomContext()); + Shuffle(vAddr.begin(), vAddr.end(), m_rng); for (CAddress& addr : vAddr) { if (interruptMsgProc) From 848eec09363d1ba8198376eb9654b1a69e3541aa Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 16 Aug 2023 15:37:30 +0100 Subject: [PATCH 092/172] depends: fix unusable memory_resource in macos qt build See https://codereview.qt-project.org/c/qt/qtbase/+/482392. 
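The underlying problem is that the macOS SDK headers can advertise the __cpp_lib_memory_resource feature-test macro even though, per the patch below, the std::pmr runtime support only ships with macOS 14 and iOS 17, so code that trusts the macro alone may fail to link or run on older targets. As a minimal sketch of the kind of guarded use this makes safe (this example is illustrative only and not part of the patch; it assumes nothing beyond standard C++17):

    #include <vector>
    #if defined(__cpp_lib_memory_resource)
    #include <memory_resource>
    #endif

    int main()
    {
    #if defined(__cpp_lib_memory_resource)
        // Allocate a handful of ints from a monotonic arena instead of the heap.
        std::pmr::monotonic_buffer_resource arena;
        std::pmr::vector<int> values{&arena};
        for (int i = 0; i < 10; ++i) values.push_back(i);
    #endif
        return 0;
    }

Undefining __cpp_lib_memory_resource on affected targets, as the patch does, makes guards like this fall back to the portable path instead of selecting an unusable implementation.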
--- depends/packages/qt.mk | 2 + depends/patches/qt/memory_resource.patch | 49 ++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 depends/patches/qt/memory_resource.patch diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index b898bf2713b5c..7b4ee64776e9d 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -23,6 +23,7 @@ $(package)_patches += duplicate_lcqpafonts.patch $(package)_patches += fast_fixed_dtoa_no_optimize.patch $(package)_patches += guix_cross_lib_path.patch $(package)_patches += fix-macos-linker.patch +$(package)_patches += memory_resource.patch $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) $(package)_qttranslations_sha256_hash=c92af4171397a0ed272330b4fa0669790fcac8d050b07c8b8cc565ebeba6735e @@ -248,6 +249,7 @@ define $(package)_preprocess_cmds patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \ patch -p1 -i $($(package)_patch_dir)/fix_montery_include.patch && \ patch -p1 -i $($(package)_patch_dir)/use_android_ndk23.patch && \ + patch -p1 -i $($(package)_patch_dir)/memory_resource.patch && \ patch -p1 -i $($(package)_patch_dir)/rcc_hardcode_timestamp.patch && \ patch -p1 -i $($(package)_patch_dir)/duplicate_lcqpafonts.patch && \ patch -p1 -i $($(package)_patch_dir)/fast_fixed_dtoa_no_optimize.patch && \ diff --git a/depends/patches/qt/memory_resource.patch b/depends/patches/qt/memory_resource.patch new file mode 100644 index 0000000000000..e41d68db30171 --- /dev/null +++ b/depends/patches/qt/memory_resource.patch @@ -0,0 +1,49 @@ +Fix unusable memory_resource on macos + +See https://bugreports.qt.io/browse/QTBUG-117484 +and https://bugreports.qt.io/browse/QTBUG-114316 + +--- a/qtbase/src/corelib/tools/qduplicatetracker_p.h ++++ b/qtbase/src/corelib/tools/qduplicatetracker_p.h +@@ -52,7 +52,7 @@ + + #include + +-#if QT_HAS_INCLUDE() && __cplusplus > 201402L ++#ifdef __cpp_lib_memory_resource + # include + # include + #else + +--- a/qtbase/src/corelib/global/qcompilerdetection.h ++++ b/qtbase/src/corelib/global/qcompilerdetection.h +@@ -1041,16 +1041,22 @@ + # endif // !_HAS_CONSTEXPR + # endif // !__GLIBCXX__ && !_LIBCPP_VERSION + # endif // Q_OS_QNX +-# if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) && defined(__GNUC_LIBSTD__) \ +- && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402) ++# if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) ++# if defined(__GNUC_LIBSTD__) && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402) + // Apple has not updated libstdc++ since 2007, which means it does not have + // or std::move. 
Let's disable these features +-# undef Q_COMPILER_INITIALIZER_LISTS +-# undef Q_COMPILER_RVALUE_REFS +-# undef Q_COMPILER_REF_QUALIFIERS ++# undef Q_COMPILER_INITIALIZER_LISTS ++# undef Q_COMPILER_RVALUE_REFS ++# undef Q_COMPILER_REF_QUALIFIERS + // Also disable , since it's clearly not there +-# undef Q_COMPILER_ATOMICS +-# endif ++# undef Q_COMPILER_ATOMICS ++# endif ++# if defined(__cpp_lib_memory_resource) \ ++ && ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 140000) \ ++ || (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 170000)) ++# undef __cpp_lib_memory_resource // Only supported on macOS 14 and iOS 17 ++# endif ++# endif // (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) + # if defined(Q_CC_CLANG) && defined(Q_CC_INTEL) && Q_CC_INTEL >= 1500 + // ICC 15.x and 16.0 have their own implementation of std::atomic, which is activated when in Clang mode + // (probably because libc++'s on OS X failed to compile), but they're missing some From 41f9027813f849a9fd6a1479bbb74b9037990c3c Mon Sep 17 00:00:00 2001 From: stickies-v Date: Fri, 29 Sep 2023 15:24:03 +0100 Subject: [PATCH 093/172] http: refactor: use encapsulated HTTPRequestTracker Introduces and uses a HTTPRequestTracker class to keep track of how many HTTP requests are currently active, so we don't stop the server before they're all handled. This has two purposes: 1. In a next commit, allows us to untrack all requests associated with a connection without running into lifetime issues of the connection living longer than the request (see https://github.com/bitcoin/bitcoin/pull/27909#discussion_r1265614783) 2. Improve encapsulation by making the mutex and cv internal members, and exposing just the WaitUntilEmpty() method that can be safely used. --- src/httpserver.cpp | 86 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 70 insertions(+), 16 deletions(-) diff --git a/src/httpserver.cpp b/src/httpserver.cpp index a83f4421d75c0..3ad329a6690a9 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -17,6 +17,7 @@ #include // For HTTP status codes #include #include +#include #include #include #include @@ -26,9 +27,10 @@ #include #include #include +#include #include #include -#include +#include #include #include @@ -149,10 +151,68 @@ static GlobalMutex g_httppathhandlers_mutex; static std::vector pathHandlers GUARDED_BY(g_httppathhandlers_mutex); //! Bound listening sockets static std::vector boundSockets; + +/** + * @brief Helps keep track of open `evhttp_connection`s with active `evhttp_requests` + * + */ +class HTTPRequestTracker +{ +private: + mutable Mutex m_mutex; + mutable std::condition_variable m_cv; + //! For each connection, keep a counter of how many requests are open + std::unordered_map m_tracker GUARDED_BY(m_mutex); + + void RemoveConnectionInternal(const decltype(m_tracker)::iterator it) EXCLUSIVE_LOCKS_REQUIRED(m_mutex) + { + m_tracker.erase(it); + if (m_tracker.empty()) m_cv.notify_all(); + } +public: + //! Increase request counter for the associated connection by 1 + void AddRequest(evhttp_request* req) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + const evhttp_connection* conn{Assert(evhttp_request_get_connection(Assert(req)))}; + WITH_LOCK(m_mutex, ++m_tracker[conn]); + } + //! 
Decrease request counter for the associated connection by 1, remove connection if counter is 0 + void RemoveRequest(evhttp_request* req) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + const evhttp_connection* conn{Assert(evhttp_request_get_connection(Assert(req)))}; + LOCK(m_mutex); + auto it{m_tracker.find(conn)}; + if (it != m_tracker.end() && it->second > 0) { + if (--(it->second) == 0) RemoveConnectionInternal(it); + } + } + //! Remove a connection entirely + void RemoveConnection(const evhttp_connection* conn) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + LOCK(m_mutex); + auto it{m_tracker.find(Assert(conn))}; + if (it != m_tracker.end()) RemoveConnectionInternal(it); + } + + size_t CountActiveRequests() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + LOCK(m_mutex); + return std::accumulate(m_tracker.begin(), m_tracker.end(), size_t(0), + [](size_t acc_count, const auto& pair) { return acc_count + pair.second; }); + } + size_t CountActiveConnections() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + return WITH_LOCK(m_mutex, return m_tracker.size()); + } + //! Wait until there are no more connections with active requests in the tracker + void WaitUntilEmpty() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) + { + WAIT_LOCK(m_mutex, lock); + m_cv.wait(lock, [this]() EXCLUSIVE_LOCKS_REQUIRED(m_mutex) { return m_tracker.empty(); }); + } +}; //! Track active requests -static GlobalMutex g_requests_mutex; -static std::condition_variable g_requests_cv; -static std::unordered_set g_requests GUARDED_BY(g_requests_mutex); +static HTTPRequestTracker g_requests; /** Check if a network address is allowed to access the HTTP server */ static bool ClientAllowed(const CNetAddr& netaddr) @@ -210,14 +270,11 @@ std::string RequestMethodString(HTTPRequest::RequestMethod m) /** HTTP request callback */ static void http_request_cb(struct evhttp_request* req, void* arg) { - // Track requests and notify when a request is completed. + // Track active requests { - WITH_LOCK(g_requests_mutex, g_requests.insert(req)); - g_requests_cv.notify_all(); + g_requests.AddRequest(req); evhttp_request_set_on_complete_cb(req, [](struct evhttp_request* req, void*) { - auto n{WITH_LOCK(g_requests_mutex, return g_requests.erase(req))}; - assert(n == 1); - g_requests_cv.notify_all(); + g_requests.RemoveRequest(req); }, nullptr); } @@ -473,13 +530,10 @@ void StopHTTPServer() } boundSockets.clear(); { - WAIT_LOCK(g_requests_mutex, lock); - if (!g_requests.empty()) { - LogPrint(BCLog::HTTP, "Waiting for %d requests to stop HTTP server\n", g_requests.size()); + if (g_requests.CountActiveConnections() != 0) { + LogPrint(BCLog::HTTP, "Waiting for %d requests to stop HTTP server\n", g_requests.CountActiveRequests()); } - g_requests_cv.wait(lock, []() EXCLUSIVE_LOCKS_REQUIRED(g_requests_mutex) { - return g_requests.empty(); - }); + g_requests.WaitUntilEmpty(); } if (eventHTTP) { // Schedule a callback to call evhttp_free in the event base thread, so From 084d0372311e658a486622f720d2b827d8416591 Mon Sep 17 00:00:00 2001 From: stickies-v Date: Fri, 29 Sep 2023 15:38:51 +0100 Subject: [PATCH 094/172] http: log connection instead of request count There is no significant benefit in logging the request count instead of the connection count. Reduces amount of code and computational complexity. 
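The HTTPRequestTracker introduced in the previous patch boils down to a mutex-guarded map plus a condition variable that is signalled only when the map drains to empty, so WaitUntilEmpty() wakes rarely and cannot miss the transition. A simplified stand-alone sketch of that pattern (class and method names here are illustrative; the real class also distinguishes per-connection request counts):

    #include <condition_variable>
    #include <mutex>
    #include <unordered_map>

    class ConnectionCounter
    {
        mutable std::mutex m_mutex;
        mutable std::condition_variable m_cv;
        std::unordered_map<const void*, size_t> m_tracker;

    public:
        void Add(const void* conn)
        {
            std::lock_guard<std::mutex> lock{m_mutex};
            ++m_tracker[conn];
        }
        void Remove(const void* conn)
        {
            std::lock_guard<std::mutex> lock{m_mutex};
            auto it = m_tracker.find(conn);
            if (it != m_tracker.end() && --(it->second) == 0) {
                m_tracker.erase(it);
                // Notify only on the empty transition; waiters re-check the predicate.
                if (m_tracker.empty()) m_cv.notify_all();
            }
        }
        void WaitUntilEmpty() const
        {
            std::unique_lock<std::mutex> lock{m_mutex};
            m_cv.wait(lock, [this] { return m_tracker.empty(); });
        }
    };

Waiting on a predicate inside m_cv.wait() also makes spurious wakeups harmless, which is why the shutdown path can block on it unconditionally.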
--- src/httpserver.cpp | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 3ad329a6690a9..995c446b582ed 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -193,13 +192,6 @@ class HTTPRequestTracker auto it{m_tracker.find(Assert(conn))}; if (it != m_tracker.end()) RemoveConnectionInternal(it); } - - size_t CountActiveRequests() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) - { - LOCK(m_mutex); - return std::accumulate(m_tracker.begin(), m_tracker.end(), size_t(0), - [](size_t acc_count, const auto& pair) { return acc_count + pair.second; }); - } size_t CountActiveConnections() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) { return WITH_LOCK(m_mutex, return m_tracker.size()); @@ -530,8 +522,8 @@ void StopHTTPServer() } boundSockets.clear(); { - if (g_requests.CountActiveConnections() != 0) { - LogPrint(BCLog::HTTP, "Waiting for %d requests to stop HTTP server\n", g_requests.CountActiveRequests()); + if (const auto n_connections{g_requests.CountActiveConnections()}; n_connections != 0) { + LogPrint(BCLog::HTTP, "Waiting for %d connections to stop HTTP server\n", n_connections); } g_requests.WaitUntilEmpty(); } From 68f23f57d77bc172ed39ecdd4d2d5cd5e13cf483 Mon Sep 17 00:00:00 2001 From: stickies-v Date: Fri, 29 Sep 2023 15:24:14 +0100 Subject: [PATCH 095/172] http: bugfix: track closed connection It is possible that the client disconnects before the request is handled. In those cases, evhttp_request_set_on_complete_cb is never called, which means that on shutdown the server we'll keep waiting endlessly. By adding evhttp_connection_set_closecb, libevent automatically cleans up those dead connections at latest when we shutdown, and depending on the libevent version already at the moment of remote client disconnect. In both cases, the bug is fixed. --- src/httpserver.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 995c446b582ed..069511563cc82 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -262,19 +262,22 @@ std::string RequestMethodString(HTTPRequest::RequestMethod m) /** HTTP request callback */ static void http_request_cb(struct evhttp_request* req, void* arg) { + evhttp_connection* conn{evhttp_request_get_connection(req)}; // Track active requests { g_requests.AddRequest(req); evhttp_request_set_on_complete_cb(req, [](struct evhttp_request* req, void*) { g_requests.RemoveRequest(req); }, nullptr); + evhttp_connection_set_closecb(conn, [](evhttp_connection* conn, void* arg) { + g_requests.RemoveConnection(conn); + }, nullptr); } // Disable reading to work around a libevent bug, fixed in 2.1.9 // See https://github.com/libevent/libevent/commit/5ff8eb26371c4dc56f384b2de35bea2d87814779 // and https://github.com/bitcoin/bitcoin/pull/11593. 
if (event_get_version_number() >= 0x02010600 && event_get_version_number() < 0x02010900) { - evhttp_connection* conn = evhttp_request_get_connection(req); if (conn) { bufferevent* bev = evhttp_connection_get_bufferevent(conn); if (bev) { From fac054d24c4b6fe3370e458ad6b626f98bd018d6 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Tue, 3 Oct 2023 16:38:56 +0200 Subject: [PATCH 096/172] ci: Print Linux kernel info --- ci/test/06_script_b.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh index f5ba48e455961..89af61b87fc4e 100755 --- a/ci/test/06_script_b.sh +++ b/ci/test/06_script_b.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2018-2022 The Bitcoin Core developers +# Copyright (c) 2018-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -19,6 +19,7 @@ if [ "$CI_OS_NAME" == "macos" ]; then else free -m -h echo "Number of CPUs (nproc): $(nproc)" + echo "System info: $(uname --kernel-name --kernel-release)" lscpu fi echo "Free disk space:" From bdee8589644fac121320e95f53457c3ddfc71e1b Mon Sep 17 00:00:00 2001 From: Erik McKelvey Date: Tue, 3 Oct 2023 11:22:46 -0700 Subject: [PATCH 097/172] typo: in packages.md --- doc/policy/packages.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/policy/packages.md b/doc/policy/packages.md index 399ae945d52db..dba270e494067 100644 --- a/doc/policy/packages.md +++ b/doc/policy/packages.md @@ -28,7 +28,7 @@ The following rules are enforced for all packages: - Note that, if these mempool limits change, package limits should be reconsidered. Users may also configure their mempool limits differently. - - Note that the this is transaction weight, not "virtual" size as with other limits to allow + - Note that this is transaction weight, not "virtual" size as with other limits to allow simpler context-less checks. * Packages must be topologically sorted. (#20833) From d9c4e344d70bbf31ccb7162d83d4bd25762bc678 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Tue, 3 Oct 2023 10:19:09 +0100 Subject: [PATCH 098/172] qt: Add "Session id" label to peer details --- src/qt/forms/debugwindow.ui | 122 ++++++++++++++++++++++-------------- src/qt/rpcconsole.cpp | 8 +++ 2 files changed, 82 insertions(+), 48 deletions(-) diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui index 5cdba64378589..60e9bcde337ad 100644 --- a/src/qt/forms/debugwindow.ui +++ b/src/qt/forms/debugwindow.ui @@ -1111,6 +1111,32 @@ + + + The BIP324 session ID string in hex, if any. + + + Session ID + + + + + + + IBeamCursor + + + N/A + + + Qt::PlainText + + + Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse + + + + The network protocol this peer is connected through: IPv4, IPv6, Onion, I2P, or CJDNS. @@ -1120,7 +1146,7 @@ - + IBeamCursor @@ -1136,14 +1162,14 @@ - + Version - + IBeamCursor @@ -1159,14 +1185,14 @@ - + User Agent - + IBeamCursor @@ -1182,14 +1208,14 @@ - + Services - + IBeamCursor @@ -1208,7 +1234,7 @@ - + Whether we relay transactions to this peer. 
@@ -1218,7 +1244,7 @@ - + IBeamCursor @@ -1234,7 +1260,7 @@ - + High bandwidth BIP152 compact block relay: %1 @@ -1244,7 +1270,7 @@ - + IBeamCursor @@ -1260,14 +1286,14 @@ - + Starting Block - + IBeamCursor @@ -1283,14 +1309,14 @@ - + Synced Headers - + IBeamCursor @@ -1306,14 +1332,14 @@ - + Synced Blocks - + IBeamCursor @@ -1329,14 +1355,14 @@ - + Connection Time - + IBeamCursor @@ -1352,7 +1378,7 @@ - + Elapsed time since a novel block passing initial validity checks was received from this peer. @@ -1362,7 +1388,7 @@ - + IBeamCursor @@ -1378,7 +1404,7 @@ - + Elapsed time since a novel transaction accepted into our mempool was received from this peer. @@ -1388,7 +1414,7 @@ - + IBeamCursor @@ -1404,14 +1430,14 @@ - + Last Send - + IBeamCursor @@ -1427,14 +1453,14 @@ - + Last Receive - + IBeamCursor @@ -1450,14 +1476,14 @@ - + Sent - + IBeamCursor @@ -1473,14 +1499,14 @@ - + Received - + IBeamCursor @@ -1496,14 +1522,14 @@ - + Ping Time - + IBeamCursor @@ -1519,7 +1545,7 @@ - + The duration of a currently outstanding ping. @@ -1529,7 +1555,7 @@ - + IBeamCursor @@ -1545,14 +1571,14 @@ - + Min Ping - + IBeamCursor @@ -1568,14 +1594,14 @@ - + Time Offset - + IBeamCursor @@ -1591,7 +1617,7 @@ - + The mapped Autonomous System used for diversifying peer selection. @@ -1601,7 +1627,7 @@ - + IBeamCursor @@ -1617,7 +1643,7 @@ - + Whether we relay addresses to this peer. @@ -1627,7 +1653,7 @@ - + IBeamCursor @@ -1643,7 +1669,7 @@ - + The total number of addresses received from this peer that were processed (excludes addresses that were dropped due to rate-limiting). @@ -1653,7 +1679,7 @@ - + IBeamCursor @@ -1669,7 +1695,7 @@ - + The total number of addresses received from this peer that were dropped (not processed) due to rate-limiting. @@ -1679,7 +1705,7 @@ - + IBeamCursor @@ -1695,7 +1721,7 @@ - + Qt::Vertical diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index 27d460f8e155f..998a4e5cbe25e 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -1202,6 +1202,14 @@ void RPCConsole::updateDetailWidget() } ui->peerConnectionType->setText(GUIUtil::ConnectionTypeToQString(stats->nodeStats.m_conn_type, /*prepend_direction=*/true)); ui->peerTransportType->setText(QString::fromStdString(TransportTypeAsString(stats->nodeStats.m_transport_type))); + if (stats->nodeStats.m_transport_type == TransportProtocolType::V2) { + ui->peerSessionIdLabel->setVisible(true); + ui->peerSessionId->setVisible(true); + ui->peerSessionId->setText(QString::fromStdString(stats->nodeStats.m_session_id)); + } else { + ui->peerSessionIdLabel->setVisible(false); + ui->peerSessionId->setVisible(false); + } ui->peerNetwork->setText(GUIUtil::NetworkToQString(stats->nodeStats.m_network)); if (stats->nodeStats.m_permission_flags == NetPermissionFlags::None) { ui->peerPermissions->setText(ts.na); From 47520ed209d9341702a0fb6006bee6f63f7da42e Mon Sep 17 00:00:00 2001 From: dergoegge Date: Mon, 2 Oct 2023 14:28:21 +0100 Subject: [PATCH 099/172] [net processing] Make fee filter rounder non-global --- src/net_processing.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index ca8bd48225282..b38965c0b841e 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -697,6 +697,8 @@ class PeerManagerImpl final : public PeerManager FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex); + FeeFilterRounder m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex); + const CChainParams& m_chainparams; CConnman& 
m_connman; AddrMan& m_addrman; @@ -1811,6 +1813,7 @@ PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, Options opts) : m_rng{opts.deterministic_rng}, + m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}}, m_chainparams(chainman.GetParams()), m_connman(connman), m_addrman(addrman), @@ -5338,14 +5341,13 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::mi if (pto.IsBlockOnlyConn()) return; CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK(); - static FeeFilterRounder g_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}}; if (m_chainman.IsInitialBlockDownload()) { // Received tx-inv messages are discarded when the active // chainstate is in IBD, so tell the peer to not send them. currentFilter = MAX_MONEY; } else { - static const CAmount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)}; + static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)}; if (peer.m_fee_filter_sent == MAX_FILTER) { // Send the current filter if we sent MAX_FILTER previously // and made it out of IBD. @@ -5353,7 +5355,7 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::mi } } if (current_time > peer.m_next_send_feefilter) { - CAmount filterToSend = g_filter_rounder.round(currentFilter); + CAmount filterToSend = m_fee_filter_rounder.round(currentFilter); // We always have a fee filter of at least the min relay fee filterToSend = std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK()); if (filterToSend != peer.m_fee_filter_sent) { From fecec3e1c661ba273470ecc5ef12d4c070b53050 Mon Sep 17 00:00:00 2001 From: dergoegge Date: Mon, 2 Oct 2023 14:21:35 +0100 Subject: [PATCH 100/172] [net processing] FeeFilterRounder doesn't own a FastRandomContext --- src/net_processing.cpp | 2 +- src/policy/fees.cpp | 5 +++-- src/policy/fees.h | 4 ++-- src/test/fuzz/fees.cpp | 3 ++- src/test/policy_fee_tests.cpp | 3 ++- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index b38965c0b841e..d58db13424926 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1813,7 +1813,7 @@ PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman, BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, Options opts) : m_rng{opts.deterministic_rng}, - m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}}, + m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng}, m_chainparams(chainman.GetParams()), m_connman(connman), m_addrman(addrman), diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index 553c88fddc3bf..87bfa4cfc3040 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -1054,8 +1054,9 @@ static std::set MakeFeeSet(const CFeeRate& min_incremental_fee, return fee_set; } -FeeFilterRounder::FeeFilterRounder(const CFeeRate& minIncrementalFee) - : m_fee_set{MakeFeeSet(minIncrementalFee, MAX_FILTER_FEERATE, FEE_FILTER_SPACING)} +FeeFilterRounder::FeeFilterRounder(const CFeeRate& minIncrementalFee, FastRandomContext& rng) + : m_fee_set{MakeFeeSet(minIncrementalFee, MAX_FILTER_FEERATE, FEE_FILTER_SPACING)}, + insecure_rand{rng} { } diff --git a/src/policy/fees.h b/src/policy/fees.h index 8ed13482e924c..69bda195be449 100644 --- a/src/policy/fees.h +++ b/src/policy/fees.h @@ -320,7 +320,7 @@ class FeeFilterRounder public: /** Create new FeeFilterRounder */ - explicit FeeFilterRounder(const CFeeRate& min_incremental_fee); + explicit FeeFilterRounder(const CFeeRate& 
min_incremental_fee, FastRandomContext& rng); /** Quantize a minimum fee for privacy purpose before broadcast. */ CAmount round(CAmount currentMinFee) EXCLUSIVE_LOCKS_REQUIRED(!m_insecure_rand_mutex); @@ -328,7 +328,7 @@ class FeeFilterRounder private: const std::set m_fee_set; Mutex m_insecure_rand_mutex; - FastRandomContext insecure_rand GUARDED_BY(m_insecure_rand_mutex); + FastRandomContext& insecure_rand GUARDED_BY(m_insecure_rand_mutex); }; #endif // BITCOIN_POLICY_FEES_H diff --git a/src/test/fuzz/fees.cpp b/src/test/fuzz/fees.cpp index deb0ed65caef3..38a8c6798ecea 100644 --- a/src/test/fuzz/fees.cpp +++ b/src/test/fuzz/fees.cpp @@ -17,7 +17,8 @@ FUZZ_TARGET(fees) { FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); const CFeeRate minimal_incremental_fee{ConsumeMoney(fuzzed_data_provider)}; - FeeFilterRounder fee_filter_rounder{minimal_incremental_fee}; + FastRandomContext rng{/*fDeterministic=*/true}; + FeeFilterRounder fee_filter_rounder{minimal_incremental_fee, rng}; LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) { const CAmount current_minimum_fee = ConsumeMoney(fuzzed_data_provider); const CAmount rounded_fee = fee_filter_rounder.round(current_minimum_fee); diff --git a/src/test/policy_fee_tests.cpp b/src/test/policy_fee_tests.cpp index 25fb5343e3201..29d70cb5f859c 100644 --- a/src/test/policy_fee_tests.cpp +++ b/src/test/policy_fee_tests.cpp @@ -13,7 +13,8 @@ BOOST_AUTO_TEST_SUITE(policy_fee_tests) BOOST_AUTO_TEST_CASE(FeeRounder) { - FeeFilterRounder fee_rounder{CFeeRate{1000}}; + FastRandomContext rng{/*fDeterministic=*/true}; + FeeFilterRounder fee_rounder{CFeeRate{1000}, rng}; // check that 1000 rounds to 974 or 1071 std::set results; From 4cafe9f176e93ebb6c38abb12140e8d8be005cbf Mon Sep 17 00:00:00 2001 From: dergoegge Date: Mon, 2 Oct 2023 14:41:08 +0100 Subject: [PATCH 101/172] [test] Make PeerManager's rng deterministic in tests --- src/test/util/setup_common.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 2947bc3fcb2ea..e27d5a27ad099 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -256,6 +256,7 @@ TestingSetup::TestingSetup( m_node.connman = std::make_unique(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman, Params()); // Deterministic randomness for tests. 
PeerManager::Options peerman_opts; ApplyArgsManOptions(*m_node.args, peerman_opts); + peerman_opts.deterministic_rng = true; m_node.peerman = PeerManager::make(*m_node.connman, *m_node.addrman, m_node.banman.get(), *m_node.chainman, *m_node.mempool, peerman_opts); From 6988a2f097e9af50e1b4222550b2593bfc5685ea Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Wed, 4 Oct 2023 14:00:57 +0100 Subject: [PATCH 102/172] build: Update qt package up to 5.15.10 --- .github/workflows/ci.yml | 4 +- build_msvc/README.md | 2 +- depends/packages/qt.mk | 12 +- depends/patches/qt/dont_hardcode_x86_64.patch | 119 ------------------ depends/patches/qt/fix-macos-linker.patch | 4 +- .../patches/qt/fix_android_jni_static.patch | 2 +- depends/patches/qt/fix_montery_include.patch | 21 ---- depends/patches/qt/memory_resource.patch | 2 +- depends/patches/qt/no-xlib.patch | 20 +-- doc/dependencies.md | 2 +- 10 files changed, 18 insertions(+), 170 deletions(-) delete mode 100644 depends/patches/qt/dont_hardcode_x86_64.patch delete mode 100644 depends/patches/qt/fix_montery_include.patch diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 28a027b780639..cbd83530f3975 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -105,8 +105,8 @@ jobs: CCACHE_MAXSIZE: '200M' CI_CCACHE_VERSION: '4.7.5' CI_QT_CONF: '-release -silent -opensource -confirm-license -opengl desktop -static -static-runtime -mp -qt-zlib -qt-pcre -qt-libpng -nomake examples -nomake tests -nomake tools -no-angle -no-dbus -no-gif -no-gtk -no-ico -no-icu -no-libjpeg -no-libudev -no-sql-sqlite -no-sql-odbc -no-sqlite -no-vulkan -skip qt3d -skip qtactiveqt -skip qtandroidextras -skip qtcharts -skip qtconnectivity -skip qtdatavis3d -skip qtdeclarative -skip doc -skip qtdoc -skip qtgamepad -skip qtgraphicaleffects -skip qtimageformats -skip qtlocation -skip qtlottie -skip qtmacextras -skip qtmultimedia -skip qtnetworkauth -skip qtpurchasing -skip qtquick3d -skip qtquickcontrols -skip qtquickcontrols2 -skip qtquicktimeline -skip qtremoteobjects -skip qtscript -skip qtscxml -skip qtsensors -skip qtserialbus -skip qtserialport -skip qtspeech -skip qtsvg -skip qtvirtualkeyboard -skip qtwayland -skip qtwebchannel -skip qtwebengine -skip qtwebglplugin -skip qtwebsockets -skip qtwebview -skip qtx11extras -skip qtxmlpatterns -no-openssl -no-feature-bearermanagement -no-feature-printdialog -no-feature-printer -no-feature-printpreviewdialog -no-feature-printpreviewwidget -no-feature-sql -no-feature-sqlmodel -no-feature-textbrowser -no-feature-textmarkdownwriter -no-feature-textodfwriter -no-feature-xml' - CI_QT_DIR: 'qt-everywhere-src-5.15.5' - CI_QT_URL: 'https://download.qt.io/official_releases/qt/5.15/5.15.5/single/qt-everywhere-opensource-src-5.15.5.zip' + CI_QT_DIR: 'qt-everywhere-src-5.15.10' + CI_QT_URL: 'https://download.qt.io/official_releases/qt/5.15/5.15.10/single/qt-everywhere-opensource-src-5.15.10.zip' PYTHONUTF8: 1 TEST_RUNNER_TIMEOUT_FACTOR: 40 diff --git a/build_msvc/README.md b/build_msvc/README.md index ba6171fee7a38..8206620c3b934 100644 --- a/build_msvc/README.md +++ b/build_msvc/README.md @@ -32,7 +32,7 @@ Qt --------------------- To build Bitcoin Core with the GUI, a static build of Qt is required. -1. 
Download a single ZIP archive of Qt source code from https://download.qt.io/official_releases/qt/ (e.g., [`qt-everywhere-opensource-src-5.15.5.zip`](https://download.qt.io/official_releases/qt/5.15/5.15.5/single/qt-everywhere-opensource-src-5.15.5.zip)), and expand it into a dedicated folder. The following instructions assume that this folder is `C:\dev\qt-source`. +1. Download a single ZIP archive of Qt source code from https://download.qt.io/official_releases/qt/ (e.g., [`qt-everywhere-opensource-src-5.15.10.zip`](https://download.qt.io/official_releases/qt/5.15/5.15.10/single/qt-everywhere-opensource-src-5.15.10.zip)), and expand it into a dedicated folder. The following instructions assume that this folder is `C:\dev\qt-source`. 2. Open "x64 Native Tools Command Prompt for VS 2022", and input the following commands: ```cmd diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 7b4ee64776e9d..86df58f9d2a0b 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -1,9 +1,9 @@ package=qt -$(package)_version=5.15.5 +$(package)_version=5.15.10 $(package)_download_path=https://download.qt.io/official_releases/qt/5.15/$($(package)_version)/submodules $(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz $(package)_file_name=qtbase-$($(package)_suffix) -$(package)_sha256_hash=0c42c799aa7c89e479a07c451bf5a301e291266ba789e81afc18f95049524edc +$(package)_sha256_hash=c0d06cb18d20f10bf7ad53552099e097ec39362d30a5d6f104724f55fa1c8fb9 $(package)_linux_dependencies=freetype fontconfig libxcb libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm $(package)_qt_libs=corelib network widgets gui plugins testlib $(package)_linguist_tools = lrelease lupdate lconvert @@ -12,8 +12,6 @@ $(package)_patches += qttools_src.pro $(package)_patches += mac-qmake.conf $(package)_patches += fix_qt_pkgconfig.patch $(package)_patches += no-xlib.patch -$(package)_patches += dont_hardcode_x86_64.patch -$(package)_patches += fix_montery_include.patch $(package)_patches += fix_android_jni_static.patch $(package)_patches += dont_hardcode_pwd.patch $(package)_patches += qtbase-moc-ignore-gcc-macro.patch @@ -26,10 +24,10 @@ $(package)_patches += fix-macos-linker.patch $(package)_patches += memory_resource.patch $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) -$(package)_qttranslations_sha256_hash=c92af4171397a0ed272330b4fa0669790fcac8d050b07c8b8cc565ebeba6735e +$(package)_qttranslations_sha256_hash=38b942bc7e62794dd072945c8a92bb9dfffed24070aea300327a3bb42f855609 $(package)_qttools_file_name=qttools-$($(package)_suffix) -$(package)_qttools_sha256_hash=6d0778b71b2742cb527561791d1d3d255366163d54a10f78c683a398f09ffc6c +$(package)_qttools_sha256_hash=66f46c9729c831dce431778a9c561cca32daceaede1c7e58568d7a5898167dae $(package)_extra_sources = $($(package)_qttranslations_file_name) $(package)_extra_sources += $($(package)_qttools_file_name) @@ -245,9 +243,7 @@ define $(package)_preprocess_cmds patch -p1 -i $($(package)_patch_dir)/fix_qt_pkgconfig.patch && \ patch -p1 -i $($(package)_patch_dir)/fix_android_jni_static.patch && \ patch -p1 -i $($(package)_patch_dir)/no-xlib.patch && \ - patch -p1 -i $($(package)_patch_dir)/dont_hardcode_x86_64.patch && \ patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \ - patch -p1 -i $($(package)_patch_dir)/fix_montery_include.patch && \ patch -p1 -i $($(package)_patch_dir)/use_android_ndk23.patch && \ patch -p1 -i $($(package)_patch_dir)/memory_resource.patch && \ patch 
-p1 -i $($(package)_patch_dir)/rcc_hardcode_timestamp.patch && \ diff --git a/depends/patches/qt/dont_hardcode_x86_64.patch b/depends/patches/qt/dont_hardcode_x86_64.patch deleted file mode 100644 index a66426877ad78..0000000000000 --- a/depends/patches/qt/dont_hardcode_x86_64.patch +++ /dev/null @@ -1,119 +0,0 @@ -macOS: Don't hard-code x86_64 as the architecture when using qmake - -Upstream commit: - - Qt 6.1: 9082cc8e8d5a6441dabe5e7a95bc0cd9085b95fe - -For other Qt branches see -https://codereview.qt-project.org/q/I70db7e4c27f0d3da5d0af33cb491d72c312d3fa8 - - ---- old/qtbase/configure.json -+++ new/qtbase/configure.json -@@ -244,11 +244,18 @@ - - "testTypeDependencies": { - "linkerSupportsFlag": [ "use_bfd_linker", "use_gold_linker", "use_lld_linker" ], -- "verifySpec": [ "shared", "use_bfd_linker", "use_gold_linker", "use_lld_linker", "compiler-flags", "qmakeargs", "commit" ], -+ "verifySpec": [ -+ "shared", -+ "use_bfd_linker", "use_gold_linker", "use_lld_linker", -+ "compiler-flags", "qmakeargs", -+ "simulator_and_device", -+ "thread", -+ "commit" ], - "compile": [ "verifyspec" ], - "detectPkgConfig": [ "cross_compile", "machineTuple" ], - "library": [ "pkg-config", "compiler-flags" ], -- "getPkgConfigVariable": [ "pkg-config" ] -+ "getPkgConfigVariable": [ "pkg-config" ], -+ "architecture" : [ "verifyspec" ] - }, - - "testTypeAliases": { -@@ -762,7 +769,7 @@ - }, - "architecture": { - "label": "Architecture", -- "output": [ "architecture" ] -+ "output": [ "architecture", "commitConfig" ] - }, - "pkg-config": { - "label": "Using pkg-config", -diff --git a/configure.pri b/configure.pri -index 49755f7abfd..8be9b10d7d4 100644 ---- old/qtbase/configure.pri -+++ new/qtbase/configure.pri -@@ -662,6 +662,13 @@ defineTest(qtConfOutput_commitOptions) { - write_file($$QT_BUILD_TREE/mkspecs/qdevice.pri, $${currentConfig}.output.devicePro)|error() - } - -+# Output is written after configuring each Qt module, -+# but some tests within a module might depend on the -+# configuration output of previous tests. -+defineTest(qtConfOutput_commitConfig) { -+ qtConfProcessOutput() -+} -+ - # type (empty or 'host'), option name, default value - defineTest(processQtPath) { - out_var = config.rel_input.$${2} -diff --git a/mkspecs/common/macx.conf b/mkspecs/common/macx.conf -index d16b77acb8e..4ba0a8eaa36 100644 ---- old/qtbase/mkspecs/common/macx.conf -+++ new/qtbase/mkspecs/common/macx.conf -@@ -6,7 +6,6 @@ QMAKE_PLATFORM += macos osx macx - QMAKE_MAC_SDK = macosx - - QMAKE_MACOSX_DEPLOYMENT_TARGET = 10.13 --QMAKE_APPLE_DEVICE_ARCHS = x86_64 - - # Should be 10.15, but as long as the CI builds with - # older SDKs we have to keep this. -diff --git a/mkspecs/features/mac/default_post.prf b/mkspecs/features/mac/default_post.prf -index 92a9112bca6..d888731ec8d 100644 ---- old/qtbase/mkspecs/features/mac/default_post.prf -+++ new/qtbase/mkspecs/features/mac/default_post.prf -@@ -95,6 +95,11 @@ app_extension_api_only { - QMAKE_LFLAGS += $$QMAKE_CFLAGS_APPLICATION_EXTENSION - } - -+# Non-universal builds do not set QMAKE_APPLE_DEVICE_ARCHS, -+# so we pick it up from what the arch test resolved instead. 
-+isEmpty(QMAKE_APPLE_DEVICE_ARCHS): \ -+ QMAKE_APPLE_DEVICE_ARCHS = $$QT_ARCH -+ - macx-xcode { - qmake_pkginfo_typeinfo.name = QMAKE_PKGINFO_TYPEINFO - !isEmpty(QMAKE_PKGINFO_TYPEINFO): \ -@@ -150,9 +155,6 @@ macx-xcode { - simulator: VALID_SIMULATOR_ARCHS = $$QMAKE_APPLE_SIMULATOR_ARCHS - VALID_ARCHS = $$VALID_DEVICE_ARCHS $$VALID_SIMULATOR_ARCHS - -- isEmpty(VALID_ARCHS): \ -- error("QMAKE_APPLE_DEVICE_ARCHS or QMAKE_APPLE_SIMULATOR_ARCHS must contain at least one architecture") -- - single_arch: VALID_ARCHS = $$first(VALID_ARCHS) - - ACTIVE_ARCHS = $(filter $(EXPORT_VALID_ARCHS), $(ARCHS)) -diff --git a/mkspecs/features/toolchain.prf b/mkspecs/features/toolchain.prf -index efbe7c1e55b..8add6dc8043 100644 ---- old/qtbase/mkspecs/features/toolchain.prf -+++ new/qtbase/mkspecs/features/toolchain.prf -@@ -182,9 +182,14 @@ isEmpty($${target_prefix}.INCDIRS) { - # UIKit simulator platforms will see the device SDK's sysroot in - # QMAKE_DEFAULT_*DIRS, because they're handled in a single build pass. - darwin { -- # Clang doesn't pick up the architecture from the sysroot, and will -- # default to the host architecture, so we need to manually set it. -- cxx_flags += -arch $$QMAKE_APPLE_DEVICE_ARCHS -+ uikit { -+ # Clang doesn't automatically pick up the architecture, just because -+ # we're passing the iOS sysroot below, and we will end up building the -+ # test for the host architecture, resulting in linker errors when -+ # linking against the iOS libraries. We work around this by passing -+ # the architecture explicitly. -+ cxx_flags += -arch $$first(QMAKE_APPLE_DEVICE_ARCHS) -+ } - - uikit:macx-xcode: \ - cxx_flags += -isysroot $$sdk_path_device.value diff --git a/depends/patches/qt/fix-macos-linker.patch b/depends/patches/qt/fix-macos-linker.patch index db056de4d9b25..e439685656550 100644 --- a/depends/patches/qt/fix-macos-linker.patch +++ b/depends/patches/qt/fix-macos-linker.patch @@ -29,7 +29,7 @@ https://codereview.qt-project.org/q/I2347b26e2df0828471373b0e15b8c9089274c65d --- old/qtbase/mkspecs/features/toolchain.prf +++ new/qtbase/mkspecs/features/toolchain.prf -@@ -283,9 +283,12 @@ isEmpty($${target_prefix}.INCDIRS) { +@@ -288,9 +288,12 @@ isEmpty($${target_prefix}.INCDIRS) { } } } @@ -44,7 +44,7 @@ https://codereview.qt-project.org/q/I2347b26e2df0828471373b0e15b8c9089274c65d QMAKE_DEFAULT_LIBDIRS = $$unique(QMAKE_DEFAULT_LIBDIRS) } else: ghs { cmd = $$QMAKE_CXX $$QMAKE_CXXFLAGS -$${LITERAL_HASH} -o /tmp/fake_output /tmp/fake_input.cpp -@@ -407,7 +410,7 @@ isEmpty($${target_prefix}.INCDIRS) { +@@ -412,7 +415,7 @@ isEmpty($${target_prefix}.INCDIRS) { QMAKE_DEFAULT_INCDIRS = $$split(INCLUDE, $$QMAKE_DIRLIST_SEP) } diff --git a/depends/patches/qt/fix_android_jni_static.patch b/depends/patches/qt/fix_android_jni_static.patch index 936b82e1522e3..7dbd68fe9c818 100644 --- a/depends/patches/qt/fix_android_jni_static.patch +++ b/depends/patches/qt/fix_android_jni_static.patch @@ -1,6 +1,6 @@ --- old/qtbase/src/plugins/platforms/android/androidjnimain.cpp +++ new/qtbase/src/plugins/platforms/android/androidjnimain.cpp -@@ -943,6 +943,14 @@ Q_DECL_EXPORT jint JNICALL JNI_OnLoad(JavaVM *vm, void */*reserved*/) +@@ -980,6 +980,14 @@ Q_DECL_EXPORT jint JNICALL JNI_OnLoad(JavaVM *vm, void */*reserved*/) __android_log_print(ANDROID_LOG_FATAL, "Qt", "registerNatives failed"); return -1; } diff --git a/depends/patches/qt/fix_montery_include.patch b/depends/patches/qt/fix_montery_include.patch deleted file mode 100644 index 38b700addfe7a..0000000000000 --- 
a/depends/patches/qt/fix_montery_include.patch +++ /dev/null @@ -1,21 +0,0 @@ -From dece6f5840463ae2ddf927d65eb1b3680e34a547 -From: Øystein Heskestad -Date: Wed, 27 Oct 2021 13:07:46 +0200 -Subject: [PATCH] Add missing macOS header file that was indirectly included before - -See: https://bugreports.qt.io/browse/QTBUG-97855 - -Upstream Commits: - - Qt 6.2: c884bf138a21dd7320e35cef34d24e22e74d7ce0 - -diff --git a/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h b/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h -index e070ba97..07c75b04 100644 ---- a/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h -+++ b/qtbase/src/plugins/platforms/cocoa/qiosurfacegraphicsbuffer.h -@@ -40,6 +40,7 @@ - #ifndef QIOSURFACEGRAPHICSBUFFER_H - #define QIOSURFACEGRAPHICSBUFFER_H - -+#include - #include - #include diff --git a/depends/patches/qt/memory_resource.patch b/depends/patches/qt/memory_resource.patch index e41d68db30171..650c32852859d 100644 --- a/depends/patches/qt/memory_resource.patch +++ b/depends/patches/qt/memory_resource.patch @@ -17,7 +17,7 @@ and https://bugreports.qt.io/browse/QTBUG-114316 --- a/qtbase/src/corelib/global/qcompilerdetection.h +++ b/qtbase/src/corelib/global/qcompilerdetection.h -@@ -1041,16 +1041,22 @@ +@@ -1050,16 +1050,22 @@ # endif // !_HAS_CONSTEXPR # endif // !__GLIBCXX__ && !_LIBCPP_VERSION # endif // Q_OS_QNX diff --git a/depends/patches/qt/no-xlib.patch b/depends/patches/qt/no-xlib.patch index d6846aaca2c29..0f7965d2ea8b7 100644 --- a/depends/patches/qt/no-xlib.patch +++ b/depends/patches/qt/no-xlib.patch @@ -4,12 +4,7 @@ Date: Thu, 18 Jul 2019 17:22:05 -0400 Subject: [PATCH] Wrap xlib related code blocks in #if's They are not necessary to compile QT. ---- - qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp | 8 ++++++++ - 1 file changed, 8 insertions(+) -diff --git a/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp b/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp -index 7c62c2e2b3..c05c6c0a07 100644 --- a/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp +++ b/qtbase/src/plugins/platforms/xcb/qxcbcursor.cpp @@ -49,7 +49,9 @@ @@ -22,7 +17,7 @@ index 7c62c2e2b3..c05c6c0a07 100644 #include #include -@@ -391,6 +391,7 @@ void QXcbCursor::changeCursor(QCursor *cursor, QWindow *window) +@@ -391,6 +393,7 @@ void QXcbCursor::changeCursor(QCursor *cursor, QWindow *window) xcb_flush(xcb_connection()); } @@ -30,7 +25,7 @@ index 7c62c2e2b3..c05c6c0a07 100644 static int cursorIdForShape(int cshape) { int cursorId = 0; -@@ -444,6 +445,7 @@ static int cursorIdForShape(int cshape) +@@ -444,6 +447,7 @@ static int cursorIdForShape(int cshape) } return cursorId; } @@ -38,7 +33,7 @@ index 7c62c2e2b3..c05c6c0a07 100644 xcb_cursor_t QXcbCursor::createNonStandardCursor(int cshape) { -@@ -556,7 +558,9 @@ static xcb_cursor_t loadCursor(void *dpy, int cshape) +@@ -558,7 +562,9 @@ static xcb_cursor_t loadCursor(void *dpy, int cshape) xcb_cursor_t QXcbCursor::createFontCursor(int cshape) { xcb_connection_t *conn = xcb_connection(); @@ -47,8 +42,8 @@ index 7c62c2e2b3..c05c6c0a07 100644 +#endif xcb_cursor_t cursor = XCB_NONE; - // Try Xcursor first -@@ -586,6 +590,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) + #if QT_CONFIG(xcb_xlib) && QT_CONFIG(library) +@@ -590,6 +596,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) // Non-standard X11 cursors are created from bitmaps cursor = createNonStandardCursor(cshape); @@ -56,7 +51,7 @@ index 7c62c2e2b3..c05c6c0a07 100644 // Create a glpyh cursor if everything else failed if (!cursor && cursorId) { 
cursor = xcb_generate_id(conn); -@@ -593,6 +598,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) +@@ -597,6 +604,7 @@ xcb_cursor_t QXcbCursor::createFontCursor(int cshape) cursorId, cursorId + 1, 0xFFFF, 0xFFFF, 0xFFFF, 0, 0, 0); } @@ -64,6 +59,3 @@ index 7c62c2e2b3..c05c6c0a07 100644 if (cursor && cshape >= 0 && cshape < Qt::LastCursor && connection()->hasXFixes()) { const char *name = cursorNames[cshape].front(); --- -2.22.0 - diff --git a/doc/dependencies.md b/doc/dependencies.md index 804f796abe1fc..cb79b30cdf469 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -30,7 +30,7 @@ You can find installation instructions in the `build-*.md` file for your platfor | [Fontconfig](../depends/packages/fontconfig.mk) | [link](https://www.freedesktop.org/wiki/Software/fontconfig/) | [2.12.6](https://github.com/bitcoin/bitcoin/pull/23495) | 2.6 | Yes | | [FreeType](../depends/packages/freetype.mk) | [link](https://freetype.org) | [2.11.0](https://github.com/bitcoin/bitcoin/commit/01544dd78ccc0b0474571da854e27adef97137fb) | 2.3.0 | Yes | | [qrencode](../depends/packages/qrencode.mk) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | | No | -| [Qt](../depends/packages/qt.mk) | [link](https://download.qt.io/official_releases/qt/) | [5.15.5](https://github.com/bitcoin/bitcoin/pull/25719) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | +| [Qt](../depends/packages/qt.mk) | [link](https://download.qt.io/official_releases/qt/) | [5.15.10](https://github.com/bitcoin/bitcoin/pull/28561) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | ### Networking | Dependency | Releases | Version used | Minimum required | Runtime | From ba2e5bfc67dcffca26af9e231652ada1767cbeb2 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Tue, 3 Oct 2023 14:41:55 -0400 Subject: [PATCH 103/172] net: raise V1_PREFIX_LEN from 12 to 16 A "version" message in the V1 protocol starts with a fixed 16 bytes: * The 4-byte network magic * The 12-byte zero-padded command "version" plus 5 0x00 bytes The current code detects incoming V1 connections by just looking at the first 12 bytes (matching an earlier version of BIP324), but 16 bytes is more precise. This isn't an observable difference right now, as a 12 byte prefix ought to be negligible already, but it may become observable with future extensions to the protocol, so make the code match the specification. --- src/net.cpp | 10 +++++----- src/net.h | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 13f443042402d..994abd986d0a1 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1098,10 +1098,10 @@ void V2Transport::ProcessReceivedMaybeV1Bytes() noexcept AssertLockNotHeld(m_send_mutex); Assume(m_recv_state == RecvState::KEY_MAYBE_V1); // We still have to determine if this is a v1 or v2 connection. The bytes being received could - // be the beginning of either a v1 packet (network magic + "version\x00"), or of a v2 public - // key. BIP324 specifies that a mismatch with this 12-byte string should trigger sending of the - // key. - std::array v1_prefix = {0, 0, 0, 0, 'v', 'e', 'r', 's', 'i', 'o', 'n', 0}; + // be the beginning of either a v1 packet (network magic + "version\x00\x00\x00\x00\x00"), or + // of a v2 public key. BIP324 specifies that a mismatch with this 16-byte string should trigger + // sending of the key. 
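
For readers following the prefix logic in this hunk, the full 16-byte string is simple to reconstruct in Python. The regtest magic below matches MAGIC_BYTES in the functional-test framework; the detection function is a simplified sketch of the responder's decision, not the actual V2Transport state machine:

```python
MAGIC_REGTEST = bytes.fromhex("fabfb5da")  # 4-byte network magic
# The v1 command field is 12 bytes, zero-padded: "version" + 5 NUL bytes.
V1_PREFIX = MAGIC_REGTEST + b"version" + b"\x00" * 5
assert len(V1_PREFIX) == 16

def classify(received: bytes) -> str:
    # Stay undecided while every byte still matches the v1 prefix; a full
    # match means a v1 peer, the first mismatch means a possible v2 key.
    if received == V1_PREFIX[:len(received)]:
        return "v1" if len(received) == len(V1_PREFIX) else "detecting"
    return "maybe-v2"

assert classify(V1_PREFIX[:15]) == "detecting"
assert classify(V1_PREFIX) == "v1"
assert classify(bytes(16)) == "maybe-v2"
```
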
+ std::array v1_prefix = {0, 0, 0, 0, 'v', 'e', 'r', 's', 'i', 'o', 'n', 0, 0, 0, 0, 0}; std::copy(std::begin(Params().MessageStart()), std::end(Params().MessageStart()), v1_prefix.begin()); Assume(m_recv_buffer.size() <= v1_prefix.size()); if (!std::equal(m_recv_buffer.begin(), m_recv_buffer.end(), v1_prefix.begin())) { @@ -1295,7 +1295,7 @@ size_t V2Transport::GetMaxBytesToProcess() noexcept // receive buffer. Assume(m_recv_buffer.size() <= V1_PREFIX_LEN); // As long as we're not sure if this is a v1 or v2 connection, don't receive more than what - // is strictly necessary to distinguish the two (12 bytes). If we permitted more than + // is strictly necessary to distinguish the two (16 bytes). If we permitted more than // the v1 header size (24 bytes), we may not be able to feed the already-received bytes // back into the m_v1_fallback V1 transport. return V1_PREFIX_LEN - m_recv_buffer.size(); diff --git a/src/net.h b/src/net.h index 2f7b832fbaa66..e8e31f72e4ecb 100644 --- a/src/net.h +++ b/src/net.h @@ -473,7 +473,7 @@ class V2Transport final : public Transport /** The length of the V1 prefix to match bytes initially received by responders with to * determine if their peer is speaking V1 or V2. */ - static constexpr size_t V1_PREFIX_LEN = 12; + static constexpr size_t V1_PREFIX_LEN = 16; // The sender side and receiver side of V2Transport are state machines that are transitioned // through, based on what has been received. The receive state corresponds to the contents of, From 7005a01c19001ab5821731597656f8bc5e8c11e3 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Wed, 4 Oct 2023 11:05:03 -0400 Subject: [PATCH 104/172] test: add wait_for_connect to BitcoinTestFramework.connect_nodes --- test/functional/test_framework/test_framework.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index ab7fed335c6b2..a34c34713e922 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -586,7 +586,14 @@ def restart_node(self, i, extra_args=None): def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) - def connect_nodes(self, a, b, *, peer_advertises_v2=None): + def connect_nodes(self, a, b, *, peer_advertises_v2=None, wait_for_connect: bool = True): + """ + Kwargs: + wait_for_connect: if True, block until the nodes are verified as connected. You might + want to disable this when using -stopatheight with one of the connected nodes, + since there will be a race between the actual connection and performing + the assertions before one node shuts down. 
+ """ from_connection = self.nodes[a] to_connection = self.nodes[b] from_num_peers = 1 + len(from_connection.getpeerinfo()) @@ -603,6 +610,9 @@ def connect_nodes(self, a, b, *, peer_advertises_v2=None): # compatibility with older clients from_connection.addnode(ip_port, "onetry") + if not wait_for_connect: + return + # poll until version handshake complete to avoid race conditions # with transaction relaying # See comments in net_processing: From 5bd2010f024b5bcccf1d57bae6fc36c53f5facc5 Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Wed, 4 Oct 2023 11:05:27 -0400 Subject: [PATCH 105/172] test: assumeutxo: avoid race in functional test --- test/functional/feature_assumeutxo.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py index be1aa1899380a..4d0552f332158 100755 --- a/test/functional/feature_assumeutxo.py +++ b/test/functional/feature_assumeutxo.py @@ -142,7 +142,10 @@ def no_sync(): f"-stopatheight={PAUSE_HEIGHT}", *self.extra_args[1]]) # Finally connect the nodes and let them sync. - self.connect_nodes(0, 1) + # + # Set `wait_for_connect=False` to avoid a race between performing connection + # assertions and the -stopatheight tripping. + self.connect_nodes(0, 1, wait_for_connect=False) n1.wait_until_stopped(timeout=5) From c1e6c542af6d89a499e2a65465865aec651c4d67 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Wed, 4 Oct 2023 10:56:52 -0400 Subject: [PATCH 106/172] descriptors: disallow hybrid public keys The descriptor documentation (doc/descriptors.md) and BIP380 explicitly require that hex-encoded public keys start with 02 or 03 (compressed) or 04 (uncompressed). However, the current parsing/inference code permit 06 and 07 (hybrid) encoding as well. Fix this. --- src/pubkey.h | 6 ++++++ src/script/descriptor.cpp | 8 ++++++-- src/test/descriptor_tests.cpp | 9 +++++++++ 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/pubkey.h b/src/pubkey.h index 274779f9a470d..4b34fd829bc35 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -191,6 +191,12 @@ class CPubKey return size() > 0; } + /** Check if a public key is a syntactically valid compressed or uncompressed key. */ + bool IsValidNonHybrid() const noexcept + { + return size() > 0 && (vch[0] == 0x02 || vch[0] == 0x03 || vch[0] == 0x04); + } + //! 
fully validate whether this is a valid public key (more expensive than IsValid()) bool IsFullyValid() const; diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 2f3f2c7a1dc1c..896fb0b5b314d 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -1290,6 +1290,10 @@ std::unique_ptr ParsePubkeyInner(uint32_t key_exp_index, const S if (IsHex(str)) { std::vector data = ParseHex(str); CPubKey pubkey(data); + if (pubkey.IsValid() && !pubkey.IsValidNonHybrid()) { + error = "Hybrid public keys are not allowed"; + return nullptr; + } if (pubkey.IsFullyValid()) { if (permit_uncompressed || pubkey.IsCompressed()) { return std::make_unique(key_exp_index, pubkey, false); @@ -1448,7 +1452,7 @@ struct KeyParser { { assert(m_in); CPubKey pubkey(begin, end); - if (pubkey.IsValid()) { + if (pubkey.IsValidNonHybrid()) { Key key = m_keys.size(); m_keys.push_back(InferPubkey(pubkey, ParseScriptContext::P2WSH, *m_in)); return key; @@ -1795,7 +1799,7 @@ std::unique_ptr InferScript(const CScript& script, ParseScriptCo if (txntype == TxoutType::PUBKEY && (ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH || ctx == ParseScriptContext::P2WSH)) { CPubKey pubkey(data[0]); - if (pubkey.IsValid()) { + if (pubkey.IsValidNonHybrid()) { return std::make_unique(InferPubkey(pubkey, ctx, provider)); } } diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 3a30ef453eca5..60e441d457781 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -409,6 +409,11 @@ BOOST_AUTO_TEST_CASE(descriptor_test) CheckUnparsable("wsh(pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))", "wsh(pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))", "pk(): Uncompressed keys are not allowed"); // No uncompressed keys in witness CheckUnparsable("sh(wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))", "sh(wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))", "wpkh(): Uncompressed keys are not allowed"); // No uncompressed keys in witness + // Equivalent single-key hybrid is not allowed + CheckUnparsable("", "combo(07a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "combo(): Hybrid public keys are not allowed"); + CheckUnparsable("", "pk(0623542d61708e3fc48ba78fbe8fcc983ba94a520bc33f82b8e45e51dbc47af2726bcf181925eee1bdd868b109314f3ea92a6fc23d6b66057d3acfba04d6b08b58)", "pk(): Hybrid public keys are not allowed"); + CheckUnparsable("", "pkh(07a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "pkh(): Hybrid public keys are not allowed"); + // Some unconventional single-key constructions Check("sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141857af51a5e516552b3086430fd8ce55f7c1a52487"}}, OutputType::LEGACY); Check("sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141a31ad23bf49c247dd531a623c2ef57da3c400c587"}}, OutputType::LEGACY); @@ 
-538,6 +543,10 @@ BOOST_AUTO_TEST_CASE(descriptor_test) CheckUnparsable("tr(and_v(vc:andor(pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),pk_k(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn),and_v(v:older(1),pk_k(L4o2kDvXXDRH2VS9uBnouScLduWt4dZnM25se7kvEjJeQ285en2A))),after(10)))", "tr(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10)))", "tr(): key 'and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10))' is not valid"); CheckUnparsable("raw(and_v(vc:andor(pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),pk_k(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn),and_v(v:older(1),pk_k(L4o2kDvXXDRH2VS9uBnouScLduWt4dZnM25se7kvEjJeQ285en2A))),after(10)))", "sh(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10)))", "Miniscript expressions can only be used in wsh"); CheckUnparsable("", "tr(034D2224bbbbbbbbbbcbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb40,{{{{{{{{{{{{{{{{{{{{{{multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/967808'/9,xprvA1RpRA33e1JQ7ifknakTFNpgXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/968/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/585/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/2/0/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/5/8/5/8/24/5/58/52/5/8/5/2/8/24/5/58/588/246/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/5/4/5/58/55/58/2/5/8/55/2/5/8/58/555/58/2/5/8/4//2/5/58/5w/2/5/8/5/2/4/5/58/5558'/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/8/58/2/5/58/58/2/5/8/9/588/2/58/2/5/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/82/5/8/5/5/58/52/6/8/5/2/8/{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}{{{{{{{{{DDD2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8588/246/8/5/2DLDDDDDDDbbD3DDDD/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8D)/5/2/5/58/58/2/5/58/58/58/588/2/58/2/5/8/5/25/58/58/2/5/58/58/2/5/8/9/588/2/58/2/6780,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFW/8/5/2/5/58678008')", 
"'multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/967808'/9,xprvA1RpRA33e1JQ7ifknakTFNpgXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/968/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/585/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/2/0/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/5/8/5/8/24/5/58/52/5/8/5/2/8/24/5/58/588/246/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/5/4/5/58/55/58/2/5/8/55/2/5/8/58/555/58/2/5/8/4//2/5/58/5w/2/5/8/5/2/4/5/58/5558'/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/8/58/2/5/58/58/2/5/8/9/588/2/58/2/5/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/82/5/8/5/5/58/52/6/8/5/2/8/{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}{{{{{{{{{DDD2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8588/246/8/5/2DLDDDDDDDbbD3DDDD/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8D)/5/2/5/58/58/2/5/58/58/58/588/2/58/2/5/8/5/25/58/58/2/5/58/58/2/5/8/9/588/2/58/2/6780,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFW/8/5/2/5/58678008'' is not a valid descriptor function"); + // No uncompressed keys allowed + CheckUnparsable("", "wsh(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(049228de6902abb4f541791f6d7f925b10e2078ccb1298856e5ea5cc5fd667f930eac37a00cc07f9a91ef3c2d17bf7a17db04552ff90ac312a5b8b4caca6c97aa4))),after(10)))", "A function is needed within P2WSH"); + // No hybrid keys allowed + CheckUnparsable("", "wsh(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(069228de6902abb4f541791f6d7f925b10e2078ccb1298856e5ea5cc5fd667f930eac37a00cc07f9a91ef3c2d17bf7a17db04552ff90ac312a5b8b4caca6c97aa4))),after(10)))", "A function is needed within P2WSH"); // Insane at top level CheckUnparsable("wsh(and_b(vc:andor(pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),pk_k(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn),and_v(v:older(1),pk_k(L4o2kDvXXDRH2VS9uBnouScLduWt4dZnM25se7kvEjJeQ285en2A))),after(10)))", "wsh(and_b(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10)))", "and_b(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10)) is invalid"); // Invalid sub From 7e4003226030a04a19c718a4b1b83b4ca40ca33f Mon Sep 17 00:00:00 2001 From: James O'Beirne Date: Wed, 4 Oct 2023 11:18:14 -0400 Subject: [PATCH 107/172] tests: assumeutxo: accept final height from either chainstate --- test/functional/feature_assumeutxo.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) 
diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py index 4d0552f332158..be0715df32cbd 100755 --- a/test/functional/feature_assumeutxo.py +++ b/test/functional/feature_assumeutxo.py @@ -159,7 +159,15 @@ def no_sync(): self.connect_nodes(0, 1) self.log.info(f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})") - wait_until_helper(lambda: n1.getchainstates()['snapshot']['blocks'] == FINAL_HEIGHT) + + def check_for_final_height(): + chainstates = n1.getchainstates() + # The background validation may have completed before we run our first + # check, so accept a final blockheight from either chainstate type. + cs = chainstates.get('snapshot') or chainstates.get('normal') + return cs['blocks'] == FINAL_HEIGHT + + wait_until_helper(check_for_final_height) self.sync_blocks(nodes=(n0, n1)) self.log.info("Ensuring background validation completes") From aba4a5887b44bf7cbee9ea0a8e02bb92c1b4147b Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Tue, 3 Oct 2023 11:36:17 +0200 Subject: [PATCH 108/172] ci: Only run functional tests on windows in master --- .github/workflows/ci.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 28a027b780639..ae320b752b68e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -286,6 +286,10 @@ jobs: run: py -3 test\util\rpcauth-test.py - name: Run functional tests - env: - TEST_RUNNER_EXTRA: ${{ github.event_name != 'pull_request' && '--extended' || '' }} - run: py -3 test\functional\test_runner.py --jobs $env:NUMBER_OF_PROCESSORS --ci --quiet --tmpdirprefix=$env:RUNNER_TEMP --combinedlogslen=99999999 --timeout-factor=$env:TEST_RUNNER_TIMEOUT_FACTOR $env:TEST_RUNNER_EXTRA + # Don't run functional tests for pull requests. + # The test suite regularly fails to complete in windows native github + # actions as a child process stops making progress. The root cause has + # not yet been determined. + # Discussed in https://github.com/bitcoin/bitcoin/pull/28509 + if: github.event_name != 'pull_request' + run: py -3 test\functional\test_runner.py --jobs $env:NUMBER_OF_PROCESSORS --ci --quiet --tmpdirprefix=$env:RUNNER_TEMP --combinedlogslen=99999999 --timeout-factor=$env:TEST_RUNNER_TIMEOUT_FACTOR --extended From 58c9b50a952951cb326c99ba86cb706a1e7d533e Mon Sep 17 00:00:00 2001 From: pablomartin4btc Date: Wed, 13 Sep 2023 15:01:38 -0300 Subject: [PATCH 109/172] gui: Add wallet name to address book page Extend addresstablemodel to return the display name from the wallet and set it to the addressbookpage window title when its model is set.
--- src/qt/addressbookpage.cpp | 26 +++++++++++++++----------- src/qt/addressbookpage.h | 1 + src/qt/addresstablemodel.cpp | 2 ++ src/qt/addresstablemodel.h | 2 ++ 4 files changed, 20 insertions(+), 11 deletions(-) diff --git a/src/qt/addressbookpage.cpp b/src/qt/addressbookpage.cpp index b888fc43e28ef..05e58191fce20 100644 --- a/src/qt/addressbookpage.cpp +++ b/src/qt/addressbookpage.cpp @@ -81,9 +81,7 @@ AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode _mode, ui->exportButton->setIcon(platformStyle->SingleColorIcon(":/icons/export")); } - switch(mode) - { - case ForSelection: + if (mode == ForSelection) { switch(tab) { case SendingTab: setWindowTitle(tr("Choose the address to send coins to")); break; @@ -94,14 +92,6 @@ AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode _mode, ui->tableView->setFocus(); ui->closeButton->setText(tr("C&hoose")); ui->exportButton->hide(); - break; - case ForEditing: - switch(tab) - { - case SendingTab: setWindowTitle(tr("Sending addresses")); break; - case ReceivingTab: setWindowTitle(tr("Receiving addresses")); break; - } - break; } switch(tab) { @@ -164,6 +154,7 @@ void AddressBookPage::setModel(AddressTableModel *_model) connect(_model, &AddressTableModel::rowsInserted, this, &AddressBookPage::selectNewAddress); selectionChanged(); + this->updateWindowsTitleWithWalletName(); } void AddressBookPage::on_copyAddress_clicked() @@ -328,3 +319,16 @@ void AddressBookPage::selectNewAddress(const QModelIndex &parent, int begin, int newAddressToSelect.clear(); } } + +void AddressBookPage::updateWindowsTitleWithWalletName() +{ + const QString walletName = this->model->GetWalletDisplayName(); + + if (mode == ForEditing) { + switch(tab) + { + case SendingTab: setWindowTitle(tr("Sending addresses - %1").arg(walletName)); break; + case ReceivingTab: setWindowTitle(tr("Receiving addresses - %1").arg(walletName)); break; + } + } +} diff --git a/src/qt/addressbookpage.h b/src/qt/addressbookpage.h index 283209d00c727..b649da4ac8f4d 100644 --- a/src/qt/addressbookpage.h +++ b/src/qt/addressbookpage.h @@ -56,6 +56,7 @@ public Q_SLOTS: AddressBookSortFilterProxyModel *proxyModel; QMenu *contextMenu; QString newAddressToSelect; + void updateWindowsTitleWithWalletName(); private Q_SLOTS: /** Delete currently selected address entry */ diff --git a/src/qt/addresstablemodel.cpp b/src/qt/addresstablemodel.cpp index e4689e4389325..c52ef7cd67d30 100644 --- a/src/qt/addresstablemodel.cpp +++ b/src/qt/addresstablemodel.cpp @@ -451,3 +451,5 @@ void AddressTableModel::emitDataChanged(int idx) { Q_EMIT dataChanged(index(idx, 0, QModelIndex()), index(idx, columns.length()-1, QModelIndex())); } + +QString AddressTableModel::GetWalletDisplayName() const { return walletModel->getDisplayName(); }; diff --git a/src/qt/addresstablemodel.h b/src/qt/addresstablemodel.h index 599aa89cadd1d..44808364ec95c 100644 --- a/src/qt/addresstablemodel.h +++ b/src/qt/addresstablemodel.h @@ -87,6 +87,8 @@ class AddressTableModel : public QAbstractTableModel OutputType GetDefaultAddressType() const; + QString GetWalletDisplayName() const; + private: WalletModel* const walletModel; AddressTablePriv *priv = nullptr; From e1308967e1a7e0ab275ab4c6f2f94c4ca0ee517b Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Wed, 4 Oct 2023 16:43:43 +0200 Subject: [PATCH 110/172] test: BIP324: add checks for v1 prefix matching / wrong network magic detection --- test/functional/p2p_v2_transport.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 
deletion(-) diff --git a/test/functional/p2p_v2_transport.py b/test/functional/p2p_v2_transport.py index 2455bf2e2da5b..dd564fed88aff 100755 --- a/test/functional/p2p_v2_transport.py +++ b/test/functional/p2p_v2_transport.py @@ -5,10 +5,16 @@ """ Test v2 transport """ +import socket from test_framework.messages import NODE_P2P_V2 +from test_framework.p2p import MAGIC_BYTES from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal +from test_framework.util import ( + assert_equal, + p2p_port, +) + class V2TransportTest(BitcoinTestFramework): def set_test_params(self): @@ -123,5 +129,25 @@ def run_test(self): self.sync_all() assert_equal(self.nodes[4].getblockcount(), 11) + # Check v1 prefix detection + V1_PREFIX = MAGIC_BYTES["regtest"] + b"version\x00\x00\x00\x00\x00" + assert_equal(len(V1_PREFIX), 16) + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + num_peers = len(self.nodes[0].getpeerinfo()) + s.connect(("127.0.0.1", p2p_port(0))) + self.wait_until(lambda: len(self.nodes[0].getpeerinfo()) == num_peers + 1) + s.sendall(V1_PREFIX[:-1]) + assert_equal(self.nodes[0].getpeerinfo()[-1]["transport_protocol_type"], "detecting") + s.sendall(bytes([V1_PREFIX[-1]])) # send out last prefix byte + self.wait_until(lambda: self.nodes[0].getpeerinfo()[-1]["transport_protocol_type"] == "v1") + + # Check wrong network prefix detection (hits if the next 12 bytes correspond to a v1 version message) + wrong_network_magic_prefix = MAGIC_BYTES["signet"] + V1_PREFIX[4:] + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.connect(("127.0.0.1", p2p_port(0))) + with self.nodes[0].assert_debug_log("V2 transport error: V1 peer with wrong MessageStart"): + s.sendall(wrong_network_magic_prefix + b"somepayload") + + if __name__ == '__main__': V2TransportTest().main() From f0cebbdb2a1a3c2f0facd88963484ad6fd5851db Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 29 Jul 2022 10:46:47 +0100 Subject: [PATCH 111/172] qt: enable -ltcg for windows HOST Patch around multiple definition issues in Qt. 
Co-authored-by: Cory Fields --- depends/packages/qt.mk | 5 +++++ depends/patches/qt/windows_lto.patch | 31 ++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 depends/patches/qt/windows_lto.patch diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 86df58f9d2a0b..047d1d5aee178 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -22,6 +22,7 @@ $(package)_patches += fast_fixed_dtoa_no_optimize.patch $(package)_patches += guix_cross_lib_path.patch $(package)_patches += fix-macos-linker.patch $(package)_patches += memory_resource.patch +$(package)_patches += windows_lto.patch $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) $(package)_qttranslations_sha256_hash=38b942bc7e62794dd072945c8a92bb9dfffed24070aea300327a3bb42f855609 @@ -183,6 +184,9 @@ $(package)_config_opts_mingw32 += "QMAKE_LFLAGS = '$($(package)_ldflags)'" $(package)_config_opts_mingw32 += "QMAKE_LIB = '$($(package)_ar) rc'" $(package)_config_opts_mingw32 += -device-option CROSS_COMPILE="$(host)-" $(package)_config_opts_mingw32 += -pch +ifneq ($(LTO),) +$(package)_config_opts_mingw32 += -ltcg +endif $(package)_config_opts_android = -xplatform android-clang $(package)_config_opts_android += -android-sdk $(ANDROID_SDK) @@ -250,6 +254,7 @@ define $(package)_preprocess_cmds patch -p1 -i $($(package)_patch_dir)/duplicate_lcqpafonts.patch && \ patch -p1 -i $($(package)_patch_dir)/fast_fixed_dtoa_no_optimize.patch && \ patch -p1 -i $($(package)_patch_dir)/guix_cross_lib_path.patch && \ + patch -p1 -i $($(package)_patch_dir)/windows_lto.patch && \ mkdir -p qtbase/mkspecs/macx-clang-linux &&\ cp -f qtbase/mkspecs/macx-clang/qplatformdefs.h qtbase/mkspecs/macx-clang-linux/ &&\ cp -f $($(package)_patch_dir)/mac-qmake.conf qtbase/mkspecs/macx-clang-linux/qmake.conf && \ diff --git a/depends/patches/qt/windows_lto.patch b/depends/patches/qt/windows_lto.patch new file mode 100644 index 0000000000000..ea379a60f14a1 --- /dev/null +++ b/depends/patches/qt/windows_lto.patch @@ -0,0 +1,31 @@ +Qt (for Windows) fails to build under LTO, due to multiple definition issues, i.e + +multiple definition of `QAccessibleLineEdit::~QAccessibleLineEdit()'; + +Possibly related to https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94156. + +diff --git a/qtbase/src/widgets/accessible/simplewidgets.cpp b/qtbase/src/widgets/accessible/simplewidgets.cpp +index 107fd729fe..0e61878f39 100644 +--- a/qtbase/src/widgets/accessible/simplewidgets.cpp ++++ b/qtbase/src/widgets/accessible/simplewidgets.cpp +@@ -109,6 +109,8 @@ QString qt_accHotKey(const QString &text); + \ingroup accessibility + */ + ++QAccessibleLineEdit::~QAccessibleLineEdit(){}; ++ + /*! + Creates a QAccessibleButton object for \a w. 
+ */ +diff --git a/qtbase/src/widgets/accessible/simplewidgets_p.h b/qtbase/src/widgets/accessible/simplewidgets_p.h +index 73572e3059..658da86143 100644 +--- a/qtbase/src/widgets/accessible/simplewidgets_p.h ++++ b/qtbase/src/widgets/accessible/simplewidgets_p.h +@@ -155,6 +155,7 @@ class QAccessibleLineEdit : public QAccessibleWidget, public QAccessibleTextInte + public: + explicit QAccessibleLineEdit(QWidget *o, const QString &name = QString()); + ++ ~QAccessibleLineEdit(); + QString text(QAccessible::Text t) const override; + void setText(QAccessible::Text t, const QString &text) override; + QAccessible::State state() const override; From a9d070a6f89d855aec5fbe6efe679feef86a21f3 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 4 Oct 2023 16:51:38 +0100 Subject: [PATCH 112/172] kernel: update nMinimumChainWork & defaultAssumeValid for 26.x --- src/kernel/chainparams.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 5e893a3f58c4d..c57e6ea3d5cc8 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -104,8 +104,8 @@ class CMainParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1628640000; // August 11th, 2021 consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 709632; // Approximately November 12th, 2021 - consensus.nMinimumChainWork = uint256S("0x000000000000000000000000000000000000000044a50fe819c39ad624021859"); - consensus.defaultAssumeValid = uint256S("0x000000000000000000035c3f0d31e71a5ee24c5aaf3354689f65bd7b07dee632"); // 784000 + consensus.nMinimumChainWork = uint256S("0x000000000000000000000000000000000000000052b2559353df4117b7348b64"); + consensus.defaultAssumeValid = uint256S("0x00000000000000000001a0a448d6cf2546b06801389cc030b2b18c6491266815"); // 804000 /** * The message start string is designed to be unlikely to occur in normal data. 
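
A quick, illustrative way to sanity-check bumps like the mainnet hunk above is to compare the hex values numerically — nMinimumChainWork must only ever increase between releases. This is standalone Python using the values copied from the hunk, not code from the series:

```python
# Mainnet values copied from the hunk above.
OLD_WORK = 0x000000000000000000000000000000000000000044a50fe819c39ad624021859
NEW_WORK = 0x000000000000000000000000000000000000000052b2559353df4117b7348b64

assert NEW_WORK > OLD_WORK, "nMinimumChainWork must not decrease"
print(f"minimum chainwork grew by a factor of {NEW_WORK / OLD_WORK:.3f}")
```
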
@@ -222,8 +222,8 @@ class CTestNetParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1628640000; // August 11th, 2021 consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay - consensus.nMinimumChainWork = uint256S("0x000000000000000000000000000000000000000000000977edb0244170858d07"); - consensus.defaultAssumeValid = uint256S("0x0000000000000021bc50a89cde4870d4a81ffe0153b3c8de77b435a2fd3f6761"); // 2429000 + consensus.nMinimumChainWork = uint256S("0x000000000000000000000000000000000000000000000b6a51f415a67c0da307"); + consensus.defaultAssumeValid = uint256S("0x0000000000000093bcb68c03a9a168ae252572d348a2eaeba2cdf9231d73206f"); // 2500000 pchMessageStart[0] = 0x0b; pchMessageStart[1] = 0x11; @@ -302,8 +302,8 @@ class SigNetParams : public CChainParams { vSeeds.emplace_back("178.128.221.177"); vSeeds.emplace_back("v7ajjeirttkbnt32wpy3c6w3emwnfr3fkla7hpxcfokr3ysd3kqtzmqd.onion:38333"); - consensus.nMinimumChainWork = uint256S("0x000000000000000000000000000000000000000000000000000001899d8142b0"); - consensus.defaultAssumeValid = uint256S("0x0000004429ef154f7e00b4f6b46bfbe2d2678ecd351d95bbfca437ab9a5b84ec"); // 138000 + consensus.nMinimumChainWork = uint256S("0x000000000000000000000000000000000000000000000000000001ad46be4862"); + consensus.defaultAssumeValid = uint256S("0x0000013d778ba3f914530f11f6b69869c9fab54acff85acd7b8201d111f19b7f"); // 150000 m_assumed_blockchain_size = 1; m_assumed_chain_state_size = 0; chainTxData = ChainTxData{ From a8c2e5e556daf2a8c6b013110c802768b3f4b30e Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 4 Oct 2023 17:01:42 +0100 Subject: [PATCH 113/172] kernel: update chainTxData for 26.x --- src/kernel/chainparams.cpp | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index c57e6ea3d5cc8..8b9b92d7256cb 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -177,10 +177,10 @@ class CMainParams : public CChainParams { }; chainTxData = ChainTxData{ - // Data from RPC: getchaintxstats 4096 000000000000000000035c3f0d31e71a5ee24c5aaf3354689f65bd7b07dee632 - .nTime = 1680665245, - .nTxCount = 820876044, - .dTxRate = 3.672283614033389, + // Data from RPC: getchaintxstats 4096 00000000000000000001a0a448d6cf2546b06801389cc030b2b18c6491266815 + .nTime = 1692502494, + .nTxCount = 881818374, + .dTxRate = 5.521964628130412, }; } }; @@ -276,10 +276,10 @@ class CTestNetParams : public CChainParams { }; chainTxData = ChainTxData{ - // Data from RPC: getchaintxstats 4096 0000000000000021bc50a89cde4870d4a81ffe0153b3c8de77b435a2fd3f6761 - .nTime = 1681542696, - .nTxCount = 65345929, - .dTxRate = 0.09855282814711661, + // Data from RPC: getchaintxstats 4096 0000000000000093bcb68c03a9a168ae252572d348a2eaeba2cdf9231d73206f + .nTime = 1694733634, + .nTxCount = 66484552, + .dTxRate = 0.1804908356632494, }; } }; @@ -307,10 +307,10 @@ class SigNetParams : public CChainParams { m_assumed_blockchain_size = 1; m_assumed_chain_state_size = 0; chainTxData = ChainTxData{ - // Data from RPC: getchaintxstats 4096 0000004429ef154f7e00b4f6b46bfbe2d2678ecd351d95bbfca437ab9a5b84ec - .nTime = 1681127428, - .nTxCount = 2226359, - .dTxRate = 0.006424463050600656, + // Data from RPC: getchaintxstats 4096 0000013d778ba3f914530f11f6b69869c9fab54acff85acd7b8201d111f19b7f + .nTime = 1688366339, + .nTxCount = 2262750, + .dTxRate = 0.003414084572046456, }; } else { bin = *options.challenge; From 
f12f92b813cd8c29904f36f8ed7ed74649886897 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 4 Oct 2023 17:10:39 +0100 Subject: [PATCH 114/172] kernel: update m_assumed_* chain params for 26.x --- src/kernel/chainparams.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 8b9b92d7256cb..8563d0e8eba6c 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -118,8 +118,8 @@ class CMainParams : public CChainParams { pchMessageStart[3] = 0xd9; nDefaultPort = 8333; nPruneAfterHeight = 100000; - m_assumed_blockchain_size = 540; - m_assumed_chain_state_size = 7; + m_assumed_blockchain_size = 590; + m_assumed_chain_state_size = 9; genesis = CreateGenesisBlock(1231006505, 2083236893, 0x1d00ffff, 1, 50 * COIN); consensus.hashGenesisBlock = genesis.GetHash(); From b2ede22395ae8ce371433c9611929374dd98908a Mon Sep 17 00:00:00 2001 From: fanquake Date: Thu, 5 Oct 2023 11:36:03 +0100 Subject: [PATCH 115/172] headerssync: update params for 26.x --- contrib/devtools/headerssync-params.py | 4 ++-- src/headerssync.cpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/devtools/headerssync-params.py b/contrib/devtools/headerssync-params.py index f0088d6cb9a36..0198f5db99f8b 100644 --- a/contrib/devtools/headerssync-params.py +++ b/contrib/devtools/headerssync-params.py @@ -12,13 +12,13 @@ # Parameters: # Aim for still working fine at some point in the future. [datetime] -TIME = datetime(2026, 5, 25) +TIME = datetime(2026, 10, 5) # Expected block interval. [timedelta] BLOCK_INTERVAL = timedelta(seconds=600) # The number of headers corresponding to the minchainwork parameter. [headers] -MINCHAINWORK_HEADERS = 784000 +MINCHAINWORK_HEADERS = 804000 # Combined processing bandwidth from all attackers to one victim. [bit/s] # 6 Gbit/s is approximately the speed at which a single thread of a Ryzen 5950X CPU thread can hash diff --git a/src/headerssync.cpp b/src/headerssync.cpp index 1b5d7305e84fa..b885590c56561 100644 --- a/src/headerssync.cpp +++ b/src/headerssync.cpp @@ -13,11 +13,11 @@ // contrib/devtools/headerssync-params.py. //! Store one header commitment per HEADER_COMMITMENT_PERIOD blocks. -constexpr size_t HEADER_COMMITMENT_PERIOD{600}; +constexpr size_t HEADER_COMMITMENT_PERIOD{606}; //! Only feed headers to validation once this many headers on top have been //! received and validated against commitments. 
-constexpr size_t REDOWNLOAD_BUFFER_SIZE{14308}; // 14308/600 = ~23.8 commitments +constexpr size_t REDOWNLOAD_BUFFER_SIZE{14441}; // 14441/606 = ~23.8 commitments // Our memory analysis assumes 48 bytes for a CompressedHeader (so we should // re-calculate parameters if we compress further) From fa28f5a3819a4bb69b046529e05932016273170b Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Mon, 4 Sep 2023 16:43:57 +0200 Subject: [PATCH 116/172] test: Bump walletpassphrase timeouts to avoid intermittent issues --- test/functional/wallet_createwallet.py | 6 +++--- test/functional/wallet_descriptor.py | 4 ++-- test/functional/wallet_dump.py | 2 +- test/functional/wallet_encryption.py | 6 +++--- test/functional/wallet_fundrawtransaction.py | 6 +++--- test/functional/wallet_keypool.py | 6 +++--- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/test/functional/wallet_createwallet.py b/test/functional/wallet_createwallet.py index 75b507c3875b4..eb83e11f3604c 100755 --- a/test/functional/wallet_createwallet.py +++ b/test/functional/wallet_createwallet.py @@ -109,7 +109,7 @@ def run_test(self): assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress) assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress) # Now set a seed and it should work. Wallet should also be encrypted - w4.walletpassphrase('pass', 60) + w4.walletpassphrase("pass", 999000) if self.options.descriptors: w4.importdescriptors([{ 'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/0h/*)'), @@ -142,7 +142,7 @@ def run_test(self): self.nodes[0].createwallet(wallet_name='wblank', disable_private_keys=False, blank=True, passphrase='thisisapassphrase') wblank = node.get_wallet_rpc('wblank') assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", wblank.signmessage, "needanargument", "test") - wblank.walletpassphrase('thisisapassphrase', 60) + wblank.walletpassphrase("thisisapassphrase", 999000) assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getnewaddress) assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getrawchangeaddress) @@ -151,7 +151,7 @@ def run_test(self): self.nodes[0].createwallet(wallet_name='w6', disable_private_keys=False, blank=False, passphrase='thisisapassphrase') w6 = node.get_wallet_rpc('w6') assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", w6.signmessage, "needanargument", "test") - w6.walletpassphrase('thisisapassphrase', 60) + w6.walletpassphrase("thisisapassphrase", 999000) w6.signmessage(w6.getnewaddress('', 'legacy'), "test") w6.keypoolrefill(1) # There should only be 1 key for legacy, 3 for descriptors diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py index 6f563987ccfe0..6af01f8cfd149 100755 --- a/test/functional/wallet_descriptor.py +++ b/test/functional/wallet_descriptor.py @@ -129,7 +129,7 @@ def run_test(self): # Encrypt wallet 0 send_wrpc.encryptwallet('pass') - send_wrpc.walletpassphrase('pass', 10) + send_wrpc.walletpassphrase("pass", 999000) addr = send_wrpc.getnewaddress() info2 = send_wrpc.getaddressinfo(addr) assert info1['hdmasterfingerprint'] != info2['hdmasterfingerprint'] @@ -143,7 +143,7 @@ def run_test(self): send_wrpc.getnewaddress() self.log.info("Test that unlock is needed when deriving only 
hardened keys in an encrypted wallet") - send_wrpc.walletpassphrase('pass', 10) + send_wrpc.walletpassphrase("pass", 999000) send_wrpc.importdescriptors([{ "desc": "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/*h)#y4dfsj7n", "timestamp": "now", diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py index cf20ff1239da8..8c68d03f97197 100755 --- a/test/functional/wallet_dump.py +++ b/test/functional/wallet_dump.py @@ -173,7 +173,7 @@ def run_test(self): # encrypt wallet, restart, unlock and dump self.nodes[0].encryptwallet('test') - self.nodes[0].walletpassphrase('test', 100) + self.nodes[0].walletpassphrase("test", 999000) # Should be a no-op: self.nodes[0].keypoolrefill() self.nodes[0].dumpwallet(wallet_enc_dump) diff --git a/test/functional/wallet_encryption.py b/test/functional/wallet_encryption.py index 88b9ebbddd4ed..e8381ba8f2ecf 100755 --- a/test/functional/wallet_encryption.py +++ b/test/functional/wallet_encryption.py @@ -59,7 +59,7 @@ def run_test(self): assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10) # Test walletlock - self.nodes[0].walletpassphrase(passphrase, 84600) + self.nodes[0].walletpassphrase(passphrase, 999000) sig = self.nodes[0].signmessage(address, msg) assert self.nodes[0].verifymessage(address, sig, msg) self.nodes[0].walletlock() @@ -68,7 +68,7 @@ def run_test(self): # Test passphrase changes self.nodes[0].walletpassphrasechange(passphrase, passphrase2) assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10) - self.nodes[0].walletpassphrase(passphrase2, 10) + self.nodes[0].walletpassphrase(passphrase2, 999000) sig = self.nodes[0].signmessage(address, msg) assert self.nodes[0].verifymessage(address, sig, msg) self.nodes[0].walletlock() @@ -97,7 +97,7 @@ def run_test(self): self.nodes[0].walletpassphrasechange(passphrase2, passphrase_with_nulls) # walletpassphrasechange should not stop at null characters assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase_with_nulls.partition("\0")[0], 10) - self.nodes[0].walletpassphrase(passphrase_with_nulls, 10) + self.nodes[0].walletpassphrase(passphrase_with_nulls, 999000) sig = self.nodes[0].signmessage(address, msg) assert self.nodes[0].verifymessage(address, sig, msg) self.nodes[0].walletlock() diff --git a/test/functional/wallet_fundrawtransaction.py b/test/functional/wallet_fundrawtransaction.py index b1829f42afdce..ca4feefb2b6a2 100755 --- a/test/functional/wallet_fundrawtransaction.py +++ b/test/functional/wallet_fundrawtransaction.py @@ -581,7 +581,7 @@ def test_locked_wallet(self): wallet.encryptwallet("test") if self.options.descriptors: - wallet.walletpassphrase('test', 10) + wallet.walletpassphrase("test", 999000) wallet.importdescriptors([{ 'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPdYeeZbPSKd2KYLmeVKtcFA7kqCxDvDR13MQ6us8HopUR2wLcS2ZKPhLyKsqpDL2FtL73LMHcgoCL7DXsciA8eX8nbjCR2eG/0h/*h)'), 'timestamp': 'now', @@ -619,7 +619,7 @@ def test_locked_wallet(self): assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it.", wallet.fundrawtransaction, rawtx) # Refill the keypool. 
-        wallet.walletpassphrase("test", 100)
+        wallet.walletpassphrase("test", 999000)
         wallet.keypoolrefill(8) #need to refill the keypool to get an internal change address
         wallet.walletlock()
 
@@ -634,7 +634,7 @@ def test_locked_wallet(self):
         assert fundedTx["changepos"] != -1
 
         # Now we need to unlock.
-        wallet.walletpassphrase("test", 600)
+        wallet.walletpassphrase("test", 999000)
         signedTx = wallet.signrawtransactionwithwallet(fundedTx['hex'])
         wallet.sendrawtransaction(signedTx['hex'])
         self.generate(self.nodes[1], 1)
diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py
index a39db3bfb8e04..0ba8a46bae1eb 100755
--- a/test/functional/wallet_keypool.py
+++ b/test/functional/wallet_keypool.py
@@ -85,7 +85,7 @@ def run_test(self):
         assert_raises_rpc_error(-12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress)
 
         # put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min)
-        nodes[0].walletpassphrase('test', 12000)
+        nodes[0].walletpassphrase("test", 999000)
         nodes[0].keypoolrefill(6)
         nodes[0].walletlock()
         wi = nodes[0].getwalletinfo()
@@ -131,7 +131,7 @@ def run_test(self):
             nodes[0].getnewaddress()
         assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getnewaddress)
 
-        nodes[0].walletpassphrase('test', 100)
+        nodes[0].walletpassphrase("test", 999000)
         nodes[0].keypoolrefill(100)
         wi = nodes[0].getwalletinfo()
         if self.options.descriptors:
@@ -170,7 +170,7 @@ def run_test(self):
         else:
             res = w2.importmulti([{'desc': desc, 'timestamp': 'now'}])
         assert_equal(res[0]['success'], True)
-        w1.walletpassphrase('test', 100)
+        w1.walletpassphrase("test", 999000)
 
         res = w1.sendtoaddress(address=address, amount=0.00010000)
         self.generate(nodes[0], 1)

From fac88a874f57bfbedbaffaf43a01b3a74be8d875 Mon Sep 17 00:00:00 2001
From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz>
Date: Thu, 5 Oct 2023 13:45:51 +0200
Subject: [PATCH 117/172] ci: Avoid cache depends/build

---
 ci/test/04_install.sh | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index 01faf9fff96be..b5a84ae08d611 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -21,6 +21,8 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
     "${BASE_READ_ONLY_DIR}"
   docker volume create "${CONTAINER_NAME}_ccache" || true
   docker volume create "${CONTAINER_NAME}_depends" || true
+  docker volume create "${CONTAINER_NAME}_depends_sources" || true
+  docker volume create "${CONTAINER_NAME}_depends_SDKs_android" || true
  docker volume create "${CONTAINER_NAME}_previous_releases" || true
 
   if [ -n "${RESTART_CI_DOCKER_BEFORE_RUN}" ] ; then
@@ -36,7 +38,9 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
   CI_CONTAINER_ID=$(docker run --cap-add LINUX_IMMUTABLE $CI_CONTAINER_CAP --rm --interactive --detach --tty \
   --mount "type=bind,src=$BASE_READ_ONLY_DIR,dst=$BASE_READ_ONLY_DIR,readonly" \
   --mount "type=volume,src=${CONTAINER_NAME}_ccache,dst=$CCACHE_DIR" \
-  --mount "type=volume,src=${CONTAINER_NAME}_depends,dst=$DEPENDS_DIR" \
+  --mount "type=volume,src=${CONTAINER_NAME}_depends,dst=$DEPENDS_DIR/built" \
+  --mount "type=volume,src=${CONTAINER_NAME}_depends_sources,dst=$DEPENDS_DIR/sources" \
+  --mount "type=volume,src=${CONTAINER_NAME}_depends_SDKs_android,dst=$DEPENDS_DIR/SDKs/android" \
   --mount "type=volume,src=${CONTAINER_NAME}_previous_releases,dst=$PREVIOUS_RELEASES_DIR" \
   --env-file /tmp/env \
   --name "$CONTAINER_NAME" \

From 0f83ab407ec5aa0591c54c03bcf408c7f2f0a192 Mon Sep 17 00:00:00 2001
From: furszy
Date: Thu, 10 Aug 2023 17:55:33 -0300
Subject: [PATCH 118/172] test: display abrupt shutdown errors in console
 output

This makes it easier to debug errors in the CI environment, particularly
in scenarios where it is not immediately clear what happened or which
node crashed (or shut down abruptly).
---
 test/functional/test_framework/test_node.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 544a81602ebb1..f599043234f7a 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -232,8 +232,13 @@ def wait_for_rpc_connection(self):
         poll_per_s = 4
         for _ in range(poll_per_s * self.rpc_timeout):
             if self.process.poll() is not None:
+                # Attach abrupt shutdown error/s to the exception message
+                self.stderr.seek(0)
+                str_error = ''.join(line.decode('utf-8') for line in self.stderr)
+                str_error += "************************\n" if str_error else ''
+
                 raise FailedToStartError(self._node_msg(
-                    'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
+                    f'bitcoind exited with status {self.process.returncode} during initialization. {str_error}'))
             try:
                 rpc = get_rpc_proxy(
                     rpc_url(self.datadir, self.index, self.chain, self.rpchost),

From a9ef702a877a964bac724a56e2c0b5bee4ea7586 Mon Sep 17 00:00:00 2001
From: Ryan Ofsky
Date: Wed, 4 Oct 2023 10:58:47 -0400
Subject: [PATCH 119/172] assumeutxo: change getchainstates RPC to return a
 list of chainstates

The current getchainstates RPC returns "normal" and "snapshot" fields,
which is not ideal because it requires new "normal" and "snapshot"
terms to be defined, and the definitions are not really consistent with
the internal code. (In the RPC interface, the "snapshot" chainstate
becomes the "normal" chainstate after it is validated, while in
internal code there is no "normal chainstate" and the "snapshot
chainstate" is still called that temporarily after it is validated.)

The current getchainstates RPC is also awkward to use if you want
information about the most-work chainstate, because you have to look at
the "snapshot" field if it exists, and otherwise fall back to the
"normal" field.

Fix these issues by having getchainstates just return a flat list of
chainstates ordered by work, and adding a new chainstate "validated"
field alongside the existing "snapshot_blockhash" so it is explicit if
a chainstate was originally loaded from a snapshot, and whether the
snapshot has been validated.
---
 src/rpc/blockchain.cpp                | 22 ++++++------
 test/functional/feature_assumeutxo.py | 47 +++++++++++++--------------
 2 files changed, 32 insertions(+), 37 deletions(-)

diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 0f4941b40ccfd..abd723ee56223 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -2807,6 +2807,7 @@ const std::vector<RPCResult> RPCHelpForChainstate{
     {RPCResult::Type::STR_HEX, "snapshot_blockhash", /*optional=*/true, "the base block of the snapshot this chainstate is based on, if any"},
     {RPCResult::Type::NUM, "coins_db_cache_bytes", "size of the coinsdb cache"},
     {RPCResult::Type::NUM, "coins_tip_cache_bytes", "size of the coinstip cache"},
+    {RPCResult::Type::BOOL, "validated", "whether the chainstate is fully validated. True if all blocks in the chainstate were validated, false if the chain is based on a snapshot and the snapshot has not yet been validated."},
 };
 
 static RPCHelpMan getchainstates()
@@ -2818,8 +2819,7 @@ return RPCHelpMan{
         RPCResult{
             RPCResult::Type::OBJ, "", "", {
                 {RPCResult::Type::NUM, "headers", "the number of headers seen so far"},
-                {RPCResult::Type::OBJ, "normal", /*optional=*/true, "fully validated chainstate containing blocks this node has validated starting from the genesis block", RPCHelpForChainstate},
-                {RPCResult::Type::OBJ, "snapshot", /*optional=*/true, "only present if an assumeutxo snapshot is loaded. Partially validated chainstate containing blocks this node has validated starting from the snapshot. After the snapshot is validated (when the 'normal' chainstate advances far enough to validate it), this chainstate will replace and become the 'normal' chainstate.", RPCHelpForChainstate},
+                {RPCResult::Type::ARR, "chainstates", "list of the chainstates ordered by work, with the most-work (active) chainstate last", {{RPCResult::Type::OBJ, "", "", RPCHelpForChainstate},}},
             }
         },
         RPCExamples{
@@ -2834,7 +2834,7 @@ return RPCHelpMan{
     NodeContext& node = EnsureAnyNodeContext(request.context);
     ChainstateManager& chainman = *node.chainman;
 
-    auto make_chain_data = [&](const Chainstate& cs) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
+    auto make_chain_data = [&](const Chainstate& cs, bool validated) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
         AssertLockHeld(::cs_main);
         UniValue data(UniValue::VOBJ);
         if (!cs.m_chain.Tip()) {
@@ -2852,20 +2852,18 @@ return RPCHelpMan{
         if (cs.m_from_snapshot_blockhash) {
             data.pushKV("snapshot_blockhash", cs.m_from_snapshot_blockhash->ToString());
         }
+        data.pushKV("validated", validated);
         return data;
     };
 
-    if (chainman.GetAll().size() > 1) {
-        for (Chainstate* chainstate : chainman.GetAll()) {
-            obj.pushKV(
-                chainstate->m_from_snapshot_blockhash ? "snapshot" : "normal",
-                make_chain_data(*chainstate));
-        }
-    } else {
-        obj.pushKV("normal", make_chain_data(chainman.ActiveChainstate()));
-    }
     obj.pushKV("headers", chainman.m_best_header ? chainman.m_best_header->nHeight : -1);
+    const auto& chainstates = chainman.GetAll();
+    UniValue obj_chainstates{UniValue::VARR};
+    for (Chainstate* cs : chainstates) {
+        obj_chainstates.push_back(make_chain_data(*cs, !cs->m_from_snapshot_blockhash || chainstates.size() == 1));
+    }
+    obj.pushKV("chainstates", std::move(obj_chainstates));
     return obj;
 }
 };
diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py
index be0715df32cbd..15cacc204c10e 100755
--- a/test/functional/feature_assumeutxo.py
+++ b/test/functional/feature_assumeutxo.py
@@ -128,10 +128,13 @@ def no_sync():
         assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
         assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
 
-        monitor = n1.getchainstates()
-        assert_equal(monitor['normal']['blocks'], START_HEIGHT)
-        assert_equal(monitor['snapshot']['blocks'], SNAPSHOT_BASE_HEIGHT)
-        assert_equal(monitor['snapshot']['snapshot_blockhash'], dump_output['base_hash'])
+        normal, snapshot = n1.getchainstates()["chainstates"]
+        assert_equal(normal['blocks'], START_HEIGHT)
+        assert_equal(normal.get('snapshot_blockhash'), None)
+        assert_equal(normal['validated'], True)
+        assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash'])
+        assert_equal(snapshot['validated'], False)
 
         assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT)
 
@@ -159,20 +162,11 @@ def no_sync():
         self.connect_nodes(0, 1)
 
         self.log.info(f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})")
-
-        def check_for_final_height():
-            chainstates = n1.getchainstates()
-            # The background validation may have completed before we run our first
-            # check, so accept a final blockheight from either chainstate type.
-            cs = chainstates.get('snapshot') or chainstates.get('normal')
-            return cs['blocks'] == FINAL_HEIGHT
-
-        wait_until_helper(check_for_final_height)
+        wait_until_helper(lambda: n1.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
         self.sync_blocks(nodes=(n0, n1))
 
         self.log.info("Ensuring background validation completes")
-        # N.B.: the `snapshot` key disappears once the background validation is complete.
-        wait_until_helper(lambda: not n1.getchainstates().get('snapshot'))
+        wait_until_helper(lambda: len(n1.getchainstates()['chainstates']) == 1)
 
         # Ensure indexes have synced.
         completed_idx_state = {
@@ -189,8 +183,8 @@ def check_for_final_height():
 
             assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT)
 
-            assert_equal(n.getchainstates()['normal']['blocks'], FINAL_HEIGHT)
-            assert_equal(n.getchainstates().get('snapshot'), None)
+            chainstate, = n.getchainstates()['chainstates']
+            assert_equal(chainstate['blocks'], FINAL_HEIGHT)
 
             if i != 0:
                 # Ensure indexes have synced for the assumeutxo node
@@ -208,17 +202,20 @@ def check_for_final_height():
         assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT)
         assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT)
 
-        monitor = n2.getchainstates()
-        assert_equal(monitor['normal']['blocks'], START_HEIGHT)
-        assert_equal(monitor['snapshot']['blocks'], SNAPSHOT_BASE_HEIGHT)
-        assert_equal(monitor['snapshot']['snapshot_blockhash'], dump_output['base_hash'])
+        normal, snapshot = n2.getchainstates()['chainstates']
+        assert_equal(normal['blocks'], START_HEIGHT)
+        assert_equal(normal.get('snapshot_blockhash'), None)
+        assert_equal(normal['validated'], True)
+        assert_equal(snapshot['blocks'], SNAPSHOT_BASE_HEIGHT)
+        assert_equal(snapshot['snapshot_blockhash'], dump_output['base_hash'])
+        assert_equal(snapshot['validated'], False)
 
         self.connect_nodes(0, 2)
-        wait_until_helper(lambda: n2.getchainstates()['snapshot']['blocks'] == FINAL_HEIGHT)
+        wait_until_helper(lambda: n2.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
         self.sync_blocks()
 
         self.log.info("Ensuring background validation completes")
-        wait_until_helper(lambda: not n2.getchainstates().get('snapshot'))
+        wait_until_helper(lambda: len(n2.getchainstates()['chainstates']) == 1)
 
         completed_idx_state = {
             'basic block filter index': COMPLETE_IDX,
@@ -234,8 +231,8 @@ def check_for_final_height():
 
             assert_equal(n.getblockchaininfo()["blocks"], FINAL_HEIGHT)
 
-            assert_equal(n.getchainstates()['normal']['blocks'], FINAL_HEIGHT)
-            assert_equal(n.getchainstates().get('snapshot'), None)
+            chainstate, = n.getchainstates()['chainstates']
+            assert_equal(chainstate['blocks'], FINAL_HEIGHT)
 
             if i != 0:
                 # Ensure indexes have synced for the assumeutxo node

From fa071aeb61dcc42cd122d3fb1abe4b9c238f8010 Mon Sep 17 00:00:00 2001
From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz>
Date: Thu, 5 Oct 2023 15:54:19 +0200
Subject: [PATCH 120/172] wallet: No BDB creation, unless -deprecatedrpc=create_bdb

---
 src/wallet/rpc/wallet.cpp              | 9 +++++++--
 test/functional/test_framework/util.py | 1 +
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/wallet/rpc/wallet.cpp b/src/wallet/rpc/wallet.cpp
index 3774e6a3ef66f..164ce9afedb05 100644
--- a/src/wallet/rpc/wallet.cpp
+++ b/src/wallet/rpc/wallet.cpp
@@ -343,7 +343,7 @@ static RPCHelpMan createwallet()
             {"passphrase", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "Encrypt the wallet with this passphrase."},
             {"avoid_reuse", RPCArg::Type::BOOL, RPCArg::Default{false}, "Keep track of coin reuse, and treat dirty and clean coins differently with privacy considerations in mind."},
             {"descriptors", RPCArg::Type::BOOL, RPCArg::Default{true}, "Create a native descriptor wallet. The wallet will use descriptors internally to handle address creation."
-                                                                       " Setting to \"false\" will create a legacy wallet; however, the legacy wallet type is being deprecated and"
+                                                                       " Setting to \"false\" will create a legacy wallet; this is only possible with the -deprecatedrpc=create_bdb setting because the legacy wallet type is being deprecated and"
                                                                        " support for creating and opening legacy wallets will be removed in the future."},
             {"load_on_startup", RPCArg::Type::BOOL, RPCArg::Optional::OMITTED, "Save wallet name to persistent settings and load on startup. True to add wallet to startup list, false to remove, null to leave unchanged."},
             {"external_signer", RPCArg::Type::BOOL, RPCArg::Default{false}, "Use an external signer such as a hardware wallet. Requires -signer to be configured. Wallet creation will fail if keys cannot be fetched. Requires disable_private_keys and descriptors set to true."},
@@ -389,11 +389,16 @@ static RPCHelpMan createwallet()
     if (!request.params[4].isNull() && request.params[4].get_bool()) {
         flags |= WALLET_FLAG_AVOID_REUSE;
     }
-    if (request.params[5].isNull() || request.params[5].get_bool()) {
+    if (self.Arg<bool>(5)) {
 #ifndef USE_SQLITE
         throw JSONRPCError(RPC_WALLET_ERROR, "Compiled without sqlite support (required for descriptor wallets)");
 #endif
         flags |= WALLET_FLAG_DESCRIPTORS;
+    } else {
+        if (!context.chain->rpcEnableDeprecated("create_bdb")) {
+            throw JSONRPCError(RPC_WALLET_ERROR, "BDB wallet creation is deprecated and will be removed in a future release."
+                                                 " In this release it can be re-enabled temporarily with the -deprecatedrpc=create_bdb setting.");
+        }
     }
     if (!request.params[7].isNull() && request.params[7].get_bool()) {
 #ifdef ENABLE_EXTERNAL_SIGNER
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 9143397042fb0..3bd18c26d856e 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -404,6 +404,7 @@ def write_config(config_path, *, n, chain, extra_config="", disable_autoconnect=
         f.write("upnp=0\n")
         f.write("natpmp=0\n")
         f.write("shrinkdebugfile=0\n")
+        f.write("deprecatedrpc=create_bdb\n")  # Required to run the tests
         # To improve SQLite wallet performance so that the tests don't timeout, use -unsafesqlitesync
        f.write("unsafesqlitesync=1\n")
         if disable_autoconnect:

From 88c8e3a0e4d6bee015a348536c6e12a2c7835896 Mon Sep 17 00:00:00 2001
From: Ryan Ofsky
Date: Tue, 3 Oct 2023 08:43:16 -0400
Subject: [PATCH 121/172] github actions: Fix test-one-commit when parent of
 head is merge commit

Instead of figuring out the commit *after* the last merge and rebasing
on that with a ~1 suffix, just figure out the last merge commit directly
and rebase on it. This way, if HEAD happens to be a merge commit, the
rebase just succeeds immediately without blank variables or errors.

From https://github.com/bitcoin/bitcoin/pull/28497#issuecomment-1743430631:

The problem is that the PR only contains one commit after the last
merge, so the job _should_ be skipped, but the `pull_request.commits != 1`
check is not smart enough to skip it, because the PR is based on another
PR and has merge ancestor commits.

So specifically what happens is that after HEAD~ is checked out, the new
HEAD is a merge commit, so the range `$(git log --merges -1 --format=%H)..HEAD`
is equivalent to HEAD..HEAD, which is empty, so the
`COMMIT_AFTER_LAST_MERGE` variable is empty and the rebase command fails.
---
 .github/workflows/ci.yml | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 28a027b780639..cc003d026c983 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -31,19 +31,41 @@ jobs:
     env:
       MAX_COUNT: 6
     steps:
-      - run: echo "FETCH_DEPTH=$((${{ github.event.pull_request.commits }} + 2))" >> "$GITHUB_ENV"
+      - name: Determine fetch depth
+        run: echo "FETCH_DEPTH=$((${{ github.event.pull_request.commits }} + 2))" >> "$GITHUB_ENV"
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
          fetch-depth: ${{ env.FETCH_DEPTH }}
-      - run: |
+      - name: Determine commit range
+        run: |
+          # Checkout HEAD~ and find the test base commit
+          # Checkout HEAD~ because it would be wasteful to rerun tests on the PR
+          # head commit that are already run by other jobs.
           git checkout HEAD~
-          echo "COMMIT_AFTER_LAST_MERGE=$(git log $(git log --merges -1 --format=%H)..HEAD --format=%H --max-count=${{ env.MAX_COUNT }} | tail -1)" >> "$GITHUB_ENV"
+          # Figure out test base commit by listing ancestors of HEAD, excluding
+          # ancestors of the most recent merge commit, limiting the list to the
+          # newest MAX_COUNT ancestors, ordering it from oldest to newest, and
+          # taking the first one.
+          #
+          # If the branch contains up to MAX_COUNT ancestor commits after the
+          # most recent merge commit, all of those commits will be tested. If it
+          # contains more, only the most recent MAX_COUNT commits will be
+          # tested.
+          #
+          # In the command below, the ^@ suffix is used to refer to all parents
+          # of the merge commit as described in:
+          # https://git-scm.com/docs/git-rev-parse#_other_rev_parent_shorthand_notations
+          # and the ^ prefix is used to exclude these parents and all their
+          # ancestors from the rev-list output as described in:
+          # https://git-scm.com/docs/git-rev-list
+          echo "TEST_BASE=$(git rev-list -n$((${{ env.MAX_COUNT }} + 1)) --reverse HEAD ^$(git rev-list -n1 --merges HEAD)^@ | head -1)" >> "$GITHUB_ENV"
       - run: sudo apt install clang ccache build-essential libtool autotools-dev automake pkg-config bsdmainutils python3-zmq libevent-dev libboost-dev libsqlite3-dev libdb++-dev systemtap-sdt-dev libminiupnpc-dev libnatpmp-dev libqt5gui5 libqt5core5a libqt5dbus5 qttools5-dev qttools5-dev-tools qtwayland5 libqrencode-dev -y
       - name: Compile and run tests
         run: |
+          # Run tests on commits after the last merge commit and before the PR head commit
           # Use clang++, because it is a bit faster and uses less memory than g++
-          git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && ./autogen.sh && CC=clang CXX=clang++ ./configure && make clean && make -j $(nproc) check && ./test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.COMMIT_AFTER_LAST_MERGE }}~1
+          git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && ./autogen.sh && CC=clang CXX=clang++ ./configure && make clean && make -j $(nproc) check && ./test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.TEST_BASE }}
 
   macos-native-x86_64:
     name: 'macOS 13 native, x86_64, no depends, sqlite only, gui'

From bd71f03df75d2c17926b6d575ffa886daa334e3a Mon Sep 17 00:00:00 2001
From: fanquake
Date: Thu, 5 Oct 2023 13:40:40 +0100
Subject: [PATCH 122/172] doc: update example pulls in release-process.md

---
 doc/release-process.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/release-process.md b/doc/release-process.md
index 468efeb7e135a..c70b0194abdf9 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -28,7 +28,7 @@ Release Process
 
 #### Before branch-off
 
-* Update hardcoded [seeds](/contrib/seeds/README.md), see [this pull request](https://github.com/bitcoin/bitcoin/pull/7415) for an example.
+* Update hardcoded [seeds](/contrib/seeds/README.md), see [this pull request](https://github.com/bitcoin/bitcoin/pull/27488) for an example.
 * Update the following variables in [`src/kernel/chainparams.cpp`](/src/kernel/chainparams.cpp) for mainnet, testnet, and signet:
   - `m_assumed_blockchain_size` and `m_assumed_chain_state_size` with the current size plus some overhead (see
     [this](#how-to-calculate-assumed-blockchain-and-chain-state-size) for information on how to calculate them).
@@ -36,7 +36,7 @@ Release Process
     that causes rejection of blocks in the past history.
   - `chainTxData` with statistics about the transaction count and rate. Use the output of the `getchaintxstats` RPC with an
     `nBlocks` of 4096 (28 days) and a `bestblockhash` of RPC `getbestblockhash`; see
-    [this pull request](https://github.com/bitcoin/bitcoin/pull/20263) for an example. Reviewers can verify the results by running
+    [this pull request](https://github.com/bitcoin/bitcoin/pull/28591) for an example. Reviewers can verify the results by running
     `getchaintxstats <window_block_count> <window_final_block_hash>` with the `window_block_count` and `window_final_block_hash` from your output.
  - `defaultAssumeValid` with the output of RPC `getblockhash` using the `height` of `window_final_block_height` above
    (and update the block height comment with that height), taking into account the following:
@@ -45,7 +45,7 @@ Release Process
   - `nMinimumChainWork` with the "chainwork" value of RPC `getblockheader` using the same height as that selected for the previous step.
 * Consider updating the headers synchronization tuning parameters to account for the chainparams updates.
   The optimal values change very slowly, so this isn't strictly necessary every release, but doing so doesn't hurt.
-  - Update configuration variables in [`contrib/devtools/headerssync-params.py`](contrib/devtools/headerssync-params.py):
+  - Update configuration variables in [`contrib/devtools/headerssync-params.py`](/contrib/devtools/headerssync-params.py):
    - Set `TIME` to the software's expected supported lifetime -- after this time, its ability to defend against a high bandwidth timewarp attacker will begin to degrade.
    - Set `MINCHAINWORK_HEADERS` to the height used for the `nMinimumChainWork` calculation above.
    - Check that the other variables still look reasonable.

From 05af4dfa50c229c8533d9a71e046c9387e1cdb27 Mon Sep 17 00:00:00 2001
From: Andrew Chow
Date: Thu, 5 Oct 2023 19:20:30 -0400
Subject: [PATCH 123/172] test: Use feerate higher than minrelay fee in
 wallet_fundraw

The external input weight test in wallet_fundrawtransaction.py made
transactions at the minimum relay fee. However, because ECDSA sometimes
produces a shorter signature than expected, the size estimate (and
therefore the funded fee) can end up a little too low, which results in
the final transaction being under the min relay fee. We can compensate
for this by using a feerate higher than the minrelayfee, as the actual
feerate itself does not matter in this test.
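As a minimal sketch of the fix in the test's own style (wallet, raw_tx,
ext_utxo and input_weight stand for objects the test already defines;
2 sat/vB is simply a value safely above the 1 sat/vB minimum):

    # Fund at an explicit feerate above the minimum relay feerate, so the
    # final transaction stays relayable even when a signature turns out one
    # byte shorter than the estimate used for the fee calculation.
    input_weights = [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}]
    funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=input_weights, fee_rate=2)  # sat/vB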
---
 test/functional/wallet_fundrawtransaction.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/test/functional/wallet_fundrawtransaction.py b/test/functional/wallet_fundrawtransaction.py
index ca4feefb2b6a2..9b125d998b0d6 100755
--- a/test/functional/wallet_fundrawtransaction.py
+++ b/test/functional/wallet_fundrawtransaction.py
@@ -1063,19 +1063,19 @@ def test_external_inputs(self):
         high_input_weight = input_weight * 2
 
         # Funding should also work if the input weight is provided
-        funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}])
+        funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}], fee_rate=2)
         signed_tx = wallet.signrawtransactionwithwallet(funded_tx["hex"])
         signed_tx = self.nodes[0].signrawtransactionwithwallet(signed_tx["hex"])
         assert_equal(self.nodes[0].testmempoolaccept([signed_tx["hex"]])[0]["allowed"], True)
         assert_equal(signed_tx["complete"], True)
         # Reducing the weight should have a lower fee
-        funded_tx2 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}])
+        funded_tx2 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}], fee_rate=2)
         assert_greater_than(funded_tx["fee"], funded_tx2["fee"])
         # Increasing the weight should have a higher fee
-        funded_tx2 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}])
+        funded_tx2 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], fee_rate=2)
         assert_greater_than(funded_tx2["fee"], funded_tx["fee"])
         # The provided weight should override the calculated weight when solving data is provided
-        funded_tx3 = wallet.fundrawtransaction(raw_tx, solving_data={"descriptors": [desc]}, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}])
+        funded_tx3 = wallet.fundrawtransaction(raw_tx, solving_data={"descriptors": [desc]}, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], fee_rate=2)
         assert_equal(funded_tx2["fee"], funded_tx3["fee"])
         # The feerate should be met
         funded_tx4 = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], fee_rate=10)
@@ -1085,8 +1085,8 @@ def test_external_inputs(self):
         assert_fee_amount(funded_tx4["fee"], tx4_vsize, Decimal(0.0001))
 
         # Funding with weight at csuint boundaries should not cause problems
-        funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 255}])
-        funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 65539}])
+        funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 255}], fee_rate=2)
+        funded_tx = wallet.fundrawtransaction(raw_tx, input_weights=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 65539}], fee_rate=2)
 
         self.nodes[2].unloadwallet("extfund")

From 0a39b8cbd88e9a496823b36feed77d137ccd894c Mon Sep 17 00:00:00 2001
From: Fabian Jahr
Date: Tue, 3 Oct 2023 00:18:53 +0200
Subject: [PATCH 124/172] validation: remove unused mempool param in
 DetectSnapshotChainstate

---
 src/node/chainstate.cpp | 2 +-
 src/validation.cpp      | 2 +-
 src/validation.h        | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp
index 16ca1d9156ba8..eb1994177a109 100644
--- a/src/node/chainstate.cpp
+++ b/src/node/chainstate.cpp
@@ -185,7 +185,7 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize
     chainman.InitializeChainstate(options.mempool);
 
     // Load a chain created from a UTXO snapshot, if any exist.
-    bool has_snapshot = chainman.DetectSnapshotChainstate(options.mempool);
+    bool has_snapshot = chainman.DetectSnapshotChainstate();
 
     if (has_snapshot && (options.reindex || options.reindex_chainstate)) {
         LogPrintf("[snapshot] deleting snapshot chainstate due to reindexing\n");
diff --git a/src/validation.cpp b/src/validation.cpp
index e24c187591a8e..27e1a9785b3a9 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -5768,7 +5768,7 @@ ChainstateManager::~ChainstateManager()
     m_versionbitscache.Clear();
 }
 
-bool ChainstateManager::DetectSnapshotChainstate(CTxMemPool* mempool)
+bool ChainstateManager::DetectSnapshotChainstate()
 {
     assert(!m_snapshot_chainstate);
     std::optional<fs::path> path = node::FindSnapshotChainstateDir(m_options.datadir);
diff --git a/src/validation.h b/src/validation.h
index 94a00e44a4eed..33d97f0b2098c 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -1203,7 +1203,7 @@ class ChainstateManager
 
     //! When starting up, search the datadir for a chainstate based on a UTXO
     //! snapshot that is in the process of being validated.
-    bool DetectSnapshotChainstate(CTxMemPool* mempool) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+    bool DetectSnapshotChainstate() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     void ResetChainstates() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);

From a47fbe7d49e8921214ac159c558ff4ca19f98dce Mon Sep 17 00:00:00 2001
From: Fabian Jahr
Date: Tue, 3 Oct 2023 00:22:37 +0200
Subject: [PATCH 125/172] doc: Add and edit some comments around assumeutxo

Co-authored-by: Ryan Ofsky
---
 src/net_processing.cpp    |  2 ++
 src/node/blockstorage.cpp |  5 +++--
 src/validation.cpp        |  4 ++--
 src/validation.h          | 18 ++++++++----------
 4 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index a8d1553eed688..bebbc66444163 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1937,6 +1937,8 @@ void PeerManagerImpl::BlockConnected(
         }
     }
 
+    // The following task can be skipped since we don't maintain a mempool for
+    // the ibd/background chainstate.
     if (role == ChainstateRole::BACKGROUND) {
         return;
     }
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 706b62ea9bc76..6e4e018b4a061 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -761,9 +761,10 @@ bool BlockManager::FlushChainstateBlockFile(int tip_height)
 {
     LOCK(cs_LastBlockFile);
     auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
+    // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
+    // but no blocks past the snapshot height have been written yet, so there
+    // is no data associated with the chainstate, and it is safe not to flush.
     if (cursor) {
-        // The cursor may not exist after a snapshot has been loaded but before any
-        // blocks have been downloaded.
         return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
     }
     return false;
diff --git a/src/validation.cpp b/src/validation.cpp
index 27e1a9785b3a9..07e8b9152b6bc 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -68,8 +68,8 @@
 #include
 #include
 #include
-#include
 #include
+#include
 
 using kernel::CCoinsStats;
 using kernel::CoinStatsHashType;
@@ -5367,7 +5367,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot(
     // ActivateSnapshot(), but is done so that we avoid doing the long work of staging
     // a snapshot that isn't actually usable.
     if (WITH_LOCK(::cs_main, return !CBlockIndexWorkComparator()(ActiveTip(), snapshot_start_block))) {
-        LogPrintf("[snapshot] activation failed - height does not exceed active chainstate\n");
+        LogPrintf("[snapshot] activation failed - work does not exceed active chainstate\n");
         return false;
     }
 
diff --git a/src/validation.h b/src/validation.h
index 33d97f0b2098c..7ce60da6340b1 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -836,9 +836,10 @@ class ChainstateManager
     //! Once this pointer is set to a corresponding chainstate, it will not
     //! be reset until init.cpp:Shutdown().
     //!
-    //! This is especially important when, e.g., calling ActivateBestChain()
-    //! on all chainstates because we are not able to hold ::cs_main going into
-    //! that call.
+    //! It is important for the pointer to not be deleted until shutdown,
+    //! because cs_main is not always held when the pointer is accessed, for
+    //! example when calling ActivateBestChain, so there's no way you could
+    //! prevent code from using the pointer while deleting it.
     std::unique_ptr<Chainstate> m_ibd_chainstate GUARDED_BY(::cs_main);
 
     //! A chainstate initialized on the basis of a UTXO snapshot. If this is
     //! non-null, it is always our active chainstate.
     //!
     //! Once this pointer is set to a corresponding chainstate, it will not
     //! be reset until init.cpp:Shutdown().
     //!
-    //! This is especially important when, e.g., calling ActivateBestChain()
-    //! on all chainstates because we are not able to hold ::cs_main going into
-    //! that call.
+    //! It is important for the pointer to not be deleted until shutdown,
+    //! because cs_main is not always held when the pointer is accessed, for
+    //! example when calling ActivateBestChain, so there's no way you could
+    //! prevent code from using the pointer while deleting it.
     std::unique_ptr<Chainstate> m_snapshot_chainstate GUARDED_BY(::cs_main);
 
     //! Points to either the ibd or snapshot chainstate; indicates our
     //! most-work chain.
-    //!
-    //! This is especially important when, e.g., calling ActivateBestChain()
-    //! on all chainstates because we are not able to hold ::cs_main going into
-    //! that call.
     Chainstate* m_active_chainstate GUARDED_BY(::cs_main) {nullptr};
 
     CBlockIndex* m_best_invalid GUARDED_BY(::cs_main){nullptr};

From 4e915e926bccbc9bdd61933ce44e87f2b4173b30 Mon Sep 17 00:00:00 2001
From: Fabian Jahr
Date: Tue, 3 Oct 2023 00:55:10 +0200
Subject: [PATCH 126/172] test: Improvements to feature_assumeutxo

- Remove usage of the internal wait_until_helper function
- Use the framework's self.no_op instead of the new no_sync function

Co-authored-by: Andrew Chow
---
 test/functional/feature_assumeutxo.py | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py
index 15cacc204c10e..6c09a6f5e751d 100755
--- a/test/functional/feature_assumeutxo.py
+++ b/test/functional/feature_assumeutxo.py
@@ -36,7 +36,7 @@
 """
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal, wait_until_helper
+from test_framework.util import assert_equal
 
 START_HEIGHT = 199
 SNAPSHOT_BASE_HEIGHT = 299
@@ -80,16 +80,13 @@ def run_test(self):
 
         self.sync_blocks()
 
-        def no_sync():
-            pass
-
         # Generate a series of blocks that `n0` will have in the snapshot,
         # but that n1 doesn't yet see. In order for the snapshot to activate,
         # though, we have to ferry over the new headers to n1 so that it
         # isn't waiting forever to see the header of the snapshot's base block
         # while disconnected from n0.
         for i in range(100):
-            self.generate(n0, nblocks=1, sync_fun=no_sync)
+            self.generate(n0, nblocks=1, sync_fun=self.no_op)
             newblock = n0.getblock(n0.getbestblockhash(), 0)
 
             # make n1 aware of the new header, but don't give it the block.
@@ -116,7 +113,7 @@ def run_test(self):
 
         # Mine more blocks on top of the snapshot that n1 hasn't yet seen. This
         # will allow us to test n1's sync-to-tip on top of a snapshot.
-        self.generate(n0, nblocks=100, sync_fun=no_sync)
+        self.generate(n0, nblocks=100, sync_fun=self.no_op)
 
         assert_equal(n0.getblockcount(), FINAL_HEIGHT)
         assert_equal(n1.getblockcount(), START_HEIGHT)
@@ -159,11 +159,11 @@ def run_test(self):
         self.connect_nodes(0, 1)
 
         self.log.info(f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})")
-        wait_until_helper(lambda: n1.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
+        self.wait_until(lambda: n1.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
         self.sync_blocks(nodes=(n0, n1))
 
         self.log.info("Ensuring background validation completes")
-        wait_until_helper(lambda: len(n1.getchainstates()['chainstates']) == 1)
+        self.wait_until(lambda: len(n1.getchainstates()['chainstates']) == 1)
 
         # Ensure indexes have synced.
         completed_idx_state = {
@@ -211,11 +208,11 @@ def no_sync():
         assert_equal(snapshot['validated'], False)
 
         self.connect_nodes(0, 2)
-        wait_until_helper(lambda: n2.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
+        self.wait_until(lambda: n2.getchainstates()['chainstates'][-1]['blocks'] == FINAL_HEIGHT)
         self.sync_blocks()
 
         self.log.info("Ensuring background validation completes")
-        wait_until_helper(lambda: len(n2.getchainstates()['chainstates']) == 1)
+        self.wait_until(lambda: len(n2.getchainstates()['chainstates']) == 1)
 
         completed_idx_state = {
             'basic block filter index': COMPLETE_IDX,
@@ -234,12 +231,12 @@ def no_sync():
         self.restart_node(2, extra_args=[
             '-reindex-chainstate=1', *self.extra_args[2]])
         assert_equal(n2.getblockchaininfo()["blocks"], FINAL_HEIGHT)
-        wait_until_helper(lambda: n2.getblockcount() == FINAL_HEIGHT)
+        self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT)
 
         self.log.info("Test -reindex of an assumeutxo-synced node")
         self.restart_node(2, extra_args=['-reindex=1', *self.extra_args[2]])
         self.connect_nodes(0, 2)
-        wait_until_helper(lambda: n2.getblockcount() == FINAL_HEIGHT)
+        self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT)
 
 
 if __name__ == '__main__':

From 2c9354facb27a6c394bb0c64f85fc4e3a33f4aed Mon Sep 17 00:00:00 2001
From: Fabian Jahr
Date: Tue, 3 Oct 2023 01:05:05 +0200
Subject: [PATCH 127/172] doc: Add snapshot chainstate removal warning to
 reindexing documentation

---
 src/init.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/init.cpp b/src/init.cpp
index a0b4425898177..98f233d9dfc90 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -462,8 +462,8 @@ void SetupServerArgs(ArgsManager& argsman)
     argsman.AddArg("-prune=<n>", strprintf("Reduce storage requirements by enabling pruning (deleting) of old blocks. This allows the pruneblockchain RPC to be called to delete specific blocks and enables automatic pruning of old blocks if a target size in MiB is provided. This mode is incompatible with -txindex. Warning: Reverting this setting requires re-downloading the entire blockchain. (default: 0 = disable pruning blocks, 1 = allow manual pruning via RPC, >=%u = automatically prune block files to stay under the specified target size in MiB)", MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-reindex", "Rebuild chain state and block index from the blk*.dat files on disk. This will also rebuild active optional indexes.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-reindex-chainstate", "Rebuild chain state from the currently indexed blocks. When in pruning mode or if blocks on disk might be corrupted, use full -reindex instead.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+    argsman.AddArg("-reindex", "If enabled, wipe chain state and block index, and rebuild them from blk*.dat files on disk. Also wipe and rebuild other optional indexes that are active. If an assumeutxo snapshot was loaded, its chainstate will be wiped as well. The snapshot can then be reloaded via RPC.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+    argsman.AddArg("-reindex-chainstate", "If enabled, wipe chain state, and rebuild it from blk*.dat files on disk. If an assumeutxo snapshot was loaded, its chainstate will be wiped as well. The snapshot can then be reloaded via RPC.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-settings=<file>", strprintf("Specify path to dynamic settings data file. Can be disabled with -nosettings. File is written at runtime and not meant to be edited by users (use %s instead for custom settings). Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME, BITCOIN_SETTINGS_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #if HAVE_SYSTEM
     argsman.AddArg("-startupnotify=<cmd>", "Execute command on startup.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);

From 73700fb554d6abad705d8f48aed4840fedb36c79 Mon Sep 17 00:00:00 2001
From: Fabian Jahr
Date: Tue, 3 Oct 2023 01:31:00 +0200
Subject: [PATCH 128/172] validation, test: Improve and document nChainTx check
 for testability

Co-authored-by: Ryan Ofsky
---
 src/kernel/chainparams.cpp    |  2 +-
 src/test/validation_tests.cpp |  4 ++--
 src/validation.cpp            | 12 ++++++++----
 3 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp
index 5e893a3f58c4d..644e2a2c67123 100644
--- a/src/kernel/chainparams.cpp
+++ b/src/kernel/chainparams.cpp
@@ -495,7 +495,7 @@ class CRegTestParams : public CChainParams
             {
                 .height = 110,
                 .hash_serialized = AssumeutxoHash{uint256S("0x1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618")},
-                .nChainTx = 110,
+                .nChainTx = 111,
                 .blockhash = uint256S("0x696e92821f65549c7ee134edceeeeaaa4105647a3c4fd9f298c0aec0ab50425c")
             },
             {
diff --git a/src/test/validation_tests.cpp b/src/test/validation_tests.cpp
index d34d98c219a83..2692037273250 100644
--- a/src/test/validation_tests.cpp
+++ b/src/test/validation_tests.cpp
@@ -138,11 +138,11 @@ BOOST_AUTO_TEST_CASE(test_assumeutxo)
 
     const auto out110 = *params->AssumeutxoForHeight(110);
     BOOST_CHECK_EQUAL(out110.hash_serialized.ToString(), "1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618");
-    BOOST_CHECK_EQUAL(out110.nChainTx, 110U);
+    BOOST_CHECK_EQUAL(out110.nChainTx, 111U);
 
     const auto out110_2 = *params->AssumeutxoForBlockhash(uint256S("0x696e92821f65549c7ee134edceeeeaaa4105647a3c4fd9f298c0aec0ab50425c"));
     BOOST_CHECK_EQUAL(out110_2.hash_serialized.ToString(), "1ebbf5850204c0bdb15bf030f47c7fe91d45c44c712697e4509ba67adb01c618");
-    BOOST_CHECK_EQUAL(out110_2.nChainTx, 110U);
+    BOOST_CHECK_EQUAL(out110_2.nChainTx, 111U);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/validation.cpp b/src/validation.cpp
index 07e8b9152b6bc..464030f9fc542 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -4844,10 +4844,14 @@ void ChainstateManager::CheckBlockIndex()
     CBlockIndex* pindexFirstAssumeValid = nullptr; // Oldest ancestor of pindex which has BLOCK_ASSUMED_VALID
     while (pindex != nullptr) {
         nNodes++;
-        if (pindex->pprev && pindex->nTx > 0) {
-            // nChainTx should increase monotonically
-            assert(pindex->pprev->nChainTx <= pindex->nChainTx);
-        }
+        // Make sure nChainTx sum is correctly computed.
+        unsigned int prev_chain_tx = pindex->pprev ? pindex->pprev->nChainTx : 0;
+        assert((pindex->nChainTx == pindex->nTx + prev_chain_tx)
+               // For testing, allow transaction counts to be completely unset.
+               || (pindex->nChainTx == 0 && pindex->nTx == 0)
+               // For testing, allow this nChainTx to be unset if previous is also unset.
+               || (pindex->nChainTx == 0 && prev_chain_tx == 0 && pindex->pprev));
+
         if (pindexFirstAssumeValid == nullptr && pindex->nStatus & BLOCK_ASSUMED_VALID) pindexFirstAssumeValid = pindex;
         if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
         if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) {

From 82e48d20f1243fb7733e872a29661b151ab5d523 Mon Sep 17 00:00:00 2001
From: Fabian Jahr
Date: Tue, 3 Oct 2023 11:15:15 +0200
Subject: [PATCH 129/172] blockstorage: Let FlushChainstateBlockFile return
 true in case of missing cursor

Co-authored-by: Ryan Ofsky
---
 src/node/blockstorage.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 6e4e018b4a061..931db80274001 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -767,7 +767,8 @@ bool BlockManager::FlushChainstateBlockFile(int tip_height)
     if (cursor) {
         return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
     }
-    return false;
+    // No need to log warnings in this case.
+    return true;
 }
 
 uint64_t BlockManager::CalculateCurrentUsage()

From a482f86779a6182d87004b463c0eaf21038181c3 Mon Sep 17 00:00:00 2001
From: Fabian Jahr
Date: Tue, 3 Oct 2023 18:18:41 +0200
Subject: [PATCH 130/172] chain: Rename HaveTxsDownloaded to HaveNumChainTxs

Co-authored-by: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz>
---
 src/chain.h             |  4 +---
 src/net_processing.cpp  |  4 ++--
 src/rpc/blockchain.cpp  |  2 +-
 src/test/fuzz/chain.cpp |  2 +-
 src/validation.cpp      | 22 +++++++++++-----------
 5 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/src/chain.h b/src/chain.h
index 78b06719f43a4..4bf2001f74b1e 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -280,10 +280,8 @@ class CBlockIndex
      * Note that this will be true for the snapshot base block, if one is loaded (and
      * all subsequent assumed-valid blocks) since its nChainTx value will have been set
      * manually based on the related AssumeutxoData entry.
-     *
-     * TODO: potentially change the name of this based on the fact above.
      */
-    bool HaveTxsDownloaded() const { return nChainTx != 0; }
+    bool HaveNumChainTxs() const { return nChainTx != 0; }
 
     NodeSeconds Time() const
     {
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index bebbc66444163..84ccc54f03319 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1448,7 +1448,7 @@ void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, c
             return;
         }
         if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) {
-            if (activeChain && pindex->HaveTxsDownloaded())
+            if (activeChain && pindex->HaveNumChainTxs())
                 state->pindexLastCommonBlock = pindex;
         } else if (!IsBlockRequested(pindex->GetBlockHash())) {
             // The block is not already downloaded, and not yet in flight.
@@ -2233,7 +2233,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
     LOCK(cs_main);
     const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash);
     if (pindex) {
-        if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
+        if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) &&
                 pindex->IsValid(BLOCK_VALID_TREE)) {
             // If we have the block and all of its parents, but have not yet validated it,
             // we might be in the middle of connecting it (ie in the unlock of cs_main
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index abd723ee56223..528d5cfaec741 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -1455,7 +1455,7 @@ static RPCHelpMan getchaintips()
         } else if (block->nStatus & BLOCK_FAILED_MASK) {
             // This block or one of its ancestors is invalid.
             status = "invalid";
-        } else if (!block->HaveTxsDownloaded()) {
+        } else if (!block->HaveNumChainTxs()) {
             // This block cannot be connected because full block data for it or one of its parents is missing.
             status = "headers-only";
         } else if (block->IsValid(BLOCK_VALID_SCRIPTS)) {
diff --git a/src/test/fuzz/chain.cpp b/src/test/fuzz/chain.cpp
index 49b9898228626..0363f317b63f9 100644
--- a/src/test/fuzz/chain.cpp
+++ b/src/test/fuzz/chain.cpp
@@ -29,7 +29,7 @@ FUZZ_TARGET(chain)
     (void)disk_block_index->GetBlockTimeMax();
     (void)disk_block_index->GetMedianTimePast();
     (void)disk_block_index->GetUndoPos();
-    (void)disk_block_index->HaveTxsDownloaded();
+    (void)disk_block_index->HaveNumChainTxs();
     (void)disk_block_index->IsValid();
 }
 
diff --git a/src/validation.cpp b/src/validation.cpp
index 464030f9fc542..9108f911f04ee 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -2996,7 +2996,7 @@ CBlockIndex* Chainstate::FindMostWorkChain()
         CBlockIndex *pindexTest = pindexNew;
         bool fInvalidAncestor = false;
         while (pindexTest && !m_chain.Contains(pindexTest)) {
-            assert(pindexTest->HaveTxsDownloaded() || pindexTest->nHeight == 0);
+            assert(pindexTest->HaveNumChainTxs() || pindexTest->nHeight == 0);
 
             // Pruned nodes may have entries in setBlockIndexCandidates for
             // which block files have been deleted. Remove those as candidates
@@ -3351,7 +3351,7 @@ bool Chainstate::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex)
         // call preciousblock 2**31-1 times on the same set of tips...
         m_chainman.nBlockReverseSequenceId--;
     }
-    if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveTxsDownloaded()) {
+    if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->HaveNumChainTxs()) {
         setBlockIndexCandidates.insert(pindex);
         PruneBlockIndexCandidates();
     }
@@ -3399,7 +3399,7 @@ bool Chainstate::InvalidateBlock(BlockValidationState& state, CBlockIndex* pinde
             if (!m_chain.Contains(candidate) &&
                     !CBlockIndexWorkComparator()(candidate, pindex->pprev) &&
                     candidate->IsValid(BLOCK_VALID_TRANSACTIONS) &&
-                    candidate->HaveTxsDownloaded()) {
+                    candidate->HaveNumChainTxs()) {
                 candidate_blocks_by_work.insert(std::make_pair(candidate->nChainWork, candidate));
             }
         }
@@ -3488,7 +3488,7 @@ bool Chainstate::InvalidateBlock(BlockValidationState& state, CBlockIndex* pinde
 
     // Loop back over all block index entries and add any missing entries
     // to setBlockIndexCandidates.
     for (auto& [_, block_index] : m_blockman.m_block_index) {
-        if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveTxsDownloaded() && !setBlockIndexCandidates.value_comp()(&block_index, m_chain.Tip())) {
+        if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveNumChainTxs() && !setBlockIndexCandidates.value_comp()(&block_index, m_chain.Tip())) {
             setBlockIndexCandidates.insert(&block_index);
         }
     }
@@ -3520,7 +3520,7 @@ void Chainstate::ResetBlockFailureFlags(CBlockIndex *pindex) {
         if (!block_index.IsValid() && block_index.GetAncestor(nHeight) == pindex) {
             block_index.nStatus &= ~BLOCK_FAILED_MASK;
             m_blockman.m_dirty_blockindex.insert(&block_index);
-            if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveTxsDownloaded() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), &block_index)) {
+            if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveNumChainTxs() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), &block_index)) {
                 setBlockIndexCandidates.insert(&block_index);
             }
             if (&block_index == m_chainman.m_best_invalid) {
@@ -3583,7 +3583,7 @@ void ChainstateManager::ReceivedBlockTransactions(const CBlock& block, CBlockInd
     pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
     m_blockman.m_dirty_blockindex.insert(pindexNew);
 
-    if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveTxsDownloaded()) {
+    if (pindexNew->pprev == nullptr || pindexNew->pprev->HaveNumChainTxs()) {
         // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
         std::deque<CBlockIndex*> queue;
         queue.push_back(pindexNew);
@@ -4566,7 +4566,7 @@ bool ChainstateManager::LoadBlockIndex()
         // here.
         if (pindex == GetSnapshotBaseBlock() ||
                 (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
-                 (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
+                 (pindex->HaveNumChainTxs() || pindex->pprev == nullptr))) {
 
             for (Chainstate* chainstate : GetAll()) {
                 chainstate->TryAddBlockIndexCandidate(pindex);
@@ -4890,7 +4890,7 @@ void ChainstateManager::CheckBlockIndex()
             }
         }
     }
-    if (!pindex->HaveTxsDownloaded()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
+    if (!pindex->HaveNumChainTxs()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock)
     // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
     // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
     // Unless these indexes are assumed valid and pending block download on a
@@ -4920,9 +4920,9 @@ void ChainstateManager::CheckBlockIndex()
         // actually seen a block's transactions.
         assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
     }
-    // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveTxsDownloaded().
-    assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveTxsDownloaded());
-    assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveTxsDownloaded());
+    // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveNumChainTxs().
+    assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveNumChainTxs());
+    assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveNumChainTxs());
     assert(pindex->nHeight == nHeight); // nHeight must be consistent.
     assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
     assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.

From 1ff1c34656d49d60a93066a886dc1bfad9baccf4 Mon Sep 17 00:00:00 2001
From: Fabian Jahr
Date: Tue, 3 Oct 2023 18:34:20 +0200
Subject: [PATCH 131/172] test: Rename wait_until_helper to
 wait_until_helper_internal

Co-authored-by: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz>
---
 test/functional/test_framework/p2p.py            | 6 +++---
 test/functional/test_framework/test_framework.py | 4 ++--
 test/functional/test_framework/test_node.py      | 8 ++++----
 test/functional/test_framework/util.py           | 2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py
index ceb4bbd7dea95..be4ed624fce9d 100755
--- a/test/functional/test_framework/p2p.py
+++ b/test/functional/test_framework/p2p.py
@@ -77,7 +77,7 @@
 from test_framework.util import (
     MAX_NODES,
     p2p_port,
-    wait_until_helper,
+    wait_until_helper_internal,
 )
 
 logger = logging.getLogger("TestFramework.p2p")
@@ -466,7 +466,7 @@ def test_function():
             assert self.is_connected
             return test_function_in()
 
-        wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
+        wait_until_helper_internal(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor)
 
     def wait_for_connect(self, timeout=60):
         test_function = lambda: self.is_connected
@@ -602,7 +602,7 @@ def run(self):
     def close(self, timeout=10):
         """Close the connections and network event loop."""
         self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
-        wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout)
+        wait_until_helper_internal(lambda: not self.network_event_loop.is_running(), timeout=timeout)
         self.network_event_loop.close()
         self.join(timeout)
         # Safe to remove event loop.
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index a34c34713e922..c46c04c0ec6ed 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -33,7 +33,7 @@
     get_datadir_path,
     initialize_datadir,
     p2p_port,
-    wait_until_helper,
+    wait_until_helper_internal,
 )
 
 
@@ -747,7 +747,7 @@ def sync_all(self, nodes=None):
         self.sync_mempools(nodes)
 
     def wait_until(self, test_function, timeout=60):
-        return wait_until_helper(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor)
+        return wait_until_helper_internal(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor)
 
     # Private helper methods. These should not be accessed by the subclass test scripts.
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 7e85935807d9e..33a753964123f 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -36,7 +36,7 @@
     get_auth_cookie,
     get_rpc_proxy,
     rpc_url,
-    wait_until_helper,
+    wait_until_helper_internal,
     p2p_port,
 )
 
@@ -253,7 +253,7 @@ def wait_for_rpc_connection(self):
                 if self.version_is_at_least(190000):
                     # getmempoolinfo.loaded is available since commit
                     # bb8ae2c (version 0.19.0)
-                    wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor)
+                    wait_until_helper_internal(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor)
                 # Wait for the node to finish reindex, block import, and
                 # loading the mempool. Usually importing happens fast or
                 # even "immediate" when the node is started. However, there
@@ -407,7 +407,7 @@ def is_node_stopped(self, *, expected_stderr="", expected_ret_code=0):
     def wait_until_stopped(self, *, timeout=BITCOIND_PROC_WAIT_TIMEOUT, expect_error=False, **kwargs):
         expected_ret_code = 1 if expect_error else 0  # Whether node shutdown return EXIT_FAILURE or EXIT_SUCCESS
-        wait_until_helper(lambda: self.is_node_stopped(expected_ret_code=expected_ret_code, **kwargs), timeout=timeout, timeout_factor=self.timeout_factor)
+        wait_until_helper_internal(lambda: self.is_node_stopped(expected_ret_code=expected_ret_code, **kwargs), timeout=timeout, timeout_factor=self.timeout_factor)
 
     def replace_in_config(self, replacements):
         """
@@ -718,7 +718,7 @@ def disconnect_p2ps(self):
             p.peer_disconnect()
         del self.p2ps[:]
 
-        wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor)
+        wait_until_helper_internal(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor)
 
     def bumpmocktime(self, seconds):
         """Fast forward using setmocktime to self.mocktime + seconds. Requires setmocktime to have
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 3bd18c26d856e..0c10d500afcb9 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -241,7 +241,7 @@ def satoshi_round(amount):
     return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
 
 
-def wait_until_helper(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
+def wait_until_helper_internal(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None, timeout_factor=1.0):
     """Sleep until the predicate resolves to be True.
 
     Warning: Note that this method is not recommended to be used in tests as it is

From 710e5db61bf7b303fa425f8dcbdce536281fa7f3 Mon Sep 17 00:00:00 2001
From: Fabian Jahr
Date: Fri, 6 Oct 2023 18:43:31 +0200
Subject: [PATCH 132/172] doc: Drop references to assumevalid in assumeutxo
 docs

---
 doc/design/assumeutxo.md   | 2 +-
 doc/release-notes-27596.md | 2 +-
 src/rpc/blockchain.cpp     | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/design/assumeutxo.md b/doc/design/assumeutxo.md
index 8068a93f27a7d..75a7b6c8666ec 100644
--- a/doc/design/assumeutxo.md
+++ b/doc/design/assumeutxo.md
@@ -1,7 +1,7 @@
 # assumeutxo
 
 Assumeutxo is a feature that allows fast bootstrapping of a validating bitcoind
-instance with a very similar security model to assumevalid.
+instance.
 
 The RPC commands `dumptxoutset` and `loadtxoutset` are used to respectively
 generate and load UTXO snapshots.
The utility script diff --git a/doc/release-notes-27596.md b/doc/release-notes-27596.md index 799b82643fec6..cbaf4b3a9e3ee 100644 --- a/doc/release-notes-27596.md +++ b/doc/release-notes-27596.md @@ -12,7 +12,7 @@ RPC `loadtxoutset` has been added, which allows loading a UTXO snapshot of the format generated by `dumptxoutset`. Once this snapshot is loaded, its contents will be deserialized into a second chainstate data structure, which is then used to sync to -the network's tip under a security model very much like `assumevalid`. +the network's tip. Meanwhile, the original chainstate will complete the initial block download process in the background, eventually validating up to the block that the snapshot is based upon. diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 528d5cfaec741..605e7f1d05315 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -2707,7 +2707,7 @@ static RPCHelpMan loadtxoutset() "Load the serialized UTXO set from disk.\n" "Once this snapshot is loaded, its contents will be " "deserialized into a second chainstate data structure, which is then used to sync to " - "the network's tip under a security model very much like `assumevalid`. " + "the network's tip. " "Meanwhile, the original chainstate will complete the initial block download process in " "the background, eventually validating up to the block that the snapshot is based upon.\n\n" From 5d227a68627614efa8618d360efee22a47afa88b Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Fri, 6 Oct 2023 19:22:32 +0200 Subject: [PATCH 133/172] rpc: Use Ensure(Any)Chainman in assumeutxo related RPCs --- src/rpc/blockchain.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 605e7f1d05315..31dd8164fe643 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -2759,7 +2759,7 @@ static RPCHelpMan loadtxoutset() LogPrintf("[snapshot] waiting to see blockheader %s in headers chain before snapshot activation\n", base_blockhash.ToString()); - ChainstateManager& chainman = *node.chainman; + ChainstateManager& chainman = EnsureChainman(node); while (max_secs_to_wait_for_headers > 0) { snapshot_start_block = WITH_LOCK(::cs_main, @@ -2831,8 +2831,7 @@ return RPCHelpMan{ LOCK(cs_main); UniValue obj(UniValue::VOBJ); - NodeContext& node = EnsureAnyNodeContext(request.context); - ChainstateManager& chainman = *node.chainman; + ChainstateManager& chainman = EnsureAnyChainman(request.context); auto make_chain_data = [&](const Chainstate& cs, bool validated) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); From b442580ed2a6173f0cfb86f265887d783dde3ff8 Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 6 Oct 2023 10:28:26 -0300 Subject: [PATCH 134/172] gui: remove legacy wallet creation --- src/qt/bitcoingui.cpp | 27 ++++++----- src/qt/bitcoingui.h | 2 + src/qt/createwalletdialog.cpp | 20 --------- src/qt/createwalletdialog.h | 1 - src/qt/forms/createwalletdialog.ui | 72 +++++++++++++++++++++++------- src/qt/walletcontroller.cpp | 5 +-- 6 files changed, 77 insertions(+), 50 deletions(-) diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index 8a46d464370f5..171b50d8093dc 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -109,10 +109,7 @@ BitcoinGUI::BitcoinGUI(interfaces::Node& node, const PlatformStyle *_platformSty { /** Create wallet frame and make it the central widget */ walletFrame = new WalletFrame(_platformStyle, this); - connect(walletFrame, &WalletFrame::createWalletButtonClicked, 
[this] { - auto activity = new CreateWalletActivity(getWalletController(), this); - activity->create(); - }); + connect(walletFrame, &WalletFrame::createWalletButtonClicked, this, &BitcoinGUI::createWallet); connect(walletFrame, &WalletFrame::message, [this](const QString& title, const QString& message, unsigned int style) { this->message(title, message, style); }); @@ -453,12 +450,7 @@ void BitcoinGUI::createActions() connect(m_close_wallet_action, &QAction::triggered, [this] { m_wallet_controller->closeWallet(walletFrame->currentWalletModel(), this); }); - connect(m_create_wallet_action, &QAction::triggered, [this] { - auto activity = new CreateWalletActivity(m_wallet_controller, this); - connect(activity, &CreateWalletActivity::created, this, &BitcoinGUI::setCurrentWallet); - connect(activity, &CreateWalletActivity::created, rpcConsole, &RPCConsole::setCurrentWallet); - activity->create(); - }); + connect(m_create_wallet_action, &QAction::triggered, this, &BitcoinGUI::createWallet); connect(m_close_all_wallets_action, &QAction::triggered, [this] { m_wallet_controller->closeAllWallets(this); }); @@ -1191,6 +1183,21 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer progressBar->setToolTip(tooltip); } +void BitcoinGUI::createWallet() +{ +#ifdef ENABLE_WALLET +#ifndef USE_SQLITE + // Compiled without sqlite support (required for descriptor wallets) + message(tr("Error creating wallet"), tr("Cannot create new wallet, the software was compiled without sqlite support (required for descriptor wallets)"), CClientUIInterface::MSG_ERROR); + return; +#endif // USE_SQLITE + auto activity = new CreateWalletActivity(getWalletController(), this); + connect(activity, &CreateWalletActivity::created, this, &BitcoinGUI::setCurrentWallet); + connect(activity, &CreateWalletActivity::created, rpcConsole, &RPCConsole::setCurrentWallet); + activity->create(); +#endif // ENABLE_WALLET +} + void BitcoinGUI::message(const QString& title, QString message, unsigned int style, bool* ret, const QString& detailed_message) { // Default title. On macOS, the window title is ignored (as required by the macOS Guidelines). diff --git a/src/qt/bitcoingui.h b/src/qt/bitcoingui.h index 510561454bcdb..6fdc4c60d8ba5 100644 --- a/src/qt/bitcoingui.h +++ b/src/qt/bitcoingui.h @@ -230,6 +230,8 @@ public Q_SLOTS: void setNetworkActive(bool network_active); /** Set number of blocks and last block date shown in the UI */ void setNumBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, SyncType synctype, SynchronizationState sync_state); + /** Launch the wallet creation modal (no-op if wallet is not compiled) **/ + void createWallet(); /** Notify the user of an event from the core network or transaction handling code. @param[in] title the message box / notification title diff --git a/src/qt/createwalletdialog.cpp b/src/qt/createwalletdialog.cpp index 3e8be3e6754dd..6557280d891b2 100644 --- a/src/qt/createwalletdialog.cpp +++ b/src/qt/createwalletdialog.cpp @@ -50,12 +50,10 @@ CreateWalletDialog::CreateWalletDialog(QWidget* parent) : ui->encrypt_wallet_checkbox->setEnabled(!checked); ui->blank_wallet_checkbox->setEnabled(!checked); ui->disable_privkeys_checkbox->setEnabled(!checked); - ui->descriptor_checkbox->setEnabled(!checked); // The external signer checkbox is only enabled when a device is detected. // In that case it is checked by default. Toggling it restores the other // options to their default. 
- ui->descriptor_checkbox->setChecked(checked); ui->encrypt_wallet_checkbox->setChecked(false); ui->disable_privkeys_checkbox->setChecked(checked); ui->blank_wallet_checkbox->setChecked(false); @@ -87,19 +85,6 @@ CreateWalletDialog::CreateWalletDialog(QWidget* parent) : } }); -#ifndef USE_SQLITE - ui->descriptor_checkbox->setToolTip(tr("Compiled without sqlite support (required for descriptor wallets)")); - ui->descriptor_checkbox->setEnabled(false); - ui->descriptor_checkbox->setChecked(false); - ui->external_signer_checkbox->setEnabled(false); - ui->external_signer_checkbox->setChecked(false); -#endif - -#ifndef USE_BDB - ui->descriptor_checkbox->setEnabled(false); - ui->descriptor_checkbox->setChecked(true); -#endif - #ifndef ENABLE_EXTERNAL_SIGNER //: "External signing" means using devices such as hardware wallets. ui->external_signer_checkbox->setToolTip(tr("Compiled without external signing support (required for external signing)")); @@ -155,11 +140,6 @@ bool CreateWalletDialog::isMakeBlankWalletChecked() const return ui->blank_wallet_checkbox->isChecked(); } -bool CreateWalletDialog::isDescriptorWalletChecked() const -{ - return ui->descriptor_checkbox->isChecked(); -} - bool CreateWalletDialog::isExternalSignerChecked() const { return ui->external_signer_checkbox->isChecked(); diff --git a/src/qt/createwalletdialog.h b/src/qt/createwalletdialog.h index 939b82ff78c44..24ee97385b000 100644 --- a/src/qt/createwalletdialog.h +++ b/src/qt/createwalletdialog.h @@ -35,7 +35,6 @@ class CreateWalletDialog : public QDialog bool isEncryptWalletChecked() const; bool isDisablePrivateKeysChecked() const; bool isMakeBlankWalletChecked() const; - bool isDescriptorWalletChecked() const; bool isExternalSignerChecked() const; private: diff --git a/src/qt/forms/createwalletdialog.ui b/src/qt/forms/createwalletdialog.ui index 56adbe17a5ce4..1d6f0ed530a5f 100644 --- a/src/qt/forms/createwalletdialog.ui +++ b/src/qt/forms/createwalletdialog.ui @@ -6,8 +6,8 @@ 0 0 - 364 - 249 + 371 + 298 @@ -17,6 +17,48 @@ true + + + + + 0 + 0 + + + + You are one step away from creating your new wallet! + + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter + + + false + + + + + + + Please provide a name and, if desired, enable any advanced options + + + + + + + Qt::Vertical + + + QSizePolicy::Fixed + + + + 20 + 3 + + + + @@ -75,7 +117,19 @@ Advanced Options + + Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter + + + false + + + false + + + 9 + @@ -99,19 +153,6 @@ - - - - Use descriptors for scriptPubKey management - - - Descriptor Wallet - - - true - - - @@ -155,7 +196,6 @@ encrypt_wallet_checkbox disable_privkeys_checkbox blank_wallet_checkbox - descriptor_checkbox external_signer_checkbox diff --git a/src/qt/walletcontroller.cpp b/src/qt/walletcontroller.cpp index ca2fa2d672f1f..b1ef489cc3f2d 100644 --- a/src/qt/walletcontroller.cpp +++ b/src/qt/walletcontroller.cpp @@ -250,15 +250,14 @@ void CreateWalletActivity::createWallet() std::string name = m_create_wallet_dialog->walletName().toStdString(); uint64_t flags = 0; + // Enable descriptors by default. 
+ flags |= WALLET_FLAG_DESCRIPTORS; if (m_create_wallet_dialog->isDisablePrivateKeysChecked()) { flags |= WALLET_FLAG_DISABLE_PRIVATE_KEYS; } if (m_create_wallet_dialog->isMakeBlankWalletChecked()) { flags |= WALLET_FLAG_BLANK_WALLET; } - if (m_create_wallet_dialog->isDescriptorWalletChecked()) { - flags |= WALLET_FLAG_DESCRIPTORS; - } if (m_create_wallet_dialog->isExternalSignerChecked()) { flags |= WALLET_FLAG_EXTERNAL_SIGNER; } From a3793f2d1a43624631d6329f6c900a83e7dd0e98 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Tue, 26 Sep 2023 13:01:13 +0200 Subject: [PATCH 135/172] miniscript: add a missing dup key check bypass in Parse() This was calling the wrong constructor. --- src/script/miniscript.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/script/miniscript.h b/src/script/miniscript.h index 4c6bd0bb1dd61..e6c0973f85b68 100644 --- a/src/script/miniscript.h +++ b/src/script/miniscript.h @@ -1727,7 +1727,7 @@ inline NodeRef Parse(Span in, const Ctx& ctx) case ParseContext::AND_N: { auto mid = std::move(constructed.back()); constructed.pop_back(); - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::ANDOR, Vector(std::move(constructed.back()), std::move(mid), MakeNodeRef(ctx, Fragment::JUST_0))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::ANDOR, Vector(std::move(constructed.back()), std::move(mid), MakeNodeRef(internal::NoDupCheck{}, Fragment::JUST_0))); break; } case ParseContext::AND_V: { From bba9340a947446cd1c70852f58dcd8aee35be9ac Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Tue, 2 May 2023 17:55:09 +0200 Subject: [PATCH 136/172] miniscript: don't anticipate signature presence in CalcStackSize() It's true that for any public key there'll be a signature check in a valid Miniscript. The code would previously, when computing the size of a satisfaction, account for the signature when it sees a public key push. Instead, account for it when it is required (ie when encountering the `c:` wrapper). This has two benefits: - Allows to accurately compute the net effect of a fragment on the stack size. This is necessary to track the size of the stack during the execution of a Script. - It also just makes more sense, making the code more accessible to future contributors. 
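As a worked illustration of the new accounting (a minimal sketch with invented names StackSize, WrapC, PK_K and PK_H, not code from this patch; the {sat, dsat} values mirror the table changed in this diff):

    // Net witness-stack contribution of each fragment, as {sat, dsat} pairs.
    #include <cassert>
    #include <cstdint>

    struct StackSize { uint32_t sat, dsat; };

    constexpr StackSize PK_K{0, 0}; // the key push itself needs no witness element
    constexpr StackSize PK_H{1, 1}; // needs the public key on the stack
    // c: (CHECKSIG) is what consumes the signature, so it now accounts for it.
    constexpr StackSize WrapC(StackSize sub) { return {sub.sat + 1, sub.dsat + 1}; }

    int main() {
        assert(WrapC(PK_K).sat == 1 && WrapC(PK_K).dsat == 1); // pk(key): <sig> / <>
        assert(WrapC(PK_H).sat == 2 && WrapC(PK_H).dsat == 2); // pkh(key): <sig> <pk> / <> <pk>
    }

The totals for pk() and pkh() are unchanged; only the fragment to which the signature element is attributed moves, which is what makes per-fragment stack tracking possible.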
--- src/script/miniscript.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/script/miniscript.h b/src/script/miniscript.h index e6c0973f85b68..4effa5ce4e6ef 100644 --- a/src/script/miniscript.h +++ b/src/script/miniscript.h @@ -813,8 +813,8 @@ struct Node { case Fragment::JUST_1: case Fragment::OLDER: case Fragment::AFTER: return {0, {}}; - case Fragment::PK_K: return {1, 1}; - case Fragment::PK_H: return {2, 2}; + case Fragment::PK_K: return {0, 0}; + case Fragment::PK_H: return {1, 1}; case Fragment::SHA256: case Fragment::RIPEMD160: case Fragment::HASH256: @@ -837,8 +837,8 @@ struct Node { case Fragment::MULTI: return {k + 1, k + 1}; case Fragment::WRAP_A: case Fragment::WRAP_N: - case Fragment::WRAP_S: - case Fragment::WRAP_C: return subs[0]->ss; + case Fragment::WRAP_S: return subs[0]->ss; + case Fragment::WRAP_C: return {subs[0]->ss.sat + 1, subs[0]->ss.dsat + 1}; case Fragment::WRAP_D: return {1 + subs[0]->ss.sat, 1}; case Fragment::WRAP_V: return {subs[0]->ss.sat, {}}; case Fragment::WRAP_J: return {subs[0]->ss.sat, 1}; From c3738d0344f589162b9ffb78b8e2d78f612d3786 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Sat, 21 Jan 2023 13:43:15 +0100 Subject: [PATCH 137/172] miniscript: introduce a MsContext() helper to contexts We are going to introduce Tapscript support in Miniscript, for which some of Miniscript rules and properties change (new or modified fragments, different typing rules, different resources consumption, ..). --- src/script/descriptor.cpp | 13 ++++++++++--- src/script/miniscript.h | 5 +++++ src/script/sign.cpp | 6 ++++++ src/test/fuzz/miniscript.cpp | 8 ++++++++ src/test/miniscript_tests.cpp | 6 ++++++ 5 files changed, 35 insertions(+), 3 deletions(-) diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 896fb0b5b314d..eaef481c51b2a 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -1426,8 +1426,11 @@ struct KeyParser { mutable std::vector> m_keys; //! Used to detect key parsing errors within a Miniscript. mutable std::string m_key_parsing_error; + //! The script context we're operating within (Tapscript or P2WSH). + const miniscript::MiniscriptContext m_script_ctx; - KeyParser(FlatSigningProvider* out LIFETIMEBOUND, const SigningProvider* in LIFETIMEBOUND) : m_out(out), m_in(in) {} + KeyParser(FlatSigningProvider* out LIFETIMEBOUND, const SigningProvider* in LIFETIMEBOUND, miniscript::MiniscriptContext ctx) + : m_out(out), m_in(in), m_script_ctx(ctx) {} bool KeyCompare(const Key& a, const Key& b) const { return *m_keys.at(a) < *m_keys.at(b); @@ -1475,6 +1478,10 @@ struct KeyParser { } return {}; } + + miniscript::MiniscriptContext MsContext() const { + return m_script_ctx; + } }; /** Parse a script in a particular context. */ @@ -1714,7 +1721,7 @@ std::unique_ptr ParseScript(uint32_t& key_exp_index, Span InferScript(const CScript& script, ParseScriptCo } if (ctx == ParseScriptContext::P2WSH) { - KeyParser parser(nullptr, &provider); + KeyParser parser(/* out = */nullptr, /* in = */&provider, /* ctx = */miniscript::MiniscriptContext::P2WSH); auto node = miniscript::FromScript(script, parser); if (node && node->IsSane()) { return std::make_unique(std::move(parser.m_keys), std::move(node)); diff --git a/src/script/miniscript.h b/src/script/miniscript.h index 4effa5ce4e6ef..635efeed44c03 100644 --- a/src/script/miniscript.h +++ b/src/script/miniscript.h @@ -229,6 +229,11 @@ enum class Availability { MAYBE, }; +enum class MiniscriptContext { + P2WSH, + TAPSCRIPT, +}; + namespace internal { //! 
Helper function for Node::CalcType. diff --git a/src/script/sign.cpp b/src/script/sign.cpp index 92b7ad50b57ff..92c42dfe25660 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -404,6 +404,8 @@ struct Satisfier { SignatureData& m_sig_data; const BaseSignatureCreator& m_creator; const CScript& m_witness_script; + //! For now Miniscript is only available under P2WSH. + const miniscript::MiniscriptContext m_script_ctx{miniscript::MiniscriptContext::P2WSH}; explicit Satisfier(const SigningProvider& provider LIFETIMEBOUND, SignatureData& sig_data LIFETIMEBOUND, const BaseSignatureCreator& creator LIFETIMEBOUND, @@ -466,6 +468,10 @@ struct Satisfier { miniscript::Availability SatHASH160(const std::vector& hash, std::vector& preimage) const { return MsLookupHelper(m_sig_data.hash160_preimages, hash, preimage); } + + miniscript::MiniscriptContext MsContext() const { + return m_script_ctx; + } }; bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreator& creator, const CScript& fromPubKey, SignatureData& sigdata) diff --git a/src/test/fuzz/miniscript.cpp b/src/test/fuzz/miniscript.cpp index 0246507da11b7..072938bbd207f 100644 --- a/src/test/fuzz/miniscript.cpp +++ b/src/test/fuzz/miniscript.cpp @@ -128,6 +128,10 @@ struct ParserContext { if (it == TEST_DATA.dummy_keys_map.end()) return {}; return it->second; } + + miniscript::MiniscriptContext MsContext() const { + return miniscript::MiniscriptContext::P2WSH; + } } PARSER_CTX; //! Context that implements naive conversion from/to script only, for roundtrip testing. @@ -172,6 +176,10 @@ struct ScriptParserContext { key.is_hash = true; return key; } + + miniscript::MiniscriptContext MsContext() const { + return miniscript::MiniscriptContext::P2WSH; + } } SCRIPT_PARSER_CONTEXT; //! Context to produce a satisfaction for a Miniscript node using the pre-computed data. diff --git a/src/test/miniscript_tests.cpp b/src/test/miniscript_tests.cpp index b69317c4d925b..7f90cbc3dd1e2 100644 --- a/src/test/miniscript_tests.cpp +++ b/src/test/miniscript_tests.cpp @@ -114,6 +114,8 @@ typedef std::pair Challenge; struct KeyConverter { typedef CPubKey Key; + const miniscript::MiniscriptContext m_script_ctx{miniscript::MiniscriptContext::P2WSH}; + bool KeyCompare(const Key& a, const Key& b) const { return a < b; } @@ -158,6 +160,10 @@ struct KeyConverter { std::optional ToString(const Key& key) const { return HexStr(ToPKBytes(key)); } + + miniscript::MiniscriptContext MsContext() const { + return m_script_ctx; + } }; /** A class that encapsulates all signing/hash revealing operations. */ From 91b4db859023f5cf59f4b27f880484c863ccae66 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Thu, 28 Sep 2023 17:07:02 +0200 Subject: [PATCH 138/172] miniscript: store the script context within the Node structure Some checks will be different depending on the script context (for instance the maximum script size). --- src/script/miniscript.h | 160 +++++++++++++++++++---------------- src/test/fuzz/miniscript.cpp | 3 +- 2 files changed, 89 insertions(+), 74 deletions(-) diff --git a/src/script/miniscript.h b/src/script/miniscript.h index 635efeed44c03..e80d228c97b53 100644 --- a/src/script/miniscript.h +++ b/src/script/miniscript.h @@ -368,6 +368,8 @@ struct Node { const std::vector data; //! Subexpressions (for WRAP_*/AND_*/OR_*/ANDOR/THRESH) const std::vector> subs; + //! The Script context for this node. Either P2WSH or Tapscript. + const MiniscriptContext m_script_ctx; private: //! Cached ops counts. 
@@ -1333,20 +1335,32 @@ struct Node { bool operator==(const Node& arg) const { return Compare(*this, arg) == 0; } // Constructors with various argument combinations, which bypass the duplicate key check. - Node(internal::NoDupCheck, Fragment nt, std::vector> sub, std::vector arg, uint32_t val = 0) : fragment(nt), k(val), data(std::move(arg)), subs(std::move(sub)), ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} - Node(internal::NoDupCheck, Fragment nt, std::vector arg, uint32_t val = 0) : fragment(nt), k(val), data(std::move(arg)), ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} - Node(internal::NoDupCheck, Fragment nt, std::vector> sub, std::vector key, uint32_t val = 0) : fragment(nt), k(val), keys(std::move(key)), subs(std::move(sub)), ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} - Node(internal::NoDupCheck, Fragment nt, std::vector key, uint32_t val = 0) : fragment(nt), k(val), keys(std::move(key)), ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} - Node(internal::NoDupCheck, Fragment nt, std::vector> sub, uint32_t val = 0) : fragment(nt), k(val), subs(std::move(sub)), ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} - Node(internal::NoDupCheck, Fragment nt, uint32_t val = 0) : fragment(nt), k(val), ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} + Node(internal::NoDupCheck, MiniscriptContext script_ctx, Fragment nt, std::vector> sub, std::vector arg, uint32_t val = 0) + : fragment(nt), k(val), data(std::move(arg)), subs(std::move(sub)), m_script_ctx{script_ctx}, ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} + Node(internal::NoDupCheck, MiniscriptContext script_ctx, Fragment nt, std::vector arg, uint32_t val = 0) + : fragment(nt), k(val), data(std::move(arg)), m_script_ctx{script_ctx}, ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} + Node(internal::NoDupCheck, MiniscriptContext script_ctx, Fragment nt, std::vector> sub, std::vector key, uint32_t val = 0) + : fragment(nt), k(val), keys(std::move(key)), m_script_ctx{script_ctx}, subs(std::move(sub)), ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} + Node(internal::NoDupCheck, MiniscriptContext script_ctx, Fragment nt, std::vector key, uint32_t val = 0) + : fragment(nt), k(val), keys(std::move(key)), m_script_ctx{script_ctx}, ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} + Node(internal::NoDupCheck, MiniscriptContext script_ctx, Fragment nt, std::vector> sub, uint32_t val = 0) + : fragment(nt), k(val), subs(std::move(sub)), m_script_ctx{script_ctx}, ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} + Node(internal::NoDupCheck, MiniscriptContext script_ctx, Fragment nt, uint32_t val = 0) + : fragment(nt), k(val), m_script_ctx{script_ctx}, ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} // Constructors with various argument combinations, which do perform the duplicate key check. 
- template Node(const Ctx& ctx, Fragment nt, std::vector> sub, std::vector arg, uint32_t val = 0) : Node(internal::NoDupCheck{}, nt, std::move(sub), std::move(arg), val) { DuplicateKeyCheck(ctx); } - template Node(const Ctx& ctx, Fragment nt, std::vector arg, uint32_t val = 0) : Node(internal::NoDupCheck{}, nt, std::move(arg), val) { DuplicateKeyCheck(ctx);} - template Node(const Ctx& ctx, Fragment nt, std::vector> sub, std::vector key, uint32_t val = 0) : Node(internal::NoDupCheck{}, nt, std::move(sub), std::move(key), val) { DuplicateKeyCheck(ctx); } - template Node(const Ctx& ctx, Fragment nt, std::vector key, uint32_t val = 0) : Node(internal::NoDupCheck{}, nt, std::move(key), val) { DuplicateKeyCheck(ctx); } - template Node(const Ctx& ctx, Fragment nt, std::vector> sub, uint32_t val = 0) : Node(internal::NoDupCheck{}, nt, std::move(sub), val) { DuplicateKeyCheck(ctx); } - template Node(const Ctx& ctx, Fragment nt, uint32_t val = 0) : Node(internal::NoDupCheck{}, nt, val) { DuplicateKeyCheck(ctx); } + template Node(const Ctx& ctx, Fragment nt, std::vector> sub, std::vector arg, uint32_t val = 0) + : Node(internal::NoDupCheck{}, ctx.MsContext(), nt, std::move(sub), std::move(arg), val) { DuplicateKeyCheck(ctx); } + template Node(const Ctx& ctx, Fragment nt, std::vector arg, uint32_t val = 0) + : Node(internal::NoDupCheck{}, ctx.MsContext(), nt, std::move(arg), val) { DuplicateKeyCheck(ctx);} + template Node(const Ctx& ctx, Fragment nt, std::vector> sub, std::vector key, uint32_t val = 0) + : Node(internal::NoDupCheck{}, ctx.MsContext(), nt, std::move(sub), std::move(key), val) { DuplicateKeyCheck(ctx); } + template Node(const Ctx& ctx, Fragment nt, std::vector key, uint32_t val = 0) + : Node(internal::NoDupCheck{}, ctx.MsContext(), nt, std::move(key), val) { DuplicateKeyCheck(ctx); } + template Node(const Ctx& ctx, Fragment nt, std::vector> sub, uint32_t val = 0) + : Node(internal::NoDupCheck{}, ctx.MsContext(), nt, std::move(sub), val) { DuplicateKeyCheck(ctx); } + template Node(const Ctx& ctx, Fragment nt, uint32_t val = 0) + : Node(internal::NoDupCheck{}, ctx.MsContext(), nt, val) { DuplicateKeyCheck(ctx); } }; namespace internal { @@ -1434,14 +1448,14 @@ std::optional, int>> ParseHexStrEnd(Span -void BuildBack(Fragment nt, std::vector>& constructed, const bool reverse = false) +void BuildBack(const MiniscriptContext script_ctx, Fragment nt, std::vector>& constructed, const bool reverse = false) { NodeRef child = std::move(constructed.back()); constructed.pop_back(); if (reverse) { - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, nt, Vector(std::move(child), std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, script_ctx, nt, Vector(std::move(child), std::move(constructed.back()))); } else { - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, nt, Vector(std::move(constructed.back()), std::move(child))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, script_ctx, nt, Vector(std::move(constructed.back()), std::move(child))); } } @@ -1526,7 +1540,7 @@ inline NodeRef Parse(Span in, const Ctx& ctx) } else if (in[j] == 'l') { // The l: wrapper is equivalent to or_i(0,X) script_size += 4; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::JUST_0)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::JUST_0)); to_parse.emplace_back(ParseContext::OR_I, -1, -1); } else { return {}; @@ -1539,63 +1553,63 @@ inline NodeRef Parse(Span in, const Ctx& ctx) } case 
ParseContext::EXPR: { if (Const("0", in)) { - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::JUST_0)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::JUST_0)); } else if (Const("1", in)) { - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::JUST_1)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::JUST_1)); } else if (Const("pk(", in)) { auto res = ParseKeyEnd(in, ctx); if (!res) return {}; auto& [key, key_size] = *res; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_C, Vector(MakeNodeRef(internal::NoDupCheck{}, Fragment::PK_K, Vector(std::move(key)))))); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_C, Vector(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_K, Vector(std::move(key)))))); in = in.subspan(key_size + 1); script_size += 34; } else if (Const("pkh(", in)) { auto res = ParseKeyEnd(in, ctx); if (!res) return {}; auto& [key, key_size] = *res; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_C, Vector(MakeNodeRef(internal::NoDupCheck{}, Fragment::PK_H, Vector(std::move(key)))))); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_C, Vector(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_H, Vector(std::move(key)))))); in = in.subspan(key_size + 1); script_size += 24; } else if (Const("pk_k(", in)) { auto res = ParseKeyEnd(in, ctx); if (!res) return {}; auto& [key, key_size] = *res; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::PK_K, Vector(std::move(key)))); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_K, Vector(std::move(key)))); in = in.subspan(key_size + 1); script_size += 33; } else if (Const("pk_h(", in)) { auto res = ParseKeyEnd(in, ctx); if (!res) return {}; auto& [key, key_size] = *res; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::PK_H, Vector(std::move(key)))); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_H, Vector(std::move(key)))); in = in.subspan(key_size + 1); script_size += 23; } else if (Const("sha256(", in)) { auto res = ParseHexStrEnd(in, 32, ctx); if (!res) return {}; auto& [hash, hash_size] = *res; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::SHA256, std::move(hash))); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::SHA256, std::move(hash))); in = in.subspan(hash_size + 1); script_size += 38; } else if (Const("ripemd160(", in)) { auto res = ParseHexStrEnd(in, 20, ctx); if (!res) return {}; auto& [hash, hash_size] = *res; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::RIPEMD160, std::move(hash))); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::RIPEMD160, std::move(hash))); in = in.subspan(hash_size + 1); script_size += 26; } else if (Const("hash256(", in)) { auto res = ParseHexStrEnd(in, 32, ctx); if (!res) return {}; auto& [hash, hash_size] = *res; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::HASH256, std::move(hash))); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::HASH256, std::move(hash))); in = in.subspan(hash_size + 1); script_size += 38; } else if (Const("hash160(", in)) { auto res = ParseHexStrEnd(in, 20, ctx); if (!res) return {}; auto& [hash, 
hash_size] = *res; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::HASH160, std::move(hash))); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::HASH160, std::move(hash))); in = in.subspan(hash_size + 1); script_size += 26; } else if (Const("after(", in)) { @@ -1604,7 +1618,7 @@ inline NodeRef Parse(Span in, const Ctx& ctx) int64_t num; if (!ParseInt64(std::string(in.begin(), in.begin() + arg_size), &num)) return {}; if (num < 1 || num >= 0x80000000L) return {}; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::AFTER, num)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::AFTER, num)); in = in.subspan(arg_size + 1); script_size += 1 + (num > 16) + (num > 0x7f) + (num > 0x7fff) + (num > 0x7fffff); } else if (Const("older(", in)) { @@ -1613,7 +1627,7 @@ inline NodeRef Parse(Span in, const Ctx& ctx) int64_t num; if (!ParseInt64(std::string(in.begin(), in.begin() + arg_size), &num)) return {}; if (num < 1 || num >= 0x80000000L) return {}; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::OLDER, num)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::OLDER, num)); in = in.subspan(arg_size + 1); script_size += 1 + (num > 16) + (num > 0x7f) + (num > 0x7fff) + (num > 0x7fffff); } else if (Const("multi(", in)) { @@ -1636,7 +1650,7 @@ inline NodeRef Parse(Span in, const Ctx& ctx) if (keys.size() < 1 || keys.size() > 20) return {}; if (k < 1 || k > (int64_t)keys.size()) return {}; script_size += 2 + (keys.size() > 16) + (k > 16) + 34 * keys.size(); - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::MULTI, std::move(keys), k)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::MULTI, std::move(keys), k)); } else if (Const("thresh(", in)) { int next_comma = FindNextChar(in, ','); if (next_comma < 1) return {}; @@ -1689,70 +1703,70 @@ inline NodeRef Parse(Span in, const Ctx& ctx) break; } case ParseContext::ALT: { - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_A, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_A, Vector(std::move(constructed.back()))); break; } case ParseContext::SWAP: { - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_S, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_S, Vector(std::move(constructed.back()))); break; } case ParseContext::CHECK: { - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_C, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_C, Vector(std::move(constructed.back()))); break; } case ParseContext::DUP_IF: { - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_D, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_D, Vector(std::move(constructed.back()))); break; } case ParseContext::NON_ZERO: { - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_J, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_J, Vector(std::move(constructed.back()))); break; } case ParseContext::ZERO_NOTEQUAL: { - constructed.back() = 
MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_N, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_N, Vector(std::move(constructed.back()))); break; } case ParseContext::VERIFY: { script_size += (constructed.back()->GetType() << "x"_mst); - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_V, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_V, Vector(std::move(constructed.back()))); break; } case ParseContext::WRAP_U: { - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::OR_I, Vector(std::move(constructed.back()), MakeNodeRef(internal::NoDupCheck{}, Fragment::JUST_0))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::OR_I, Vector(std::move(constructed.back()), MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::JUST_0))); break; } case ParseContext::WRAP_T: { - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::AND_V, Vector(std::move(constructed.back()), MakeNodeRef(internal::NoDupCheck{}, Fragment::JUST_1))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::AND_V, Vector(std::move(constructed.back()), MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::JUST_1))); break; } case ParseContext::AND_B: { - BuildBack(Fragment::AND_B, constructed); + BuildBack(ctx.MsContext(), Fragment::AND_B, constructed); break; } case ParseContext::AND_N: { auto mid = std::move(constructed.back()); constructed.pop_back(); - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::ANDOR, Vector(std::move(constructed.back()), std::move(mid), MakeNodeRef(internal::NoDupCheck{}, Fragment::JUST_0))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::ANDOR, Vector(std::move(constructed.back()), std::move(mid), MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::JUST_0))); break; } case ParseContext::AND_V: { - BuildBack(Fragment::AND_V, constructed); + BuildBack(ctx.MsContext(), Fragment::AND_V, constructed); break; } case ParseContext::OR_B: { - BuildBack(Fragment::OR_B, constructed); + BuildBack(ctx.MsContext(), Fragment::OR_B, constructed); break; } case ParseContext::OR_C: { - BuildBack(Fragment::OR_C, constructed); + BuildBack(ctx.MsContext(), Fragment::OR_C, constructed); break; } case ParseContext::OR_D: { - BuildBack(Fragment::OR_D, constructed); + BuildBack(ctx.MsContext(), Fragment::OR_D, constructed); break; } case ParseContext::OR_I: { - BuildBack(Fragment::OR_I, constructed); + BuildBack(ctx.MsContext(), Fragment::OR_I, constructed); break; } case ParseContext::ANDOR: { @@ -1760,7 +1774,7 @@ inline NodeRef Parse(Span in, const Ctx& ctx) constructed.pop_back(); auto mid = std::move(constructed.back()); constructed.pop_back(); - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::ANDOR, Vector(std::move(constructed.back()), std::move(mid), std::move(right))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::ANDOR, Vector(std::move(constructed.back()), std::move(mid), std::move(right))); break; } case ParseContext::THRESH: { @@ -1780,7 +1794,7 @@ inline NodeRef Parse(Span in, const Ctx& ctx) constructed.pop_back(); } std::reverse(subs.begin(), subs.end()); - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::THRESH, std::move(subs), k)); + 
constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::THRESH, std::move(subs), k)); } else { return {}; } @@ -1916,12 +1930,12 @@ inline NodeRef DecodeScript(I& in, I last, const Ctx& ctx) // Constants if (in[0].first == OP_1) { ++in; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::JUST_1)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::JUST_1)); break; } if (in[0].first == OP_0) { ++in; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::JUST_0)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::JUST_0)); break; } // Public keys @@ -1929,14 +1943,14 @@ inline NodeRef DecodeScript(I& in, I last, const Ctx& ctx) auto key = ctx.FromPKBytes(in[0].second.begin(), in[0].second.end()); if (!key) return {}; ++in; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::PK_K, Vector(std::move(*key)))); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_K, Vector(std::move(*key)))); break; } if (last - in >= 5 && in[0].first == OP_VERIFY && in[1].first == OP_EQUAL && in[3].first == OP_HASH160 && in[4].first == OP_DUP && in[2].second.size() == 20) { auto key = ctx.FromPKHBytes(in[2].second.begin(), in[2].second.end()); if (!key) return {}; in += 5; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::PK_H, Vector(std::move(*key)))); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::PK_H, Vector(std::move(*key)))); break; } // Time locks @@ -1944,31 +1958,31 @@ inline NodeRef DecodeScript(I& in, I last, const Ctx& ctx) if (last - in >= 2 && in[0].first == OP_CHECKSEQUENCEVERIFY && (num = ParseScriptNumber(in[1]))) { in += 2; if (*num < 1 || *num > 0x7FFFFFFFL) return {}; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::OLDER, *num)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::OLDER, *num)); break; } if (last - in >= 2 && in[0].first == OP_CHECKLOCKTIMEVERIFY && (num = ParseScriptNumber(in[1]))) { in += 2; if (num < 1 || num > 0x7FFFFFFFL) return {}; - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::AFTER, *num)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::AFTER, *num)); break; } // Hashes if (last - in >= 7 && in[0].first == OP_EQUAL && in[3].first == OP_VERIFY && in[4].first == OP_EQUAL && (num = ParseScriptNumber(in[5])) && num == 32 && in[6].first == OP_SIZE) { if (in[2].first == OP_SHA256 && in[1].second.size() == 32) { - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::SHA256, in[1].second)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::SHA256, in[1].second)); in += 7; break; } else if (in[2].first == OP_RIPEMD160 && in[1].second.size() == 20) { - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::RIPEMD160, in[1].second)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::RIPEMD160, in[1].second)); in += 7; break; } else if (in[2].first == OP_HASH256 && in[1].second.size() == 32) { - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::HASH256, in[1].second)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::HASH256, in[1].second)); in += 7; break; } else if (in[2].first == OP_HASH160 && in[1].second.size() == 20) { - 
constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::HASH160, in[1].second)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::HASH160, in[1].second)); in += 7; break; } @@ -1989,7 +2003,7 @@ inline NodeRef DecodeScript(I& in, I last, const Ctx& ctx) if (!k || *k < 1 || *k > *n) return {}; in += 3 + *n; std::reverse(keys.begin(), keys.end()); - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::MULTI, std::move(keys), *k)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::MULTI, std::move(keys), *k)); break; } /** In the following wrappers, we only need to push SINGLE_BKV_EXPR rather @@ -2084,63 +2098,63 @@ inline NodeRef DecodeScript(I& in, I last, const Ctx& ctx) case DecodeContext::SWAP: { if (in >= last || in[0].first != OP_SWAP || constructed.empty()) return {}; ++in; - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_S, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_S, Vector(std::move(constructed.back()))); break; } case DecodeContext::ALT: { if (in >= last || in[0].first != OP_TOALTSTACK || constructed.empty()) return {}; ++in; - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_A, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_A, Vector(std::move(constructed.back()))); break; } case DecodeContext::CHECK: { if (constructed.empty()) return {}; - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_C, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_C, Vector(std::move(constructed.back()))); break; } case DecodeContext::DUP_IF: { if (constructed.empty()) return {}; - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_D, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_D, Vector(std::move(constructed.back()))); break; } case DecodeContext::VERIFY: { if (constructed.empty()) return {}; - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_V, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_V, Vector(std::move(constructed.back()))); break; } case DecodeContext::NON_ZERO: { if (constructed.empty()) return {}; - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_J, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_J, Vector(std::move(constructed.back()))); break; } case DecodeContext::ZERO_NOTEQUAL: { if (constructed.empty()) return {}; - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::WRAP_N, Vector(std::move(constructed.back()))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::WRAP_N, Vector(std::move(constructed.back()))); break; } case DecodeContext::AND_V: { if (constructed.size() < 2) return {}; - BuildBack(Fragment::AND_V, constructed, /*reverse=*/true); + BuildBack(ctx.MsContext(), Fragment::AND_V, constructed, /*reverse=*/true); break; } case DecodeContext::AND_B: { if (constructed.size() < 2) return {}; - BuildBack(Fragment::AND_B, constructed, /*reverse=*/true); + BuildBack(ctx.MsContext(), 
Fragment::AND_B, constructed, /*reverse=*/true); break; } case DecodeContext::OR_B: { if (constructed.size() < 2) return {}; - BuildBack(Fragment::OR_B, constructed, /*reverse=*/true); + BuildBack(ctx.MsContext(), Fragment::OR_B, constructed, /*reverse=*/true); break; } case DecodeContext::OR_C: { if (constructed.size() < 2) return {}; - BuildBack(Fragment::OR_C, constructed, /*reverse=*/true); + BuildBack(ctx.MsContext(), Fragment::OR_C, constructed, /*reverse=*/true); break; } case DecodeContext::OR_D: { if (constructed.size() < 2) return {}; - BuildBack(Fragment::OR_D, constructed, /*reverse=*/true); + BuildBack(ctx.MsContext(), Fragment::OR_D, constructed, /*reverse=*/true); break; } case DecodeContext::ANDOR: { @@ -2150,7 +2164,7 @@ inline NodeRef DecodeScript(I& in, I last, const Ctx& ctx) NodeRef right = std::move(constructed.back()); constructed.pop_back(); NodeRef mid = std::move(constructed.back()); - constructed.back() = MakeNodeRef(internal::NoDupCheck{}, Fragment::ANDOR, Vector(std::move(left), std::move(mid), std::move(right))); + constructed.back() = MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::ANDOR, Vector(std::move(left), std::move(mid), std::move(right))); break; } case DecodeContext::THRESH_W: { @@ -2174,7 +2188,7 @@ inline NodeRef DecodeScript(I& in, I last, const Ctx& ctx) constructed.pop_back(); subs.push_back(std::move(sub)); } - constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, Fragment::THRESH, std::move(subs), k)); + constructed.push_back(MakeNodeRef(internal::NoDupCheck{}, ctx.MsContext(), Fragment::THRESH, std::move(subs), k)); break; } case DecodeContext::ENDIF: { @@ -2224,7 +2238,7 @@ inline NodeRef DecodeScript(I& in, I last, const Ctx& ctx) if (in >= last) return {}; if (in[0].first == OP_IF) { ++in; - BuildBack(Fragment::OR_I, constructed, /*reverse=*/true); + BuildBack(ctx.MsContext(), Fragment::OR_I, constructed, /*reverse=*/true); } else if (in[0].first == OP_NOTIF) { ++in; to_parse.emplace_back(DecodeContext::ANDOR, -1, -1); diff --git a/src/test/fuzz/miniscript.cpp b/src/test/fuzz/miniscript.cpp index 072938bbd207f..d85ed707bd287 100644 --- a/src/test/fuzz/miniscript.cpp +++ b/src/test/fuzz/miniscript.cpp @@ -257,10 +257,11 @@ using NodeRef = miniscript::NodeRef; using Node = miniscript::Node; using Type = miniscript::Type; using miniscript::operator"" _mst; +using MsCtx = miniscript::MiniscriptContext; //! Construct a miniscript node as a shared_ptr. template NodeRef MakeNodeRef(Args&&... args) { - return miniscript::MakeNodeRef(miniscript::internal::NoDupCheck{}, std::forward(args)...); + return miniscript::MakeNodeRef(miniscript::internal::NoDupCheck{}, MsCtx::P2WSH, std::forward(args)...); } /** Information about a yet to be constructed Miniscript node. */ From 9164c2eca164d78cbae5351d383f39320711efb9 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Sat, 21 Jan 2023 13:49:59 +0100 Subject: [PATCH 139/172] miniscript: restrict multi() usage to P2WSH context CHECKMULTISIG is disabled for Tapscript. Instead, we'll introduce a multi_a() fragment with the same semantic as multi(). 
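A minimal sketch of the guard this implies (the enum matches this series; the MultiAllowed helper name is invented for illustration):

    enum class MiniscriptContext { P2WSH, TAPSCRIPT };

    // BIP 342 removed OP_CHECKMULTISIG from Tapscript, so multi() may only be
    // parsed or constructed under P2WSH; Tapscript will get the
    // CHECKSIGADD-based multi_a() instead.
    constexpr bool MultiAllowed(MiniscriptContext ctx) {
        return ctx == MiniscriptContext::P2WSH;
    }

    static_assert(MultiAllowed(MiniscriptContext::P2WSH), "multi() is fine under P2WSH");
    static_assert(!MultiAllowed(MiniscriptContext::TAPSCRIPT), "no CHECKMULTISIG in Tapscript");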
--- src/script/miniscript.cpp | 1 + src/script/miniscript.h | 21 +++++++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/script/miniscript.cpp b/src/script/miniscript.cpp index 19556a97753d7..28c79eab3095f 100644 --- a/src/script/miniscript.cpp +++ b/src/script/miniscript.cpp @@ -10,6 +10,7 @@ #include namespace miniscript { + namespace internal { Type SanitizeType(Type e) { diff --git a/src/script/miniscript.h b/src/script/miniscript.h index e80d228c97b53..95ff10be2a295 100644 --- a/src/script/miniscript.h +++ b/src/script/miniscript.h @@ -20,6 +20,7 @@ #include #include
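Taken together, the MsContext() plumbing introduced across the preceding patches follows one pattern: every key-handling context (descriptor parser, satisfier, fuzz and test converters) advertises the script context it operates in, and generic Miniscript code branches on it wherever the rules differ. A minimal sketch of that pattern (ExampleSatisfier and UsesTapscriptRules are invented names; MiniscriptContext and MsContext() are as in this series):

    enum class MiniscriptContext { P2WSH, TAPSCRIPT };

    struct ExampleSatisfier {
        // For now Miniscript is only available under P2WSH.
        MiniscriptContext MsContext() const { return MiniscriptContext::P2WSH; }
    };

    // Callers stay generic over the context type and query it where
    // fragment availability, typing rules, or resource limits diverge.
    template <typename Ctx>
    bool UsesTapscriptRules(const Ctx& ctx) {
        return ctx.MsContext() == MiniscriptContext::TAPSCRIPT;
    }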