From b45f1f56582fb3a0d17db5014ac57f1fb40a3611 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 5 Jan 2024 14:26:52 -0500 Subject: [PATCH 01/79] serfloat: do not test encode(bits)=bits anymore --- src/test/serfloat_tests.cpp | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/test/serfloat_tests.cpp b/src/test/serfloat_tests.cpp index b36bdc02caf64..d9b29b60ecd38 100644 --- a/src/test/serfloat_tests.cpp +++ b/src/test/serfloat_tests.cpp @@ -47,7 +47,6 @@ BOOST_AUTO_TEST_CASE(double_serfloat_tests) { BOOST_CHECK_EQUAL(TestDouble(4.0), 0x4010000000000000ULL); BOOST_CHECK_EQUAL(TestDouble(785.066650390625), 0x4088888880000000ULL); - // Roundtrip test on IEC559-compatible systems if (std::numeric_limits::is_iec559) { BOOST_CHECK_EQUAL(sizeof(double), 8U); BOOST_CHECK_EQUAL(sizeof(uint64_t), 8U); @@ -64,8 +63,7 @@ BOOST_AUTO_TEST_CASE(double_serfloat_tests) { TestDouble(-std::numeric_limits::signaling_NaN()); TestDouble(std::numeric_limits::denorm_min()); TestDouble(-std::numeric_limits::denorm_min()); - // Test exact encoding: on currently supported platforms, EncodeDouble - // should produce exactly the same as the in-memory representation for non-NaN. + // On IEC559-compatible systems, construct doubles to test from the encoding. for (int j = 0; j < 1000; ++j) { // Iterate over 9 specific bits exhaustively; the others are chosen randomly. 
// These specific bits are the sign bit, and the 2 top and bottom bits of @@ -92,8 +90,7 @@ BOOST_AUTO_TEST_CASE(double_serfloat_tests) { if (x & 256) v |= (uint64_t{1} << 63); double f; memcpy(&f, &v, 8); - uint64_t v2 = TestDouble(f); - if (!std::isnan(f)) BOOST_CHECK_EQUAL(v, v2); + TestDouble(f); } } } From 6e873df3478f3ab8f67d1b9339c7e990ae90e95b Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 5 Jan 2024 14:37:57 -0500 Subject: [PATCH 02/79] serfloat: improve/simplify tests --- src/test/serfloat_tests.cpp | 113 ++++++++++++++++++++++-------------- 1 file changed, 69 insertions(+), 44 deletions(-) diff --git a/src/test/serfloat_tests.cpp b/src/test/serfloat_tests.cpp index d9b29b60ecd38..304541074f257 100644 --- a/src/test/serfloat_tests.cpp +++ b/src/test/serfloat_tests.cpp @@ -37,6 +37,7 @@ uint64_t TestDouble(double f) { } // namespace BOOST_AUTO_TEST_CASE(double_serfloat_tests) { + // Test specific values against their expected encoding. BOOST_CHECK_EQUAL(TestDouble(0.0), 0U); BOOST_CHECK_EQUAL(TestDouble(-0.0), 0x8000000000000000); BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits::infinity()), 0x7ff0000000000000U); @@ -46,52 +47,76 @@ BOOST_AUTO_TEST_CASE(double_serfloat_tests) { BOOST_CHECK_EQUAL(TestDouble(2.0), 0x4000000000000000ULL); BOOST_CHECK_EQUAL(TestDouble(4.0), 0x4010000000000000ULL); BOOST_CHECK_EQUAL(TestDouble(785.066650390625), 0x4088888880000000ULL); + BOOST_CHECK_EQUAL(TestDouble(3.7243058682384174), 0x400dcb60e0031440); + BOOST_CHECK_EQUAL(TestDouble(91.64070592566159), 0x4056e901536d447a); + BOOST_CHECK_EQUAL(TestDouble(-98.63087668642575), 0xc058a860489c007a); + BOOST_CHECK_EQUAL(TestDouble(4.908737756962054), 0x4013a28c268b2b70); + BOOST_CHECK_EQUAL(TestDouble(77.9247330021754), 0x40537b2ed3547804); + BOOST_CHECK_EQUAL(TestDouble(40.24732825357566), 0x40441fa873c43dfc); + BOOST_CHECK_EQUAL(TestDouble(71.39395607929222), 0x4051d936938f27b6); + BOOST_CHECK_EQUAL(TestDouble(58.80100710817612), 0x404d668766a2bd70); + 
BOOST_CHECK_EQUAL(TestDouble(-30.10665786964975), 0xc03e1b4dee1e01b8); + BOOST_CHECK_EQUAL(TestDouble(60.15231509068704), 0x404e137f0f969814); + BOOST_CHECK_EQUAL(TestDouble(-48.15848711335961), 0xc04814494e445bc6); + BOOST_CHECK_EQUAL(TestDouble(26.68450101125353), 0x403aaf3b755169b0); + BOOST_CHECK_EQUAL(TestDouble(-65.72071986604303), 0xc0506e2046378ede); + BOOST_CHECK_EQUAL(TestDouble(17.95575825512381), 0x4031f4ac92b0a388); + BOOST_CHECK_EQUAL(TestDouble(-35.27171863226279), 0xc041a2c7ad17a42a); + BOOST_CHECK_EQUAL(TestDouble(-8.58810329425124), 0xc0212d1bdffef538); + BOOST_CHECK_EQUAL(TestDouble(88.51393044338977), 0x405620e43c83b1c8); + BOOST_CHECK_EQUAL(TestDouble(48.07224932612732), 0x4048093f77466ffc); + BOOST_CHECK_EQUAL(TestDouble(9.867348871395659e+117), 0x586f4daeb2459b9f); + BOOST_CHECK_EQUAL(TestDouble(-1.5166424385129721e+206), 0xeabe3bbc484bd458); + BOOST_CHECK_EQUAL(TestDouble(-8.585156555624594e-275), 0x8707c76eee012429); + BOOST_CHECK_EQUAL(TestDouble(2.2794371091628822e+113), 0x5777b2184458f4ee); + BOOST_CHECK_EQUAL(TestDouble(-1.1290476594131867e+163), 0xe1c91893d3488bb0); + BOOST_CHECK_EQUAL(TestDouble(9.143848423979275e-246), 0x0d0ff76e5f2620a3); + BOOST_CHECK_EQUAL(TestDouble(-2.8366718125941117e+81), 0xd0d7ec7e754b394a); + BOOST_CHECK_EQUAL(TestDouble(-1.2754409481684012e+229), 0xef80d32f8ec55342); + BOOST_CHECK_EQUAL(TestDouble(6.000577060053642e-186), 0x197a1be7c8209b6a); + BOOST_CHECK_EQUAL(TestDouble(2.0839423284378986e-302), 0x014c94f8689cb0a5); + BOOST_CHECK_EQUAL(TestDouble(-1.422140051483753e+259), 0xf5bd99271d04bb35); + BOOST_CHECK_EQUAL(TestDouble(-1.0593973991188853e+46), 0xc97db0cdb72d1046); + BOOST_CHECK_EQUAL(TestDouble(2.62945125875249e+190), 0x67779b36366c993b); + BOOST_CHECK_EQUAL(TestDouble(-2.920377657275094e+115), 0xd7e7b7b45908e23b); + BOOST_CHECK_EQUAL(TestDouble(9.790289014855851e-118), 0x27a3c031cc428bcc); + BOOST_CHECK_EQUAL(TestDouble(-4.629317182034961e-114), 0xa866ccf0b753705a); + 
BOOST_CHECK_EQUAL(TestDouble(-1.7674605603846528e+279), 0xf9e8ed383ffc3e25); + BOOST_CHECK_EQUAL(TestDouble(2.5308171727712605e+120), 0x58ef5cd55f0ec997); + BOOST_CHECK_EQUAL(TestDouble(-1.05034156412799e+54), 0xcb25eea1b9350fa0); - if (std::numeric_limits::is_iec559) { - BOOST_CHECK_EQUAL(sizeof(double), 8U); - BOOST_CHECK_EQUAL(sizeof(uint64_t), 8U); - // Test extreme values - TestDouble(std::numeric_limits::min()); - TestDouble(-std::numeric_limits::min()); - TestDouble(std::numeric_limits::max()); - TestDouble(-std::numeric_limits::max()); - TestDouble(std::numeric_limits::lowest()); - TestDouble(-std::numeric_limits::lowest()); - TestDouble(std::numeric_limits::quiet_NaN()); - TestDouble(-std::numeric_limits::quiet_NaN()); - TestDouble(std::numeric_limits::signaling_NaN()); - TestDouble(-std::numeric_limits::signaling_NaN()); - TestDouble(std::numeric_limits::denorm_min()); - TestDouble(-std::numeric_limits::denorm_min()); - // On IEC559-compatible systems, construct doubles to test from the encoding. - for (int j = 0; j < 1000; ++j) { - // Iterate over 9 specific bits exhaustively; the others are chosen randomly. - // These specific bits are the sign bit, and the 2 top and bottom bits of - // exponent and mantissa in the IEEE754 binary64 format. 
- for (int x = 0; x < 512; ++x) { - uint64_t v = InsecureRandBits(64); - v &= ~(uint64_t{1} << 0); - if (x & 1) v |= (uint64_t{1} << 0); - v &= ~(uint64_t{1} << 1); - if (x & 2) v |= (uint64_t{1} << 1); - v &= ~(uint64_t{1} << 50); - if (x & 4) v |= (uint64_t{1} << 50); - v &= ~(uint64_t{1} << 51); - if (x & 8) v |= (uint64_t{1} << 51); - v &= ~(uint64_t{1} << 52); - if (x & 16) v |= (uint64_t{1} << 52); - v &= ~(uint64_t{1} << 53); - if (x & 32) v |= (uint64_t{1} << 53); - v &= ~(uint64_t{1} << 61); - if (x & 64) v |= (uint64_t{1} << 61); - v &= ~(uint64_t{1} << 62); - if (x & 128) v |= (uint64_t{1} << 62); - v &= ~(uint64_t{1} << 63); - if (x & 256) v |= (uint64_t{1} << 63); - double f; - memcpy(&f, &v, 8); - TestDouble(f); + // Test extreme values + BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits::min()), 0x10000000000000); + BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits::min()), 0x8010000000000000); + BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits::max()), 0x7fefffffffffffff); + BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits::max()), 0xffefffffffffffff); + BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits::lowest()), 0xffefffffffffffff); + BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits::lowest()), 0x7fefffffffffffff); + BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits::denorm_min()), 0x1); + BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits::denorm_min()), 0x8000000000000001); + // Note that all NaNs are encoded the same way. + BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits::quiet_NaN()), 0x7ff8000000000000); + BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits::quiet_NaN()), 0x7ff8000000000000); + BOOST_CHECK_EQUAL(TestDouble(std::numeric_limits::signaling_NaN()), 0x7ff8000000000000); + BOOST_CHECK_EQUAL(TestDouble(-std::numeric_limits::signaling_NaN()), 0x7ff8000000000000); + + // Construct doubles to test from the encoding. 
+ static_assert(sizeof(double) == 8); + static_assert(sizeof(uint64_t) == 8); + for (int j = 0; j < 1000; ++j) { + // Iterate over 9 specific bits exhaustively; the others are chosen randomly. + // These specific bits are the sign bit, and the 2 top and bottom bits of + // exponent and mantissa in the IEEE754 binary64 format. + for (int x = 0; x < 512; ++x) { + uint64_t v = InsecureRandBits(64); + int x_pos = 0; + for (int v_pos : {0, 1, 50, 51, 52, 53, 61, 62, 63}) { + v &= ~(uint64_t{1} << v_pos); + if ((x >> (x_pos++)) & 1) v |= (uint64_t{1} << v_pos); } + double f; + memcpy(&f, &v, 8); + TestDouble(f); } } } From 405ac819af1eb0f6cf6d1805cb668f4e8ab4a6f3 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Fri, 26 Jan 2024 03:28:33 +0100 Subject: [PATCH 03/79] test: p2p: support disconnect waiting for `add_outbound_p2p_connection` Adds a new boolean parameter `wait_for_disconnect` to the `add_outbound_p2p_connection` method. If set, the node under test is checked to disconnect immediately after receiving the version message (same logic as for feeler connections). --- test/functional/test_framework/test_node.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 3baa78fd79f90..851888468e0db 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -726,7 +726,7 @@ def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, send_version=Tru return p2p_conn - def add_outbound_p2p_connection(self, p2p_conn, *, wait_for_verack=True, p2p_idx, connection_type="outbound-full-relay", supports_v2_p2p=None, advertise_v2_p2p=None, **kwargs): + def add_outbound_p2p_connection(self, p2p_conn, *, wait_for_verack=True, wait_for_disconnect=False, p2p_idx, connection_type="outbound-full-relay", supports_v2_p2p=None, advertise_v2_p2p=None, **kwargs): """Add an outbound p2p connection from node. 
Must be an "outbound-full-relay", "block-relay-only", "addr-fetch" or "feeler" connection. @@ -773,7 +773,7 @@ def addconnection_callback(address, port): if reconnect: p2p_conn.wait_for_reconnect() - if connection_type == "feeler": + if connection_type == "feeler" or wait_for_disconnect: # feeler connections are closed as soon as the node receives a `version` message p2p_conn.wait_until(lambda: p2p_conn.message_count["version"] == 1, check_connected=False) p2p_conn.wait_until(lambda: not p2p_conn.is_connected, check_connected=False) From c4a67d396d0aa99f658cafe381e39622859eb0be Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Fri, 19 Jan 2024 03:08:29 +0100 Subject: [PATCH 04/79] test: p2p: check disconnect due to lack of desirable service flags --- test/functional/p2p_handshake.py | 69 ++++++++++++++++++++++ test/functional/test_framework/messages.py | 1 + test/functional/test_runner.py | 2 + 3 files changed, 72 insertions(+) create mode 100755 test/functional/p2p_handshake.py diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py new file mode 100755 index 0000000000000..b8544f9016789 --- /dev/null +++ b/test/functional/p2p_handshake.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +""" +Test P2P behaviour during the handshake phase (VERSION, VERACK messages). 
+""" +import itertools + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.messages import ( + NODE_NETWORK, + NODE_NONE, + NODE_P2P_V2, + NODE_WITNESS, +) +from test_framework.p2p import P2PInterface + + +# usual desirable service flags for outbound non-pruned peers +DESIRABLE_SERVICE_FLAGS = NODE_NETWORK | NODE_WITNESS + + +class P2PHandshakeTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + def add_outbound_connection(self, node, connection_type, services, wait_for_disconnect): + peer = node.add_outbound_p2p_connection( + P2PInterface(), p2p_idx=0, wait_for_disconnect=wait_for_disconnect, + connection_type=connection_type, services=services, + supports_v2_p2p=self.options.v2transport, advertise_v2_p2p=self.options.v2transport) + if not wait_for_disconnect: + # check that connection is alive past the version handshake and disconnect manually + peer.sync_with_ping() + peer.peer_disconnect() + peer.wait_for_disconnect() + + def test_desirable_service_flags(self, node, service_flag_tests, expect_disconnect): + """Check that connecting to a peer either fails or succeeds depending on its offered + service flags in the VERSION message. 
The test is exercised for all relevant + outbound connection types where the desirable service flags check is done.""" + CONNECTION_TYPES = ["outbound-full-relay", "block-relay-only", "addr-fetch"] + for conn_type, services in itertools.product(CONNECTION_TYPES, service_flag_tests): + if self.options.v2transport: + services |= NODE_P2P_V2 + expected_result = "disconnect" if expect_disconnect else "connect" + self.log.info(f' - services 0x{services:08x}, type "{conn_type}" [{expected_result}]') + if expect_disconnect: + expected_debug_log = f'does not offer the expected services ' \ + f'({services:08x} offered, {DESIRABLE_SERVICE_FLAGS:08x} expected)' + with node.assert_debug_log([expected_debug_log]): + self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=True) + else: + self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=False) + + def run_test(self): + node = self.nodes[0] + self.log.info("Check that lacking desired service flags leads to disconnect (non-pruned peers)") + self.test_desirable_service_flags(node, [NODE_NONE, NODE_NETWORK, NODE_WITNESS], expect_disconnect=True) + self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS], expect_disconnect=False) + + self.log.info("Check that feeler connections get disconnected immediately") + with node.assert_debug_log([f"feeler connection completed"]): + self.add_outbound_connection(node, "feeler", NODE_NONE, wait_for_disconnect=True) + + +if __name__ == '__main__': + P2PHandshakeTest().main() diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 1780678de1e5f..4e496a927587b 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -46,6 +46,7 @@ MAX_HEADERS_RESULTS = 2000 # Number of headers sent in one getheaders result MAX_INV_SIZE = 50000 # Maximum number of entries in an 'inv' protocol message +NODE_NONE = 0 NODE_NETWORK = (1 << 0) NODE_BLOOM = (1 << 
2) NODE_WITNESS = (1 << 3) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 4d66ea97c84c8..9cdd54090b8d5 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -395,6 +395,8 @@ 'rpc_getdescriptorinfo.py', 'rpc_mempool_info.py', 'rpc_help.py', + 'p2p_handshake.py', + 'p2p_handshake.py --v2transport', 'feature_dirsymlinks.py', 'feature_help.py', 'feature_shutdown.py', From 2f23987849758537f76df7374d85a7e87b578b61 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Wed, 28 Feb 2024 03:02:21 +0100 Subject: [PATCH 05/79] test: p2p: check limited peers desirability (depending on best block depth) This adds coverage for the logic introduced in PR #28170 ("p2p: adaptive connections services flags"). --- test/functional/p2p_handshake.py | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py index b8544f9016789..3fbb940cbdc55 100755 --- a/test/functional/p2p_handshake.py +++ b/test/functional/p2p_handshake.py @@ -6,10 +6,12 @@ Test P2P behaviour during the handshake phase (VERSION, VERACK messages). """ import itertools +import time from test_framework.test_framework import BitcoinTestFramework from test_framework.messages import ( NODE_NETWORK, + NODE_NETWORK_LIMITED, NODE_NONE, NODE_P2P_V2, NODE_WITNESS, @@ -17,8 +19,12 @@ from test_framework.p2p import P2PInterface -# usual desirable service flags for outbound non-pruned peers -DESIRABLE_SERVICE_FLAGS = NODE_NETWORK | NODE_WITNESS +# Desirable service flags for outbound non-pruned and pruned peers. Note that +# the desirable service flags for pruned peers are dynamic and only apply if +# 1. the peer's service flag NODE_NETWORK_LIMITED is set *and* +# 2. 
the local chain is close to the tip (<24h) +DESIRABLE_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS +DESIRABLE_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS class P2PHandshakeTest(BitcoinTestFramework): @@ -36,7 +42,7 @@ def add_outbound_connection(self, node, connection_type, services, wait_for_disc peer.peer_disconnect() peer.wait_for_disconnect() - def test_desirable_service_flags(self, node, service_flag_tests, expect_disconnect): + def test_desirable_service_flags(self, node, service_flag_tests, desirable_service_flags, expect_disconnect): """Check that connecting to a peer either fails or succeeds depending on its offered service flags in the VERSION message. The test is exercised for all relevant outbound connection types where the desirable service flags check is done.""" @@ -47,18 +53,30 @@ def test_desirable_service_flags(self, node, service_flag_tests, expect_disconne expected_result = "disconnect" if expect_disconnect else "connect" self.log.info(f' - services 0x{services:08x}, type "{conn_type}" [{expected_result}]') if expect_disconnect: + assert (services & desirable_service_flags) != desirable_service_flags expected_debug_log = f'does not offer the expected services ' \ - f'({services:08x} offered, {DESIRABLE_SERVICE_FLAGS:08x} expected)' + f'({services:08x} offered, {desirable_service_flags:08x} expected)' with node.assert_debug_log([expected_debug_log]): self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=True) else: + assert (services & desirable_service_flags) == desirable_service_flags self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=False) def run_test(self): node = self.nodes[0] self.log.info("Check that lacking desired service flags leads to disconnect (non-pruned peers)") - self.test_desirable_service_flags(node, [NODE_NONE, NODE_NETWORK, NODE_WITNESS], expect_disconnect=True) - self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS], expect_disconnect=False) + 
self.test_desirable_service_flags(node, [NODE_NONE, NODE_NETWORK, NODE_WITNESS], + DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True) + self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS], + DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=False) + + self.log.info("Check that limited peers are only desired if the local chain is close to the tip (<24h)") + node.setmocktime(int(time.time()) + 25 * 3600) # tip outside the 24h window, should fail + self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS], + DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True) + node.setmocktime(int(time.time()) + 23 * 3600) # tip inside the 24h window, should succeed + self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS], + DESIRABLE_SERVICE_FLAGS_PRUNED, expect_disconnect=False) self.log.info("Check that feeler connections get disconnected immediately") with node.assert_debug_log([f"feeler connection completed"]): From bcbd7eb8d40fbbd0e58c61acef087d65f2047036 Mon Sep 17 00:00:00 2001 From: furszy Date: Sat, 25 Nov 2023 12:10:52 -0300 Subject: [PATCH 06/79] bench: basic block filter index initial sync Introduce benchmark for the block filter index sync. And makes synchronous 'Sync()' mechanism accessible. 
--- src/Makefile.bench.include | 1 + src/bench/index_blockfilter.cpp | 43 +++++++++++++++++++++++++++++++++ src/index/base.cpp | 4 +-- src/index/base.h | 16 ++++++------ 4 files changed, 54 insertions(+), 10 deletions(-) create mode 100644 src/bench/index_blockfilter.cpp diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index 4d814bc5dc92b..7ba0111fa686f 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -34,6 +34,7 @@ bench_bench_bitcoin_SOURCES = \ bench/examples.cpp \ bench/gcs_filter.cpp \ bench/hashpadding.cpp \ + bench/index_blockfilter.cpp \ bench/load_external.cpp \ bench/lockedpool.cpp \ bench/logging.cpp \ diff --git a/src/bench/index_blockfilter.cpp b/src/bench/index_blockfilter.cpp new file mode 100644 index 0000000000000..5e0bfbfea6b3b --- /dev/null +++ b/src/bench/index_blockfilter.cpp @@ -0,0 +1,43 @@ +// Copyright (c) 2023-present The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or https://www.opensource.org/licenses/mit-license.php. + +#include + +#include +#include +#include +#include +#include +#include + +// Very simple block filter index sync benchmark, only using coinbase outputs. 
+static void BlockFilterIndexSync(benchmark::Bench& bench) +{ + const auto test_setup = MakeNoLogFileContext(); + + // Create more blocks + int CHAIN_SIZE = 600; + CPubKey pubkey{ParseHex("02ed26169896db86ced4cbb7b3ecef9859b5952825adbeab998fb5b307e54949c9")}; + CScript script = GetScriptForDestination(WitnessV0KeyHash(pubkey)); + std::vector noTxns; + for (int i = 0; i < CHAIN_SIZE - 100; i++) { + test_setup->CreateAndProcessBlock(noTxns, script); + SetMockTime(GetTime() + 1); + } + assert(WITH_LOCK(::cs_main, return test_setup->m_node.chainman->ActiveHeight() == CHAIN_SIZE)); + + bench.minEpochIterations(5).run([&] { + BlockFilterIndex filter_index(interfaces::MakeChain(test_setup->m_node), BlockFilterType::BASIC, + /*n_cache_size=*/0, /*f_memory=*/false, /*f_wipe=*/true); + assert(filter_index.Init()); + assert(!filter_index.BlockUntilSyncedToCurrentChain()); + filter_index.Sync(); + + IndexSummary summary = filter_index.GetSummary(); + assert(summary.synced); + assert(summary.best_block_hash == WITH_LOCK(::cs_main, return test_setup->m_node.chainman->ActiveTip()->GetBlockHash())); + }); +} + +BENCHMARK(BlockFilterIndexSync, benchmark::PriorityLevel::HIGH); diff --git a/src/index/base.cpp b/src/index/base.cpp index 036292cd8a11c..5953fe215004a 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -141,7 +141,7 @@ static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& return chain.Next(chain.FindFork(pindex_prev)); } -void BaseIndex::ThreadSync() +void BaseIndex::Sync() { const CBlockIndex* pindex = m_best_block_index.load(); if (!m_synced) { @@ -394,7 +394,7 @@ bool BaseIndex::StartBackgroundSync() { if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index"); - m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { ThreadSync(); }); + m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { Sync(); }); return true; } diff --git a/src/index/base.h b/src/index/base.h index 
154061fb19120..0eb1d9ca3b229 100644 --- a/src/index/base.h +++ b/src/index/base.h @@ -78,13 +78,6 @@ class BaseIndex : public CValidationInterface std::thread m_thread_sync; CThreadInterrupt m_interrupt; - /// Sync the index with the block index starting from the current best block. - /// Intended to be run in its own thread, m_thread_sync, and can be - /// interrupted with m_interrupt. Once the index gets in sync, the m_synced - /// flag is set and the BlockConnected ValidationInterface callback takes - /// over and the sync thread exits. - void ThreadSync(); - /// Write the current index state (eg. chain block locator and subclass-specific items) to disk. /// /// Recommendations for error handling: @@ -152,9 +145,16 @@ class BaseIndex : public CValidationInterface /// validation interface so that it stays in sync with blockchain updates. [[nodiscard]] bool Init(); - /// Starts the initial sync process. + /// Starts the initial sync process on a background thread. [[nodiscard]] bool StartBackgroundSync(); + /// Sync the index with the block index starting from the current best block. + /// Intended to be run in its own thread, m_thread_sync, and can be + /// interrupted with m_interrupt. Once the index gets in sync, the m_synced + /// flag is set and the BlockConnected ValidationInterface callback takes + /// over and the sync thread exits. + void Sync(); + /// Stops the instance from staying in sync with blockchain updates. 
void Stop(); From 331f044e3b49223cedd16803d123c0da9d91d6a2 Mon Sep 17 00:00:00 2001 From: furszy Date: Tue, 24 Jan 2023 20:49:42 -0300 Subject: [PATCH 07/79] index: blockfilter, decouple Write into its own function --- src/index/blockfilterindex.cpp | 11 ++++++++--- src/index/blockfilterindex.h | 2 ++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp index 65993e830e54e..1085b4da77dab 100644 --- a/src/index/blockfilterindex.cpp +++ b/src/index/blockfilterindex.cpp @@ -252,16 +252,21 @@ bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block) BlockFilter filter(m_filter_type, *Assert(block.data), block_undo); + return Write(filter, block.height, filter.ComputeHeader(prev_header)); +} + +bool BlockFilterIndex::Write(const BlockFilter& filter, uint32_t block_height, const uint256& filter_header) +{ size_t bytes_written = WriteFilterToDisk(m_next_filter_pos, filter); if (bytes_written == 0) return false; std::pair value; - value.first = block.hash; + value.first = filter.GetBlockHash(); value.second.hash = filter.GetHash(); - value.second.header = filter.ComputeHeader(prev_header); + value.second.header = filter_header; value.second.pos = m_next_filter_pos; - if (!m_db->Write(DBHeightKey(block.height), value)) { + if (!m_db->Write(DBHeightKey(block_height), value)) { return false; } diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h index 10a1cfd2ee0e5..1cfc21d00f4ff 100644 --- a/src/index/blockfilterindex.h +++ b/src/index/blockfilterindex.h @@ -44,6 +44,8 @@ class BlockFilterIndex final : public BaseIndex bool AllowPrune() const override { return true; } + bool Write(const BlockFilter& filter, uint32_t block_height, const uint256& filter_header); + protected: bool CustomInit(const std::optional& block) override; From a6756ecdb2f1ac960433412807aa377d1ee80d05 Mon Sep 17 00:00:00 2001 From: furszy Date: Mon, 30 Jan 2023 17:51:16 -0300 Subject: [PATCH 08/79] 
index: blockfilter, decouple header lookup into its own function --- src/index/blockfilterindex.cpp | 32 +++++++++++++++++++------------- src/index/blockfilterindex.h | 2 ++ 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp index 1085b4da77dab..204e5d7e18b49 100644 --- a/src/index/blockfilterindex.cpp +++ b/src/index/blockfilterindex.cpp @@ -222,6 +222,22 @@ size_t BlockFilterIndex::WriteFilterToDisk(FlatFilePos& pos, const BlockFilter& return data_size; } +std::optional BlockFilterIndex::ReadFilterHeader(int height, const uint256& expected_block_hash) +{ + std::pair read_out; + if (!m_db->Read(DBHeightKey(height), read_out)) { + return std::nullopt; + } + + if (read_out.first != expected_block_hash) { + LogError("%s: previous block header belongs to unexpected block %s; expected %s\n", + __func__, read_out.first.ToString(), expected_block_hash.ToString()); + return std::nullopt; + } + + return read_out.second.header; +} + bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block) { CBlockUndo block_undo; @@ -235,19 +251,9 @@ bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block) return false; } - std::pair read_out; - if (!m_db->Read(DBHeightKey(block.height - 1), read_out)) { - return false; - } - - uint256 expected_block_hash = *Assert(block.prev_hash); - if (read_out.first != expected_block_hash) { - LogError("%s: previous block header belongs to unexpected block %s; expected %s\n", - __func__, read_out.first.ToString(), expected_block_hash.ToString()); - return false; - } - - prev_header = read_out.second.header; + auto op_prev_header = ReadFilterHeader(block.height - 1, *Assert(block.prev_hash)); + if (!op_prev_header) return false; + prev_header = *op_prev_header; } BlockFilter filter(m_filter_type, *Assert(block.data), block_undo); diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h index 1cfc21d00f4ff..01ba1025c87d5 100644 --- 
a/src/index/blockfilterindex.h +++ b/src/index/blockfilterindex.h @@ -46,6 +46,8 @@ class BlockFilterIndex final : public BaseIndex bool Write(const BlockFilter& filter, uint32_t block_height, const uint256& filter_header); + std::optional ReadFilterHeader(int height, const uint256& expected_block_hash); + protected: bool CustomInit(const std::optional& block) override; From fa5729436ca12b20cfa2cd1f0c6f54af7192f0a6 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Tue, 27 Feb 2024 13:03:48 +0100 Subject: [PATCH 09/79] lint: Fix lint-whitespace issues --- test/lint/lint-whitespace.py | 136 ------------------------------ test/lint/test_runner/src/main.rs | 81 ++++++++++++++++++ 2 files changed, 81 insertions(+), 136 deletions(-) delete mode 100755 test/lint/lint-whitespace.py diff --git a/test/lint/lint-whitespace.py b/test/lint/lint-whitespace.py deleted file mode 100755 index f5e4a776d0ab5..0000000000000 --- a/test/lint/lint-whitespace.py +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (c) 2017-2022 The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -# -# Check for new lines in diff that introduce trailing whitespace or -# tab characters instead of spaces. - -# We can't run this check unless we know the commit range for the PR. - -import argparse -import os -import re -import sys - -from subprocess import check_output - -EXCLUDED_DIRS = ["depends/patches/", - "contrib/guix/patches/", - "src/leveldb/", - "src/crc32c/", - "src/secp256k1/", - "src/minisketch/", - "doc/release-notes/", - "src/qt/locale"] - -def parse_args(): - """Parse command line arguments.""" - parser = argparse.ArgumentParser( - description=""" - Check for new lines in diff that introduce trailing whitespace - or tab characters instead of spaces in unstaged changes, the - previous n commits, or a commit-range. 
- """, - epilog=f""" - You can manually set the commit-range with the COMMIT_RANGE - environment variable (e.g. "COMMIT_RANGE='47ba2c3...ee50c9e' - {sys.argv[0]}"). Defaults to current merge base when neither - prev-commits nor the environment variable is set. - """) - - parser.add_argument("--prev-commits", "-p", required=False, help="The previous n commits to check") - - return parser.parse_args() - - -def report_diff(selection): - filename = "" - seen = False - seenln = False - - print("The following changes were suspected:") - - for line in selection: - if re.match(r"^diff", line): - filename = line - seen = False - elif re.match(r"^@@", line): - linenumber = line - seenln = False - else: - if not seen: - # The first time a file is seen with trailing whitespace or a tab character, we print the - # filename (preceded by a newline). - print("") - print(filename) - seen = True - if not seenln: - print(linenumber) - seenln = True - print(line) - - -def get_diff(commit_range, check_only_code): - exclude_args = [":(exclude)" + dir for dir in EXCLUDED_DIRS] - - if check_only_code: - what_files = ["*.cpp", "*.h", "*.md", "*.py", "*.sh"] - else: - what_files = ["."] - - diff = check_output(["git", "diff", "-U0", commit_range, "--"] + what_files + exclude_args, text=True, encoding="utf8") - - return diff - - -def main(): - args = parse_args() - - if not os.getenv("COMMIT_RANGE"): - if args.prev_commits: - commit_range = "HEAD~" + args.prev_commits + "...HEAD" - else: - # This assumes that the target branch of the pull request will be master. - merge_base = check_output(["git", "merge-base", "HEAD", "master"], text=True, encoding="utf8").rstrip("\n") - commit_range = merge_base + "..HEAD" - else: - commit_range = os.getenv("COMMIT_RANGE") - if commit_range == "SKIP_EMPTY_NOT_A_PR": - sys.exit(0) - - whitespace_selection = [] - tab_selection = [] - - # Check if trailing whitespace was found in the diff. 
- for line in get_diff(commit_range, check_only_code=False).splitlines(): - if re.match(r"^(diff --git|\@@|^\+.*\s+$)", line): - whitespace_selection.append(line) - - whitespace_additions = [i for i in whitespace_selection if i.startswith("+")] - - # Check if tab characters were found in the diff. - for line in get_diff(commit_range, check_only_code=True).splitlines(): - if re.match(r"^(diff --git|\@@|^\+.*\t)", line): - tab_selection.append(line) - - tab_additions = [i for i in tab_selection if i.startswith("+")] - - ret = 0 - - if len(whitespace_additions) > 0: - print("This diff appears to have added new lines with trailing whitespace.") - report_diff(whitespace_selection) - ret = 1 - - if len(tab_additions) > 0: - print("This diff appears to have added new lines with tab characters instead of spaces.") - report_diff(tab_selection) - ret = 1 - - sys.exit(ret) - - -if __name__ == "__main__": - main() diff --git a/test/lint/test_runner/src/main.rs b/test/lint/test_runner/src/main.rs index b97e822484bf7..f9df57623949c 100644 --- a/test/lint/test_runner/src/main.rs +++ b/test/lint/test_runner/src/main.rs @@ -95,6 +95,85 @@ fs:: namespace, which has unsafe filesystem functions marked as deleted. 
} } +/// Return the pathspecs for whitespace related excludes +fn get_pathspecs_exclude_whitespace() -> Vec { + let mut list = get_pathspecs_exclude_subtrees(); + list.extend( + [ + // Permanent excludes + "*.patch", + "src/qt/locale", + "contrib/windeploy/win-codesign.cert", + "doc/README_windows.txt", + // Temporary excludes, or existing violations + "doc/release-notes/release-notes-0.*", + "contrib/init/bitcoind.openrc", + "contrib/macdeploy/macdeployqtplus", + "src/crypto/sha256_sse4.cpp", + "src/qt/res/src/*.svg", + "test/functional/test_framework/crypto/ellswift_decode_test_vectors.csv", + "test/functional/test_framework/crypto/xswiftec_inv_test_vectors.csv", + "contrib/qos/tc.sh", + "contrib/verify-commits/gpg.sh", + "src/univalue/include/univalue_escapes.h", + "src/univalue/test/object.cpp", + "test/lint/git-subtree-check.sh", + ] + .iter() + .map(|s| format!(":(exclude){}", s)), + ); + list +} + +fn lint_trailing_whitespace() -> LintResult { + let trailing_space = git() + .args(["grep", "-I", "--line-number", "\\s$", "--"]) + .args(get_pathspecs_exclude_whitespace()) + .status() + .expect("command error") + .success(); + if trailing_space { + Err(r#" +^^^ +Trailing whitespace is problematic, because git may warn about it, or editors may remove it by +default, forcing developers in the future to either undo the changes manually or spend time on +review. + +Thus, it is best to remove the trailing space now. + +Please add any false positives, such as subtrees, Windows-related files, patch files, or externally +sourced files to the exclude list. 
+ "# + .to_string()) + } else { + Ok(()) + } +} + +fn lint_tabs_whitespace() -> LintResult { + let tabs = git() + .args(["grep", "-I", "--line-number", "--perl-regexp", "^\\t", "--"]) + .args(["*.cpp", "*.h", "*.md", "*.py", "*.sh"]) + .args(get_pathspecs_exclude_whitespace()) + .status() + .expect("command error") + .success(); + if tabs { + Err(r#" +^^^ +Use of tabs in this codebase is problematic, because existing code uses spaces and tabs will cause +display issues and conflict with editor settings. + +Please remove the tabs. + +Please add any false positives, such as subtrees, or externally sourced files to the exclude list. + "# + .to_string()) + } else { + Ok(()) + } +} + fn lint_includes_build_config() -> LintResult { let config_path = "./src/config/bitcoin-config.h.in"; let include_directive = "#include "; @@ -232,6 +311,8 @@ fn main() -> ExitCode { let test_list: Vec<(&str, LintFn)> = vec![ ("subtree check", lint_subtree), ("std::filesystem check", lint_std_filesystem), + ("trailing whitespace check", lint_trailing_whitespace), + ("no-tabs check", lint_tabs_whitespace), ("build config includes check", lint_includes_build_config), ("-help=1 documentation check", lint_doc), ("lint-*.py scripts", lint_all), From dfcef536d0e6c40e98dce35ae7af6e3e4a2595cd Mon Sep 17 00:00:00 2001 From: Matthew Zipkin Date: Fri, 15 Sep 2023 14:16:22 -0400 Subject: [PATCH 10/79] blockstorage: do not flush block to disk if it is already there test: ensure we can reindex from read-only block files now --- src/node/blockstorage.cpp | 24 ++++++++++----------- test/functional/feature_reindex_readonly.py | 9 ++++---- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 211d557826367..996ac30c57e4d 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -906,19 +906,19 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne if (!fKnown) { LogPrint(BCLog::BLOCKSTORAGE, 
"Leaving block file %i: %s (onto %i) (height %i)\n", last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight); - } - // Do not propagate the return code. The flush concerns a previous block - // and undo file that has already been written to. If a flush fails - // here, and we crash, there is no expected additional block data - // inconsistency arising from the flush failure here. However, the undo - // data may be inconsistent after a crash if the flush is called during - // a reindex. A flush error might also leave some of the data files - // untrimmed. - if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) { - LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, - "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n", - last_blockfile, !fKnown, finalize_undo, nFile); + // Do not propagate the return code. The flush concerns a previous block + // and undo file that has already been written to. If a flush fails + // here, and we crash, there is no expected additional block data + // inconsistency arising from the flush failure here. However, the undo + // data may be inconsistent after a crash if the flush is called during + // a reindex. A flush error might also leave some of the data files + // untrimmed. + if (!FlushBlockFile(last_blockfile, !fKnown, finalize_undo)) { + LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, + "Failed to flush previous block file %05i (finalize=%i, finalize_undo=%i) before opening new block file %05i\n", + last_blockfile, !fKnown, finalize_undo, nFile); + } } // No undo data yet in the new file, so reset our undo-height tracking. 
m_blockfile_cursors[chain_type] = BlockfileCursor{nFile}; diff --git a/test/functional/feature_reindex_readonly.py b/test/functional/feature_reindex_readonly.py index dd99c3c4fa317..25cff87a3b7b1 100755 --- a/test/functional/feature_reindex_readonly.py +++ b/test/functional/feature_reindex_readonly.py @@ -24,6 +24,7 @@ def reindex_readonly(self): opreturn = "6a" nulldata = fastprune_blockfile_size * "ff" self.generateblock(self.nodes[0], output=f"raw({opreturn}{nulldata})", transactions=[]) + block_count = self.nodes[0].getblockcount() self.stop_node(0) assert (self.nodes[0].chain_path / "blocks" / "blk00000.dat").exists() @@ -73,10 +74,10 @@ def reindex_readonly(self): pass if undo_immutable: - self.log.info("Attempt to restart and reindex the node with the unwritable block file") - with self.nodes[0].assert_debug_log(expected_msgs=['FlushStateToDisk', 'failed to open file'], unexpected_msgs=[]): - self.nodes[0].assert_start_raises_init_error(extra_args=['-reindex', '-fastprune'], - expected_msg="Error: A fatal internal error occurred, see debug.log for details") + self.log.debug("Attempt to restart and reindex the node with the unwritable block file") + with self.nodes[0].wait_for_debug_log([b"Reindexing finished"]): + self.start_node(0, extra_args=['-reindex', '-fastprune']) + assert block_count == self.nodes[0].getblockcount() undo_immutable() filename.chmod(0o777) From f1469eb45469672046c5793b44863f606736c853 Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 23 Feb 2024 16:31:13 -0300 Subject: [PATCH 11/79] index: cache last block filter header Avoid disk read operations on every new processed block. 
--- src/index/blockfilterindex.cpp | 22 ++++++++++++++++------ src/index/blockfilterindex.h | 3 +++ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp index 204e5d7e18b49..41bdca9df562a 100644 --- a/src/index/blockfilterindex.cpp +++ b/src/index/blockfilterindex.cpp @@ -128,6 +128,16 @@ bool BlockFilterIndex::CustomInit(const std::optional& blo m_next_filter_pos.nFile = 0; m_next_filter_pos.nPos = 0; } + + if (block) { + auto op_last_header = ReadFilterHeader(block->height, block->hash); + if (!op_last_header) { + LogError("Cannot read last block filter header; index may be corrupted\n"); + return false; + } + m_last_header = *op_last_header; + } + return true; } @@ -241,7 +251,6 @@ std::optional BlockFilterIndex::ReadFilterHeader(int height, const uint bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block) { CBlockUndo block_undo; - uint256 prev_header; if (block.height > 0) { // pindex variable gives indexing code access to node internals. 
It @@ -250,15 +259,14 @@ bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block) if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) { return false; } - - auto op_prev_header = ReadFilterHeader(block.height - 1, *Assert(block.prev_hash)); - if (!op_prev_header) return false; - prev_header = *op_prev_header; } BlockFilter filter(m_filter_type, *Assert(block.data), block_undo); - return Write(filter, block.height, filter.ComputeHeader(prev_header)); + const uint256& header = filter.ComputeHeader(m_last_header); + bool res = Write(filter, block.height, header); + if (res) m_last_header = header; // update last header + return res; } bool BlockFilterIndex::Write(const BlockFilter& filter, uint32_t block_height, const uint256& filter_header) @@ -326,6 +334,8 @@ bool BlockFilterIndex::CustomRewind(const interfaces::BlockKey& current_tip, con batch.Write(DB_FILTER_POS, m_next_filter_pos); if (!m_db->WriteBatch(batch)) return false; + // Update cached header + m_last_header = *Assert(ReadFilterHeader(new_tip.height, new_tip.hash)); return true; } diff --git a/src/index/blockfilterindex.h b/src/index/blockfilterindex.h index 01ba1025c87d5..cdb9563fb8ec8 100644 --- a/src/index/blockfilterindex.h +++ b/src/index/blockfilterindex.h @@ -42,6 +42,9 @@ class BlockFilterIndex final : public BaseIndex /** cache of block hash to filter header, to avoid disk access when responding to getcfcheckpt. */ std::unordered_map m_headers_cache GUARDED_BY(m_cs_headers_cache); + // Last computed header to avoid disk reads on every new block. + uint256 m_last_header{}; + bool AllowPrune() const override { return true; } bool Write(const BlockFilter& filter, uint32_t block_height, const uint256& filter_header); From 0faafb57f8298547949cbc0044ee9e925ed887ba Mon Sep 17 00:00:00 2001 From: furszy Date: Thu, 16 Feb 2023 17:25:00 -0300 Subject: [PATCH 12/79] index: decrease ThreadSync cs_main contention Only NextSyncBlock requires cs_main lock. 
The other function calls like Commit or Rewind will lock or not cs_main internally when they need it. Avoiding keeping cs_main locked when Commit() or Rewind() write data to disk. --- src/index/base.cpp | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/src/index/base.cpp b/src/index/base.cpp index 5953fe215004a..b4bda2fca6ac8 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -159,23 +159,20 @@ void BaseIndex::Sync() return; } - { - LOCK(cs_main); - const CBlockIndex* pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain); - if (!pindex_next) { - SetBestBlockIndex(pindex); - m_synced = true; - // No need to handle errors in Commit. See rationale above. - Commit(); - break; - } - if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) { - FatalErrorf("%s: Failed to rewind index %s to a previous chain tip", - __func__, GetName()); - return; - } - pindex = pindex_next; + const CBlockIndex* pindex_next = WITH_LOCK(cs_main, return NextSyncBlock(pindex, m_chainstate->m_chain)); + if (!pindex_next) { + SetBestBlockIndex(pindex); + m_synced = true; + // No need to handle errors in Commit. See rationale above. 
+ Commit(); + break; } + if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) { + FatalErrorf("%s: Failed to rewind index %s to a previous chain tip", __func__, GetName()); + return; + } + pindex = pindex_next; + auto current_time{std::chrono::steady_clock::now()}; if (last_log_time + SYNC_LOG_INTERVAL < current_time) { From 99afb9d15a08d2f46739f4d2b66c63dbabd7a44e Mon Sep 17 00:00:00 2001 From: furszy Date: Tue, 28 Nov 2023 09:02:00 -0300 Subject: [PATCH 13/79] refactor: init, simplify index shutdown code --- src/init.cpp | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index b9a0bb732aa04..251ef0f8ff33f 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -256,12 +256,8 @@ void Interrupt(NodeContext& node) InterruptMapPort(); if (node.connman) node.connman->Interrupt(); - if (g_txindex) { - g_txindex->Interrupt(); - } - ForEachBlockFilterIndex([](BlockFilterIndex& index) { index.Interrupt(); }); - if (g_coin_stats_index) { - g_coin_stats_index->Interrupt(); + for (auto* index : node.indexes) { + index->Interrupt(); } } @@ -337,16 +333,11 @@ void Shutdown(NodeContext& node) if (node.validation_signals) node.validation_signals->FlushBackgroundCallbacks(); // Stop and delete all indexes only after flushing background callbacks. - if (g_txindex) { - g_txindex->Stop(); - g_txindex.reset(); - } - if (g_coin_stats_index) { - g_coin_stats_index->Stop(); - g_coin_stats_index.reset(); - } - ForEachBlockFilterIndex([](BlockFilterIndex& index) { index.Stop(); }); + for (auto* index : node.indexes) index->Stop(); + if (g_txindex) g_txindex.reset(); + if (g_coin_stats_index) g_coin_stats_index.reset(); DestroyAllBlockFilterIndexes(); + node.indexes.clear(); // all instances are nullptr now // Any future callbacks will be dropped. 
This should absolutely be safe - if // missing a callback results in an unrecoverable situation, unclean shutdown From 432a542e271f5b6ecb1c6ea4fa9108ad4b3a5a43 Mon Sep 17 00:00:00 2001 From: Martin Zumsande Date: Tue, 12 Mar 2024 14:00:23 -0400 Subject: [PATCH 14/79] test: fix intermittent failures with test=addrman The nKey of the addrman is generated the first time the node is started. Therefore, restarting a node or turning it off and on again won't make a previously non-deterministic addrman deterministic. Co-authored-by: 0xb10c --- test/functional/feature_asmap.py | 3 ++- test/functional/rpc_net.py | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/test/functional/feature_asmap.py b/test/functional/feature_asmap.py index 7e32f8d515b2d..024a8fa18c063 100755 --- a/test/functional/feature_asmap.py +++ b/test/functional/feature_asmap.py @@ -39,7 +39,8 @@ def expected_messages(filename): class AsmapTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 - self.extra_args = [["-checkaddrman=1"]] # Do addrman checks on all operations. + # Do addrman checks on all operations and use deterministic addrman + self.extra_args = [["-checkaddrman=1", "-test=addrman"]] def fill_addrman(self, node_id): """Add 2 tried addresses to the addrman, followed by 2 new addresses.""" diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 265d9d959ae4b..22789644f2733 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -319,7 +319,9 @@ def test_getnodeaddresses(self): def test_addpeeraddress(self): self.log.info("Test addpeeraddress") - self.restart_node(1, ["-checkaddrman=1", "-test=addrman"]) + # The node has an existing, non-deterministic addrman from a previous test. + # Clear it to have a deterministic addrman. 
+ self.restart_node(1, ["-checkaddrman=1", "-test=addrman"], clear_addrman=True) node = self.nodes[1] self.log.debug("Test that addpeerinfo is a hidden RPC") From 38f70ba6ac86fb96c60571d2e1f316315c1c73cc Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Mon, 27 Nov 2023 14:50:55 -0500 Subject: [PATCH 15/79] RPC: Add maxfeerate and maxburnamount args to submitpackage And thread the feerate value through ProcessNewPackage to reject individual transactions that exceed the given feerate. This allows subpackage processing, and is compatible with future package RBF work. --- src/node/transaction.h | 6 ++++++ src/rpc/client.cpp | 2 ++ src/rpc/mempool.cpp | 33 ++++++++++++++++++++++++++--- src/test/fuzz/package_eval.cpp | 2 +- src/test/fuzz/tx_pool.cpp | 2 +- src/test/txpackage_tests.cpp | 36 ++++++++++++++++---------------- src/validation.cpp | 37 ++++++++++++++++++++++++++++----- src/validation.h | 6 ++++-- test/functional/rpc_packages.py | 30 ++++++++++++++++++++++++++ 9 files changed, 124 insertions(+), 30 deletions(-) diff --git a/src/node/transaction.h b/src/node/transaction.h index 168273594ce50..6782536aceba2 100644 --- a/src/node/transaction.h +++ b/src/node/transaction.h @@ -26,6 +26,12 @@ struct NodeContext; */ static const CFeeRate DEFAULT_MAX_RAW_TX_FEE_RATE{COIN / 10}; +/** Maximum burn value for sendrawtransaction, submitpackage, and testmempoolaccept RPC calls. + * By default, a transaction with a burn value higher than this will be rejected + * by these RPCs and the GUI. This can be overridden with the maxburnamount argument. + */ +static const CAmount DEFAULT_MAX_BURN_AMOUNT{0}; + /** * Submit a transaction to the mempool and (optionally) relay it to all P2P peers. 
* diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 5825efdf82ea1..eb05f33b42a4b 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -128,6 +128,8 @@ static const CRPCConvertParam vRPCConvertParams[] = { "testmempoolaccept", 0, "rawtxs" }, { "testmempoolaccept", 1, "maxfeerate" }, { "submitpackage", 0, "package" }, + { "submitpackage", 1, "maxfeerate" }, + { "submitpackage", 2, "maxburnamount" }, { "combinerawtransaction", 0, "txs" }, { "fundrawtransaction", 1, "options" }, { "fundrawtransaction", 1, "add_inputs"}, diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index 25bfec2d45e21..8539506f2f751 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -28,6 +28,7 @@ using kernel::DumpMempool; +using node::DEFAULT_MAX_BURN_AMOUNT; using node::DEFAULT_MAX_RAW_TX_FEE_RATE; using node::MempoolPath; using node::NodeContext; @@ -46,7 +47,7 @@ static RPCHelpMan sendrawtransaction() {"maxfeerate", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK())}, "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + "/kvB.\nFee rates larger than 1BTC/kvB are rejected.\nSet to 0 to accept any fee rate."}, - {"maxburnamount", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(0)}, + {"maxburnamount", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_BURN_AMOUNT)}, "Reject transactions with provably unspendable outputs (e.g. 
'datacarrier' outputs that use the OP_RETURN opcode) greater than the specified value, expressed in " + CURRENCY_UNIT + ".\n" "If burning funds through unspendable outputs is desired, increase this value.\n" "This check is based on heuristics and does not guarantee spendability of outputs.\n"}, @@ -180,7 +181,7 @@ static RPCHelpMan testmempoolaccept() Chainstate& chainstate = chainman.ActiveChainstate(); const PackageMempoolAcceptResult package_result = [&] { LOCK(::cs_main); - if (txns.size() > 1) return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/true); + if (txns.size() > 1) return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/true, /*max_sane_feerate=*/{}); return PackageMempoolAcceptResult(txns[0]->GetWitnessHash(), chainman.ProcessTransaction(txns[0], /*test_accept=*/true)); }(); @@ -823,6 +824,14 @@ static RPCHelpMan submitpackage() {"rawtx", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, ""}, }, }, + {"maxfeerate", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK())}, + "Reject transactions whose fee rate is higher than the specified value, expressed in " + CURRENCY_UNIT + + "/kvB.\nFee rates larger than 1BTC/kvB are rejected.\nSet to 0 to accept any fee rate."}, + {"maxburnamount", RPCArg::Type::AMOUNT, RPCArg::Default{FormatMoney(DEFAULT_MAX_BURN_AMOUNT)}, + "Reject transactions with provably unspendable outputs (e.g. 
'datacarrier' outputs that use the OP_RETURN opcode) greater than the specified value, expressed in " + CURRENCY_UNIT + ".\n" + "If burning funds through unspendable outputs is desired, increase this value.\n" + "This check is based on heuristics and does not guarantee spendability of outputs.\n" + }, }, RPCResult{ RPCResult::Type::OBJ, "", "", @@ -862,6 +871,17 @@ static RPCHelpMan submitpackage() "Array must contain between 1 and " + ToString(MAX_PACKAGE_COUNT) + " transactions."); } + // Fee check needs to be run with chainstate and package context + const CFeeRate max_raw_tx_fee_rate = ParseFeeRate(self.Arg(1)); + std::optional max_sane_feerate{max_raw_tx_fee_rate}; + // 0-value is special; it's mapped to no sanity check + if (max_raw_tx_fee_rate == CFeeRate(0)) { + max_sane_feerate = std::nullopt; + } + + // Burn sanity check is run with no context + const CAmount max_burn_amount = request.params[2].isNull() ? 0 : AmountFromValue(request.params[2]); + std::vector txns; txns.reserve(raw_transactions.size()); for (const auto& rawtx : raw_transactions.getValues()) { @@ -870,6 +890,13 @@ static RPCHelpMan submitpackage() throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed: " + rawtx.get_str() + " Make sure the tx has at least one input."); } + + for (const auto& out : mtx.vout) { + if((out.scriptPubKey.IsUnspendable() || !out.scriptPubKey.HasValidOps()) && out.nValue > max_burn_amount) { + throw JSONRPCTransactionError(TransactionError::MAX_BURN_EXCEEDED); + } + } + txns.emplace_back(MakeTransactionRef(std::move(mtx))); } if (!IsChildWithParentsTree(txns)) { @@ -879,7 +906,7 @@ static RPCHelpMan submitpackage() NodeContext& node = EnsureAnyNodeContext(request.context); CTxMemPool& mempool = EnsureMemPool(node); Chainstate& chainstate = EnsureChainman(node).ActiveChainstate(); - const auto package_result = WITH_LOCK(::cs_main, return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/ false)); + const auto package_result = WITH_LOCK(::cs_main, 
return ProcessNewPackage(chainstate, mempool, txns, /*test_accept=*/ false, max_sane_feerate)); std::string package_msg = "success"; diff --git a/src/test/fuzz/package_eval.cpp b/src/test/fuzz/package_eval.cpp index 9e658e0ceda9b..a16dbbe8ca6af 100644 --- a/src/test/fuzz/package_eval.cpp +++ b/src/test/fuzz/package_eval.cpp @@ -277,7 +277,7 @@ FUZZ_TARGET(tx_package_eval, .init = initialize_tx_pool) auto single_submit = txs.size() == 1 && fuzzed_data_provider.ConsumeBool(); const auto result_package = WITH_LOCK(::cs_main, - return ProcessNewPackage(chainstate, tx_pool, txs, /*test_accept=*/single_submit)); + return ProcessNewPackage(chainstate, tx_pool, txs, /*test_accept=*/single_submit, /*max_sane_feerate=*/{})); // Always set bypass_limits to false because it is not supported in ProcessNewPackage and // can be a source of divergence. diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp index fcf230642a61c..69f3cc22f6062 100644 --- a/src/test/fuzz/tx_pool.cpp +++ b/src/test/fuzz/tx_pool.cpp @@ -291,7 +291,7 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool) // Make sure ProcessNewPackage on one transaction works. // The result is not guaranteed to be the same as what is returned by ATMP. const auto result_package = WITH_LOCK(::cs_main, - return ProcessNewPackage(chainstate, tx_pool, {tx}, true)); + return ProcessNewPackage(chainstate, tx_pool, {tx}, true, /*max_sane_feerate=*/{})); // If something went wrong due to a package-specific policy, it might not return a // validation result for the transaction. 
if (result_package.m_state.GetResult() != PackageValidationResult::PCKG_POLICY) { diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp index f6456526bb054..eb131dc6bba8f 100644 --- a/src/test/txpackage_tests.cpp +++ b/src/test/txpackage_tests.cpp @@ -132,7 +132,7 @@ BOOST_FIXTURE_TEST_CASE(package_validation_tests, TestChain100Setup) /*output_amount=*/CAmount(48 * COIN), /*submit=*/false); CTransactionRef tx_child = MakeTransactionRef(mtx_child); Package package_parent_child{tx_parent, tx_child}; - const auto result_parent_child = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_parent_child, /*test_accept=*/true); + const auto result_parent_child = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_parent_child, /*test_accept=*/true, /*max_sane_feerate=*/{}); if (auto err_parent_child{CheckPackageMempoolAcceptResult(package_parent_child, result_parent_child, /*expect_valid=*/true, nullptr)}) { BOOST_ERROR(err_parent_child.value()); } else { @@ -151,7 +151,7 @@ BOOST_FIXTURE_TEST_CASE(package_validation_tests, TestChain100Setup) CTransactionRef giant_ptx = create_placeholder_tx(999, 999); BOOST_CHECK(GetVirtualTransactionSize(*giant_ptx) > DEFAULT_ANCESTOR_SIZE_LIMIT_KVB * 1000); Package package_single_giant{giant_ptx}; - auto result_single_large = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_single_giant, /*test_accept=*/true); + auto result_single_large = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_single_giant, /*test_accept=*/true, /*max_sane_feerate=*/{}); if (auto err_single_large{CheckPackageMempoolAcceptResult(package_single_giant, result_single_large, /*expect_valid=*/false, nullptr)}) { BOOST_ERROR(err_single_large.value()); } else { @@ -275,7 +275,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup) package_unrelated.emplace_back(MakeTransactionRef(mtx)); } auto result_unrelated_submit 
= ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_unrelated, /*test_accept=*/false); + package_unrelated, /*test_accept=*/false, /*max_sane_feerate=*/{}); // We don't expect m_tx_results for each transaction when basic sanity checks haven't passed. BOOST_CHECK(result_unrelated_submit.m_state.IsInvalid()); BOOST_CHECK_EQUAL(result_unrelated_submit.m_state.GetResult(), PackageValidationResult::PCKG_POLICY); @@ -315,7 +315,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup) // 3 Generations is not allowed. { auto result_3gen_submit = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_3gen, /*test_accept=*/false); + package_3gen, /*test_accept=*/false, /*max_sane_feerate=*/{}); BOOST_CHECK(result_3gen_submit.m_state.IsInvalid()); BOOST_CHECK_EQUAL(result_3gen_submit.m_state.GetResult(), PackageValidationResult::PCKG_POLICY); BOOST_CHECK_EQUAL(result_3gen_submit.m_state.GetRejectReason(), "package-not-child-with-parents"); @@ -332,7 +332,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup) CTransactionRef tx_parent_invalid = MakeTransactionRef(mtx_parent_invalid); Package package_invalid_parent{tx_parent_invalid, tx_child}; auto result_quit_early = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_invalid_parent, /*test_accept=*/ false); + package_invalid_parent, /*test_accept=*/ false, /*max_sane_feerate=*/{}); if (auto err_parent_invalid{CheckPackageMempoolAcceptResult(package_invalid_parent, result_quit_early, /*expect_valid=*/false, m_node.mempool.get())}) { BOOST_ERROR(err_parent_invalid.value()); } else { @@ -353,7 +353,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup) package_missing_parent.push_back(MakeTransactionRef(mtx_child)); { const auto result_missing_parent = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_missing_parent, /*test_accept=*/false); + 
package_missing_parent, /*test_accept=*/false, /*max_sane_feerate=*/{}); BOOST_CHECK(result_missing_parent.m_state.IsInvalid()); BOOST_CHECK_EQUAL(result_missing_parent.m_state.GetResult(), PackageValidationResult::PCKG_POLICY); BOOST_CHECK_EQUAL(result_missing_parent.m_state.GetRejectReason(), "package-not-child-with-unconfirmed-parents"); @@ -363,7 +363,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup) // Submit package with parent + child. { const auto submit_parent_child = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_parent_child, /*test_accept=*/false); + package_parent_child, /*test_accept=*/false, /*max_sane_feerate=*/{}); expected_pool_size += 2; BOOST_CHECK_MESSAGE(submit_parent_child.m_state.IsValid(), "Package validation unexpectedly failed: " << submit_parent_child.m_state.GetRejectReason()); @@ -385,7 +385,7 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup) // Already-in-mempool transactions should be detected and de-duplicated. 
{ const auto submit_deduped = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_parent_child, /*test_accept=*/false); + package_parent_child, /*test_accept=*/false, /*max_sane_feerate=*/{}); if (auto err_deduped{CheckPackageMempoolAcceptResult(package_parent_child, submit_deduped, /*expect_valid=*/true, m_node.mempool.get())}) { BOOST_ERROR(err_deduped.value()); } else { @@ -456,7 +456,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup) { Package package_parent_child1{ptx_parent, ptx_child1}; const auto submit_witness1 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_parent_child1, /*test_accept=*/false); + package_parent_child1, /*test_accept=*/false, /*max_sane_feerate=*/{}); if (auto err_witness1{CheckPackageMempoolAcceptResult(package_parent_child1, submit_witness1, /*expect_valid=*/true, m_node.mempool.get())}) { BOOST_ERROR(err_witness1.value()); } @@ -464,7 +464,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup) // Child2 would have been validated individually. Package package_parent_child2{ptx_parent, ptx_child2}; const auto submit_witness2 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_parent_child2, /*test_accept=*/false); + package_parent_child2, /*test_accept=*/false, /*max_sane_feerate=*/{}); if (auto err_witness2{CheckPackageMempoolAcceptResult(package_parent_child2, submit_witness2, /*expect_valid=*/true, m_node.mempool.get())}) { BOOST_ERROR(err_witness2.value()); } else { @@ -478,7 +478,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup) // Deduplication should work when wtxid != txid. Submit package with the already-in-mempool // transactions again, which should not fail. 
const auto submit_segwit_dedup = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_parent_child1, /*test_accept=*/false); + package_parent_child1, /*test_accept=*/false, /*max_sane_feerate=*/{}); if (auto err_segwit_dedup{CheckPackageMempoolAcceptResult(package_parent_child1, submit_segwit_dedup, /*expect_valid=*/true, m_node.mempool.get())}) { BOOST_ERROR(err_segwit_dedup.value()); } else { @@ -508,7 +508,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup) { Package package_child2_grandchild{ptx_child2, ptx_grandchild}; const auto submit_spend_ignored = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_child2_grandchild, /*test_accept=*/false); + package_child2_grandchild, /*test_accept=*/false, /*max_sane_feerate=*/{}); if (auto err_spend_ignored{CheckPackageMempoolAcceptResult(package_child2_grandchild, submit_spend_ignored, /*expect_valid=*/true, m_node.mempool.get())}) { BOOST_ERROR(err_spend_ignored.value()); } else { @@ -606,7 +606,7 @@ BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup) // parent3 should be accepted // child should be accepted { - const auto mixed_result = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_mixed, false); + const auto mixed_result = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_mixed, false, /*max_sane_feerate=*/{}); if (auto err_mixed{CheckPackageMempoolAcceptResult(package_mixed, mixed_result, /*expect_valid=*/true, m_node.mempool.get())}) { BOOST_ERROR(err_mixed.value()); } else { @@ -670,7 +670,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup) { BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size); const auto submit_cpfp_deprio = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_cpfp, /*test_accept=*/ false); + package_cpfp, /*test_accept=*/ false, /*max_sane_feerate=*/{}); if (auto 
err_cpfp_deprio{CheckPackageMempoolAcceptResult(package_cpfp, submit_cpfp_deprio, /*expect_valid=*/false, m_node.mempool.get())}) { BOOST_ERROR(err_cpfp_deprio.value()); } else { @@ -692,7 +692,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup) { BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size); const auto submit_cpfp = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_cpfp, /*test_accept=*/ false); + package_cpfp, /*test_accept=*/ false, /*max_sane_feerate=*/{}); if (auto err_cpfp{CheckPackageMempoolAcceptResult(package_cpfp, submit_cpfp, /*expect_valid=*/true, m_node.mempool.get())}) { BOOST_ERROR(err_cpfp.value()); } else { @@ -744,7 +744,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup) // Cheap package should fail for being too low fee. { const auto submit_package_too_low = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_still_too_low, /*test_accept=*/false); + package_still_too_low, /*test_accept=*/false, /*max_sane_feerate=*/{}); if (auto err_package_too_low{CheckPackageMempoolAcceptResult(package_still_too_low, submit_package_too_low, /*expect_valid=*/false, m_node.mempool.get())}) { BOOST_ERROR(err_package_too_low.value()); } else { @@ -770,7 +770,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup) // Now that the child's fees have "increased" by 1 BTC, the cheap package should succeed. 
{ const auto submit_prioritised_package = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_still_too_low, /*test_accept=*/false); + package_still_too_low, /*test_accept=*/false, /*max_sane_feerate=*/{}); if (auto err_prioritised{CheckPackageMempoolAcceptResult(package_still_too_low, submit_prioritised_package, /*expect_valid=*/true, m_node.mempool.get())}) { BOOST_ERROR(err_prioritised.value()); } else { @@ -818,7 +818,7 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup) { BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size); const auto submit_rich_parent = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, - package_rich_parent, /*test_accept=*/false); + package_rich_parent, /*test_accept=*/false, /*max_sane_feerate=*/{}); if (auto err_rich_parent{CheckPackageMempoolAcceptResult(package_rich_parent, submit_rich_parent, /*expect_valid=*/false, m_node.mempool.get())}) { BOOST_ERROR(err_rich_parent.value()); } else { diff --git a/src/validation.cpp b/src/validation.cpp index 81a3c35864995..428195663a48a 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -472,6 +472,11 @@ class MemPoolAccept * policies such as mempool min fee and min relay fee. */ const bool m_package_feerates; + /** Used for local submission of transactions to catch "absurd" fees + * due to fee miscalculation by wallets. std::nullopt implies unset, allowing any feerates. + * Any individual transaction failing this check causes immediate failure. + */ + const std::optional m_client_maxfeerate; /** Parameters for single transaction mempool validation. 
*/ static ATMPArgs SingleAccept(const CChainParams& chainparams, int64_t accept_time, @@ -485,6 +490,7 @@ class MemPoolAccept /* m_allow_replacement */ true, /* m_package_submission */ false, /* m_package_feerates */ false, + /* m_client_maxfeerate */ {}, // checked by caller }; } @@ -499,12 +505,13 @@ class MemPoolAccept /* m_allow_replacement */ false, /* m_package_submission */ false, // not submitting to mempool /* m_package_feerates */ false, + /* m_client_maxfeerate */ {}, // checked by caller }; } /** Parameters for child-with-unconfirmed-parents package validation. */ static ATMPArgs PackageChildWithParents(const CChainParams& chainparams, int64_t accept_time, - std::vector& coins_to_uncache) { + std::vector& coins_to_uncache, std::optional& client_maxfeerate) { return ATMPArgs{/* m_chainparams */ chainparams, /* m_accept_time */ accept_time, /* m_bypass_limits */ false, @@ -513,6 +520,7 @@ class MemPoolAccept /* m_allow_replacement */ false, /* m_package_submission */ true, /* m_package_feerates */ true, + /* m_client_maxfeerate */ client_maxfeerate, }; } @@ -526,6 +534,7 @@ class MemPoolAccept /* m_allow_replacement */ true, /* m_package_submission */ true, // do not LimitMempoolSize in Finalize() /* m_package_feerates */ false, // only 1 transaction + /* m_client_maxfeerate */ package_args.m_client_maxfeerate, }; } @@ -539,7 +548,8 @@ class MemPoolAccept bool test_accept, bool allow_replacement, bool package_submission, - bool package_feerates) + bool package_feerates, + std::optional client_maxfeerate) : m_chainparams{chainparams}, m_accept_time{accept_time}, m_bypass_limits{bypass_limits}, @@ -547,7 +557,8 @@ class MemPoolAccept m_test_accept{test_accept}, m_allow_replacement{allow_replacement}, m_package_submission{package_submission}, - m_package_feerates{package_feerates} + m_package_feerates{package_feerates}, + m_client_maxfeerate{client_maxfeerate} { } }; @@ -1255,6 +1266,12 @@ MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const 
CTransactionRef return MempoolAcceptResult::Failure(ws.m_state); } + // Individual modified feerate exceeded caller-defined max; abort + if (args.m_client_maxfeerate && CFeeRate(ws.m_modified_fees, ws.m_vsize) > args.m_client_maxfeerate.value()) { + ws.m_state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "max feerate exceeded", ""); + return MempoolAcceptResult::Failure(ws.m_state); + } + if (m_rbf && !ReplacementChecks(ws)) return MempoolAcceptResult::Failure(ws.m_state); // Perform the inexpensive checks first and avoid hashing and signature verification unless @@ -1313,6 +1330,16 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std:: results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); return PackageMempoolAcceptResult(package_state, std::move(results)); } + + // Individual modified feerate exceeded caller-defined max; abort + // N.B. this doesn't take into account CPFPs. Chunk-aware validation may be more robust. + if (args.m_client_maxfeerate && CFeeRate(ws.m_modified_fees, ws.m_vsize) > args.m_client_maxfeerate.value()) { + package_state.Invalid(PackageValidationResult::PCKG_TX, "max feerate exceeded"); + // Exit early to avoid doing pointless work. Update the failed tx result; the rest are unfinished. + results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); + return PackageMempoolAcceptResult(package_state, std::move(results)); + } + // Make the coins created by this transaction available for subsequent transactions in the // package to spend. Since we already checked conflicts in the package and we don't allow // replacements, we don't need to track the coins spent. 
Note that this logic will need to be @@ -1657,7 +1684,7 @@ MempoolAcceptResult AcceptToMemoryPool(Chainstate& active_chainstate, const CTra } PackageMempoolAcceptResult ProcessNewPackage(Chainstate& active_chainstate, CTxMemPool& pool, - const Package& package, bool test_accept) + const Package& package, bool test_accept, std::optional client_maxfeerate) { AssertLockHeld(cs_main); assert(!package.empty()); @@ -1671,7 +1698,7 @@ PackageMempoolAcceptResult ProcessNewPackage(Chainstate& active_chainstate, CTxM auto args = MemPoolAccept::ATMPArgs::PackageTestAccept(chainparams, GetTime(), coins_to_uncache); return MemPoolAccept(pool, active_chainstate).AcceptMultipleTransactions(package, args); } else { - auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(chainparams, GetTime(), coins_to_uncache); + auto args = MemPoolAccept::ATMPArgs::PackageChildWithParents(chainparams, GetTime(), coins_to_uncache, client_maxfeerate); return MemPoolAccept(pool, active_chainstate).AcceptPackage(package, args); } }(); diff --git a/src/validation.h b/src/validation.h index 94765bfbcd8a6..0746319b23f62 100644 --- a/src/validation.h +++ b/src/validation.h @@ -274,13 +274,15 @@ MempoolAcceptResult AcceptToMemoryPool(Chainstate& active_chainstate, const CTra /** * Validate (and maybe submit) a package to the mempool. See doc/policy/packages.md for full details * on package validation rules. -* @param[in] test_accept When true, run validation checks but don't submit to mempool. +* @param[in] test_accept When true, run validation checks but don't submit to mempool. +* @param[in] max_sane_feerate If exceeded by an individual transaction, rest of (sub)package evaluation is aborted. +* Only for sanity checks against local submission of transactions. * @returns a PackageMempoolAcceptResult which includes a MempoolAcceptResult for each transaction. * If a transaction fails, validation will exit early and some results may be missing. 
It is also * possible for the package to be partially submitted. */ PackageMempoolAcceptResult ProcessNewPackage(Chainstate& active_chainstate, CTxMemPool& pool, - const Package& txns, bool test_accept) + const Package& txns, bool test_accept, std::optional max_sane_feerate) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /* Mempool validation helper functions */ diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py index 664f2df3f19a0..64cb872faa5bb 100755 --- a/test/functional/rpc_packages.py +++ b/test/functional/rpc_packages.py @@ -81,6 +81,7 @@ def run_test(self): self.test_conflicting() self.test_rbf() self.test_submitpackage() + self.test_maxfeerate_maxburn_submitpackage() def test_independent(self, coin): self.log.info("Test multiple independent transactions in a package") @@ -356,5 +357,34 @@ def test_submitpackage(self): assert_equal(res["tx-results"][sec_wtxid]["error"], "version") peer.wait_for_broadcast([first_wtxid]) + def test_maxfeerate_maxburn_submitpackage(self): + node = self.nodes[0] + # clear mempool + deterministic_address = node.get_deterministic_priv_key().address + self.generatetoaddress(node, 1, deterministic_address) + + self.log.info("Submitpackage maxfeerate arg testing") + chained_txns = self.wallet.create_self_transfer_chain(chain_length=2) + minrate_btc_kvb = min([chained_txn["fee"] / chained_txn["tx"].get_vsize() * 1000 for chained_txn in chained_txns]) + chain_hex = [t["hex"] for t in chained_txns] + pkg_result = node.submitpackage(chain_hex, maxfeerate=minrate_btc_kvb - Decimal("0.00000001")) + assert_equal(pkg_result["tx-results"][chained_txns[0]["wtxid"]]["error"], "max feerate exceeded") + assert_equal(pkg_result["tx-results"][chained_txns[1]["wtxid"]]["error"], "bad-txns-inputs-missingorspent") + assert_equal(node.getrawmempool(), []) + + self.log.info("Submitpackage maxburnamount arg testing") + tx = tx_from_hex(chain_hex[1]) + tx.vout[-1].scriptPubKey = b'a' * 10001 # scriptPubKey bigger than 10k IsUnspendable + 
chain_hex = [chain_hex[0], tx.serialize().hex()] + # burn test is run before any package evaluation; nothing makes it in and we get broader exception + assert_raises_rpc_error(-25, "Unspendable output exceeds maximum configured by user", node.submitpackage, chain_hex, 0, chained_txns[1]["new_utxo"]["value"] - Decimal("0.00000001")) + assert_equal(node.getrawmempool(), []) + + # Relax the restrictions for both and send it; parent gets through as own subpackage + pkg_result = node.submitpackage(chain_hex, maxfeerate=minrate_btc_kvb, maxburnamount=chained_txns[1]["new_utxo"]["value"]) + assert "error" not in pkg_result["tx-results"][chained_txns[0]["wtxid"]] + assert_equal(pkg_result["tx-results"][tx.getwtxid()]["error"], "scriptpubkey") + assert_equal(node.getrawmempool(), [chained_txns[0]["txid"]]) + if __name__ == "__main__": RPCPackagesTest().main() From fa58550317c633c411009c1cc8fb692e3baf97e8 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 13 Mar 2024 15:11:28 +0100 Subject: [PATCH 16/79] refactor: Modernize header sync logs No change in behavior, only the modern aliases and types are used. 
--- src/validation.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 94d2680db749b..a2300104970e5 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -4188,9 +4188,9 @@ bool ChainstateManager::ProcessNewBlockHeaders(const std::vector& if (NotifyHeaderTip(*this)) { if (IsInitialBlockDownload() && ppindex && *ppindex) { const CBlockIndex& last_accepted{**ppindex}; - const int64_t blocks_left{(GetTime() - last_accepted.GetBlockTime()) / GetConsensus().nPowTargetSpacing}; + int64_t blocks_left{(NodeClock::now() - last_accepted.Time()) / GetConsensus().PowTargetSpacing()}; const double progress{100.0 * last_accepted.nHeight / (last_accepted.nHeight + blocks_left)}; - LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", last_accepted.nHeight, progress); + LogInfo("Synchronizing blockheaders, height: %d (~%.2f%%)\n", last_accepted.nHeight, progress); } } return true; @@ -4214,9 +4214,9 @@ void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t bool initial_download = IsInitialBlockDownload(); GetNotifications().headerTip(GetSynchronizationState(initial_download), height, timestamp, /*presync=*/true); if (initial_download) { - const int64_t blocks_left{(GetTime() - timestamp) / GetConsensus().nPowTargetSpacing}; + int64_t blocks_left{(NodeClock::now() - NodeSeconds{std::chrono::seconds{timestamp}}) / GetConsensus().PowTargetSpacing()}; const double progress{100.0 * height / (height + blocks_left)}; - LogPrintf("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n", height, progress); + LogInfo("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n", height, progress); } } From fa4d98b3c8e63f20c6f366fc9382039ba52614a4 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 13 Mar 2024 15:15:42 +0100 Subject: [PATCH 17/79] Avoid divide-by-zero in header sync logs when NodeClock is behind --- src/validation.cpp | 2 ++ 1 file 
changed, 2 insertions(+) diff --git a/src/validation.cpp b/src/validation.cpp index a2300104970e5..a9d9f9d207cfc 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -4189,6 +4189,7 @@ bool ChainstateManager::ProcessNewBlockHeaders(const std::vector& if (IsInitialBlockDownload() && ppindex && *ppindex) { const CBlockIndex& last_accepted{**ppindex}; int64_t blocks_left{(NodeClock::now() - last_accepted.Time()) / GetConsensus().PowTargetSpacing()}; + blocks_left = std::max<int64_t>(0, blocks_left); const double progress{100.0 * last_accepted.nHeight / (last_accepted.nHeight + blocks_left)}; LogInfo("Synchronizing blockheaders, height: %d (~%.2f%%)\n", last_accepted.nHeight, progress); } @@ -4215,6 +4216,7 @@ void ChainstateManager::ReportHeadersPresync(const arith_uint256& work, int64_t GetNotifications().headerTip(GetSynchronizationState(initial_download), height, timestamp, /*presync=*/true); if (initial_download) { int64_t blocks_left{(NodeClock::now() - NodeSeconds{std::chrono::seconds{timestamp}}) / GetConsensus().PowTargetSpacing()}; + blocks_left = std::max<int64_t>(0, blocks_left); const double progress{100.0 * height / (height + blocks_left)}; LogInfo("Pre-synchronizing blockheaders, height: %d (~%.2f%%)\n", height, progress); } From 5555395c15e896230a55c131fc3cbfd9d116adf8 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 13 Mar 2024 17:16:53 +0100 Subject: [PATCH 18/79] lint: Use git --no-pager to print any output in one go --- test/lint/test_runner/src/main.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/lint/test_runner/src/main.rs b/test/lint/test_runner/src/main.rs index f9df57623949c..e22e047e4b19c 100644 --- a/test/lint/test_runner/src/main.rs +++ b/test/lint/test_runner/src/main.rs @@ -14,7 +14,9 @@ type LintFn = fn() -> LintResult; /// Return the git command fn git() -> Command { - Command::new("git") + let mut git = Command::new("git"); + git.arg("--no-pager"); + git } /// Return stdout From 
fae70ba00da27ca5734c88e9964c872c7faa0f78 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Fri, 15 Dec 2023 17:01:06 +0100 Subject: [PATCH 19/79] ci: Better tidy errors --- ci/test/03_test_script.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh index 786cb08bf65bb..d85e24a5c00cc 100755 --- a/ci/test/03_test_script.sh +++ b/ci/test/03_test_script.sh @@ -182,7 +182,11 @@ if [ "${RUN_TIDY}" = "true" ]; then set -eo pipefail cd "${BASE_BUILD_DIR}/bitcoin-$HOST/src/" - ( run-clang-tidy-"${TIDY_LLVM_V}" -quiet -load="/tidy-build/libbitcoin-tidy.so" "${MAKEJOBS}" ) | grep -C5 "error" + if ! ( run-clang-tidy-"${TIDY_LLVM_V}" -quiet -load="/tidy-build/libbitcoin-tidy.so" "${MAKEJOBS}" | tee tmp.tidy-out.txt ); then + grep -C5 "error: " tmp.tidy-out.txt + echo "^^^ ⚠️ Failure generated from clang-tidy" + false + fi # Filter out files by regex here, because regex may not be # accepted in src/.bear-tidy-config # Filter out: From 76d6537698e46f52d6c45a76f7d99ba427d57dca Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 13 Mar 2024 15:18:58 +0000 Subject: [PATCH 20/79] depends: drop 1 qt determinism patch No-longer required now that we are building with GCC 12. 
--- depends/packages/qt.mk | 2 -- .../qt/fast_fixed_dtoa_no_optimize.patch | 20 ------------------- 2 files changed, 22 deletions(-) delete mode 100644 depends/patches/qt/fast_fixed_dtoa_no_optimize.patch diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 5608e5f07387d..d09ad75eec818 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -18,7 +18,6 @@ $(package)_patches += qtbase-moc-ignore-gcc-macro.patch $(package)_patches += use_android_ndk23.patch $(package)_patches += rcc_hardcode_timestamp.patch $(package)_patches += duplicate_lcqpafonts.patch -$(package)_patches += fast_fixed_dtoa_no_optimize.patch $(package)_patches += guix_cross_lib_path.patch $(package)_patches += fix-macos-linker.patch $(package)_patches += memory_resource.patch @@ -251,7 +250,6 @@ define $(package)_preprocess_cmds patch -p1 -i $($(package)_patch_dir)/rcc_hardcode_timestamp.patch && \ patch -p1 -i $($(package)_patch_dir)/duplicate_lcqpafonts.patch && \ patch -p1 -i $($(package)_patch_dir)/utc_from_string_no_optimize.patch && \ - patch -p1 -i $($(package)_patch_dir)/fast_fixed_dtoa_no_optimize.patch && \ patch -p1 -i $($(package)_patch_dir)/guix_cross_lib_path.patch && \ patch -p1 -i $($(package)_patch_dir)/windows_lto.patch && \ mkdir -p qtbase/mkspecs/macx-clang-linux &&\ diff --git a/depends/patches/qt/fast_fixed_dtoa_no_optimize.patch b/depends/patches/qt/fast_fixed_dtoa_no_optimize.patch deleted file mode 100644 index d4d6539f56dc4..0000000000000 --- a/depends/patches/qt/fast_fixed_dtoa_no_optimize.patch +++ /dev/null @@ -1,20 +0,0 @@ -Modify the optimisation flags for FastFixedDtoa. -This fixes a non-determinism issue in the asm produced for -this function when cross-compiling on x86_64 and aarch64 for -the arm-linux-gnueabihf HOST. - ---- a/qtbase/src/3rdparty/double-conversion/fixed-dtoa.h -+++ b/qtbase/src/3rdparty/double-conversion/fixed-dtoa.h -@@ -48,9 +48,12 @@ namespace double_conversion { - // - // This method only works for some parameters. 
If it can't handle the input it - // returns false. The output is null-terminated when the function succeeds. -+#pragma GCC push_options -+#pragma GCC optimize ("-O1") - bool FastFixedDtoa(double v, int fractional_count, - Vector buffer, int* length, int* decimal_point); - -+#pragma GCC pop_options - } // namespace double_conversion - - #endif // DOUBLE_CONVERSION_FIXED_DTOA_H_ From fa8409e760b8f8734406dcbf98f00ba21d160f87 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Fri, 15 Dec 2023 13:01:26 +0100 Subject: [PATCH 21/79] build: Bump g++ minimum supported version to 11 --- ci/test/00_setup_env_native_previous_releases.sh | 8 ++++---- doc/dependencies.md | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ci/test/00_setup_env_native_previous_releases.sh b/ci/test/00_setup_env_native_previous_releases.sh index 94e88f872f577..3166686d9a5f0 100755 --- a/ci/test/00_setup_env_native_previous_releases.sh +++ b/ci/test/00_setup_env_native_previous_releases.sh @@ -7,10 +7,10 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_previous_releases -export CI_IMAGE_NAME_TAG="docker.io/debian:bullseye" -# Use minimum supported python3.9 and gcc-10, see doc/dependencies.md -export PACKAGES="gcc-10 g++-10 python3-zmq" -export DEP_OPTS="NO_UPNP=1 NO_NATPMP=1 DEBUG=1 CC=gcc-10 CXX=g++-10" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04" +# Use minimum supported python3.9 (or best effort 3.10) and gcc-11, see doc/dependencies.md +export PACKAGES="gcc-11 g++-11 python3-zmq" +export DEP_OPTS="NO_UPNP=1 NO_NATPMP=1 DEBUG=1 CC=gcc-11 CXX=g++-11" export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash export RUN_UNIT_TESTS_SEQUENTIAL="true" export RUN_UNIT_TESTS="false" diff --git a/doc/dependencies.md b/doc/dependencies.md index e992b50b060a2..237f617c02aa1 100644 --- a/doc/dependencies.md +++ 
b/doc/dependencies.md @@ -9,7 +9,7 @@ You can find installation instructions in the `build-*.md` file for your platfor | [Autoconf](https://www.gnu.org/software/autoconf/) | [2.69](https://github.com/bitcoin/bitcoin/pull/17769) | | [Automake](https://www.gnu.org/software/automake/) | [1.13](https://github.com/bitcoin/bitcoin/pull/18290) | | [Clang](https://clang.llvm.org) | [14.0](https://github.com/bitcoin/bitcoin/pull/29208) | -| [GCC](https://gcc.gnu.org) | [10.1](https://github.com/bitcoin/bitcoin/pull/28348) | +| [GCC](https://gcc.gnu.org) | [11.1](https://github.com/bitcoin/bitcoin/pull/29091) | | [Python](https://www.python.org) (scripts, tests) | [3.9](https://github.com/bitcoin/bitcoin/pull/28211) | | [systemtap](https://sourceware.org/systemtap/) ([tracing](tracing.md))| N/A | From cf5faf73c99199e7476b8c86358095300544d1bd Mon Sep 17 00:00:00 2001 From: fanquake Date: Thu, 14 Mar 2024 10:47:13 +0000 Subject: [PATCH 22/79] guix: bump time-machine to dc4842797bfdc5f9f3f5f725bf189c2b68bd6b5a This includes a commit to fix building LLVM 17 on riscv64, see https://git.savannah.gnu.org/cgit/guix.git/commit/?id=4e26331a5ee87928a16888c36d51e270f0f10f90. Followup to discussion in https://github.com/bitcoin/bitcoin/pull/28880#issuecomment-1843313196. If you don't have riscv64 hardware, this can be tested with the following: ```bash guix time-machine --commit=d5ca4d4fd713a9f7e17e074a1e37dda99bbb09fc -- build --target=riscv64-linux-gnu llvm .... riscv64-linux-gnu-ld: CMakeFiles/dsymutil.dir/dsymutil.cpp.o: undefined reference to symbol '__atomic_fetch_and_1@@LIBATOMIC_1.0' riscv64-linux-gnu-ld: /gnu/store/i4ga0pnr1b74bir2bjyp8mcrrbsvk7d3-gcc-cross-riscv64-linux-gnu-11.3.0-lib/riscv64-linux-gnu/lib/libatomic.so.1: error adding symbols: DSO missing from command line collect2: error: ld returned 1 exit status guix time-machine --commit=dc4842797bfdc5f9f3f5f725bf189c2b68bd6b5a -- build --target=riscv64-linux-gnu llvm .... 
grafting '/gnu/store/7y0j0y8jaz4mjx2nz0y42wdnxxjp6id6-llvm-17.0.6-opt-viewer' -> '/gnu/store/8xvahrrjscbprh6cjj0qp5bm9mm78wwa-llvm-17.0.6-opt-viewer'... grafting '/gnu/store/bjhw648bz7ijd2p9hgzzdbw1q8hpagk8-llvm-17.0.6' -> '/gnu/store/x50qi8i2ywgpx6azv4k55ms0w5xjxxg5-llvm-17.0.6'... successfully built /gnu/store/q9xvk8gzzvb4dxfzf6yi5164zd0d1vj2-llvm-17.0.6.drv ``` --- contrib/guix/libexec/prelude.bash | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/guix/libexec/prelude.bash b/contrib/guix/libexec/prelude.bash index 6c912ca748d7e..ce6a9562b44e4 100644 --- a/contrib/guix/libexec/prelude.bash +++ b/contrib/guix/libexec/prelude.bash @@ -51,7 +51,7 @@ fi time-machine() { # shellcheck disable=SC2086 guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \ - --commit=d5ca4d4fd713a9f7e17e074a1e37dda99bbb09fc \ + --commit=dc4842797bfdc5f9f3f5f725bf189c2b68bd6b5a \ --cores="$JOBS" \ --keep-failed \ --fallback \ From fa5844f06d74b35cd27c1927e2250ebb494578e9 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Thu, 14 Mar 2024 15:57:30 +0100 Subject: [PATCH 23/79] Remove unused g++-10 workaround This reverts d4999d40b9bd04dc20111aaaa6ed2d3db1a5caf9 --- src/util/fs_helpers.cpp | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/util/fs_helpers.cpp b/src/util/fs_helpers.cpp index f9393c9c136c6..bce56024620ab 100644 --- a/src/util/fs_helpers.cpp +++ b/src/util/fs_helpers.cpp @@ -249,20 +249,9 @@ fs::path GetSpecialFolderPath(int nFolder, bool fCreate) bool RenameOver(fs::path src, fs::path dest) { -#ifdef __MINGW64__ - // This is a workaround for a bug in libstdc++ which - // implements fs::rename with _wrename function. - // This bug has been fixed in upstream: - // - GCC 10.3: 8dd1c1085587c9f8a21bb5e588dfe1e8cdbba79e - // - GCC 11.1: 1dfd95f0a0ca1d9e6cbc00e6cbfd1fa20a98f312 - // For more details see the commits mentioned above. 
- return MoveFileExW(src.wstring().c_str(), dest.wstring().c_str(), - MOVEFILE_REPLACE_EXISTING) != 0; -#else std::error_code error; fs::rename(src, dest, error); return !error; -#endif } /** From e037c4fe0914d8fa9149ce7532c0d70f738e79e9 Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 16 Jan 2024 12:01:04 +0000 Subject: [PATCH 24/79] depends: always configure with --with-pic We currently do this sporadically. Not only amongst packages, but across OS's, i.e sometimes it's done for BSDs/Android, and sometimes not. Configure with `--with-pic` globally instead. I think this generally makes more sense, and should not have any downsides. See related discussion in https://github.com/bitcoin/bitcoin/pull/28846#discussion_r1399123100. --- depends/funcs.mk | 2 +- depends/packages.md | 3 +++ depends/packages/bdb.mk | 5 ----- depends/packages/expat.mk | 1 - depends/packages/freetype.mk | 1 - depends/packages/libXau.mk | 1 - depends/packages/libevent.mk | 5 ----- depends/packages/libxcb_util.mk | 1 - depends/packages/qrencode.mk | 2 -- depends/packages/sqlite.mk | 4 ---- depends/packages/zeromq.mk | 5 ----- 11 files changed, 4 insertions(+), 26 deletions(-) diff --git a/depends/funcs.mk b/depends/funcs.mk index 7b5c3d0c59158..0251b913005f0 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -147,7 +147,7 @@ $(1)_stage_env+=PATH="$(build_prefix)/bin:$(PATH)" # config.guess, which is what we set it too here. This also quells autoconf # warnings, "If you wanted to set the --build type, don't use --host.", # when using versions older than 2.70. 
-$(1)_autoconf=./configure --build=$(BUILD) --host=$($($(1)_type)_host) --prefix=$($($(1)_type)_prefix) $$($(1)_config_opts) CC="$$($(1)_cc)" CXX="$$($(1)_cxx)" +$(1)_autoconf=./configure --build=$(BUILD) --host=$($($(1)_type)_host) --prefix=$($($(1)_type)_prefix) --with-pic $$($(1)_config_opts) CC="$$($(1)_cc)" CXX="$$($(1)_cxx)" ifneq ($($(1)_nm),) $(1)_autoconf += NM="$$($(1)_nm)" endif diff --git a/depends/packages.md b/depends/packages.md index ad91eaffee47e..c288032766d64 100644 --- a/depends/packages.md +++ b/depends/packages.md @@ -162,6 +162,9 @@ From the [Gentoo Wiki entry](https://wiki.gentoo.org/wiki/Project:Quality_Assura > creates. This leads to massive overlinking, which is toxic to the Gentoo > ecosystem, as it leads to a massive number of unnecessary rebuilds. +Where possible, packages are built with Position Independent Code. Either using +the autotools `--with-pic` flag, or `DCMAKE_POSITION_INDEPENDENT_CODE` with CMake. + ## Secondary dependencies: Secondary dependency packages relative to the bitcoin binaries/libraries (i.e. 
diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk index 1a21238152ad5..be82b0d309817 100644 --- a/depends/packages/bdb.mk +++ b/depends/packages/bdb.mk @@ -9,11 +9,6 @@ $(package)_patches=clang_cxx_11.patch define $(package)_set_vars $(package)_config_opts=--disable-shared --enable-cxx --disable-replication --enable-option-checking $(package)_config_opts_mingw32=--enable-mingw -$(package)_config_opts_linux=--with-pic -$(package)_config_opts_freebsd=--with-pic -$(package)_config_opts_netbsd=--with-pic -$(package)_config_opts_openbsd=--with-pic -$(package)_config_opts_android=--with-pic $(package)_cflags+=-Wno-error=implicit-function-declaration -Wno-error=format-security -Wno-error=implicit-int $(package)_cppflags_freebsd=-D_XOPEN_SOURCE=600 -D__BSD_VISIBLE=1 $(package)_cppflags_netbsd=-D_XOPEN_SOURCE=600 diff --git a/depends/packages/expat.mk b/depends/packages/expat.mk index bb203d06f8442..2db283ef3cf42 100644 --- a/depends/packages/expat.mk +++ b/depends/packages/expat.mk @@ -11,7 +11,6 @@ define $(package)_set_vars $(package)_config_opts=--disable-shared --without-docbook --without-tests --without-examples $(package)_config_opts += --disable-dependency-tracking --enable-option-checking $(package)_config_opts += --without-xmlwf - $(package)_config_opts_linux=--with-pic $(package)_cppflags += -D_DEFAULT_SOURCE endef diff --git a/depends/packages/freetype.mk b/depends/packages/freetype.mk index 6f5dbe0f01377..c28259ed6701c 100644 --- a/depends/packages/freetype.mk +++ b/depends/packages/freetype.mk @@ -7,7 +7,6 @@ $(package)_sha256_hash=8bee39bd3968c4804b70614a0a3ad597299ad0e824bc8aad5ce8aaf48 define $(package)_set_vars $(package)_config_opts=--without-zlib --without-png --without-harfbuzz --without-bzip2 --disable-static $(package)_config_opts += --enable-option-checking --without-brotli - $(package)_config_opts_linux=--with-pic endef define $(package)_config_cmds diff --git a/depends/packages/libXau.mk b/depends/packages/libXau.mk index 
b7e032c0b2d13..aeb14dcd6e8e7 100644 --- a/depends/packages/libXau.mk +++ b/depends/packages/libXau.mk @@ -10,7 +10,6 @@ $(package)_dependencies=xproto define $(package)_set_vars $(package)_config_opts=--disable-shared --disable-lint-library --without-lint $(package)_config_opts += --disable-dependency-tracking --enable-option-checking - $(package)_config_opts_linux=--with-pic endef define $(package)_preprocess_cmds diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk index 9650f77db9207..d764be5d0aeff 100644 --- a/depends/packages/libevent.mk +++ b/depends/packages/libevent.mk @@ -11,11 +11,6 @@ define $(package)_set_vars $(package)_config_opts=--disable-shared --disable-openssl --disable-libevent-regress --disable-samples $(package)_config_opts += --disable-dependency-tracking --enable-option-checking $(package)_config_opts_release=--disable-debug-mode - $(package)_config_opts_linux=--with-pic - $(package)_config_opts_freebsd=--with-pic - $(package)_config_opts_netbsd=--with-pic - $(package)_config_opts_openbsd=--with-pic - $(package)_config_opts_android=--with-pic $(package)_cppflags_mingw32=-D_WIN32_WINNT=0x0601 ifeq ($(NO_HARDEN),) diff --git a/depends/packages/libxcb_util.mk b/depends/packages/libxcb_util.mk index 6f1b9cd7c65e0..6e4c7359b20ee 100644 --- a/depends/packages/libxcb_util.mk +++ b/depends/packages/libxcb_util.mk @@ -8,7 +8,6 @@ $(package)_dependencies=libxcb define $(package)_set_vars $(package)_config_opts = --disable-shared --disable-devel-docs --without-doxygen $(package)_config_opts += --disable-dependency-tracking --enable-option-checking -$(package)_config_opts += --with-pic endef define $(package)_preprocess_cmds diff --git a/depends/packages/qrencode.mk b/depends/packages/qrencode.mk index 2afd95d7c4fae..9ebd2dd85a454 100644 --- a/depends/packages/qrencode.mk +++ b/depends/packages/qrencode.mk @@ -8,8 +8,6 @@ define $(package)_set_vars $(package)_config_opts=--disable-shared --without-tools --without-tests --without-png 
$(package)_config_opts += --disable-gprof --disable-gcov --disable-mudflap $(package)_config_opts += --disable-dependency-tracking --enable-option-checking -$(package)_config_opts_linux=--with-pic -$(package)_config_opts_android=--with-pic $(package)_cflags += -Wno-int-conversion -Wno-implicit-function-declaration endef diff --git a/depends/packages/sqlite.mk b/depends/packages/sqlite.mk index 6809b391139fb..7d175ec4bb3d0 100644 --- a/depends/packages/sqlite.mk +++ b/depends/packages/sqlite.mk @@ -7,10 +7,6 @@ $(package)_sha256_hash=5af07de982ba658fd91a03170c945f99c971f6955bc79df3266544373 define $(package)_set_vars $(package)_config_opts=--disable-shared --disable-readline --disable-dynamic-extensions --enable-option-checking $(package)_config_opts+= --disable-rtree --disable-fts4 --disable-fts5 -$(package)_config_opts_linux=--with-pic -$(package)_config_opts_freebsd=--with-pic -$(package)_config_opts_netbsd=--with-pic -$(package)_config_opts_openbsd=--with-pic # We avoid using `--enable-debug` because it overrides CFLAGS, a behavior we want to prevent. 
$(package)_cflags_debug += -g $(package)_cppflags_debug += -DSQLITE_DEBUG diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk index cc78999dbbeb9..bfa5e97c60498 100644 --- a/depends/packages/zeromq.mk +++ b/depends/packages/zeromq.mk @@ -11,11 +11,6 @@ define $(package)_set_vars $(package)_config_opts += --without-libsodium --without-libgssapi_krb5 --without-pgm --without-norm --without-vmci $(package)_config_opts += --disable-libunwind --disable-radix-tree --without-gcov --disable-dependency-tracking $(package)_config_opts += --disable-Werror --disable-drafts --enable-option-checking - $(package)_config_opts_linux=--with-pic - $(package)_config_opts_freebsd=--with-pic - $(package)_config_opts_netbsd=--with-pic - $(package)_config_opts_openbsd=--with-pic - $(package)_config_opts_android=--with-pic endef define $(package)_preprocess_cmds From 180973a94180f9849bf7cb0dab7c9177a942efb8 Mon Sep 17 00:00:00 2001 From: ishaanam Date: Thu, 1 Feb 2024 17:59:43 -0500 Subject: [PATCH 25/79] test: Add tests for wallet mempool conflicts --- test/functional/wallet_conflicts.py | 282 ++++++++++++++++++++++++++++ 1 file changed, 282 insertions(+) diff --git a/test/functional/wallet_conflicts.py b/test/functional/wallet_conflicts.py index 802b718cd5e25..3ca7eb246c91f 100755 --- a/test/functional/wallet_conflicts.py +++ b/test/functional/wallet_conflicts.py @@ -9,6 +9,7 @@ from decimal import Decimal +from test_framework.blocktools import COINBASE_MATURITY from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -28,6 +29,20 @@ def get_utxo_of_value(self, from_tx_id, search_value): return next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(from_tx_id)["details"] if tx_out["amount"] == Decimal(f"{search_value}")) def run_test(self): + """ + The following tests check the behavior of the wallet when + transaction conflicts are created. 
These conflicts are created + using raw transaction RPCs that double-spend UTXOs and have more + fees, replacing the original transaction. + """ + + self.test_block_conflicts() + self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 7, self.nodes[2].getnewaddress()) + self.test_mempool_conflict() + self.test_mempool_and_block_conflicts() + self.test_descendants_with_mempool_conflicts() + + def test_block_conflicts(self): self.log.info("Send tx from which to conflict outputs later") txid_conflict_from_1 = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) txid_conflict_from_2 = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) @@ -123,5 +138,272 @@ def run_test(self): assert_equal(former_conflicted["confirmations"], 1) assert_equal(former_conflicted["blockheight"], 217) + def test_mempool_conflict(self): + self.nodes[0].createwallet("alice") + alice = self.nodes[0].get_wallet_rpc("alice") + + bob = self.nodes[1] + + self.nodes[2].send(outputs=[{alice.getnewaddress() : 25} for _ in range(3)]) + self.generate(self.nodes[2], 1) + + self.log.info("Test a scenario where a transaction has a mempool conflict") + + unspents = alice.listunspent() + assert_equal(len(unspents), 3) + assert all([tx["amount"] == 25 for tx in unspents]) + + # tx1 spends unspent[0] and unspent[1] + raw_tx = alice.createrawtransaction(inputs=[unspents[0], unspents[1]], outputs=[{bob.getnewaddress() : 49.9999}]) + tx1 = alice.signrawtransactionwithwallet(raw_tx)['hex'] + + # tx2 spends unspent[1] and unspent[2], conflicts with tx1 + raw_tx = alice.createrawtransaction(inputs=[unspents[1], unspents[2]], outputs=[{bob.getnewaddress() : 49.99}]) + tx2 = alice.signrawtransactionwithwallet(raw_tx)['hex'] + + # tx3 spends unspent[2], conflicts with tx2 + raw_tx = alice.createrawtransaction(inputs=[unspents[2]], outputs=[{bob.getnewaddress() : 24.9899}]) + tx3 = alice.signrawtransactionwithwallet(raw_tx)['hex'] + + # broadcast tx1 + tx1_txid = 
alice.sendrawtransaction(tx1) + + assert_equal(alice.listunspent(), [unspents[2]]) + assert_equal(alice.getbalance(), 25) + + # broadcast tx2, replaces tx1 in mempool + tx2_txid = alice.sendrawtransaction(tx2) + + # Check that unspent[0] is still not available because the wallet does not know that the tx spending it has a mempool conflicted + assert_equal(alice.listunspent(), []) + assert_equal(alice.getbalance(), 0) + + self.log.info("Test scenario where a mempool conflict is removed") + + # broadcast tx3, replaces tx2 in mempool + # Now that tx1's conflict has been removed, tx1 is now + # not conflicted, and instead is inactive until it is + # rebroadcasted. Now unspent[0] is not available, because + # tx1 is no longer conflicted. + alice.sendrawtransaction(tx3) + + assert tx1_txid not in self.nodes[0].getrawmempool() + + # now all of alice's outputs should be considered spent + # unspent[0]: spent by inactive tx1 + # unspent[1]: spent by inactive tx1 + # unspent[2]: spent by active tx3 + assert_equal(alice.listunspent(), []) + assert_equal(alice.getbalance(), 0) + + # Clean up for next test + bob.sendall([self.nodes[2].getnewaddress()]) + self.generate(self.nodes[2], 1) + + alice.unloadwallet() + + def test_mempool_and_block_conflicts(self): + self.nodes[0].createwallet("alice_2") + alice = self.nodes[0].get_wallet_rpc("alice_2") + bob = self.nodes[1] + + self.nodes[2].send(outputs=[{alice.getnewaddress() : 25} for _ in range(3)]) + self.generate(self.nodes[2], 1) + + self.log.info("Test a scenario where a transaction has both a block conflict and a mempool conflict") + unspents = [{"txid" : element["txid"], "vout" : element["vout"]} for element in alice.listunspent()] + + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + + # alice and bob nodes are disconnected so that transactions can be + # created by alice, but broadcasted from bob so that alice's wallet + # doesn't know about them + self.disconnect_nodes(0, 1) + + # Sends funds to bob + 
raw_tx = alice.createrawtransaction(inputs=[unspents[0]], outputs=[{bob.getnewaddress() : 24.99999}]) + raw_tx1 = alice.signrawtransactionwithwallet(raw_tx)['hex'] + tx1_txid = bob.sendrawtransaction(raw_tx1) # broadcast original tx spending unspents[0] only to bob + + # create a conflict to previous tx (also spends unspents[0]), but don't broadcast, sends funds back to alice + raw_tx = alice.createrawtransaction(inputs=[unspents[0], unspents[2]], outputs=[{alice.getnewaddress() : 49.999}]) + tx1_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex'] + + # Sends funds to bob + raw_tx = alice.createrawtransaction(inputs=[unspents[1]], outputs=[{bob.getnewaddress() : 24.9999}]) + raw_tx2 = alice.signrawtransactionwithwallet(raw_tx)['hex'] + tx2_txid = bob.sendrawtransaction(raw_tx2) # broadcast another original tx spending unspents[1] only to bob + + # create a conflict to previous tx (also spends unspents[1]), but don't broadcast, sends funds to alice + raw_tx = alice.createrawtransaction(inputs=[unspents[1]], outputs=[{alice.getnewaddress() : 24.9999}]) + tx2_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex'] + + bob_unspents = [{"txid" : element, "vout" : 0} for element in [tx1_txid, tx2_txid]] + + # tx1 and tx2 are now in bob's mempool, and they are unconflicted, so bob has these funds + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("49.99989000")) + + # spend both of bob's unspents, child tx of tx1 and tx2 + raw_tx = bob.createrawtransaction(inputs=[bob_unspents[0], bob_unspents[1]], outputs=[{bob.getnewaddress() : 49.999}]) + raw_tx3 = bob.signrawtransactionwithwallet(raw_tx)['hex'] + tx3_txid = bob.sendrawtransaction(raw_tx3) # broadcast tx only to bob + + # alice knows about 0 txs, bob knows about 3 + assert_equal(len(alice.getrawmempool()), 0) + assert_equal(len(bob.getrawmempool()), 3) + + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("49.99900000")) + + # bob broadcasts tx_1 conflict + 
tx1_conflict_txid = bob.sendrawtransaction(tx1_conflict) + assert_equal(len(alice.getrawmempool()), 0) + assert_equal(len(bob.getrawmempool()), 2) # tx1_conflict kicks out both tx1, and its child tx3 + + assert tx2_txid in bob.getrawmempool() + assert tx1_conflict_txid in bob.getrawmempool() + + # check that the tx2 unspent is still not available because the wallet does not know that the tx spending it has a mempool conflict + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + + # we will be disconnecting this block in the future + alice.sendrawtransaction(tx2_conflict) + assert_equal(len(alice.getrawmempool()), 1) # currently alice's mempool is only aware of tx2_conflict + # 11 blocks are mined so that when they are invalidated, tx_2 + # does not get put back into the mempool + blk = self.generate(self.nodes[0], 11, sync_fun=self.no_op)[0] + assert_equal(len(alice.getrawmempool()), 0) # tx2_conflict is now mined + + self.connect_nodes(0, 1) + self.sync_blocks() + assert_equal(alice.getbestblockhash(), bob.getbestblockhash()) + + # now that tx2 has a block conflict, tx1_conflict should be the only tx in bob's mempool + assert tx1_conflict_txid in bob.getrawmempool() + assert_equal(len(bob.getrawmempool()), 1) + + # tx3 should now also be block-conflicted by tx2_conflict + assert_equal(bob.gettransaction(tx3_txid)["confirmations"], -11) + # bob has no pending funds, since tx1, tx2, and tx3 are all conflicted + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + bob.invalidateblock(blk) # remove tx2_conflict + # bob should still have no pending funds because tx1 and tx3 are still conflicted, and tx2 has not been re-broadcast + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + assert_equal(len(bob.getrawmempool()), 1) + # check that tx3 is no longer block-conflicted + assert_equal(bob.gettransaction(tx3_txid)["confirmations"], 0) + + bob.sendrawtransaction(raw_tx2) + 
assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + + # create a conflict to previous tx (also spends unspents[2]), but don't broadcast, sends funds back to alice + raw_tx = alice.createrawtransaction(inputs=[unspents[2]], outputs=[{alice.getnewaddress() : 24.99}]) + tx1_conflict_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex'] + + bob.sendrawtransaction(tx1_conflict_conflict) # kick tx1_conflict out of the mempool + bob.sendrawtransaction(raw_tx1) #re-broadcast tx1 because it is no longer conflicted + + # Now bob has no pending funds because tx1 and tx2 are spent by tx3, which hasn't been re-broadcast yet + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + + bob.sendrawtransaction(raw_tx3) + assert_equal(len(bob.getrawmempool()), 4) # The mempool contains: tx1, tx2, tx1_conflict_conflict, tx3 + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("49.99900000")) + + # Clean up for next test + bob.reconsiderblock(blk) + assert_equal(alice.getbestblockhash(), bob.getbestblockhash()) + self.sync_mempools() + self.generate(self.nodes[2], 1) + + alice.unloadwallet() + + def test_descendants_with_mempool_conflicts(self): + self.nodes[0].createwallet("alice_3") + alice = self.nodes[0].get_wallet_rpc("alice_3") + + self.nodes[2].send(outputs=[{alice.getnewaddress() : 25} for _ in range(2)]) + self.generate(self.nodes[2], 1) + + self.nodes[1].createwallet("bob_1") + bob = self.nodes[1].get_wallet_rpc("bob_1") + + self.nodes[2].createwallet("carol") + carol = self.nodes[2].get_wallet_rpc("carol") + + self.log.info("Test a scenario where a transaction's parent has a mempool conflict") + + unspents = alice.listunspent() + assert_equal(len(unspents), 2) + assert all([tx["amount"] == 25 for tx in unspents]) + + assert_equal(alice.getrawmempool(), []) + + # Alice spends first utxo to bob in tx1 + raw_tx = alice.createrawtransaction(inputs=[unspents[0]], outputs=[{bob.getnewaddress() : 24.9999}]) + tx1 = 
alice.signrawtransactionwithwallet(raw_tx)['hex'] + tx1_txid = alice.sendrawtransaction(tx1) + + self.sync_mempools() + + assert_equal(alice.getbalance(), 25) + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("24.99990000")) + + raw_tx = bob.createrawtransaction(inputs=[bob.listunspent(minconf=0)[0]], outputs=[{carol.getnewaddress() : 24.999}]) + # Bob creates a child to tx1 + tx1_child = bob.signrawtransactionwithwallet(raw_tx)['hex'] + tx1_child_txid = bob.sendrawtransaction(tx1_child) + + self.sync_mempools() + + # Currently neither tx1 nor tx1_child should have any conflicts + assert tx1_txid in bob.getrawmempool() + assert tx1_child_txid in bob.getrawmempool() + assert_equal(len(bob.getrawmempool()), 2) + + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + assert_equal(carol.getbalances()["mine"]["untrusted_pending"], Decimal("24.99900000")) + + # Alice spends first unspent again, conflicting with tx1 + raw_tx = alice.createrawtransaction(inputs=[unspents[0], unspents[1]], outputs=[{carol.getnewaddress() : 49.99}]) + tx1_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex'] + tx1_conflict_txid = alice.sendrawtransaction(tx1_conflict) + + self.sync_mempools() + + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + assert_equal(carol.getbalances()["mine"]["untrusted_pending"], Decimal("49.99000000")) + + assert tx1_txid not in bob.getrawmempool() + assert tx1_child_txid not in bob.getrawmempool() + assert tx1_conflict_txid in bob.getrawmempool() + assert_equal(len(bob.getrawmempool()), 1) + + # Now create a conflict to tx1_conflict, so that it gets kicked out of the mempool + raw_tx = alice.createrawtransaction(inputs=[unspents[1]], outputs=[{carol.getnewaddress() : 24.9895}]) + tx1_conflict_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex'] + tx1_conflict_conflict_txid = alice.sendrawtransaction(tx1_conflict_conflict) + + self.sync_mempools() + + # Both tx1 and tx1_child are still not in the 
mempool because they have not be re-broadcasted + assert tx1_txid not in bob.getrawmempool() + assert tx1_child_txid not in bob.getrawmempool() + assert tx1_conflict_txid not in bob.getrawmempool() + assert tx1_conflict_conflict_txid in bob.getrawmempool() + assert_equal(len(bob.getrawmempool()), 1) + + assert_equal(alice.getbalance(), 0) + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + assert_equal(carol.getbalances()["mine"]["untrusted_pending"], Decimal("24.98950000")) + + # Both tx1 and tx1_child can now be re-broadcasted + bob.sendrawtransaction(tx1) + bob.sendrawtransaction(tx1_child) + assert_equal(len(bob.getrawmempool()), 3) + + alice.unloadwallet() + bob.unloadwallet() + carol.unloadwallet() + if __name__ == '__main__': TxConflicts().main() From ffe5ff1fb622a8da11b66289e1b778e45e449811 Mon Sep 17 00:00:00 2001 From: ishaanam Date: Thu, 29 Jun 2023 15:41:26 -0400 Subject: [PATCH 26/79] scripted-diff: wallet: s/TxStateConflicted/TxStateBlockConflicted -BEGIN VERIFY SCRIPT- sed -i 's/TxStateConflicted/TxStateBlockConflicted/g' src/wallet/wallet.cpp src/wallet/interfaces.cpp src/wallet/transaction.h src/wallet/transaction.cpp sed -i 's/isConflicted/isBlockConflicted/g' src/wallet/transaction.h src/wallet/wallet.cpp -END VERIFY SCRIPT- --- src/wallet/interfaces.cpp | 2 +- src/wallet/transaction.cpp | 2 +- src/wallet/transaction.h | 16 ++++++++-------- src/wallet/wallet.cpp | 16 ++++++++-------- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp index d15273dfc9bc6..6405fb9bbae39 100644 --- a/src/wallet/interfaces.cpp +++ b/src/wallet/interfaces.cpp @@ -92,7 +92,7 @@ WalletTxStatus MakeWalletTxStatus(const CWallet& wallet, const CWalletTx& wtx) WalletTxStatus result; result.block_height = wtx.state() ? wtx.state()->confirmed_block_height : - wtx.state() ? wtx.state()->conflicting_block_height : + wtx.state() ? 
wtx.state()->conflicting_block_height : std::numeric_limits::max(); result.blocks_to_maturity = wallet.GetTxBlocksToMaturity(wtx); result.depth_in_main_chain = wallet.GetTxDepthInMainChain(wtx); diff --git a/src/wallet/transaction.cpp b/src/wallet/transaction.cpp index 6777257e53984..561880482f8c1 100644 --- a/src/wallet/transaction.cpp +++ b/src/wallet/transaction.cpp @@ -45,7 +45,7 @@ void CWalletTx::updateState(interfaces::Chain& chain) }; if (auto* conf = state()) { lookup_block(conf->confirmed_block_hash, conf->confirmed_block_height, m_state); - } else if (auto* conf = state()) { + } else if (auto* conf = state()) { lookup_block(conf->conflicting_block_hash, conf->conflicting_block_height, m_state); } } diff --git a/src/wallet/transaction.h b/src/wallet/transaction.h index ddeb931112793..2ffd85afa9c34 100644 --- a/src/wallet/transaction.h +++ b/src/wallet/transaction.h @@ -43,11 +43,11 @@ struct TxStateInMempool { }; //! State of rejected transaction that conflicts with a confirmed block. -struct TxStateConflicted { +struct TxStateBlockConflicted { uint256 conflicting_block_hash; int conflicting_block_height; - explicit TxStateConflicted(const uint256& block_hash, int height) : conflicting_block_hash(block_hash), conflicting_block_height(height) {} + explicit TxStateBlockConflicted(const uint256& block_hash, int height) : conflicting_block_hash(block_hash), conflicting_block_height(height) {} std::string toString() const { return strprintf("Conflicted (block=%s, height=%i)", conflicting_block_hash.ToString(), conflicting_block_height); } }; @@ -75,7 +75,7 @@ struct TxStateUnrecognized { }; //! All possible CWalletTx states -using TxState = std::variant; +using TxState = std::variant; //! Subset of states transaction sync logic is implemented to handle. 
using SyncTxState = std::variant; @@ -90,7 +90,7 @@ static inline TxState TxStateInterpretSerialized(TxStateUnrecognized data) } else if (data.index >= 0) { return TxStateConfirmed{data.block_hash, /*height=*/-1, data.index}; } else if (data.index == -1) { - return TxStateConflicted{data.block_hash, /*height=*/-1}; + return TxStateBlockConflicted{data.block_hash, /*height=*/-1}; } return data; } @@ -102,7 +102,7 @@ static inline uint256 TxStateSerializedBlockHash(const TxState& state) [](const TxStateInactive& inactive) { return inactive.abandoned ? uint256::ONE : uint256::ZERO; }, [](const TxStateInMempool& in_mempool) { return uint256::ZERO; }, [](const TxStateConfirmed& confirmed) { return confirmed.confirmed_block_hash; }, - [](const TxStateConflicted& conflicted) { return conflicted.conflicting_block_hash; }, + [](const TxStateBlockConflicted& conflicted) { return conflicted.conflicting_block_hash; }, [](const TxStateUnrecognized& unrecognized) { return unrecognized.block_hash; } }, state); } @@ -114,7 +114,7 @@ static inline int TxStateSerializedIndex(const TxState& state) [](const TxStateInactive& inactive) { return inactive.abandoned ? 
-1 : 0; }, [](const TxStateInMempool& in_mempool) { return 0; }, [](const TxStateConfirmed& confirmed) { return confirmed.position_in_block; }, - [](const TxStateConflicted& conflicted) { return -1; }, + [](const TxStateBlockConflicted& conflicted) { return -1; }, [](const TxStateUnrecognized& unrecognized) { return unrecognized.index; } }, state); } @@ -335,9 +335,9 @@ class CWalletTx void updateState(interfaces::Chain& chain); bool isAbandoned() const { return state() && state()->abandoned; } - bool isConflicted() const { return state(); } + bool isBlockConflicted() const { return state(); } bool isInactive() const { return state(); } - bool isUnconfirmed() const { return !isAbandoned() && !isConflicted() && !isConfirmed(); } + bool isUnconfirmed() const { return !isAbandoned() && !isBlockConflicted() && !isConfirmed(); } bool isConfirmed() const { return state(); } const Txid& GetHash() const LIFETIMEBOUND { return tx->GetHash(); } const Wtxid& GetWitnessHash() const LIFETIMEBOUND { return tx->GetWitnessHash(); } diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index e93cd4b4b9b8a..f2116a297da9a 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -1197,7 +1197,7 @@ bool CWallet::LoadToWallet(const uint256& hash, const UpdateWalletTxFn& fill_wtx auto it = mapWallet.find(txin.prevout.hash); if (it != mapWallet.end()) { CWalletTx& prevtx = it->second; - if (auto* prev = prevtx.state()) { + if (auto* prev = prevtx.state()) { MarkConflicted(prev->conflicting_block_hash, prev->conflicting_block_height, wtx.GetHash()); } } @@ -1309,7 +1309,7 @@ bool CWallet::AbandonTransaction(const uint256& hashTx) assert(!wtx.isConfirmed()); assert(!wtx.InMempool()); // If already conflicted or abandoned, no need to set abandoned - if (!wtx.isConflicted() && !wtx.isAbandoned()) { + if (!wtx.isBlockConflicted() && !wtx.isAbandoned()) { wtx.m_state = TxStateInactive{/*abandoned=*/true}; return TxUpdate::NOTIFY_CHANGED; } @@ -1346,7 +1346,7 @@ void 
CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c if (conflictconfirms < GetTxDepthInMainChain(wtx)) { // Block is 'more conflicted' than current confirm; update. // Mark transaction as conflicted with this block. - wtx.m_state = TxStateConflicted{hashBlock, conflicting_height}; + wtx.m_state = TxStateBlockConflicted{hashBlock, conflicting_height}; return TxUpdate::CHANGED; } return TxUpdate::UNCHANGED; @@ -1506,11 +1506,11 @@ void CWallet::blockDisconnected(const interfaces::BlockInfo& block) for (TxSpends::const_iterator _it = range.first; _it != range.second; ++_it) { CWalletTx& wtx = mapWallet.find(_it->second)->second; - if (!wtx.isConflicted()) continue; + if (!wtx.isBlockConflicted()) continue; auto try_updating_state = [&](CWalletTx& tx) { - if (!tx.isConflicted()) return TxUpdate::UNCHANGED; - if (tx.state()->conflicting_block_height >= disconnect_height) { + if (!tx.isBlockConflicted()) return TxUpdate::UNCHANGED; + if (tx.state()->conflicting_block_height >= disconnect_height) { tx.m_state = TxStateInactive{}; return TxUpdate::CHANGED; } @@ -2725,7 +2725,7 @@ unsigned int CWallet::ComputeTimeSmart(const CWalletTx& wtx, bool rescanning_old std::optional block_hash; if (auto* conf = wtx.state()) { block_hash = conf->confirmed_block_hash; - } else if (auto* conf = wtx.state()) { + } else if (auto* conf = wtx.state()) { block_hash = conf->conflicting_block_hash; } @@ -3315,7 +3315,7 @@ int CWallet::GetTxDepthInMainChain(const CWalletTx& wtx) const if (auto* conf = wtx.state()) { assert(conf->confirmed_block_height >= 0); return GetLastBlockHeight() - conf->confirmed_block_height + 1; - } else if (auto* conf = wtx.state()) { + } else if (auto* conf = wtx.state()) { assert(conf->conflicting_block_height >= 0); return -1 * (GetLastBlockHeight() - conf->conflicting_block_height + 1); } else { From 636c9862cfc8b3facc84eb62b51e18877f2022a9 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: 
Fri, 15 Mar 2024 10:47:25 +0000 Subject: [PATCH 27/79] ci: Bump `TIDY_LLVM_V` This change switches to the latest IWYU 0.22, which is compatible with Clang 18. --- ci/test/00_setup_env_native_tidy.sh | 2 +- src/.clang-tidy | 1 + src/script/signingprovider.cpp | 2 -- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/ci/test/00_setup_env_native_tidy.sh b/ci/test/00_setup_env_native_tidy.sh index c12044f461d85..a5ba64da15d2b 100755 --- a/ci/test/00_setup_env_native_tidy.sh +++ b/ci/test/00_setup_env_native_tidy.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" export CONTAINER_NAME=ci_native_tidy -export TIDY_LLVM_V="17" +export TIDY_LLVM_V="18" export PACKAGES="clang-${TIDY_LLVM_V} libclang-${TIDY_LLVM_V}-dev llvm-${TIDY_LLVM_V}-dev libomp-${TIDY_LLVM_V}-dev clang-tidy-${TIDY_LLVM_V} jq bear libevent-dev libboost-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev systemtap-sdt-dev libqt5gui5 libqt5core5a libqt5dbus5 qttools5-dev qttools5-dev-tools libqrencode-dev libsqlite3-dev libdb++-dev" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false diff --git a/src/.clang-tidy b/src/.clang-tidy index bfaa5ab8e7b09..e4b789dcaa908 100644 --- a/src/.clang-tidy +++ b/src/.clang-tidy @@ -12,6 +12,7 @@ modernize-use-noexcept, modernize-use-nullptr, performance-*, -performance-avoid-endl, +-performance-enum-size, -performance-inefficient-string-concatenation, -performance-no-int-to-ptr, -performance-noexcept-move-constructor, diff --git a/src/script/signingprovider.cpp b/src/script/signingprovider.cpp index 0031f94af1138..baabd4d5b5cc8 100644 --- a/src/script/signingprovider.cpp +++ b/src/script/signingprovider.cpp @@ -370,8 +370,6 @@ TaprootBuilder& TaprootBuilder::Add(int depth, Span script, /* Construct NodeInfo object with leaf hash and (if track is true) also leaf information. 
*/ NodeInfo node; node.hash = ComputeTapleafHash(leaf_version, script); - // due to bug in clang-tidy-17: - // NOLINTNEXTLINE(modernize-use-emplace) if (track) node.leaves.emplace_back(LeafInfo{std::vector(script.begin(), script.end()), leaf_version, {}}); /* Insert into the branch. */ Insert(std::move(node), depth); From d64922b5903e5ffc8d2ce0e6761f99f173b60800 Mon Sep 17 00:00:00 2001 From: ishaanam Date: Wed, 1 Mar 2023 13:46:23 -0500 Subject: [PATCH 28/79] wallet refactor: use CWalletTx member functions to determine tx state --- src/wallet/interfaces.cpp | 2 +- src/wallet/receive.cpp | 7 +++---- src/wallet/wallet.cpp | 4 ++-- src/wallet/wallet.h | 5 ----- 4 files changed, 6 insertions(+), 12 deletions(-) diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp index 6405fb9bbae39..d33e6f3873b8f 100644 --- a/src/wallet/interfaces.cpp +++ b/src/wallet/interfaces.cpp @@ -101,7 +101,7 @@ WalletTxStatus MakeWalletTxStatus(const CWallet& wallet, const CWalletTx& wtx) result.is_trusted = CachedTxIsTrusted(wallet, wtx); result.is_abandoned = wtx.isAbandoned(); result.is_coinbase = wtx.IsCoinBase(); - result.is_in_main_chain = wallet.IsTxInMainChain(wtx); + result.is_in_main_chain = wtx.isConfirmed(); return result; } diff --git a/src/wallet/receive.cpp b/src/wallet/receive.cpp index b9d8d9abc92dc..ea3ffff549d20 100644 --- a/src/wallet/receive.cpp +++ b/src/wallet/receive.cpp @@ -149,7 +149,7 @@ CAmount CachedTxGetImmatureCredit(const CWallet& wallet, const CWalletTx& wtx, c { AssertLockHeld(wallet.cs_wallet); - if (wallet.IsTxImmatureCoinBase(wtx) && wallet.IsTxInMainChain(wtx)) { + if (wallet.IsTxImmatureCoinBase(wtx) && wtx.isConfirmed()) { return GetCachableAmount(wallet, wtx, CWalletTx::IMMATURE_CREDIT, filter); } @@ -256,9 +256,8 @@ bool CachedTxIsFromMe(const CWallet& wallet, const CWalletTx& wtx, const isminef bool CachedTxIsTrusted(const CWallet& wallet, const CWalletTx& wtx, std::set& trusted_parents) { AssertLockHeld(wallet.cs_wallet); - int nDepth 
= wallet.GetTxDepthInMainChain(wtx); - if (nDepth >= 1) return true; - if (nDepth < 0) return false; + if (wtx.isConfirmed()) return true; + if (wtx.isBlockConflicted()) return false; // using wtx's cached debit if (!wallet.m_spend_zero_conf_change || !CachedTxIsFromMe(wallet, wtx, ISMINE_ALL)) return false; diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index f2116a297da9a..1053b69f32c3a 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -752,8 +752,8 @@ bool CWallet::IsSpent(const COutPoint& outpoint) const const uint256& wtxid = it->second; const auto mit = mapWallet.find(wtxid); if (mit != mapWallet.end()) { - int depth = GetTxDepthInMainChain(mit->second); - if (depth > 0 || (depth == 0 && !mit->second.isAbandoned())) + const auto& wtx = mit->second; + if (!wtx.isAbandoned() && !wtx.isBlockConflicted()) return true; // Spent } } diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index cc961068a54a1..d55b683f1cdca 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -515,11 +515,6 @@ class CWallet final : public WalletStorage, public interfaces::Chain::Notificati * referenced in transaction, and might cause assert failures. 
*/ int GetTxDepthInMainChain(const CWalletTx& wtx) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - bool IsTxInMainChain(const CWalletTx& wtx) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) - { - AssertLockHeld(cs_wallet); - return GetTxDepthInMainChain(wtx) > 0; - } /** * @return number of blocks to maturity for this transaction: From 626f8e398e219b84907ccaad036f69177d39284c Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Sun, 17 Mar 2024 11:35:01 -0400 Subject: [PATCH 29/79] fuzz: actually test garbage >64b in p2p transport test --- src/test/fuzz/p2p_transport_serialization.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/test/fuzz/p2p_transport_serialization.cpp b/src/test/fuzz/p2p_transport_serialization.cpp index a205ce19f4a3a..1b7a732260e15 100644 --- a/src/test/fuzz/p2p_transport_serialization.cpp +++ b/src/test/fuzz/p2p_transport_serialization.cpp @@ -354,6 +354,7 @@ std::unique_ptr MakeV2Transport(NodeId nodeid, bool initiator, RNG& r } else { // If it's longer, generate it from the RNG. This avoids having large amounts of // (hopefully) irrelevant data needing to be stored in the fuzzer data. + garb.resize(garb_len); for (auto& v : garb) v = uint8_t(rng()); } // Retrieve entropy From 64722e4359bc101682d73e5a1a04ef2c68716d4c Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Sun, 17 Mar 2024 16:54:47 +0000 Subject: [PATCH 30/79] ci: Drop `--enable-c++20` option This option has ceased to exist since https://github.com/bitcoin/bitcoin/pull/28349. 
--- ci/test/00_setup_env_native_asan.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index 60486f8f613a3..840daf970829f 100755 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -20,7 +20,7 @@ export CONTAINER_NAME=ci_native_asan export PACKAGES="systemtap-sdt-dev clang-17 llvm-17 libclang-rt-17-dev python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev libboost-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}" export NO_DEPENDS=1 export GOAL="install" -export BITCOIN_CONFIG="--enable-c++20 --enable-usdt --enable-zmq --with-incompatible-bdb --with-gui=qt5 \ +export BITCOIN_CONFIG="--enable-usdt --enable-zmq --with-incompatible-bdb --with-gui=qt5 \ CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' \ --with-sanitizers=address,float-divide-by-zero,integer,undefined \ CC='clang-17 -ftrivial-auto-var-init=pattern' CXX='clang++-17 -ftrivial-auto-var-init=pattern'" From fad7f423249c161cad20a754653f9477e2b98339 Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Sat, 2 Mar 2024 21:26:48 +0100 Subject: [PATCH 31/79] lint: Clarify lint runner rust dependency --- test/lint/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/test/lint/README.md b/test/lint/README.md index 9cb61b484cb16..83264de06e429 100644 --- a/test/lint/README.md +++ b/test/lint/README.md @@ -16,7 +16,11 @@ result is cached and it prevents issues when the image changes. test runner =========== -To run all the lint checks in the test runner outside the docker, use: +To run all the lint checks in the test runner outside the docker you first need +to install the rust toolchain using your package manager of choice or +[rustup](https://www.rust-lang.org/tools/install). 
+ +Then you can use: ```sh ( cd ./test/lint/test_runner/ && cargo fmt && cargo clippy && RUST_BACKTRACE=1 cargo run ) From cfa057b86d735942adbeb7347a51b6f0c32901f7 Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Sat, 2 Mar 2024 21:28:41 +0100 Subject: [PATCH 32/79] lint: Add lint runner build dir to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index c77303f50e2ee..3fe36aba89d7f 100644 --- a/.gitignore +++ b/.gitignore @@ -130,6 +130,7 @@ win32-build test/config.ini test/cache/* test/.mypy_cache/ +test/lint/test_runner/target/ !src/leveldb*/Makefile From 742d2b93473a856786e32c5e35e3b6ce2a95000f Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Sat, 2 Mar 2024 21:39:31 +0100 Subject: [PATCH 33/79] lint: Add lint runner build dir and lint pycache to clean task --- Makefile.am | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile.am b/Makefile.am index eec498dc0e937..5ea690dec87a2 100644 --- a/Makefile.am +++ b/Makefile.am @@ -338,7 +338,7 @@ clean-docs: clean-local: clean-docs rm -rf coverage_percent.txt test_bitcoin.coverage/ total.coverage/ fuzz.coverage/ test/tmp/ cache/ $(OSX_APP) rm -rf test/functional/__pycache__ test/functional/test_framework/__pycache__ test/cache share/rpcauth/__pycache__ - rm -rf osx_volname dist/ + rm -rf osx_volname dist/ test/lint/test_runner/target/ test/lint/__pycache__ test-security-check: if TARGET_DARWIN From ce8e22542ed0b4fa5794d3203207146418d59473 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 12 Jan 2024 10:40:41 -0500 Subject: [PATCH 34/79] Add FeeFrac utils Co-authored-by: Suhas Daftuar Co-authored-by: Pieter Wuille --- src/Makefile.am | 3 + src/util/feefrac.cpp | 86 +++++++++++++++++++++++ src/util/feefrac.h | 160 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 249 insertions(+) create mode 100644 src/util/feefrac.cpp create mode 100644 src/util/feefrac.h diff --git a/src/Makefile.am b/src/Makefile.am index 
1f55bfbafda66..0328dfc2cd6a8 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -302,6 +302,7 @@ BITCOIN_CORE_H = \ util/error.h \ util/exception.h \ util/fastrange.h \ + util/feefrac.h \ util/fees.h \ util/fs.h \ util/fs_helpers.h \ @@ -741,6 +742,7 @@ libbitcoin_util_a_SOURCES = \ util/check.cpp \ util/error.cpp \ util/exception.cpp \ + util/feefrac.cpp \ util/fees.cpp \ util/fs.cpp \ util/fs_helpers.cpp \ @@ -983,6 +985,7 @@ libbitcoinkernel_la_SOURCES = \ util/batchpriority.cpp \ util/chaintype.cpp \ util/check.cpp \ + util/feefrac.cpp \ util/fs.cpp \ util/fs_helpers.cpp \ util/hasher.cpp \ diff --git a/src/util/feefrac.cpp b/src/util/feefrac.cpp new file mode 100644 index 0000000000000..a271fe585e066 --- /dev/null +++ b/src/util/feefrac.cpp @@ -0,0 +1,86 @@ +// Copyright (c) The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include + +std::vector BuildDiagramFromChunks(const Span chunks) +{ + std::vector diagram; + diagram.reserve(chunks.size() + 1); + + diagram.emplace_back(0, 0); + for (auto& chunk : chunks) { + diagram.emplace_back(diagram.back() + chunk); + } + return diagram; +} + +std::partial_ordering CompareFeerateDiagram(Span dia0, Span dia1) +{ + /** Array to allow indexed access to input diagrams. */ + const std::array, 2> dias = {dia0, dia1}; + /** How many elements we have processed in each input. */ + size_t next_index[2] = {1, 1}; + /** Whether the corresponding input is strictly better than the other at least in one place. */ + bool better_somewhere[2] = {false, false}; + /** Get the first unprocessed point in diagram number dia. */ + const auto next_point = [&](int dia) { return dias[dia][next_index[dia]]; }; + /** Get the last processed point in diagram number dia. 
*/ + const auto prev_point = [&](int dia) { return dias[dia][next_index[dia] - 1]; }; + + // Diagrams should be non-empty, and first elements zero in size and fee + Assert(!dia0.empty() && !dia1.empty()); + Assert(prev_point(0).IsEmpty()); + Assert(prev_point(1).IsEmpty()); + + do { + bool done_0 = next_index[0] == dias[0].size(); + bool done_1 = next_index[1] == dias[1].size(); + if (done_0 && done_1) break; + + // Determine which diagram has the first unprocessed point. If a single side is finished, use the + // other one. Only up to one can be done due to check above. + const int unproc_side = (done_0 || done_1) ? done_0 : next_point(0).size > next_point(1).size; + + // Let `P` be the next point on diagram unproc_side, and `A` and `B` the previous and next points + // on the other diagram. We want to know if P lies above or below the line AB. To determine this, we + // compute the slopes of line AB and of line AP, and compare them. These slopes are fee per size, + // and can thus be expressed as FeeFracs. + const FeeFrac& point_p = next_point(unproc_side); + const FeeFrac& point_a = prev_point(!unproc_side); + + // Slope of AP can be negative, unlike AB + const auto slope_ap = point_p - point_a; + Assume(slope_ap.size > 0); + std::weak_ordering cmp = std::weak_ordering::equivalent; + if (done_0 || done_1) { + // If a single side has no points left, act as if AB has slope tail_feerate(of 0). + Assume(!(done_0 && done_1)); + cmp = FeeRateCompare(slope_ap, FeeFrac(0, 1)); + } else { + // If both sides have points left, compute B, and the slope of AB explicitly. + const FeeFrac& point_b = next_point(!unproc_side); + const auto slope_ab = point_b - point_a; + Assume(slope_ab.size >= slope_ap.size); + cmp = FeeRateCompare(slope_ap, slope_ab); + + // If B and P have the same size, B can be marked as processed (in addition to P, see + // below), as we've already performed a comparison at this size. 
+ if (point_b.size == point_p.size) ++next_index[!unproc_side]; + } + // If P lies above AB, unproc_side is better in P. If P lies below AB, then !unproc_side is + // better in P. + if (std::is_gt(cmp)) better_somewhere[unproc_side] = true; + if (std::is_lt(cmp)) better_somewhere[!unproc_side] = true; + ++next_index[unproc_side]; + } while(true); + + // If both diagrams are better somewhere, they are incomparable. + if (better_somewhere[0] && better_somewhere[1]) return std::partial_ordering::unordered; + // Otherwise compare the better_somewhere values. + return better_somewhere[0] <=> better_somewhere[1]; +} diff --git a/src/util/feefrac.h b/src/util/feefrac.h new file mode 100644 index 0000000000000..48bd287c7cade --- /dev/null +++ b/src/util/feefrac.h @@ -0,0 +1,160 @@ +// Copyright (c) The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_UTIL_FEEFRAC_H +#define BITCOIN_UTIL_FEEFRAC_H + +#include +#include +#include +#include +#include + +/** Data structure storing a fee and size, ordered by increasing fee/size. + * + * The size of a FeeFrac cannot be zero unless the fee is also zero. + * + * FeeFracs have a total ordering, first by increasing feerate (ratio of fee over size), and then + * by decreasing size. The empty FeeFrac (fee and size both 0) sorts last. So for example, the + * following FeeFracs are in sorted order: + * + * - fee=0 size=1 (feerate 0) + * - fee=1 size=2 (feerate 0.5) + * - fee=2 size=3 (feerate 0.667...) + * - fee=2 size=2 (feerate 1) + * - fee=1 size=1 (feerate 1) + * - fee=3 size=2 (feerate 1.5) + * - fee=2 size=1 (feerate 2) + * - fee=0 size=0 (undefined feerate) + * + * A FeeFrac is considered "better" if it sorts after another, by this ordering. All standard + * comparison operators (<=>, ==, !=, >, <, >=, <=) respect this ordering. 
+ * + * The CompareFeeFrac, and >> and << operators only compare feerate and treat equal feerate but + * different size as equivalent. The empty FeeFrac is neither lower or higher in feerate than any + * other. + */ +struct FeeFrac +{ + /** Fallback version for Mul (see below). + * + * Separate to permit testing on platforms where it isn't actually needed. + */ + static inline std::pair MulFallback(int64_t a, int32_t b) noexcept + { + // Otherwise, emulate 96-bit multiplication using two 64-bit multiplies. + int64_t low = int64_t{static_cast(a)} * b; + int64_t high = (a >> 32) * b; + return {high + (low >> 32), static_cast(low)}; + } + + // Compute a * b, returning an unspecified but totally ordered type. +#ifdef __SIZEOF_INT128__ + static inline __int128 Mul(int64_t a, int32_t b) noexcept + { + // If __int128 is available, use 128-bit wide multiply. + return __int128{a} * b; + } +#else + static constexpr auto Mul = MulFallback; +#endif + + int64_t fee; + int32_t size; + + /** Construct an IsEmpty() FeeFrac. */ + inline FeeFrac() noexcept : fee{0}, size{0} {} + + /** Construct a FeeFrac with specified fee and size. */ + inline FeeFrac(int64_t f, int32_t s) noexcept : fee{f}, size{s} {} + + inline FeeFrac(const FeeFrac&) noexcept = default; + inline FeeFrac& operator=(const FeeFrac&) noexcept = default; + + /** Check if this is empty (size and fee are 0). */ + bool inline IsEmpty() const noexcept { + return size == 0; + } + + /** Add fee and size of another FeeFrac to this one. */ + void inline operator+=(const FeeFrac& other) noexcept + { + fee += other.fee; + size += other.size; + } + + /** Subtract fee and size of another FeeFrac from this one. */ + void inline operator-=(const FeeFrac& other) noexcept + { + fee -= other.fee; + size -= other.size; + } + + /** Sum fee and size. */ + friend inline FeeFrac operator+(const FeeFrac& a, const FeeFrac& b) noexcept + { + return {a.fee + b.fee, a.size + b.size}; + } + + /** Subtract both fee and size. 
*/ + friend inline FeeFrac operator-(const FeeFrac& a, const FeeFrac& b) noexcept + { + return {a.fee - b.fee, a.size - b.size}; + } + + /** Check if two FeeFrac objects are equal (both same fee and same size). */ + friend inline bool operator==(const FeeFrac& a, const FeeFrac& b) noexcept + { + return a.fee == b.fee && a.size == b.size; + } + + /** Compare two FeeFracs just by feerate. */ + friend inline std::weak_ordering FeeRateCompare(const FeeFrac& a, const FeeFrac& b) noexcept + { + auto cross_a = Mul(a.fee, b.size), cross_b = Mul(b.fee, a.size); + return cross_a <=> cross_b; + } + + /** Check if a FeeFrac object has strictly lower feerate than another. */ + friend inline bool operator<<(const FeeFrac& a, const FeeFrac& b) noexcept + { + auto cross_a = Mul(a.fee, b.size), cross_b = Mul(b.fee, a.size); + return cross_a < cross_b; + } + + /** Check if a FeeFrac object has strictly higher feerate than another. */ + friend inline bool operator>>(const FeeFrac& a, const FeeFrac& b) noexcept + { + auto cross_a = Mul(a.fee, b.size), cross_b = Mul(b.fee, a.size); + return cross_a > cross_b; + } + + /** Compare two FeeFracs. <, >, <=, and >= are auto-generated from this. */ + friend inline std::strong_ordering operator<=>(const FeeFrac& a, const FeeFrac& b) noexcept + { + auto cross_a = Mul(a.fee, b.size), cross_b = Mul(b.fee, a.size); + if (cross_a == cross_b) return b.size <=> a.size; + return cross_a <=> cross_b; + } + + /** Swap two FeeFracs. */ + friend inline void swap(FeeFrac& a, FeeFrac& b) noexcept + { + std::swap(a.fee, b.fee); + std::swap(a.size, b.size); + } +}; + +/** Takes the pre-computed and topologically-valid chunks and generates a fee diagram which starts at FeeFrac of (0, 0) */ +std::vector BuildDiagramFromChunks(Span chunks); + +/** Compares two feerate diagrams. The shorter one is implicitly + * extended with a horizontal straight line. 
+ * + * A feerate diagram consists of a list of (fee, size) points with the property that size + * is strictly increasing and that the first entry is (0, 0). + */ +std::partial_ordering CompareFeerateDiagram(Span dia0, Span dia1); + +#endif // BITCOIN_UTIL_FEEFRAC_H From 66d966dcfaad3638f84654e710f403cb0a0a2ac7 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 12 Jan 2024 10:44:39 -0500 Subject: [PATCH 35/79] Add FeeFrac unit tests Co-authored-by: Suhas Daftuar --- src/Makefile.test.include | 1 + src/test/feefrac_tests.cpp | 124 +++++++++++++++++++++++++++++++++++++ 2 files changed, 125 insertions(+) create mode 100644 src/test/feefrac_tests.cpp diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 9f9bdbbd0cd4b..83ea4b139cac8 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -93,6 +93,7 @@ BITCOIN_TESTS =\ test/denialofservice_tests.cpp \ test/descriptor_tests.cpp \ test/disconnected_transactions.cpp \ + test/feefrac_tests.cpp \ test/flatfile_tests.cpp \ test/fs_tests.cpp \ test/getarg_tests.cpp \ diff --git a/src/test/feefrac_tests.cpp b/src/test/feefrac_tests.cpp new file mode 100644 index 0000000000000..2e015b382e046 --- /dev/null +++ b/src/test/feefrac_tests.cpp @@ -0,0 +1,124 @@ +// Copyright (c) 2024-present The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include +#include + +#include + +BOOST_AUTO_TEST_SUITE(feefrac_tests) + +BOOST_AUTO_TEST_CASE(feefrac_operators) +{ + FeeFrac p1{1000, 100}, p2{500, 300}; + FeeFrac sum{1500, 400}; + FeeFrac diff{500, -200}; + FeeFrac empty{0, 0}; + FeeFrac zero_fee{0, 1}; // zero-fee allowed + + BOOST_CHECK(empty == FeeFrac{}); // same as no-args + + BOOST_CHECK(p1 == p1); + BOOST_CHECK(p1 + p2 == sum); + BOOST_CHECK(p1 - p2 == diff); + + FeeFrac p3{2000, 200}; + BOOST_CHECK(p1 != p3); // feefracs only equal if both fee and size are same + BOOST_CHECK(p2 != p3); + + FeeFrac p4{3000, 300}; + BOOST_CHECK(p1 == p4-p3); + BOOST_CHECK(p1 + p3 == p4); + + // Fee-rate comparison + BOOST_CHECK(p1 > p2); + BOOST_CHECK(p1 >= p2); + BOOST_CHECK(p1 >= p4-p3); + BOOST_CHECK(!(p1 >> p3)); // not strictly better + BOOST_CHECK(p1 >> p2); // strictly greater feerate + + BOOST_CHECK(p2 < p1); + BOOST_CHECK(p2 <= p1); + BOOST_CHECK(p1 <= p4-p3); + BOOST_CHECK(!(p3 << p1)); // not strictly worse + BOOST_CHECK(p2 << p1); // strictly lower feerate + + // "empty" comparisons + BOOST_CHECK(!(p1 >> empty)); // << will always result in false + BOOST_CHECK(!(p1 << empty)); + BOOST_CHECK(!(empty >> empty)); + BOOST_CHECK(!(empty << empty)); + + // empty is always bigger than everything else + BOOST_CHECK(empty > p1); + BOOST_CHECK(empty > p2); + BOOST_CHECK(empty > p3); + BOOST_CHECK(empty >= p1); + BOOST_CHECK(empty >= p2); + BOOST_CHECK(empty >= p3); + + // check "max" values for comparison + FeeFrac oversized_1{4611686000000, 4000000}; + FeeFrac oversized_2{184467440000000, 100000}; + + BOOST_CHECK(oversized_1 < oversized_2); + BOOST_CHECK(oversized_1 <= oversized_2); + BOOST_CHECK(oversized_1 << oversized_2); + BOOST_CHECK(oversized_1 != oversized_2); + + // Tests paths that use double arithmetic + FeeFrac busted{(static_cast(INT32_MAX)) + 1, INT32_MAX}; + BOOST_CHECK(!(busted < busted)); + + FeeFrac max_fee{2100000000000000, INT32_MAX}; + BOOST_CHECK(!(max_fee < max_fee)); + BOOST_CHECK(!(max_fee > 
max_fee)); + BOOST_CHECK(max_fee <= max_fee); + BOOST_CHECK(max_fee >= max_fee); + + FeeFrac max_fee2{1, 1}; + BOOST_CHECK(max_fee >= max_fee2); + +} + +BOOST_AUTO_TEST_CASE(build_diagram_test) +{ + FeeFrac p1{1000, 100}; + FeeFrac empty{0, 0}; + FeeFrac zero_fee{0, 1}; + FeeFrac oversized_1{4611686000000, 4000000}; + FeeFrac oversized_2{184467440000000, 100000}; + + // Diagram-building will reorder chunks + std::vector chunks; + chunks.push_back(p1); + chunks.push_back(zero_fee); + chunks.push_back(empty); + chunks.push_back(oversized_1); + chunks.push_back(oversized_2); + + // Caller in charge of sorting chunks in case chunk size limit + // differs from cluster size limit + std::sort(chunks.begin(), chunks.end(), [](const FeeFrac& a, const FeeFrac& b) { return a > b; }); + + // Chunks are now sorted in reverse order (largest first) + BOOST_CHECK(chunks[0] == empty); // empty is considered "highest" fee + BOOST_CHECK(chunks[1] == oversized_2); + BOOST_CHECK(chunks[2] == oversized_1); + BOOST_CHECK(chunks[3] == p1); + BOOST_CHECK(chunks[4] == zero_fee); + + std::vector generated_diagram{BuildDiagramFromChunks(chunks)}; + BOOST_CHECK(generated_diagram.size() == 1 + chunks.size()); + + // Prepended with an empty, then the chunks summed in order as above + BOOST_CHECK(generated_diagram[0] == empty); + BOOST_CHECK(generated_diagram[1] == empty); + BOOST_CHECK(generated_diagram[2] == oversized_2); + BOOST_CHECK(generated_diagram[3] == oversized_2 + oversized_1); + BOOST_CHECK(generated_diagram[4] == oversized_2 + oversized_1 + p1); + BOOST_CHECK(generated_diagram[5] == oversized_2 + oversized_1 + p1 + zero_fee); +} + +BOOST_AUTO_TEST_SUITE_END() From 2079b80854e2595f6f696e7c13a56c7f2a7da9f4 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 19 Jan 2024 15:20:33 -0500 Subject: [PATCH 36/79] Implement ImprovesFeerateDiagram This new function takes the populated sets of direct and all conflicts computed in the current mempool, assuming the replacements are a single 
chunk, and computes a diagram check. The diagram check only works against cluster sizes of 2 or less, and fails if it encounters a different topology. Co-authored-by: Suhas Daftuar --- src/policy/rbf.cpp | 23 ++++++++ src/policy/rbf.h | 26 +++++++++ src/txmempool.cpp | 129 +++++++++++++++++++++++++++++++++++++++++++++ src/txmempool.h | 23 ++++++++ 4 files changed, 201 insertions(+) diff --git a/src/policy/rbf.cpp b/src/policy/rbf.cpp index f0830d8f2297c..a2c6990657e33 100644 --- a/src/policy/rbf.cpp +++ b/src/policy/rbf.cpp @@ -19,6 +19,8 @@ #include #include +#include + RBFTransactionState IsRBFOptIn(const CTransaction& tx, const CTxMemPool& pool) { AssertLockHeld(pool.cs); @@ -181,3 +183,24 @@ std::optional PaysForRBF(CAmount original_fees, } return std::nullopt; } + +std::optional> ImprovesFeerateDiagram(CTxMemPool& pool, + const CTxMemPool::setEntries& direct_conflicts, + const CTxMemPool::setEntries& all_conflicts, + CAmount replacement_fees, + int64_t replacement_vsize) +{ + // Require that the replacement strictly improve the mempool's feerate diagram. 
+ std::vector old_diagram, new_diagram; + + const auto diagram_results{pool.CalculateFeerateDiagramsForRBF(replacement_fees, replacement_vsize, direct_conflicts, all_conflicts)}; + + if (!diagram_results.has_value()) { + return std::make_pair(DiagramCheckError::UNCALCULABLE, util::ErrorString(diagram_results).original); + } + + if (!std::is_gt(CompareFeerateDiagram(diagram_results.value().second, diagram_results.value().first))) { + return std::make_pair(DiagramCheckError::FAILURE, "insufficient feerate: does not improve feerate diagram"); + } + return std::nullopt; +} diff --git a/src/policy/rbf.h b/src/policy/rbf.h index 5a33ed64a376c..252fbec8e3152 100644 --- a/src/policy/rbf.h +++ b/src/policy/rbf.h @@ -9,7 +9,9 @@ #include #include #include +#include +#include #include #include #include @@ -33,6 +35,13 @@ enum class RBFTransactionState { FINAL, }; +enum class DiagramCheckError { + /** Unable to calculate due to topology or other reason */ + UNCALCULABLE, + /** New diagram wasn't strictly superior */ + FAILURE, +}; + /** * Determine whether an unconfirmed transaction is signaling opt-in to RBF * according to BIP 125 @@ -106,4 +115,21 @@ std::optional PaysForRBF(CAmount original_fees, CFeeRate relay_fee, const uint256& txid); +/** + * The replacement transaction must improve the feerate diagram of the mempool. + * @param[in] pool The mempool. + * @param[in] direct_conflicts Set of in-mempool txids corresponding to the direct conflicts i.e. + * input double-spends with the proposed transaction + * @param[in] all_conflicts Set of mempool entries corresponding to all transactions to be evicted + * @param[in] replacement_fees Fees of proposed replacement package + * @param[in] replacement_vsize Size of proposed replacement package + * @returns error type and string if mempool diagram doesn't improve, otherwise std::nullopt. 
+ */ +std::optional> ImprovesFeerateDiagram(CTxMemPool& pool, + const CTxMemPool::setEntries& direct_conflicts, + const CTxMemPool::setEntries& all_conflicts, + CAmount replacement_fees, + int64_t replacement_vsize) + EXCLUSIVE_LOCKS_REQUIRED(pool.cs); + #endif // BITCOIN_POLICY_RBF_H diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 0bee27c2b21d9..4047ceda3ccdb 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -1238,3 +1239,131 @@ std::vector CTxMemPool::GatherClusters(const std::vector CTxMemPool::CheckConflictTopology(const setEntries& direct_conflicts) +{ + for (const auto& direct_conflict : direct_conflicts) { + // Ancestor and descendant counts are inclusive of the tx itself. + const auto ancestor_count{direct_conflict->GetCountWithAncestors()}; + const auto descendant_count{direct_conflict->GetCountWithDescendants()}; + const bool has_ancestor{ancestor_count > 1}; + const bool has_descendant{descendant_count > 1}; + const auto& txid_string{direct_conflict->GetSharedTx()->GetHash().ToString()}; + // The only allowed configurations are: + // 1 ancestor and 0 descendant + // 0 ancestor and 1 descendant + // 0 ancestor and 0 descendant + if (ancestor_count > 2) { + return strprintf("%s has %u ancestors, max 1 allowed", txid_string, ancestor_count - 1); + } else if (descendant_count > 2) { + return strprintf("%s has %u descendants, max 1 allowed", txid_string, descendant_count - 1); + } else if (has_ancestor && has_descendant) { + return strprintf("%s has both ancestor and descendant, exceeding cluster limit of 2", txid_string); + } + // Additionally enforce that: + // If we have a child, we are its only parent. + // If we have a parent, we are its only child. 
+ if (has_descendant) { + const auto& our_child = direct_conflict->GetMemPoolChildrenConst().begin(); + if (our_child->get().GetCountWithAncestors() > 2) { + return strprintf("%s is not the only parent of child %s", + txid_string, our_child->get().GetSharedTx()->GetHash().ToString()); + } + } else if (has_ancestor) { + const auto& our_parent = direct_conflict->GetMemPoolParentsConst().begin(); + if (our_parent->get().GetCountWithDescendants() > 2) { + return strprintf("%s is not the only child of parent %s", + txid_string, our_parent->get().GetSharedTx()->GetHash().ToString()); + } + } + } + return std::nullopt; +} + +util::Result, std::vector>> CTxMemPool::CalculateFeerateDiagramsForRBF(CAmount replacement_fees, int64_t replacement_vsize, const setEntries& direct_conflicts, const setEntries& all_conflicts) +{ + Assume(replacement_vsize > 0); + + auto err_string{CheckConflictTopology(direct_conflicts)}; + if (err_string.has_value()) { + // Unsupported topology for calculating a feerate diagram + return util::Error{Untranslated(err_string.value())}; + } + + // new diagram will have chunks that consist of each ancestor of + // direct_conflicts that is at its own fee/size, along with the replacement + // tx/package at its own fee/size + + // old diagram will consist of each element of all_conflicts either at + // its own feerate (followed by any descendant at its own feerate) or as a + // single chunk at its descendant's ancestor feerate. + + std::vector old_chunks; + // Step 1: build the old diagram. + + // The above clusters are all trivially linearized; + // they have a strict topology of 1 or two connected transactions. + + // OLD: Compute existing chunks from all affected clusters + for (auto txiter : all_conflicts) { + // Does this transaction have descendants? + if (txiter->GetCountWithDescendants() > 1) { + // Consider this tx when we consider the descendant. + continue; + } + // Does this transaction have ancestors? 
+ FeeFrac individual{txiter->GetModifiedFee(), txiter->GetTxSize()}; + if (txiter->GetCountWithAncestors() > 1) { + // We'll add chunks for either the ancestor by itself and this tx + // by itself, or for a combined package. + FeeFrac package{txiter->GetModFeesWithAncestors(), static_cast(txiter->GetSizeWithAncestors())}; + if (individual > package) { + // The individual feerate is higher than the package, and + // therefore higher than the parent's fee. Chunk these + // together. + old_chunks.emplace_back(package); + } else { + // Add two points, one for the parent and one for this child. + old_chunks.emplace_back(package - individual); + old_chunks.emplace_back(individual); + } + } else { + old_chunks.emplace_back(individual); + } + } + + // No topology restrictions post-chunking; sort + std::sort(old_chunks.begin(), old_chunks.end(), std::greater()); + std::vector old_diagram = BuildDiagramFromChunks(old_chunks); + + std::vector new_chunks; + + /* Step 2: build the NEW diagram + * CON = Conflicts of proposed chunk + * CNK = Proposed chunk + * NEW = OLD - CON + CNK: New diagram includes all chunks in OLD, minus + * the conflicts, plus the proposed chunk + */ + + // OLD - CON: Add any parents of direct conflicts that are not conflicted themselves + for (auto direct_conflict : direct_conflicts) { + // If a direct conflict has an ancestor that is not in all_conflicts, + // it can be affected by the replacement of the child. + if (direct_conflict->GetMemPoolParentsConst().size() > 0) { + // Grab the parent. + const CTxMemPoolEntry& parent = direct_conflict->GetMemPoolParentsConst().begin()->get(); + if (!all_conflicts.count(mapTx.iterator_to(parent))) { + // This transaction would be left over, so add to the NEW + // diagram. 
+ new_chunks.emplace_back(parent.GetModifiedFee(), parent.GetTxSize()); + } + } + } + // + CNK: Add the proposed chunk itself + new_chunks.emplace_back(replacement_fees, int32_t(replacement_vsize)); + + // No topology restrictions post-chunking; sort + std::sort(new_chunks.begin(), new_chunks.end(), std::greater()); + std::vector new_diagram = BuildDiagramFromChunks(new_chunks); + return std::make_pair(old_diagram, new_diagram); +} diff --git a/src/txmempool.h b/src/txmempool.h index 32f2c024f76e2..9dd4d56da70bd 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -736,6 +737,28 @@ class CTxMemPool return m_sequence_number; } + /** + * Calculate the old and new mempool feerate diagrams relating to the + * clusters that would be affected by a potential replacement transaction. + * (replacement_fees, replacement_vsize) values are gathered from a + * proposed set of replacement transactions that are considered as a single + * chunk, and represent their complete cluster. In other words, they have no + * in-mempool ancestors. + * + * @param[in] replacement_fees Package fees + * @param[in] replacement_vsize Package size (must be greater than 0) + * @param[in] direct_conflicts All transactions that would be removed directly by + * having a conflicting input with a proposed transaction + * @param[in] all_conflicts All transactions that would be removed + * @return old and new diagram pair respectively, or an error string if the conflicts don't match a calculable topology + */ + util::Result, std::vector>> CalculateFeerateDiagramsForRBF(CAmount replacement_fees, int64_t replacement_vsize, const setEntries& direct_conflicts, const setEntries& all_conflicts) EXCLUSIVE_LOCKS_REQUIRED(cs); + + /* Check that all direct conflicts are in a cluster size of two or less. Each + * direct conflict may be in a separate cluster. 
+ */ + std::optional CheckConflictTopology(const setEntries& direct_conflicts); + private: /** UpdateForDescendants is used by UpdateTransactionsFromBlock to update * the descendants for a single transaction that has been added to the From 588a98dccc5dbb6e331f28d83a4a10a13d70eb31 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 12 Jan 2024 11:05:05 -0500 Subject: [PATCH 37/79] fuzz: Add fuzz target for ImprovesFeerateDiagram Co-authored-by: Suhas Daftuar --- src/test/fuzz/rbf.cpp | 115 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 114 insertions(+), 1 deletion(-) diff --git a/src/test/fuzz/rbf.cpp b/src/test/fuzz/rbf.cpp index aa6385d12d876..42008c6ad9666 100644 --- a/src/test/fuzz/rbf.cpp +++ b/src/test/fuzz/rbf.cpp @@ -23,12 +23,30 @@ namespace { const BasicTestingSetup* g_setup; } // namespace +const int NUM_ITERS = 10000; + +std::vector g_outpoints; + void initialize_rbf() { static const auto testing_setup = MakeNoLogFileContext<>(); g_setup = testing_setup.get(); } +void initialize_package_rbf() +{ + static const auto testing_setup = MakeNoLogFileContext<>(); + g_setup = testing_setup.get(); + + // Create a fixed set of unique "UTXOs" to source parents from + // to avoid fuzzer giving circular references + for (int i = 0; i < NUM_ITERS; ++i) { + g_outpoints.emplace_back(); + g_outpoints.back().n = i; + } + +} + FUZZ_TARGET(rbf, .init = initialize_rbf) { FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); @@ -40,7 +58,7 @@ FUZZ_TARGET(rbf, .init = initialize_rbf) CTxMemPool pool{MemPoolOptionsForTest(g_setup->m_node)}; - LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), NUM_ITERS) { const std::optional another_mtx = ConsumeDeserializable(fuzzed_data_provider, TX_WITH_WITNESS); if (!another_mtx) { @@ -63,3 +81,98 @@ FUZZ_TARGET(rbf, .init = initialize_rbf) (void)IsRBFOptIn(tx, pool); } } + +void CheckDiagramConcave(std::vector& diagram) +{ + // Diagrams are in 
monotonically-decreasing feerate order. + FeeFrac last_chunk = diagram.front(); + for (size_t i = 1; i child = ConsumeDeserializable(fuzzed_data_provider, TX_WITH_WITNESS); + if (!child) return; + + CTxMemPool pool{MemPoolOptionsForTest(g_setup->m_node)}; + + // Add a bunch of parent-child pairs to the mempool, and remember them. + std::vector mempool_txs; + size_t iter{0}; + + LOCK2(cs_main, pool.cs); + + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), NUM_ITERS) + { + // Make sure txns only have one input, and that a unique input is given to avoid circular references + std::optional parent = ConsumeDeserializable(fuzzed_data_provider, TX_WITH_WITNESS); + if (!parent) { + continue; + } + assert(iter <= g_outpoints.size()); + parent->vin.resize(1); + parent->vin[0].prevout = g_outpoints[iter++]; + + mempool_txs.emplace_back(*parent); + pool.addUnchecked(ConsumeTxMemPoolEntry(fuzzed_data_provider, mempool_txs.back())); + if (fuzzed_data_provider.ConsumeBool() && !child->vin.empty()) { + child->vin[0].prevout = COutPoint{mempool_txs.back().GetHash(), 0}; + } + mempool_txs.emplace_back(*child); + pool.addUnchecked(ConsumeTxMemPoolEntry(fuzzed_data_provider, mempool_txs.back())); + } + + // Pick some transactions at random to be the direct conflicts + CTxMemPool::setEntries direct_conflicts; + for (auto& tx : mempool_txs) { + if (fuzzed_data_provider.ConsumeBool()) { + direct_conflicts.insert(*pool.GetIter(tx.GetHash())); + } + } + + // Calculate all conflicts: + CTxMemPool::setEntries all_conflicts; + for (auto& txiter : direct_conflicts) { + pool.CalculateDescendants(txiter, all_conflicts); + } + + // Calculate the feerate diagrams for a replacement. 
+ CAmount replacement_fees = ConsumeMoney(fuzzed_data_provider); + int64_t replacement_vsize = fuzzed_data_provider.ConsumeIntegralInRange(1, 1000000); + auto calc_results{pool.CalculateFeerateDiagramsForRBF(replacement_fees, replacement_vsize, direct_conflicts, all_conflicts)}; + + if (calc_results.has_value()) { + // Sanity checks on the diagrams. + + // Diagrams start at 0. + assert(calc_results->first.front().size == 0); + assert(calc_results->first.front().fee == 0); + assert(calc_results->second.front().size == 0); + assert(calc_results->second.front().fee == 0); + + CheckDiagramConcave(calc_results->first); + CheckDiagramConcave(calc_results->second); + + CAmount replaced_fee{0}; + int64_t replaced_size{0}; + for (auto txiter : all_conflicts) { + replaced_fee += txiter->GetModifiedFee(); + replaced_size += txiter->GetTxSize(); + } + // The total fee of the new diagram should be the total fee of the old + // diagram - replaced_fee + replacement_fees + assert(calc_results->first.back().fee - replaced_fee + replacement_fees == calc_results->second.back().fee); + assert(calc_results->first.back().size - replaced_size + replacement_vsize == calc_results->second.back().size); + } + + // If internals report error, wrapper should too + auto err_tuple{ImprovesFeerateDiagram(pool, direct_conflicts, all_conflicts, replacement_fees, replacement_vsize)}; + if (!calc_results.has_value()) assert(err_tuple.has_value()); +} From e9c5aeb11d641b8cae373452339760809625021d Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 12 Jan 2024 11:08:06 -0500 Subject: [PATCH 38/79] test: Add tests for CompareFeerateDiagram and CheckConflictTopology --- src/test/rbf_tests.cpp | 217 +++++++++++++++++++++++++++++++++-------- 1 file changed, 177 insertions(+), 40 deletions(-) diff --git a/src/test/rbf_tests.cpp b/src/test/rbf_tests.cpp index e6c135eed98f0..995c570484e7c 100644 --- a/src/test/rbf_tests.cpp +++ b/src/test/rbf_tests.cpp @@ -37,7 +37,7 @@ static inline CTransactionRef 
make_tx(const std::vector& inputs return MakeTransactionRef(tx); } -static void add_descendants(const CTransactionRef& tx, int32_t num_descendants, CTxMemPool& pool) +static CTransactionRef add_descendants(const CTransactionRef& tx, int32_t num_descendants, CTxMemPool& pool) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs) { AssertLockHeld(::cs_main); @@ -50,6 +50,21 @@ static void add_descendants(const CTransactionRef& tx, int32_t num_descendants, pool.addUnchecked(entry.FromTx(next_tx)); tx_to_spend = next_tx; } + // Return last created tx + return tx_to_spend; +} + +static CTransactionRef add_descendant_to_parents(const std::vector& parents, CTxMemPool& pool) + EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs) +{ + AssertLockHeld(::cs_main); + AssertLockHeld(pool.cs); + TestMemPoolEntryHelper entry; + // Assumes this isn't already spent in mempool + auto child_tx = make_tx(/*inputs=*/parents, /*output_values=*/{50 * CENT}); + pool.addUnchecked(entry.FromTx(child_tx)); + // Return last created tx + return child_tx; } BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup) @@ -89,28 +104,46 @@ BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup) const auto tx8 = make_tx(/*inputs=*/ {m_coinbase_txns[4]}, /*output_values=*/ {999 * CENT}); pool.addUnchecked(entry.Fee(high_fee).FromTx(tx8)); - const auto entry1 = pool.GetIter(tx1->GetHash()).value(); - const auto entry2 = pool.GetIter(tx2->GetHash()).value(); - const auto entry3 = pool.GetIter(tx3->GetHash()).value(); - const auto entry4 = pool.GetIter(tx4->GetHash()).value(); - const auto entry5 = pool.GetIter(tx5->GetHash()).value(); - const auto entry6 = pool.GetIter(tx6->GetHash()).value(); - const auto entry7 = pool.GetIter(tx7->GetHash()).value(); - const auto entry8 = pool.GetIter(tx8->GetHash()).value(); - - BOOST_CHECK_EQUAL(entry1->GetFee(), normal_fee); - BOOST_CHECK_EQUAL(entry2->GetFee(), normal_fee); - BOOST_CHECK_EQUAL(entry3->GetFee(), low_fee); - BOOST_CHECK_EQUAL(entry4->GetFee(), 
high_fee); - BOOST_CHECK_EQUAL(entry5->GetFee(), low_fee); - BOOST_CHECK_EQUAL(entry6->GetFee(), low_fee); - BOOST_CHECK_EQUAL(entry7->GetFee(), high_fee); - BOOST_CHECK_EQUAL(entry8->GetFee(), high_fee); - - CTxMemPool::setEntries set_12_normal{entry1, entry2}; - CTxMemPool::setEntries set_34_cpfp{entry3, entry4}; - CTxMemPool::setEntries set_56_low{entry5, entry6}; - CTxMemPool::setEntries all_entries{entry1, entry2, entry3, entry4, entry5, entry6, entry7, entry8}; + // Normal txs, will chain txns right before CheckConflictTopology test + const auto tx9 = make_tx(/*inputs=*/ {m_coinbase_txns[5]}, /*output_values=*/ {995 * CENT}); + pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx9)); + const auto tx10 = make_tx(/*inputs=*/ {m_coinbase_txns[6]}, /*output_values=*/ {995 * CENT}); + pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx10)); + + // Will make these two parents of single child + const auto tx11 = make_tx(/*inputs=*/ {m_coinbase_txns[7]}, /*output_values=*/ {995 * CENT}); + pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx11)); + const auto tx12 = make_tx(/*inputs=*/ {m_coinbase_txns[8]}, /*output_values=*/ {995 * CENT}); + pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx12)); + + const auto entry1_normal = pool.GetIter(tx1->GetHash()).value(); + const auto entry2_normal = pool.GetIter(tx2->GetHash()).value(); + const auto entry3_low = pool.GetIter(tx3->GetHash()).value(); + const auto entry4_high = pool.GetIter(tx4->GetHash()).value(); + const auto entry5_low = pool.GetIter(tx5->GetHash()).value(); + const auto entry6_low_prioritised = pool.GetIter(tx6->GetHash()).value(); + const auto entry7_high = pool.GetIter(tx7->GetHash()).value(); + const auto entry8_high = pool.GetIter(tx8->GetHash()).value(); + const auto entry9_unchained = pool.GetIter(tx9->GetHash()).value(); + const auto entry10_unchained = pool.GetIter(tx10->GetHash()).value(); + const auto entry11_unchained = pool.GetIter(tx11->GetHash()).value(); + const auto entry12_unchained = 
pool.GetIter(tx12->GetHash()).value(); + + BOOST_CHECK_EQUAL(entry1_normal->GetFee(), normal_fee); + BOOST_CHECK_EQUAL(entry2_normal->GetFee(), normal_fee); + BOOST_CHECK_EQUAL(entry3_low->GetFee(), low_fee); + BOOST_CHECK_EQUAL(entry4_high->GetFee(), high_fee); + BOOST_CHECK_EQUAL(entry5_low->GetFee(), low_fee); + BOOST_CHECK_EQUAL(entry6_low_prioritised->GetFee(), low_fee); + BOOST_CHECK_EQUAL(entry7_high->GetFee(), high_fee); + BOOST_CHECK_EQUAL(entry8_high->GetFee(), high_fee); + + CTxMemPool::setEntries set_12_normal{entry1_normal, entry2_normal}; + CTxMemPool::setEntries set_34_cpfp{entry3_low, entry4_high}; + CTxMemPool::setEntries set_56_low{entry5_low, entry6_low_prioritised}; + CTxMemPool::setEntries set_78_high{entry7_high, entry8_high}; + CTxMemPool::setEntries all_entries{entry1_normal, entry2_normal, entry3_low, entry4_high, + entry5_low, entry6_low_prioritised, entry7_high, entry8_high}; CTxMemPool::setEntries empty_set; const auto unused_txid{GetRandHash()}; @@ -118,29 +151,29 @@ BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup) // Tests for PaysMoreThanConflicts // These tests use feerate, not absolute fee. BOOST_CHECK(PaysMoreThanConflicts(/*iters_conflicting=*/set_12_normal, - /*replacement_feerate=*/CFeeRate(entry1->GetModifiedFee() + 1, entry1->GetTxSize() + 2), + /*replacement_feerate=*/CFeeRate(entry1_normal->GetModifiedFee() + 1, entry1_normal->GetTxSize() + 2), /*txid=*/unused_txid).has_value()); // Replacement must be strictly greater than the originals. 
- BOOST_CHECK(PaysMoreThanConflicts(set_12_normal, CFeeRate(entry1->GetModifiedFee(), entry1->GetTxSize()), unused_txid).has_value()); - BOOST_CHECK(PaysMoreThanConflicts(set_12_normal, CFeeRate(entry1->GetModifiedFee() + 1, entry1->GetTxSize()), unused_txid) == std::nullopt); + BOOST_CHECK(PaysMoreThanConflicts(set_12_normal, CFeeRate(entry1_normal->GetModifiedFee(), entry1_normal->GetTxSize()), unused_txid).has_value()); + BOOST_CHECK(PaysMoreThanConflicts(set_12_normal, CFeeRate(entry1_normal->GetModifiedFee() + 1, entry1_normal->GetTxSize()), unused_txid) == std::nullopt); // These tests use modified fees (including prioritisation), not base fees. - BOOST_CHECK(PaysMoreThanConflicts({entry5}, CFeeRate(entry5->GetModifiedFee() + 1, entry5->GetTxSize()), unused_txid) == std::nullopt); - BOOST_CHECK(PaysMoreThanConflicts({entry6}, CFeeRate(entry6->GetFee() + 1, entry6->GetTxSize()), unused_txid).has_value()); - BOOST_CHECK(PaysMoreThanConflicts({entry6}, CFeeRate(entry6->GetModifiedFee() + 1, entry6->GetTxSize()), unused_txid) == std::nullopt); + BOOST_CHECK(PaysMoreThanConflicts({entry5_low}, CFeeRate(entry5_low->GetModifiedFee() + 1, entry5_low->GetTxSize()), unused_txid) == std::nullopt); + BOOST_CHECK(PaysMoreThanConflicts({entry6_low_prioritised}, CFeeRate(entry6_low_prioritised->GetFee() + 1, entry6_low_prioritised->GetTxSize()), unused_txid).has_value()); + BOOST_CHECK(PaysMoreThanConflicts({entry6_low_prioritised}, CFeeRate(entry6_low_prioritised->GetModifiedFee() + 1, entry6_low_prioritised->GetTxSize()), unused_txid) == std::nullopt); // PaysMoreThanConflicts checks individual feerate, not ancestor feerate. This test compares - // replacement_feerate and entry4's feerate, which are the same. The replacement_feerate is - // considered too low even though entry4 has a low ancestor feerate. 
- BOOST_CHECK(PaysMoreThanConflicts(set_34_cpfp, CFeeRate(entry4->GetModifiedFee(), entry4->GetTxSize()), unused_txid).has_value()); + // replacement_feerate and entry4_high's feerate, which are the same. The replacement_feerate is + // considered too low even though entry4_high has a low ancestor feerate. + BOOST_CHECK(PaysMoreThanConflicts(set_34_cpfp, CFeeRate(entry4_high->GetModifiedFee(), entry4_high->GetTxSize()), unused_txid).has_value()); // Tests for EntriesAndTxidsDisjoint BOOST_CHECK(EntriesAndTxidsDisjoint(empty_set, {tx1->GetHash()}, unused_txid) == std::nullopt); BOOST_CHECK(EntriesAndTxidsDisjoint(set_12_normal, {tx3->GetHash()}, unused_txid) == std::nullopt); - BOOST_CHECK(EntriesAndTxidsDisjoint({entry2}, {tx2->GetHash()}, unused_txid).has_value()); + BOOST_CHECK(EntriesAndTxidsDisjoint({entry2_normal}, {tx2->GetHash()}, unused_txid).has_value()); BOOST_CHECK(EntriesAndTxidsDisjoint(set_12_normal, {tx1->GetHash()}, unused_txid).has_value()); BOOST_CHECK(EntriesAndTxidsDisjoint(set_12_normal, {tx2->GetHash()}, unused_txid).has_value()); // EntriesAndTxidsDisjoint does not calculate descendants of iters_conflicting; it uses whatever - // the caller passed in. As such, no error is returned even though entry2 is a descendant of tx1. - BOOST_CHECK(EntriesAndTxidsDisjoint({entry2}, {tx1->GetHash()}, unused_txid) == std::nullopt); + // the caller passed in. As such, no error is returned even though entry2_normal is a descendant of tx1. 
+ BOOST_CHECK(EntriesAndTxidsDisjoint({entry2_normal}, {tx1->GetHash()}, unused_txid) == std::nullopt); // Tests for PaysForRBF const CFeeRate incremental_relay_feerate{DEFAULT_INCREMENTAL_RELAY_FEE}; @@ -163,8 +196,8 @@ BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup) BOOST_CHECK(PaysForRBF(low_fee, high_fee + 99999999, 99999999, incremental_relay_feerate, unused_txid) == std::nullopt); // Tests for GetEntriesForConflicts - CTxMemPool::setEntries all_parents{entry1, entry3, entry5, entry7, entry8}; - CTxMemPool::setEntries all_children{entry2, entry4, entry6}; + CTxMemPool::setEntries all_parents{entry1_normal, entry3_low, entry5_low, entry7_high, entry8_high}; + CTxMemPool::setEntries all_children{entry2_normal, entry4_high, entry6_low_prioritised}; const std::vector parent_inputs({m_coinbase_txns[0], m_coinbase_txns[1], m_coinbase_txns[2], m_coinbase_txns[3], m_coinbase_txns[4]}); const auto conflicts_with_parents = make_tx(parent_inputs, {50 * CENT}); @@ -215,15 +248,119 @@ BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup) BOOST_CHECK(HasNoNewUnconfirmed(/*tx=*/ *spends_unconfirmed.get(), /*pool=*/ pool, /*iters_conflicting=*/ all_entries) == std::nullopt); - BOOST_CHECK(HasNoNewUnconfirmed(*spends_unconfirmed.get(), pool, {entry2}) == std::nullopt); + BOOST_CHECK(HasNoNewUnconfirmed(*spends_unconfirmed.get(), pool, {entry2_normal}) == std::nullopt); BOOST_CHECK(HasNoNewUnconfirmed(*spends_unconfirmed.get(), pool, empty_set).has_value()); const auto spends_new_unconfirmed = make_tx({tx1, tx8}, {36 * CENT}); - BOOST_CHECK(HasNoNewUnconfirmed(*spends_new_unconfirmed.get(), pool, {entry2}).has_value()); + BOOST_CHECK(HasNoNewUnconfirmed(*spends_new_unconfirmed.get(), pool, {entry2_normal}).has_value()); BOOST_CHECK(HasNoNewUnconfirmed(*spends_new_unconfirmed.get(), pool, all_entries).has_value()); const auto spends_conflicting_confirmed = make_tx({m_coinbase_txns[0], m_coinbase_txns[1]}, {45 * CENT}); - 
BOOST_CHECK(HasNoNewUnconfirmed(*spends_conflicting_confirmed.get(), pool, {entry1, entry3}) == std::nullopt); + BOOST_CHECK(HasNoNewUnconfirmed(*spends_conflicting_confirmed.get(), pool, {entry1_normal, entry3_low}) == std::nullopt); + + // Tests for CheckConflictTopology + + // Tx4 has 23 descendants + BOOST_CHECK(pool.CheckConflictTopology(set_34_cpfp).has_value()); + + // No descendants yet + BOOST_CHECK(pool.CheckConflictTopology({entry9_unchained}) == std::nullopt); + + // Add 1 descendant, still ok + add_descendants(tx9, 1, pool); + BOOST_CHECK(pool.CheckConflictTopology({entry9_unchained}) == std::nullopt); + + // N direct conflicts; ok + BOOST_CHECK(pool.CheckConflictTopology({entry9_unchained, entry10_unchained, entry11_unchained}) == std::nullopt); + + // Add 1 descendant, still ok, even if it's considered a direct conflict as well + const auto child_tx = add_descendants(tx10, 1, pool); + const auto entry10_child = pool.GetIter(child_tx->GetHash()).value(); + BOOST_CHECK(pool.CheckConflictTopology({entry9_unchained, entry10_unchained, entry11_unchained}) == std::nullopt); + BOOST_CHECK(pool.CheckConflictTopology({entry9_unchained, entry10_unchained, entry11_unchained, entry10_child}) == std::nullopt); + + // One more, size 3 cluster too much + const auto grand_child_tx = add_descendants(child_tx, 1, pool); + const auto entry10_grand_child = pool.GetIter(grand_child_tx->GetHash()).value(); + BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry9_unchained, entry10_unchained, entry11_unchained}).value(), strprintf("%s has 2 descendants, max 1 allowed", entry10_unchained->GetSharedTx()->GetHash().ToString())); + // even if direct conflict is a descendant itself + BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry9_unchained, entry10_grand_child, entry11_unchained}).value(), strprintf("%s has 2 ancestors, max 1 allowed", entry10_grand_child->GetSharedTx()->GetHash().ToString())); + + // Make a single child from two singleton parents + const auto
two_parent_child_tx = add_descendant_to_parents({tx11, tx12}, pool); + const auto entry_two_parent_child = pool.GetIter(two_parent_child_tx->GetHash()).value(); + BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry11_unchained}).value(), strprintf("%s is not the only parent of child %s", entry11_unchained->GetSharedTx()->GetHash().ToString(), entry_two_parent_child->GetSharedTx()->GetHash().ToString())); + BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry12_unchained}).value(), strprintf("%s is not the only parent of child %s", entry12_unchained->GetSharedTx()->GetHash().ToString(), entry_two_parent_child->GetSharedTx()->GetHash().ToString())); + BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry_two_parent_child}).value(), strprintf("%s has 2 ancestors, max 1 allowed", entry_two_parent_child->GetSharedTx()->GetHash().ToString())); +} + +BOOST_AUTO_TEST_CASE(feerate_diagram_utilities) +{ + // Sanity check the correctness of the feerate diagram comparison. + + // A strictly better case. + std::vector old_diagram{{FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}}}; + std::vector new_diagram{{FeeFrac{0, 0}, FeeFrac{1000, 300}, FeeFrac{1050, 400}}}; + + BOOST_CHECK(std::is_lt(CompareFeerateDiagram(old_diagram, new_diagram))); + + // Incomparable diagrams + old_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}}; + new_diagram = {FeeFrac{0, 0}, FeeFrac{1000, 300}, FeeFrac{1000, 400}}; + + BOOST_CHECK(CompareFeerateDiagram(old_diagram, new_diagram) == std::partial_ordering::unordered); + + // Strictly better but smaller size. 
+ old_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}}; + new_diagram = {FeeFrac{0, 0}, FeeFrac{1100, 300}}; + + BOOST_CHECK(std::is_lt(CompareFeerateDiagram(old_diagram, new_diagram))); + + // New diagram is strictly better due to the first chunk, even though + // second chunk contributes no fees + old_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}}; + new_diagram = {FeeFrac{0, 0}, FeeFrac{1100, 100}, FeeFrac{1100, 200}}; + + BOOST_CHECK(std::is_lt(CompareFeerateDiagram(old_diagram, new_diagram))); + + // Feerate of first new chunk is better, but second chunk is worse + old_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}}; + new_diagram = {FeeFrac{0, 0}, FeeFrac{750, 100}, FeeFrac{999, 350}, FeeFrac{1150, 1000}}; + + BOOST_CHECK(CompareFeerateDiagram(old_diagram, new_diagram) == std::partial_ordering::unordered); + + // If we make the second chunk slightly better, the new diagram now wins. + old_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}}; + new_diagram = {FeeFrac{0, 0}, FeeFrac{750, 100}, FeeFrac{1000, 350}, FeeFrac{1150, 500}}; + + BOOST_CHECK(std::is_lt(CompareFeerateDiagram(old_diagram, new_diagram))); + + // Identical diagrams, cannot be strictly better + old_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}}; + new_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}}; + + BOOST_CHECK(std::is_eq(CompareFeerateDiagram(old_diagram, new_diagram))); + + // Same aggregate fee, but different total size (trigger single tail fee check step) + old_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 399}}; + new_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}}; + + // No change in evaluation when tail check needed.
+ BOOST_CHECK(std::is_gt(CompareFeerateDiagram(old_diagram, new_diagram))); + + // Padding works on either argument + BOOST_CHECK(std::is_lt(CompareFeerateDiagram(new_diagram, old_diagram))); + + // Trigger multiple tail fee check steps + old_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 399}}; + new_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}, FeeFrac{1050, 401}, FeeFrac{1050, 402}}; + + BOOST_CHECK(std::is_gt(CompareFeerateDiagram(old_diagram, new_diagram))); + BOOST_CHECK(std::is_lt(CompareFeerateDiagram(new_diagram, old_diagram))); + + // Multiple tail fee check steps, unordered result + new_diagram = {FeeFrac{0, 0}, FeeFrac{950, 300}, FeeFrac{1050, 400}, FeeFrac{1050, 401}, FeeFrac{1050, 402}, FeeFrac{1051, 403}}; + BOOST_CHECK(CompareFeerateDiagram(old_diagram, new_diagram) == std::partial_ordering::unordered); } BOOST_AUTO_TEST_SUITE_END() From 4d6528a3d6bf3821c216c68f99170e2faab5d63c Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 12 Jan 2024 11:17:47 -0500 Subject: [PATCH 39/79] fuzz: fuzz diagram creation and comparison Co-authored-by: Suhas Daftuar Co-authored-by: Pieter Wuille --- src/Makefile.test.include | 1 + src/test/fuzz/feeratediagram.cpp | 119 +++++++++++++++++++++++++++++++ 2 files changed, 120 insertions(+) create mode 100644 src/test/fuzz/feeratediagram.cpp diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 83ea4b139cac8..2e475f08039a2 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -315,6 +315,7 @@ test_fuzz_fuzz_SOURCES = \ test/fuzz/deserialize.cpp \ test/fuzz/eval_script.cpp \ test/fuzz/fee_rate.cpp \ + test/fuzz/feeratediagram.cpp \ test/fuzz/fees.cpp \ test/fuzz/flatfile.cpp \ test/fuzz/float.cpp \ diff --git a/src/test/fuzz/feeratediagram.cpp b/src/test/fuzz/feeratediagram.cpp new file mode 100644 index 0000000000000..6d710093cbb23 --- /dev/null +++ b/src/test/fuzz/feeratediagram.cpp @@ -0,0 +1,119 @@ +// Copyright (c) 2023 The Bitcoin Core 
developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include + +#include + +#include +#include + +#include +#include + +#include + +namespace { + +/** Evaluate a diagram at a specific size, returning the fee as a fraction. + * + * Fees in diagram cannot exceed 2^32, as the returned evaluation could overflow + * the FeeFrac::fee field in the result. */ +FeeFrac EvaluateDiagram(int32_t size, Span diagram) +{ + assert(diagram.size() > 0); + unsigned not_above = 0; + unsigned not_below = diagram.size() - 1; + // If outside the range of diagram, extend begin/end. + if (size < diagram[not_above].size) return {diagram[not_above].fee, 1}; + if (size > diagram[not_below].size) return {diagram[not_below].fee, 1}; + // Perform bisection search to locate the diagram segment that size is in. + while (not_below > not_above + 1) { + unsigned mid = (not_below + not_above) / 2; + if (diagram[mid].size <= size) not_above = mid; + if (diagram[mid].size >= size) not_below = mid; + } + // If the size matches a transition point between segments, return its fee. + if (not_below == not_above) return {diagram[not_below].fee, 1}; + // Otherwise, interpolate. 
+ auto dir_coef = diagram[not_below] - diagram[not_above]; + assert(dir_coef.size > 0); + // Let A = diagram[not_above] and B = diagram[not_below] + const auto& point_a = diagram[not_above]; + // We want to return: + // A.fee + (B.fee - A.fee) / (B.size - A.size) * (size - A.size) + // = A.fee + dir_coef.fee / dir_coef.size * (size - A.size) + // = (A.fee * dir_coef.size + dir_coef.fee * (size - A.size)) / dir_coef.size + assert(size >= point_a.size); + return {point_a.fee * dir_coef.size + dir_coef.fee * (size - point_a.size), dir_coef.size}; +} + +std::weak_ordering CompareFeeFracWithDiagram(const FeeFrac& ff, Span diagram) +{ + return FeeRateCompare(FeeFrac{ff.fee, 1}, EvaluateDiagram(ff.size, diagram)); +} + +std::partial_ordering CompareDiagrams(Span dia1, Span dia2) +{ + bool all_ge = true; + bool all_le = true; + for (const auto p1 : dia1) { + auto cmp = CompareFeeFracWithDiagram(p1, dia2); + if (std::is_lt(cmp)) all_ge = false; + if (std::is_gt(cmp)) all_le = false; + } + for (const auto p2 : dia2) { + auto cmp = CompareFeeFracWithDiagram(p2, dia1); + if (std::is_lt(cmp)) all_le = false; + if (std::is_gt(cmp)) all_ge = false; + } + if (all_ge && all_le) return std::partial_ordering::equivalent; + if (all_ge && !all_le) return std::partial_ordering::greater; + if (!all_ge && all_le) return std::partial_ordering::less; + return std::partial_ordering::unordered; +} + +void PopulateChunks(FuzzedDataProvider& fuzzed_data_provider, std::vector& chunks) +{ + chunks.clear(); + + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 50) + { + chunks.emplace_back(fuzzed_data_provider.ConsumeIntegralInRange(INT32_MIN>>1, INT32_MAX>>1), fuzzed_data_provider.ConsumeIntegralInRange(1, 1000000)); + } + return; +} + +} // namespace + +FUZZ_TARGET(build_and_compare_feerate_diagram) +{ + // Generate a random set of chunks + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + std::vector chunks1, chunks2; + FeeFrac empty{0, 0}; + + 
PopulateChunks(fuzzed_data_provider, chunks1); + PopulateChunks(fuzzed_data_provider, chunks2); + + std::vector diagram1{BuildDiagramFromChunks(chunks1)}; + std::vector diagram2{BuildDiagramFromChunks(chunks2)}; + + assert(diagram1.front() == empty); + assert(diagram2.front() == empty); + + auto real = CompareFeerateDiagram(diagram1, diagram2); + auto sim = CompareDiagrams(diagram1, diagram2); + assert(real == sim); + + // Do explicit evaluation at up to 1000 points, and verify consistency with the result. + LIMITED_WHILE(fuzzed_data_provider.remaining_bytes(), 1000) { + int32_t size = fuzzed_data_provider.ConsumeIntegralInRange(0, diagram2.back().size); + auto eval1 = EvaluateDiagram(size, diagram1); + auto eval2 = EvaluateDiagram(size, diagram2); + auto cmp = FeeRateCompare(eval1, eval2); + if (std::is_lt(cmp)) assert(!std::is_gt(real)); + if (std::is_gt(cmp)) assert(!std::is_lt(real)); + } +} From 7e89b659e1ddd0c04fa2bddba9706b5d1a1daec3 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Thu, 18 Jan 2024 12:37:56 -0500 Subject: [PATCH 40/79] Add fuzz test for FeeFrac --- src/Makefile.test.include | 1 + src/test/fuzz/feefrac.cpp | 123 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 src/test/fuzz/feefrac.cpp diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 2e475f08039a2..d345b41a0a544 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -314,6 +314,7 @@ test_fuzz_fuzz_SOURCES = \ test/fuzz/descriptor_parse.cpp \ test/fuzz/deserialize.cpp \ test/fuzz/eval_script.cpp \ + test/fuzz/feefrac.cpp \ test/fuzz/fee_rate.cpp \ test/fuzz/feeratediagram.cpp \ test/fuzz/fees.cpp \ diff --git a/src/test/fuzz/feefrac.cpp b/src/test/fuzz/feefrac.cpp new file mode 100644 index 0000000000000..2c7553360e6ac --- /dev/null +++ b/src/test/fuzz/feefrac.cpp @@ -0,0 +1,123 @@ +// Copyright (c) 2024 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// 
file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include + +#include +#include +#include + +namespace { + +/** Compute a * b, represented in 4x32 bits, highest limb first. */ +std::array Mul128(uint64_t a, uint64_t b) +{ + std::array ret{0, 0, 0, 0}; + + /** Perform ret += v << (32 * pos), at 128-bit precision. */ + auto add_fn = [&](uint64_t v, int pos) { + uint64_t accum{0}; + for (int i = 0; i + pos < 4; ++i) { + // Add current value at limb pos in ret. + accum += ret[3 - pos - i]; + // Add low or high half of v. + if (i == 0) accum += v & 0xffffffff; + if (i == 1) accum += v >> 32; + // Store lower half of result in limb pos in ret. + ret[3 - pos - i] = accum & 0xffffffff; + // Leave carry in accum. + accum >>= 32; + } + // Make sure no overflow. + assert(accum == 0); + }; + + // Multiply the 4 individual limbs (schoolbook multiply, with base 2^32). + add_fn((a & 0xffffffff) * (b & 0xffffffff), 0); + add_fn((a >> 32) * (b & 0xffffffff), 1); + add_fn((a & 0xffffffff) * (b >> 32), 1); + add_fn((a >> 32) * (b >> 32), 2); + return ret; +} + +/* comparison helper for std::array */ +std::strong_ordering compare_arrays(const std::array& a, const std::array& b) { + for (size_t i = 0; i < a.size(); ++i) { + if (a[i] != b[i]) return a[i] <=> b[i]; + } + return std::strong_ordering::equal; +} + +std::strong_ordering MulCompare(int64_t a1, int64_t a2, int64_t b1, int64_t b2) +{ + // Compute and compare signs. + int sign_a = (a1 == 0 ? 0 : a1 < 0 ? -1 : 1) * (a2 == 0 ? 0 : a2 < 0 ? -1 : 1); + int sign_b = (b1 == 0 ? 0 : b1 < 0 ? -1 : 1) * (b2 == 0 ? 0 : b2 < 0 ? -1 : 1); + if (sign_a != sign_b) return sign_a <=> sign_b; + + // Compute absolute values. + uint64_t abs_a1 = static_cast(a1), abs_a2 = static_cast(a2); + uint64_t abs_b1 = static_cast(b1), abs_b2 = static_cast(b2); + // Use (~x + 1) instead of the equivalent (-x) to silence the linter; mod 2^64 behavior is + // intentional here. 
+ if (a1 < 0) abs_a1 = ~abs_a1 + 1; + if (a2 < 0) abs_a2 = ~abs_a2 + 1; + if (b1 < 0) abs_b1 = ~abs_b1 + 1; + if (b2 < 0) abs_b2 = ~abs_b2 + 1; + + // Compute products of absolute values. + auto mul_abs_a = Mul128(abs_a1, abs_a2); + auto mul_abs_b = Mul128(abs_b1, abs_b2); + if (sign_a < 0) { + return compare_arrays(mul_abs_b, mul_abs_a); + } else { + return compare_arrays(mul_abs_a, mul_abs_b); + } +} + +} // namespace + +FUZZ_TARGET(feefrac) +{ + FuzzedDataProvider provider(buffer.data(), buffer.size()); + + int64_t f1 = provider.ConsumeIntegral(); + int32_t s1 = provider.ConsumeIntegral(); + if (s1 == 0) f1 = 0; + FeeFrac fr1(f1, s1); + assert(fr1.IsEmpty() == (s1 == 0)); + + int64_t f2 = provider.ConsumeIntegral(); + int32_t s2 = provider.ConsumeIntegral(); + if (s2 == 0) f2 = 0; + FeeFrac fr2(f2, s2); + assert(fr2.IsEmpty() == (s2 == 0)); + + // Feerate comparisons + auto cmp_feerate = MulCompare(f1, s2, f2, s1); + assert(FeeRateCompare(fr1, fr2) == cmp_feerate); + assert((fr1 << fr2) == std::is_lt(cmp_feerate)); + assert((fr1 >> fr2) == std::is_gt(cmp_feerate)); + + // Compare with manual invocation of FeeFrac::Mul. + auto cmp_mul = FeeFrac::Mul(f1, s2) <=> FeeFrac::Mul(f2, s1); + assert(cmp_mul == cmp_feerate); + + // Same, but using FeeFrac::MulFallback. + auto cmp_fallback = FeeFrac::MulFallback(f1, s2) <=> FeeFrac::MulFallback(f2, s1); + assert(cmp_fallback == cmp_feerate); + + // Total order comparisons + auto cmp_total = std::is_eq(cmp_feerate) ? 
(s2 <=> s1) : cmp_feerate; + assert((fr1 <=> fr2) == cmp_total); + assert((fr1 < fr2) == std::is_lt(cmp_total)); + assert((fr1 > fr2) == std::is_gt(cmp_total)); + assert((fr1 <= fr2) == std::is_lteq(cmp_total)); + assert((fr1 >= fr2) == std::is_gteq(cmp_total)); + assert((fr1 == fr2) == std::is_eq(cmp_total)); + assert((fr1 != fr2) == std::is_neq(cmp_total)); +} From b767e6bd47cb0fb8f7aea3fb10c597e59a35bf74 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 19 Jan 2024 09:53:54 -0500 Subject: [PATCH 41/79] test: unit test for ImprovesFeerateDiagram --- src/test/rbf_tests.cpp | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/src/test/rbf_tests.cpp b/src/test/rbf_tests.cpp index 995c570484e7c..90d8c65956c18 100644 --- a/src/test/rbf_tests.cpp +++ b/src/test/rbf_tests.cpp @@ -294,6 +294,48 @@ BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup) BOOST_CHECK_EQUAL(pool.CheckConflictTopology({entry_two_parent_child}).value(), strprintf("%s has 2 ancestors, max 1 allowed", entry_two_parent_child->GetSharedTx()->GetHash().ToString())); } +BOOST_FIXTURE_TEST_CASE(improves_feerate, TestChain100Setup) +{ + CTxMemPool& pool = *Assert(m_node.mempool); + LOCK2(::cs_main, pool.cs); + TestMemPoolEntryHelper entry; + + const CAmount low_fee{CENT/100}; + const CAmount normal_fee{CENT/10}; + + // low feerate parent with normal feerate child + const auto tx1 = make_tx(/*inputs=*/ {m_coinbase_txns[0]}, /*output_values=*/ {10 * COIN}); + pool.addUnchecked(entry.Fee(low_fee).FromTx(tx1)); + const auto tx2 = make_tx(/*inputs=*/ {tx1}, /*output_values=*/ {995 * CENT}); + pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx2)); + + const auto entry1 = pool.GetIter(tx1->GetHash()).value(); + const auto tx1_fee = entry1->GetModifiedFee(); + const auto tx1_size = entry1->GetTxSize(); + const auto entry2 = pool.GetIter(tx2->GetHash()).value(); + const auto tx2_fee = entry2->GetModifiedFee(); + const auto tx2_size = entry2->GetTxSize(); + 
+ // Now test ImprovesFeerateDiagram with various levels of "package rbf" feerates + + // It doesn't improve itself + const auto res1 = ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, tx1_fee + tx2_fee, tx1_size + tx2_size); + BOOST_CHECK(res1.has_value()); + BOOST_CHECK(res1.value().first == DiagramCheckError::FAILURE); + BOOST_CHECK(res1.value().second == "insufficient feerate: does not improve feerate diagram"); + + // With one more satoshi it does + BOOST_CHECK(ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, tx1_fee + tx2_fee + 1, tx1_size + tx2_size) == std::nullopt); + + // Adding a grandchild makes the cluster size 3, which is uncalculable + const auto tx3 = make_tx(/*inputs=*/ {tx2}, /*output_values=*/ {995 * CENT}); + pool.addUnchecked(entry.Fee(normal_fee).FromTx(tx3)); + const auto res3 = ImprovesFeerateDiagram(pool, {entry1}, {entry1, entry2}, tx1_fee + tx2_fee + 1, tx1_size + tx2_size); + BOOST_CHECK(res3.has_value()); + BOOST_CHECK(res3.value().first == DiagramCheckError::UNCALCULABLE); + BOOST_CHECK(res3.value().second == strprintf("%s has 2 descendants, max 1 allowed", tx1->GetHash().GetHex())); +} + BOOST_AUTO_TEST_CASE(feerate_diagram_utilities) { // Sanity check the correctness of the feerate diagram comparison. 
From 72959867784098137a50c34f86deca8235eef4f8 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Thu, 15 Feb 2024 11:52:04 -0500 Subject: [PATCH 42/79] Unit tests for CalculateFeerateDiagramsForRBF --- src/test/rbf_tests.cpp | 158 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 158 insertions(+) diff --git a/src/test/rbf_tests.cpp b/src/test/rbf_tests.cpp index 90d8c65956c18..961fd62fe4869 100644 --- a/src/test/rbf_tests.cpp +++ b/src/test/rbf_tests.cpp @@ -334,6 +334,164 @@ BOOST_FIXTURE_TEST_CASE(improves_feerate, TestChain100Setup) BOOST_CHECK(res3.has_value()); BOOST_CHECK(res3.value().first == DiagramCheckError::UNCALCULABLE); BOOST_CHECK(res3.value().second == strprintf("%s has 2 descendants, max 1 allowed", tx1->GetHash().GetHex())); + +} + +BOOST_FIXTURE_TEST_CASE(calc_feerate_diagram_rbf, TestChain100Setup) +{ + CTxMemPool& pool = *Assert(m_node.mempool); + LOCK2(::cs_main, pool.cs); + TestMemPoolEntryHelper entry; + + const CAmount low_fee{CENT/100}; + const CAmount normal_fee{CENT/10}; + const CAmount high_fee{CENT}; + + // low -> high -> medium fee transactions that would result in two chunks together + const auto low_tx = make_tx(/*inputs=*/ {m_coinbase_txns[0]}, /*output_values=*/ {10 * COIN}); + pool.addUnchecked(entry.Fee(low_fee).FromTx(low_tx)); + + const auto entry_low = pool.GetIter(low_tx->GetHash()).value(); + const auto low_size = entry_low->GetTxSize(); + + std::vector old_diagram, new_diagram; + + // Replacement of size 1 + const auto replace_one{pool.CalculateFeerateDiagramsForRBF(/*replacement_fees=*/0, /*replacement_vsize=*/1, {entry_low}, {entry_low})}; + BOOST_CHECK(replace_one.has_value()); + old_diagram = replace_one->first; + new_diagram = replace_one->second; + BOOST_CHECK(old_diagram.size() == 2); + BOOST_CHECK(new_diagram.size() == 2); + BOOST_CHECK(old_diagram[0] == FeeFrac(0, 0)); + BOOST_CHECK(old_diagram[1] == FeeFrac(low_fee, low_size)); + BOOST_CHECK(new_diagram[0] == FeeFrac(0, 0)); + BOOST_CHECK(new_diagram[1] == 
FeeFrac(0, 1)); + + // Non-zero replacement fee/size + const auto replace_one_fee{pool.CalculateFeerateDiagramsForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {entry_low}, {entry_low})}; + BOOST_CHECK(replace_one_fee.has_value()); + old_diagram = replace_one_fee->first; + new_diagram = replace_one_fee->second; + BOOST_CHECK(old_diagram.size() == 2); + BOOST_CHECK(new_diagram.size() == 2); + BOOST_CHECK(old_diagram[0] == FeeFrac(0, 0)); + BOOST_CHECK(old_diagram[1] == FeeFrac(low_fee, low_size)); + BOOST_CHECK(new_diagram[0] == FeeFrac(0, 0)); + BOOST_CHECK(new_diagram[1] == FeeFrac(high_fee, low_size)); + + // Add a second transaction to the cluster that will make a single chunk, to be evicted in the RBF + const auto high_tx = make_tx(/*inputs=*/ {low_tx}, /*output_values=*/ {995 * CENT}); + pool.addUnchecked(entry.Fee(high_fee).FromTx(high_tx)); + const auto entry_high = pool.GetIter(high_tx->GetHash()).value(); + const auto high_size = entry_high->GetTxSize(); + + const auto replace_single_chunk{pool.CalculateFeerateDiagramsForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {entry_low}, {entry_low, entry_high})}; + BOOST_CHECK(replace_single_chunk.has_value()); + old_diagram = replace_single_chunk->first; + new_diagram = replace_single_chunk->second; + BOOST_CHECK(old_diagram.size() == 2); + BOOST_CHECK(new_diagram.size() == 2); + BOOST_CHECK(old_diagram[0] == FeeFrac(0, 0)); + BOOST_CHECK(old_diagram[1] == FeeFrac(low_fee + high_fee, low_size + high_size)); + BOOST_CHECK(new_diagram[0] == FeeFrac(0, 0)); + BOOST_CHECK(new_diagram[1] == FeeFrac(high_fee, low_size)); + + // Conflict with the 2nd tx, resulting in new diagram with three entries + const auto replace_cpfp_child{pool.CalculateFeerateDiagramsForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {entry_high}, {entry_high})}; + BOOST_CHECK(replace_cpfp_child.has_value()); + old_diagram = replace_cpfp_child->first; + new_diagram = 
replace_cpfp_child->second; + BOOST_CHECK(old_diagram.size() == 2); + BOOST_CHECK(new_diagram.size() == 3); + BOOST_CHECK(old_diagram[0] == FeeFrac(0, 0)); + BOOST_CHECK(old_diagram[1] == FeeFrac(low_fee + high_fee, low_size + high_size)); + BOOST_CHECK(new_diagram[0] == FeeFrac(0, 0)); + BOOST_CHECK(new_diagram[1] == FeeFrac(high_fee, low_size)); + BOOST_CHECK(new_diagram[2] == FeeFrac(low_fee + high_fee, low_size + low_size)); + + // third transaction causes the topology check to fail + const auto normal_tx = make_tx(/*inputs=*/ {high_tx}, /*output_values=*/ {995 * CENT}); + pool.addUnchecked(entry.Fee(normal_fee).FromTx(normal_tx)); + const auto entry_normal = pool.GetIter(normal_tx->GetHash()).value(); + const auto normal_size = entry_normal->GetTxSize(); + + const auto replace_too_large{pool.CalculateFeerateDiagramsForRBF(/*replacement_fees=*/normal_fee, /*replacement_vsize=*/normal_size, {entry_low}, {entry_low, entry_high, entry_normal})}; + BOOST_CHECK(!replace_too_large.has_value()); + BOOST_CHECK_EQUAL(util::ErrorString(replace_too_large).original, strprintf("%s has 2 descendants, max 1 allowed", low_tx->GetHash().GetHex())); + old_diagram.clear(); + new_diagram.clear(); + + // Make a size 2 cluster that is itself two chunks; evict both txns + const auto high_tx_2 = make_tx(/*inputs=*/ {m_coinbase_txns[1]}, /*output_values=*/ {10 * COIN}); + pool.addUnchecked(entry.Fee(high_fee).FromTx(high_tx_2)); + const auto entry_high_2 = pool.GetIter(high_tx_2->GetHash()).value(); + const auto high_size_2 = entry_high_2->GetTxSize(); + + const auto low_tx_2 = make_tx(/*inputs=*/ {high_tx_2}, /*output_values=*/ {9 * COIN}); + pool.addUnchecked(entry.Fee(low_fee).FromTx(low_tx_2)); + const auto entry_low_2 = pool.GetIter(low_tx_2->GetHash()).value(); + const auto low_size_2 = entry_low_2->GetTxSize(); + + const auto replace_two_chunks_single_cluster{pool.CalculateFeerateDiagramsForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {entry_high_2}, 
{entry_high_2, entry_low_2})}; + BOOST_CHECK(replace_two_chunks_single_cluster.has_value()); + old_diagram = replace_two_chunks_single_cluster->first; + new_diagram = replace_two_chunks_single_cluster->second; + BOOST_CHECK(old_diagram.size() == 3); + BOOST_CHECK(new_diagram.size() == 2); + BOOST_CHECK(old_diagram[0] == FeeFrac(0, 0)); + BOOST_CHECK(old_diagram[1] == FeeFrac(high_fee, high_size_2)); + BOOST_CHECK(old_diagram[2] == FeeFrac(low_fee + high_fee, low_size_2 + high_size_2)); + BOOST_CHECK(new_diagram[0] == FeeFrac(0, 0)); + BOOST_CHECK(new_diagram[1] == FeeFrac(high_fee, low_size_2)); + + // You can have more than two direct conflicts if the there are multiple effected clusters, all of size 2 or less + const auto conflict_1 = make_tx(/*inputs=*/ {m_coinbase_txns[2]}, /*output_values=*/ {10 * COIN}); + pool.addUnchecked(entry.Fee(low_fee).FromTx(conflict_1)); + const auto conflict_1_entry = pool.GetIter(conflict_1->GetHash()).value(); + + const auto conflict_2 = make_tx(/*inputs=*/ {m_coinbase_txns[3]}, /*output_values=*/ {10 * COIN}); + pool.addUnchecked(entry.Fee(low_fee).FromTx(conflict_2)); + const auto conflict_2_entry = pool.GetIter(conflict_2->GetHash()).value(); + + const auto conflict_3 = make_tx(/*inputs=*/ {m_coinbase_txns[4]}, /*output_values=*/ {10 * COIN}); + pool.addUnchecked(entry.Fee(low_fee).FromTx(conflict_3)); + const auto conflict_3_entry = pool.GetIter(conflict_3->GetHash()).value(); + + const auto replace_multiple_clusters{pool.CalculateFeerateDiagramsForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {conflict_1_entry, conflict_2_entry, conflict_3_entry}, {conflict_1_entry, conflict_2_entry, conflict_3_entry})}; + + BOOST_CHECK(replace_multiple_clusters.has_value()); + old_diagram = replace_multiple_clusters->first; + new_diagram = replace_multiple_clusters->second; + BOOST_CHECK(old_diagram.size() == 4); + BOOST_CHECK(new_diagram.size() == 2); + + // Add a child transaction to conflict_1 and make it cluster size 
2, still one chunk due to same feerate + const auto conflict_1_child = make_tx(/*inputs=*/{conflict_1}, /*output_values=*/ {995 * CENT}); + pool.addUnchecked(entry.Fee(low_fee).FromTx(conflict_1_child)); + const auto conflict_1_child_entry = pool.GetIter(conflict_1_child->GetHash()).value(); + + const auto replace_multiple_clusters_2{pool.CalculateFeerateDiagramsForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {conflict_1_entry, conflict_2_entry, conflict_3_entry}, {conflict_1_entry, conflict_2_entry, conflict_3_entry, conflict_1_child_entry})}; + + BOOST_CHECK(replace_multiple_clusters_2.has_value()); + old_diagram = replace_multiple_clusters_2->first; + new_diagram = replace_multiple_clusters_2->second; + BOOST_CHECK(old_diagram.size() == 4); + BOOST_CHECK(new_diagram.size() == 2); + old_diagram.clear(); + new_diagram.clear(); + + // Add another descendant to conflict_1, making the cluster size > 2 should fail at this point. + const auto conflict_1_grand_child = make_tx(/*inputs=*/{conflict_1_child}, /*output_values=*/ {995 * CENT}); + pool.addUnchecked(entry.Fee(high_fee).FromTx(conflict_1_grand_child)); + const auto conflict_1_grand_child_entry = pool.GetIter(conflict_1_child->GetHash()).value(); + + const auto replace_cluster_size_3{pool.CalculateFeerateDiagramsForRBF(/*replacement_fees=*/high_fee, /*replacement_vsize=*/low_size, {conflict_1_entry, conflict_2_entry, conflict_3_entry}, {conflict_1_entry, conflict_2_entry, conflict_3_entry, conflict_1_child_entry, conflict_1_grand_child_entry})}; + + BOOST_CHECK(!replace_cluster_size_3.has_value()); + BOOST_CHECK_EQUAL(util::ErrorString(replace_cluster_size_3).original, strprintf("%s has 2 descendants, max 1 allowed", conflict_1->GetHash().GetHex())); + BOOST_CHECK(old_diagram.empty()); + BOOST_CHECK(new_diagram.empty()); } BOOST_AUTO_TEST_CASE(feerate_diagram_utilities) From f252e687ec94b6ccafb5bc44b7df3daeb473fdea Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Mon, 5 Feb 2024 16:16:14 
-0500 Subject: [PATCH 43/79] assumeutxo test: Add RPC test for fake nTx and nChainTx values The fake values will be removed in an upcoming commit, so it is useful to have test coverage confirming the change in behavior. --- test/functional/feature_assumeutxo.py | 42 ++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py index a9ed4a09cea9e..a29ee8be8b1a4 100755 --- a/test/functional/feature_assumeutxo.py +++ b/test/functional/feature_assumeutxo.py @@ -34,6 +34,7 @@ """ from shutil import rmtree +from dataclasses import dataclass from test_framework.messages import tx_from_hex from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( @@ -174,10 +175,18 @@ def run_test(self): # Generate a series of blocks that `n0` will have in the snapshot, # but that n1 and n2 don't yet see. + assert n0.getblockcount() == START_HEIGHT + blocks = {START_HEIGHT: Block(n0.getbestblockhash(), 1, START_HEIGHT + 1)} for i in range(100): + block_tx = 1 if i % 3 == 0: self.mini_wallet.send_self_transfer(from_node=n0) + block_tx += 1 self.generate(n0, nblocks=1, sync_fun=self.no_op) + height = n0.getblockcount() + hash = n0.getbestblockhash() + blocks[height] = Block(hash, block_tx, blocks[height-1].chain_tx + block_tx) + self.log.info("-- Testing assumeutxo + some indexes + pruning") @@ -207,7 +216,7 @@ def run_test(self): assert_equal( dump_output['txoutset_hash'], "a4bf3407ccb2cc0145c49ebba8fa91199f8a3903daf0883875941497d2493c27") - assert_equal(dump_output["nchaintx"], 334) + assert_equal(dump_output["nchaintx"], blocks[SNAPSHOT_BASE_HEIGHT].chain_tx) assert_equal(n0.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT) # Mine more blocks on top of the snapshot that n1 hasn't yet seen. 
This @@ -228,6 +237,30 @@ def run_test(self): assert_equal(loaded['coins_loaded'], SNAPSHOT_BASE_HEIGHT) assert_equal(loaded['base_height'], SNAPSHOT_BASE_HEIGHT) + def check_tx_counts(final: bool) -> None: + """Check nTx and nChainTx intermediate values right after loading + the snapshot, and final values after the snapshot is validated.""" + for height, block in blocks.items(): + tx = n1.getblockheader(block.hash)["nTx"] + chain_tx = n1.getchaintxstats(nblocks=1, blockhash=block.hash)["txcount"] + + # Intermediate nTx of the starting block should be real, but nTx of + # later blocks should be fake 1 values set by snapshot loading code. + if final or height == START_HEIGHT: + assert_equal(tx, block.tx) + else: + assert_equal(tx, 1) + + # Intermediate nChainTx of the starting block and snapshot block + # should be real, but others will be fake values set by snapshot + # loading code. + if final or height in (START_HEIGHT, SNAPSHOT_BASE_HEIGHT): + assert_equal(chain_tx, block.chain_tx) + else: + assert_equal(chain_tx, height + 1) + + check_tx_counts(final=False) + normal, snapshot = n1.getchainstates()["chainstates"] assert_equal(normal['blocks'], START_HEIGHT) assert_equal(normal.get('snapshot_blockhash'), None) @@ -291,6 +324,8 @@ def run_test(self): } self.wait_until(lambda: n1.getindexinfo() == completed_idx_state) + self.log.info("Re-check nTx and nChainTx values") + check_tx_counts(final=True) for i in (0, 1): n = self.nodes[i] @@ -365,6 +400,11 @@ def run_test(self): self.connect_nodes(0, 2) self.wait_until(lambda: n2.getblockcount() == FINAL_HEIGHT) +@dataclass +class Block: + hash: str + tx: int + chain_tx: int if __name__ == '__main__': AssumeutxoTest().main() From 63e8fc912c21a2f5b47e8eab10fb13c604afed85 Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Mon, 5 Feb 2024 16:16:14 -0500 Subject: [PATCH 44/79] ci: add getchaintxstats ubsan suppressions Add ubsan suppressions for integer overflows in the getchaintxstats RPC. 
getchaintxstats line "int nTxDiff = pindex->nChainTx - past_block.nChainTx" can trigger ubsan integer overflows when assumeutxo snapshots are loaded, from subtracting unsigned values and assigning the result to a signed int. The overflow behavior probably exists in current code but is hard to trigger because it would require calling getchaintxstats at the right time with specific parameters as background blocks are being downloaded. But the overflow behavior becomes easier to trigger in the upcoming commit removing fake nChainTx values, so a suppression needs to be added before then for CI to pass. getchaintxstats should probably be improved separately in another PR to not need this suppression, and handle edge cases and missing nChainTx values more carefully. --- test/sanitizer_suppressions/ubsan | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan index 2a2f7ca470682..482667a26a2d0 100644 --- a/test/sanitizer_suppressions/ubsan +++ b/test/sanitizer_suppressions/ubsan @@ -51,6 +51,7 @@ unsigned-integer-overflow:CCoinsViewCache::Uncache unsigned-integer-overflow:CompressAmount unsigned-integer-overflow:DecompressAmount unsigned-integer-overflow:crypto/ +unsigned-integer-overflow:getchaintxstats* unsigned-integer-overflow:MurmurHash3 unsigned-integer-overflow:CBlockPolicyEstimator::processBlockTx unsigned-integer-overflow:TxConfirmStats::EstimateMedianVal @@ -61,6 +62,7 @@ implicit-integer-sign-change:CBlockPolicyEstimator::processBlockTx implicit-integer-sign-change:SetStdinEcho implicit-integer-sign-change:compressor.h implicit-integer-sign-change:crypto/ +implicit-integer-sign-change:getchaintxstats* implicit-integer-sign-change:TxConfirmStats::removeTx implicit-integer-sign-change:prevector.h implicit-integer-sign-change:verify_flags From 0fd915ee6bef63bb360ccc5c039a3c11676c38e3 Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Thu, 8 Feb 2024 11:37:19 -0500 Subject: [PATCH 45/79] validation: 
Check GuessVerificationProgress is not called with disconnected block Use Assume macro as suggested https://github.com/bitcoin/bitcoin/pull/29370#discussion_r1479427801 --- src/validation.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/validation.cpp b/src/validation.cpp index 94d2680db749b..02f415101e432 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5286,6 +5286,12 @@ double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pin if (pindex == nullptr) return 0.0; + if (!Assume(pindex->nChainTx > 0)) { + LogWarning("Internal bug detected: block %d has unset nChainTx (%s %s). Please report this issue here: %s\n", + pindex->nHeight, PACKAGE_NAME, FormatFullVersion(), PACKAGE_BUGREPORT); + return 0.0; + } + int64_t nNow = time(nullptr); double fTxTotal; From 9b97d5bbf980d657a277c85d113c2ae3e870e0ec Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Fri, 2 Feb 2024 10:59:24 -0500 Subject: [PATCH 46/79] doc: Improve comments describing setBlockIndexCandidates checks The checks are changing slightly in the next commit, so try to explain the ones that exist to avoid confusion (https://github.com/bitcoin/bitcoin/pull/29370#discussion_r1499519079) --- src/validation.cpp | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 02f415101e432..4cb004c7cc114 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5126,19 +5126,42 @@ void ChainstateManager::CheckBlockIndex() // Chainstate-specific checks on setBlockIndexCandidates for (auto c : GetAll()) { if (c->m_chain.Tip() == nullptr) continue; + // Two main factors determine whether pindex is a candidate in + // setBlockIndexCandidates: + // + // - If pindex has less work than the chain tip, it should not be a + // candidate, and this will be asserted below. Otherwise it is a + // potential candidate. 
+ // + // - If pindex or one of its parent blocks never downloaded + // transactions (pindexFirstNeverProcessed is non-null), it should + // not be a candidate, and this will be asserted below. Otherwise + // it is a potential candidate. if (!CBlockIndexWorkComparator()(pindex, c->m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) { + // If pindex was detected as invalid (pindexFirstInvalid is + // non-null), it is not required to be in + // setBlockIndexCandidates. if (pindexFirstInvalid == nullptr) { - const bool is_active = c == &ActiveChainstate(); - // If this block sorts at least as good as the current tip and - // is valid and we have all data for its parents, it must be in - // setBlockIndexCandidates. m_chain.Tip() must also be there - // even if some data has been pruned. + // If pindex and all its parents downloaded transactions, + // and the transactions were not pruned (pindexFirstMissing + // is null), it is a potential candidate. The check + // excludes pruned blocks, because if any blocks were + // pruned between pindex the current chain tip, pindex will + // only temporarily be added to setBlockIndexCandidates, + // before being moved to m_blocks_unlinked. This check + // could be improved to verify that if all blocks between + // the chain tip and pindex have data, pindex must be a + // candidate. // + // If pindex is the chain tip, it also is a potential + // candidate. if ((pindexFirstMissing == nullptr || pindex == c->m_chain.Tip())) { - // The active chainstate should always have this block - // as a candidate, but a background chainstate should - // only have it if it is an ancestor of the snapshot base. - if (is_active || GetSnapshotBaseBlock()->GetAncestor(pindex->nHeight) == pindex) { + // If this chainstate is the active chainstate, pindex + // must be in setBlockIndexCandidates. 
Otherwise, this + // chainstate is a background validation chainstate, and + // pindex only needs to be added if it is an ancestor of + // the snapshot that is being validated. + if (c == &ActiveChainstate() || GetSnapshotBaseBlock()->GetAncestor(pindex->nHeight) == pindex) { assert(c->setBlockIndexCandidates.count(pindex)); } } From ef29c8b662309a438121a83f27fd7bdd1779700c Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Fri, 2 Feb 2024 10:59:24 -0500 Subject: [PATCH 47/79] assumeutxo: Get rid of faked nTx and nChainTx values The `PopulateAndValidateSnapshot` function introduced in f6e2da5fb7c6406c37612c838c998078ea8d2252 from #19806 has been setting fake `nTx` and `nChainTx` values that can show up in RPC results (see #29328) and make `CBlockIndex` state hard to reason about, because it is difficult to know whether the values are real or fake. Revert to previous behavior of setting `nTx` and `nChainTx` to 0 when the values are unknown, instead of faking them. This commit fixes at least two assert failures in the (pindex->nChainTx == pindex->nTx + prev_chain_tx) check that would happen previously. Tests for these failures are added separately in the next two commits. Compatibility note: This change could result in -checkblockindex failures if a snapshot was loaded by a previous version of Bitcoin Core and not fully validated, because fake nTx values will have been saved to the block index. It would be pretty easy to avoid these failures by adding some compatibility code to `LoadBlockIndex` and changing `nTx` values from 1 to 0 when they are fake (when `(pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS`), but a little simpler not to worry about being compatible in this case. 
--- src/chain.h | 36 +++-- src/test/util/chainstate.h | 17 ++- .../validation_chainstatemanager_tests.cpp | 2 + src/validation.cpp | 139 +++++++++++------- test/functional/feature_assumeutxo.py | 11 +- 5 files changed, 118 insertions(+), 87 deletions(-) diff --git a/src/chain.h b/src/chain.h index fa165a4aa732e..7faeb25088cae 100644 --- a/src/chain.h +++ b/src/chain.h @@ -98,16 +98,20 @@ enum BlockStatus : uint32_t { /** * Only first tx is coinbase, 2 <= coinbase input script length <= 100, transactions valid, no duplicate txids, - * sigops, size, merkle root. Implies all parents are at least TREE but not necessarily TRANSACTIONS. When all - * parent blocks also have TRANSACTIONS, CBlockIndex::nChainTx will be set. + * sigops, size, merkle root. Implies all parents are at least TREE but not necessarily TRANSACTIONS. + * + * If a block's validity is at least VALID_TRANSACTIONS, CBlockIndex::nTx will be set. If a block and all previous + * blocks back to the genesis block or an assumeutxo snapshot block are at least VALID_TRANSACTIONS, + * CBlockIndex::nChainTx will be set. */ BLOCK_VALID_TRANSACTIONS = 3, //! Outputs do not overspend inputs, no double spends, coinbase output ok, no immature coinbase spends, BIP30. - //! Implies all parents are either at least VALID_CHAIN, or are ASSUMED_VALID + //! Implies all previous blocks back to the genesis block or an assumeutxo snapshot block are at least VALID_CHAIN. BLOCK_VALID_CHAIN = 4, - //! Scripts & signatures ok. Implies all parents are either at least VALID_SCRIPTS, or are ASSUMED_VALID. + //! Scripts & signatures ok. Implies all previous blocks back to the genesis block or an assumeutxo snapshot block + //! are at least VALID_SCRIPTS. BLOCK_VALID_SCRIPTS = 5, //! All validity bits. @@ -173,21 +177,16 @@ class CBlockIndex //! (memory only) Total amount of work (expected number of hashes) in the chain up to and including this block arith_uint256 nChainWork{}; - //! Number of transactions in this block. + //! 
Number of transactions in this block. This will be nonzero if the block + //! reached the VALID_TRANSACTIONS level, and zero otherwise. //! Note: in a potential headers-first mode, this number cannot be relied upon - //! Note: this value is faked during UTXO snapshot load to ensure that - //! LoadBlockIndex() will load index entries for blocks that we lack data for. - //! @sa ActivateSnapshot unsigned int nTx{0}; //! (memory only) Number of transactions in the chain up to and including this block. - //! This value will be non-zero only if and only if transactions for this block and all its parents are available. + //! This value will be non-zero if this block and all previous blocks back + //! to the genesis block or an assumeutxo snapshot block have reached the + //! VALID_TRANSACTIONS level. //! Change to 64-bit type before 2024 (assuming worst case of 60 byte transactions). - //! - //! Note: this value is faked during use of a UTXO snapshot because we don't - //! have the underlying block data available during snapshot load. - //! @sa AssumeutxoData - //! @sa ActivateSnapshot unsigned int nChainTx{0}; //! Verification status of this block. See enum BlockStatus @@ -262,15 +261,14 @@ class CBlockIndex } /** - * Check whether this block's and all previous blocks' transactions have been - * downloaded (and stored to disk) at some point. + * Check whether this block and all previous blocks back to the genesis block or an assumeutxo snapshot block have + * reached VALID_TRANSACTIONS and had transactions downloaded (and stored to disk) at some point. * * Does not imply the transactions are consensus-valid (ConnectTip might fail) * Does not imply the transactions are still stored on disk. (IsBlockPruned might return true) * - * Note that this will be true for the snapshot base block, if one is loaded (and - * all subsequent assumed-valid blocks) since its nChainTx value will have been set - * manually based on the related AssumeutxoData entry. 
+ * Note that this will be true for the snapshot base block, if one is loaded, since its nChainTx value will have + * been set manually based on the related AssumeutxoData entry. */ bool HaveNumChainTxs() const { return nChainTx != 0; } diff --git a/src/test/util/chainstate.h b/src/test/util/chainstate.h index e2a88eacddabf..ff95e64b7ed38 100644 --- a/src/test/util/chainstate.h +++ b/src/test/util/chainstate.h @@ -91,13 +91,16 @@ CreateAndActivateUTXOSnapshot( // these blocks instead CBlockIndex *pindex = orig_tip; while (pindex && pindex != chain.m_chain.Tip()) { - pindex->nStatus &= ~BLOCK_HAVE_DATA; - pindex->nStatus &= ~BLOCK_HAVE_UNDO; - // We have to set the ASSUMED_VALID flag, because otherwise it - // would not be possible to have a block index entry without HAVE_DATA - // and with nTx > 0 (since we aren't setting the pruned flag); - // see CheckBlockIndex(). - pindex->nStatus |= BLOCK_ASSUMED_VALID; + // Remove all data and validity flags by just setting + // BLOCK_VALID_TREE. Also reset transaction counts and sequence + // ids that are set when blocks are received, to make test setup + // more realistic and satisfy consistency checks in + // CheckBlockIndex(). 
+ assert(pindex->IsValid(BlockStatus::BLOCK_VALID_TREE)); + pindex->nStatus = BlockStatus::BLOCK_VALID_TREE; + pindex->nTx = 0; + pindex->nChainTx = 0; + pindex->nSequenceId = 0; pindex = pindex->pprev; } } diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 4bbab1cdcd2ad..26f9ab59a6a16 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -464,6 +464,8 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) // Blocks with heights in range [91, 110] are marked ASSUMED_VALID if (i < last_assumed_valid_idx && i >= assumed_valid_start_idx) { index->nStatus = BlockStatus::BLOCK_VALID_TREE | BlockStatus::BLOCK_ASSUMED_VALID; + index->nTx = 0; + index->nChainTx = 0; } ++num_indexes; diff --git a/src/validation.cpp b/src/validation.cpp index 4cb004c7cc114..f27f47aea0383 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -3660,7 +3660,18 @@ void ChainstateManager::ReceivedBlockTransactions(const CBlock& block, CBlockInd { AssertLockHeld(cs_main); pindexNew->nTx = block.vtx.size(); - pindexNew->nChainTx = 0; + // Typically nChainTx will be 0 at this point, but it can be nonzero if this + // is a pruned block which is being downloaded again, or if this is an + // assumeutxo snapshot block which has a hardcoded nChainTx value from the + // snapshot metadata. If the pindex is not the snapshot block and the + // nChainTx value is not zero, assert that value is actually correct. + auto prev_tx_sum = [](CBlockIndex& block) { return block.nTx + (block.pprev ? block.pprev->nChainTx : 0); }; + if (!Assume(pindexNew->nChainTx == 0 || pindexNew->nChainTx == prev_tx_sum(*pindexNew) || + pindexNew == GetSnapshotBaseBlock())) { + LogWarning("Internal bug detected: block %d has unexpected nChainTx %i that should be %i (%s %s). 
Please report this issue here: %s\n", + pindexNew->nHeight, pindexNew->nChainTx, prev_tx_sum(*pindexNew), PACKAGE_NAME, FormatFullVersion(), PACKAGE_BUGREPORT); + pindexNew->nChainTx = 0; + } pindexNew->nFile = pos.nFile; pindexNew->nDataPos = pos.nPos; pindexNew->nUndoPos = 0; @@ -3680,7 +3691,15 @@ void ChainstateManager::ReceivedBlockTransactions(const CBlock& block, CBlockInd while (!queue.empty()) { CBlockIndex *pindex = queue.front(); queue.pop_front(); - pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx; + // Before setting nChainTx, assert that it is 0 or already set to + // the correct value. This assert will fail after receiving the + // assumeutxo snapshot block if assumeutxo snapshot metadata has an + // incorrect hardcoded AssumeutxoData::nChainTx value. + if (!Assume(pindex->nChainTx == 0 || pindex->nChainTx == prev_tx_sum(*pindex))) { + LogWarning("Internal bug detected: block %d has unexpected nChainTx %i that should be %i (%s %s). Please report this issue here: %s\n", + pindex->nHeight, pindex->nChainTx, prev_tx_sum(*pindex), PACKAGE_NAME, FormatFullVersion(), PACKAGE_BUGREPORT); + } + pindex->nChainTx = prev_tx_sum(*pindex); pindex->nSequenceId = nBlockSequenceId++; for (Chainstate *c : GetAll()) { c->TryAddBlockIndexCandidate(pindex); @@ -5023,13 +5042,30 @@ void ChainstateManager::CheckBlockIndex() size_t nNodes = 0; int nHeight = 0; CBlockIndex* pindexFirstInvalid = nullptr; // Oldest ancestor of pindex which is invalid. - CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA. - CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0. + CBlockIndex* pindexFirstMissing = nullptr; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA, since assumeutxo snapshot if used. + CBlockIndex* pindexFirstNeverProcessed = nullptr; // Oldest ancestor of pindex for which nTx == 0, since assumeutxo snapshot if used. 
CBlockIndex* pindexFirstNotTreeValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not). - CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not). - CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not). - CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not). + CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not), since assumeutxo snapshot if used. + CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not), since assumeutxo snapshot if used. + CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not), since assumeutxo snapshot if used. CBlockIndex* pindexFirstAssumeValid = nullptr; // Oldest ancestor of pindex which has BLOCK_ASSUMED_VALID + + // After checking an assumeutxo snapshot block, reset pindexFirst pointers + // to earlier blocks that have not been downloaded or validated yet, so + // checks for later blocks can assume the earlier blocks were validated and + // be stricter, testing for more requirements. 
+ const CBlockIndex* snap_base{GetSnapshotBaseBlock()}; + CBlockIndex *snap_first_missing{}, *snap_first_notx{}, *snap_first_notv{}, *snap_first_nocv{}, *snap_first_nosv{}; + auto snap_update_firsts = [&] { + if (pindex == snap_base) { + std::swap(snap_first_missing, pindexFirstMissing); + std::swap(snap_first_notx, pindexFirstNeverProcessed); + std::swap(snap_first_notv, pindexFirstNotTransactionsValid); + std::swap(snap_first_nocv, pindexFirstNotChainValid); + std::swap(snap_first_nosv, pindexFirstNotScriptsValid); + } + }; + while (pindex != nullptr) { nNodes++; if (pindexFirstAssumeValid == nullptr && pindex->nStatus & BLOCK_ASSUMED_VALID) pindexFirstAssumeValid = pindex; @@ -5040,10 +5076,7 @@ void ChainstateManager::CheckBlockIndex() if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) pindexFirstNeverProcessed = pindex; if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex; - if (pindex->pprev != nullptr && !pindex->IsAssumedValid()) { - // Skip validity flag checks for BLOCK_ASSUMED_VALID index entries, since these - // *_VALID_MASK flags will not be present for index entries we are temporarily assuming - // valid. + if (pindex->pprev != nullptr) { if (pindexFirstNotTransactionsValid == nullptr && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) { pindexFirstNotTransactionsValid = pindex; @@ -5073,36 +5106,26 @@ void ChainstateManager::CheckBlockIndex() if (!pindex->HaveNumChainTxs()) assert(pindex->nSequenceId <= 0); // nSequenceId can't be set positive for blocks that aren't linked (negative is used for preciousblock) // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred). // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred. - // Unless these indexes are assumed valid and pending block download on a - // background chainstate. 
- if (!m_blockman.m_have_pruned && !pindex->IsAssumedValid()) { + if (!m_blockman.m_have_pruned) { // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0 assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0)); - if (pindexFirstAssumeValid == nullptr) { - // If we've got some assume valid blocks, then we might have - // missing blocks (not HAVE_DATA) but still treat them as - // having been processed (with a fake nTx value). Otherwise, we - // can assert that these are the same. - assert(pindexFirstMissing == pindexFirstNeverProcessed); - } + assert(pindexFirstMissing == pindexFirstNeverProcessed); } else { // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0 if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0); } if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA); if (pindex->IsAssumedValid()) { - // Assumed-valid blocks should have some nTx value. - assert(pindex->nTx > 0); // Assumed-valid blocks should connect to the main chain. assert((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE); - } else { - // Otherwise there should only be an nTx value if we have - // actually seen a block's transactions. - assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent. } + // There should only be an nTx value if we have + // actually seen a block's transactions. + assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent. // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to HaveNumChainTxs(). - assert((pindexFirstNeverProcessed == nullptr) == pindex->HaveNumChainTxs()); - assert((pindexFirstNotTransactionsValid == nullptr) == pindex->HaveNumChainTxs()); + // HaveNumChainTxs will also be set in the assumeutxo snapshot block from snapshot metadata. 
+ assert((pindexFirstNeverProcessed == nullptr || pindex == snap_base) == pindex->HaveNumChainTxs()); + assert((pindexFirstNotTransactionsValid == nullptr || pindex == snap_base) == pindex->HaveNumChainTxs()); assert(pindex->nHeight == nHeight); // nHeight must be consistent. assert(pindex->pprev == nullptr || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's. assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks. @@ -5115,14 +5138,18 @@ void ChainstateManager::CheckBlockIndex() assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents. } // Make sure nChainTx sum is correctly computed. - unsigned int prev_chain_tx = pindex->pprev ? pindex->pprev->nChainTx : 0; - assert((pindex->nChainTx == pindex->nTx + prev_chain_tx) - // Transaction may be completely unset - happens if only the header was accepted but the block hasn't been processed. - || (pindex->nChainTx == 0 && pindex->nTx == 0) - // nChainTx may be unset, but nTx set (if a block has been accepted, but one of its predecessors hasn't been processed yet) - || (pindex->nChainTx == 0 && prev_chain_tx == 0 && pindex->pprev) - // Transaction counts prior to snapshot are unknown. - || pindex->IsAssumedValid()); + if (!pindex->pprev) { + // If no previous block, nTx and nChainTx must be the same. + assert(pindex->nChainTx == pindex->nTx); + } else if (pindex->pprev->nChainTx > 0 && pindex->nTx > 0) { + // If previous nChainTx is set and number of transactions in block is known, sum must be set. + assert(pindex->nChainTx == pindex->nTx + pindex->pprev->nChainTx); + } else { + // Otherwise nChainTx should only be set if this is a snapshot + // block, and must be set if it is. 
+ assert((pindex->nChainTx != 0) == (pindex == snap_base)); + } + // Chainstate-specific checks on setBlockIndexCandidates for (auto c : GetAll()) { if (c->m_chain.Tip() == nullptr) continue; @@ -5133,16 +5160,19 @@ void ChainstateManager::CheckBlockIndex() // candidate, and this will be asserted below. Otherwise it is a // potential candidate. // - // - If pindex or one of its parent blocks never downloaded - // transactions (pindexFirstNeverProcessed is non-null), it should - // not be a candidate, and this will be asserted below. Otherwise - // it is a potential candidate. - if (!CBlockIndexWorkComparator()(pindex, c->m_chain.Tip()) && pindexFirstNeverProcessed == nullptr) { + // - If pindex or one of its parent blocks back to the genesis block + // or an assumeutxo snapshot never downloaded transactions + // (pindexFirstNeverProcessed is non-null), it should not be a + // candidate, and this will be asserted below. The only exception + // is if pindex itself is an assumeutxo snapshot block. Then it is + // also a potential candidate. + if (!CBlockIndexWorkComparator()(pindex, c->m_chain.Tip()) && (pindexFirstNeverProcessed == nullptr || pindex == snap_base)) { // If pindex was detected as invalid (pindexFirstInvalid is // non-null), it is not required to be in // setBlockIndexCandidates. if (pindexFirstInvalid == nullptr) { - // If pindex and all its parents downloaded transactions, + // If pindex and all its parents back to the genesis block + // or an assumeutxo snapshot block downloaded transactions, // and the transactions were not pruned (pindexFirstMissing // is null), it is a potential candidate. The check // excludes pruned blocks, because if any blocks were @@ -5155,13 +5185,17 @@ void ChainstateManager::CheckBlockIndex() // // If pindex is the chain tip, it also is a potential // candidate. 
- if ((pindexFirstMissing == nullptr || pindex == c->m_chain.Tip())) { + // + // If the chainstate was loaded from a snapshot and pindex + // is the base of the snapshot, pindex is also a potential + // candidate. + if (pindexFirstMissing == nullptr || pindex == c->m_chain.Tip() || pindex == c->SnapshotBase()) { // If this chainstate is the active chainstate, pindex // must be in setBlockIndexCandidates. Otherwise, this // chainstate is a background validation chainstate, and // pindex only needs to be added if it is an ancestor of // the snapshot that is being validated. - if (c == &ActiveChainstate() || GetSnapshotBaseBlock()->GetAncestor(pindex->nHeight) == pindex) { + if (c == &ActiveChainstate() || snap_base->GetAncestor(pindex->nHeight) == pindex) { assert(c->setBlockIndexCandidates.count(pindex)); } } @@ -5192,7 +5226,7 @@ void ChainstateManager::CheckBlockIndex() if (pindexFirstMissing == nullptr) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in m_blocks_unlinked. if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == nullptr && pindexFirstMissing != nullptr) { // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent. - assert(m_blockman.m_have_pruned || pindexFirstAssumeValid != nullptr); // We must have pruned, or else we're using a snapshot (causing us to have faked the received data for some parent(s)). 
+ assert(m_blockman.m_have_pruned); // This block may have entered m_blocks_unlinked if: // - it has a descendant that at some point had more work than the // tip, and @@ -5205,7 +5239,7 @@ void ChainstateManager::CheckBlockIndex() const bool is_active = c == &ActiveChainstate(); if (!CBlockIndexWorkComparator()(pindex, c->m_chain.Tip()) && c->setBlockIndexCandidates.count(pindex) == 0) { if (pindexFirstInvalid == nullptr) { - if (is_active || GetSnapshotBaseBlock()->GetAncestor(pindex->nHeight) == pindex) { + if (is_active || snap_base->GetAncestor(pindex->nHeight) == pindex) { assert(foundInUnlinked); } } @@ -5216,6 +5250,7 @@ void ChainstateManager::CheckBlockIndex() // End: actual consistency checks. // Try descending into the first subnode. + snap_update_firsts(); std::pair::iterator,std::multimap::iterator> range = forward.equal_range(pindex); if (range.first != range.second) { // A subnode was found. @@ -5227,6 +5262,7 @@ void ChainstateManager::CheckBlockIndex() // Move upwards until we reach a node of which we have not yet visited the last child. while (pindex) { // We are going to either move to a parent or a sibling of pindex. + snap_update_firsts(); // If pindex was the first with a certain property, unset the corresponding variable. if (pindex == pindexFirstInvalid) pindexFirstInvalid = nullptr; if (pindex == pindexFirstMissing) pindexFirstMissing = nullptr; @@ -5730,14 +5766,6 @@ bool ChainstateManager::PopulateAndValidateSnapshot( for (int i = AFTER_GENESIS_START; i <= snapshot_chainstate.m_chain.Height(); ++i) { index = snapshot_chainstate.m_chain[i]; - // Fake nTx so that LoadBlockIndex() loads assumed-valid CBlockIndex - // entries (among other things) - if (!index->nTx) { - index->nTx = 1; - } - // Fake nChainTx so that GuessVerificationProgress reports accurately - index->nChainTx = index->pprev->nChainTx + index->nTx; - // Mark unvalidated block index entries beneath the snapshot base block as assumed-valid. 
if (!index->IsValid(BLOCK_VALID_SCRIPTS)) { // This flag will be removed once the block is fully validated by a @@ -5760,6 +5788,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot( } assert(index); + assert(index == snapshot_start_block); index->nChainTx = au_data.nChainTx; snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block); diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py index a29ee8be8b1a4..27910c3909857 100755 --- a/test/functional/feature_assumeutxo.py +++ b/test/functional/feature_assumeutxo.py @@ -244,20 +244,19 @@ def check_tx_counts(final: bool) -> None: tx = n1.getblockheader(block.hash)["nTx"] chain_tx = n1.getchaintxstats(nblocks=1, blockhash=block.hash)["txcount"] - # Intermediate nTx of the starting block should be real, but nTx of - # later blocks should be fake 1 values set by snapshot loading code. + # Intermediate nTx of the starting block should be set, but nTx of + # later blocks should be 0 before they are downloaded. if final or height == START_HEIGHT: assert_equal(tx, block.tx) else: - assert_equal(tx, 1) + assert_equal(tx, 0) # Intermediate nChainTx of the starting block and snapshot block - # should be real, but others will be fake values set by snapshot - # loading code. + # should be set, but others should be 0 until they are downloaded. if final or height in (START_HEIGHT, SNAPSHOT_BASE_HEIGHT): assert_equal(chain_tx, block.chain_tx) else: - assert_equal(chain_tx, height + 1) + assert_equal(chain_tx, 0) check_tx_counts(final=False) From 0391458d767b842a7925785a7053400c0e1cb55a Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Fri, 2 Feb 2024 10:59:24 -0500 Subject: [PATCH 48/79] test: assumeutxo stale block CheckBlockIndex crash test Add a test for a CheckBlockIndex crash that would happen before previous "assumeutxo: Get rid of faked nTx and nChainTx values" commit. 
The crash was an assert failure in the (pindex->nChainTx == pindex->nTx + prev_chain_tx) check that would previously happen if a snapshot was loaded, and a block was submitted which forked from the chain before the snapshot block and after the last downloaded background chain block. This block would not be marked assumed-valid because it would not be an ancestor of the snapshot, and it would have nTx set, nChainTx unset, and prev->nChainTx set with a fake value, so the assert would fail. After the fix, prev->nChainTx is unset instead of being set to a fake value, so the assert succeeds. This test was originally posted by maflcko in https://github.com/bitcoin/bitcoin/issues/29261#issuecomment-1918947945 Co-authored-by: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> --- test/functional/feature_assumeutxo.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py index 27910c3909857..04de8b299e2e9 100755 --- a/test/functional/feature_assumeutxo.py +++ b/test/functional/feature_assumeutxo.py @@ -186,6 +186,14 @@ def run_test(self): height = n0.getblockcount() hash = n0.getbestblockhash() blocks[height] = Block(hash, block_tx, blocks[height-1].chain_tx + block_tx) + if i == 4: + # Create a stale block that forks off the main chain before the snapshot. 
+ temp_invalid = n0.getbestblockhash() + n0.invalidateblock(temp_invalid) + stale_hash = self.generateblock(n0, output="raw(aaaa)", transactions=[], sync_fun=self.no_op)["hash"] + n0.invalidateblock(stale_hash) + n0.reconsiderblock(temp_invalid) + stale_block = n0.getblock(stale_hash, 0) self.log.info("-- Testing assumeutxo + some indexes + pruning") @@ -270,6 +278,15 @@ def check_tx_counts(final: bool) -> None: assert_equal(n1.getblockchaininfo()["blocks"], SNAPSHOT_BASE_HEIGHT) + self.log.info("Submit a stale block that forked off the chain before the snapshot") + # Normally a block like this would not be downloaded, but if it is + # submitted early before the background chain catches up to the fork + # point, it winds up in m_blocks_unlinked and triggers a corner case + # that previously crashed CheckBlockIndex. + n1.submitblock(stale_block) + n1.getchaintips() + n1.getblock(stale_hash) + self.log.info("Submit a spending transaction for a snapshot chainstate coin to the mempool") # spend the coinbase output of the first block that is not available on node1 spend_coin_blockhash = n1.getblockhash(START_HEIGHT + 1) From ef174e9ed21c08f38e5d4b537b6decfd1f646db9 Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Fri, 2 Feb 2024 10:59:24 -0500 Subject: [PATCH 49/79] test: assumeutxo snapshot block CheckBlockIndex crash test Add a test for a CheckBlockIndex crash that would happen before previous "assumeutxo: Get rid of faked nTx and nChainTx values" commit. The crash was an assert failure in the (pindex->nChainTx == pindex->nTx + prev_chain_tx) check that would previously happen if the snapshot block was submitted after loading the snapshot and downloading a few blocks after the snapshot. In that case ReceivedBlockTransactions() previously would overwrite the nChainTx value of the submitted snapshot block with a fake value based on the previous block, so the (pindex->nChainTx == pindex->nTx + prev_chain_tx) check would later fail on the first block after the snapshot. 
This test was originally posted by Martin Zumsande in https://github.com/bitcoin/bitcoin/pull/29370#issuecomment-1974096225 Co-authored-by: Martin Zumsande --- test/functional/feature_assumeutxo.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py index 04de8b299e2e9..eb9ea65c4f23b 100755 --- a/test/functional/feature_assumeutxo.py +++ b/test/functional/feature_assumeutxo.py @@ -324,6 +324,16 @@ def check_tx_counts(final: bool) -> None: self.log.info("Restarted node before snapshot validation completed, reloading...") self.restart_node(1, extra_args=self.extra_args[1]) + + # Send snapshot block to n1 out of order. This makes the test less + # realistic because normally the snapshot block is one of the last + # blocks downloaded, but its useful to test because it triggers more + # corner cases in ReceivedBlockTransactions() and CheckBlockIndex() + # setting and testing nChainTx values, and it exposed previous bugs. + snapshot_hash = n0.getblockhash(SNAPSHOT_BASE_HEIGHT) + snapshot_block = n0.getblock(snapshot_hash, 0) + n1.submitblock(snapshot_block) + self.connect_nodes(0, 1) self.log.info(f"Ensuring snapshot chain syncs to tip. ({FINAL_HEIGHT})") From 9d9a7458a2570f7db56ab626b22010591089c312 Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Mon, 5 Feb 2024 17:10:27 -0500 Subject: [PATCH 50/79] assumeutxo: Remove BLOCK_ASSUMED_VALID flag Flag adds complexity and is not currently used for anything. 
--- doc/design/assumeutxo.md | 12 ++----- src/chain.h | 31 ++----------------- .../validation_chainstatemanager_tests.cpp | 19 +++--------- src/validation.cpp | 19 +++--------- src/validation.h | 7 +++-- 5 files changed, 17 insertions(+), 71 deletions(-) diff --git a/doc/design/assumeutxo.md b/doc/design/assumeutxo.md index abb623fc698f3..f527ac0f2da56 100644 --- a/doc/design/assumeutxo.md +++ b/doc/design/assumeutxo.md @@ -51,18 +51,12 @@ The utility script ## Design notes -- A new block index `nStatus` flag is introduced, `BLOCK_ASSUMED_VALID`, to mark block - index entries that are required to be assumed-valid by a chainstate created - from a UTXO snapshot. This flag is used as a way to modify certain - CheckBlockIndex() logic to account for index entries that are pending validation by a - chainstate running asynchronously in the background. - - The concept of UTXO snapshots is treated as an implementation detail that lives behind the ChainstateManager interface. The external presentation of the changes required to facilitate the use of UTXO snapshots is the understanding that there are - now certain regions of the chain that can be temporarily assumed to be valid (using - the nStatus flag mentioned above). In certain cases, e.g. wallet rescanning, this is - very similar to dealing with a pruned chain. + now certain regions of the chain that can be temporarily assumed to be valid. + In certain cases, e.g. wallet rescanning, this is very similar to dealing with + a pruned chain. Logic outside ChainstateManager should try not to know about snapshots, instead preferring to work in terms of more general states like assumed-valid. 
diff --git a/src/chain.h b/src/chain.h index 7faeb25088cae..bb70dbd8bcd68 100644 --- a/src/chain.h +++ b/src/chain.h @@ -128,21 +128,8 @@ enum BlockStatus : uint32_t { BLOCK_OPT_WITNESS = 128, //!< block data in blk*.dat was received with a witness-enforcing client - /** - * If ASSUMED_VALID is set, it means that this block has not been validated - * and has validity status less than VALID_SCRIPTS. Also that it may have - * descendant blocks with VALID_SCRIPTS set, because they can be validated - * based on an assumeutxo snapshot. - * - * When an assumeutxo snapshot is loaded, the ASSUMED_VALID flag is added to - * unvalidated blocks at the snapshot height and below. Then, as the background - * validation progresses, and these blocks are validated, the ASSUMED_VALID - * flags are removed. See `doc/design/assumeutxo.md` for details. - * - * This flag is only used to implement checks in CheckBlockIndex() and - * should not be used elsewhere. - */ - BLOCK_ASSUMED_VALID = 256, + BLOCK_STATUS_RESERVED = 256, //!< Unused flag that was previously set on assumeutxo snapshot blocks and their + //!< ancestors before they were validated, and unset when they were validated. }; /** The block chain is a tree shaped structure starting with the @@ -316,14 +303,6 @@ class CBlockIndex return ((nStatus & BLOCK_VALID_MASK) >= nUpTo); } - //! @returns true if the block is assumed-valid; this means it is queued to be - //! validated by a background chainstate. - bool IsAssumedValid() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main) - { - AssertLockHeld(::cs_main); - return nStatus & BLOCK_ASSUMED_VALID; - } - //! Raise the validity level of this block index entry. //! Returns true if the validity was changed. 
bool RaiseValidity(enum BlockStatus nUpTo) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) @@ -333,12 +312,6 @@ class CBlockIndex if (nStatus & BLOCK_FAILED_MASK) return false; if ((nStatus & BLOCK_VALID_MASK) < nUpTo) { - // If this block had been marked assumed-valid and we're raising - // its validity to a certain point, there is no longer an assumption. - if (nStatus & BLOCK_ASSUMED_VALID && nUpTo >= BLOCK_VALID_SCRIPTS) { - nStatus &= ~BLOCK_ASSUMED_VALID; - } - nStatus = (nStatus & ~BLOCK_VALID_MASK) | nUpTo; return true; } diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 26f9ab59a6a16..4bf66a55ebf91 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -276,9 +276,6 @@ struct SnapshotTestSetup : TestChain100Setup { BOOST_CHECK_EQUAL( *node::ReadSnapshotBaseBlockhash(found), *chainman.SnapshotBlockhash()); - - // Ensure that the genesis block was not marked assumed-valid. - BOOST_CHECK(!chainman.ActiveChain().Genesis()->IsAssumedValid()); } const auto& au_data = ::Params().AssumeutxoForHeight(snapshot_height); @@ -410,7 +407,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_activate_snapshot, SnapshotTestSetup) //! - First, verify that setBlockIndexCandidates is as expected when using a single, //! fully-validating chainstate. //! -//! - Then mark a region of the chain BLOCK_ASSUMED_VALID and introduce a second chainstate +//! - Then mark a region of the chain as missing data and introduce a second chainstate //! that will tolerate assumed-valid blocks. Run LoadBlockIndex() and ensure that the first //! chainstate only contains fully validated blocks and the other chainstate contains all blocks, //! except those marked assume-valid, because those entries don't HAVE_DATA. 
@@ -421,7 +418,6 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) Chainstate& cs1 = chainman.ActiveChainstate(); int num_indexes{0}; - int num_assumed_valid{0}; // Blocks in range [assumed_valid_start_idx, last_assumed_valid_idx) will be // marked as assumed-valid and not having data. const int expected_assumed_valid{20}; @@ -456,37 +452,30 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup) reload_all_block_indexes(); BOOST_CHECK_EQUAL(cs1.setBlockIndexCandidates.size(), 1); - // Mark some region of the chain assumed-valid, and remove the HAVE_DATA flag. + // Reset some region of the chain's nStatus, removing the HAVE_DATA flag. for (int i = 0; i <= cs1.m_chain.Height(); ++i) { LOCK(::cs_main); auto index = cs1.m_chain[i]; - // Blocks with heights in range [91, 110] are marked ASSUMED_VALID + // Blocks with heights in range [91, 110] are marked as missing data. if (i < last_assumed_valid_idx && i >= assumed_valid_start_idx) { - index->nStatus = BlockStatus::BLOCK_VALID_TREE | BlockStatus::BLOCK_ASSUMED_VALID; + index->nStatus = BlockStatus::BLOCK_VALID_TREE; index->nTx = 0; index->nChainTx = 0; } ++num_indexes; - if (index->IsAssumedValid()) ++num_assumed_valid; // Note the last fully-validated block as the expected validated tip. if (i == (assumed_valid_start_idx - 1)) { validated_tip = index; - BOOST_CHECK(!index->IsAssumedValid()); } // Note the last assumed valid block as the snapshot base if (i == last_assumed_valid_idx - 1) { assumed_base = index; - BOOST_CHECK(index->IsAssumedValid()); - } else if (i == last_assumed_valid_idx) { - BOOST_CHECK(!index->IsAssumedValid()); } } - BOOST_CHECK_EQUAL(expected_assumed_valid, num_assumed_valid); - // Note: cs2's tip is not set when ActivateExistingSnapshot is called. 
Chainstate& cs2 = WITH_LOCK(::cs_main, return chainman.ActivateExistingSnapshot(*assumed_base->phashBlock)); diff --git a/src/validation.cpp b/src/validation.cpp index f27f47aea0383..a01b3da9ca234 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5048,7 +5048,6 @@ void ChainstateManager::CheckBlockIndex() CBlockIndex* pindexFirstNotTransactionsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not), since assumeutxo snapshot if used. CBlockIndex* pindexFirstNotChainValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not), since assumeutxo snapshot if used. CBlockIndex* pindexFirstNotScriptsValid = nullptr; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not), since assumeutxo snapshot if used. - CBlockIndex* pindexFirstAssumeValid = nullptr; // Oldest ancestor of pindex which has BLOCK_ASSUMED_VALID // After checking an assumeutxo snapshot block, reset pindexFirst pointers // to earlier blocks that have not been downloaded or validated yet, so @@ -5068,7 +5067,6 @@ void ChainstateManager::CheckBlockIndex() while (pindex != nullptr) { nNodes++; - if (pindexFirstAssumeValid == nullptr && pindex->nStatus & BLOCK_ASSUMED_VALID) pindexFirstAssumeValid = pindex; if (pindexFirstInvalid == nullptr && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex; if (pindexFirstMissing == nullptr && !(pindex->nStatus & BLOCK_HAVE_DATA)) { pindexFirstMissing = pindex; @@ -5115,7 +5113,7 @@ void ChainstateManager::CheckBlockIndex() if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0); } if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA); - if (pindex->IsAssumedValid()) { + if (snap_base && snap_base->GetAncestor(pindex->nHeight) == pindex) { // Assumed-valid blocks should connect to the main chain. 
 assert((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE); } @@ -5271,7 +5269,6 @@ void ChainstateManager::CheckBlockIndex() if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = nullptr; if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = nullptr; if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = nullptr; - if (pindex == pindexFirstAssumeValid) pindexFirstAssumeValid = nullptr; // Find our parent. CBlockIndex* pindexPar = pindex->pprev; // Find which child we just visited. @@ -5757,22 +5754,14 @@ bool ChainstateManager::PopulateAndValidateSnapshot( // Fake various pieces of CBlockIndex state: CBlockIndex* index = nullptr; - // Don't make any modifications to the genesis block. - // This is especially important because we don't want to erroneously - // apply BLOCK_ASSUMED_VALID to genesis, which would happen if we didn't skip - // it here (since it apparently isn't BLOCK_VALID_SCRIPTS). + // Don't make any modifications to the genesis block since it shouldn't be + // necessary, and since the genesis block doesn't have normal flags like + // BLOCK_VALID_SCRIPTS set. constexpr int AFTER_GENESIS_START{1}; for (int i = AFTER_GENESIS_START; i <= snapshot_chainstate.m_chain.Height(); ++i) { index = snapshot_chainstate.m_chain[i]; - // Mark unvalidated block index entries beneath the snapshot base block as assumed-valid. - if (!index->IsValid(BLOCK_VALID_SCRIPTS)) { - // This flag will be removed once the block is fully validated by a - // background chainstate. - index->nStatus |= BLOCK_ASSUMED_VALID; - } - // Fake BLOCK_OPT_WITNESS so that Chainstate::NeedsRedownload() // won't ask to rewind the entire assumed-valid chain on startup. 
if (DeploymentActiveAt(*index, *this, Consensus::DEPLOYMENT_SEGWIT)) { diff --git a/src/validation.h b/src/validation.h index 71aac46f81247..57e0777a2aadd 100644 --- a/src/validation.h +++ b/src/validation.h @@ -583,9 +583,10 @@ class Chainstate const CBlockIndex* SnapshotBase() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** - * The set of all CBlockIndex entries with either BLOCK_VALID_TRANSACTIONS (for - * itself and all ancestors) *or* BLOCK_ASSUMED_VALID (if using background - * chainstates) and as good as our current tip or better. Entries may be failed, + * The set of all CBlockIndex entries that have as much work as our current + * tip or more, and transaction data needed to be validated (with + * BLOCK_VALID_TRANSACTIONS for each block and its parents back to the + * genesis block or an assumeutxo snapshot block). Entries may be failed, * though, and pruning nodes may be missing the data for the block. */ std::set setBlockIndexCandidates; From 6205466512d4b94d1e507a77ab2151425790d29f Mon Sep 17 00:00:00 2001 From: 0xb10c Date: Mon, 4 Dec 2023 15:56:40 +0100 Subject: [PATCH 51/79] rpc: "addpeeraddress tried" return error on failure When trying to add an address to the IP address manager tried table, it's first added to the new table and then moved to the tried table. Previously, adding a conflicting address to the address manager's tried table with test-only `addpeeraddress tried=true` RPC would return `{ "success": true }`. However, the address would not be added to the tried table, but would remain in the new table. This caused, e.g., issue 28964. This is fixed by returning `{ "success": false, "error": "failed-adding-to-tried" }` for failed tried table additions. Since the address remaining in the new table can't be removed (the address manager interface does not support removing addresses at the moment and adding this seems to be a bigger effort), an error message is returned. 
This indicates to a user why the RPC failed and allows accounting for the extra address in the new table. Also: To check the number of addresses in each addrman table, the addrman checks were re-run and the log output of this check was asserted. Ideally, logs shouldn't be used as an interface in automated tests. To avoid asserting the logs, use the getaddrmaninfo and getrawaddrman RPCs (which weren't implemented when the test was added). Removing the "getnodeaddress" calls would also remove the addrman checks from the test, which could reduce the test coverage. To avoid this, these are kept. --- src/rpc/net.cpp | 12 ++++++--- test/functional/rpc_net.py | 50 +++++++++++++++++++++++++++++--------- 2 files changed, 47 insertions(+), 15 deletions(-) diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 3fa2b18495314..f935a3b08f490 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -951,7 +951,7 @@ static RPCHelpMan getnodeaddresses() static RPCHelpMan addpeeraddress() { return RPCHelpMan{"addpeeraddress", - "\nAdd the address of a potential peer to the address manager. This RPC is for testing only.\n", + "Add the address of a potential peer to an address manager table. This RPC is for testing only.", { {"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The IP address of the peer"}, {"port", RPCArg::Type::NUM, RPCArg::Optional::NO, "The port of the peer"}, @@ -960,7 +960,8 @@ static RPCHelpMan addpeeraddress() RPCResult{ RPCResult::Type::OBJ, "", "", { - {RPCResult::Type::BOOL, "success", "whether the peer address was successfully added to the address manager"}, + {RPCResult::Type::BOOL, "success", "whether the peer address was successfully added to the address manager table"}, + {RPCResult::Type::STR, "error", /*optional=*/true, "error description, if the address could not be added"}, }, }, RPCExamples{ @@ -989,8 +990,13 @@ static RPCHelpMan addpeeraddress() success = true; if (tried) { // Attempt to move the address to the tried addresses table. 
- addrman.Good(address); + if (!addrman.Good(address)) { + success = false; + obj.pushKV("error", "failed-adding-to-tried"); + } } + } else { + obj.pushKV("error", "failed-adding-to-new"); } } diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 22789644f2733..e50aeebad2622 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -340,26 +340,52 @@ def test_addpeeraddress(self): assert_raises_rpc_error(-1, "JSON integer out of range", self.nodes[0].addpeeraddress, address="1.2.3.4", port=-1) assert_raises_rpc_error(-1, "JSON integer out of range", self.nodes[0].addpeeraddress, address="1.2.3.4", port=65536) + self.log.debug("Test that adding a valid address to the new table succeeds") + assert_equal(node.addpeeraddress(address="1.0.0.0", tried=False, port=8333), {"success": True}) + addrman = node.getrawaddrman() + assert_equal(len(addrman["tried"]), 0) + new_table = list(addrman["new"].values()) + assert_equal(len(new_table), 1) + assert_equal(new_table[0]["address"], "1.0.0.0") + assert_equal(new_table[0]["port"], 8333) + + self.log.debug("Test that adding an already-present new address to the new and tried tables fails") + for value in [True, False]: + assert_equal(node.addpeeraddress(address="1.0.0.0", tried=value, port=8333), {"success": False, "error": "failed-adding-to-new"}) + assert_equal(len(node.getnodeaddresses(count=0)), 1) + self.log.debug("Test that adding a valid address to the tried table succeeds") self.addr_time = int(time.time()) node.setmocktime(self.addr_time) assert_equal(node.addpeeraddress(address="1.2.3.4", tried=True, port=8333), {"success": True}) - with node.assert_debug_log(expected_msgs=["CheckAddrman: new 0, tried 1, total 1 started"]): - addrs = node.getnodeaddresses(count=0) # getnodeaddresses re-runs the addrman checks - assert_equal(len(addrs), 1) - assert_equal(addrs[0]["address"], "1.2.3.4") - assert_equal(addrs[0]["port"], 8333) + addrman = node.getrawaddrman() + 
assert_equal(len(addrman["new"]), 1) + tried_table = list(addrman["tried"].values()) + assert_equal(len(tried_table), 1) + assert_equal(tried_table[0]["address"], "1.2.3.4") + assert_equal(tried_table[0]["port"], 8333) + node.getnodeaddresses(count=0) # getnodeaddresses re-runs the addrman checks self.log.debug("Test that adding an already-present tried address to the new and tried tables fails") for value in [True, False]: - assert_equal(node.addpeeraddress(address="1.2.3.4", tried=value, port=8333), {"success": False}) - assert_equal(len(node.getnodeaddresses(count=0)), 1) - - self.log.debug("Test that adding a second address, this time to the new table, succeeds") + assert_equal(node.addpeeraddress(address="1.2.3.4", tried=value, port=8333), {"success": False, "error": "failed-adding-to-new"}) + assert_equal(len(node.getnodeaddresses(count=0)), 2) + + self.log.debug("Test that adding an address, which collides with the address in tried table, fails") + colliding_address = "1.2.5.45" # grinded address that produces a tried-table collision + assert_equal(node.addpeeraddress(address=colliding_address, tried=True, port=8333), {"success": False, "error": "failed-adding-to-tried"}) + # When adding an address to the tried table, it's first added to the new table. + # As we fail to move it to the tried table, it remains in the new table. 
+ addrman_info = node.getaddrmaninfo() + assert_equal(addrman_info["all_networks"]["tried"], 1) + assert_equal(addrman_info["all_networks"]["new"], 2) + + self.log.debug("Test that adding an another address to the new table succeeds") assert_equal(node.addpeeraddress(address="2.0.0.0", port=8333), {"success": True}) - with node.assert_debug_log(expected_msgs=["CheckAddrman: new 1, tried 1, total 2 started"]): - addrs = node.getnodeaddresses(count=0) # getnodeaddresses re-runs the addrman checks - assert_equal(len(addrs), 2) + addrman_info = node.getaddrmaninfo() + assert_equal(addrman_info["all_networks"]["tried"], 1) + assert_equal(addrman_info["all_networks"]["new"], 3) + node.getnodeaddresses(count=0) # getnodeaddresses re-runs the addrman checks def test_sendmsgtopeer(self): node = self.nodes[0] From 0d01f6f0c6e53c9765f84e0616ab46b83923a6ad Mon Sep 17 00:00:00 2001 From: 0xb10c Date: Tue, 12 Mar 2024 13:42:20 +0100 Subject: [PATCH 52/79] test: remove unused mocktime in test_addpeeraddress Drops the mocktime added in fa4c6836c9366c3cc575cb386a397840d5f1aa57. Setting the mocktime in test_addpeeraddress() isn't needed anymore as it doesn't leak into test_getrawaddrman() anymore (since 2cc8ca19f4185490f30a49516c890b2289fbab71). test_getrawaddrman() clears the addrman and sets its own mocktime. 
--- test/functional/rpc_net.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index e50aeebad2622..5801f2ce43fe2 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -355,8 +355,6 @@ def test_addpeeraddress(self): assert_equal(len(node.getnodeaddresses(count=0)), 1) self.log.debug("Test that adding a valid address to the tried table succeeds") - self.addr_time = int(time.time()) - node.setmocktime(self.addr_time) assert_equal(node.addpeeraddress(address="1.2.3.4", tried=True, port=8333), {"success": True}) addrman = node.getrawaddrman() assert_equal(len(addrman["new"]), 1) From 99954f914f031c80aa53daa367fc049c4c55bdf3 Mon Sep 17 00:00:00 2001 From: stratospher <44024636+stratospher@users.noreply.github.com> Date: Tue, 19 Mar 2024 10:58:18 +0530 Subject: [PATCH 53/79] test: fix test to ensure hidden RPC is present in detailed help current check to make sure that detailed help for hidden RPC is displayed won't work because the assertion isn't sufficient. Even if unknown RPCs are passed, RPC names would still be present in node.help(). --- test/functional/rpc_net.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 5801f2ce43fe2..48d86ab59dd99 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -324,10 +324,10 @@ def test_addpeeraddress(self): self.restart_node(1, ["-checkaddrman=1", "-test=addrman"], clear_addrman=True) node = self.nodes[1] - self.log.debug("Test that addpeerinfo is a hidden RPC") + self.log.debug("Test that addpeeraddress is a hidden RPC") # It is hidden from general help, but its detailed help may be called directly. 
- assert "addpeerinfo" not in node.help() - assert "addpeerinfo" in node.help("addpeerinfo") + assert "addpeeraddress" not in node.help() + assert "unknown command: addpeeraddress" not in node.help("addpeeraddress") self.log.debug("Test that adding an empty address fails") assert_equal(node.addpeeraddress(address="", port=8333), {"success": False}) @@ -452,7 +452,7 @@ def test_getrawaddrman(self): self.log.debug("Test that getrawaddrman is a hidden RPC") # It is hidden from general help, but its detailed help may be called directly. assert "getrawaddrman" not in node.help() - assert "getrawaddrman" in node.help("getrawaddrman") + assert "unknown command: getrawaddrman" not in node.help("getrawaddrman") def check_addr_information(result, expected): """Utility to compare a getrawaddrman result entry with an expected entry""" From faecf3a7e6779c2cacadd91a6eba446431778849 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Tue, 19 Mar 2024 09:17:19 +0100 Subject: [PATCH 54/79] ci: Bump msan to llvm-18 --- ci/test/00_setup_env_native_fuzz_with_msan.sh | 2 +- ci/test/00_setup_env_native_msan.sh | 2 +- ci/test/01_base_install.sh | 7 ++++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh index 0b32049013d06..0a9dee2ed826f 100755 --- a/ci/test/00_setup_env_native_fuzz_with_msan.sh +++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh @@ -6,7 +6,7 @@ export LC_ALL=C.UTF-8 -export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" LIBCXX_DIR="/msan/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" diff --git 
a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index 60987f50118a2..cbcd51e21878c 100755 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -6,7 +6,7 @@ export LC_ALL=C.UTF-8 -export CI_IMAGE_NAME_TAG="docker.io/ubuntu:22.04" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" LIBCXX_DIR="/msan/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 99813da106b54..6f1498963ae87 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright (c) 2018-2022 The Bitcoin Core developers +# Copyright (c) 2018-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
@@ -36,7 +36,7 @@ if [ -n "$PIP_PACKAGES" ]; then fi if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b llvmorg-17.0.6 /msan/llvm-project + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-18.1.1" /msan/llvm-project cmake -G Ninja -B /msan/clang_build/ \ -DLLVM_ENABLE_PROJECTS="clang" \ @@ -53,13 +53,14 @@ if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100 cmake -G Ninja -B /msan/cxx_build/ \ - -DLLVM_ENABLE_RUNTIMES='libcxx;libcxxabi' \ + -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \ -DCMAKE_BUILD_TYPE=Release \ -DLLVM_USE_SANITIZER=MemoryWithOrigins \ -DCMAKE_C_COMPILER=clang \ -DCMAKE_CXX_COMPILER=clang++ \ -DLLVM_TARGETS_TO_BUILD=Native \ -DLLVM_ENABLE_PER_TARGET_RUNTIME_DIR=OFF \ + -DLIBCXXABI_USE_LLVM_UNWINDER=OFF \ -DLIBCXX_HARDENING_MODE=debug \ -S /msan/llvm-project/runtimes From f65b0f6401091e4a4ca4c9f4db1cf388f0336bad Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Sun, 17 Mar 2024 20:37:42 +0100 Subject: [PATCH 55/79] index: Move last_locator_write_time and logging to end of threadsync loop This avoids having commit print a needless error message during init. 
Co-authored-by: furszy --- src/index/base.cpp | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/index/base.cpp b/src/index/base.cpp index 036292cd8a11c..21b93209b94e1 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -177,20 +177,6 @@ void BaseIndex::ThreadSync() pindex = pindex_next; } - auto current_time{std::chrono::steady_clock::now()}; - if (last_log_time + SYNC_LOG_INTERVAL < current_time) { - LogPrintf("Syncing %s with block chain from height %d\n", - GetName(), pindex->nHeight); - last_log_time = current_time; - } - - if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) { - SetBestBlockIndex(pindex->pprev); - last_locator_write_time = current_time; - // No need to handle errors in Commit. See rationale above. - Commit(); - } - CBlock block; interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex); if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *pindex)) { @@ -205,6 +191,20 @@ void BaseIndex::ThreadSync() __func__, pindex->GetBlockHash().ToString()); return; } + + auto current_time{std::chrono::steady_clock::now()}; + if (last_log_time + SYNC_LOG_INTERVAL < current_time) { + LogPrintf("Syncing %s with block chain from height %d\n", + GetName(), pindex->nHeight); + last_log_time = current_time; + } + + if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) { + SetBestBlockIndex(pindex); + last_locator_write_time = current_time; + // No need to handle errors in Commit. See rationale above. 
+ Commit(); + } } } From a8bfc3dea1d986b458202bf5e49cf1944392d676 Mon Sep 17 00:00:00 2001 From: brunoerg Date: Mon, 20 Nov 2023 11:16:46 -0300 Subject: [PATCH 56/79] test: add coverage for bech32m in `wallet_keypool_topup` --- test/functional/wallet_keypool_topup.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/test/functional/wallet_keypool_topup.py b/test/functional/wallet_keypool_topup.py index 48180e82942b0..e1bd85d8a9963 100755 --- a/test/functional/wallet_keypool_topup.py +++ b/test/functional/wallet_keypool_topup.py @@ -25,8 +25,10 @@ def add_options(self, parser): def set_test_params(self): self.setup_clean_chain = True - self.num_nodes = 4 - self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']] + self.num_nodes = 5 + self.extra_args = [[]] + for _ in range(self.num_nodes - 1): + self.extra_args.append(['-keypool=100']) def skip_test_if_missing_module(self): self.skip_if_no_wallet() @@ -40,12 +42,13 @@ def run_test(self): self.stop_node(1) shutil.copyfile(wallet_path, wallet_backup_path) self.start_node(1, self.extra_args[1]) - self.connect_nodes(0, 1) - self.connect_nodes(0, 2) - self.connect_nodes(0, 3) - - for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]): + for i in [1, 2, 3, 4]: + self.connect_nodes(0, i) + output_types = ["legacy", "p2sh-segwit", "bech32"] + if self.options.descriptors: + output_types.append("bech32m") + for i, output_type in enumerate(output_types): self.log.info("Generate keys for wallet with address type: {}".format(output_type)) idx = i+1 for _ in range(90): @@ -59,9 +62,10 @@ def run_test(self): assert not address_details["isscript"] and not address_details["iswitness"] elif i == 1: assert address_details["isscript"] and not address_details["iswitness"] - else: + elif i == 2: assert not address_details["isscript"] and address_details["iswitness"] - + elif i == 3: + assert address_details["isscript"] and address_details["iswitness"] 
self.log.info("Send funds to wallet") self.nodes[0].sendtoaddress(addr_oldpool, 10) @@ -87,6 +91,8 @@ def run_test(self): assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/49h/1h/0h/0/110") elif output_type == 'bech32': assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/84h/1h/0h/0/110") + elif output_type == 'bech32m': + assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/86h/1h/0h/0/110") else: assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/0'/0'/110'") From 54e07ee22ff16fc68583ade0d2b8ffffc81d444a Mon Sep 17 00:00:00 2001 From: ishaanam Date: Wed, 17 May 2023 20:56:25 -0400 Subject: [PATCH 57/79] wallet: track mempool conflicts Behavior changes are: - if a tx has a mempool conflict, the wallet will not attempt to rebroadcast it - if a txo is spent by a mempool-conflicted tx, that txo is no longer considered spent --- src/wallet/transaction.h | 13 ++++++-- src/wallet/wallet.cpp | 36 +++++++++++++++++++++-- src/wallet/wallet.h | 1 + test/functional/wallet_abandonconflict.py | 6 +++- test/functional/wallet_conflicts.py | 12 ++++---- 5 files changed, 57 insertions(+), 11 deletions(-) diff --git a/src/wallet/transaction.h b/src/wallet/transaction.h index 2ffd85afa9c34..9c27574103bd0 100644 --- a/src/wallet/transaction.h +++ b/src/wallet/transaction.h @@ -48,7 +48,7 @@ struct TxStateBlockConflicted { int conflicting_block_height; explicit TxStateBlockConflicted(const uint256& block_hash, int height) : conflicting_block_hash(block_hash), conflicting_block_height(height) {} - std::string toString() const { return strprintf("Conflicted (block=%s, height=%i)", conflicting_block_hash.ToString(), conflicting_block_height); } + std::string toString() const { return strprintf("BlockConflicted (block=%s, 
height=%i)", conflicting_block_hash.ToString(), conflicting_block_height); } }; //! State of transaction not confirmed or conflicting with a known block and @@ -258,6 +258,14 @@ class CWalletTx CTransactionRef tx; TxState m_state; + // Set of mempool transactions that conflict + // directly with the transaction, or that conflict + // with an ancestor transaction. This set will be + // empty if state is InMempool or Confirmed, but + // can be nonempty if state is Inactive or + // BlockConflicted. + std::set mempool_conflicts; + template void Serialize(Stream& s) const { @@ -335,9 +343,10 @@ class CWalletTx void updateState(interfaces::Chain& chain); bool isAbandoned() const { return state() && state()->abandoned; } + bool isMempoolConflicted() const { return !mempool_conflicts.empty(); } bool isBlockConflicted() const { return state(); } bool isInactive() const { return state(); } - bool isUnconfirmed() const { return !isAbandoned() && !isBlockConflicted() && !isConfirmed(); } + bool isUnconfirmed() const { return !isAbandoned() && !isBlockConflicted() && !isMempoolConflicted() && !isConfirmed(); } bool isConfirmed() const { return state(); } const Txid& GetHash() const LIFETIMEBOUND { return tx->GetHash(); } const Wtxid& GetWitnessHash() const LIFETIMEBOUND { return tx->GetWitnessHash(); } diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 1053b69f32c3a..2adc502642a96 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -753,7 +753,7 @@ bool CWallet::IsSpent(const COutPoint& outpoint) const const auto mit = mapWallet.find(wtxid); if (mit != mapWallet.end()) { const auto& wtx = mit->second; - if (!wtx.isAbandoned() && !wtx.isBlockConflicted()) + if (!wtx.isAbandoned() && !wtx.isBlockConflicted() && !wtx.isMempoolConflicted()) return true; // Spent } } @@ -1360,7 +1360,10 @@ void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c void CWallet::RecursiveUpdateTxState(const uint256& tx_hash, const TryUpdatingStateFn& 
try_updating_state) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { // Do not flush the wallet here for performance reasons WalletBatch batch(GetDatabase(), false); + RecursiveUpdateTxState(&batch, tx_hash, try_updating_state); +} +void CWallet::RecursiveUpdateTxState(WalletBatch* batch, const uint256& tx_hash, const TryUpdatingStateFn& try_updating_state) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { std::set todo; std::set done; @@ -1377,7 +1380,7 @@ void CWallet::RecursiveUpdateTxState(const uint256& tx_hash, const TryUpdatingSt TxUpdate update_state = try_updating_state(wtx); if (update_state != TxUpdate::UNCHANGED) { wtx.MarkDirty(); - batch.WriteTx(wtx); + if (batch) batch->WriteTx(wtx); // Iterate over all its outputs, and update those tx states as well (if applicable) for (unsigned int i = 0; i < wtx.tx->vout.size(); ++i) { std::pair range = mapTxSpends.equal_range(COutPoint(Txid::FromUint256(now), i)); @@ -1418,6 +1421,20 @@ void CWallet::transactionAddedToMempool(const CTransactionRef& tx) { if (it != mapWallet.end()) { RefreshMempoolStatus(it->second, chain()); } + + const Txid& txid = tx->GetHash(); + + for (const CTxIn& tx_in : tx->vin) { + // For each wallet transaction spending this prevout.. + for (auto range = mapTxSpends.equal_range(tx_in.prevout); range.first != range.second; range.first++) { + const uint256& spent_id = range.first->second; + // Skip the recently added tx + if (spent_id == txid) continue; + RecursiveUpdateTxState(/*batch=*/nullptr, spent_id, [&txid](CWalletTx& wtx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { + return wtx.mempool_conflicts.insert(txid).second ? 
TxUpdate::CHANGED : TxUpdate::UNCHANGED; + }); + } + } } void CWallet::transactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRemovalReason reason) { @@ -1455,6 +1472,21 @@ void CWallet::transactionRemovedFromMempool(const CTransactionRef& tx, MemPoolRe // https://github.com/bitcoin-core/bitcoin-devwiki/wiki/Wallet-Transaction-Conflict-Tracking SyncTransaction(tx, TxStateInactive{}); } + + const Txid& txid = tx->GetHash(); + + for (const CTxIn& tx_in : tx->vin) { + // Iterate over all wallet transactions spending txin.prev + // and recursively mark them as no longer conflicting with + // txid + for (auto range = mapTxSpends.equal_range(tx_in.prevout); range.first != range.second; range.first++) { + const uint256& spent_id = range.first->second; + + RecursiveUpdateTxState(/*batch=*/nullptr, spent_id, [&txid](CWalletTx& wtx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) { + return wtx.mempool_conflicts.erase(txid) ? TxUpdate::CHANGED : TxUpdate::UNCHANGED; + }); + } + } } void CWallet::blockConnected(ChainstateRole role, const interfaces::BlockInfo& block) diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index d55b683f1cdca..0bc460942c984 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -364,6 +364,7 @@ class CWallet final : public WalletStorage, public interfaces::Chain::Notificati /** Mark a transaction (and its in-wallet descendants) as a particular tx state. 
*/ void RecursiveUpdateTxState(const uint256& tx_hash, const TryUpdatingStateFn& try_updating_state) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void RecursiveUpdateTxState(WalletBatch* batch, const uint256& tx_hash, const TryUpdatingStateFn& try_updating_state) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); /** Mark a transaction's inputs dirty, thus forcing the outputs to be recomputed */ void MarkInputsDirty(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py index 26915077736d5..bc489524315fe 100755 --- a/test/functional/wallet_abandonconflict.py +++ b/test/functional/wallet_abandonconflict.py @@ -232,7 +232,11 @@ def run_test(self): balance = newbalance # Invalidate the block with the double spend. B & C's 10 BTC outputs should no longer be available - self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) + blk = self.nodes[0].getbestblockhash() + # mine 10 blocks so that when the blk is invalidated, the transactions are not + # returned to the mempool + self.generate(self.nodes[1], 10) + self.nodes[0].invalidateblock(blk) assert_equal(alice.gettransaction(txAB1)["confirmations"], 0) newbalance = alice.getbalance() assert_equal(newbalance, balance - Decimal("20")) diff --git a/test/functional/wallet_conflicts.py b/test/functional/wallet_conflicts.py index 3ca7eb246c91f..cb6b1c7eaaf9d 100755 --- a/test/functional/wallet_conflicts.py +++ b/test/functional/wallet_conflicts.py @@ -174,9 +174,9 @@ def test_mempool_conflict(self): # broadcast tx2, replaces tx1 in mempool tx2_txid = alice.sendrawtransaction(tx2) - # Check that unspent[0] is still not available because the wallet does not know that the tx spending it has a mempool conflicted - assert_equal(alice.listunspent(), []) - assert_equal(alice.getbalance(), 0) + # Check that unspent[0] is now available because the transaction spending it has been replaced in the mempool + assert_equal(alice.listunspent(), 
[unspents[0]]) + assert_equal(alice.getbalance(), 25) self.log.info("Test scenario where a mempool conflict is removed") @@ -262,8 +262,8 @@ def test_mempool_and_block_conflicts(self): assert tx2_txid in bob.getrawmempool() assert tx1_conflict_txid in bob.getrawmempool() - # check that the tx2 unspent is still not available because the wallet does not know that the tx spending it has a mempool conflict - assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + # check that tx3 is now conflicted, so the output from tx2 can now be spent + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("24.99990000")) # we will be disconnecting this block in the future alice.sendrawtransaction(tx2_conflict) @@ -293,7 +293,7 @@ def test_mempool_and_block_conflicts(self): assert_equal(bob.gettransaction(tx3_txid)["confirmations"], 0) bob.sendrawtransaction(raw_tx2) - assert_equal(bob.getbalances()["mine"]["untrusted_pending"], 0) + assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("24.99990000")) # create a conflict to previous tx (also spends unspents[2]), but don't broadcast, sends funds back to alice raw_tx = alice.createrawtransaction(inputs=[unspents[2]], outputs=[{alice.getnewaddress() : 24.99}]) From 5952292133d6cc889f51ae771f2e0557311e1efe Mon Sep 17 00:00:00 2001 From: ishaanam Date: Sun, 16 Jul 2023 21:42:33 -0400 Subject: [PATCH 58/79] wallet, rpc: show mempool conflicts in `gettransaction` result --- src/wallet/rpc/transactions.cpp | 8 ++++++++ test/functional/wallet_basic.py | 2 +- test/functional/wallet_conflicts.py | 19 +++++++++++++++++++ 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/src/wallet/rpc/transactions.cpp b/src/wallet/rpc/transactions.cpp index e6c021d4261aa..05b340995d08f 100644 --- a/src/wallet/rpc/transactions.cpp +++ b/src/wallet/rpc/transactions.cpp @@ -40,6 +40,10 @@ static void WalletTxToJSON(const CWallet& wallet, const CWalletTx& wtx, UniValue for (const uint256& conflict : 
wallet.GetTxConflicts(wtx)) conflicts.push_back(conflict.GetHex()); entry.pushKV("walletconflicts", conflicts); + UniValue mempool_conflicts(UniValue::VARR); + for (const Txid& mempool_conflict : wtx.mempool_conflicts) + mempool_conflicts.push_back(mempool_conflict.GetHex()); + entry.pushKV("mempoolconflicts", mempool_conflicts); entry.pushKV("time", wtx.GetTxTime()); entry.pushKV("timereceived", int64_t{wtx.nTimeReceived}); @@ -417,6 +421,10 @@ static std::vector TransactionDescriptionString() }}, {RPCResult::Type::STR_HEX, "replaced_by_txid", /*optional=*/true, "Only if 'category' is 'send'. The txid if this tx was replaced."}, {RPCResult::Type::STR_HEX, "replaces_txid", /*optional=*/true, "Only if 'category' is 'send'. The txid if this tx replaces another."}, + {RPCResult::Type::ARR, "mempoolconflicts", "Transactions that directly conflict with either this transaction or an ancestor transaction", + { + {RPCResult::Type::STR_HEX, "txid", "The transaction id."}, + }}, {RPCResult::Type::STR, "to", /*optional=*/true, "If a comment to is associated with the transaction."}, {RPCResult::Type::NUM_TIME, "time", "The transaction time expressed in " + UNIX_EPOCH_TIME + "."}, {RPCResult::Type::NUM_TIME, "timereceived", "The time received expressed in " + UNIX_EPOCH_TIME + "."}, diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index f798eee365cfb..0b52ed79142dc 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -679,7 +679,7 @@ def run_test(self): "category": baz["category"], "vout": baz["vout"]} expected_fields = frozenset({'amount', 'bip125-replaceable', 'confirmations', 'details', 'fee', - 'hex', 'lastprocessedblock', 'time', 'timereceived', 'trusted', 'txid', 'wtxid', 'walletconflicts'}) + 'hex', 'lastprocessedblock', 'time', 'timereceived', 'trusted', 'txid', 'wtxid', 'walletconflicts', 'mempoolconflicts'}) verbose_field = "decoded" expected_verbose_fields = expected_fields | {verbose_field} diff --git 
a/test/functional/wallet_conflicts.py b/test/functional/wallet_conflicts.py index cb6b1c7eaaf9d..e5739a6a597b8 100755 --- a/test/functional/wallet_conflicts.py +++ b/test/functional/wallet_conflicts.py @@ -178,6 +178,8 @@ def test_mempool_conflict(self): assert_equal(alice.listunspent(), [unspents[0]]) assert_equal(alice.getbalance(), 25) + assert_equal(alice.gettransaction(tx1_txid)["mempoolconflicts"], [tx2_txid]) + self.log.info("Test scenario where a mempool conflict is removed") # broadcast tx3, replaces tx2 in mempool @@ -187,6 +189,7 @@ def test_mempool_conflict(self): # tx1 is no longer conflicted. alice.sendrawtransaction(tx3) + assert_equal(alice.gettransaction(tx1_txid)["mempoolconflicts"], []) assert tx1_txid not in self.nodes[0].getrawmempool() # now all of alice's outputs should be considered spent @@ -262,6 +265,10 @@ def test_mempool_and_block_conflicts(self): assert tx2_txid in bob.getrawmempool() assert tx1_conflict_txid in bob.getrawmempool() + assert_equal(bob.gettransaction(tx1_txid)["mempoolconflicts"], [tx1_conflict_txid]) + assert_equal(bob.gettransaction(tx2_txid)["mempoolconflicts"], []) + assert_equal(bob.gettransaction(tx3_txid)["mempoolconflicts"], [tx1_conflict_txid]) + # check that tx3 is now conflicted, so the output from tx2 can now be spent assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("24.99990000")) @@ -348,6 +355,8 @@ def test_descendants_with_mempool_conflicts(self): assert_equal(alice.getbalance(), 25) assert_equal(bob.getbalances()["mine"]["untrusted_pending"], Decimal("24.99990000")) + assert_equal(bob.gettransaction(tx1_txid)["mempoolconflicts"], []) + raw_tx = bob.createrawtransaction(inputs=[bob.listunspent(minconf=0)[0]], outputs=[{carol.getnewaddress() : 24.999}]) # Bob creates a child to tx1 tx1_child = bob.signrawtransactionwithwallet(raw_tx)['hex'] @@ -356,6 +365,8 @@ def test_descendants_with_mempool_conflicts(self): self.sync_mempools() # Currently neither tx1 nor tx1_child should have any 
conflicts + assert_equal(bob.gettransaction(tx1_txid)["mempoolconflicts"], []) + assert_equal(bob.gettransaction(tx1_child_txid)["mempoolconflicts"], []) assert tx1_txid in bob.getrawmempool() assert tx1_child_txid in bob.getrawmempool() assert_equal(len(bob.getrawmempool()), 2) @@ -378,6 +389,10 @@ def test_descendants_with_mempool_conflicts(self): assert tx1_conflict_txid in bob.getrawmempool() assert_equal(len(bob.getrawmempool()), 1) + # Now both tx1 and tx1_child are conflicted by tx1_conflict + assert_equal(bob.gettransaction(tx1_txid)["mempoolconflicts"], [tx1_conflict_txid]) + assert_equal(bob.gettransaction(tx1_child_txid)["mempoolconflicts"], [tx1_conflict_txid]) + # Now create a conflict to tx1_conflict, so that it gets kicked out of the mempool raw_tx = alice.createrawtransaction(inputs=[unspents[1]], outputs=[{carol.getnewaddress() : 24.9895}]) tx1_conflict_conflict = alice.signrawtransactionwithwallet(raw_tx)['hex'] @@ -385,6 +400,10 @@ def test_descendants_with_mempool_conflicts(self): self.sync_mempools() + # Now that tx1_conflict has been removed, both tx1 and tx1_child + assert_equal(bob.gettransaction(tx1_txid)["mempoolconflicts"], []) + assert_equal(bob.gettransaction(tx1_child_txid)["mempoolconflicts"], []) + # Both tx1 and tx1_child are still not in the mempool because they have not be re-broadcasted assert tx1_txid not in bob.getrawmempool() assert tx1_child_txid not in bob.getrawmempool() From ddc7872c08b7ddf9b1e83abdb97c21303f4a9172 Mon Sep 17 00:00:00 2001 From: TheCharlatan Date: Fri, 15 Mar 2024 21:42:44 +0100 Subject: [PATCH 59/79] node: Make translations of fatal errors consistent The extra `bilingual_str` argument of the fatal error notifications and `node::AbortNode()` is often unused and when used usually contains the same string as the message argument. It also seems to be confusing, since it is not consistently used for errors requiring user action. 
For example some assumeutxo fatal errors require the user to do something, but are not translated. So simplify the fatal error and abort node interfaces by only passing a translated string. This slightly changes the fatal errors displayed to the user. Also de-duplicate the abort error log since it is repeated in noui.cpp. --- src/bitcoin-chainstate.cpp | 9 +++---- src/index/base.cpp | 2 +- src/init.cpp | 2 +- src/kernel/notifications_interface.h | 8 +++--- src/node/abort.cpp | 9 +++---- src/node/abort.h | 7 +++-- src/node/blockstorage.cpp | 16 ++++++------ src/node/kernel_notifications.cpp | 8 +++--- src/node/kernel_notifications.h | 5 ++-- src/validation.cpp | 36 +++++++++++++------------- src/validation.h | 2 +- test/functional/feature_abortnode.py | 2 +- test/functional/feature_assumeutxo.py | 2 +- test/functional/feature_index_prune.py | 2 +- 14 files changed, 52 insertions(+), 58 deletions(-) diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp index 3eb64aa344b13..642af06e82d2c 100644 --- a/src/bitcoin-chainstate.cpp +++ b/src/bitcoin-chainstate.cpp @@ -89,14 +89,13 @@ int main(int argc, char* argv[]) { std::cout << "Warning: " << warning.original << std::endl; } - void flushError(const std::string& debug_message) override + void flushError(const bilingual_str& message) override { - std::cerr << "Error flushing block data to disk: " << debug_message << std::endl; + std::cerr << "Error flushing block data to disk: " << message.original << std::endl; } - void fatalError(const std::string& debug_message, const bilingual_str& user_message) override + void fatalError(const bilingual_str& message) override { - std::cerr << "Error: " << debug_message << std::endl; - std::cerr << (user_message.empty() ? "A fatal internal error occurred." 
: user_message.original) << std::endl; + std::cerr << "Error: " << message.original << std::endl; } }; auto notifications = std::make_unique(); diff --git a/src/index/base.cpp b/src/index/base.cpp index b4bda2fca6ac8..5f2c3786c2091 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -31,7 +31,7 @@ template void BaseIndex::FatalErrorf(const char* fmt, const Args&... args) { auto message = tfm::format(fmt, args...); - node::AbortNode(m_chain->context()->shutdown, m_chain->context()->exit_status, message); + node::AbortNode(m_chain->context()->shutdown, m_chain->context()->exit_status, Untranslated(message)); } CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash) diff --git a/src/init.cpp b/src/init.cpp index 0aa04755cbeba..349d4ff1abca5 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1757,7 +1757,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // Start indexes initial sync if (!StartIndexBackgroundSync(node)) { bilingual_str err_str = _("Failed to start indexes, shutting down.."); - chainman.GetNotifications().fatalError(err_str.original, err_str); + chainman.GetNotifications().fatalError(err_str); return; } // Load mempool from disk diff --git a/src/kernel/notifications_interface.h b/src/kernel/notifications_interface.h index c5e77b0df906b..7283a88e862c3 100644 --- a/src/kernel/notifications_interface.h +++ b/src/kernel/notifications_interface.h @@ -5,14 +5,12 @@ #ifndef BITCOIN_KERNEL_NOTIFICATIONS_INTERFACE_H #define BITCOIN_KERNEL_NOTIFICATIONS_INTERFACE_H -#include - #include -#include #include class CBlockIndex; enum class SynchronizationState; +struct bilingual_str; namespace kernel { @@ -48,7 +46,7 @@ class Notifications //! perform. Applications can choose to handle the flush error notification //! by logging the error, or notifying the user, or triggering an early //! shutdown as a precaution against causing more errors. 
- virtual void flushError(const std::string& debug_message) {} + virtual void flushError(const bilingual_str& message) {} //! The fatal error notification is sent to notify the user when an error //! occurs in kernel code that can't be recovered from. After this @@ -57,7 +55,7 @@ class Notifications //! handle the fatal error notification by logging the error, or notifying //! the user, or triggering an early shutdown as a precaution against //! causing more errors. - virtual void fatalError(const std::string& debug_message, const bilingual_str& user_message = {}) {} + virtual void fatalError(const bilingual_str& message) {} }; } // namespace kernel diff --git a/src/node/abort.cpp b/src/node/abort.cpp index 1bdc91670d731..b727608384234 100644 --- a/src/node/abort.cpp +++ b/src/node/abort.cpp @@ -16,14 +16,13 @@ namespace node { -void AbortNode(util::SignalInterrupt* shutdown, std::atomic& exit_status, const std::string& debug_message, const bilingual_str& user_message) +void AbortNode(util::SignalInterrupt* shutdown, std::atomic& exit_status, const bilingual_str& message) { - SetMiscWarning(Untranslated(debug_message)); - LogPrintf("*** %s\n", debug_message); - InitError(user_message.empty() ? 
_("A fatal internal error occurred, see debug.log for details") : user_message); + SetMiscWarning(message); + InitError(_("A fatal internal error occurred, see debug.log for details: ") + message); exit_status.store(EXIT_FAILURE); if (shutdown && !(*shutdown)()) { - LogPrintf("Error: failed to send shutdown signal\n"); + LogError("Failed to send shutdown signal\n"); }; } } // namespace node diff --git a/src/node/abort.h b/src/node/abort.h index 28d021cc7876c..10922791421f7 100644 --- a/src/node/abort.h +++ b/src/node/abort.h @@ -5,17 +5,16 @@ #ifndef BITCOIN_NODE_ABORT_H #define BITCOIN_NODE_ABORT_H -#include - #include -#include + +struct bilingual_str; namespace util { class SignalInterrupt; } // namespace util namespace node { -void AbortNode(util::SignalInterrupt* shutdown, std::atomic& exit_status, const std::string& debug_message, const bilingual_str& user_message = {}); +void AbortNode(util::SignalInterrupt* shutdown, std::atomic& exit_status, const bilingual_str& message); } // namespace node #endif // BITCOIN_NODE_ABORT_H diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index f78f33e3713c0..576c07a833e49 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -404,7 +404,7 @@ bool BlockManager::LoadBlockIndex(const std::optional& snapshot_blockha if (snapshot_blockhash) { const std::optional maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash); if (!maybe_au_data) { - m_opts.notifications.fatalError(strprintf("Assumeutxo data not found for the given blockhash '%s'.", snapshot_blockhash->ToString())); + m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString())); return false; } const AssumeutxoData& au_data = *Assert(maybe_au_data); @@ -741,7 +741,7 @@ bool BlockManager::FlushUndoFile(int block_file, bool finalize) { FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize); if 
(!UndoFileSeq().Flush(undo_pos_old, finalize)) { - m_opts.notifications.flushError("Flushing undo file to disk failed. This is likely the result of an I/O error."); + m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error.")); return false; } return true; @@ -763,7 +763,7 @@ bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finali FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize); if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) { - m_opts.notifications.flushError("Flushing block file to disk failed. This is likely the result of an I/O error."); + m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error.")); success = false; } // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks, @@ -935,7 +935,7 @@ bool BlockManager::FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigne bool out_of_space; size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space); if (out_of_space) { - m_opts.notifications.fatalError("Disk space is too low!", _("Disk space is too low!")); + m_opts.notifications.fatalError(_("Disk space is too low!")); return false; } if (bytes_allocated != 0 && IsPruneMode()) { @@ -960,7 +960,7 @@ bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFileP bool out_of_space; size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space); if (out_of_space) { - return FatalError(m_opts.notifications, state, "Disk space is too low!", _("Disk space is too low!")); + return FatalError(m_opts.notifications, state, _("Disk space is too low!")); } if (bytes_allocated != 0 && IsPruneMode()) { m_check_for_pruning = true; @@ -1008,7 +1008,7 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid return false; } if (!UndoWriteToDisk(blockundo, _pos, 
block.pprev->GetBlockHash())) { - return FatalError(m_opts.notifications, state, "Failed to write undo data"); + return FatalError(m_opts.notifications, state, _("Failed to write undo data.")); } // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order) // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height @@ -1149,7 +1149,7 @@ FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, cons } if (!position_known) { if (!WriteBlockToDisk(block, blockPos)) { - m_opts.notifications.fatalError("Failed to write block"); + m_opts.notifications.fatalError(_("Failed to write block.")); return FlatFilePos(); } } @@ -1233,7 +1233,7 @@ void ImportBlocks(ChainstateManager& chainman, std::vector vImportFile for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) { BlockValidationState state; if (!chainstate->ActivateBestChain(state, nullptr)) { - chainman.GetNotifications().fatalError(strprintf("Failed to connect best block (%s)", state.ToString())); + chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString())); return; } } diff --git a/src/node/kernel_notifications.cpp b/src/node/kernel_notifications.cpp index 1fd3bad296813..99f909ff75c7d 100644 --- a/src/node/kernel_notifications.cpp +++ b/src/node/kernel_notifications.cpp @@ -84,15 +84,15 @@ void KernelNotifications::warning(const bilingual_str& warning) DoWarning(warning); } -void KernelNotifications::flushError(const std::string& debug_message) +void KernelNotifications::flushError(const bilingual_str& message) { - AbortNode(&m_shutdown, m_exit_status, debug_message); + AbortNode(&m_shutdown, m_exit_status, message); } -void KernelNotifications::fatalError(const std::string& debug_message, const bilingual_str& user_message) +void KernelNotifications::fatalError(const bilingual_str& message) { 
node::AbortNode(m_shutdown_on_fatal_error ? &m_shutdown : nullptr, - m_exit_status, debug_message, user_message); + m_exit_status, message); } void ReadNotificationArgs(const ArgsManager& args, KernelNotifications& notifications) diff --git a/src/node/kernel_notifications.h b/src/node/kernel_notifications.h index 38d8600ac6c62..f4d97a0fff28f 100644 --- a/src/node/kernel_notifications.h +++ b/src/node/kernel_notifications.h @@ -9,7 +9,6 @@ #include #include -#include class ArgsManager; class CBlockIndex; @@ -37,9 +36,9 @@ class KernelNotifications : public kernel::Notifications void warning(const bilingual_str& warning) override; - void flushError(const std::string& debug_message) override; + void flushError(const bilingual_str& message) override; - void fatalError(const std::string& debug_message, const bilingual_str& user_message = {}) override; + void fatalError(const bilingual_str& message) override; //! Block height after which blockTip notification will return Interrupted{}, if >0. int m_stop_at_height{DEFAULT_STOPATHEIGHT}; diff --git a/src/validation.cpp b/src/validation.cpp index b6d0c38f391da..85d8ee2fb541f 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2051,10 +2051,10 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, return true; } -bool FatalError(Notifications& notifications, BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage) +bool FatalError(Notifications& notifications, BlockValidationState& state, const bilingual_str& message) { - notifications.fatalError(strMessage, userMessage); - return state.Error(strMessage); + notifications.fatalError(message); + return state.Error(message.original); } /** @@ -2276,7 +2276,7 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, // We don't write down blocks to disk if they may have been // corrupted, so this should be impossible unless we're having hardware // problems. 
- return FatalError(m_chainman.GetNotifications(), state, "Corrupt block found indicating potential hardware failure; shutting down"); + return FatalError(m_chainman.GetNotifications(), state, _("Corrupt block found indicating potential hardware failure.")); } LogError("%s: Consensus::CheckBlock: %s\n", __func__, state.ToString()); return false; @@ -2702,7 +2702,7 @@ bool Chainstate::FlushStateToDisk( if (fDoFullFlush || fPeriodicWrite) { // Ensure we can write block index if (!CheckDiskSpace(m_blockman.m_opts.blocks_dir)) { - return FatalError(m_chainman.GetNotifications(), state, "Disk space is too low!", _("Disk space is too low!")); + return FatalError(m_chainman.GetNotifications(), state, _("Disk space is too low!")); } { LOG_TIME_MILLIS_WITH_CATEGORY("write block and undo data to disk", BCLog::BENCH); @@ -2720,7 +2720,7 @@ bool Chainstate::FlushStateToDisk( LOG_TIME_MILLIS_WITH_CATEGORY("write block index to disk", BCLog::BENCH); if (!m_blockman.WriteBlockIndexDB()) { - return FatalError(m_chainman.GetNotifications(), state, "Failed to write to block index database"); + return FatalError(m_chainman.GetNotifications(), state, _("Failed to write to block index database.")); } } // Finally remove any pruned files @@ -2742,11 +2742,11 @@ bool Chainstate::FlushStateToDisk( // an overestimation, as most will delete an existing entry or // overwrite one. Still, use a conservative safety factor of 2. if (!CheckDiskSpace(m_chainman.m_options.datadir, 48 * 2 * 2 * CoinsTip().GetCacheSize())) { - return FatalError(m_chainman.GetNotifications(), state, "Disk space is too low!", _("Disk space is too low!")); + return FatalError(m_chainman.GetNotifications(), state, _("Disk space is too low!")); } // Flush the chainstate (which may refer to block index entries). 
if (!CoinsTip().Flush()) - return FatalError(m_chainman.GetNotifications(), state, "Failed to write to coin database"); + return FatalError(m_chainman.GetNotifications(), state, _("Failed to write to coin database.")); m_last_flush = nNow; full_flush_completed = true; TRACE5(utxocache, flush, @@ -2762,7 +2762,7 @@ bool Chainstate::FlushStateToDisk( m_chainman.m_options.signals->ChainStateFlushed(this->GetRole(), m_chain.GetLocator()); } } catch (const std::runtime_error& e) { - return FatalError(m_chainman.GetNotifications(), state, std::string("System error while flushing: ") + e.what()); + return FatalError(m_chainman.GetNotifications(), state, strprintf(_("System error while flushing: %s"), e.what())); } return true; } @@ -2998,7 +2998,7 @@ bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, if (!pblock) { std::shared_ptr pblockNew = std::make_shared(); if (!m_blockman.ReadBlockFromDisk(*pblockNew, *pindexNew)) { - return FatalError(m_chainman.GetNotifications(), state, "Failed to read block"); + return FatalError(m_chainman.GetNotifications(), state, _("Failed to read block.")); } pthisBlock = pblockNew; } else { @@ -3185,7 +3185,7 @@ bool Chainstate::ActivateBestChainStep(BlockValidationState& state, CBlockIndex* // If we're unable to disconnect a block during normal operation, // then that is a failure of our local system -- we should abort // rather than stay on a less work chain. 
- FatalError(m_chainman.GetNotifications(), state, "Failed to disconnect block; see debug.log for details"); + FatalError(m_chainman.GetNotifications(), state, _("Failed to disconnect block.")); return false; } fBlocksDisconnected = true; @@ -4345,7 +4345,7 @@ bool ChainstateManager::AcceptBlock(const std::shared_ptr& pblock, } ReceivedBlockTransactions(block, pindex, blockPos); } catch (const std::runtime_error& e) { - return FatalError(GetNotifications(), state, std::string("System error: ") + e.what()); + return FatalError(GetNotifications(), state, strprintf(_("System error while saving block to disk: %s"), e.what())); } // TODO: FlushStateToDisk() handles flushing of both block and chainstate @@ -5029,7 +5029,7 @@ void ChainstateManager::LoadExternalBlockFile( } } } catch (const std::runtime_error& e) { - GetNotifications().fatalError(std::string("System error: ") + e.what()); + GetNotifications().fatalError(strprintf(_("System error while loading external block file: %s"), e.what())); } LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, Ticks(SteadyClock::now() - start)); } @@ -5539,8 +5539,8 @@ bool ChainstateManager::ActivateSnapshot( snapshot_chainstate.reset(); bool removed = DeleteCoinsDBFromDisk(*snapshot_datadir, /*is_snapshot=*/true); if (!removed) { - GetNotifications().fatalError(strprintf("Failed to remove snapshot chainstate dir (%s). " - "Manually remove it before restarting.\n", fs::PathToString(*snapshot_datadir))); + GetNotifications().fatalError(strprintf(_("Failed to remove snapshot chainstate dir (%s). 
" + "Manually remove it before restarting.\n"), fs::PathToString(*snapshot_datadir))); } } return false; @@ -5879,7 +5879,7 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation() user_error = strprintf(Untranslated("%s\n%s"), user_error, util::ErrorString(rename_result)); } - GetNotifications().fatalError(user_error.original, user_error); + GetNotifications().fatalError(user_error); }; if (index_new.GetBlockHash() != snapshot_blockhash) { @@ -6220,9 +6220,9 @@ bool ChainstateManager::ValidatedSnapshotCleanup() const fs::filesystem_error& err) { LogPrintf("Error renaming path (%s) -> (%s): %s\n", fs::PathToString(p_old), fs::PathToString(p_new), err.what()); - GetNotifications().fatalError(strprintf( + GetNotifications().fatalError(strprintf(_( "Rename of '%s' -> '%s' failed. " - "Cannot clean up the background chainstate leveldb directory.", + "Cannot clean up the background chainstate leveldb directory."), fs::PathToString(p_old), fs::PathToString(p_new))); }; diff --git a/src/validation.h b/src/validation.h index bcf153719af9a..0f00a48b9c724 100644 --- a/src/validation.h +++ b/src/validation.h @@ -93,7 +93,7 @@ extern const std::vector CHECKLEVEL_DOC; CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams); -bool FatalError(kernel::Notifications& notifications, BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = {}); +bool FatalError(kernel::Notifications& notifications, BlockValidationState& state, const bilingual_str& message); /** Guess verification progress (as a fraction between 0.0=genesis and 1.0=current tip). 
*/ double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex* pindex); diff --git a/test/functional/feature_abortnode.py b/test/functional/feature_abortnode.py index 740d3b7f0ecdc..01ba2834c4221 100755 --- a/test/functional/feature_abortnode.py +++ b/test/functional/feature_abortnode.py @@ -36,7 +36,7 @@ def run_test(self): # Check that node0 aborted self.log.info("Waiting for crash") - self.nodes[0].wait_until_stopped(timeout=5, expect_error=True, expected_stderr="Error: A fatal internal error occurred, see debug.log for details") + self.nodes[0].wait_until_stopped(timeout=5, expect_error=True, expected_stderr="Error: A fatal internal error occurred, see debug.log for details: Failed to disconnect block.") self.log.info("Node crashed - now verifying restart fails") self.nodes[0].assert_start_raises_init_error() diff --git a/test/functional/feature_assumeutxo.py b/test/functional/feature_assumeutxo.py index eb9ea65c4f23b..3e882f47b83d0 100755 --- a/test/functional/feature_assumeutxo.py +++ b/test/functional/feature_assumeutxo.py @@ -134,7 +134,7 @@ def expected_error(log_msg="", error_msg=""): with self.nodes[0].assert_debug_log([log_msg]): self.nodes[0].assert_start_raises_init_error(expected_msg=error_msg) - expected_error_msg = f"Error: A fatal internal error occurred, see debug.log for details" + expected_error_msg = f"Error: A fatal internal error occurred, see debug.log for details: Assumeutxo data not found for the given blockhash '7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a7a'." 
error_details = f"Assumeutxo data not found for the given blockhash" expected_error(log_msg=error_details, error_msg=expected_error_msg) diff --git a/test/functional/feature_index_prune.py b/test/functional/feature_index_prune.py index d6e802b399e9d..b3bf35b5243c8 100755 --- a/test/functional/feature_index_prune.py +++ b/test/functional/feature_index_prune.py @@ -128,7 +128,7 @@ def run_test(self): self.log.info("make sure we get an init error when starting the nodes again with the indices") filter_msg = "Error: basic block filter index best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)" stats_msg = "Error: coinstatsindex best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)" - end_msg = f"{os.linesep}Error: Failed to start indexes, shutting down.." + end_msg = f"{os.linesep}Error: A fatal internal error occurred, see debug.log for details: Failed to start indexes, shutting down.." 
for i, msg in enumerate([filter_msg, stats_msg, filter_msg]): self.nodes[i].assert_start_raises_init_error(extra_args=self.extra_args[i], expected_msg=msg+end_msg) From 824f47294a309ba8e58ba8d1da0af15d8d828f43 Mon Sep 17 00:00:00 2001 From: TheCharlatan Date: Thu, 21 Mar 2024 16:09:02 +0100 Subject: [PATCH 60/79] node: Use log levels in noui_ThreadSafeMessageBox --- src/noui.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/noui.cpp b/src/noui.cpp index af5a180ce3687..23637dfa1f750 100644 --- a/src/noui.cpp +++ b/src/noui.cpp @@ -28,20 +28,21 @@ bool noui_ThreadSafeMessageBox(const bilingual_str& message, const std::string& switch (style) { case CClientUIInterface::MSG_ERROR: strCaption = "Error: "; + if (!fSecure) LogError("%s\n", message.original); break; case CClientUIInterface::MSG_WARNING: strCaption = "Warning: "; + if (!fSecure) LogWarning("%s\n", message.original); break; case CClientUIInterface::MSG_INFORMATION: strCaption = "Information: "; + if (!fSecure) LogInfo("%s\n", message.original); break; default: strCaption = caption + ": "; // Use supplied caption (can be empty) + if (!fSecure) LogInfo("%s%s\n", strCaption, message.original); } - if (!fSecure) { - LogPrintf("%s%s\n", strCaption, message.original); - } tfm::format(std::cerr, "%s%s\n", strCaption, message.original); return false; } From 7850c5fe20a034438e00f6c12ce51efc6af3a1aa Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 14 Aug 2023 13:23:25 +0100 Subject: [PATCH 61/79] guix: build GCC with --enable-standard-branch-protection To enable Branch Target Identification Mechanism and Return Address Signing by default at configure time use the `--enable-standard-branch-protection` option. This is equivalent to having `-mbranch-protection=standard` during compilation. This can be explicitly disabled during compilation by passing the `-mbranch-protection=none` option which turns off all types of branch protections. 
See: https://gcc.gnu.org/install/specific.html#aarch64-x-x --- contrib/guix/manifest.scm | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 3353c8a87446a..e24a61bf9da24 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -423,6 +423,7 @@ inspecting signatures in Mach-O binaries.") (list "--enable-initfini-array=yes", "--enable-default-ssp=yes", "--enable-default-pie=yes", + "--enable-standard-branch-protection=yes", building-on))) ((#:phases phases) `(modify-phases ,phases From fa1146d01b148dd60fcada36a3b37ed37532ce2b Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Fri, 15 Mar 2024 15:00:13 +0100 Subject: [PATCH 62/79] lint: Fix COMMIT_RANGE issues --- ci/lint/06_script.sh | 23 +++++++++++++---------- ci/lint/container-entrypoint.sh | 2 +- test/lint/lint-git-commit-check.py | 23 +++++------------------ 3 files changed, 19 insertions(+), 29 deletions(-) diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh index 318b2bb81942f..cdf0f60147d9c 100755 --- a/ci/lint/06_script.sh +++ b/ci/lint/06_script.sh @@ -8,21 +8,24 @@ export LC_ALL=C set -ex -if [ -n "$LOCAL_BRANCH" ]; then - # To faithfully recreate CI linting locally, specify all commits on the current - # branch. - COMMIT_RANGE="$(git merge-base HEAD master)..HEAD" -elif [ -n "$CIRRUS_PR" ]; then +if [ -n "$CIRRUS_PR" ]; then COMMIT_RANGE="HEAD~..HEAD" - echo - git log --no-merges --oneline "$COMMIT_RANGE" - echo - test/lint/commit-script-check.sh "$COMMIT_RANGE" + if [ "$(git rev-list -1 HEAD)" != "$(git rev-list -1 --merges HEAD)" ]; then + echo "Error: The top commit must be a merge commit, usually the remote 'pull/${PR_NUMBER}/merge' branch." + false + fi else - COMMIT_RANGE="SKIP_EMPTY_NOT_A_PR" + # Otherwise, assume that a merge commit exists. This merge commit is assumed + # to be the base, after which linting will be done. If the merge commit is + # HEAD, the range will be empty. 
+ COMMIT_RANGE="$( git rev-list --max-count=1 --merges HEAD )..HEAD" fi export COMMIT_RANGE +echo +git log --no-merges --oneline "$COMMIT_RANGE" +echo +test/lint/commit-script-check.sh "$COMMIT_RANGE" RUST_BACKTRACE=1 "${LINT_RUNNER_PATH}/test_runner" if [ "$CIRRUS_REPO_FULL_NAME" = "bitcoin/bitcoin" ] && [ "$CIRRUS_PR" = "" ] ; then diff --git a/ci/lint/container-entrypoint.sh b/ci/lint/container-entrypoint.sh index a403f923a21a5..c8519a39129cf 100755 --- a/ci/lint/container-entrypoint.sh +++ b/ci/lint/container-entrypoint.sh @@ -14,7 +14,7 @@ export PATH="/python_build/bin:${PATH}" export LINT_RUNNER_PATH="/lint_test_runner" if [ -z "$1" ]; then - LOCAL_BRANCH=1 bash -ic "./ci/lint/06_script.sh" + bash -ic "./ci/lint/06_script.sh" else exec "$@" fi diff --git a/test/lint/lint-git-commit-check.py b/test/lint/lint-git-commit-check.py index 5897a17e70b68..5dc30cc755fa7 100755 --- a/test/lint/lint-git-commit-check.py +++ b/test/lint/lint-git-commit-check.py @@ -23,31 +23,18 @@ def parse_args(): """, epilog=f""" You can manually set the commit-range with the COMMIT_RANGE - environment variable (e.g. "COMMIT_RANGE='47ba2c3...ee50c9e' - {sys.argv[0]}"). Defaults to current merge base when neither - prev-commits nor the environment variable is set. + environment variable (e.g. "COMMIT_RANGE='HEAD~n..HEAD' + {sys.argv[0]}") for the last 'n' commits. """) - - parser.add_argument("--prev-commits", "-p", required=False, help="The previous n commits to check") - return parser.parse_args() def main(): - args = parse_args() + parse_args() exit_code = 0 - if not os.getenv("COMMIT_RANGE"): - if args.prev_commits: - commit_range = "HEAD~" + args.prev_commits + "...HEAD" - else: - # This assumes that the target branch of the pull request will be master. 
- merge_base = check_output(["git", "merge-base", "HEAD", "master"], text=True, encoding="utf8").rstrip("\n") - commit_range = merge_base + "..HEAD" - else: - commit_range = os.getenv("COMMIT_RANGE") - if commit_range == "SKIP_EMPTY_NOT_A_PR": - sys.exit(0) + assert os.getenv("COMMIT_RANGE") # E.g. COMMIT_RANGE='HEAD~n..HEAD' + commit_range = os.getenv("COMMIT_RANGE") commit_hashes = check_output(["git", "log", commit_range, "--format=%H"], text=True, encoding="utf8").splitlines() From 669ea0aa4adb2875a26cd35463d48b857b366a60 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Fri, 22 Mar 2024 10:53:21 +0000 Subject: [PATCH 63/79] doc: Rename `contrib/devtools/bitcoin-tidy/README` to `README.md` This change fixes the file formatting on the GitHub website. --- contrib/devtools/bitcoin-tidy/{README => README.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename contrib/devtools/bitcoin-tidy/{README => README.md} (100%) diff --git a/contrib/devtools/bitcoin-tidy/README b/contrib/devtools/bitcoin-tidy/README.md similarity index 100% rename from contrib/devtools/bitcoin-tidy/README rename to contrib/devtools/bitcoin-tidy/README.md From 24410e560ac9add5dbae424964bc96554e6fd1a9 Mon Sep 17 00:00:00 2001 From: fanquake Date: Thu, 21 Mar 2024 17:49:01 +0000 Subject: [PATCH 64/79] tidy: set minimum CMake to 3.22 Matches https://github.com/hebasto/bitcoin/pull/123. This also also dev/ci only code. 
--- contrib/devtools/bitcoin-tidy/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/devtools/bitcoin-tidy/CMakeLists.txt b/contrib/devtools/bitcoin-tidy/CMakeLists.txt index 35e60d1d87e2b..d10d584b03d90 100644 --- a/contrib/devtools/bitcoin-tidy/CMakeLists.txt +++ b/contrib/devtools/bitcoin-tidy/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.9) +cmake_minimum_required(VERSION 3.22) project(bitcoin-tidy VERSION 1.0.0 DESCRIPTION "clang-tidy checks for Bitcoin Core") From 5b690aeb1583e207b083e83b8d882f7d1c2d2683 Mon Sep 17 00:00:00 2001 From: fanquake Date: Thu, 21 Mar 2024 11:26:59 +0000 Subject: [PATCH 65/79] tidy: remove terminfo TODO At the same time, also disable searching for CURL, LibEdit, LibXml2, ZLIB and zstd none of which we use. --- contrib/devtools/bitcoin-tidy/CMakeLists.txt | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/contrib/devtools/bitcoin-tidy/CMakeLists.txt b/contrib/devtools/bitcoin-tidy/CMakeLists.txt index d10d584b03d90..081c3050fd5c5 100644 --- a/contrib/devtools/bitcoin-tidy/CMakeLists.txt +++ b/contrib/devtools/bitcoin-tidy/CMakeLists.txt @@ -8,7 +8,13 @@ set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED True) set(CMAKE_CXX_EXTENSIONS False) -# TODO: Figure out how to avoid the terminfo check +set(CMAKE_DISABLE_FIND_PACKAGE_CURL ON) +set(CMAKE_DISABLE_FIND_PACKAGE_LibEdit ON) +set(CMAKE_DISABLE_FIND_PACKAGE_LibXml2 ON) +set(CMAKE_DISABLE_FIND_PACKAGE_Terminfo ON) +set(CMAKE_DISABLE_FIND_PACKAGE_ZLIB ON) +set(CMAKE_DISABLE_FIND_PACKAGE_zstd ON) + find_package(LLVM REQUIRED CONFIG) find_program(CLANG_TIDY_EXE NAMES "clang-tidy-${LLVM_VERSION_MAJOR}" "clang-tidy" HINTS ${LLVM_TOOLS_BINARY_DIR}) message(STATUS "Found LLVM ${LLVM_PACKAGE_VERSION}") From c3a4ea19715de292517b932d0a3b24ace72e9919 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 22 Mar 2024 13:38:23 +0000 Subject: [PATCH 66/79] tidy: set CMAKE_CXX_STANDARD to 20 --- 
contrib/devtools/bitcoin-tidy/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/devtools/bitcoin-tidy/CMakeLists.txt b/contrib/devtools/bitcoin-tidy/CMakeLists.txt index 081c3050fd5c5..f0d3f40a751c4 100644 --- a/contrib/devtools/bitcoin-tidy/CMakeLists.txt +++ b/contrib/devtools/bitcoin-tidy/CMakeLists.txt @@ -4,7 +4,7 @@ project(bitcoin-tidy VERSION 1.0.0 DESCRIPTION "clang-tidy checks for Bitcoin Co include(GNUInstallDirs) -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED True) set(CMAKE_CXX_EXTENSIONS False) From 11ee058ef5794de5f1b8e89d62bfa69c64693fff Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 22 Mar 2024 13:48:00 +0000 Subject: [PATCH 67/79] tidy: remove C compiler check Also requires disabling FFI. --- contrib/devtools/bitcoin-tidy/CMakeLists.txt | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/contrib/devtools/bitcoin-tidy/CMakeLists.txt b/contrib/devtools/bitcoin-tidy/CMakeLists.txt index f0d3f40a751c4..1260c714236ff 100644 --- a/contrib/devtools/bitcoin-tidy/CMakeLists.txt +++ b/contrib/devtools/bitcoin-tidy/CMakeLists.txt @@ -1,6 +1,10 @@ cmake_minimum_required(VERSION 3.22) -project(bitcoin-tidy VERSION 1.0.0 DESCRIPTION "clang-tidy checks for Bitcoin Core") +project(bitcoin-tidy + VERSION + 1.0.0 + DESCRIPTION "clang-tidy checks for Bitcoin Core" + LANGUAGES CXX) include(GNUInstallDirs) @@ -9,6 +13,7 @@ set(CMAKE_CXX_STANDARD_REQUIRED True) set(CMAKE_CXX_EXTENSIONS False) set(CMAKE_DISABLE_FIND_PACKAGE_CURL ON) +set(CMAKE_DISABLE_FIND_PACKAGE_FFI ON) set(CMAKE_DISABLE_FIND_PACKAGE_LibEdit ON) set(CMAKE_DISABLE_FIND_PACKAGE_LibXml2 ON) set(CMAKE_DISABLE_FIND_PACKAGE_Terminfo ON) From 032a59748295859845b2a9181ceb1c4ae70bae5c Mon Sep 17 00:00:00 2001 From: stickies-v Date: Fri, 22 Mar 2024 14:20:39 +0000 Subject: [PATCH 68/79] test: make p2p_handshake robust against timeoffset warnings The test requires that limited nodes are not peered with when 
the node's system time exceeds ~ 24h of the node's chaintip timestamp, as per PeerManagerImpl::GetDesirableServiceFlags. By patching this test to modify the timestamp of the chaintip as opposed to mocking the node's system time, we make it resilient to future commits where the node raises a warning if it detects its system time is too much out of sync with its outbound peers. See https://github.com/bitcoin/bitcoin/pull/29623 --- test/functional/p2p_handshake.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py index 3fbb940cbdc55..f0b62e291d322 100755 --- a/test/functional/p2p_handshake.py +++ b/test/functional/p2p_handshake.py @@ -62,6 +62,11 @@ def test_desirable_service_flags(self, node, service_flag_tests, desirable_servi assert (services & desirable_service_flags) == desirable_service_flags self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=False) + def generate_at_mocktime(self, time): + self.nodes[0].setmocktime(time) + self.generate(self.nodes[0], 1) + self.nodes[0].setmocktime(0) + def run_test(self): node = self.nodes[0] self.log.info("Check that lacking desired service flags leads to disconnect (non-pruned peers)") @@ -71,10 +76,10 @@ def run_test(self): DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=False) self.log.info("Check that limited peers are only desired if the local chain is close to the tip (<24h)") - node.setmocktime(int(time.time()) + 25 * 3600) # tip outside the 24h window, should fail + self.generate_at_mocktime(int(time.time()) - 25 * 3600) # tip outside the 24h window, should fail self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS], DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True) - node.setmocktime(int(time.time()) + 23 * 3600) # tip inside the 24h window, should succeed + self.generate_at_mocktime(int(time.time()) - 23 * 3600) # tip inside the 24h window, should succeed 
self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS], DESIRABLE_SERVICE_FLAGS_PRUNED, expect_disconnect=False) From 89b84ea91ae40876a52879c509c63d0bacbfaade Mon Sep 17 00:00:00 2001 From: 0xb10c Date: Tue, 12 Mar 2024 16:17:01 +0100 Subject: [PATCH 69/79] test: check that addrman seeding is successful The addpeeraddress calls can fail due to collisions. As we are using a deteministic addrman, they won't fail with the current bucket/position calculation. However, if the calculation is changed, they might collide and fail silently causing tests using `seed_addrman()` to fail. Assert that the addpeeraddress calls are successful. --- test/functional/rpc_net.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 48d86ab59dd99..2701d2471d006 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -45,14 +45,18 @@ def seed_addrman(node): """ Populate the addrman with addresses from different networks. Here 2 ipv4, 2 ipv6, 1 cjdns, 2 onion and 1 i2p addresses are added. """ - node.addpeeraddress(address="1.2.3.4", tried=True, port=8333) - node.addpeeraddress(address="2.0.0.0", port=8333) - node.addpeeraddress(address="1233:3432:2434:2343:3234:2345:6546:4534", tried=True, port=8333) - node.addpeeraddress(address="2803:0:1234:abcd::1", port=45324) - node.addpeeraddress(address="fc00:1:2:3:4:5:6:7", port=8333) - node.addpeeraddress(address="pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", tried=True, port=8333) - node.addpeeraddress(address="nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion", port=45324, tried=True) - node.addpeeraddress(address="c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p", port=8333) + # These addresses currently don't collide with a deterministic addrman. + # If the addrman positioning/bucketing is changed, these might collide + # and adding them fails. 
+ success = { "success": True } + assert_equal(node.addpeeraddress(address="1.2.3.4", tried=True, port=8333), success) + assert_equal(node.addpeeraddress(address="2.0.0.0", port=8333), success) + assert_equal(node.addpeeraddress(address="1233:3432:2434:2343:3234:2345:6546:4534", tried=True, port=8333), success) + assert_equal(node.addpeeraddress(address="2803:0:1234:abcd::1", port=45324), success) + assert_equal(node.addpeeraddress(address="fc00:1:2:3:4:5:6:7", port=8333), success) + assert_equal(node.addpeeraddress(address="pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", tried=True, port=8333), success) + assert_equal(node.addpeeraddress(address="nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion", port=45324, tried=True), success) + assert_equal(node.addpeeraddress(address="c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p", port=8333), success) class NetTest(BitcoinTestFramework): From 3047c3e3a99112c38f118034daa672db70fa4a60 Mon Sep 17 00:00:00 2001 From: 0xb10c Date: Tue, 12 Mar 2024 16:26:42 +0100 Subject: [PATCH 70/79] addrman: drop /*deterministic=*/ comment Just having deterministic is enough. 
See https://github.com/bitcoin/bitcoin/pull/29007#discussion_r1488241966 --- src/addrdb.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/addrdb.cpp b/src/addrdb.cpp index f8d4240f3fe9b..14dc314c3650f 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -195,7 +195,7 @@ util::Result> LoadAddrman(const NetGroupManager& netgro auto check_addrman = std::clamp(args.GetIntArg("-checkaddrman", DEFAULT_ADDRMAN_CONSISTENCY_CHECKS), 0, 1000000); bool deterministic = HasTestOption(args, "addrman"); // use a deterministic addrman only for tests - auto addrman{std::make_unique(netgroupman, /*deterministic=*/deterministic, /*consistency_check_ratio=*/check_addrman)}; + auto addrman{std::make_unique(netgroupman, deterministic, /*consistency_check_ratio=*/check_addrman)}; const auto start{SteadyClock::now()}; const auto path_addr{args.GetDataDirNet() / "peers.dat"}; @@ -204,7 +204,7 @@ util::Result> LoadAddrman(const NetGroupManager& netgro LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman->Size(), Ticks(SteadyClock::now() - start)); } catch (const DbNotFoundError&) { // Addrman can be in an inconsistent state after failure, reset it - addrman = std::make_unique(netgroupman, /*deterministic=*/deterministic, /*consistency_check_ratio=*/check_addrman); + addrman = std::make_unique(netgroupman, deterministic, /*consistency_check_ratio=*/check_addrman); LogPrintf("Creating peers.dat because the file was not found (%s)\n", fs::quoted(fs::PathToString(path_addr))); DumpPeerAddresses(args, *addrman); } catch (const InvalidAddrManVersionError&) { @@ -212,7 +212,7 @@ util::Result> LoadAddrman(const NetGroupManager& netgro return util::Error{strprintf(_("Failed to rename invalid peers.dat file. 
Please move or delete it and try again."))}; } // Addrman can be in an inconsistent state after failure, reset it - addrman = std::make_unique(netgroupman, /*deterministic=*/deterministic, /*consistency_check_ratio=*/check_addrman); + addrman = std::make_unique(netgroupman, deterministic, /*consistency_check_ratio=*/check_addrman); LogPrintf("Creating new peers.dat because the file version was not compatible (%s). Original backed up to peers.dat.bak\n", fs::quoted(fs::PathToString(path_addr))); DumpPeerAddresses(args, *addrman); } catch (const std::exception& e) { From 9a44a20fb790f3be5d5d5d8f5d0f48aac633b2a4 Mon Sep 17 00:00:00 2001 From: 0xb10c Date: Tue, 12 Mar 2024 16:31:39 +0100 Subject: [PATCH 71/79] init: clarify -test error See https://github.com/bitcoin/bitcoin/pull/29007#discussion_r1469388717 --- src/init.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/init.cpp b/src/init.cpp index 349d4ff1abca5..885c0673ddb7a 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1031,7 +1031,7 @@ bool AppInitParameterInteraction(const ArgsManager& args) if (args.IsArgSet("-test")) { if (chainparams.GetChainType() != ChainType::REGTEST) { - return InitError(Untranslated("-test=