From c323991bf1262fefdd5e343c413b674b60f49084 Mon Sep 17 00:00:00 2001
From: Matthew Cummings
Date: Wed, 9 Nov 2022 16:17:20 -0600
Subject: [PATCH] vtc: reduce getheaders spam by serializing getheaders
 requests per peer

This is gracefully borrowed from: https://github.com/dogecoin/dogecoin/pull/2417

Introduces a counter for getheaders requests that have been sent to a
peer but are pending response, reducing the number of parallel requests
a node pushes out to its peers when needing to sync large amounts of
headers.

All getheaders requests are serialized during initial sync, except when
a non-connecting header is received, allowing the node to resolve
issues with peers sending faulty blocks using the DoS mechanism, and
when we get an inv for a block that we do not know, because it's
possible we're only connected to legacy nodes that do not implement
header announcement properly.

(cherry picked from commit ed4fb9875b41cacee0a0dfea2a74cda69a2d760d, #211)
---
 src/net.cpp            |  2 ++
 src/net.h              |  5 ++++-
 src/net_processing.cpp | 50 ++++++++++++++++++++++++++++++++++++------
 3 files changed, 49 insertions(+), 8 deletions(-)

diff --git a/src/net.cpp b/src/net.cpp
index 1b0e7369a3..b69a46bee9 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -3030,6 +3030,8 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, std::shared_ptr<Sock> s
       m_conn_type(conn_type_in),
       nLocalServices(nLocalServicesIn)
 {
+    nPendingHeaderRequests = 0;
+
     if (inbound_onion) assert(conn_type_in == ConnectionType::INBOUND);
     if (conn_type_in != ConnectionType::BLOCK_RELAY) {
         m_tx_relay = std::make_unique<TxRelay>();
diff --git a/src/net.h b/src/net.h
index 2e0a45b874..3853de0481 100644
--- a/src/net.h
+++ b/src/net.h
@@ -576,7 +576,10 @@ class CNode
         std::chrono::microseconds m_next_send_feefilter{0};
     };
 
-    // m_tx_relay == nullptr if we're not relaying transactions with this peer
+    // Counts getheaders requests sent to this peer
+    std::atomic<int> nPendingHeaderRequests;
+
+    // m_tx_relay == nullptr if we're not relaying transactions with this peer
     std::unique_ptr<TxRelay> m_tx_relay;
 
     /** UNIX epoch time of the last block received from this peer that we had
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 6e829b3edb..23bb02fd89 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -329,6 +329,7 @@ class PeerManagerImpl final : public PeerManager
     void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) override;
     void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv,
                         const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override;
+    void RequestHeadersFrom(CNode& pto, CConnman& connman, const CBlockIndex* pindex, uint256 untilHash, bool fforceQuery);
 
 private:
     void _RelayTransaction(const uint256& txid, const uint256& wtxid)
@@ -1123,6 +1124,26 @@ void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count
 } // namespace
+
+// Do not request headers from a peer we are
+// already requesting headers from, unless forced.
+void PeerManagerImpl::RequestHeadersFrom(CNode& pto, CConnman& connman, const CBlockIndex* pindex, uint256 untilHash, bool fforceQuery)
+{
+    if (pto.nPendingHeaderRequests > 0) {
+        if (fforceQuery) {
+            LogPrint(BCLog::NET, "forcing getheaders request (%d) to peer=%d (%d open)\n",
+                pindex->nHeight, pto.GetId(), pto.nPendingHeaderRequests);
+        } else {
+            LogPrint(BCLog::NET, "dropped getheaders request (%d) to peer=%d\n", pindex->nHeight, pto.GetId());
+            return;
+        }
+    }
+    const CNetMsgMaker msgMaker(pto.GetCommonVersion());
+    LogPrint(BCLog::NET, "getheaders request (%d) to peer=%d (%d open)\n", pindex->nHeight, pto.GetId(), pto.nPendingHeaderRequests);
+    connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindex), untilHash));
+    pto.nPendingHeaderRequests += 1;
+
+}
 
 void PeerManagerImpl::PushNodeVersion(CNode& pnode)
 {
     // Note that pnode->GetLocalServices() is a reflection of the local
     // services we were offering when the CNode object was created for this
@@ -2133,7 +2154,8 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
         // nUnconnectingHeaders gets reset back to 0.
         if (!m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
             nodestate->nUnconnectingHeaders++;
-            m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexBestHeader), uint256()));
+            // Allow a single getheaders query before triggering DoS
+            RequestHeadersFrom(pfrom, m_connman, pindexBestHeader, uint256(), true);
             LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                     headers[0].GetHash().ToString(),
                     headers[0].hashPrevBlock.ToString(),
@@ -2197,9 +2219,13 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer,
             // Headers message had its maximum size; the peer may have more headers.
             // TODO: optimize: if pindexLast is an ancestor of m_chainman.ActiveChain().Tip or pindexBestHeader, continue
             // from there instead.
+            //
+            // Do not allow multiple getheaders queries in parallel at
+            // this point - makes sure that any parallel queries will end here,
+            // preventing "getheaders" spam.
LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height); - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexLast), uint256())); + RequestHeadersFrom(pfrom, m_connman, pindexLast, uint256(), false); } // If this set of headers is valid and ends in a block with at least as @@ -3060,7 +3086,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } if (best_block != nullptr) { - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexBestHeader), *best_block)); + // We force this check, in case we're only connected to nodes that send invs + RequestHeadersFrom(pfrom, m_connman, pindexBestHeader, *best_block, true); LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId()); } @@ -3538,8 +3565,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (!m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock)) { // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers - if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) - m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexBestHeader), uint256())); + if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) { + RequestHeadersFrom(pfrom, m_connman, pindexBestHeader, uint256(), true); + } return; } @@ -3821,7 +3849,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, std::vector headers; - // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. + if (pfrom.nPendingHeaderRequests > 0) { + pfrom.nPendingHeaderRequests -= 1; + } + + // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. unsigned int nCount = ReadCompactSize(vRecv); if (nCount > MAX_HEADERS_RESULTS) { Misbehaving(pfrom.GetId(), 20, strprintf("headers message size = %u", nCount)); @@ -4676,10 +4708,14 @@ bool PeerManagerImpl::SendMessages(CNode* pto) the peer's known best block. This wouldn't be possible if we requested starting at pindexBestHeader and got back an empty response. */ + + // Make sure that if we are already processing an inv + // or header message from this peer caused by a new block being + // mined at chaintip, we do not send another getheaders request if (pindexStart->pprev) pindexStart = pindexStart->pprev; LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height); - m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(pindexStart), uint256())); + RequestHeadersFrom(*pto, m_connman, pindexStart, uint256(), false); } }