Skip to content

Commit d4833e9

Browse files
committed
Merge bitcoin/bitcoin#26140: refactor: Move CNodeState members guarded by g_msgproc_mutex to Peer
3a060ae scripted-diff: Rename nUnconnectingHeaders and fPreferHeaders (dergoegge) 279c53d [net processing] Move m_recently_announced_invs from CNodeState to Peer (dergoegge) 938a8e2 [net processing] Annotate m_recently_announced_invs as guarded by g_msgproc_mutex (dergoegge) 8a2cb1f [net processing] Move fPreferHeaders from CNodeState to Peer (dergoegge) 3605011 [net processing] Annotate fPreferHeaders as guarded by g_msgproc_mutex (dergoegge) 4b84e50 [net processing] Move m_headers_sync_timeout from CNodeState to Peer (dergoegge) 689b747 [net processing] Annotate m_headers_sync_timeout as guarded by g_msgproc_mutex (dergoegge) d8c0d1c [net processing] Move nUnconnectingHeaders from CNodeState to Peer (dergoegge) 5f80d8d [net processing] Annotate nUnconnectingHeaders as guarded by g_msgproc_mutex (dergoegge) 1d87137 [validation] Annotate ChainstateManager::m_best_header as guarded by cs_main (dergoegge) Pull request description: `nUnconnectingHeaders`, `m_headers_sync_timeout`, `fPreferHeaders` and `m_recently_announced_invs` are currently all `CNodeState` members even though they are only ever accessed from the message processing thread (therefore sufficiently guarded exclusively by `g_msgproc_mutex`). `CNodeState` exists purely to hold validation-specific state guarded by `cs_main` that is accessed by multiple threads. This PR adds thread-safety annotations for the above mentioned `CNodeState` members and moves them to `Peer`. ACKs for top commit: glozow: code review ACK 3a060ae, as in I am convinced these members shouldn't be guarded by cs_main and belong in Peer/TxRelay. clang checked the annotations for me. hebasto: ACK 3a060ae Tree-SHA512: 2db27c03f2c6ed36ad7dfbb4f862eeed3c3e57f845cf8abb9e7cada36f976257311892020bbcff513fbe662a881c93270e3a126946ceb0c3f94213b546bcaa81
2 parents 84f4ac3 + 3a060ae commit d4833e9

File tree

3 files changed

+62
-54
lines changed

3 files changed

+62
-54
lines changed

src/net_processing.cpp

Lines changed: 56 additions & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
135135
/** Maximum number of headers to announce when relaying blocks with headers message.*/
136136
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
137137
/** Maximum number of unconnecting headers announcements before DoS score */
138-
static const int MAX_UNCONNECTING_HEADERS = 10;
138+
static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10;
139139
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
140140
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
141141
/** Average delay between local address broadcasts */
@@ -278,6 +278,9 @@ struct Peer {
278278
/** A bloom filter for which transactions to announce to the peer. See BIP37. */
279279
std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};
280280

281+
/** A rolling bloom filter of all announced tx CInvs to this peer */
282+
CRollingBloomFilter m_recently_announced_invs GUARDED_BY(NetEventsInterface::g_msgproc_mutex){INVENTORY_MAX_RECENT_RELAY, 0.000001};
283+
281284
mutable RecursiveMutex m_tx_inventory_mutex;
282285
/** A filter of all the txids and wtxids that the peer has announced to
283286
* us or we have announced to the peer. We use this to avoid announcing
@@ -314,6 +317,10 @@ struct Peer {
314317
{
315318
return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
316319
};
320+
const TxRelay* GetTxRelay() const EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
321+
{
322+
return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
323+
};
317324

318325
/** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */
319326
std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
@@ -385,13 +392,22 @@ struct Peer {
385392
/** Whether we've sent our peer a sendheaders message. **/
386393
std::atomic<bool> m_sent_sendheaders{false};
387394

395+
/** Length of current-streak of unconnecting headers announcements */
396+
int m_num_unconnecting_headers_msgs GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
397+
398+
/** When to potentially disconnect peer for stalling headers download */
399+
std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};
400+
401+
/** Whether this peer wants invs or headers (when possible) for block announcements */
402+
bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
403+
388404
explicit Peer(NodeId id, ServiceFlags our_services)
389405
: m_id{id}
390406
, m_our_services{our_services}
391407
{}
392408

393409
private:
394-
Mutex m_tx_relay_mutex;
410+
mutable Mutex m_tx_relay_mutex;
395411

396412
/** Transaction relay data. May be a nullptr. */
397413
std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
@@ -414,12 +430,8 @@ struct CNodeState {
414430
const CBlockIndex* pindexLastCommonBlock{nullptr};
415431
//! The best header we have sent our peer.
416432
const CBlockIndex* pindexBestHeaderSent{nullptr};
417-
//! Length of current-streak of unconnecting headers announcements
418-
int nUnconnectingHeaders{0};
419433
//! Whether we've started headers synchronization with this peer.
420434
bool fSyncStarted{false};
421-
//! When to potentially disconnect peer for stalling headers download
422-
std::chrono::microseconds m_headers_sync_timeout{0us};
423435
//! Since when we're stalling block download progress (in microseconds), or 0.
424436
std::chrono::microseconds m_stalling_since{0us};
425437
std::list<QueuedBlock> vBlocksInFlight;
@@ -428,8 +440,6 @@ struct CNodeState {
428440
int nBlocksInFlight{0};
429441
//! Whether we consider this a preferred download peer.
430442
bool fPreferredDownload{false};
431-
//! Whether this peer wants invs or headers (when possible) for block announcements.
432-
bool fPreferHeaders{false};
433443
/** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */
434444
bool m_requested_hb_cmpctblocks{false};
435445
/** Whether this peer will send us cmpctblocks if we request them. */
@@ -478,9 +488,6 @@ struct CNodeState {
478488
//! Whether this peer is an inbound connection
479489
const bool m_is_inbound;
480490

481-
//! A rolling bloom filter of all announced tx CInvs to this peer.
482-
CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};
483-
484491
CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
485492
};
486493

@@ -666,7 +673,8 @@ class PeerManagerImpl final : public PeerManager
666673
/** Potentially fetch blocks from this peer upon receipt of a new headers tip */
667674
void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
668675
/** Update peer state based on received headers message */
669-
void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers);
676+
void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
677+
EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
670678

671679
void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
672680

@@ -900,10 +908,12 @@ class PeerManagerImpl final : public PeerManager
900908
std::atomic<std::chrono::seconds> m_last_tip_update{0s};
901909

902910
/** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */
903-
CTransactionRef FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main);
911+
CTransactionRef FindTxForGetData(const Peer& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now)
912+
LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);
904913

905914
void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
906-
EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex) LOCKS_EXCLUDED(::cs_main);
915+
EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex)
916+
LOCKS_EXCLUDED(::cs_main);
907917

908918
/** Process a new block. Perform any post-processing housekeeping */
909919
void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);
@@ -2248,7 +2258,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
22482258
}
22492259
}
22502260

2251-
CTransactionRef PeerManagerImpl::FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now)
2261+
CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now)
22522262
{
22532263
auto txinfo = m_mempool.info(gtxid);
22542264
if (txinfo.tx) {
@@ -2263,7 +2273,7 @@ CTransactionRef PeerManagerImpl::FindTxForGetData(const CNode& peer, const GenTx
22632273
{
22642274
LOCK(cs_main);
22652275
// Otherwise, the transaction must have been announced recently.
2266-
if (State(peer.GetId())->m_recently_announced_invs.contains(gtxid.GetHash())) {
2276+
if (Assume(peer.GetTxRelay())->m_recently_announced_invs.contains(gtxid.GetHash())) {
22672277
// If it was, it can be relayed from either the mempool...
22682278
if (txinfo.tx) return std::move(txinfo.tx);
22692279
// ... or the relay pool.
@@ -2306,7 +2316,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
23062316
continue;
23072317
}
23082318

2309-
CTransactionRef tx = FindTxForGetData(pfrom, ToGenTxid(inv), mempool_req, now);
2319+
CTransactionRef tx = FindTxForGetData(peer, ToGenTxid(inv), mempool_req, now);
23102320
if (tx) {
23112321
// WTX and WITNESS_TX imply we serialize with witness
23122322
int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
@@ -2330,8 +2340,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
23302340
for (const uint256& parent_txid : parent_ids_to_add) {
23312341
// Relaying a transaction with a recent but unconfirmed parent.
23322342
if (WITH_LOCK(tx_relay->m_tx_inventory_mutex, return !tx_relay->m_tx_inventory_known_filter.contains(parent_txid))) {
2333-
LOCK(cs_main);
2334-
State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
2343+
tx_relay->m_recently_announced_invs.insert(parent_txid);
23352344
}
23362345
}
23372346
} else {
@@ -2430,36 +2439,35 @@ arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
24302439
*
24312440
* We'll send a getheaders message in response to try to connect the chain.
24322441
*
2433-
* The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
2442+
* The peer can send up to MAX_NUM_UNCONNECTING_HEADERS_MSGS in a row that
24342443
* don't connect before given DoS points.
24352444
*
24362445
* Once a headers message is received that is valid and does connect,
2437-
* nUnconnectingHeaders gets reset back to 0.
2446+
* m_num_unconnecting_headers_msgs gets reset back to 0.
24382447
*/
24392448
void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer,
24402449
const std::vector<CBlockHeader>& headers)
24412450
{
2442-
LOCK(cs_main);
2443-
CNodeState *nodestate = State(pfrom.GetId());
2444-
2445-
nodestate->nUnconnectingHeaders++;
2451+
peer.m_num_unconnecting_headers_msgs++;
24462452
// Try to fill in the missing headers.
2447-
if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), peer)) {
2448-
LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
2453+
const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};
2454+
if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
2455+
LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
24492456
headers[0].GetHash().ToString(),
24502457
headers[0].hashPrevBlock.ToString(),
2451-
m_chainman.m_best_header->nHeight,
2452-
pfrom.GetId(), nodestate->nUnconnectingHeaders);
2458+
best_header->nHeight,
2459+
pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
24532460
}
2461+
24542462
// Set hashLastUnknownBlock for this peer, so that if we
24552463
// eventually get the headers - even from a different peer -
24562464
// we can use this peer to download.
2457-
UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
2465+
WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
24582466

24592467
// The peer may just be broken, so periodically assign DoS points if this
24602468
// condition persists.
2461-
if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
2462-
Misbehaving(peer, 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
2469+
if (peer.m_num_unconnecting_headers_msgs % MAX_NUM_UNCONNECTING_HEADERS_MSGS == 0) {
2470+
Misbehaving(peer, 20, strprintf("%d non-connecting headers", peer.m_num_unconnecting_headers_msgs));
24632471
}
24642472
}
24652473

@@ -2707,15 +2715,16 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c
27072715
* whether that header was new and whether the headers message was full,
27082716
* update the state we keep for the peer.
27092717
*/
2710-
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
2718+
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
27112719
const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
27122720
{
2721+
if (peer.m_num_unconnecting_headers_msgs > 0) {
2722+
LogPrint(BCLog::NET, "peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n", pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
2723+
}
2724+
peer.m_num_unconnecting_headers_msgs = 0;
2725+
27132726
LOCK(cs_main);
27142727
CNodeState *nodestate = State(pfrom.GetId());
2715-
if (nodestate->nUnconnectingHeaders > 0) {
2716-
LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
2717-
}
2718-
nodestate->nUnconnectingHeaders = 0;
27192728

27202729
UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());
27212730

@@ -2900,7 +2909,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
29002909
}
29012910
}
29022911

2903-
UpdatePeerStateForReceivedHeaders(pfrom, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
2912+
UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
29042913

29052914
// Consider immediately downloading blocks.
29062915
HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
@@ -3444,8 +3453,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
34443453
}
34453454

34463455
if (msg_type == NetMsgType::SENDHEADERS) {
3447-
LOCK(cs_main);
3448-
State(pfrom.GetId())->fPreferHeaders = true;
3456+
peer->m_prefers_headers = true;
34493457
return;
34503458
}
34513459

@@ -5426,7 +5434,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
54265434
LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);
54275435

54285436
state.fSyncStarted = true;
5429-
state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
5437+
peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
54305438
(
54315439
// Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
54325440
// to maintain precision
@@ -5451,7 +5459,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
54515459
// add all to the inv queue.
54525460
LOCK(peer->m_block_inv_mutex);
54535461
std::vector<CBlock> vHeaders;
5454-
bool fRevertToInv = ((!state.fPreferHeaders &&
5462+
bool fRevertToInv = ((!peer->m_prefers_headers &&
54555463
(!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) ||
54565464
peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
54575465
const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
@@ -5528,7 +5536,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
55285536
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
55295537
}
55305538
state.pindexBestHeaderSent = pBestIndex;
5531-
} else if (state.fPreferHeaders) {
5539+
} else if (peer->m_prefers_headers) {
55325540
if (vHeaders.size() > 1) {
55335541
LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
55345542
vHeaders.size(),
@@ -5682,7 +5690,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
56825690
}
56835691
if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
56845692
// Send
5685-
State(pto->GetId())->m_recently_announced_invs.insert(hash);
5693+
tx_relay->m_recently_announced_invs.insert(hash);
56865694
vInv.push_back(inv);
56875695
nRelayedTransactions++;
56885696
{
@@ -5753,10 +5761,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
57535761
}
57545762
}
57555763
// Check for headers sync timeouts
5756-
if (state.fSyncStarted && state.m_headers_sync_timeout < std::chrono::microseconds::max()) {
5764+
if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
57575765
// Detect whether this is a stalling initial-headers-sync peer
57585766
if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) {
5759-
if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
5767+
if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
57605768
// Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
57615769
// and we have others we could be using instead.
57625770
// Note: If all our peers are inbound, then we won't
@@ -5775,13 +5783,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
57755783
// this peer (eventually).
57765784
state.fSyncStarted = false;
57775785
nSyncStarted--;
5778-
state.m_headers_sync_timeout = 0us;
5786+
peer->m_headers_sync_timeout = 0us;
57795787
}
57805788
}
57815789
} else {
57825790
// After we've caught up once, reset the timeout so we can't trigger
57835791
// disconnect later.
5784-
state.m_headers_sync_timeout = std::chrono::microseconds::max();
5792+
peer->m_headers_sync_timeout = std::chrono::microseconds::max();
57855793
}
57865794
}
57875795

src/validation.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -997,7 +997,7 @@ class ChainstateManager
997997
std::set<CBlockIndex*> m_failed_blocks;
998998

999999
/** Best header we've seen so far (used for getheaders queries' starting points). */
1000-
CBlockIndex* m_best_header = nullptr;
1000+
CBlockIndex* m_best_header GUARDED_BY(::cs_main){nullptr};
10011001

10021002
//! The total number of bytes available for us to use across all in-memory
10031003
//! coins caches. This will be split somehow across chainstates.

test/functional/p2p_sendheaders.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -546,15 +546,15 @@ def test_nonnull_locators(self, test_node, inv_node):
546546
blocks = []
547547
# Now we test that if we repeatedly don't send connecting headers, we
548548
# don't go into an infinite loop trying to get them to connect.
549-
MAX_UNCONNECTING_HEADERS = 10
550-
for _ in range(MAX_UNCONNECTING_HEADERS + 1):
549+
MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10
550+
for _ in range(MAX_NUM_UNCONNECTING_HEADERS_MSGS + 1):
551551
blocks.append(create_block(tip, create_coinbase(height), block_time))
552552
blocks[-1].solve()
553553
tip = blocks[-1].sha256
554554
block_time += 1
555555
height += 1
556556

557-
for i in range(1, MAX_UNCONNECTING_HEADERS):
557+
for i in range(1, MAX_NUM_UNCONNECTING_HEADERS_MSGS):
558558
# Send a header that doesn't connect, check that we get a getheaders.
559559
with p2p_lock:
560560
test_node.last_message.pop("getheaders", None)
@@ -568,8 +568,8 @@ def test_nonnull_locators(self, test_node, inv_node):
568568
blocks = blocks[2:]
569569

570570
# Now try to see how many unconnecting headers we can send
571-
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
572-
for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
571+
# before we get disconnected. Should be 5*MAX_NUM_UNCONNECTING_HEADERS_MSGS
572+
for i in range(5 * MAX_NUM_UNCONNECTING_HEADERS_MSGS - 1):
573573
# Send a header that doesn't connect, check that we get a getheaders.
574574
with p2p_lock:
575575
test_node.last_message.pop("getheaders", None)

0 commit comments

Comments (0)