@@ -135,7 +135,7 @@ static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/** Maximum number of headers to announce when relaying blocks with headers message.*/
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
/** Maximum number of unconnecting headers announcements before DoS score */
-static const int MAX_UNCONNECTING_HEADERS = 10;
+static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between local address broadcasts */
@@ -278,6 +278,9 @@ struct Peer {
        /** A bloom filter for which transactions to announce to the peer. See BIP37. */
        std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr};

+        /** A rolling bloom filter of all announced tx CInvs to this peer */
+        CRollingBloomFilter m_recently_announced_invs GUARDED_BY(NetEventsInterface::g_msgproc_mutex){INVENTORY_MAX_RECENT_RELAY, 0.000001};
+
        mutable RecursiveMutex m_tx_inventory_mutex;
        /** A filter of all the txids and wtxids that the peer has announced to
         *  us or we have announced to the peer. We use this to avoid announcing
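// Editorial note: the hunk above moves the "recently announced invs" filter out of the
// cs_main-guarded CNodeState and into the peer's transaction-relay data, guarded by
// g_msgproc_mutex. The sketch below is NOT Bitcoin Core's CRollingBloomFilter; it is a
// simplified, self-contained stand-in (two set "generations") that only illustrates the
// rolling semantics this field relies on: remember roughly the last N inserted elements
// and silently forget older ones. All names here are illustrative.
#include <cstddef>
#include <cstdint>
#include <unordered_set>

class RollingMembershipSketch
{
public:
    explicit RollingMembershipSketch(std::size_t max_elements) : m_max_per_gen{max_elements / 2 + 1} {}

    void insert(uint64_t element)
    {
        // When the current generation is full, drop the oldest one and start fresh.
        if (m_current.size() >= m_max_per_gen) {
            m_previous = std::move(m_current);
            m_current.clear();
        }
        m_current.insert(element);
    }

    bool contains(uint64_t element) const
    {
        return m_current.count(element) > 0 || m_previous.count(element) > 0;
    }

private:
    const std::size_t m_max_per_gen;
    std::unordered_set<uint64_t> m_current;
    std::unordered_set<uint64_t> m_previous;
};

// Usage: a filter remembering the last few thousand announcements, mirroring the intent
// (not the implementation or the exact capacity) of m_recently_announced_invs.
int main()
{
    RollingMembershipSketch recently_announced{3000};
    recently_announced.insert(0xabc123);
    return recently_announced.contains(0xabc123) ? 0 : 1;
}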
@@ -314,6 +317,10 @@ struct Peer {
    {
        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
    };
+    const TxRelay* GetTxRelay() const EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex)
+    {
+        return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
+    };

    /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */
    std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
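// Editorial note: the new const GetTxRelay() overload above still has to take
// m_tx_relay_mutex, which is exactly why a later hunk marks that mutex `mutable`.
// Minimal stand-alone sketch of the pattern, using std::mutex instead of Core's
// annotated Mutex; Relay/Holder are illustrative names, not from the codebase.
#include <memory>
#include <mutex>

struct Relay { int counter{0}; };

class Holder
{
public:
    Relay* Get() { std::lock_guard lock{m_mutex}; return m_relay.get(); }
    const Relay* Get() const { std::lock_guard lock{m_mutex}; return m_relay.get(); }

private:
    mutable std::mutex m_mutex; // mutable: lockable from const member functions
    std::unique_ptr<Relay> m_relay{std::make_unique<Relay>()};
};

int main()
{
    Holder holder;
    const Holder& const_view{holder};
    // The const overload is picked here; it can still lock because the mutex is mutable.
    return const_view.Get() != nullptr ? 0 : 1;
}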
@@ -385,13 +392,22 @@ struct Peer {
    /** Whether we've sent our peer a sendheaders message. **/
    std::atomic<bool> m_sent_sendheaders{false};

+    /** Length of current-streak of unconnecting headers announcements */
+    int m_num_unconnecting_headers_msgs GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
+
+    /** When to potentially disconnect peer for stalling headers download */
+    std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};
+
+    /** Whether this peer wants invs or headers (when possible) for block announcements */
+    bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
+
    explicit Peer(NodeId id, ServiceFlags our_services)
        : m_id{id}
        , m_our_services{our_services}
    {}

private:
-    Mutex m_tx_relay_mutex;
+    mutable Mutex m_tx_relay_mutex;

    /** Transaction relay data. May be a nullptr. */
    std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
@@ -414,12 +430,8 @@ struct CNodeState {
    const CBlockIndex* pindexLastCommonBlock{nullptr};
    //! The best header we have sent our peer.
    const CBlockIndex* pindexBestHeaderSent{nullptr};
-    //! Length of current-streak of unconnecting headers announcements
-    int nUnconnectingHeaders{0};
    //! Whether we've started headers synchronization with this peer.
    bool fSyncStarted{false};
-    //! When to potentially disconnect peer for stalling headers download
-    std::chrono::microseconds m_headers_sync_timeout{0us};
    //! Since when we're stalling block download progress (in microseconds), or 0.
    std::chrono::microseconds m_stalling_since{0us};
    std::list<QueuedBlock> vBlocksInFlight;
@@ -428,8 +440,6 @@ struct CNodeState {
    int nBlocksInFlight{0};
    //! Whether we consider this a preferred download peer.
    bool fPreferredDownload{false};
-    //! Whether this peer wants invs or headers (when possible) for block announcements.
-    bool fPreferHeaders{false};
    /** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */
    bool m_requested_hb_cmpctblocks{false};
    /** Whether this peer will send us cmpctblocks if we request them. */
@@ -478,9 +488,6 @@ struct CNodeState {
    //! Whether this peer is an inbound connection
    const bool m_is_inbound;

-    //! A rolling bloom filter of all announced tx CInvs to this peer.
-    CRollingBloomFilter m_recently_announced_invs = CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};
-
    CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
};
@@ -666,7 +673,8 @@ class PeerManagerImpl final : public PeerManager
    /** Potentially fetch blocks from this peer upon receipt of a new headers tip */
    void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header);
    /** Update peer state based on received headers message */
-    void UpdatePeerStateForReceivedHeaders(CNode& pfrom, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers);
+    void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
+        EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);

    void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req);
@@ -900,10 +908,12 @@ class PeerManagerImpl final : public PeerManager
    std::atomic<std::chrono::seconds> m_last_tip_update{0s};

    /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */
-    CTransactionRef FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main);
+    CTransactionRef FindTxForGetData(const Peer& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now)
+        LOCKS_EXCLUDED(cs_main) EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);

    void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc)
-        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex) LOCKS_EXCLUDED(::cs_main);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex)
+        LOCKS_EXCLUDED(::cs_main);

    /** Process a new block. Perform any post-processing housekeeping */
    void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked);
@@ -2248,7 +2258,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
    }
}

-CTransactionRef PeerManagerImpl::FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now)
+CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now)
{
    auto txinfo = m_mempool.info(gtxid);
    if (txinfo.tx) {
@@ -2263,7 +2273,7 @@ CTransactionRef PeerManagerImpl::FindTxForGetData(const CNode& peer, const GenTx
    {
        LOCK(cs_main);
        // Otherwise, the transaction must have been announced recently.
-        if (State(peer.GetId())->m_recently_announced_invs.contains(gtxid.GetHash())) {
+        if (Assume(peer.GetTxRelay())->m_recently_announced_invs.contains(gtxid.GetHash())) {
            // If it was, it can be relayed from either the mempool...
            if (txinfo.tx) return std::move(txinfo.tx);
            // ... or the relay pool.
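// Editorial note: FindTxForGetData now consults the requesting peer's own relay state
// (via GetTxRelay()) instead of the cs_main-guarded CNodeState. Below is a hedged,
// stand-alone restatement of the serving policy the surrounding hunks sketch out: hand a
// transaction to a peer only if it could plausibly know about it -- it was in the mempool
// before the peer's last MEMPOOL request, it has been around long enough to relay
// unconditionally, or we announced it to this peer recently. The TxView type, the
// MayServeTx name and the 2-minute constant are illustrative assumptions, not Core's.
#include <chrono>

using namespace std::chrono_literals;

struct TxView {
    bool in_mempool{false};
    std::chrono::seconds mempool_entry_time{0s};
    bool recently_announced_to_peer{false};
};

bool MayServeTx(const TxView& tx, std::chrono::seconds last_mempool_req, std::chrono::seconds now)
{
    constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min; // assumed placeholder value
    if (tx.in_mempool && (tx.mempool_entry_time <= last_mempool_req ||
                          tx.mempool_entry_time <= now - UNCONDITIONAL_RELAY_DELAY)) {
        return true;
    }
    // Otherwise the transaction must have been announced to this peer recently.
    return tx.recently_announced_to_peer;
}

int main()
{
    const TxView tx{true, 100s, false};
    return MayServeTx(tx, /*last_mempool_req=*/150s, /*now=*/200s) ? 0 : 1;
}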
@@ -2306,7 +2316,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
                continue;
            }

-            CTransactionRef tx = FindTxForGetData(pfrom, ToGenTxid(inv), mempool_req, now);
+            CTransactionRef tx = FindTxForGetData(peer, ToGenTxid(inv), mempool_req, now);
            if (tx) {
                // WTX and WITNESS_TX imply we serialize with witness
                int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
@@ -2330,8 +2340,7 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
                for (const uint256& parent_txid : parent_ids_to_add) {
                    // Relaying a transaction with a recent but unconfirmed parent.
                    if (WITH_LOCK(tx_relay->m_tx_inventory_mutex, return !tx_relay->m_tx_inventory_known_filter.contains(parent_txid))) {
-                        LOCK(cs_main);
-                        State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
+                        tx_relay->m_recently_announced_invs.insert(parent_txid);
                    }
                }
            } else {
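// Editorial note: several hunks in this diff fold an explicit LOCK(...) block into a
// single WITH_LOCK(mutex, expr) expression that holds the lock only for that one
// expression and returns its value. A minimal stand-in for that macro is sketched below;
// Core's real WITH_LOCK additionally carries thread-safety annotations, so treat this
// purely as an illustration of the lambda trick.
#include <mutex>

#define WITH_LOCK_SKETCH(cs, code) ([&]() -> decltype(auto) { std::lock_guard lock{cs}; code; }())

int main()
{
    std::mutex m;
    int guarded_value{41};
    // Evaluate one expression under the lock and hand its result back to the caller.
    const int copy = WITH_LOCK_SKETCH(m, return guarded_value + 1);
    return copy == 42 ? 0 : 1;
}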
@@ -2430,36 +2439,35 @@ arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
 *
 * We'll send a getheaders message in response to try to connect the chain.
 *
- * The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
+ * The peer can send up to MAX_NUM_UNCONNECTING_HEADERS_MSGS in a row that
 * don't connect before given DoS points.
 *
 * Once a headers message is received that is valid and does connect,
- * nUnconnectingHeaders gets reset back to 0.
+ * m_num_unconnecting_headers_msgs gets reset back to 0.
 */
void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer,
        const std::vector<CBlockHeader>& headers)
{
-    LOCK(cs_main);
-    CNodeState *nodestate = State(pfrom.GetId());
-
-    nodestate->nUnconnectingHeaders++;
+    peer.m_num_unconnecting_headers_msgs++;
    // Try to fill in the missing headers.
-    if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), peer)) {
-        LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
+    const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};
+    if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
+        LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
            headers[0].GetHash().ToString(),
            headers[0].hashPrevBlock.ToString(),
-            m_chainman.m_best_header->nHeight,
-            pfrom.GetId(), nodestate->nUnconnectingHeaders);
+            best_header->nHeight,
+            pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
    }
+
    // Set hashLastUnknownBlock for this peer, so that if we
    // eventually get the headers - even from a different peer -
    // we can use this peer to download.
-    UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
+    WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));

    // The peer may just be broken, so periodically assign DoS points if this
    // condition persists.
-    if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
-        Misbehaving(peer, 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders));
+    if (peer.m_num_unconnecting_headers_msgs % MAX_NUM_UNCONNECTING_HEADERS_MSGS == 0) {
+        Misbehaving(peer, 20, strprintf("%d non-connecting headers", peer.m_num_unconnecting_headers_msgs));
    }
}
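// Editorial note: HandleFewUnconnectingHeaders above keeps a per-peer streak counter and
// only assigns misbehaviour points on every MAX_NUM_UNCONNECTING_HEADERS_MSGS-th
// unconnecting headers message, so an occasional out-of-order announcement is tolerated
// while a persistently broken peer is eventually punished. Stand-alone sketch of just
// that counting rule; PeerSketch and RegisterUnconnectingHeaders are simplified stand-ins.
#include <cstdio>

constexpr int MAX_NUM_UNCONNECTING_HEADERS_MSGS_SKETCH{10};

struct PeerSketch { int num_unconnecting_headers_msgs{0}; };

// Returns true when this unconnecting message crosses a penalty threshold.
bool RegisterUnconnectingHeaders(PeerSketch& peer)
{
    peer.num_unconnecting_headers_msgs++;
    return peer.num_unconnecting_headers_msgs % MAX_NUM_UNCONNECTING_HEADERS_MSGS_SKETCH == 0;
}

int main()
{
    PeerSketch peer;
    for (int i = 0; i < 25; ++i) {
        if (RegisterUnconnectingHeaders(peer)) {
            std::printf("penalize after %d non-connecting headers\n", peer.num_unconnecting_headers_msgs);
        }
    }
    // A headers message that does connect resets the streak,
    // as UpdatePeerStateForReceivedHeaders does in the next hunk.
    peer.num_unconnecting_headers_msgs = 0;
    return 0;
}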
@@ -2707,15 +2715,16 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c
 * whether that header was new and whether the headers message was full,
 * update the state we keep for the peer.
 */
-void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom,
+void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
        const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
{
+    if (peer.m_num_unconnecting_headers_msgs > 0) {
+        LogPrint(BCLog::NET, "peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n", pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
+    }
+    peer.m_num_unconnecting_headers_msgs = 0;
+
    LOCK(cs_main);
    CNodeState *nodestate = State(pfrom.GetId());
-    if (nodestate->nUnconnectingHeaders > 0) {
-        LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders);
-    }
-    nodestate->nUnconnectingHeaders = 0;

    UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash());
@@ -2900,7 +2909,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
        }
    }

-    UpdatePeerStateForReceivedHeaders(pfrom, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);
+    UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == MAX_HEADERS_RESULTS);

    // Consider immediately downloading blocks.
    HeadersDirectFetchBlocks(pfrom, peer, *pindexLast);
@@ -3444,8 +3453,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
    }

    if (msg_type == NetMsgType::SENDHEADERS) {
-        LOCK(cs_main);
-        State(pfrom.GetId())->fPreferHeaders = true;
+        peer->m_prefers_headers = true;
        return;
    }
@@ -5426,7 +5434,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
                LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height);

                state.fSyncStarted = true;
-                state.m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
+                peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
                    (
                     // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling
                     // to maintain precision
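// Editorial note: the timeout written into peer->m_headers_sync_timeout above is "a base
// allowance plus a small per-expected-header allowance", where the number of headers
// still expected is estimated from how far the best known header lags behind the current
// time. Hedged sketch of that arithmetic; the two timeout constants are assumptions for
// illustration, and the exact expression lives in net_processing.
#include <chrono>
#include <cstdio>

using namespace std::chrono_literals;

std::chrono::microseconds HeadersSyncTimeout(std::chrono::microseconds current_time,
                                             std::chrono::seconds best_header_time,
                                             std::chrono::seconds now)
{
    constexpr auto TIMEOUT_BASE = std::chrono::microseconds{15min};     // assumed base allowance
    constexpr auto TIMEOUT_PER_HEADER = std::chrono::microseconds{1ms}; // assumed per-header allowance
    constexpr auto POW_TARGET_SPACING = 10min;                          // Bitcoin's block interval

    // Expected number of headers left to download.
    const int64_t expected_headers = (now - best_header_time) / POW_TARGET_SPACING;
    return current_time + TIMEOUT_BASE + TIMEOUT_PER_HEADER * expected_headers;
}

int main()
{
    // One year of missing headers => roughly 52560 expected headers.
    const auto timeout = HeadersSyncTimeout(0us, 0s, std::chrono::seconds{365 * 24 * 60 * 60});
    std::printf("timeout in seconds: %lld\n",
                static_cast<long long>(std::chrono::duration_cast<std::chrono::seconds>(timeout).count()));
    return 0;
}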
@@ -5451,7 +5459,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
        // add all to the inv queue.
        LOCK(peer->m_block_inv_mutex);
        std::vector<CBlock> vHeaders;
-        bool fRevertToInv = ((!state.fPreferHeaders &&
+        bool fRevertToInv = ((!peer->m_prefers_headers &&
                             (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) ||
                             peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE);
        const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
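// Editorial note: the fRevertToInv computation above now reads the relocated
// m_prefers_headers flag from Peer. The decision it encodes: fall back to plain inv
// announcements when the peer has not asked for headers announcements (and a single
// compact-block announcement is not an option), or when too many blocks are queued to
// announce via headers. Self-contained restatement of that boolean, with illustrative
// names and a locally defined copy of the announce limit.
#include <cstddef>

constexpr std::size_t MAX_BLOCKS_TO_ANNOUNCE_SKETCH{8};

bool RevertToInv(bool peer_prefers_headers, bool peer_requested_hb_cmpctblocks, std::size_t blocks_queued)
{
    return (!peer_prefers_headers &&
            (!peer_requested_hb_cmpctblocks || blocks_queued > 1)) ||
           blocks_queued > MAX_BLOCKS_TO_ANNOUNCE_SKETCH;
}

int main()
{
    // A headers-preferring peer with a short queue keeps getting headers announcements.
    return RevertToInv(/*peer_prefers_headers=*/true, /*peer_requested_hb_cmpctblocks=*/false, /*blocks_queued=*/2) ? 1 : 0;
}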
@@ -5528,7 +5536,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
                m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
            }
            state.pindexBestHeaderSent = pBestIndex;
-        } else if (state.fPreferHeaders) {
+        } else if (peer->m_prefers_headers) {
            if (vHeaders.size() > 1) {
                LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
                        vHeaders.size(),
@@ -5682,7 +5690,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
                }
                if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue;
                // Send
-                State(pto->GetId())->m_recently_announced_invs.insert(hash);
+                tx_relay->m_recently_announced_invs.insert(hash);
                vInv.push_back(inv);
                nRelayedTransactions++;
                {
@@ -5753,10 +5761,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
            }
        }
        // Check for headers sync timeouts
-        if (state.fSyncStarted && state.m_headers_sync_timeout < std::chrono::microseconds::max()) {
+        if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
            // Detect whether this is a stalling initial-headers-sync peer
            if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) {
-                if (current_time > state.m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
+                if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) {
                    // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer,
                    // and we have others we could be using instead.
                    // Note: If all our peers are inbound, then we won't
@@ -5775,13 +5783,13 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
                    // this peer (eventually).
                    state.fSyncStarted = false;
                    nSyncStarted--;
-                    state.m_headers_sync_timeout = 0us;
+                    peer->m_headers_sync_timeout = 0us;
                }
            }
        } else {
            // After we've caught up once, reset the timeout so we can't trigger
            // disconnect later.
-            state.m_headers_sync_timeout = std::chrono::microseconds::max();
+            peer->m_headers_sync_timeout = std::chrono::microseconds::max();
        }
    }