@@ -676,15 +676,8 @@ class CNode
     // Setting fDisconnect to true will cause the node to be disconnected the
     // next time DisconnectNodes() runs
     std::atomic_bool fDisconnect{false};
-    // We use fRelayTxes for two purposes -
-    // a) it allows us to not relay tx invs before receiving the peer's version message
-    // b) the peer may tell us in its version message that we should not relay tx invs
-    //    unless it loads a bloom filter.
-    bool fRelayTxes GUARDED_BY(cs_filter){false};
     bool fSentAddr{false};
     CSemaphoreGrant grantOutbound;
-    mutable CCriticalSection cs_filter;
-    std::unique_ptr<CBloomFilter> pfilter PT_GUARDED_BY(cs_filter);
     std::atomic<int> nRefCount{0};

     const uint64_t nKeyedNetGroup;
@@ -706,24 +699,43 @@ class CNode
     int64_t nNextAddrSend GUARDED_BY(cs_sendProcessing){0};
     int64_t nNextLocalAddrSend GUARDED_BY(cs_sendProcessing){0};

-    // inventory based relay
-    CRollingBloomFilter filterInventoryKnown GUARDED_BY(cs_inventory);
-    // Set of transaction ids we still have to announce.
-    // They are sorted by the mempool before relay, so the order is not important.
-    std::set<uint256> setInventoryTxToSend;
     // List of block ids we still have announce.
     // There is no final sorting before sending, as they are always sent immediately
     // and in the order requested.
     std::vector<uint256> vInventoryBlockToSend GUARDED_BY(cs_inventory);
     CCriticalSection cs_inventory;
-    int64_t nNextInvSend{0};
+
+    struct TxRelay {
+        TxRelay() { pfilter = MakeUnique<CBloomFilter>(); }
+        mutable CCriticalSection cs_filter;
+        // We use fRelayTxes for two purposes -
+        // a) it allows us to not relay tx invs before receiving the peer's version message
+        // b) the peer may tell us in its version message that we should not relay tx invs
+        //    unless it loads a bloom filter.
+        bool fRelayTxes GUARDED_BY(cs_filter){false};
+        std::unique_ptr<CBloomFilter> pfilter PT_GUARDED_BY(cs_filter) GUARDED_BY(cs_filter);
+
+        mutable CCriticalSection cs_tx_inventory;
+        CRollingBloomFilter filterInventoryKnown GUARDED_BY(cs_tx_inventory){50000, 0.000001};
+        // Set of transaction ids we still have to announce.
+        // They are sorted by the mempool before relay, so the order is not important.
+        std::set<uint256> setInventoryTxToSend;
+        // Used for BIP35 mempool sending
+        bool fSendMempool GUARDED_BY(cs_tx_inventory){false};
+        // Last time a "MEMPOOL" request was serviced.
+        std::atomic<int64_t> timeLastMempoolReq{0};
+        int64_t nNextInvSend{0};
+
+        CCriticalSection cs_feeFilter;
+        // Minimum fee rate with which to filter inv's to this node
+        CAmount minFeeFilter GUARDED_BY(cs_feeFilter){0};
+        CAmount lastSentFeeFilter{0};
+        int64_t nextSendTimeFeeFilter{0};
+    };
+
+    TxRelay m_tx_relay;

     // Used for headers announcements - unfiltered blocks to relay
     std::vector<uint256> vBlockHashesToAnnounce GUARDED_BY(cs_inventory);
-    // Used for BIP35 mempool sending
-    bool fSendMempool GUARDED_BY(cs_inventory){false};
-
-    // Last time a "MEMPOOL" request was serviced.
-    std::atomic<int64_t> timeLastMempoolReq{0};

     // Block and TXN accept times
     std::atomic<int64_t> nLastBlockTime{0};
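
The hunk above gathers the per-peer transaction-relay state into a nested TxRelay struct with its own locks (cs_filter, cs_tx_inventory, cs_feeFilter), while the block inventory stays at the peer level under cs_inventory. Below is a minimal standalone sketch of that layout, assuming plain std::mutex in place of CCriticalSection and placeholder types for the hash and filter classes; it is illustrative only and not the Bitcoin Core code.

// Sketch only: std::mutex stands in for CCriticalSection, and the
// GUARDED_BY / PT_GUARDED_BY thread-safety annotations are omitted.
#include <cstdint>
#include <memory>
#include <mutex>
#include <set>
#include <vector>

using Hash256 = std::uint64_t;  // placeholder for the real uint256 type
struct BloomFilterStub {};      // placeholder for CBloomFilter

class PeerSketch
{
public:
    struct TxRelay {
        TxRelay() : pfilter(std::make_unique<BloomFilterStub>()) {}

        // Guards the relay flag and the (optional) bloom filter.
        mutable std::mutex cs_filter;
        bool fRelayTxes{false};
        std::unique_ptr<BloomFilterStub> pfilter;

        // Guards the transaction announcement queue, independent of the
        // block inventory lock held by the peer itself.
        mutable std::mutex cs_tx_inventory;
        std::set<Hash256> setInventoryTxToSend;

        // Guards the fee-filter state.
        std::mutex cs_feeFilter;
        std::int64_t minFeeFilter{0};
    };

    TxRelay m_tx_relay;

    // Block inventory stays at the peer level, under its own lock.
    mutable std::mutex cs_inventory;
    std::vector<Hash256> vInventoryBlockToSend;
};

int main()
{
    PeerSketch peer;
    {
        std::lock_guard<std::mutex> lock(peer.m_tx_relay.cs_tx_inventory);
        peer.m_tx_relay.setInventoryTxToSend.insert(0xabc);
    }
    {
        std::lock_guard<std::mutex> lock(peer.cs_inventory);
        peer.vInventoryBlockToSend.push_back(0xdef);
    }
    return 0;
}
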
@@ -740,11 +752,6 @@ class CNode
     std::atomic<int64_t> nMinPingUsecTime{std::numeric_limits<int64_t>::max()};
     // Whether a ping is requested.
     std::atomic<bool> fPingQueued{false};
-    // Minimum fee rate with which to filter inv's to this node
-    CAmount minFeeFilter GUARDED_BY(cs_feeFilter){0};
-    CCriticalSection cs_feeFilter;
-    CAmount lastSentFeeFilter{0};
-    int64_t nextSendTimeFeeFilter{0};

     std::set<uint256> orphan_work_set;

@@ -842,19 +849,20 @@ class CNode
     void AddInventoryKnown(const CInv& inv)
     {
         {
-            LOCK(cs_inventory);
-            filterInventoryKnown.insert(inv.hash);
+            LOCK(m_tx_relay.cs_tx_inventory);
+            m_tx_relay.filterInventoryKnown.insert(inv.hash);
         }
     }

     void PushInventory(const CInv& inv)
     {
-        LOCK(cs_inventory);
         if (inv.type == MSG_TX) {
-            if (!filterInventoryKnown.contains(inv.hash)) {
-                setInventoryTxToSend.insert(inv.hash);
+            LOCK(m_tx_relay.cs_tx_inventory);
+            if (!m_tx_relay.filterInventoryKnown.contains(inv.hash)) {
+                m_tx_relay.setInventoryTxToSend.insert(inv.hash);
             }
         } else if (inv.type == MSG_BLOCK) {
+            LOCK(cs_inventory);
             vInventoryBlockToSend.push_back(inv.hash);
         }
     }
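
This last hunk narrows the locking in PushInventory: the transaction branch now takes the tx-inventory lock inside m_tx_relay, and only the block branch takes cs_inventory. A self-contained sketch of that type-dependent locking follows, again with std::mutex and placeholder types standing in for the real classes; it is purely illustrative, not the actual implementation.

// Sketch of type-dependent locking: each branch locks only the state it touches.
#include <cstdint>
#include <mutex>
#include <set>
#include <vector>

using Hash256 = std::uint64_t;     // placeholder for uint256
enum class InvType { TX, BLOCK };  // placeholder for MSG_TX / MSG_BLOCK

struct NodeSketch {
    struct TxRelay {
        std::mutex cs_tx_inventory;
        std::set<Hash256> setInventoryTxToSend;
        std::set<Hash256> filterInventoryKnown; // stands in for the rolling bloom filter
    } m_tx_relay;

    std::mutex cs_inventory;
    std::vector<Hash256> vInventoryBlockToSend;

    void PushInventory(InvType type, Hash256 hash)
    {
        if (type == InvType::TX) {
            // Only the tx-relay lock is taken for transaction announcements.
            std::lock_guard<std::mutex> lock(m_tx_relay.cs_tx_inventory);
            if (m_tx_relay.filterInventoryKnown.count(hash) == 0) {
                m_tx_relay.setInventoryTxToSend.insert(hash);
            }
        } else {
            // Block announcements still use the peer-level inventory lock.
            std::lock_guard<std::mutex> lock(cs_inventory);
            vInventoryBlockToSend.push_back(hash);
        }
    }
};

int main()
{
    NodeSketch node;
    node.PushInventory(InvType::TX, 0x1);
    node.PushInventory(InvType::BLOCK, 0x2);
    return 0;
}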