@@ -515,10 +515,10 @@ struct Peer {
     /** Set of txids to reconsider once their parent transactions have been accepted **/
     std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);
 
-    /** Protects vRecvGetData **/
+    /** Protects m_getdata_requests **/
     Mutex m_getdata_requests_mutex;
     /** Work queue of items requested by this peer **/
-    std::deque<CInv> vRecvGetData GUARDED_BY(m_getdata_requests_mutex);
+    std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
 
     Peer(NodeId id) : m_id(id) {}
 };
@@ -1759,7 +1759,7 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa
 {
     AssertLockNotHeld(cs_main);
 
-    std::deque<CInv>::iterator it = peer.vRecvGetData.begin();
+    std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
     std::vector<CInv> vNotFound;
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
 
@@ -1771,7 +1771,7 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa
     // Process as many TX items from the front of the getdata queue as
     // possible, since they're common and it's efficient to batch process
     // them.
-    while (it != peer.vRecvGetData.end() && it->IsGenTxMsg()) {
+    while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) {
         if (interruptMsgProc) return;
         // The send buffer provides backpressure. If there's no space in
         // the buffer, pause processing until the next call.
@@ -1819,7 +1819,7 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa
 
     // Only process one BLOCK item per call, since they're uncommon and can be
     // expensive to process.
-    if (it != peer.vRecvGetData.end() && !pfrom.fPauseSend) {
+    if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
         const CInv &inv = *it++;
         if (inv.IsGenBlkMsg()) {
             ProcessGetBlockData(pfrom, chainparams, inv, connman);
@@ -1828,7 +1828,7 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa
         // and continue processing the queue on the next call.
     }
 
-    peer.vRecvGetData.erase(peer.vRecvGetData.begin(), it);
+    peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
 
     if (!vNotFound.empty()) {
         // Let the peer know that we didn't find what it asked for, so it doesn't
@@ -2812,7 +2812,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
 
         {
             LOCK(peer->m_getdata_requests_mutex);
-            peer->vRecvGetData.insert(peer->vRecvGetData.end(), vInv.begin(), vInv.end());
+            peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end());
             ProcessGetData(pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
         }
 
@@ -2933,7 +2933,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
             CInv inv;
             WITH_LOCK(cs_main, inv.type = State(pfrom.GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK);
             inv.hash = req.blockhash;
-            WITH_LOCK(peer->m_getdata_requests_mutex, peer->vRecvGetData.push_back(inv));
+            WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv));
             // The message processing loop will go around again (without pausing) and we'll respond then
             return;
         }
@@ -3886,7 +3886,7 @@ bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgP
 
     {
         LOCK(peer->m_getdata_requests_mutex);
-        if (!peer->vRecvGetData.empty()) {
+        if (!peer->m_getdata_requests.empty()) {
             ProcessGetData(*pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc);
         }
     }
@@ -3902,10 +3902,10 @@ bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgP
         return false;
 
     // this maintains the order of responses
-    // and prevents vRecvGetData to grow unbounded
+    // and prevents m_getdata_requests to grow unbounded
     {
         LOCK(peer->m_getdata_requests_mutex);
-        if (!peer->vRecvGetData.empty()) return true;
+        if (!peer->m_getdata_requests.empty()) return true;
     }
 
     {
@@ -3941,7 +3941,7 @@ bool PeerManager::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgP
         if (interruptMsgProc) return false;
         {
             LOCK(peer->m_getdata_requests_mutex);
-            if (!peer->vRecvGetData.empty()) fMoreWork = true;
+            if (!peer->m_getdata_requests.empty()) fMoreWork = true;
         }
     } catch (const std::exception& e) {
         LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name());
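
For reference, the pattern this diff converges on is a per-peer getdata work queue that is only ever read or modified while holding its own dedicated mutex, instead of relying on cs_main. Below is a minimal standalone sketch of that pattern, not Bitcoin Core code: it assumes plain std::mutex and std::lock_guard in place of Bitcoin Core's annotated Mutex/GUARDED_BY/LOCK machinery, and the names PeerSketch, Inv and OnGetData are illustrative only.

// Standalone sketch (C++17). Mirrors the locking pattern from the diff:
// append incoming GETDATA requests under the peer's own mutex, then drain
// the front of the queue while still holding that same lock.
#include <deque>
#include <iostream>
#include <mutex>
#include <string>

struct Inv {
    std::string hash; // stand-in for CInv
};

struct PeerSketch {
    std::mutex m_getdata_requests_mutex; // protects m_getdata_requests
    std::deque<Inv> m_getdata_requests;  // work queue of items requested by this peer
};

void OnGetData(PeerSketch& peer, const std::deque<Inv>& incoming)
{
    std::lock_guard<std::mutex> lock(peer.m_getdata_requests_mutex);
    peer.m_getdata_requests.insert(peer.m_getdata_requests.end(), incoming.begin(), incoming.end());
    while (!peer.m_getdata_requests.empty()) {
        std::cout << "serving " << peer.m_getdata_requests.front().hash << '\n';
        peer.m_getdata_requests.pop_front();
    }
}

int main()
{
    PeerSketch peer;
    OnGetData(peer, {{"txid-1"}, {"txid-2"}});
}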