@@ -129,6 +129,7 @@ class CTxMemPoolEntry
    int64_t GetSigOpCostWithAncestors() const { return nSigOpCostWithAncestors; }

    mutable size_t vTxHashesIdx; //!< Index in mempool's vTxHashes
+    mutable uint64_t m_epoch; //!< epoch when last touched, useful for graph algorithms
};

// Helpers for modifying CTxMemPool::mapTx, which is a boost multi_index.
@@ -453,6 +454,8 @@ class CTxMemPool
    mutable int64_t lastRollingFeeUpdate;
    mutable bool blockSinceLastRollingFeeBump;
    mutable double rollingMinimumFeeRate; //!< minimum fee to get into the pool, decreases exponentially
+    mutable uint64_t m_epoch;
+    mutable bool m_has_epoch_guard;

    void trackPackageRemoved(const CFeeRate& rate) EXCLUSIVE_LOCKS_REQUIRED(cs);
@@ -736,6 +739,55 @@ class CTxMemPool
     * removal.
     */
    void removeUnchecked(txiter entry, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
+public:
+    /** EpochGuard: RAII-style guard for using epoch-based graph traversal algorithms.
+     * When walking ancestors or descendants, we generally want to avoid
+     * visiting the same transactions twice. Some traversal algorithms use
+     * std::set (or setEntries) to deduplicate the transactions we visit.
+     * However, use of std::set is algorithmically undesirable because it both
+     * adds an asymptotic factor of O(log n) to traversal cost and triggers O(n)
+     * more dynamic memory allocations.
+     * In many algorithms we can replace std::set with an internal mempool
+     * counter to track the time (or, "epoch") that we began a traversal, and
+     * check + update a per-transaction epoch for each transaction we look at to
+     * determine if that transaction has not yet been visited during the current
+     * traversal's epoch.
+     * Algorithms using std::set can be replaced on a one-by-one basis.
+     * The two techniques are not fundamentally incompatible across the codebase.
+     * Generally speaking, however, the remaining use of std::set for mempool
+     * traversal should be viewed as a TODO for replacement with an epoch-based
+     * traversal, rather than a preference for std::set over epochs in that
+     * algorithm.
+     */
+    class EpochGuard {
+        const CTxMemPool& pool;
+    public:
+        EpochGuard(const CTxMemPool& in);
+        ~EpochGuard();
+    };
+    // N.B. GetFreshEpoch modifies mutable state via the EpochGuard construction
+    // (and later destruction)
+    EpochGuard GetFreshEpoch() const EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+    /** visited marks a CTxMemPoolEntry as having been traversed
+     * during the lifetime of the most recently created EpochGuard
+     * and returns false if we are the first visitor, true otherwise.
+     *
+     * An EpochGuard must be held when visited is called or an assert will be
+     * triggered.
+     */
+    bool visited(txiter it) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
+        assert(m_has_epoch_guard);
+        bool ret = it->m_epoch >= m_epoch;
+        it->m_epoch = std::max(it->m_epoch, m_epoch);
+        return ret;
+    }
+
+    bool visited(Optional<txiter> it) const EXCLUSIVE_LOCKS_REQUIRED(cs) {
+        assert(m_has_epoch_guard);
+        return !it || visited(*it);
+    }
};

/**
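The out-of-line `EpochGuard` definitions (presumably in txmempool.cpp) are not shown in these hunks. A minimal sketch of what they could look like, inferred only from the header semantics above (`visited()` asserts `m_has_epoch_guard` and compares each entry's `m_epoch` against the pool's) and not copied from the actual patch:

```cpp
// Sketch under the assumptions stated above; not the actual txmempool.cpp change.
#include <txmempool.h>

#include <cassert>

CTxMemPool::EpochGuard CTxMemPool::GetFreshEpoch() const
{
    return EpochGuard(*this);
}

CTxMemPool::EpochGuard::EpochGuard(const CTxMemPool& in) : pool(in)
{
    assert(!pool.m_has_epoch_guard); // nested guards would make visited() results ambiguous
    ++pool.m_epoch;                  // marks from earlier traversals are now stale
    pool.m_has_epoch_guard = true;
}

CTxMemPool::EpochGuard::~EpochGuard()
{
    ++pool.m_epoch;                  // marks made under this guard can no longer satisfy m_epoch >= pool epoch
    pool.m_has_epoch_guard = false;
}
```

Because `EpochGuard` is a nested class, it can touch the pool's private mutable counters directly; bumping `m_epoch` on both construction and destruction guarantees no mark from one traversal leaks into the next.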
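As a caller-side illustration of the pattern the comment describes, here is a hypothetical descendant walk. `CollectDescendantsExample` is invented for this sketch (the real conversion candidates are existing members such as `CalculateDescendants`); `GetMemPoolChildren` is the existing private accessor for a transaction's in-mempool children:

```cpp
// Hypothetical member function, for illustration only; not part of the patch.
void CTxMemPool::CollectDescendantsExample(txiter root, std::vector<txiter>& out) const
{
    AssertLockHeld(cs);
    const auto guard = GetFreshEpoch(); // one epoch per traversal; older marks are stale
    std::vector<txiter> stack{root};
    while (!stack.empty()) {
        const txiter it = stack.back();
        stack.pop_back();
        if (visited(it)) continue;      // O(1) check-and-mark replaces a std::set lookup + insert
        out.push_back(it);
        for (const txiter& child : GetMemPoolChildren(it)) {
            stack.push_back(child);     // duplicates pushed here are filtered by visited() on pop
        }
    }
}
```

Relative to accumulating into a `setEntries`, this avoids the O(log n) set operations and per-node allocations; the only per-walk scratch state is the explicit stack.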