@@ -584,7 +584,8 @@ using llvm::hash_value;
584584/// process. It has no destructor, to avoid generating useless global destructor
585585/// calls. The memory it allocates can be freed by calling clear() with no
586586/// outstanding readers, but this won't destroy the static mutex it uses.
587- template <class ElemTy > struct ConcurrentReadableHashMap {
587+ template <class ElemTy , class MutexTy = StaticMutex>
588+ struct ConcurrentReadableHashMap {
588589 // We use memcpy and don't call destructors. Make sure the elements will put
589590 // up with this.
590591 static_assert (std::is_trivially_copyable<ElemTy>::value,
@@ -593,6 +594,9 @@ template <class ElemTy> struct ConcurrentReadableHashMap {
593594 " Elements must not have destructors (they won't be called)." );
594595
595596private:
597+ // A scoped lock type to use on MutexTy.
598+ using ScopedLockTy = ScopedLockT<MutexTy, false >;
599+
596600 /// The reciprocal of the fraction of free space at which we expand the table.
597601 /// A value of 4 means that we resize once the table is 1 - 1/4 = 75% full.
598602 static const size_t ResizeProportion = 4 ;
@@ -752,7 +756,7 @@ template <class ElemTy> struct ConcurrentReadableHashMap {
752756 std::atomic<IndexStorage *> Indices{nullptr };
753757
754758 // / The writer lock, which must be taken before any mutation of the table.
755- StaticMutex WriterLock;
759+ MutexTy WriterLock;
756760
757761 // / The list of pointers to be freed once no readers are active.
758762 FreeListNode *FreeList{nullptr };
@@ -966,7 +970,7 @@ template <class ElemTy> struct ConcurrentReadableHashMap {
966970 // / The return value is ignored when `created` is `false`.
967971 template <class KeyTy , typename Call>
968972 void getOrInsert (KeyTy key, const Call &call) {
969- StaticScopedLock guard (WriterLock);
973+ ScopedLockTy guard (WriterLock);
970974
971975 auto *indices = Indices.load (std::memory_order_relaxed);
972976 if (!indices)
@@ -1018,7 +1022,7 @@ template <class ElemTy> struct ConcurrentReadableHashMap {
10181022 // / Clear the hash table, freeing (when safe) all memory currently used for
10191023 // / indices and elements.
10201024 void clear () {
1021- StaticScopedLock guard (WriterLock);
1025+ ScopedLockTy guard (WriterLock);
10221026
10231027 auto *indices = Indices.load (std::memory_order_relaxed);
10241028 auto *elements = Elements.load (std::memory_order_relaxed);
@@ -1054,28 +1058,38 @@ template <class ElemTy> struct HashMapElementWrapper {
10541058/// by allocating them separately and storing pointers to them. The elements of
10551059/// the hash table are instances of HashMapElementWrapper. A new getOrInsert
10561060/// method is provided that directly returns the stable element pointer.
1057- template <class ElemTy , class Allocator >
1061+ template <class ElemTy , class Allocator , class MutexTy = StaticMutex >
10581062struct StableAddressConcurrentReadableHashMap
1059- : public ConcurrentReadableHashMap<HashMapElementWrapper<ElemTy>> {
1063+ : public ConcurrentReadableHashMap<HashMapElementWrapper<ElemTy>, MutexTy > {
10601064 // Implicitly trivial destructor.
10611065 ~StableAddressConcurrentReadableHashMap () = default ;
10621066
1067+ std::atomic<ElemTy *> LastFound{nullptr };
1068+
10631069 /// Get or insert an element for the given key and arguments. Returns the
10641070 /// pointer to the existing or new element, and a bool indicating whether the
10651071 /// element was created. When false, the element already existed before the
10661072 /// call.
10671073 template <class KeyTy , class ... ArgTys>
10681074 std::pair<ElemTy *, bool > getOrInsert (KeyTy key, ArgTys &&...args) {
1075+ // See if this is a repeat of the previous search.
1076+ if (auto lastFound = LastFound.load (std::memory_order_acquire))
1077+ if (lastFound->matchesKey (key))
1078+ return {lastFound, false };
1079+
10691080 // Optimize for the case where the value already exists.
1070- if (auto wrapper = this ->snapshot ().find (key))
1081+ if (auto wrapper = this ->snapshot ().find (key)) {
1082+ LastFound.store (wrapper->Ptr , std::memory_order_relaxed);
10711083 return {wrapper->Ptr , false };
1084+ }
10721085
10731086 // No such element. Insert if needed. Note: another thread may have inserted
10741087 // it in the meantime, so we still have to handle both cases!
10751088 ElemTy *ptr = nullptr ;
10761089 bool outerCreated = false ;
1077- ConcurrentReadableHashMap<HashMapElementWrapper<ElemTy>>::getOrInsert (
1078- key, [&](HashMapElementWrapper<ElemTy> *wrapper, bool created) {
1090+ ConcurrentReadableHashMap<HashMapElementWrapper<ElemTy>, MutexTy>::
1091+ getOrInsert (key, [&](HashMapElementWrapper<ElemTy> *wrapper,
1092+ bool created) {
10791093 if (created) {
10801094 // Created the indirect entry. Allocate the actual storage.
10811095 size_t allocSize =
@@ -1088,9 +1102,17 @@ struct StableAddressConcurrentReadableHashMap
10881102 outerCreated = created;
10891103 return true ; // Keep the new entry.
10901104 });
1105+ LastFound.store (ptr, std::memory_order_relaxed);
10911106 return {ptr, outerCreated};
10921107 }
10931108
1109+ template <class KeyTy > ElemTy *find (const KeyTy &key) {
1110+ auto result = this ->snapshot ().find (key);
1111+ if (!result)
1112+ return nullptr ;
1113+ return result->Ptr ;
1114+ }
1115+
10941116private:
10951117 // Clearing would require deallocating elements, which we don't support.
10961118 void clear () = delete;
0 commit comments