@@ -283,7 +283,7 @@ struct Slab {

 /// A wait-free guard around a pointer resource to be created dynamically if
 /// space is available and freed once there are no more users.
-template <typename T> struct GuardPtr {
+struct GuardPtr {
 private:
   struct RefCounter {
     // Indicates that the object is in its deallocation phase and thus invalid.
@@ -339,22 +339,22 @@ template <typename T> struct GuardPtr {
     cpp::Atomic<uint64_t> counter{0};
   };

-  cpp::Atomic<T *> ptr{nullptr};
+  cpp::Atomic<Slab *> ptr{nullptr};
   RefCounter ref{};

   // Should be called by a single lane for each different pointer.
   template <typename... Args>
-  T *try_lock_impl(uint32_t n, uint64_t &count, Args &&...args) {
-    T *expected = ptr.load(cpp::MemoryOrder::RELAXED);
+  Slab *try_lock_impl(uint32_t n, uint64_t &count, Args &&...args) {
+    Slab *expected = ptr.load(cpp::MemoryOrder::RELAXED);
     if (!expected &&
-        ptr.compare_exchange_strong(expected, reinterpret_cast<T *>(SENTINEL),
-                                    cpp::MemoryOrder::RELAXED,
-                                    cpp::MemoryOrder::RELAXED)) {
+        ptr.compare_exchange_strong(
+            expected, reinterpret_cast<Slab *>(SENTINEL),
+            cpp::MemoryOrder::RELAXED, cpp::MemoryOrder::RELAXED)) {
       count = cpp::numeric_limits<uint64_t>::max();
-      void *raw = impl::rpc_allocate(sizeof(T));
+      void *raw = impl::rpc_allocate(sizeof(Slab));
       if (!raw)
         return nullptr;
-      T *mem = new (raw) T(cpp::forward<Args>(args)...);
+      Slab *mem = new (raw) Slab(cpp::forward<Args>(args)...);

       cpp::atomic_thread_fence(cpp::MemoryOrder::RELEASE);
       ptr.store(mem, cpp::MemoryOrder::RELAXED);
@@ -364,7 +364,7 @@ template <typename T> struct GuardPtr {
       return mem;
     }

-    if (!expected || expected == reinterpret_cast<T *>(SENTINEL))
+    if (!expected || expected == reinterpret_cast<Slab *>(SENTINEL))
       return nullptr;

     if (!ref.acquire(n, count))
@@ -379,10 +379,10 @@ template <typename T> struct GuardPtr {
   // The uniform mask represents which lanes share the same pointer. For each
   // uniform value we elect a leader to handle it on behalf of the other lanes.
   template <typename... Args>
-  T *try_lock(uint64_t lane_mask, uint64_t uniform, uint64_t &count,
-              Args &&...args) {
+  Slab *try_lock(uint64_t lane_mask, uint64_t uniform, uint64_t &count,
+                 Args &&...args) {
     count = 0;
-    T *result = nullptr;
+    Slab *result = nullptr;
     if (gpu::get_lane_id() == uint32_t(cpp::countr_zero(uniform)))
       result = try_lock_impl(cpp::popcount(uniform), count,
                              cpp::forward<Args>(args)...);
@@ -403,8 +403,8 @@ template <typename T> struct GuardPtr {
     cpp::atomic_thread_fence(cpp::MemoryOrder::RELEASE);
     if (gpu::get_lane_id() == uint32_t(cpp::countr_zero(mask)) &&
         ref.release(cpp::popcount(mask))) {
-      T *p = ptr.load(cpp::MemoryOrder::RELAXED);
-      p->~T();
+      Slab *p = ptr.load(cpp::MemoryOrder::RELAXED);
+      p->~Slab();
       impl::rpc_free(p);
       cpp::atomic_thread_fence(cpp::MemoryOrder::RELEASE);
       ptr.store(nullptr, cpp::MemoryOrder::RELAXED);
@@ -417,7 +417,7 @@ template <typename T> struct GuardPtr {
 };

 // The global array used to search for a valid slab to allocate from.
-static GuardPtr<Slab> slots[ARRAY_SIZE] = {};
+static GuardPtr slots[ARRAY_SIZE] = {};

 // Tries to find a slab in the table that can support the given chunk size.
 static Slab *find_slab(uint32_t chunk_size) {
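
With GuardPtr no longer a template, each slot in the table wraps a Slab directly and callers drive it through try_lock/unlock using the lane and uniform masks described in the comments above. A minimal caller-side sketch of that pattern, assuming the gpu::get_lane_mask and gpu::match_any helpers from the GPU support utilities and treating the arguments forwarded to the Slab constructor as placeholders:

static void example_lock_unlock(uint32_t chunk_size, uint32_t index) {
  // Lanes requesting the same chunk size share one slab; the leader lane
  // (lowest set bit of the uniform mask) creates or references it on behalf
  // of the others, matching the comment on try_lock above.
  uint64_t lane_mask = gpu::get_lane_mask();
  uint64_t uniform = gpu::match_any(lane_mask, chunk_size) & lane_mask;
  uint64_t count = 0;
  // The trailing arguments are forwarded to the Slab constructor; they are
  // placeholders in this sketch.
  if (Slab *slab = slots[index].try_lock(lane_mask, uniform, count, chunk_size,
                                         index)) {
    (void)slab; // ... hand out chunks from the slab here ...
    slots[index].unlock(lane_mask, uniform);
  }
}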