@@ -229,6 +229,154 @@ template <typename T> struct Atomic {
   LIBC_INLINE void set(T rhs) { val = rhs; }
 };

+template <typename T> struct AtomicRef {
+  static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
+                    is_move_constructible_v<T> && is_copy_assignable_v<T> &&
+                    is_move_assignable_v<T>,
+                "AtomicRef<T> requires T to be trivially copyable, copy "
+                "constructible, move constructible, copy assignable, "
+                "and move assignable.");
+
+  static_assert(cpp::has_unique_object_representations_v<T>,
+                "AtomicRef<T> only supports types with unique object "
+                "representations.");
+
+private:
+  T *ptr;
+
+  LIBC_INLINE static int order(MemoryOrder mem_ord) {
+    return static_cast<int>(mem_ord);
+  }
+
+  LIBC_INLINE static int scope(MemoryScope mem_scope) {
+    return static_cast<int>(mem_scope);
+  }
+
+public:
+  // Constructor from T reference
+  LIBC_INLINE explicit constexpr AtomicRef(T &obj) : ptr(&obj) {}
+
+  // Non-standard implicit conversion from T*
+  LIBC_INLINE constexpr AtomicRef(T *obj) : ptr(obj) {}
+
+  LIBC_INLINE AtomicRef(const AtomicRef &) = default;
+  LIBC_INLINE AtomicRef &operator=(const AtomicRef &) = default;
+
+  // Atomic load
+  LIBC_INLINE operator T() const { return load(); }
+
+  LIBC_INLINE T
+  load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
+    T res;
+#if __has_builtin(__scoped_atomic_load)
+    __scoped_atomic_load(ptr, &res, order(mem_ord), scope(mem_scope));
+#else
+    __atomic_load(ptr, &res, order(mem_ord));
+#endif
+    return res;
+  }
+
+  // Atomic store
+  LIBC_INLINE T operator=(T rhs) const {
+    store(rhs);
+    return rhs;
+  }
+
+  LIBC_INLINE void
+  store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
+#if __has_builtin(__scoped_atomic_store)
+    __scoped_atomic_store(ptr, &rhs, order(mem_ord), scope(mem_scope));
+#else
+    __atomic_store(ptr, &rhs, order(mem_ord));
+#endif
+  }
+
+  // Atomic compare exchange (strong)
+  LIBC_INLINE bool compare_exchange_strong(
+      T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
+    return __atomic_compare_exchange(ptr, &expected, &desired, false,
+                                     order(mem_ord), order(mem_ord));
+  }
+
+  // Atomic compare exchange (strong, separate success/failure memory orders)
+  LIBC_INLINE bool compare_exchange_strong(
+      T &expected, T desired, MemoryOrder success_order,
+      MemoryOrder failure_order,
+      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
+    return __atomic_compare_exchange(ptr, &expected, &desired, false,
+                                     order(success_order),
+                                     order(failure_order));
+  }
+
+  // Atomic exchange
+  LIBC_INLINE T
+  exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
+    T ret;
+#if __has_builtin(__scoped_atomic_exchange)
+    __scoped_atomic_exchange(ptr, &desired, &ret, order(mem_ord),
+                             scope(mem_scope));
+#else
+    __atomic_exchange(ptr, &desired, &ret, order(mem_ord));
+#endif
+    return ret;
+  }
+
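+  // Atomic fetch-add (integral T only)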
+  LIBC_INLINE T fetch_add(
+      T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
+#if __has_builtin(__scoped_atomic_fetch_add)
+    return __scoped_atomic_fetch_add(ptr, increment, order(mem_ord),
+                                     scope(mem_scope));
+#else
+    return __atomic_fetch_add(ptr, increment, order(mem_ord));
+#endif
+  }
+
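+  // Atomic fetch-or (integral T only)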
+  LIBC_INLINE T
+  fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
+#if __has_builtin(__scoped_atomic_fetch_or)
+    return __scoped_atomic_fetch_or(ptr, mask, order(mem_ord),
+                                    scope(mem_scope));
+#else
+    return __atomic_fetch_or(ptr, mask, order(mem_ord));
+#endif
+  }
+
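+  // Atomic fetch-and (integral T only)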
+  LIBC_INLINE T fetch_and(
+      T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
+#if __has_builtin(__scoped_atomic_fetch_and)
+    return __scoped_atomic_fetch_and(ptr, mask, order(mem_ord),
+                                     scope(mem_scope));
+#else
+    return __atomic_fetch_and(ptr, mask, order(mem_ord));
+#endif
+  }
+
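+  // Atomic fetch-sub (integral T only)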
+  LIBC_INLINE T fetch_sub(
+      T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+      [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) const {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
+#if __has_builtin(__scoped_atomic_fetch_sub)
+    return __scoped_atomic_fetch_sub(ptr, decrement, order(mem_ord),
+                                     scope(mem_scope));
+#else
+    return __atomic_fetch_sub(ptr, decrement, order(mem_ord));
+#endif
+  }
+};
+
+// Permit CTAD when generating an atomic reference.
+template <typename T> AtomicRef(T &) -> AtomicRef<T>;
+
 // Issue a thread fence with the given memory ordering.
 LIBC_INLINE void atomic_thread_fence(
     MemoryOrder mem_ord,
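For reviewers, a minimal usage sketch of the new AtomicRef follows; it is not part of the patch. The include path and the LIBC_NAMESPACE::cpp namespace are assumptions based on where the surrounding Atomic<T> support code normally lives, and counter / increment_and_get are hypothetical names.

// Usage sketch only; paths, namespace, and names are assumed, not taken from the patch.
#include "src/__support/CPP/atomic.h"

using LIBC_NAMESPACE::cpp::AtomicRef;

// Plain storage owned elsewhere; AtomicRef only provides an atomic view of it.
static int counter = 0;

int increment_and_get() {
  AtomicRef ref(counter); // CTAD deduces AtomicRef<int>.
  ref.fetch_add(1);       // Defaults to SEQ_CST ordering and DEVICE scope.
  return ref.load();      // Atomic read back through the same view.
}

Binding through the non-standard T* converting constructor would look the same; the deduction guide added at the end of the patch is what lets AtomicRef ref(counter) work without spelling out AtomicRef<int>.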