@@ -40,15 +40,27 @@ enum class MemoryScope : int {
 };
 
 template <typename T> struct Atomic {
-  // For now, we will restrict to only arithmetic types.
-  static_assert(is_arithmetic_v<T>, "Only arithmetic types can be atomic.");
+  static_assert(is_trivially_copyable_v<T> && is_copy_constructible_v<T> &&
+                    is_move_constructible_v<T> && is_copy_assignable_v<T> &&
+                    is_move_assignable_v<T>,
+                "atomic<T> requires T to be trivially copyable, copy "
+                "constructible, move constructible, copy assignable, "
+                "and move assignable.");
 
 private:
   // The value stored should be appropriately aligned so that
   // hardware instructions used to perform atomic operations work
   // correctly.
   static constexpr int ALIGNMENT = sizeof(T) > alignof(T) ? sizeof(T)
                                                           : alignof(T);
+  // Type conversion helpers to avoid long C++ style casts.
+  LIBC_INLINE static int order(MemoryOrder mem_ord) {
+    return static_cast<int>(mem_ord);
+  }
+
+  LIBC_INLINE static int scope(MemoryScope mem_scope) {
+    return static_cast<int>(mem_scope);
+  }
 
 public:
   using value_type = T;
@@ -59,131 +71,146 @@ template <typename T> struct Atomic {
   // operations should be performed using the atomic methods however.
   alignas(ALIGNMENT) value_type val;
 
-  constexpr Atomic() = default;
+  LIBC_INLINE constexpr Atomic() = default;
 
   // Initializes the value without using atomic operations.
-  constexpr Atomic(value_type v) : val(v) {}
+  LIBC_INLINE constexpr Atomic(value_type v) : val(v) {}
 
-  Atomic(const Atomic &) = delete;
-  Atomic &operator=(const Atomic &) = delete;
+  LIBC_INLINE Atomic(const Atomic &) = delete;
+  LIBC_INLINE Atomic &operator=(const Atomic &) = delete;
 
   // Atomic load.
-  operator T() { return __atomic_load_n(&val, int(MemoryOrder::SEQ_CST)); }
-
-  T load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-         [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-#if __has_builtin(__scoped_atomic_load_n)
-    return __scoped_atomic_load_n(&val, int(mem_ord), (int)(mem_scope));
+  LIBC_INLINE operator T() { return load(); }
+
+  LIBC_INLINE T
+  load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    T res;
+#if __has_builtin(__scoped_atomic_load)
+    __scoped_atomic_load(&val, &res, order(mem_ord), scope(mem_scope));
 #else
-    return __atomic_load_n(&val, int(mem_ord));
+    __atomic_load(&val, &res, order(mem_ord));
 #endif
+    return res;
   }
 
   // Atomic store.
-  T operator=(T rhs) {
-    __atomic_store_n(&val, rhs, int(MemoryOrder::SEQ_CST));
+  LIBC_INLINE T operator=(T rhs) {
+    store(rhs);
     return rhs;
   }
 
-  void store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-#if __has_builtin(__scoped_atomic_store_n)
-    __scoped_atomic_store_n(&val, rhs, int(mem_ord), (int)(mem_scope));
+  LIBC_INLINE void
+  store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+#if __has_builtin(__scoped_atomic_store)
+    __scoped_atomic_store(&val, &rhs, order(mem_ord), scope(mem_scope));
 #else
-    __atomic_store_n(&val, rhs, int(mem_ord));
+    __atomic_store(&val, &rhs, order(mem_ord));
 #endif
   }
 
   // Atomic compare exchange
-  bool compare_exchange_strong(
+  LIBC_INLINE bool compare_exchange_strong(
       T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange_n(&val, &expected, desired, false,
-                                       int(mem_ord), int(mem_ord));
+    return __atomic_compare_exchange(&val, &expected, &desired, false,
+                                     order(mem_ord), order(mem_ord));
   }
 
   // Atomic compare exchange (separate success and failure memory orders)
-  bool compare_exchange_strong(
+  LIBC_INLINE bool compare_exchange_strong(
       T &expected, T desired, MemoryOrder success_order,
       MemoryOrder failure_order,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange_n(&val, &expected, desired, false,
-                                       static_cast<int>(success_order),
-                                       static_cast<int>(failure_order));
+    return __atomic_compare_exchange(&val, &expected, &desired, false,
+                                     order(success_order),
+                                     order(failure_order));
   }
 
   // Atomic compare exchange (weak version)
-  bool compare_exchange_weak(
+  LIBC_INLINE bool compare_exchange_weak(
       T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange_n(&val, &expected, desired, true,
-                                       static_cast<int>(mem_ord),
-                                       static_cast<int>(mem_ord));
+    return __atomic_compare_exchange(&val, &expected, &desired, true,
+                                     order(mem_ord), order(mem_ord));
   }
 
   // Atomic compare exchange (weak version with separate success and failure
   // memory orders)
-  bool compare_exchange_weak(
+  LIBC_INLINE bool compare_exchange_weak(
       T &expected, T desired, MemoryOrder success_order,
       MemoryOrder failure_order,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange_n(&val, &expected, desired, true,
-                                       static_cast<int>(success_order),
-                                       static_cast<int>(failure_order));
+    return __atomic_compare_exchange(&val, &expected, &desired, true,
+                                     order(success_order),
+                                     order(failure_order));
   }
 
-  T exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-#if __has_builtin(__scoped_atomic_exchange_n)
-    return __scoped_atomic_exchange_n(&val, desired, int(mem_ord),
-                                      (int)(mem_scope));
+  LIBC_INLINE T
+  exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    T ret;
+#if __has_builtin(__scoped_atomic_exchange)
+    __scoped_atomic_exchange(&val, &desired, &ret, order(mem_ord),
+                             scope(mem_scope));
 #else
-    return __atomic_exchange_n(&val, desired, int(mem_ord));
+    __atomic_exchange(&val, &desired, &ret, order(mem_ord));
 #endif
+    return ret;
   }
 
-  T fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+  LIBC_INLINE T
+  fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_add)
-    return __scoped_atomic_fetch_add(&val, increment, int(mem_ord),
-                                     (int)(mem_scope));
+    return __scoped_atomic_fetch_add(&val, increment, order(mem_ord),
+                                     scope(mem_scope));
 #else
-    return __atomic_fetch_add(&val, increment, int(mem_ord));
+    return __atomic_fetch_add(&val, increment, order(mem_ord));
 #endif
   }
 
-  T fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+  LIBC_INLINE T
+  fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+           [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_or)
-    return __scoped_atomic_fetch_or(&val, mask, int(mem_ord), (int)(mem_scope));
+    return __scoped_atomic_fetch_or(&val, mask, order(mem_ord),
+                                    scope(mem_scope));
 #else
-    return __atomic_fetch_or(&val, mask, int(mem_ord));
+    return __atomic_fetch_or(&val, mask, order(mem_ord));
 #endif
   }
 
-  T fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+  LIBC_INLINE T
+  fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_and)
-    return __scoped_atomic_fetch_and(&val, mask, int(mem_ord),
-                                     (int)(mem_scope));
+    return __scoped_atomic_fetch_and(&val, mask, order(mem_ord),
+                                     scope(mem_scope));
 #else
-    return __atomic_fetch_and(&val, mask, int(mem_ord));
+    return __atomic_fetch_and(&val, mask, order(mem_ord));
 #endif
   }
 
-  T fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
-              [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+  LIBC_INLINE T
+  fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
+            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
+    static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_sub)
-    return __scoped_atomic_fetch_sub(&val, decrement, int(mem_ord),
-                                     (int)(mem_scope));
+    return __scoped_atomic_fetch_sub(&val, decrement, order(mem_ord),
+                                     scope(mem_scope));
 #else
-    return __atomic_fetch_sub(&val, decrement, int(mem_ord));
+    return __atomic_fetch_sub(&val, decrement, order(mem_ord));
 #endif
   }
 
   // Set the value without using an atomic operation. This is useful
   // in initializing atomic values without a constructor.
-  void set(T rhs) { val = rhs; }
+  LIBC_INLINE void set(T rhs) { val = rhs; }
 };
 
 // Issue a thread fence with the given memory ordering.
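
For context, here is a minimal usage sketch of the interface after this change. It is illustrative only, not part of the commit, and it assumes the header path src/__support/CPP/atomic.h and the LIBC_NAMESPACE::cpp namespace used elsewhere in llvm-libc; only names visible in the diff above are exercised.

// Sketch only: exercises the updated Atomic<T> interface.
#include "src/__support/CPP/atomic.h"   // assumed header path

using LIBC_NAMESPACE::cpp::Atomic;      // assumed namespace spelling
using LIBC_NAMESPACE::cpp::MemoryOrder;
using LIBC_NAMESPACE::cpp::MemoryScope;

int bump_and_read() {
  // The constexpr constructor initializes the value without atomic operations.
  Atomic<int> counter(0);

  // fetch_add requires an integral T (enforced by the new static_assert); the
  // scope argument is forwarded to __scoped_atomic_fetch_add when available.
  counter.fetch_add(1, MemoryOrder::SEQ_CST, MemoryScope::DEVICE);

  // compare_exchange_strong now routes through __atomic_compare_exchange, so
  // non-integral but trivially copyable T works as well.
  int expected = 1;
  counter.compare_exchange_strong(expected, 2, MemoryOrder::SEQ_CST,
                                  MemoryOrder::SEQ_CST);

  // Atomic load; defaults to MemoryOrder::SEQ_CST and MemoryScope::DEVICE.
  return counter.load();
}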