 #include "utils_windows_intrin.h"
 
 #pragma intrinsic(_BitScanForward64)
-#else
+#else /* !_WIN32 */
 #include <pthread.h>
 
 #ifndef __cplusplus
 #include <stdatomic.h>
 #else /* __cplusplus */
 #include <atomic>
 #define _Atomic(X) std::atomic<X>
 
-using std::memory_order_acq_rel;
-using std::memory_order_acquire;
-using std::memory_order_relaxed;
-using std::memory_order_release;
-
 #endif /* __cplusplus */
 
-#endif /* _WIN32 */
+#endif /* !_WIN32 */
 
 #include "utils_common.h"
 #include "utils_sanitizers.h"
@@ -118,14 +113,6 @@ static __inline void utils_atomic_load_acquire_ptr(void **ptr, void **out) {
     *(uintptr_t *)out = ret;
 }
 
-static __inline void utils_atomic_store_release_u64(uint64_t *ptr,
-                                                    uint64_t *val) {
-    ASSERT_IS_ALIGNED((uintptr_t)ptr, 8);
-    ASSERT_IS_ALIGNED((uintptr_t)val, 8);
-    utils_annotate_release(ptr);
-    InterlockedExchange64((LONG64 volatile *)ptr, *(LONG64 *)val);
-}
-
 static __inline void utils_atomic_store_release_ptr(void **ptr, void *val) {
     ASSERT_IS_ALIGNED((uintptr_t)ptr, 8);
     utils_annotate_release(ptr);
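On Windows, the removed helper and its surviving `_ptr` sibling both lean on `InterlockedExchange64`, which acts as a full barrier and is therefore at least as strong as a release store. A hedged sketch of how a caller might publish data through the surviving pointer variant (`node_t`, `g_published`, and `publish_node` are illustrative names, not from the patch):

    #include <stdint.h>

    typedef struct node_t {
        uint64_t payload;
    } node_t;

    static node_t g_node;
    static void *g_published; /* paired with utils_atomic_load_acquire_ptr() */

    static void publish_node(void) {
        g_node.payload = 42; /* ordinary writes first ... */
        /* ... then a release store, so an acquire load of g_published
         * observes payload fully initialized. */
        utils_atomic_store_release_ptr(&g_published, (void *)&g_node);
    }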
@@ -146,14 +133,12 @@ static __inline uint64_t utils_atomic_decrement_u64(uint64_t *ptr) {
 
 static __inline uint64_t utils_fetch_and_add_u64(uint64_t *ptr, uint64_t val) {
     ASSERT_IS_ALIGNED((uintptr_t)ptr, 8);
-    ASSERT_IS_ALIGNED((uintptr_t)&val, 8);
     // return the value that had previously been in *ptr
     return InterlockedExchangeAdd64((LONG64 volatile *)(ptr), val);
 }
 
 static __inline uint64_t utils_fetch_and_sub_u64(uint64_t *ptr, uint64_t val) {
     ASSERT_IS_ALIGNED((uintptr_t)ptr, 8);
-    ASSERT_IS_ALIGNED((uintptr_t)&val, 8);
     // return the value that had previously been in *ptr
     // NOTE: on Windows there is no *Sub* version of InterlockedExchange
     return InterlockedExchangeAdd64((LONG64 volatile *)(ptr), -(LONG64)val);
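The dropped asserts checked the address of the by-value parameter `val` on the caller's stack, which has no bearing on the atomic operation; only `*ptr` must be 8-byte aligned. A hypothetical usage sketch (`object_t` and `object_destroy` are illustrative, not from the patch), relying on the documented behavior that these helpers return the pre-operation value:

    #include <stdint.h>

    typedef struct object_t {
        uint64_t refcount; /* 8-byte aligned, as the surviving assert requires */
    } object_t;

    static void object_destroy(object_t *obj); /* hypothetical destructor */

    static void object_release(object_t *obj) {
        /* utils_fetch_and_sub_u64() returns the value *before* subtraction,
         * so 1 means this caller just dropped the last reference. */
        if (utils_fetch_and_sub_u64(&obj->refcount, 1) == 1) {
            object_destroy(obj);
        }
    }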
@@ -203,14 +188,6 @@ static inline void utils_atomic_load_acquire_ptr(void **ptr, void **out) {
     utils_annotate_acquire((void *)ptr);
 }
 
-static inline void utils_atomic_store_release_u64(uint64_t *ptr,
-                                                  uint64_t *val) {
-    ASSERT_IS_ALIGNED((uintptr_t)ptr, 8);
-    ASSERT_IS_ALIGNED((uintptr_t)val, 8);
-    utils_annotate_release(ptr);
-    __atomic_store(ptr, val, memory_order_release);
-}
-
 static inline void utils_atomic_store_release_ptr(void **ptr, void *val) {
     ASSERT_IS_ALIGNED((uintptr_t)ptr, 8);
     utils_annotate_release(ptr);
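The non-Windows helper removed here used GCC's `__atomic_store` builtin, whose pointer-based signature is why the helper took `val` by pointer. A minimal sketch contrasting the two documented builtin forms (helper names are illustrative, not from the patch); `__ATOMIC_RELEASE` is the builtin constant equivalent to `memory_order_release`:

    #include <stdint.h>

    /* Pointer form: stores *valp into *ptr, as the removed helper did. */
    static inline void store_release_u64_by_ptr(uint64_t *ptr, uint64_t *valp) {
        __atomic_store(ptr, valp, __ATOMIC_RELEASE);
    }

    /* Value form: __atomic_store_n takes the value directly. */
    static inline void store_release_u64_by_val(uint64_t *ptr, uint64_t val) {
        __atomic_store_n(ptr, val, __ATOMIC_RELEASE);
    }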