@@ -66,8 +66,7 @@ class alignas(Size) atomic_impl {
   }
 };

-// FIXME: get this to build reliably
-#if 0 && defined(_WIN64)
+#if defined(_WIN64)
 #include <intrin.h>

 /// MSVC's std::atomic uses an inline spin lock for 16-byte atomics,
@@ -76,11 +75,7 @@ class alignas(Size) atomic_impl {
 /// AMD processors that lack cmpxchg16b, so we just use the intrinsic.
 template <class Value>
 class alignas(2 * sizeof(void*)) atomic_impl<Value, 2 * sizeof(void*)> {
-  // MSVC is not strict about aliasing, so we can get away with this.
-  union {
-    volatile Value atomicValue;
-    volatile __int64 atomicArray[2];
-  };
+  volatile Value atomicValue;

 public:
   constexpr atomic_impl(Value initialValue) : atomicValue(initialValue) {}
@@ -98,10 +93,14 @@ class alignas(2 * sizeof(void*)) atomic_impl<Value, 2 * sizeof(void*)> {
     __int64 resultArray[2] = {};
 #if SWIFT_HAS_MSVC_ARM_ATOMICS
     if (order != std::memory_order_acquire) {
-      (void)_InterlockedCompareExchange128_nf(atomicArray, 0, 0, resultArray);
+      (void)_InterlockedCompareExchange128_nf(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          0, 0, resultArray);
     } else {
 #endif
-      (void)_InterlockedCompareExchange128(atomicArray, 0, 0, resultArray);
+      (void)_InterlockedCompareExchange128(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          0, 0, resultArray);
 #if SWIFT_HAS_MSVC_ARM_ATOMICS
     }
 #endif
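
Aside on the rewritten load above: _InterlockedCompareExchange128 always writes the destination's current 16-byte contents into its comparand/result buffer, whether or not the exchange takes place, so a compare-exchange with a zero comparand and zero exchange value doubles as an atomic 16-byte load. A minimal self-contained sketch of that pattern, assuming MSVC targeting x64 (the Pair type and atomic_load_16 helper are illustrative names, not part of this patch):

// Illustrative sketch only, assuming MSVC on x64; Pair and atomic_load_16
// are hypothetical names, not Swift runtime API.
#include <intrin.h>
#include <cstring>

struct alignas(16) Pair {   // the destination must be 16-byte aligned
  __int64 low;
  __int64 high;
};

inline Pair atomic_load_16(volatile Pair *src) {
  // The comparand/result buffer is always overwritten with the current
  // contents of *src; with a zero comparand and zero exchange value the
  // store either does not happen or rewrites the same zero, so this is
  // effectively a 16-byte atomic load.
  __int64 result[2] = {};
  (void)_InterlockedCompareExchange128(
      reinterpret_cast<volatile __int64 *>(src),
      /*ExchangeHigh=*/0, /*ExchangeLow=*/0, result);
  Pair loaded;
  std::memcpy(&loaded, result, sizeof(loaded));
  return loaded;
}
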
@@ -116,31 +115,33 @@ class alignas(2 * sizeof(void*)) atomic_impl<Value, 2 * sizeof(void*)> {
            failureOrder == std::memory_order_consume);
     assert(successOrder == std::memory_order_relaxed ||
            successOrder == std::memory_order_release);
-    __int64 newValueArray[2];
-    memcpy(newValueArray, &newValue, sizeof(Value));
 #if SWIFT_HAS_MSVC_ARM_ATOMICS
     if (successOrder == std::memory_order_relaxed &&
         failureOrder != std::memory_order_acquire) {
-      return _InterlockedCompareExchange128_nf(atomicArray,
-                                               newValueArray[0],
-                                               newValueArray[1],
-                                               reinterpret_cast<__int64*>(&oldValue));
+      return _InterlockedCompareExchange128_nf(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          reinterpret_cast<const __int64*>(&newValue)[1],
+          reinterpret_cast<const __int64*>(&newValue)[0],
+          reinterpret_cast<__int64*>(&oldValue));
     } else if (successOrder == std::memory_order_relaxed) {
-      return _InterlockedCompareExchange128_acq(atomicArray,
-                                                newValueArray[0],
-                                                newValueArray[1],
-                                                reinterpret_cast<__int64*>(&oldValue));
+      return _InterlockedCompareExchange128_acq(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          reinterpret_cast<const __int64*>(&newValue)[1],
+          reinterpret_cast<const __int64*>(&newValue)[0],
+          reinterpret_cast<__int64*>(&oldValue));
     } else if (failureOrder != std::memory_order_acquire) {
-      return _InterlockedCompareExchange128_rel(atomicArray,
-                                                newValueArray[0],
-                                                newValueArray[1],
-                                                reinterpret_cast<__int64*>(&oldValue));
+      return _InterlockedCompareExchange128_rel(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          reinterpret_cast<const __int64*>(&newValue)[1],
+          reinterpret_cast<const __int64*>(&newValue)[0],
+          reinterpret_cast<__int64*>(&oldValue));
     } else {
 #endif
-      return _InterlockedCompareExchange128(atomicArray,
-                                            newValueArray[0],
-                                            newValueArray[1],
-                                            reinterpret_cast<__int64*>(&oldValue));
+      return _InterlockedCompareExchange128(
+          reinterpret_cast<volatile __int64*>(&atomicValue),
+          reinterpret_cast<const __int64*>(&newValue)[1],
+          reinterpret_cast<const __int64*>(&newValue)[0],
+          reinterpret_cast<__int64*>(&oldValue));
 #if SWIFT_HAS_MSVC_ARM_ATOMICS
     }
 #endif
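
On the compare-exchange rewrite above: the intrinsic takes the new value as separate (ExchangeHigh, ExchangeLow) halves, which is why the replacement code passes reinterpret_cast<const __int64*>(&newValue)[1] (the high 64 bits on little-endian x64) ahead of [0], and it overwrites the comparand buffer with the value it observed, which is how oldValue gets refreshed when the exchange fails. A rough standalone sketch of the same shape, again assuming MSVC on x64 (Pair and compare_exchange_16 are hypothetical names, and only the plain, sequentially consistent variant is shown):

// Illustrative sketch only, assuming MSVC on x64; not Swift runtime API.
#include <intrin.h>
#include <cstring>

struct alignas(16) Pair {
  __int64 low;
  __int64 high;
};

// Returns true if *dst held `expected` and was replaced by `desired`;
// otherwise `expected` is updated to the value actually observed.
inline bool compare_exchange_16(volatile Pair *dst, Pair &expected,
                                const Pair &desired) {
  __int64 comparand[2];
  std::memcpy(comparand, &expected, sizeof(comparand));
  // Note the argument order: the high 64 bits come before the low 64 bits.
  unsigned char ok = _InterlockedCompareExchange128(
      reinterpret_cast<volatile __int64 *>(dst),
      /*ExchangeHigh=*/desired.high, /*ExchangeLow=*/desired.low,
      comparand);
  // The intrinsic writes the original contents of *dst into `comparand`
  // (on success that is just `expected` again), so copying it back is safe.
  std::memcpy(&expected, comparand, sizeof(expected));
  return ok != 0;
}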