Commit 249ec7a

Merge pull request #41222 from compnerd/spinning
Runtime: change atomic implementation for `AllocationPool`
2 parents b9c4c3a + 1b5b9d3 commit 249ec7a

File tree

2 files changed, +24 -16 lines changed

include/swift/Runtime/Atomic.h

Lines changed: 16 additions & 6 deletions

@@ -72,6 +72,13 @@ class alignas(Size) atomic_impl {
     return value.compare_exchange_weak(oldValue, newValue, successOrder,
                                        failureOrder);
   }
+
+  bool compare_exchange_strong(Value &oldValue, Value newValue,
+                               std::memory_order successOrder,
+                               std::memory_order failureOrder) {
+    return value.compare_exchange_strong(oldValue, newValue, successOrder,
+                                         failureOrder);
+  }
 };

 #if defined(_WIN64)
@@ -128,11 +135,14 @@ class alignas(2 * sizeof(void*)) atomic_impl<Value, 2 * sizeof(void*)> {
   bool compare_exchange_weak(Value &oldValue, Value newValue,
                              std::memory_order successOrder,
                              std::memory_order failureOrder) {
-    assert(failureOrder == std::memory_order_relaxed ||
-           failureOrder == std::memory_order_acquire ||
-           failureOrder == std::memory_order_consume);
-    assert(successOrder == std::memory_order_relaxed ||
-           successOrder == std::memory_order_release);
+    // We do not have weak CAS intrinsics, fallback to strong
+    return compare_exchange_strong(oldValue, newValue, successOrder,
+                                   failureOrder);
+  }
+
+  bool compare_exchange_strong(Value &oldValue, Value newValue,
+                               std::memory_order successOrder,
+                               std::memory_order failureOrder) {
 #if SWIFT_HAS_MSVC_ARM_ATOMICS
     if (successOrder == std::memory_order_relaxed &&
         failureOrder != std::memory_order_acquire) {
@@ -180,7 +190,7 @@ class alignas(2 * sizeof(void*)) atomic_impl<Value, 2 * sizeof(void*)> {
 template <class T>
 class atomic : public impl::atomic_impl<T> {
 public:
-  atomic(T value) : impl::atomic_impl<T>(value) {}
+  constexpr atomic(T value) : impl::atomic_impl<T>(value) {}
 };

 } // end namespace swift
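
Two things are worth noting about this change. First, the constexpr constructor lets a static swift::atomic qualify for constant initialization, so a global like the runtime's AllocationPool needs no dynamic initializer. Second, forwarding compare_exchange_weak to compare_exchange_strong is a conforming fallback: the weak form is merely allowed to fail spuriously, so callers must already retry in a loop, and a strong CAS simply never takes the spurious-failure path. A minimal caller-side sketch of that contract, using plain std::atomic (the counter and increment names are invented for illustration, not from the commit):

#include <atomic>

std::atomic<int> counter{0};

void increment() {
  int expected = counter.load(std::memory_order_relaxed);
  // compare_exchange_weak may fail spuriously, so it is always driven
  // from a retry loop; on failure, `expected` is reloaded with the
  // current value. An implementation that forwards the weak form to
  // the strong one (as the double-word path above now does) satisfies
  // the same contract, minus the spurious failures.
  while (!counter.compare_exchange_weak(expected, expected + 1,
                                        std::memory_order_relaxed,
                                        std::memory_order_relaxed)) {
    // Retry with the freshly loaded `expected`.
  }
}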

stdlib/public/runtime/Metadata.cpp

Lines changed: 8 additions & 10 deletions

@@ -5918,7 +5918,7 @@ namespace {
   alignas(void *) static struct {
     char Pool[InitialPoolSize];
   } InitialAllocationPool;
-  static std::atomic<PoolRange>
+  static swift::atomic<PoolRange>
     AllocationPool{PoolRange{InitialAllocationPool.Pool,
                              sizeof(InitialAllocationPool.Pool)}};

@@ -6071,10 +6071,9 @@ void *MetadataAllocator::Allocate(size_t size, size_t alignment) {
     }

     // Swap in the new state.
-    if (std::atomic_compare_exchange_weak_explicit(&AllocationPool,
-                                                   &curState, newState,
-                                                   std::memory_order_relaxed,
-                                                   std::memory_order_relaxed)) {
+    if (AllocationPool.compare_exchange_weak(curState, newState,
+                                             std::memory_order_relaxed,
+                                             std::memory_order_relaxed)) {
       // If that succeeded, we've successfully allocated.
       __msan_allocated_memory(allocation, sizeWithHeader);
       __asan_unpoison_memory_region(allocation, sizeWithHeader);
@@ -6129,11 +6128,10 @@ void MetadataAllocator::Deallocate(const void *allocation, size_t size,
   // don't bother trying again; we'll just leak the allocation.
   PoolRange newState = { reinterpret_cast<char*>(const_cast<void*>(allocation)),
                          curState.Remaining + size };
-  (void)
-  std::atomic_compare_exchange_strong_explicit(&AllocationPool,
-                                               &curState, newState,
-                                               std::memory_order_relaxed,
-                                               std::memory_order_relaxed);
+
+  AllocationPool.compare_exchange_weak(curState, newState,
+                                       std::memory_order_relaxed,
+                                       std::memory_order_relaxed);
 }

 #endif
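
For orientation, AllocationPool backs a lock-free bump allocator: each attempt swaps one {Begin, Remaining} snapshot for another, and switching it to swift::atomic routes the double-word CAS through the runtime's own implementation (with the new constexpr constructor presumably keeping the static initializer a constant initialization). The sketch below illustrates the allocation loop under simplifying assumptions: a single fixed slab, no refill path, std::atomic standing in for swift::atomic, and the poolAllocate name and slab size invented for the example.

#include <atomic>
#include <cstddef>

struct PoolRange {
  char *Begin;       // next free byte in the current slab
  size_t Remaining;  // bytes left in the slab
};

static char InitialPool[64 * 1024];
static std::atomic<PoolRange> AllocationPool{
    PoolRange{InitialPool, sizeof(InitialPool)}};

void *poolAllocate(size_t size) {
  PoolRange curState = AllocationPool.load(std::memory_order_relaxed);
  while (true) {
    if (curState.Remaining < size)
      return nullptr;  // the real allocator would acquire a fresh slab here
    PoolRange newState{curState.Begin + size, curState.Remaining - size};
    // On failure, curState is reloaded and the loop retries; the diff
    // above likewise performs this CAS with relaxed ordering on both
    // the success and failure paths.
    if (AllocationPool.compare_exchange_weak(curState, newState,
                                             std::memory_order_relaxed,
                                             std::memory_order_relaxed))
      return curState.Begin;
  }
}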
