
Commit 41b9b34

[libc++] Allows any types of size 4 and 8 to use native platform ulock_wait
1 parent 138e0ff commit 41b9b34

6 files changed: 172 additions, 55 deletions

libcxx/include/__atomic/atomic.h

Lines changed: 2 additions & 0 deletions
@@ -206,6 +206,8 @@ struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> {
 // __atomic_base<int, false>. So specializing __atomic_base<_Tp> does not work
 template <class _Tp, bool _IsIntegral>
 struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > {
+  using __inner_type _LIBCPP_NODEBUG = _Tp;
+
   static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) {
     return __a.load(__order);
   }

libcxx/include/__atomic/atomic_flag.h

Lines changed: 2 additions & 0 deletions
@@ -76,6 +76,8 @@ struct atomic_flag {
 
 template <>
 struct __atomic_waitable_traits<atomic_flag> {
+  using __inner_type _LIBCPP_NODEBUG = _LIBCPP_ATOMIC_FLAG_TYPE;
+
   static _LIBCPP_HIDE_FROM_ABI _LIBCPP_ATOMIC_FLAG_TYPE __atomic_load(const atomic_flag& __a, memory_order __order) {
     return std::__cxx_atomic_load(&__a.__a_, __order);
   }

libcxx/include/__atomic/atomic_ref.h

Lines changed: 2 additions & 0 deletions
@@ -233,6 +233,8 @@ struct __atomic_ref_base {
 
 template <class _Tp>
 struct __atomic_waitable_traits<__atomic_ref_base<_Tp>> {
+  using __inner_type _LIBCPP_NODEBUG = _Tp;
+
   static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) {
     return __a.load(__order);
   }

libcxx/include/__atomic/atomic_sync.h

Lines changed: 54 additions & 6 deletions
@@ -38,6 +38,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD
 // The below implementations look ugly to support C++03
 template <class _Tp, class = void>
 struct __atomic_waitable_traits {
+  using __inner_type _LIBCPP_NODEBUG = void;
+
   template <class _AtomicWaitable>
   static void __atomic_load(_AtomicWaitable&&, memory_order) = delete;
 
@@ -58,6 +60,7 @@ struct __atomic_waitable< _Tp,
 #if _LIBCPP_STD_VER >= 20
 # if _LIBCPP_HAS_THREADS
 
+<<<<<<< HEAD
 _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(void const volatile*) _NOEXCEPT;
 _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all(void const volatile*) _NOEXCEPT;
 _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t __libcpp_atomic_monitor(void const volatile*) _NOEXCEPT;
@@ -69,6 +72,28 @@ _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t
 __libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile*) _NOEXCEPT;
 _LIBCPP_EXPORTED_FROM_ABI void
 __libcpp_atomic_wait(__cxx_atomic_contention_t const volatile*, __cxx_contention_t) _NOEXCEPT;
+=======
+template <std::size_t _Size>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
+__libcpp_atomic_wait_native(void const volatile* __address, void const volatile* __old_value) _NOEXCEPT;
+
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t
+__libcpp_atomic_monitor_global(void const volatile* __address) _NOEXCEPT;
+
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
+__libcpp_atomic_wait_global_table(void const volatile* __address, __cxx_contention_t __monitor_value) _NOEXCEPT;
+
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one_global_table(void const volatile*) _NOEXCEPT;
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all_global_table(void const volatile*) _NOEXCEPT;
+
+template <std::size_t _Size>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
+__cxx_atomic_notify_one_native(const volatile void*) _NOEXCEPT;
+
+template <std::size_t _Size>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
+__cxx_atomic_notify_all_native(const volatile void*) _NOEXCEPT;
+>>>>>>> 59d6fc2ba487 ([libc++] Allows any types of size 4 and 8 to use native platform ulock_wait)
 
 template <class _AtomicWaitable, class _Poll>
 struct __atomic_wait_backoff_impl {
@@ -77,6 +102,7 @@ struct __atomic_wait_backoff_impl {
   memory_order __order_;
 
   using __waitable_traits _LIBCPP_NODEBUG = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >;
+<<<<<<< HEAD
 
   _LIBCPP_HIDE_FROM_ABI bool
   __update_monitor_val_and_poll(__cxx_atomic_contention_t const volatile*, __cxx_contention_t& __monitor_val) const {
@@ -98,14 +124,26 @@ struct __atomic_wait_backoff_impl {
     auto __current_val = __waitable_traits::__atomic_load(__a_, __order_);
     return __poll_(__current_val);
   }
+=======
+  using __inner_type _LIBCPP_NODEBUG = typename __waitable_traits::__inner_type;
+>>>>>>> 59d6fc2ba487 ([libc++] Allows any types of size 4 and 8 to use native platform ulock_wait)
 
   _LIBCPP_HIDE_FROM_ABI bool operator()(chrono::nanoseconds __elapsed) const {
     if (__elapsed > chrono::microseconds(4)) {
       auto __contention_address = __waitable_traits::__atomic_contention_address(__a_);
-      __cxx_contention_t __monitor_val;
-      if (__update_monitor_val_and_poll(__contention_address, __monitor_val))
-        return true;
-      std::__libcpp_atomic_wait(__contention_address, __monitor_val);
+
+      if constexpr (__is_atomic_wait_native_type<__inner_type>::value) {
+        auto __atomic_value = __waitable_traits::__atomic_load(__a_, __order_);
+        if (__poll_(__atomic_value))
+          return true;
+        std::__libcpp_atomic_wait_native<sizeof(__inner_type)>(__contention_address, &__atomic_value);
+      } else {
+        __cxx_contention_t __monitor_val = std::__libcpp_atomic_monitor_global(__contention_address);
+        auto __atomic_value = __waitable_traits::__atomic_load(__a_, __order_);
+        if (__poll_(__atomic_value))
+          return true;
+        std::__libcpp_atomic_wait_global_table(__contention_address, __monitor_val);
+      }
     } else {
     } // poll
     return false;
@@ -136,13 +174,23 @@ _LIBCPP_HIDE_FROM_ABI void __atomic_wait_unless(const _AtomicWaitable& __a, memo
 template <class _AtomicWaitable>
 _LIBCPP_HIDE_FROM_ABI void __atomic_notify_one(const _AtomicWaitable& __a) {
   static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
-  std::__cxx_atomic_notify_one(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a));
+  using __inner_type _LIBCPP_NODEBUG = typename __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__inner_type;
+  if constexpr (__is_atomic_wait_native_type<__inner_type>::value) {
+    std::__cxx_atomic_notify_one_native<sizeof(__inner_type)>(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a));
+  } else {
+    std::__cxx_atomic_notify_one_global_table(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a));
+  }
 }
 
 template <class _AtomicWaitable>
 _LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _AtomicWaitable& __a) {
   static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
-  std::__cxx_atomic_notify_all(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a));
+  using __inner_type _LIBCPP_NODEBUG = typename __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__inner_type;
+  if constexpr (__is_atomic_wait_native_type<__inner_type>::value) {
+    std::__cxx_atomic_notify_all_native<sizeof(__inner_type)>(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a));
+  } else {
+    std::__cxx_atomic_notify_all_global_table(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a));
+  }
 }
 
 # else // _LIBCPP_HAS_THREADS
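
Note: the wait and notify paths above now branch at compile time. When the waited-on value is an integral type whose size the platform can wait on directly (as reported by __is_atomic_wait_native_type), they call the size-templated *_native entry points; otherwise they fall back to the hashed global contention table. The following standalone sketch shows the same dispatch pattern under a C++17 compiler; notify_native, notify_via_table, and notify_one_dispatch are hypothetical stand-ins for illustration, not libc++ symbols.

// Sketch only: size-based dispatch between a native wake and a table-based wake.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <type_traits>

template <std::size_t Size>
void notify_native(const volatile void*) { std::printf("native wake on %zu-byte word\n", Size); }

void notify_via_table(const volatile void*) { std::printf("wake via global contention table\n"); }

template <class T>
void notify_one_dispatch(const volatile T* address) {
  // Mirrors the condition encoded in __is_atomic_wait_native_type (non-Linux branch).
  if constexpr (std::is_integral_v<T> && (sizeof(T) == 4 || sizeof(T) == 8))
    notify_native<sizeof(T)>(address);  // wake directly on the user's atomic word
  else
    notify_via_table(address);          // launder through a hashed contention-table entry
}

int main() {
  std::uint64_t word = 0; // 8-byte integral: native path
  long double wide = 0;   // not an integral type: table path
  notify_one_dispatch(&word);
  notify_one_dispatch(&wide);
}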

libcxx/include/__atomic/contention_t.h

Lines changed: 17 additions & 0 deletions
@@ -11,6 +11,10 @@
 
 #include <__atomic/support.h>
 #include <__config>
+#include <__type_traits/enable_if.h>
+#include <__type_traits/integral_constant.h>
+#include <__type_traits/is_integral.h>
+#include <cstddef>
 #include <cstdint>
 
 #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
@@ -19,10 +23,23 @@
 
 _LIBCPP_BEGIN_NAMESPACE_STD
 
+template <class _Tp, class = void>
+struct __is_atomic_wait_native_type : false_type {};
+
 #if defined(__linux__) || (defined(_AIX) && !defined(__64BIT__))
 using __cxx_contention_t _LIBCPP_NODEBUG = int32_t;
+
+template <class _Tp>
+struct __is_atomic_wait_native_type<_Tp, __enable_if_t<is_integral<_Tp>::value && sizeof(_Tp) == 4> > : true_type {};
+
 #else
 using __cxx_contention_t _LIBCPP_NODEBUG = int64_t;
+
+template <class _Tp>
+struct __is_atomic_wait_native_type<_Tp,
+                                    __enable_if_t<is_integral<_Tp>::value && (sizeof(_Tp) == 4 || sizeof(_Tp) == 8)> >
+    : true_type {};
+
 #endif // __linux__ || (_AIX && !__64BIT__)
 
 using __cxx_atomic_contention_t _LIBCPP_NODEBUG = __cxx_atomic_impl<__cxx_contention_t>;
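
Note: this trait is the platform gate. On Linux and 32-bit AIX, where the contention word is 32-bit, only 4-byte integral types qualify for the native path; on other platforms, both 4- and 8-byte integral types do. The standalone sketch below mimics the non-Linux branch; is_wait_native is a hypothetical stand-in for the internal trait, used only to show which types each path would take.

// Sketch only: which value types would hit the native wait path (non-Linux branch).
#include <cstdint>
#include <type_traits>

template <class T, class = void>
struct is_wait_native : std::false_type {};

template <class T>
struct is_wait_native<T, std::enable_if_t<std::is_integral<T>::value &&
                                          (sizeof(T) == 4 || sizeof(T) == 8)>>
    : std::true_type {};

static_assert(is_wait_native<std::int32_t>::value, "4-byte integral: native path");
static_assert(is_wait_native<std::uint64_t>::value, "8-byte integral: native path");
static_assert(!is_wait_native<short>::value, "2-byte integral: global-table path");
static_assert(!is_wait_native<float>::value, "not integral: global-table path");

struct Pair { std::int32_t a, b; }; // 8 bytes wide, but not an integral type
static_assert(!is_wait_native<Pair>::value, "aggregate: global-table path");

int main() {}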

libcxx/src/atomic.cpp

Lines changed: 95 additions & 49 deletions
@@ -9,6 +9,7 @@
 #include <__thread/timed_backoff_policy.h>
 #include <atomic>
 #include <climits>
+#include <cstddef>
 #include <functional>
 #include <thread>
 
@@ -53,6 +54,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD
 
 #ifdef __linux__
 
+
+// TODO : update
 static void
 __libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr, __cxx_contention_t __val) {
   static constexpr timespec __timeout = {2, 0};
@@ -70,22 +73,32 @@ extern "C" int __ulock_wait(
 extern "C" int __ulock_wake(uint32_t operation, void* addr, uint64_t wake_value);
 
 // https://github.com/apple/darwin-xnu/blob/2ff845c2e033bd0ff64b5b6aa6063a1f8f65aa32/bsd/sys/ulock.h#L82
+# define UL_COMPARE_AND_WAIT 1
 # define UL_COMPARE_AND_WAIT64 5
 # define ULF_WAKE_ALL 0x00000100
 
-static void
-__libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr, __cxx_contention_t __val) {
-  static_assert(sizeof(__cxx_atomic_contention_t) == 8, "Waiting on 8 bytes value");
-  __ulock_wait(UL_COMPARE_AND_WAIT64, const_cast<__cxx_atomic_contention_t*>(__ptr), __val, 0);
+template <std::size_t _Size>
+static void __libcpp_platform_wait_on_address(void const volatile* __ptr, void const volatile* __val) {
+  static_assert(_Size == 8 || _Size == 4, "Can only wait on 8 bytes or 4 bytes value");
+  if constexpr (_Size == 4)
+    __ulock_wait(UL_COMPARE_AND_WAIT, const_cast<void*>(__ptr), *reinterpret_cast<uint32_t const volatile*>(__val), 0);
+  else
+    __ulock_wait(
+        UL_COMPARE_AND_WAIT64, const_cast<void*>(__ptr), *reinterpret_cast<uint64_t const volatile*>(__val), 0);
 }
 
-static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const volatile* __ptr, bool __notify_one) {
-  static_assert(sizeof(__cxx_atomic_contention_t) == 8, "Waking up on 8 bytes value");
-  __ulock_wake(
-      UL_COMPARE_AND_WAIT64 | (__notify_one ? 0 : ULF_WAKE_ALL), const_cast<__cxx_atomic_contention_t*>(__ptr), 0);
+template <std::size_t _Size>
+static void __libcpp_platform_wake_by_address(void const volatile* __ptr, bool __notify_one) {
+  static_assert(_Size == 8 || _Size == 4, "Can only wake up on 8 bytes or 4 bytes value");
+
+  if constexpr (_Size == 4)
+    __ulock_wake(UL_COMPARE_AND_WAIT | (__notify_one ? 0 : ULF_WAKE_ALL), const_cast<void*>(__ptr), 0);
+  else
+    __ulock_wake(UL_COMPARE_AND_WAIT64 | (__notify_one ? 0 : ULF_WAKE_ALL), const_cast<void*>(__ptr), 0);
 }
 
 #elif defined(__FreeBSD__) && __SIZEOF_LONG__ == 8
+// TODO : update
 /*
  * Since __cxx_contention_t is int64_t even on 32bit FreeBSD
  * platforms, we have to use umtx ops that work on the long type, and
@@ -104,6 +117,7 @@ static void __libcpp_platform_wake_by_address(__cxx_atomic_contention_t const vo
 #else // <- Add other operating systems here
 
 // Baseline is just a timed backoff
+// TODO : update
 
 static void
 __libcpp_platform_wait_on_address(__cxx_atomic_contention_t const volatile* __ptr, __cxx_contention_t __val) {
@@ -128,83 +142,115 @@ static __libcpp_contention_table_entry __libcpp_contention_table[__libcpp_conten
 
 static hash<void const volatile*> __libcpp_contention_hasher;
 
-static __libcpp_contention_table_entry* __libcpp_contention_state(void const volatile* p) {
+static __libcpp_contention_table_entry* __get_global_contention_state(void const volatile* p) {
   return &__libcpp_contention_table[__libcpp_contention_hasher(p) & (__libcpp_contention_table_size - 1)];
 }
 
 /* Given an atomic to track contention and an atomic to actually wait on, which may be
    the same atomic, we try to detect contention to avoid spuriously calling the platform. */
 
-static void __libcpp_contention_notify(__cxx_atomic_contention_t volatile* __contention_state,
-                                       __cxx_atomic_contention_t const volatile* __platform_state,
+template <std::size_t _Size>
+static void __libcpp_contention_notify(__cxx_atomic_contention_t volatile* __global_contention_state,
+                                       void const volatile* __address_to_notify,
                                        bool __notify_one) {
-  if (0 != __cxx_atomic_load(__contention_state, memory_order_seq_cst))
+  if (0 != __cxx_atomic_load(__global_contention_state, memory_order_seq_cst))
     // We only call 'wake' if we consumed a contention bit here.
-    __libcpp_platform_wake_by_address(__platform_state, __notify_one);
-}
-static __cxx_contention_t
-__libcpp_contention_monitor_for_wait(__cxx_atomic_contention_t volatile* /*__contention_state*/,
-                                     __cxx_atomic_contention_t const volatile* __platform_state) {
-  // We will monitor this value.
-  return __cxx_atomic_load(__platform_state, memory_order_acquire);
+    __libcpp_platform_wake_by_address<_Size>(__address_to_notify, __notify_one);
 }
+
+template <std::size_t _Size>
 static void __libcpp_contention_wait(__cxx_atomic_contention_t volatile* __contention_state,
-                                     __cxx_atomic_contention_t const volatile* __platform_state,
-                                     __cxx_contention_t __old_value) {
+                                     void const volatile* __address_to_wait,
+                                     void const volatile* __old_value) {
   __cxx_atomic_fetch_add(__contention_state, __cxx_contention_t(1), memory_order_relaxed);
   // https://llvm.org/PR109290
   // There are no platform guarantees of a memory barrier in the platform wait implementation
   __cxx_atomic_thread_fence(memory_order_seq_cst);
   // We sleep as long as the monitored value hasn't changed.
-  __libcpp_platform_wait_on_address(__platform_state, __old_value);
+  __libcpp_platform_wait_on_address<_Size>(__address_to_wait, __old_value);
   __cxx_atomic_fetch_sub(__contention_state, __cxx_contention_t(1), memory_order_release);
 }
 
 /* When the incoming atomic is the wrong size for the platform wait size, need to
    launder the value sequence through an atomic from our table. */
 
-static void __libcpp_atomic_notify(void const volatile* __location) {
-  auto const __entry = __libcpp_contention_state(__location);
+static void __atomic_notify_global_table(void const volatile* __location) {
+  auto const __entry = __get_global_contention_state(__location);
   // The value sequence laundering happens on the next line below.
   __cxx_atomic_fetch_add(&__entry->__platform_state, __cxx_contention_t(1), memory_order_seq_cst);
-  __libcpp_contention_notify(
+  __libcpp_contention_notify<sizeof(__cxx_atomic_contention_t)>(
       &__entry->__contention_state,
      &__entry->__platform_state,
       false /* when laundering, we can't handle notify_one */);
 }
-_LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(void const volatile* __location) noexcept {
-  __libcpp_atomic_notify(__location);
+
+_LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t __libcpp_atomic_monitor_global(void const volatile* __location) noexcept {
+  auto const __entry = __get_global_contention_state(__location);
+  return __cxx_atomic_load(&__entry->__platform_state, memory_order_acquire);
 }
-_LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all(void const volatile* __location) noexcept {
-  __libcpp_atomic_notify(__location);
+
+_LIBCPP_EXPORTED_FROM_ABI void
+__libcpp_atomic_wait_global_table(void const volatile* __location, __cxx_contention_t __old_value) noexcept {
+  auto const __entry = __get_global_contention_state(__location);
+  __libcpp_contention_wait<sizeof(__cxx_atomic_contention_t)>(
+      &__entry->__contention_state, &__entry->__platform_state, &__old_value);
 }
-_LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t __libcpp_atomic_monitor(void const volatile* __location) noexcept {
-  auto const __entry = __libcpp_contention_state(__location);
-  return __libcpp_contention_monitor_for_wait(&__entry->__contention_state, &__entry->__platform_state);
+
+template <std::size_t _Size>
+_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
+__libcpp_atomic_wait_native(void const volatile* __address, void const volatile* __old_value) noexcept {
+  __libcpp_contention_wait<_Size>(
+      &__get_global_contention_state(__address)->__contention_state, __address, __old_value);
 }
-_LIBCPP_EXPORTED_FROM_ABI void
-__libcpp_atomic_wait(void const volatile* __location, __cxx_contention_t __old_value) noexcept {
-  auto const __entry = __libcpp_contention_state(__location);
-  __libcpp_contention_wait(&__entry->__contention_state, &__entry->__platform_state, __old_value);
+
+_LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one_global_table(void const volatile* __location) noexcept {
+  __atomic_notify_global_table(__location);
+}
+_LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all_global_table(void const volatile* __location) noexcept {
+  __atomic_notify_global_table(__location);
 }
 
 /* When the incoming atomic happens to be the platform wait size, we still need to use the
    table for the contention detection, but we can use the atomic directly for the wait. */
 
-_LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(__cxx_atomic_contention_t const volatile* __location) noexcept {
-  __libcpp_contention_notify(&__libcpp_contention_state(__location)->__contention_state, __location, true);
-}
-_LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all(__cxx_atomic_contention_t const volatile* __location) noexcept {
-  __libcpp_contention_notify(&__libcpp_contention_state(__location)->__contention_state, __location, false);
+template <std::size_t _Size>
+_LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one_native(void const volatile* __location) noexcept {
+  __libcpp_contention_notify<_Size>(&__get_global_contention_state(__location)->__contention_state, __location, true);
 }
-// This function is never used, but still exported for ABI compatibility.
-_LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t
-__libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile* __location) noexcept {
-  return __libcpp_contention_monitor_for_wait(&__libcpp_contention_state(__location)->__contention_state, __location);
-}
-_LIBCPP_EXPORTED_FROM_ABI void
-__libcpp_atomic_wait(__cxx_atomic_contention_t const volatile* __location, __cxx_contention_t __old_value) noexcept {
-  __libcpp_contention_wait(&__libcpp_contention_state(__location)->__contention_state, __location, __old_value);
+
+template <std::size_t _Size>
+_LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all_native(void const volatile* __location) noexcept {
+  __libcpp_contention_notify<_Size>(&__get_global_contention_state(__location)->__contention_state, __location, false);
 }
 
+#ifdef __linux__
+
+// TODO
+
+#elif defined(__APPLE__) && defined(_LIBCPP_USE_ULOCK)
+
+template _LIBCPP_EXPORTED_FROM_ABI void
+__libcpp_atomic_wait_native<4>(void const volatile* __address, void const volatile* __old_value) noexcept;
+
+template _LIBCPP_EXPORTED_FROM_ABI void
+__libcpp_atomic_wait_native<8>(void const volatile* __address, void const volatile* __old_value) noexcept;
+
+template _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one_native<4>(void const volatile* __location) noexcept;
+
+template _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one_native<8>(void const volatile* __location) noexcept;
+
+template _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all_native<4>(void const volatile* __location) noexcept;
+
+template _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all_native<8>(void const volatile* __location) noexcept;
+
+#elif defined(__FreeBSD__) && __SIZEOF_LONG__ == 8
+
+// TODO
+
+#else // <- Add other operating systems here
+
+// TODO
+
+#endif // __linux__
+
 _LIBCPP_END_NAMESPACE_STD
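
Note: __get_global_contention_state above maps any waited-on address to one of a fixed, power-of-two number of table entries by hashing the pointer and masking the hash with size - 1. The standalone sketch below shows only that indexing step; the table size of 256 and the struct and field names are illustrative stand-ins, since __libcpp_contention_table_size is defined outside this diff.

// Sketch only: hash an address into a power-of-two contention table.
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <functional>

struct ContentionEntry {
  std::atomic<std::int64_t> waiter_count{0};   // analogue of __contention_state
  std::atomic<std::int64_t> platform_state{0}; // analogue of __platform_state
};

constexpr std::size_t kTableSize = 256; // must stay a power of two for the mask trick
static ContentionEntry table[kTableSize];
static std::hash<const volatile void*> hasher;

ContentionEntry* entry_for(const volatile void* address) {
  // hash & (size - 1) is equivalent to hash % size when size is a power of two.
  return &table[hasher(address) & (kTableSize - 1)];
}

int main() {
  int x = 0;
  std::printf("entry index for &x: %zu\n", static_cast<std::size_t>(entry_for(&x) - table));
}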
