Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions libcxx/include/__atomic/atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -212,6 +212,8 @@ struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> {
// __atomic_base<int, false>. So specializing __atomic_base<_Tp> does not work
template <class _Tp, bool _IsIntegral>
struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > {
using __inner_type _LIBCPP_NODEBUG = _Tp;

static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) {
return __a.load(__order);
}
Expand Down
2 changes: 2 additions & 0 deletions libcxx/include/__atomic/atomic_flag.h
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,8 @@ struct atomic_flag {

template <>
struct __atomic_waitable_traits<atomic_flag> {
using __inner_type _LIBCPP_NODEBUG = _LIBCPP_ATOMIC_FLAG_TYPE;

static _LIBCPP_HIDE_FROM_ABI _LIBCPP_ATOMIC_FLAG_TYPE __atomic_load(const atomic_flag& __a, memory_order __order) {
return std::__cxx_atomic_load(&__a.__a_, __order);
}
Expand Down
2 changes: 2 additions & 0 deletions libcxx/include/__atomic/atomic_ref.h
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,8 @@ struct __atomic_ref_base {

template <class _Tp>
struct __atomic_waitable_traits<__atomic_ref_base<_Tp>> {
using __inner_type _LIBCPP_NODEBUG = _Tp;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a reason why we don't simply use ::value_type on all of these types?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

atomic_flag does not have value_type


static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) {
return __a.load(__order);
}
Expand Down
82 changes: 43 additions & 39 deletions libcxx/include/__atomic/atomic_sync.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD
// The below implementations look ugly to support C++03
template <class _Tp, class = void>
struct __atomic_waitable_traits {
using __inner_type _LIBCPP_NODEBUG = void;

template <class _AtomicWaitable>
static void __atomic_load(_AtomicWaitable&&, memory_order) = delete;

Expand All @@ -58,21 +60,26 @@ struct __atomic_waitable< _Tp,
#if _LIBCPP_STD_VER >= 20
# if _LIBCPP_HAS_THREADS

_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(void const volatile*) _NOEXCEPT;
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all(void const volatile*) _NOEXCEPT;
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t
__libcpp_atomic_monitor(void const volatile*) _NOEXCEPT;
template <std::size_t _Size>
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
__libcpp_atomic_wait(void const volatile*, __cxx_contention_t) _NOEXCEPT;
__libcpp_atomic_wait_native(void const volatile* __address, void const volatile* __old_value) _NOEXCEPT;

_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t
__libcpp_atomic_monitor_global(void const volatile* __address) _NOEXCEPT;

_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
__cxx_atomic_notify_one(__cxx_atomic_contention_t const volatile*) _NOEXCEPT;
__libcpp_atomic_wait_global_table(void const volatile* __address, __cxx_contention_t __monitor_value) _NOEXCEPT;

_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one_global_table(void const volatile*) _NOEXCEPT;
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all_global_table(void const volatile*) _NOEXCEPT;

template <std::size_t _Size>
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
__cxx_atomic_notify_all(__cxx_atomic_contention_t const volatile*) _NOEXCEPT;
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t
__libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile*) _NOEXCEPT;
__cxx_atomic_notify_one_native(const volatile void*) _NOEXCEPT;

template <std::size_t _Size>
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void
__libcpp_atomic_wait(__cxx_atomic_contention_t const volatile*, __cxx_contention_t) _NOEXCEPT;
__cxx_atomic_notify_all_native(const volatile void*) _NOEXCEPT;

template <class _AtomicWaitable, class _Poll>
struct __atomic_wait_backoff_impl {
Expand All @@ -81,38 +88,25 @@ struct __atomic_wait_backoff_impl {
memory_order __order_;

using __waitable_traits _LIBCPP_NODEBUG = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >;

_LIBCPP_AVAILABILITY_SYNC
_LIBCPP_HIDE_FROM_ABI bool
__update_monitor_val_and_poll(__cxx_atomic_contention_t const volatile*, __cxx_contention_t& __monitor_val) const {
// In case the contention type happens to be __cxx_atomic_contention_t, i.e. __cxx_atomic_impl<int64_t>,
// the platform wait is directly monitoring the atomic value itself.
// `__poll_` takes the current value of the atomic as an in-out argument
// to potentially modify it. After it returns, `__monitor` has a value
// which can be safely waited on by `std::__libcpp_atomic_wait` without any
// ABA style issues.
__monitor_val = __waitable_traits::__atomic_load(__a_, __order_);
return __poll_(__monitor_val);
}

_LIBCPP_AVAILABILITY_SYNC
_LIBCPP_HIDE_FROM_ABI bool
__update_monitor_val_and_poll(void const volatile* __contention_address, __cxx_contention_t& __monitor_val) const {
// In case the contention type is anything else, platform wait is monitoring a __cxx_atomic_contention_t
// from the global pool, the monitor comes from __libcpp_atomic_monitor
__monitor_val = std::__libcpp_atomic_monitor(__contention_address);
auto __current_val = __waitable_traits::__atomic_load(__a_, __order_);
return __poll_(__current_val);
}
using __inner_type _LIBCPP_NODEBUG = typename __waitable_traits::__inner_type;

_LIBCPP_AVAILABILITY_SYNC
_LIBCPP_HIDE_FROM_ABI bool operator()(chrono::nanoseconds __elapsed) const {
if (__elapsed > chrono::microseconds(4)) {
auto __contention_address = __waitable_traits::__atomic_contention_address(__a_);
__cxx_contention_t __monitor_val;
if (__update_monitor_val_and_poll(__contention_address, __monitor_val))
return true;
std::__libcpp_atomic_wait(__contention_address, __monitor_val);

if constexpr (__is_atomic_wait_native_type<__inner_type>::value) {
auto __atomic_value = __waitable_traits::__atomic_load(__a_, __order_);
if (__poll_(__atomic_value))
return true;
std::__libcpp_atomic_wait_native<sizeof(__inner_type)>(__contention_address, &__atomic_value);
} else {
__cxx_contention_t __monitor_val = std::__libcpp_atomic_monitor_global(__contention_address);
auto __atomic_value = __waitable_traits::__atomic_load(__a_, __order_);
if (__poll_(__atomic_value))
return true;
std::__libcpp_atomic_wait_global_table(__contention_address, __monitor_val);
}
} else {
} // poll
return false;
Expand Down Expand Up @@ -144,13 +138,23 @@ __atomic_wait_unless(const _AtomicWaitable& __a, memory_order __order, _Poll&& _
// Wakes at most one thread blocked in an atomic wait on __a.
//
// Dispatch mirrors the wait side: when the waitable's inner type has a size the
// platform wait primitive can monitor directly, we notify at the atomic's own
// address; otherwise waiters are parked in the global contention table keyed by
// that address, so we notify through the table instead.
//
// NOTE: the old unconditional call to std::__cxx_atomic_notify_one() was
// superseded by this dispatch and must not remain alongside it (it would
// notify through the removed symbol a second time).
template <class _AtomicWaitable>
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_one(const _AtomicWaitable& __a) {
  static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
  using __traits _LIBCPP_NODEBUG     = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >;
  using __inner_type _LIBCPP_NODEBUG = typename __traits::__inner_type;
  if constexpr (__is_atomic_wait_native_type<__inner_type>::value) {
    std::__cxx_atomic_notify_one_native<sizeof(__inner_type)>(__traits::__atomic_contention_address(__a));
  } else {
    std::__cxx_atomic_notify_one_global_table(__traits::__atomic_contention_address(__a));
  }
}

// Wakes every thread blocked in an atomic wait on __a.
//
// Same dispatch as __atomic_notify_one: native-sized inner types are notified
// directly at the atomic's address; everything else goes through the global
// contention table keyed by that address.
//
// NOTE: the old unconditional call to std::__cxx_atomic_notify_all() was
// superseded by this dispatch and must not remain alongside it (it would
// notify through the removed symbol a second time).
template <class _AtomicWaitable>
_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _AtomicWaitable& __a) {
  static_assert(__atomic_waitable<_AtomicWaitable>::value, "");
  using __traits _LIBCPP_NODEBUG     = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >;
  using __inner_type _LIBCPP_NODEBUG = typename __traits::__inner_type;
  if constexpr (__is_atomic_wait_native_type<__inner_type>::value) {
    std::__cxx_atomic_notify_all_native<sizeof(__inner_type)>(__traits::__atomic_contention_address(__a));
  } else {
    std::__cxx_atomic_notify_all_global_table(__traits::__atomic_contention_address(__a));
  }
}

# else // _LIBCPP_HAS_THREADS
Expand Down
17 changes: 17 additions & 0 deletions libcxx/include/__atomic/contention_t.h
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You should introduce this behind an ABI macro. I think the easiest way to do that is probably to constrain the set of types that you perform this optimization for to be just uint64_t when we're being ABI compatible.

Also, we're introducing new symbols to the dylib. That means we need to handle back deployment. You'll need to add an entry to __configuration/availability.h (we have examples there), and only call your new symbols in the dylib when the deployment target supports that. It's not difficult, but it will be a bit of a challenge to refactor things so that you can still call the old symbols.

Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,10 @@

#include <__atomic/support.h>
#include <__config>
#include <__type_traits/enable_if.h>
#include <__type_traits/integral_constant.h>
#include <__type_traits/is_standard_layout.h>
#include <__type_traits/is_trivially_copyable.h>
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Use of is_standard_layout looks incorrect to me. I guess we want object types without padding, which is orthogonal to the standard-layout property.

#include <cstddef>
#include <cstdint>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
Expand All @@ -19,10 +23,23 @@

_LIBCPP_BEGIN_NAMESPACE_STD

template <class _Tp, class = void>
struct __is_atomic_wait_native_type : false_type {};

#if defined(__linux__) || (defined(_AIX) && !defined(__64BIT__))
using __cxx_contention_t _LIBCPP_NODEBUG = int32_t;

template <class _Tp>
struct __is_atomic_wait_native_type<_Tp, __enable_if_t<is_standard_layout<_Tp>::value && sizeof(_Tp) == 4> > : true_type {};
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe what you mean here is just trivially copyable? What properties of the type do you use?

Or do we need has_unique_object_representations? If an object has padding bytes in it, we probably don't want to consider them for the purpose of atomic-waiting?


#else
using __cxx_contention_t _LIBCPP_NODEBUG = int64_t;

template <class _Tp>
struct __is_atomic_wait_native_type<_Tp,
__enable_if_t<is_standard_layout<_Tp>::value && (sizeof(_Tp) == 4 || sizeof(_Tp) == 8)> >
: true_type {};

#endif // __linux__ || (_AIX && !__64BIT__)

using __cxx_atomic_contention_t _LIBCPP_NODEBUG = __cxx_atomic_impl<__cxx_contention_t>;
Expand Down
Loading
Loading