diff --git a/libcxx/docs/ReleaseNotes/21.rst b/libcxx/docs/ReleaseNotes/21.rst index d31ca0130cb80..83a3ac96719eb 100644 --- a/libcxx/docs/ReleaseNotes/21.rst +++ b/libcxx/docs/ReleaseNotes/21.rst @@ -53,6 +53,7 @@ Implemented Papers - P2711R1: Making multi-param constructors of ``views`` ``explicit`` (`Github `__) - P2770R0: Stashing stashing ``iterators`` for proper flattening (`Github `__) - P2655R3: ``common_reference_t`` of ``reference_wrapper`` Should Be a Reference Type (`Github `__) +- P3323R1: cv-qualified types in ``atomic`` and ``atomic_ref`` (`Github `__) Improvements and New Features ----------------------------- diff --git a/libcxx/docs/Status/Cxx2cPapers.csv b/libcxx/docs/Status/Cxx2cPapers.csv index febb0c176f9c4..5f340ca57aa97 100644 --- a/libcxx/docs/Status/Cxx2cPapers.csv +++ b/libcxx/docs/Status/Cxx2cPapers.csv @@ -86,7 +86,7 @@ "`P3050R2 `__","Fix C++26 by optimizing ``linalg::conjugated`` for noncomplex value types","2024-11 (Wrocław)","","","" "`P3396R1 `__","``std::execution`` wording fixes","2024-11 (Wrocław)","","","" "`P2835R7 `__","Expose ``std::atomic_ref``'s object address","2024-11 (Wrocław)","","","" -"`P3323R1 `__","cv-qualified types in ``atomic`` and ``atomic_ref``","2024-11 (Wrocław)","","","" +"`P3323R1 `__","cv-qualified types in ``atomic`` and ``atomic_ref``","2024-11 (Wrocław)","|Complete|","21","Implemented as DR against C++20." 
"`P3508R0 `__","Wording for ""constexpr for specialized memory algorithms""","2024-11 (Wrocław)","","","" "`P3369R0 `__","constexpr for ``uninitialized_default_construct``","2024-11 (Wrocław)","","","" "`P3370R1 `__","Add new library headers from C23","2024-11 (Wrocław)","","","" diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h index b5493662c518e..8db2370dc12ee 100644 --- a/libcxx/include/__atomic/atomic_ref.h +++ b/libcxx/include/__atomic/atomic_ref.h @@ -29,7 +29,13 @@ #include <__cstddef/ptrdiff_t.h> #include <__memory/addressof.h> #include <__type_traits/has_unique_object_representation.h> +#include <__type_traits/is_const.h> +#include <__type_traits/is_object.h> +#include <__type_traits/is_pointer.h> #include <__type_traits/is_trivially_copyable.h> +#include <__type_traits/is_volatile.h> +#include <__type_traits/remove_cv.h> +#include <__type_traits/remove_pointer.h> #include #include @@ -59,9 +65,11 @@ struct __get_aligner_instance { template <class _Tp> struct __atomic_ref_base { + using value_type = __remove_cv_t<_Tp>; + private: - _LIBCPP_HIDE_FROM_ABI static _Tp* __clear_padding(_Tp& __val) noexcept { - _Tp* __ptr = std::addressof(__val); + _LIBCPP_HIDE_FROM_ABI static value_type* __clear_padding(value_type& __val) noexcept { + value_type* __ptr = std::addressof(__val); # if __has_builtin(__builtin_clear_padding) __builtin_clear_padding(__ptr); # endif @@ -69,18 +77,23 @@ struct __atomic_ref_base { } _LIBCPP_HIDE_FROM_ABI static bool __compare_exchange( - _Tp* __ptr, _Tp* __expected, _Tp* __desired, bool __is_weak, int __success, int __failure) noexcept { + _Tp* __ptr, + value_type* __expected, + value_type* __desired, + bool __is_weak, + int __success, + int __failure) noexcept { if constexpr ( # if __has_builtin(__builtin_clear_padding) - has_unique_object_representations_v<_Tp> || floating_point<_Tp> + has_unique_object_representations_v<value_type> || floating_point<value_type> # else true // NOLINT(readability-simplify-boolean-expr) # endif ) { 
return __atomic_compare_exchange(__ptr, __expected, __desired, __is_weak, __success, __failure); - } else { // _Tp has padding bits and __builtin_clear_padding is available + } else { // value_type has padding bits and __builtin_clear_padding is available __clear_padding(*__desired); - _Tp __copy = *__expected; + value_type __copy = *__expected; __clear_padding(__copy); // The algorithm we use here is basically to perform `__atomic_compare_exchange` on the // values until it has either succeeded, or failed because the value representation of the @@ -88,15 +101,15 @@ struct __atomic_ref_base { // we basically loop until its failure is caused by the value representation of the objects // being different, not only their object representation. while (true) { - _Tp __prev = __copy; + value_type __prev = __copy; if (__atomic_compare_exchange(__ptr, std::addressof(__copy), __desired, __is_weak, __success, __failure)) { return true; } - _Tp __curr = __copy; - if (std::memcmp(__clear_padding(__prev), __clear_padding(__curr), sizeof(_Tp)) != 0) { + value_type __curr = __copy; + if (std::memcmp(__clear_padding(__prev), __clear_padding(__curr), sizeof(value_type)) != 0) { // Value representation without padding bits do not compare equal -> // write the current content of *ptr into *expected - std::memcpy(__expected, std::addressof(__copy), sizeof(_Tp)); + std::memcpy(__expected, std::addressof(__copy), sizeof(value_type)); return false; } } @@ -110,8 +123,6 @@ struct __atomic_ref_base { static constexpr size_t __min_alignment = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || (sizeof(_Tp) > 16) ? 0 : sizeof(_Tp); public: - using value_type = _Tp; - static constexpr size_t required_alignment = alignof(_Tp) > __min_alignment ? 
alignof(_Tp) : __min_alignment; // The __atomic_always_lock_free builtin takes into account the alignment of the pointer if provided, @@ -123,43 +134,50 @@ struct __atomic_ref_base { _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __atomic_is_lock_free(sizeof(_Tp), __ptr_); } - _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept - _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) { + _LIBCPP_HIDE_FROM_ABI void store(value_type __desired, memory_order __order = memory_order::seq_cst) const noexcept + requires(!is_const_v<_Tp>) + _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst, "atomic_ref: memory order argument to atomic store operation is invalid"); __atomic_store(__ptr_, __clear_padding(__desired), std::__to_gcc_order(__order)); } - _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { + _LIBCPP_HIDE_FROM_ABI value_type operator=(value_type __desired) const noexcept + requires(!is_const_v<_Tp>) + { store(__desired); return __desired; } - _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __order = memory_order::seq_cst) const noexcept + _LIBCPP_HIDE_FROM_ABI value_type load(memory_order __order = memory_order::seq_cst) const noexcept _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire || __order == memory_order::seq_cst, "atomic_ref: memory order argument to atomic load operation is invalid"); - alignas(_Tp) byte __mem[sizeof(_Tp)]; - auto* __ret = reinterpret_cast<_Tp*>(__mem); + alignas(value_type) byte __mem[sizeof(value_type)]; + auto* __ret = reinterpret_cast<value_type*>(__mem); __atomic_load(__ptr_, __ret, std::__to_gcc_order(__order)); return *__ret; } - _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); } + 
_LIBCPP_HIDE_FROM_ABI operator value_type() const noexcept { return load(); } - _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept { - alignas(_Tp) byte __mem[sizeof(_Tp)]; - auto* __ret = reinterpret_cast<_Tp*>(__mem); + _LIBCPP_HIDE_FROM_ABI value_type + exchange(value_type __desired, memory_order __order = memory_order::seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { + alignas(value_type) byte __mem[sizeof(value_type)]; + auto* __ret = reinterpret_cast<value_type*>(__mem); __atomic_exchange(__ptr_, __clear_padding(__desired), __ret, std::__to_gcc_order(__order)); return *__ret; } - _LIBCPP_HIDE_FROM_ABI bool - compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept - _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) { + _LIBCPP_HIDE_FROM_ABI bool compare_exchange_weak( + value_type& __expected, value_type __desired, memory_order __success, memory_order __failure) const noexcept + requires(!is_const_v<_Tp>) + _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( __failure == memory_order::relaxed || __failure == memory_order::consume || __failure == memory_order::acquire || __failure == memory_order::seq_cst, @@ -172,9 +190,10 @@ struct __atomic_ref_base { std::__to_gcc_order(__success), std::__to_gcc_order(__failure)); } - _LIBCPP_HIDE_FROM_ABI bool - compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept - _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) { + _LIBCPP_HIDE_FROM_ABI bool compare_exchange_strong( + value_type& __expected, value_type __desired, memory_order __success, memory_order __failure) const noexcept + requires(!is_const_v<_Tp>) + _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( __failure == memory_order::relaxed || __failure == memory_order::consume || 
__failure == memory_order::acquire || __failure == memory_order::seq_cst, @@ -188,8 +207,10 @@ struct __atomic_ref_base { std::__to_gcc_order(__failure)); } - _LIBCPP_HIDE_FROM_ABI bool - compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept { + _LIBCPP_HIDE_FROM_ABI bool compare_exchange_weak( + value_type& __expected, value_type __desired, memory_order __order = memory_order::seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { return __compare_exchange( __ptr_, std::addressof(__expected), @@ -198,8 +219,10 @@ struct __atomic_ref_base { std::__to_gcc_order(__order), std::__to_gcc_failure_order(__order)); } - _LIBCPP_HIDE_FROM_ABI bool - compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept { + _LIBCPP_HIDE_FROM_ABI bool compare_exchange_strong( + value_type& __expected, value_type __desired, memory_order __order = memory_order::seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { return __compare_exchange( __ptr_, std::addressof(__expected), @@ -209,7 +232,7 @@ struct __atomic_ref_base { std::__to_gcc_failure_order(__order)); } - _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept + _LIBCPP_HIDE_FROM_ABI void wait(value_type __old, memory_order __order = memory_order::seq_cst) const noexcept _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire || @@ -217,8 +240,16 @@ struct __atomic_ref_base { "atomic_ref: memory order argument to atomic wait operation is invalid"); std::__atomic_wait(*this, __old, __order); } - _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { std::__atomic_notify_one(*this); } - _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { std::__atomic_notify_all(*this); } + _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept 
+ requires(!is_const_v<_Tp>) + { + std::__atomic_notify_one(*this); + } + _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept + requires(!is_const_v<_Tp>) + { + std::__atomic_notify_all(*this); + } protected: using _Aligned_Tp [[__gnu__::__aligned__(required_alignment), __gnu__::__nodebug__]] = _Tp; @@ -243,6 +274,8 @@ struct atomic_ref : public __atomic_ref_base<_Tp> { using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp>; + static_assert(__base::is_always_lock_free || !is_volatile_v<_Tp>); + _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0, @@ -251,17 +284,24 @@ struct atomic_ref : public __atomic_ref_base<_Tp> { _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default; - _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); } + _LIBCPP_HIDE_FROM_ABI __base::value_type operator=(__base::value_type __desired) const noexcept + requires(!is_const_v<_Tp>) + { + return __base::operator=(__desired); + } atomic_ref& operator=(const atomic_ref&) = delete; }; template <class _Tp> - requires(std::integral<_Tp> && !std::same_as<_Tp, bool>) + requires(integral<_Tp> && !same_as<__remove_cv_t<_Tp>, bool>) struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> { using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp>; + static_assert(__base::is_always_lock_free || !is_volatile_v<_Tp>); + using difference_type = __base::value_type; + using typename __base::value_type; _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( 
requires(!is_const_v<_Tp>) + { + return __base::operator=(__desired); + } atomic_ref& operator=(const atomic_ref&) = delete; - _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + _LIBCPP_HIDE_FROM_ABI value_type + fetch_add(value_type __arg, memory_order __order = memory_order_seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order)); } - _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + _LIBCPP_HIDE_FROM_ABI value_type + fetch_sub(value_type __arg, memory_order __order = memory_order_seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order)); } - _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + _LIBCPP_HIDE_FROM_ABI value_type + fetch_and(value_type __arg, memory_order __order = memory_order_seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { return __atomic_fetch_and(this->__ptr_, __arg, std::__to_gcc_order(__order)); } - _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + _LIBCPP_HIDE_FROM_ABI value_type + fetch_or(value_type __arg, memory_order __order = memory_order_seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { return __atomic_fetch_or(this->__ptr_, __arg, std::__to_gcc_order(__order)); } - _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + _LIBCPP_HIDE_FROM_ABI value_type + fetch_xor(value_type __arg, memory_order __order = memory_order_seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { return __atomic_fetch_xor(this->__ptr_, __arg, std::__to_gcc_order(__order)); } - _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); } - _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) const noexcept { return 
fetch_sub(_Tp(1)); } - _LIBCPP_HIDE_FROM_ABI _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); } - _LIBCPP_HIDE_FROM_ABI _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); } - _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; } - _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; } - _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; } - _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; } - _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; } + _LIBCPP_HIDE_FROM_ABI value_type operator++(int) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_add(value_type(1)); + } + _LIBCPP_HIDE_FROM_ABI value_type operator--(int) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_sub(value_type(1)); + } + _LIBCPP_HIDE_FROM_ABI value_type operator++() const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_add(value_type(1)) + value_type(1); + } + _LIBCPP_HIDE_FROM_ABI value_type operator--() const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_sub(value_type(1)) - value_type(1); + } + _LIBCPP_HIDE_FROM_ABI value_type operator+=(value_type __arg) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_add(__arg) + __arg; + } + _LIBCPP_HIDE_FROM_ABI value_type operator-=(value_type __arg) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_sub(__arg) - __arg; + } + _LIBCPP_HIDE_FROM_ABI value_type operator&=(value_type __arg) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_and(__arg) & __arg; + } + _LIBCPP_HIDE_FROM_ABI value_type operator|=(value_type __arg) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_or(__arg) | __arg; + } + _LIBCPP_HIDE_FROM_ABI value_type operator^=(value_type __arg) const noexcept + 
requires(!is_const_v<_Tp>) + { + return fetch_xor(__arg) ^ __arg; + } }; template <class _Tp> - requires std::floating_point<_Tp> + requires floating_point<_Tp> struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> { using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp>; + static_assert(__base::is_always_lock_free || !is_volatile_v<_Tp>); + using difference_type = __base::value_type; + using typename __base::value_type; _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( @@ -317,56 +415,112 @@ struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> { _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default; - _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); } + _LIBCPP_HIDE_FROM_ABI value_type operator=(value_type __desired) const noexcept + requires(!is_const_v<_Tp>) + { + return __base::operator=(__desired); + } atomic_ref& operator=(const atomic_ref&) = delete; - _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { - _Tp __old = this->load(memory_order_relaxed); - _Tp __new = __old + __arg; + _LIBCPP_HIDE_FROM_ABI value_type + fetch_add(value_type __arg, memory_order __order = memory_order_seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { + value_type __old = this->load(memory_order_relaxed); + value_type __new = __old + __arg; while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) { __new = __old + __arg; } return __old; } - _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { - _Tp __old = this->load(memory_order_relaxed); - _Tp __new = __old - __arg; + _LIBCPP_HIDE_FROM_ABI value_type + fetch_sub(value_type __arg, memory_order __order = memory_order_seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { + value_type __old = this->load(memory_order_relaxed); + value_type __new = __old - __arg; while 
(!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) { __new = __old - __arg; } return __old; } - _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; } - _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; } + _LIBCPP_HIDE_FROM_ABI value_type operator+=(value_type __arg) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_add(__arg) + __arg; + } + _LIBCPP_HIDE_FROM_ABI value_type operator-=(value_type __arg) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_sub(__arg) - __arg; + } }; template <class _Tp> -struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> { - using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp*>; + requires(is_pointer_v<_Tp> && is_object_v<__remove_pointer_t<_Tp>>) +struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> { + using __base _LIBCPP_NODEBUG = __atomic_ref_base<_Tp>; using difference_type = ptrdiff_t; + using typename __base::value_type; - _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {} + _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __ptr) : __base(__ptr) {} - _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); } + _LIBCPP_HIDE_FROM_ABI value_type operator=(value_type __desired) const noexcept + requires(!is_const_v<_Tp>) + { + return __base::operator=(__desired); + } atomic_ref& operator=(const atomic_ref&) = delete; - _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept { - return __atomic_fetch_add(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order)); + _LIBCPP_HIDE_FROM_ABI value_type + fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { + return __atomic_fetch_add( + this->__ptr_, __arg * sizeof(__remove_pointer_t<value_type>), std::__to_gcc_order(__order)); } - _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, 
memory_order __order = memory_order_seq_cst) const noexcept { - return __atomic_fetch_sub(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order)); + _LIBCPP_HIDE_FROM_ABI value_type + fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept + requires(!is_const_v<_Tp>) + { + return __atomic_fetch_sub( + this->__ptr_, __arg * sizeof(__remove_pointer_t<value_type>), std::__to_gcc_order(__order)); } - _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); } - _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) const noexcept { return fetch_sub(1); } - _LIBCPP_HIDE_FROM_ABI _Tp* operator++() const noexcept { return fetch_add(1) + 1; } - _LIBCPP_HIDE_FROM_ABI _Tp* operator--() const noexcept { return fetch_sub(1) - 1; } - _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; } - _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; } + _LIBCPP_HIDE_FROM_ABI value_type operator++(int) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_add(1); + } + _LIBCPP_HIDE_FROM_ABI value_type operator--(int) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_sub(1); + } + _LIBCPP_HIDE_FROM_ABI value_type operator++() const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_add(1) + 1; + } + _LIBCPP_HIDE_FROM_ABI value_type operator--() const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_sub(1) - 1; + } + _LIBCPP_HIDE_FROM_ABI value_type operator+=(ptrdiff_t __arg) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_add(__arg) + __arg; + } + _LIBCPP_HIDE_FROM_ABI value_type operator-=(ptrdiff_t __arg) const noexcept + requires(!is_const_v<_Tp>) + { + return fetch_sub(__arg) - __arg; + } }; _LIBCPP_CTAD_SUPPORTED_FOR_TYPE(atomic_ref); diff --git a/libcxx/include/__atomic/support.h b/libcxx/include/__atomic/support.h index 99d0f6aa543ca..64533036125b2 --- 
a/libcxx/include/__atomic/support.h +++ b/libcxx/include/__atomic/support.h @@ -10,6 +10,8 @@ #define _LIBCPP___ATOMIC_SUPPORT_H #include <__config> +#include <__type_traits/is_const.h> +#include <__type_traits/is_volatile.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -112,6 +114,9 @@ _LIBCPP_BEGIN_NAMESPACE_STD template <class _Tp, class _Base = __cxx_atomic_base_impl<_Tp> > struct __cxx_atomic_impl : public _Base { + static_assert(!is_const<_Tp>::value && !is_volatile<_Tp>::value, + "std::atomic requires that 'T' be a cv-unqualified type"); + _LIBCPP_HIDE_FROM_ABI __cxx_atomic_impl() _NOEXCEPT = default; _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp __value) _NOEXCEPT : _Base(__value) {} }; diff --git a/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp index 9b2f9042e9836..80235d9436923 100644 --- a/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp +++ b/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp @@ -23,29 +23,50 @@ template <class T> struct TestAssign { void operator()() const { - { - T x(T(1)); - std::atomic_ref<T> const a(x); + using Unqualified = std::remove_cv_t<T>; + static_assert(std::is_assignable_v<std::atomic_ref<T>, Unqualified> == !std::is_const_v<T>); - std::same_as<T> decltype(auto) y = (a = T(2)); - assert(y == T(2)); - assert(x == T(2)); + if constexpr (!std::is_const_v<T>) { + { + T x(Unqualified(1)); + std::atomic_ref<T> const a(x); - ASSERT_NOEXCEPT(a = T(0)); - static_assert(std::is_nothrow_assignable_v<std::atomic_ref<T>, T>); + std::same_as<Unqualified> decltype(auto) y = (a = Unqualified(2)); + assert(y == Unqualified(2)); + assert(const_cast<Unqualified&>(x) == Unqualified(2)); - static_assert(!std::is_copy_assignable_v<std::atomic_ref<T>>); - } + ASSERT_NOEXCEPT(a = Unqualified(0)); + static_assert(std::is_nothrow_assignable_v<std::atomic_ref<T>, Unqualified>); + static_assert(!std::is_copy_assignable_v<std::atomic_ref<T>>); + } - { - auto assign = [](std::atomic_ref<T> const& y, T, T new_val) { y = new_val; }; - auto load = [](std::atomic_ref<T> const& y) { return y.load(); }; - test_seq_cst<T>(assign, load); + { + auto 
assign = [](std::atomic_ref<T> const& y, T const&, T const& new_val) { + y = const_cast<Unqualified&>(new_val); + }; + auto load = [](std::atomic_ref<T> const& y) { return y.load(); }; + test_seq_cst<T>(assign, load); + } } } }; +template