@@ -30,65 +30,73 @@ namespace detail {
 template <typename T>
 struct atomic_ref_base {
-  static_assert(sizeof(T) == sizeof(std::atomic<T>), "size mismatch");
-  static_assert(
-      std::is_trivially_copyable_v<T>, "value not trivially-copyable");
+  using value_type = remove_cvref_t<T>;
+
+ private:
+  using atomic_reference = copy_cvref_t<T, std::atomic<value_type>>&;
 
-  using value_type = T;
+ public:
+  static_assert(
+      sizeof(value_type) == sizeof(std::atomic<value_type>), "size mismatch");
+  static_assert(
+      std::is_trivially_copyable_v<value_type>, "value not trivially-copyable");
 
   static inline constexpr std::size_t required_alignment =
-      alignof(std::atomic<T>);
+      alignof(std::atomic<value_type>);
 
   explicit atomic_ref_base(T& ref) : ref_(ref) { check_alignment_(); }
   atomic_ref_base(atomic_ref_base const&) = default;
 
-  void store(T desired, std::memory_order order = std::memory_order_seq_cst)
-      const noexcept {
+  void store(
+      value_type desired,
+      std::memory_order order = std::memory_order_seq_cst) const noexcept {
     return atomic().store(desired, order);
   }
 
-  T load(std::memory_order order = std::memory_order_seq_cst) const noexcept {
+  value_type load(
+      std::memory_order order = std::memory_order_seq_cst) const noexcept {
     return atomic().load(order);
   }
 
-  T exchange(T desired, std::memory_order order = std::memory_order_seq_cst)
-      const noexcept {
+  value_type exchange(
+      value_type desired,
+      std::memory_order order = std::memory_order_seq_cst) const noexcept {
     return atomic().exchange(desired, order);
   }
 
   bool compare_exchange_weak(
-      T& expected,
-      T desired,
+      value_type& expected,
+      value_type desired,
       std::memory_order success,
       std::memory_order failure) const noexcept {
     return atomic().compare_exchange_weak(expected, desired, success, failure);
   }
 
   bool compare_exchange_weak(
-      T& expected,
-      T desired,
+      value_type& expected,
+      value_type desired,
       std::memory_order order = std::memory_order_seq_cst) const noexcept {
     return atomic().compare_exchange_weak(expected, desired, order);
   }
 
   bool compare_exchange_strong(
-      T& expected,
-      T desired,
+      value_type& expected,
+      value_type desired,
       std::memory_order success,
       std::memory_order failure) const noexcept {
     return atomic().compare_exchange_strong(
         expected, desired, success, failure);
   }
 
   bool compare_exchange_strong(
-      T& expected,
-      T desired,
+      value_type& expected,
+      value_type desired,
       std::memory_order order = std::memory_order_seq_cst) const noexcept {
     return atomic().compare_exchange_strong(expected, desired, order);
   }
 
-  std::atomic<T>& atomic() const noexcept {
-    return reinterpret_cast<std::atomic<T>&>(ref_); // ub dragons be here
+  atomic_reference atomic() const noexcept {
+    return reinterpret_cast<atomic_reference>(ref_); // ub dragons be here
   }
 
  private:
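
A note on the hunk above: `atomic_reference` is computed with `copy_cvref_t`, so the qualifiers of the referent type `T` get stamped onto the underlying `std::atomic`. For `T = const int`, `atomic()` hands back a `const std::atomic<int>&`, so `load()` instantiates fine while `store()` would not. A minimal cv-only sketch of what such a trait does (an illustration only, not this codebase's actual definition, which also propagates reference qualifiers):

#include <atomic>
#include <type_traits>

// Copy const/volatile from Src onto Dst (sketch of a copy_cvref_t-style trait).
template <typename Src, typename Dst>
using copy_cv_t = std::conditional_t<
    std::is_const_v<Src>,
    std::conditional_t<std::is_volatile_v<Src>, const volatile Dst, const Dst>,
    std::conditional_t<std::is_volatile_v<Src>, volatile Dst, Dst>>;

// For T = const int, the computed reference type is const std::atomic<int>&.
static_assert(
    std::is_same_v<
        copy_cv_t<const int, std::atomic<int>>&,
        const std::atomic<int>&>);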
@@ -103,38 +111,45 @@ struct atomic_ref_base {
 template <typename T>
 struct atomic_ref_integral_base : atomic_ref_base<T> {
+  using typename atomic_ref_base<T>::value_type;
+
   using atomic_ref_base<T>::atomic_ref_base;
   using atomic_ref_base<T>::atomic;
 
-  T fetch_add(T arg, std::memory_order order = std::memory_order_seq_cst)
-      const noexcept {
+  value_type fetch_add(
+      value_type arg,
+      std::memory_order order = std::memory_order_seq_cst) const noexcept {
     return atomic().fetch_add(arg, order);
   }
 
-  T fetch_sub(T arg, std::memory_order order = std::memory_order_seq_cst)
-      const noexcept {
+  value_type fetch_sub(
+      value_type arg,
+      std::memory_order order = std::memory_order_seq_cst) const noexcept {
     return atomic().fetch_sub(arg, order);
   }
 
-  T fetch_and(T arg, std::memory_order order = std::memory_order_seq_cst)
-      const noexcept {
+  value_type fetch_and(
+      value_type arg,
+      std::memory_order order = std::memory_order_seq_cst) const noexcept {
     return atomic().fetch_and(arg, order);
   }
 
-  T fetch_or(T arg, std::memory_order order = std::memory_order_seq_cst)
-      const noexcept {
+  value_type fetch_or(
+      value_type arg,
+      std::memory_order order = std::memory_order_seq_cst) const noexcept {
     return atomic().fetch_or(arg, order);
   }
 
-  T fetch_xor(T arg, std::memory_order order = std::memory_order_seq_cst)
-      const noexcept {
+  value_type fetch_xor(
+      value_type arg,
+      std::memory_order order = std::memory_order_seq_cst) const noexcept {
     return atomic().fetch_xor(arg, order);
   }
 };
 
-template <typename T>
+template <typename T, typename TD = remove_cvref_t<T>>
 using atomic_ref_select = conditional_t<
-    std::is_integral<T>::value && !std::is_same<T, bool>::value,
+    std::is_integral<TD>::value && !std::is_same<TD, bool>::value,
     atomic_ref_integral_base<T>,
     atomic_ref_base<T>>;
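
Why the defaulted `TD = remove_cvref_t<T>` in `atomic_ref_select` matters: the trait checks should run on the unqualified type, or the `bool` exclusion breaks for cv-qualified referents. A small illustration of the behavior presumably being relied on here (my reading of the trait semantics, not something stated in this diff):

#include <type_traits>

// is_integral ignores cv, so a const int would still pick the integral base...
static_assert(std::is_integral<const int>::value);
// ...but is_same does not: without stripping cv, a const bool would slip
// past the !is_same<T, bool> guard and wrongly gain fetch_* operations.
static_assert(!std::is_same<const bool, bool>::value);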
@@ -166,10 +181,12 @@ atomic_ref(T&) -> atomic_ref<T>;
 struct make_atomic_ref_t {
   template <
       typename T,
+      typename...,
+      typename TD = remove_cvref_t<T>,
+      typename ATD = std::atomic<TD>,
       std::enable_if_t<
-          std::is_trivially_copyable_v<T> &&
-              sizeof(T) == sizeof(std::atomic<T>) &&
-              alignof(T) == alignof(std::atomic<T>),
+          std::is_trivially_copyable_v<TD> && //
+              sizeof(TD) == sizeof(ATD) && alignof(TD) == alignof(ATD),
           int> = 0>
   atomic_ref<T> operator()(T& ref) const {
     return atomic_ref<T>{ref};
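
Taken together, the change lets `atomic_ref` (and `make_atomic_ref`) bind to cv-qualified referents while the trait checks and the selected base run on the unqualified value type. A usage sketch, assuming this is folly's `atomic_ref` from `folly/synchronization/AtomicRef.h`; everything outside the diff is an assumption:

#include <folly/synchronization/AtomicRef.h>

int main() {
  int counter = 0;
  folly::atomic_ref<int> ref{counter}; // integral base: fetch_* available
  ref.fetch_add(1, std::memory_order_relaxed);

  const int& view = counter;
  folly::atomic_ref cview{view}; // deduction guide yields atomic_ref<const int>
  int v = cview.load();          // ok: reads through const std::atomic<int>&
  // cview.store(2);             // would not compile: store on a const atomic
  return v == 1 ? 0 : 1;
}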