@@ -9,6 +9,7 @@
 #ifndef LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H
 #define LLVM_LIBC_SRC___SUPPORT_CPP_ATOMIC_H
 
+#include "src/__support/CPP/type_traits/has_unique_object_representations.h"
 #include "src/__support/macros/attributes.h"
 #include "src/__support/macros/config.h"
 #include "src/__support/macros/properties/architectures.h"
@@ -47,12 +48,11 @@ template <typename T> struct Atomic {
                 "constructible, move constructible, copy assignable, "
                 "and move assignable.");
 
+  static_assert(cpp::has_unique_object_representations_v<T>,
+                "atomic<T> in libc only supports types whose values have unique "
+                "object representations.");
+
 private:
-  // The value stored should be appropriately aligned so that
-  // hardware instructions used to perform atomic operations work
-  // correctly.
-  static constexpr int ALIGNMENT = sizeof(T) > alignof(T) ? sizeof(T)
-                                                          : alignof(T);
   // type conversion helper to avoid long c++ style casts
   LIBC_INLINE static int order(MemoryOrder mem_ord) {
     return static_cast<int>(mem_ord);
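Note: the new static_assert exists because the __atomic_compare_exchange builtins compare whole object representations byte for byte, so padding bits inside T could cause spurious mismatches. A minimal standalone illustration, using the standard trait that the libc-internal cpp::has_unique_object_representations_v mirrors (the PaddedPair type is hypothetical and not part of this change; it assumes a typical ABI where a char followed by an int leaves padding bytes):

#include <type_traits>

// A 1-byte member followed by an int leaves padding bytes on common ABIs, so
// PaddedPair's object representation is not unique and Atomic<PaddedPair>
// would now be rejected at compile time.
struct PaddedPair {
  char tag;
  int value;
};

static_assert(std::has_unique_object_representations_v<int>,
              "plain integers are still accepted");
static_assert(!std::has_unique_object_representations_v<PaddedPair>,
              "types with padding bits are rejected");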
@@ -62,6 +62,17 @@ template <typename T> struct Atomic {
     return static_cast<int>(mem_scope);
   }
 
+  LIBC_INLINE static T *addressof(T &ref) { return __builtin_addressof(ref); }
+
+  // Require types that are 1, 2, 4, 8, or 16 bytes in length to be aligned to
+  // at least their size to be potentially used lock-free.
+  LIBC_INLINE_VAR static constexpr size_t MIN_ALIGNMENT =
+      (sizeof(T) & (sizeof(T) - 1)) || (sizeof(T) > 16) ? 0 : sizeof(T);
+
+  LIBC_INLINE_VAR static constexpr size_t ALIGNMENT = alignof(T) > MIN_ALIGNMENT
+                                                          ? alignof(T)
+                                                          : MIN_ALIGNMENT;
+
 public:
   using value_type = T;
 
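For context, (sizeof(T) & (sizeof(T) - 1)) is the usual power-of-two test: it is zero exactly when sizeof(T) is a power of two. MIN_ALIGNMENT is therefore sizeof(T) for 1, 2, 4, 8, and 16 byte types and 0 otherwise, and ALIGNMENT takes the larger of that and the natural alignof(T). A standalone sketch of the same arithmetic (the helper names and example structs are illustrative, not libc code, and assume a typical ABI with a 4-byte int):

#include <cstddef>

// Re-statement of the patch's alignment rule outside the class, for
// illustration only.
template <typename T> constexpr size_t min_alignment() {
  return (sizeof(T) & (sizeof(T) - 1)) || (sizeof(T) > 16) ? 0 : sizeof(T);
}
template <typename T> constexpr size_t alignment() {
  return alignof(T) > min_alignment<T>() ? alignof(T) : min_alignment<T>();
}

struct ThreeBytes { char a, b, c; }; // size 3: not a power of two
struct TwoInts { int a, b; };        // size 8 with 4-byte int, alignof often 4

static_assert(min_alignment<ThreeBytes>() == 0, "3 is not a power of two");
static_assert(alignment<ThreeBytes>() == alignof(ThreeBytes),
              "falls back to the natural alignment");
static_assert(min_alignment<TwoInts>() == sizeof(TwoInts),
              "power-of-two sizes up to 16 get size alignment");
static_assert(alignment<TwoInts>() >= sizeof(TwoInts),
              "alignment is raised so the type can be lock-free");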
@@ -87,9 +98,10 @@ template <typename T> struct Atomic {
        [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     T res;
 #if __has_builtin(__scoped_atomic_load)
-    __scoped_atomic_load(&val, &res, order(mem_ord), scope(mem_scope));
+    __scoped_atomic_load(addressof(val), addressof(res), order(mem_ord),
+                         scope(mem_scope));
 #else
-    __atomic_load(&val, &res, order(mem_ord));
+    __atomic_load(addressof(val), addressof(res), order(mem_ord));
 #endif
     return res;
   }
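The switch from &val to addressof(val) matters for a generic wrapper because a user-provided T could overload unary operator&; __builtin_addressof, which the new addressof() helper wraps, always yields the object's true address. A small hypothetical demonstration, independent of the libc sources and relying only on the Clang/GCC builtin:

// Hypothetical type whose overloaded operator& lies about its address.
struct Tricky {
  int payload = 0;
  int *operator&() { return nullptr; } // deliberately misleading overload
};

int main() {
  Tricky t;
  Tricky *real = __builtin_addressof(t); // bypasses the overload
  int *hijacked = &t;                    // calls the overload, yields nullptr
  return (real != nullptr && hijacked == nullptr) ? 0 : 1;
}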
@@ -104,36 +116,39 @@ template <typename T> struct Atomic {
   store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
         [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
 #if __has_builtin(__scoped_atomic_store)
-    __scoped_atomic_store(&val, &rhs, order(mem_ord), scope(mem_scope));
+    __scoped_atomic_store(addressof(val), addressof(rhs), order(mem_ord),
+                          scope(mem_scope));
 #else
-    __atomic_store(&val, &rhs, order(mem_ord));
+    __atomic_store(addressof(val), addressof(rhs), order(mem_ord));
 #endif
   }
 
   // Atomic compare exchange
   LIBC_INLINE bool compare_exchange_strong(
       T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange(&val, &expected, &desired, false,
-                                     order(mem_ord), order(mem_ord));
+    return __atomic_compare_exchange(addressof(val), addressof(expected),
+                                     addressof(desired), false, order(mem_ord),
+                                     order(mem_ord));
   }
 
   // Atomic compare exchange (separate success and failure memory orders)
   LIBC_INLINE bool compare_exchange_strong(
       T &expected, T desired, MemoryOrder success_order,
       MemoryOrder failure_order,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange(&val, &expected, &desired, false,
-                                     order(success_order),
-                                     order(failure_order));
+    return __atomic_compare_exchange(
+        addressof(val), addressof(expected), addressof(desired), false,
+        order(success_order), order(failure_order));
   }
 
   // Atomic compare exchange (weak version)
   LIBC_INLINE bool compare_exchange_weak(
       T &expected, T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange(&val, &expected, &desired, true,
-                                     order(mem_ord), order(mem_ord));
+    return __atomic_compare_exchange(addressof(val), addressof(expected),
+                                     addressof(desired), true, order(mem_ord),
+                                     order(mem_ord));
   }
 
   // Atomic compare exchange (weak version with separate success and failure
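The compare-exchange builtins write the currently stored value back into expected when they fail, and the weak form may also fail spuriously, so the usual pattern is a retry loop. A hedged usage sketch against this interface (function and variable names are illustrative and assume code living inside the libc tree, where cpp::Atomic and cpp::MemoryOrder are visible):

// Illustrative only: bounded increment built on compare_exchange_weak.
static LIBC_NAMESPACE::cpp::Atomic<int> counter(0);

int increment_up_to(int limit) {
  int expected = counter.load(LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED);
  while (expected < limit &&
         !counter.compare_exchange_weak(expected, expected + 1)) {
    // On failure 'expected' was refreshed with the current value; retry.
  }
  return expected; // value observed just before our (possible) increment
}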
@@ -142,20 +157,21 @@ template <typename T> struct Atomic {
       T &expected, T desired, MemoryOrder success_order,
       MemoryOrder failure_order,
       [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
-    return __atomic_compare_exchange(&val, &expected, &desired, true,
-                                     order(success_order),
-                                     order(failure_order));
+    return __atomic_compare_exchange(
+        addressof(val), addressof(expected), addressof(desired), true,
+        order(success_order), order(failure_order));
   }
 
   LIBC_INLINE T
   exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST,
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     T ret;
 #if __has_builtin(__scoped_atomic_exchange)
-    __scoped_atomic_exchange(&val, &desired, &ret, order(mem_ord),
-                             scope(mem_scope));
+    __scoped_atomic_exchange(addressof(val), addressof(desired), addressof(ret),
+                             order(mem_ord), scope(mem_scope));
 #else
-    __atomic_exchange(&val, &desired, &ret, order(mem_ord));
+    __atomic_exchange(addressof(val), addressof(desired), addressof(ret),
+                      order(mem_ord));
 #endif
     return ret;
   }
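exchange() stores a new value and returns the previous one in a single atomic step, which is enough to build a simple test-and-set flag. A hedged sketch (names are illustrative, with the same libc-internal assumptions as the previous sketch; not part of this patch):

// Illustrative test-and-set flag built on exchange().
struct SpinFlag {
  LIBC_NAMESPACE::cpp::Atomic<unsigned> flag = 0;

  void lock() {
    // Keep swapping in 1 until the previous value was 0, i.e. we acquired it.
    while (flag.exchange(1, LIBC_NAMESPACE::cpp::MemoryOrder::ACQUIRE) != 0) {
    }
  }
  void unlock() { flag.store(0, LIBC_NAMESPACE::cpp::MemoryOrder::RELEASE); }
};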
@@ -165,10 +181,10 @@ template <typename T> struct Atomic {
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_add)
-    return __scoped_atomic_fetch_add(&val, increment, order(mem_ord),
+    return __scoped_atomic_fetch_add(addressof(val), increment, order(mem_ord),
                                      scope(mem_scope));
 #else
-    return __atomic_fetch_add(&val, increment, order(mem_ord));
+    return __atomic_fetch_add(addressof(val), increment, order(mem_ord));
 #endif
   }
 
@@ -177,10 +193,10 @@ template <typename T> struct Atomic {
            [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_or)
-    return __scoped_atomic_fetch_or(&val, mask, order(mem_ord),
+    return __scoped_atomic_fetch_or(addressof(val), mask, order(mem_ord),
                                     scope(mem_scope));
 #else
-    return __atomic_fetch_or(&val, mask, order(mem_ord));
+    return __atomic_fetch_or(addressof(val), mask, order(mem_ord));
 #endif
   }
 
@@ -189,10 +205,10 @@ template <typename T> struct Atomic {
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_and)
-    return __scoped_atomic_fetch_and(&val, mask, order(mem_ord),
+    return __scoped_atomic_fetch_and(addressof(val), mask, order(mem_ord),
                                      scope(mem_scope));
 #else
-    return __atomic_fetch_and(&val, mask, order(mem_ord));
+    return __atomic_fetch_and(addressof(val), mask, order(mem_ord));
 #endif
   }
 
@@ -201,10 +217,10 @@ template <typename T> struct Atomic {
             [[maybe_unused]] MemoryScope mem_scope = MemoryScope::DEVICE) {
     static_assert(cpp::is_integral_v<T>, "T must be an integral type.");
 #if __has_builtin(__scoped_atomic_fetch_sub)
-    return __scoped_atomic_fetch_sub(&val, decrement, order(mem_ord),
+    return __scoped_atomic_fetch_sub(addressof(val), decrement, order(mem_ord),
                                      scope(mem_scope));
 #else
-    return __atomic_fetch_sub(&val, decrement, order(mem_ord));
+    return __atomic_fetch_sub(addressof(val), decrement, order(mem_ord));
 #endif
   }
 
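The fetch_* members above are guarded by static_assert(cpp::is_integral_v<T>, ...), so they remain available only for integral specializations such as Atomic<int>. A final hedged sketch showing how fetch_add and fetch_sub compose into a reference count (illustrative names, with the same libc-internal assumptions as the earlier sketches):

// Illustrative only: fetch_sub returns the previous value, so the caller that
// observes 1 knows it just released the last reference.
struct RefCount {
  LIBC_NAMESPACE::cpp::Atomic<int> refs = 1;

  void retain() { refs.fetch_add(1, LIBC_NAMESPACE::cpp::MemoryOrder::RELAXED); }
  bool release() {
    return refs.fetch_sub(1, LIBC_NAMESPACE::cpp::MemoryOrder::ACQ_REL) == 1;
  }
};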