@@ -48,40 +48,40 @@ bson_memory_barrier (void)
 }

 /**
- * 32-bit x86 does not support 64-bit atomic integer operations.
+ * Some platforms do not support compiler intrinsics for atomic operations.
  * We emulate that here using a spin lock and regular arithmetic operations
  */
-static int8_t g64bitAtomicLock = 0;
+static int8_t gEmulAtomicLock = 0;

 static void
-_lock_64bit_atomic ()
+_lock_emul_atomic ()
 {
    int i;
    if (bson_atomic_int8_compare_exchange_weak (
-          &g64bitAtomicLock, 0, 1, bson_memory_order_acquire) == 0) {
+          &gEmulAtomicLock, 0, 1, bson_memory_order_acquire) == 0) {
       /* Successfully took the spinlock */
       return;
    }
    /* Failed. Try taking ten more times, then begin sleeping. */
    for (i = 0; i < 10; ++i) {
       if (bson_atomic_int8_compare_exchange_weak (
-             &g64bitAtomicLock, 0, 1, bson_memory_order_acquire) == 0) {
+             &gEmulAtomicLock, 0, 1, bson_memory_order_acquire) == 0) {
          /* Succeeded in taking the lock */
          return;
       }
    }
    /* Still don't have the lock. Spin and yield */
    while (bson_atomic_int8_compare_exchange_weak (
-             &g64bitAtomicLock, 0, 1, bson_memory_order_acquire) != 0) {
+             &gEmulAtomicLock, 0, 1, bson_memory_order_acquire) != 0) {
       bson_thrd_yield ();
    }
 }

 static void
-_unlock_64bit_atomic ()
+_unlock_emul_atomic ()
 {
    int64_t rv = bson_atomic_int8_exchange (
-      &g64bitAtomicLock, 0, bson_memory_order_release);
+      &gEmulAtomicLock, 0, bson_memory_order_release);
    BSON_ASSERT (rv == 1 && "Released atomic lock while not holding it");
 }

@@ -91,10 +91,10 @@ _bson_emul_atomic_int64_fetch_add (volatile int64_t *p,
                                    enum bson_memory_order _unused)
 {
    int64_t ret;
-   _lock_64bit_atomic ();
+   _lock_emul_atomic ();
    ret = *p;
    *p += n;
-   _unlock_64bit_atomic ();
+   _unlock_emul_atomic ();
    return ret;
 }

@@ -104,10 +104,10 @@ _bson_emul_atomic_int64_exchange (volatile int64_t *p,
                                   enum bson_memory_order _unused)
 {
    int64_t ret;
-   _lock_64bit_atomic ();
+   _lock_emul_atomic ();
    ret = *p;
    *p = n;
-   _unlock_64bit_atomic ();
+   _unlock_emul_atomic ();
    return ret;
 }

@@ -118,12 +118,12 @@ _bson_emul_atomic_int64_compare_exchange_strong (volatile int64_t *p,
                                                  enum bson_memory_order _unused)
 {
    int64_t ret;
-   _lock_64bit_atomic ();
+   _lock_emul_atomic ();
    ret = *p;
    if (ret == expect_value) {
       *p = new_value;
    }
-   _unlock_64bit_atomic ();
+   _unlock_emul_atomic ();
    return ret;
 }

@@ -137,3 +137,111 @@ _bson_emul_atomic_int64_compare_exchange_weak (volatile int64_t *p,
    return _bson_emul_atomic_int64_compare_exchange_strong (
       p, expect_value, new_value, order);
 }
+
+
+int32_t
+_bson_emul_atomic_int32_fetch_add (volatile int32_t *p,
+                                   int32_t n,
+                                   enum bson_memory_order _unused)
+{
+   int32_t ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   *p += n;
+   _unlock_emul_atomic ();
+   return ret;
+}
+
+int32_t
+_bson_emul_atomic_int32_exchange (volatile int32_t *p,
+                                  int32_t n,
+                                  enum bson_memory_order _unused)
+{
+   int32_t ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   *p = n;
+   _unlock_emul_atomic ();
+   return ret;
+}
+
+int32_t
+_bson_emul_atomic_int32_compare_exchange_strong (volatile int32_t *p,
+                                                 int32_t expect_value,
+                                                 int32_t new_value,
+                                                 enum bson_memory_order _unused)
+{
+   int32_t ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   if (ret == expect_value) {
+      *p = new_value;
+   }
+   _unlock_emul_atomic ();
+   return ret;
+}
+
+int32_t
+_bson_emul_atomic_int32_compare_exchange_weak (volatile int32_t *p,
+                                               int32_t expect_value,
+                                               int32_t new_value,
+                                               enum bson_memory_order order)
+{
+   /* We're emulating. We can't do a weak version. */
+   return _bson_emul_atomic_int32_compare_exchange_strong (
+      p, expect_value, new_value, order);
+}
+
+
+int
+_bson_emul_atomic_int_fetch_add (volatile int *p,
+                                 int n,
+                                 enum bson_memory_order _unused)
+{
+   int ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   *p += n;
+   _unlock_emul_atomic ();
+   return ret;
+}
+
+int
+_bson_emul_atomic_int_exchange (volatile int *p,
+                                int n,
+                                enum bson_memory_order _unused)
+{
+   int ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   *p = n;
+   _unlock_emul_atomic ();
+   return ret;
+}
+
+int
+_bson_emul_atomic_int_compare_exchange_strong (volatile int *p,
+                                               int expect_value,
+                                               int new_value,
+                                               enum bson_memory_order _unused)
+{
+   int ret;
+   _lock_emul_atomic ();
+   ret = *p;
+   if (ret == expect_value) {
+      *p = new_value;
+   }
+   _unlock_emul_atomic ();
+   return ret;
+}
+
+int
+_bson_emul_atomic_int_compare_exchange_weak (volatile int *p,
+                                             int expect_value,
+                                             int new_value,
+                                             enum bson_memory_order order)
+{
+   /* We're emulating. We can't do a weak version. */
+   return _bson_emul_atomic_int_compare_exchange_strong (
+      p, expect_value, new_value, order);
+}
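
The pattern used throughout this patch is a small spin lock built from an 8-bit compare-and-swap, taken around plain loads and stores so that targets without native atomic intrinsics still get atomic fetch-add/exchange/CAS semantics. Below is a minimal standalone sketch of that same idea, assuming C11 <stdatomic.h> and POSIX sched_yield are available; the example_* names are hypothetical stand-ins, not part of the libbson API.

#include <sched.h>     /* sched_yield (POSIX); assumed available for yielding */
#include <stdatomic.h> /* C11 atomics; stand-in for bson's own 8-bit CAS */
#include <stdint.h>

/* Hypothetical names for illustration only; not the libbson API. */
static atomic_flag example_lock = ATOMIC_FLAG_INIT;

static void
example_lock_acquire (void)
{
   /* Spin until the flag is taken, yielding the CPU while waiting. */
   while (atomic_flag_test_and_set_explicit (&example_lock,
                                             memory_order_acquire)) {
      sched_yield ();
   }
}

static void
example_lock_release (void)
{
   atomic_flag_clear_explicit (&example_lock, memory_order_release);
}

int64_t
example_emul_fetch_add (volatile int64_t *p, int64_t n)
{
   int64_t ret;
   example_lock_acquire ();
   ret = *p;  /* old value to return */
   *p += n;   /* plain add; safe only because the lock serializes callers */
   example_lock_release ();
   return ret;
}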