Commit 55ab02a

Use FT_ATOMIC_* macros.
This makes for longer code than the custom LOAD_*/STORE_* macros did, but I think it makes the code clearer.
1 parent ff1d60d commit 55ab02a
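
For example, the insertion path in Objects/setobject.c (set_add_entry_takeref, in the diff below) goes from

    STORE_USED(so, so->used + 1);
    STORE_KEY(freeslot, key);
    STORE_HASH(freeslot, hash);

to

    FT_ATOMIC_STORE_SSIZE_RELAXED(so->used, so->used + 1);
    FT_ATOMIC_STORE_PTR_RELEASE(freeslot->key, key);
    FT_ATOMIC_STORE_SSIZE_RELAXED(freeslot->hash, hash);

so every store now names the field and its memory ordering at the call site instead of hiding them behind a set-specific macro.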

File tree

Include/internal/pycore_pyatomic_ft_wrappers.h
Objects/setobject.c

2 files changed (+73 -106 lines)

Include/internal/pycore_pyatomic_ft_wrappers.h

Lines changed: 3 additions & 0 deletions
@@ -55,6 +55,8 @@ extern "C" {
     _Py_atomic_store_uintptr_release(&value, new_value)
 #define FT_ATOMIC_STORE_SSIZE_RELAXED(value, new_value) \
     _Py_atomic_store_ssize_relaxed(&value, new_value)
+#define FT_ATOMIC_STORE_SSIZE_RELEASE(value, new_value) \
+    _Py_atomic_store_ssize_release(&value, new_value)
 #define FT_ATOMIC_STORE_UINT8_RELAXED(value, new_value) \
     _Py_atomic_store_uint8_relaxed(&value, new_value)
 #define FT_ATOMIC_STORE_UINT16_RELAXED(value, new_value) \
@@ -129,6 +131,7 @@ extern "C" {
 #define FT_ATOMIC_STORE_PTR_RELEASE(value, new_value) value = new_value
 #define FT_ATOMIC_STORE_UINTPTR_RELEASE(value, new_value) value = new_value
 #define FT_ATOMIC_STORE_SSIZE_RELAXED(value, new_value) value = new_value
+#define FT_ATOMIC_STORE_SSIZE_RELEASE(value, new_value) value = new_value
 #define FT_ATOMIC_STORE_UINT8_RELAXED(value, new_value) value = new_value
 #define FT_ATOMIC_STORE_UINT16_RELAXED(value, new_value) value = new_value
 #define FT_ATOMIC_STORE_UINT32_RELAXED(value, new_value) value = new_value
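
Note: the new FT_ATOMIC_STORE_SSIZE_RELEASE pairs with the existing FT_ATOMIC_LOAD_SSIZE_ACQUIRE wrapper used in Objects/setobject.c below; in the free-threaded build they map to release/acquire atomic operations, in the default build to a plain store and read. The ordering contract those names describe, as a standalone C11 sketch (illustrative only, not CPython code):

    /* Standalone sketch: a release store publishes a result so that an acquire
     * load in another thread also observes the plain writes made before it.
     * Build with: cc -std=c11 -pthread release_acquire.c */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static long payload;               /* plain, non-atomic data */
    static atomic_long published = -1; /* -1 means "not published yet" */

    static void *writer(void *arg)
    {
        (void)arg;
        payload = 42;                                                /* prepare */
        atomic_store_explicit(&published, 1, memory_order_release);  /* publish */
        return NULL;
    }

    static void *reader(void *arg)
    {
        (void)arg;
        if (atomic_load_explicit(&published, memory_order_acquire) != -1) {
            /* The acquire load synchronizes with the release store, so the
             * earlier write to `payload` is guaranteed to be visible here. */
            printf("payload = %ld\n", payload);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;
        pthread_create(&a, NULL, writer, NULL);
        pthread_create(&b, NULL, reader, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }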

Objects/setobject.c

Lines changed: 70 additions & 106 deletions
@@ -70,38 +70,20 @@ static PyObject _dummy_struct;
 
 #ifdef Py_GIL_DISABLED
 
-#define LOAD_HASH(entry) _Py_atomic_load_ssize_acquire(&(entry)->hash)
-#define STORE_HASH(entry, v) _Py_atomic_store_ssize_release(&(entry)->hash, v)
-
-#define LOAD_KEY(entry) _Py_atomic_load_ptr_acquire(&(entry)->key)
-#define STORE_KEY(entry, v) _Py_atomic_store_ptr_release(&(entry)->key, v)
-
-#define LOAD_TABLE(so) _Py_atomic_load_ptr_acquire(&(so)->table)
-#define STORE_TABLE(so, v) _Py_atomic_store_ptr_release(&(so)->table, v)
-
-#define LOAD_MASK(so) _Py_atomic_load_ssize_acquire(&(so)->mask)
-#define STORE_MASK(so, v) _Py_atomic_store_ssize_release(&(so)->mask, v)
-
-#define LOAD_USED(so) _Py_atomic_load_ssize_acquire(&(so)->used)
-#define STORE_USED(so, v) _Py_atomic_store_ssize_release(&(so)->used, v)
-
-#define LOAD_SIZE(so) _Py_atomic_load_ssize_acquire(&(so)->size)
-#define STORE_SIZE(so, v) _Py_atomic_store_ssize_release(&(so)->size, v)
-
-#define IS_SHARED(so) _PyObject_GC_IS_SHARED(so)
-#define SET_SHARED(so) _PyObject_GC_SET_SHARED(so)
+#define SET_IS_SHARED(so) _PyObject_GC_IS_SHARED(so)
+#define SET_MARK_SHARED(so) _PyObject_GC_SET_SHARED(so)
 
 static void
 ensure_shared_on_read(PySetObject *so)
 {
-    if (!_Py_IsOwnedByCurrentThread((PyObject *)so) && !IS_SHARED(so)) {
+    if (!_Py_IsOwnedByCurrentThread((PyObject *)so) && !SET_IS_SHARED(so)) {
         // The first time we access a set from a non-owning thread we mark it
         // as shared. This ensures that a concurrent resize operation will
         // delay freeing the old entries using QSBR, which is necessary
         // to safely allow concurrent reads without locking...
         Py_BEGIN_CRITICAL_SECTION(so);
-        if (!IS_SHARED(so)) {
-            SET_SHARED(so);
+        if (!SET_IS_SHARED(so)) {
+            SET_MARK_SHARED(so);
         }
         Py_END_CRITICAL_SECTION();
     }
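
Note: the comment above describes a check-then-lock-then-recheck shape: the unlocked test keeps the common read path cheap, and repeating the test inside the critical section ensures the flag is set only once. A generic, self-contained sketch of that pattern (illustrative only; CPython uses per-object GC bits and its critical-section API, not a mutex and an atomic flag):

    /* Generic check/lock/re-check sketch; not CPython code. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool shared_flag;
    static pthread_mutex_t flag_lock = PTHREAD_MUTEX_INITIALIZER;

    static void mark_shared_once(void)
    {
        /* Cheap unlocked check: once the flag is set, callers skip the lock. */
        if (!atomic_load_explicit(&shared_flag, memory_order_acquire)) {
            pthread_mutex_lock(&flag_lock);
            if (!atomic_load_explicit(&shared_flag, memory_order_relaxed)) {
                atomic_store_explicit(&shared_flag, true, memory_order_release);
            }
            pthread_mutex_unlock(&flag_lock);
        }
    }

    static void *worker(void *arg)
    {
        (void)arg;
        mark_shared_once();   /* any number of threads may race here safely */
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, worker, NULL);
        pthread_create(&t2, NULL, worker, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("shared = %d\n", (int)atomic_load(&shared_flag));
        return 0;
    }
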
@@ -111,14 +93,14 @@ static inline Py_ALWAYS_INLINE int
 set_compare_threadsafe(PySetObject *so, setentry *table, setentry *ep,
                        PyObject *key, Py_hash_t hash)
 {
-    PyObject *startkey = LOAD_KEY(ep);
+    PyObject *startkey = FT_ATOMIC_LOAD_PTR_ACQUIRE(ep->key);
     if (startkey == NULL) {
         return SET_LOOKKEY_EMPTY;
     }
     if (startkey == key) {
         return SET_LOOKKEY_FOUND;
     }
-    Py_ssize_t ep_hash = LOAD_HASH(ep);
+    Py_ssize_t ep_hash = FT_ATOMIC_LOAD_SSIZE_ACQUIRE(ep->hash);
     if (ep_hash == hash) {
         if (startkey == NULL || !_Py_TryIncrefCompare(&ep->key, startkey)) {
             return SET_LOOKKEY_CHANGED;
@@ -128,8 +110,8 @@ set_compare_threadsafe(PySetObject *so, setentry *table, setentry *ep,
         if (cmp < 0) {
             return SET_LOOKKEY_ERROR;
         }
-        if (table == LOAD_TABLE(so) &&
-            startkey == LOAD_KEY(ep)) {
+        if (table == FT_ATOMIC_LOAD_PTR_ACQUIRE(so->table) &&
+            startkey == FT_ATOMIC_LOAD_PTR_ACQUIRE(ep->key)) {
             assert(cmp == SET_LOOKKEY_FOUND || cmp == SET_LOOKKEY_NO_MATCH);
             return cmp;
         }
@@ -143,26 +125,8 @@ set_compare_threadsafe(PySetObject *so, setentry *table, setentry *ep,
 
 #else
 
-#define LOAD_HASH(entry) ((entry)->hash)
-#define STORE_HASH(entry, v) ((entry)->hash = v)
-
-#define LOAD_KEY(entry) ((entry)->key)
-#define STORE_KEY(entry, v) ((entry)->key = v)
-
-#define LOAD_TABLE(so) ((so)->table)
-#define STORE_TABLE(so, v) ((so)->table = v)
-
-#define LOAD_MASK(so) ((so)->mask)
-#define STORE_MASK(so, v) ((so)->mask = v)
-
-#define LOAD_USED(so) ((so)->used)
-#define STORE_USED(so, v) ((so)->used = v)
-
-#define LOAD_SIZE(so) ((so)->size)
-#define STORE_SIZE(so, v) ((so)->size = v)
-
-#define IS_SHARED(so) 0
-#define SET_SHARED(so)
+#define SET_IS_SHARED(so) 0
+#define SET_MARK_SHARED(so)
 
 #endif
 
@@ -201,8 +165,8 @@ set_zero_table(setentry *table, size_t size)
 #ifdef Py_GIL_DISABLED
     for (size_t i = 0; i < size; i++) {
         setentry *entry = &table[i];
-        STORE_HASH(entry, 0);
-        STORE_KEY(entry, NULL);
+        FT_ATOMIC_STORE_SSIZE_RELAXED(entry->hash, 0);
+        FT_ATOMIC_STORE_PTR_RELEASE(entry->key, NULL);
     }
 #else
     memset(table, 0, sizeof(setentry)*size);
@@ -313,16 +277,16 @@ set_add_entry_takeref(PySetObject *so, PyObject *key, Py_hash_t hash)
   found_unused_or_dummy:
     if (freeslot == NULL)
         goto found_unused;
-    STORE_USED(so, so->used + 1);
-    STORE_KEY(freeslot, key);
-    STORE_HASH(freeslot, hash);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(so->used, so->used + 1);
+    FT_ATOMIC_STORE_PTR_RELEASE(freeslot->key, key);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(freeslot->hash, hash);
     return 0;
 
   found_unused:
     so->fill++;
-    STORE_USED(so, so->used + 1);
-    STORE_KEY(entry, key);
-    STORE_HASH(entry, hash);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(so->used, so->used + 1);
+    FT_ATOMIC_STORE_PTR_RELEASE(entry->key, key);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(entry->hash, hash);
     if ((size_t)so->fill*5 < mask*3)
         return 0;
     return set_table_resize(so, so->used>50000 ? so->used*2 : so->used*4);
@@ -388,8 +352,8 @@ set_insert_clean(setentry *table, size_t mask, PyObject *key, Py_hash_t hash)
         i = (i * 5 + 1 + perturb) & mask;
     }
   found_null:
-    STORE_KEY(entry, key);
-    STORE_HASH(entry, hash);
+    FT_ATOMIC_STORE_PTR_RELEASE(entry->key, key);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(entry->hash, hash);
 }
 
 /* ======== End logic for probing the hash table ========================== */
@@ -417,9 +381,9 @@ set_lookkey_threadsafe(PySetObject *so, PyObject *key, Py_hash_t hash)
     int status;
     setentry *entry;
    ensure_shared_on_read(so);
-    setentry *table = LOAD_TABLE(so);
-    size_t mask = LOAD_MASK(so);
-    if (table == NULL || table != LOAD_TABLE(so)) {
+    setentry *table = FT_ATOMIC_LOAD_PTR_ACQUIRE(so->table);
+    size_t mask = FT_ATOMIC_LOAD_SSIZE_RELAXED(so->mask);
+    if (table == NULL || table != FT_ATOMIC_LOAD_PTR_ACQUIRE(so->table)) {
        return set_lookkey(so, key, hash, &entry);
    }
    status = set_do_lookup(so, table, mask, key, hash, &entry, set_compare_threadsafe);
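
Note: set_lookkey_threadsafe takes an optimistic snapshot of the table pointer with an acquire load, probes without locking, and falls back to set_lookkey when the pointer is NULL or has already been replaced by a concurrent resize; set_compare_threadsafe above re-validates the pointer again before trusting an unlocked comparison. A generic sketch of that shape (illustrative only; fallback_lookup and probe are hypothetical stand-ins, not CPython APIs):

    /* Optimistic read with fallback to a slower path; not CPython code. */
    #include <stdatomic.h>

    typedef struct { int slots[8]; } table_t;

    static int fallback_lookup(void)          { return -1; } /* slow-path stub */
    static int probe(const table_t *snapshot) { return snapshot->slots[0]; }

    static int optimistic_lookup(_Atomic(table_t *) *table_slot)
    {
        table_t *snapshot = atomic_load_explicit(table_slot, memory_order_acquire);
        if (snapshot == NULL) {
            return fallback_lookup();   /* table being replaced: take the slow path */
        }
        int result = probe(snapshot);
        /* Re-check the pointer: if a resize swapped the table while we probed,
         * the unlocked result may be stale, so retry on the slow path. */
        if (snapshot != atomic_load_explicit(table_slot, memory_order_acquire)) {
            return fallback_lookup();
        }
        return result;
    }

    static table_t the_table = { .slots = { 7 } };
    static _Atomic(table_t *) the_slot = &the_table;

    int main(void)
    {
        return optimistic_lookup(&the_slot) == 7 ? 0 : 1;
    }
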
@@ -502,8 +466,8 @@ set_table_resize(PySetObject *so, Py_ssize_t minused)
     /* Make the set empty, using the new table. */
     assert(newtable != oldtable);
     set_zero_table(newtable, newsize);
-    STORE_TABLE(so, NULL);
-    STORE_MASK(so, newsize - 1);
+    FT_ATOMIC_STORE_PTR_RELEASE(so->table, NULL);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(so->mask, newsize - 1);
 
     /* Copy the data over; this is refcount-neutral for active entries;
        dummy entries aren't copied over, of course */
@@ -523,10 +487,10 @@ set_table_resize(PySetObject *so, Py_ssize_t minused)
         }
     }
 
-    STORE_TABLE(so, newtable);
+    FT_ATOMIC_STORE_PTR_RELEASE(so->table, newtable);
 
     if (is_oldtable_malloced)
-        free_entries(oldtable, IS_SHARED(so));
+        free_entries(oldtable, SET_IS_SHARED(so));
     return 0;
 }
 
@@ -558,9 +522,9 @@ set_discard_entry(PySetObject *so, PyObject *key, Py_hash_t hash)
     }
     assert(status == SET_LOOKKEY_FOUND);
     old_key = entry->key;
-    STORE_KEY(entry, dummy);
-    STORE_HASH(entry, -1);
-    STORE_USED(so, so->used - 1);
+    FT_ATOMIC_STORE_PTR_RELEASE(entry->key, dummy);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(entry->hash, -1);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(so->used, so->used - 1);
     Py_DECREF(old_key);
     return DISCARD_FOUND;
 }
@@ -598,13 +562,13 @@ set_discard_key(PySetObject *so, PyObject *key)
 static void
 set_empty_to_minsize(PySetObject *so)
 {
-    STORE_TABLE(so, NULL);
+    FT_ATOMIC_STORE_PTR_RELEASE(so->table, NULL);
     set_zero_table(so->smalltable, PySet_MINSIZE);
     so->fill = 0;
-    STORE_USED(so, 0);
-    STORE_MASK(so, PySet_MINSIZE - 1);
-    STORE_HASH(so, -1);
-    STORE_TABLE(so, so->smalltable);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(so->used, 0);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(so->mask, PySet_MINSIZE - 1);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(so->hash, -1);
+    FT_ATOMIC_STORE_PTR_RELEASE(so->table, so->smalltable);
 }
 
 static int
@@ -653,7 +617,7 @@ set_clear_internal(PyObject *self)
     }
 
     if (table_is_malloced)
-        free_entries(table, IS_SHARED(so));
+        free_entries(table, SET_IS_SHARED(so));
     return 0;
 }
 
@@ -714,7 +678,7 @@ set_dealloc(PyObject *self)
         }
     }
     if (so->table != so->smalltable)
-        free_entries(so->table, IS_SHARED(so));
+        free_entries(so->table, SET_IS_SHARED(so));
     Py_TYPE(so)->tp_free(so);
     Py_TRASHCAN_END
 }
@@ -788,7 +752,7 @@ static Py_ssize_t
 set_len(PyObject *self)
 {
     PySetObject *so = _PySet_CAST(self);
-    return LOAD_USED(so);
+    return FT_ATOMIC_LOAD_SSIZE_RELAXED(so->used);
 }
 
 static int
@@ -826,12 +790,12 @@ set_merge_lock_held(PySetObject *so, PyObject *otherset)
             key = other_entry->key;
             if (key != NULL) {
                 assert(so_entry->key == NULL);
-                STORE_KEY(so_entry, Py_NewRef(key));
-                STORE_HASH(so_entry, other_entry->hash);
+                FT_ATOMIC_STORE_PTR_RELEASE(so_entry->key, Py_NewRef(key));
+                FT_ATOMIC_STORE_SSIZE_RELAXED(so_entry->hash, other_entry->hash);
             }
         }
         so->fill = other->fill;
-        STORE_USED(so, other->used);
+        FT_ATOMIC_STORE_SSIZE_RELAXED(so->used, other->used);
         return 0;
     }
 
@@ -840,7 +804,7 @@ set_merge_lock_held(PySetObject *so, PyObject *otherset)
         setentry *newtable = so->table;
         size_t newmask = (size_t)so->mask;
         so->fill = other->used;
-        STORE_USED(so, other->used);
+        FT_ATOMIC_STORE_SSIZE_RELAXED(so->used, other->used);
         for (i = other->mask + 1; i > 0 ; i--, other_entry++) {
             key = other_entry->key;
             if (key != NULL && key != dummy) {
@@ -892,9 +856,9 @@ set_pop_impl(PySetObject *so)
            entry = so->table;
    }
    key = entry->key;
-    STORE_KEY(entry, dummy);
-    STORE_HASH(entry, -1);
-    STORE_USED(so, so->used - 1);
+    FT_ATOMIC_STORE_PTR_RELEASE(entry->key, dummy);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(entry->hash, -1);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(so->used, so->used - 1);
    so->finger = entry - so->table + 1; /* next place to start */
    return key;
 }
@@ -980,12 +944,12 @@ frozenset_hash(PyObject *self)
     PySetObject *so = _PySet_CAST(self);
     Py_uhash_t hash;
 
-    if (LOAD_HASH(so) != -1) {
-        return LOAD_HASH(so);
+    if (FT_ATOMIC_LOAD_SSIZE_RELAXED(so->hash) != -1) {
+        return FT_ATOMIC_LOAD_SSIZE_ACQUIRE(so->hash);
     }
 
     hash = frozenset_hash_impl(self);
-    STORE_HASH(so, hash);
+    FT_ATOMIC_STORE_SSIZE_RELEASE(so->hash, hash);
     return hash;
 }
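
Note: frozenset_hash above is the call site in this diff that uses the FT_ATOMIC_STORE_SSIZE_RELEASE wrapper added in the header file: the cached hash is checked with a relaxed load and, once computed, published with a release store. The same lazy-caching idiom as a compact standalone C11 sketch (illustrative only; compute_hash is a stand-in for frozenset_hash_impl, and racing threads may each compute the value, but they all store the same result):

    /* Lazy cache published with a release store; not CPython code. */
    #include <stdatomic.h>
    #include <stdio.h>

    static long compute_hash(void) { return 0x51ed270b; } /* placeholder work */

    static atomic_long cached_hash = -1;                  /* -1 == not cached */

    static long get_hash(void)
    {
        long h = atomic_load_explicit(&cached_hash, memory_order_relaxed);
        if (h != -1) {
            return h;                  /* fast path: already computed */
        }
        h = compute_hash();
        /* Publish the result; mirrors FT_ATOMIC_STORE_SSIZE_RELEASE above. */
        atomic_store_explicit(&cached_hash, h, memory_order_release);
        return h;
    }

    int main(void)
    {
        printf("%ld\n", get_hash());
        printf("%ld\n", get_hash());   /* second call takes the fast path */
        return 0;
    }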

@@ -1067,8 +1031,8 @@ static PyObject *setiter_iternext(PyObject *self)
         return NULL;
     assert (PyAnySet_Check(so));
 
-    Py_ssize_t so_used = LOAD_USED(so);
-    Py_ssize_t si_used = FT_ATOMIC_LOAD_SSIZE(si->si_used);
+    Py_ssize_t so_used = FT_ATOMIC_LOAD_SSIZE_RELAXED(so->used);
+    Py_ssize_t si_used = FT_ATOMIC_LOAD_SSIZE_RELAXED(si->si_used);
     if (si_used != so_used) {
         PyErr_SetString(PyExc_RuntimeError,
                         "Set changed size during iteration");
@@ -1410,16 +1374,16 @@ set_swap_bodies(PySetObject *a, PySetObject *b)
 
     setentry *a_table = a->table;
     setentry *b_table = b->table;
-    STORE_TABLE(a, NULL);
-    STORE_TABLE(b, NULL);
+    FT_ATOMIC_STORE_PTR_RELEASE(a->table, NULL);
+    FT_ATOMIC_STORE_PTR_RELEASE(b->table, NULL);
 
     t = a->fill; a->fill = b->fill; b->fill = t;
     t = a->used;
-    STORE_USED(a, b->used);
-    STORE_USED(b, t);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(a->used, b->used);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(b->used, t);
     t = a->mask;
-    STORE_MASK(a, b->mask);
-    STORE_MASK(b, t);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(a->mask, b->mask);
+    FT_ATOMIC_STORE_SSIZE_RELAXED(b->mask, t);
 
     u = a_table;
     if (a_table == a->smalltable)
@@ -1437,21 +1401,21 @@ set_swap_bodies(PySetObject *a, PySetObject *b)
 
     if (PyType_IsSubtype(Py_TYPE(a), &PyFrozenSet_Type) &&
         PyType_IsSubtype(Py_TYPE(b), &PyFrozenSet_Type)) {
-        h = LOAD_HASH(a);
-        STORE_HASH(a, LOAD_HASH(b));
-        STORE_HASH(b, h);
+        h = FT_ATOMIC_LOAD_SSIZE_RELAXED(a->hash);
+        FT_ATOMIC_STORE_SSIZE_RELAXED(a->hash, FT_ATOMIC_LOAD_SSIZE_RELAXED(b->hash));
+        FT_ATOMIC_STORE_SSIZE_RELAXED(b->hash, h);
     } else {
-        STORE_HASH(a, -1);
-        STORE_HASH(b, -1);
+        FT_ATOMIC_STORE_SSIZE_RELAXED(a->hash, -1);
+        FT_ATOMIC_STORE_SSIZE_RELAXED(b->hash, -1);
     }
-    if (IS_SHARED(a)) {
-        SET_SHARED(b);
+    if (!SET_IS_SHARED(b) && SET_IS_SHARED(a)) {
+        SET_MARK_SHARED(b);
     }
-    if (IS_SHARED(b)) {
-        SET_SHARED(a);
+    if (!SET_IS_SHARED(a) && SET_IS_SHARED(b)) {
+        SET_MARK_SHARED(a);
     }
-    STORE_TABLE(a, a_table);
-    STORE_TABLE(b, b_table);
+    FT_ATOMIC_STORE_PTR_RELEASE(a->table, a_table);
+    FT_ATOMIC_STORE_PTR_RELEASE(b->table, b_table);
 }
 
 /*[clinic input]
@@ -2345,8 +2309,8 @@ set_richcompare(PyObject *self, PyObject *w, int op)
     case Py_EQ:
         if (PySet_GET_SIZE(v) != PySet_GET_SIZE(w))
             Py_RETURN_FALSE;
-        Py_hash_t v_hash = LOAD_HASH(v);
-        Py_hash_t w_hash = LOAD_HASH((PySetObject *)w);
+        Py_hash_t v_hash = FT_ATOMIC_LOAD_SSIZE_RELAXED(v->hash);
+        Py_hash_t w_hash = FT_ATOMIC_LOAD_SSIZE_RELAXED(((PySetObject *)w)->hash);
         if (v_hash != -1 && w_hash != -1 && v_hash != w_hash)
             Py_RETURN_FALSE;
         return set_issubset((PyObject*)v, w);
