|
89 | 89 | #define NIB ((1ULL << SLICE) - 1) |
90 | 90 | #define SLNODES (1 << SLICE) |
91 | 91 |
|
| 92 | +// LEAF_VALID must be >= 2: ref_count values below LEAF_VALID mark a leaf that is invalid (freed or being freed) |
| 93 | +#define LEAF_VALID (2ULL) |
| 94 | + |
92 | 95 | typedef uintptr_t word; |
93 | 96 | typedef uint8_t sh_t; |
94 | 97 |
|
@@ -331,18 +334,26 @@ static void free_leaf(struct critnib *__restrict c, |
331 | 334 | return; |
332 | 335 | } |
333 | 336 |
|
| 337 | + // k should be added to the c->deleted_leaf list here |
| 338 | + // or in critnib_release() when the reference count drops to 0. |
| 339 | + utils_atomic_store_release_u8(&k->pending_deleted_leaf, 1); |
| 340 | + |
334 | 341 | if (c->cb_free_leaf) { |
335 | 342 | uint64_t ref_count; |
336 | 343 | utils_atomic_load_acquire_u64(&k->ref_count, &ref_count); |
337 | 344 | if (ref_count > 0) { |
338 | | - // k will be added to c->deleted_leaf in critnib_release() |
| 345 | + // k will be added to the c->deleted_leaf list in critnib_release() |
339 | 346 | // when the reference count drops to 0. |
340 | | - utils_atomic_store_release_u8(&k->pending_deleted_leaf, 1); |
341 | 347 | return; |
342 | 348 | } |
343 | 349 | } |
344 | 350 |
|
345 | | - add_to_deleted_leaf_list(c, k); |
| 351 | + uint8_t expected = 1; |
| 352 | + uint8_t desired = 0; |
| 353 | + if (utils_compare_exchange_u8(&k->pending_deleted_leaf, &expected, |
| 354 | + &desired)) { |
| 355 | + add_to_deleted_leaf_list(c, k); |
| 356 | + } |
346 | 357 | } |
347 | 358 |
|
348 | 359 | /* |
@@ -392,8 +403,8 @@ int critnib_insert(struct critnib *c, word key, void *value, int update) { |
392 | 403 | utils_atomic_store_release_u8(&k->pending_deleted_leaf, 0); |
393 | 404 |
|
394 | 405 | if (c->cb_free_leaf) { |
395 | | - // mark the leaf as valid (ref_count == 1) |
396 | | - utils_atomic_store_release_u64(&k->ref_count, 1ULL); |
| 406 | +        // mark the leaf as valid (ref_count == LEAF_VALID) |
| 407 | + utils_atomic_store_release_u64(&k->ref_count, LEAF_VALID); |
397 | 408 | } else { |
398 | 409 | // the reference counter is not used in this case |
399 | 410 | utils_atomic_store_release_u64(&k->ref_count, 0ULL); |
@@ -602,35 +613,41 @@ int critnib_release(struct critnib *c, void *ref) { |
602 | 613 | struct critnib_leaf *k = (struct critnib_leaf *)ref; |
603 | 614 |
|
604 | 615 | uint64_t ref_count; |
605 | | - utils_atomic_load_acquire_u64(&k->ref_count, &ref_count); |
606 | | - |
607 | | - if (ref_count == 0) { |
608 | | - return -1; |
609 | | - } |
610 | | - |
| 616 | + uint64_t ref_desired; |
611 | 617 | /* decrement the reference count */ |
612 | | - if (utils_atomic_decrement_u64(&k->ref_count) == 0) { |
| 618 | + do { |
| 619 | + utils_atomic_load_acquire_u64(&k->ref_count, &ref_count); |
| 620 | + if (ref_count < LEAF_VALID) { |
| 621 | + return -1; |
| 622 | + } |
| 623 | + ref_desired = ref_count - 1; |
| 624 | + } while ( |
| 625 | + !utils_compare_exchange_u64(&k->ref_count, &ref_count, &ref_desired)); |
| 626 | + |
| 627 | + if (ref_desired == (LEAF_VALID - 1)) { |
613 | 628 | void *to_be_freed = NULL; |
614 | 629 | utils_atomic_load_acquire_ptr(&k->to_be_freed, &to_be_freed); |
615 | | - if (to_be_freed) { |
616 | | - utils_atomic_store_release_ptr(&k->to_be_freed, NULL); |
617 | | - c->cb_free_leaf(c->leaf_allocator, to_be_freed); |
| 630 | +#ifndef NDEBUG |
| 631 | + if (to_be_freed == NULL) { |
| 632 | + LOG_FATAL("to_be_freed == NULL, value: %p\n", k->value); |
| 633 | + assert(to_be_freed != NULL); |
618 | 634 | } |
619 | | - uint8_t pending_deleted_leaf; |
620 | | - utils_atomic_load_acquire_u8(&k->pending_deleted_leaf, |
621 | | - &pending_deleted_leaf); |
622 | | - if (pending_deleted_leaf) { |
623 | | - utils_atomic_store_release_u8(&k->pending_deleted_leaf, 0); |
| 635 | +#endif |
| 636 | + utils_atomic_store_release_ptr(&k->to_be_freed, NULL); |
| 637 | + // mark the leaf as not used (ref_count == 0) |
| 638 | + utils_atomic_store_release_u64(&k->ref_count, 0ULL); |
| 639 | + |
| 640 | + c->cb_free_leaf(c->leaf_allocator, to_be_freed); |
| 641 | + |
| 642 | + uint8_t expected = 1; |
| 643 | + uint8_t desired = 0; |
| 644 | + if (utils_compare_exchange_u8(&k->pending_deleted_leaf, &expected, |
| 645 | + &desired)) { |
624 | 646 | add_to_deleted_leaf_list(c, k); |
625 | 647 | } |
626 | | - } |
627 | 648 |
|
628 | | -#ifndef NDEBUG |
629 | | - // check if the reference count is overflowed |
630 | | - utils_atomic_load_acquire_u64(&k->ref_count, &ref_count); |
631 | | - assert((ref_count & (1ULL << 63)) == 0); |
632 | | - assert(ref_count != (uint64_t)(0 - 1ULL)); |
633 | | -#endif |
| 649 | + return 0; |
| 650 | + } |
634 | 651 |
|
635 | 652 | return 0; |
636 | 653 | } |
@@ -661,7 +678,7 @@ static inline int increment_ref_count(struct critnib_leaf *k) { |
661 | 678 |
|
662 | 679 | do { |
663 | 680 | utils_atomic_load_acquire_u64(&k->ref_count, &expected); |
664 | | - if (expected == 0) { |
| 681 | + if (expected < LEAF_VALID) { |
665 | 682 | return -1; |
666 | 683 | } |
667 | 684 | desired = expected + 1; |
|
0 commit comments