@@ -220,7 +220,7 @@ static void gc_sync_cache(jl_ptls_t ptls, jl_gc_mark_cache_t *gc_cache) JL_NOTSA
 }
 
 // No other threads can be running marking at the same time
-static void gc_sync_all_caches(jl_ptls_t ptls)
+static void gc_sync_all_caches(jl_ptls_t ptls) JL_NOTSAFEPOINT
 {
     assert(gc_n_threads);
     for (int t_i = 0; t_i < gc_n_threads; t_i++) {
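A note on the annotation this diff adds throughout: JL_NOTSAFEPOINT marks a function as containing no GC safepoints, so Julia's clang-based GC analyzer can check that it never triggers a collection and is safe to call while the world is stopped. A rough sketch of how such an analyzer-only annotation is wired up (the exact spelling of Julia's definition is an assumption; it lives in the analyzer annotation headers):

    /* Sketch: an annotation that only the static analyzer sees.
     * The annotate string is an assumption about Julia's definition. */
    #ifdef __clang_gcanalyzer__
    #define JL_NOTSAFEPOINT __attribute__((annotate("julia_not_safepoint")))
    #else
    #define JL_NOTSAFEPOINT
    #endif

    /* The promise: no allocation, no safepoint, so the function may run
     * while other threads are suspended mid-GC. */
    static int count_live(const int *flags, int n) JL_NOTSAFEPOINT
    {
        int live = 0;
        for (int i = 0; i < n; i++)
            live += flags[i] != 0;
        return live;
    }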
@@ -364,7 +364,7 @@ JL_DLLEXPORT jl_weakref_t *jl_gc_new_weakref_th(jl_ptls_t ptls, jl_value_t *valu
     return wr;
 }
 
-static void clear_weak_refs(void)
+static void clear_weak_refs(void) JL_NOTSAFEPOINT
 {
     assert(gc_n_threads);
     for (int i = 0; i < gc_n_threads; i++) {
@@ -381,7 +381,7 @@ static void clear_weak_refs(void)
     }
 }
 
-static void sweep_weak_refs(void)
+static void sweep_weak_refs(void) JL_NOTSAFEPOINT
 {
     assert(gc_n_threads);
     for (int i = 0; i < gc_n_threads; i++) {
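clear_weak_refs and sweep_weak_refs run between marking and sweeping, so they can read mark bits without hitting a safepoint, which is why the annotation is sound here. Conceptually, clearing a weak reference means replacing a dead referent with the `nothing` sentinel. A minimal sketch of that idea with illustrative stand-in types (not Julia's jl_weakref_t API):

    #include <stddef.h>

    /* Sketch: null out weak refs whose referents were not marked this cycle. */
    typedef struct { void *value; } weakref_t;

    static void clear_dead_weakrefs(weakref_t **refs, size_t n,
                                    int (*is_marked)(void *),
                                    void *nothing_sentinel)
    {
        for (size_t i = 0; i < n; i++) {
            if (!is_marked(refs[i]->value))
                refs[i]->value = nothing_sentinel; /* referent died this cycle */
        }
    }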
@@ -1153,7 +1153,7 @@ static void gc_pool_sync_nfree(jl_gc_pagemeta_t *pg, jl_taggedvalue_t *last) JL_
 
 // pre-scan pages to check whether there are enough pages so that's worth parallelizing
 // also sweeps pages that don't need to be linearly scanned
-int gc_sweep_prescan(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_scratch)
+int gc_sweep_prescan(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_scratch) JL_NOTSAFEPOINT
 {
     // 4MB worth of pages is worth parallelizing
     const int n_pages_worth_parallel_sweep = (int)(4 * (1 << 20) / GC_PAGE_SZ);
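The threshold constant reduces to a small page count. Assuming 16 KiB GC pages (GC_PAGE_SZ is defined elsewhere in this file; 16 KiB is an assumption), 4 MiB works out to 256 pages, below which waking the sweeper threads costs more than it saves:

    #include <stdio.h>

    #define GC_PAGE_SZ (16 * 1024) /* assumed page size, not Julia's header value */

    int main(void)
    {
        int n_pages_worth_parallel_sweep = (int)(4 * (1 << 20) / GC_PAGE_SZ);
        printf("parallel sweep worthwhile at >= %d pages\n",
               n_pages_worth_parallel_sweep); /* prints 256 with these numbers */
        return 0;
    }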
@@ -1210,7 +1210,7 @@ int gc_sweep_prescan(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_sc
 }
 
 // wake up all threads to sweep the pages
-void gc_sweep_wake_all_pages(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_scratch)
+void gc_sweep_wake_all_pages(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_scratch) JL_NOTSAFEPOINT
 {
     int parallel_sweep_worthwhile = gc_sweep_prescan(ptls, new_gc_allocd_scratch);
     if (parallel_sweep_worthwhile && !page_profile_enabled) {
@@ -1246,7 +1246,7 @@ void gc_sweep_wake_all_pages(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_a
 }
 
 // wait for all threads to finish sweeping
-void gc_sweep_wait_for_all_pages(void)
+void gc_sweep_wait_for_all_pages(void) JL_NOTSAFEPOINT
 {
     jl_atomic_store(&gc_allocd_scratch, NULL);
     while (jl_atomic_load_acquire(&gc_n_threads_sweeping_pools) != 0) {
@@ -1255,7 +1255,7 @@ void gc_sweep_wait_for_all_pages(void)
 }
 
 // sweep all pools
-void gc_sweep_pool_parallel(jl_ptls_t ptls)
+void gc_sweep_pool_parallel(jl_ptls_t ptls) JL_NOTSAFEPOINT
 {
     jl_atomic_fetch_add(&gc_n_threads_sweeping_pools, 1);
     jl_gc_padded_page_stack_t *allocd_scratch = jl_atomic_load(&gc_allocd_scratch);
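These three functions form a fan-out/fan-in protocol: gc_sweep_wake_all_pages publishes the scratch space and wakes workers, each worker bumps gc_n_threads_sweeping_pools around its sweep, and gc_sweep_wait_for_all_pages clears the shared pointer and spins until the counter drains to zero. A self-contained sketch of the same counter handshake in C11 atomics (names and the bare spin loop are illustrative, not Julia's exact code):

    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic(void *) scratch;   /* work published by the coordinator */
    static atomic_int n_sweeping;     /* workers still actively sweeping */

    void worker_sweep(void)
    {
        atomic_fetch_add(&n_sweeping, 1);       /* announce participation */
        void *work = atomic_load(&scratch);
        if (work != NULL) {
            /* ... drain pages from `work` ... */
        }
        atomic_fetch_sub(&n_sweeping, 1);       /* pairs with the acquire below */
    }

    void coordinator_wait(void)
    {
        atomic_store(&scratch, NULL);           /* stop handing out new work */
        while (atomic_load_explicit(&n_sweeping, memory_order_acquire) != 0)
            ;                                   /* spin until workers finish */
    }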
@@ -1306,7 +1306,7 @@ void gc_sweep_pool_parallel(jl_ptls_t ptls)
 }
 
 // free all pages (i.e. through `madvise` on Linux) that were lazily freed
-void gc_free_pages(void)
+void gc_free_pages(void) JL_NOTSAFEPOINT
 {
     size_t n_pages_seen = 0;
     jl_gc_page_stack_t tmp;
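Freeing here keeps the virtual mapping and only returns the physical pages, so a later reuse of the page faults in fresh zeroed memory instead of paying for a new mmap. A minimal Linux-only sketch of that lazy-free step (standalone illustration, not Julia's page allocator):

    #define _DEFAULT_SOURCE
    #include <sys/mman.h>
    #include <stddef.h>

    /* Sketch: give a page's backing memory to the kernel, keep the mapping. */
    static int lazily_free_page(void *addr, size_t len)
    {
        return madvise(addr, len, MADV_DONTNEED); /* returns 0 on success */
    }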
@@ -1344,7 +1344,7 @@ void gc_free_pages(void)
 }
 
 // setup the data-structures for a sweep over all memory pools
-static void gc_sweep_pool(void)
+static void gc_sweep_pool(void) JL_NOTSAFEPOINT
 {
     gc_time_pool_start();
 
@@ -1466,7 +1466,7 @@ static void gc_sweep_pool(void)
     gc_time_pool_end(current_sweep_full);
 }
 
-static void gc_sweep_perm_alloc(void)
+static void gc_sweep_perm_alloc(void) JL_NOTSAFEPOINT
 {
     uint64_t t0 = jl_hrtime();
     gc_sweep_sysimg();
@@ -2237,7 +2237,7 @@ JL_DLLEXPORT void jl_gc_mark_queue_objarray(jl_ptls_t ptls, jl_value_t *parent,
 
 // Enqueue and mark all outgoing references from `new_obj` which have not been marked yet.
 // `_new_obj` has its lowest bit tagged if it's in the remset (in which case we shouldn't update page metadata)
-FORCE_INLINE void gc_mark_outrefs(jl_ptls_t ptls, jl_gc_markqueue_t *mq, void *_new_obj)
+FORCE_INLINE void gc_mark_outrefs(jl_ptls_t ptls, jl_gc_markqueue_t *mq, void *_new_obj) JL_NOTSAFEPOINT
 {
     int meta_updated = (uintptr_t)_new_obj & GC_REMSET_PTR_TAG;
     jl_value_t *new_obj = (jl_value_t *)((uintptr_t)_new_obj & ~(uintptr_t)GC_REMSET_PTR_TAG);
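gc_mark_outrefs carries one bit of metadata in the pointer itself: heap objects are at least word aligned, so the low bit of a valid object pointer is always zero and is free to hold the came-from-remset flag, which is exactly what the two lines above decode. A small sketch of the tag/untag pattern (REMSET_PTR_TAG = 1 is an assumption mirroring GC_REMSET_PTR_TAG):

    #include <stdint.h>

    #define REMSET_PTR_TAG ((uintptr_t)1) /* needs >= 2-byte object alignment */

    /* Set the flag when enqueueing a remset object... */
    static void *tag_remset(void *obj)
    {
        return (void *)((uintptr_t)obj | REMSET_PTR_TAG);
    }

    /* ...and strip it on the way out, as gc_mark_outrefs does above. */
    static void *untag(void *p, int *was_remset)
    {
        *was_remset = (int)((uintptr_t)p & REMSET_PTR_TAG);
        return (void *)((uintptr_t)p & ~REMSET_PTR_TAG);
    }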
@@ -2513,7 +2513,7 @@ FORCE_INLINE void gc_mark_outrefs(jl_ptls_t ptls, jl_gc_markqueue_t *mq, void *_
 }
 
 // Used in gc-debug
-void gc_mark_loop_serial_(jl_ptls_t ptls, jl_gc_markqueue_t *mq)
+void gc_mark_loop_serial_(jl_ptls_t ptls, jl_gc_markqueue_t *mq) JL_NOTSAFEPOINT
 {
     while (1) {
         void *new_obj = (void *)gc_ptr_queue_pop(&ptls->gc_tls.mark_queue);
@@ -2526,7 +2526,7 @@ void gc_mark_loop_serial_(jl_ptls_t ptls, jl_gc_markqueue_t *mq)
 }
 
 // Drain items from worker's own chunkqueue
-void gc_drain_own_chunkqueue(jl_ptls_t ptls, jl_gc_markqueue_t *mq)
+void gc_drain_own_chunkqueue(jl_ptls_t ptls, jl_gc_markqueue_t *mq) JL_NOTSAFEPOINT
 {
     jl_gc_chunk_t c = {.cid = GC_empty_chunk};
     do {
@@ -2542,13 +2542,13 @@ void gc_drain_own_chunkqueue(jl_ptls_t ptls, jl_gc_markqueue_t *mq)
 // is used to keep track of processed items. Maintaining this stack (instead of
 // native one) avoids stack overflow when marking deep objects and
 // makes it easier to implement parallel marking via work-stealing
-JL_EXTENSION NOINLINE void gc_mark_loop_serial(jl_ptls_t ptls)
+JL_EXTENSION NOINLINE void gc_mark_loop_serial(jl_ptls_t ptls) JL_NOTSAFEPOINT
 {
     gc_mark_loop_serial_(ptls, &ptls->gc_tls.mark_queue);
     gc_drain_own_chunkqueue(ptls, &ptls->gc_tls.mark_queue);
 }
 
-void gc_mark_and_steal(jl_ptls_t ptls)
+void gc_mark_and_steal(jl_ptls_t ptls) JL_NOTSAFEPOINT
 {
     int master_tid = jl_atomic_load(&gc_master_tid);
     assert(master_tid != -1);
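The comment above records the central design choice of the mark phase: traversal state lives in an explicit heap-allocated queue rather than in C-stack recursion, so deep object graphs cannot overflow the native stack and idle threads can steal pending entries. A generic sketch of recursion replaced by an explicit stack (plain graph marking, not Julia's mark queue; error checks elided):

    #include <stdlib.h>

    typedef struct node { struct node *kids[2]; int marked; } node_t;

    /* Iterative DFS: the stack grows on the heap via realloc, so depth is
     * bounded by memory, not by the native call stack. */
    static void mark_iterative(node_t *root)
    {
        size_t cap = 64, top = 0;
        node_t **stack = malloc(cap * sizeof(*stack));
        stack[top++] = root;
        while (top > 0) {
            node_t *n = stack[--top];
            if (n == NULL || n->marked)
                continue;
            n->marked = 1;
            if (top + 2 > cap) {                 /* grow instead of recursing */
                cap *= 2;
                stack = realloc(stack, cap * sizeof(*stack));
            }
            stack[top++] = n->kids[0];
            stack[top++] = n->kids[1];
        }
        free(stack);
    }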
@@ -2670,7 +2670,7 @@ size_t gc_count_work_in_queue(jl_ptls_t ptls) JL_NOTSAFEPOINT
  * the mark-loop after `gc_n_threads_marking` reaches zero.
  */
 
-int gc_should_mark(void)
+int gc_should_mark(void) JL_NOTSAFEPOINT
 {
     int should_mark = 0;
     uv_mutex_lock(&gc_queue_observer_lock);
@@ -2703,14 +2703,14 @@ int gc_should_mark(void)
     return should_mark;
 }
 
-void gc_wake_all_for_marking(jl_ptls_t ptls)
+void gc_wake_all_for_marking(jl_ptls_t ptls) JL_NOTSAFEPOINT
 {
     uv_mutex_lock(&gc_threads_lock);
     uv_cond_broadcast(&gc_threads_cond);
     uv_mutex_unlock(&gc_threads_lock);
 }
 
-void gc_mark_loop_parallel(jl_ptls_t ptls, int master)
+void gc_mark_loop_parallel(jl_ptls_t ptls, int master) JL_NOTSAFEPOINT
 {
     if (master) {
         jl_atomic_store(&gc_master_tid, ptls->tid);
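gc_wake_all_for_marking holds gc_threads_lock across the broadcast so a wakeup cannot slip in between a worker checking its predicate and blocking on the condition variable. The matching worker side is not part of this diff; a hedged sketch of the standard libuv idiom it would follow, with an illustrative predicate name:

    #include <uv.h>

    static uv_mutex_t gc_threads_lock;
    static uv_cond_t gc_threads_cond;
    static int work_available; /* illustrative predicate, not Julia's state */

    void gc_worker_wait(void)
    {
        uv_mutex_lock(&gc_threads_lock);
        while (!work_available)      /* loop: condvar wakeups can be spurious */
            uv_cond_wait(&gc_threads_cond, &gc_threads_lock);
        uv_mutex_unlock(&gc_threads_lock);
    }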
@@ -2729,7 +2729,7 @@ void gc_mark_loop_parallel(jl_ptls_t ptls, int master)
     }
 }
 
-void gc_mark_loop(jl_ptls_t ptls)
+void gc_mark_loop(jl_ptls_t ptls) JL_NOTSAFEPOINT
 {
     if (jl_n_markthreads == 0 || gc_heap_snapshot_enabled) {
         gc_mark_loop_serial(ptls);
@@ -2739,13 +2739,13 @@ void gc_mark_loop(jl_ptls_t ptls)
     }
 }
 
-void gc_mark_loop_barrier(void)
+void gc_mark_loop_barrier(void) JL_NOTSAFEPOINT
 {
     assert(jl_atomic_load_relaxed(&gc_n_threads_marking) == 0);
     jl_atomic_store_relaxed(&gc_master_tid, -1);
 }
 
-void gc_mark_clean_reclaim_sets(void)
+void gc_mark_clean_reclaim_sets(void) JL_NOTSAFEPOINT
 {
     // Clean up `reclaim-sets`
     for (int i = 0; i < gc_n_threads; i++) {
@@ -2888,7 +2888,7 @@ static void gc_mark_roots(jl_gc_markqueue_t *mq) JL_NOTSAFEPOINT
 
 // find unmarked objects that need to be finalized from the finalizer list "list".
 // this must happen last in the mark phase.
-static void sweep_finalizer_list(arraylist_t *list)
+static void sweep_finalizer_list(arraylist_t *list) JL_NOTSAFEPOINT
 {
     void **items = list->items;
     size_t len = list->len;
@@ -2994,7 +2994,7 @@ JL_DLLEXPORT int64_t jl_gc_live_bytes(void)
     return live_bytes;
 }
 
-uint64_t jl_gc_smooth(uint64_t old_val, uint64_t new_val, double factor)
+uint64_t jl_gc_smooth(uint64_t old_val, uint64_t new_val, double factor) JL_NOTSAFEPOINT
 {
     double est = factor * old_val + (1 - factor) * new_val;
     if (est <= 1)
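jl_gc_smooth is an exponentially weighted moving average: factor near 1 trusts history, factor near 0 tracks the newest sample. Worked example: old_val = 1000, new_val = 2000, factor = 0.8 gives 0.8*1000 + 0.2*2000 = 1200. The same step as a standalone sketch (the lower clamp mirrors the est <= 1 guard; the rest of the body is outside this hunk):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t smooth(uint64_t old_val, uint64_t new_val, double factor)
    {
        double est = factor * old_val + (1 - factor) * new_val;
        return est <= 1 ? 1 : (uint64_t)est; /* clamp assumed from the guard */
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)smooth(1000, 2000, 0.8)); /* 1200 */
        return 0;
    }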
@@ -3006,7 +3006,7 @@ uint64_t jl_gc_smooth(uint64_t old_val, uint64_t new_val, double factor)
 
 // an overallocation curve inspired by array allocations
 // grows very fast initially, then much slower at large heaps
-static uint64_t overallocation(uint64_t old_val, uint64_t val, uint64_t max_val)
+static uint64_t overallocation(uint64_t old_val, uint64_t val, uint64_t max_val) JL_NOTSAFEPOINT
 {
     // compute maxsize = maxsize + 4*maxsize^(7/8) + maxsize/8
     // for small n, we grow much faster than O(n)
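The curve in the comment, delta = 4*n^(7/8) + n/8, is strongly superlinear for small heaps and approaches the linear n/8 term for large ones, so relative overallocation shrinks as the heap grows. Plugging in numbers with a floating-point sketch (pow for clarity; the real code's integer arithmetic is not shown in this hunk): a 1 MiB heap grows by about 83%, a 1 GiB heap by about 42%.

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the comment's formula: delta = 4*n^(7/8) + n/8. */
    static uint64_t growth(uint64_t n)
    {
        return (uint64_t)(4.0 * pow((double)n, 7.0 / 8.0)) + n / 8;
    }

    int main(void) /* link with -lm */
    {
        uint64_t mib = 1ull << 20, gib = 1ull << 30;
        printf("1 MiB heap grows by %.0f%%\n", 100.0 * growth(mib) / mib); /* ~83 */
        printf("1 GiB heap grows by %.0f%%\n", 100.0 * growth(gib) / gib); /* ~42 */
        return 0;
    }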
@@ -3990,7 +3990,7 @@ JL_DLLEXPORT int jl_gc_enable_conservative_gc_support(void)
     }
 }
 
-JL_DLLEXPORT int jl_gc_conservative_gc_support_enabled(void)
+JL_DLLEXPORT int jl_gc_conservative_gc_support_enabled(void) JL_NOTSAFEPOINT
 {
     return jl_atomic_load(&support_conservative_marking);
 }