diff --git a/src/codegen.cpp b/src/codegen.cpp
index 8f0644c0cff7f..8859452f3f11d 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -6159,6 +6159,7 @@ static void emit_stmtpos(jl_codectx_t &ctx, jl_value_t *expr, int ssaval_result)
             Value *scope_ptr = get_scope_field(ctx);
             jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe).decorateInst(
                 ctx.builder.CreateAlignedStore(scope_to_restore, scope_ptr, ctx.types().alignof_ptr));
+            // NOTE: wb not needed here, due to store to current_task (see jl_gc_wb_current_task)
         }
     }
     else if (head == jl_pop_exception_sym) {
@@ -9334,12 +9335,12 @@ static jl_llvm_functions_t
                 Value *scope_ptr = get_scope_field(ctx);
                 LoadInst *current_scope = ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, scope_ptr, ctx.types().alignof_ptr);
                 StoreInst *scope_store = ctx.builder.CreateAlignedStore(scope_boxed, scope_ptr, ctx.types().alignof_ptr);
+                // NOTE: wb not needed here, due to store to current_task (see jl_gc_wb_current_task)
                 jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe).decorateInst(current_scope);
                 jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe).decorateInst(scope_store);
-                // GC preserve the scope, since it is not rooted in the `jl_handler_t *`
-                // and may be removed from jl_current_task by any nested block and then
-                // replaced later
-                Value *scope_token = ctx.builder.CreateCall(prepare_call(gc_preserve_begin_func), {scope_boxed});
+                // GC preserve the current_scope, since it is not rooted in the `jl_handler_t *`;
+                // the newly entered scope is preserved through the current_task.
+                Value *scope_token = ctx.builder.CreateCall(prepare_call(gc_preserve_begin_func), {current_scope});
                 ctx.scope_restore[cursor] = std::make_pair(scope_token, current_scope);
             }
         }
diff --git a/src/gc-interface.h b/src/gc-interface.h
index 826e91355b17a..9c59e274d1261 100644
--- a/src/gc-interface.h
+++ b/src/gc-interface.h
@@ -240,7 +240,14 @@ STATIC_INLINE void jl_gc_wb(const void *parent, const void *ptr) JL_NOTSAFEPOINT
 // so write barriers can be omitted until the next allocation. This function is a no-op that
 // can be used to annotate that a write barrier would be required were it not for this property
 // (as opposed to somebody just having forgotten to think about write barriers).
-STATIC_INLINE void jl_gc_wb_fresh(const void *parent, const void *ptr) JL_NOTSAFEPOINT {}
+STATIC_INLINE void jl_gc_wb_fresh(const void *parent JL_UNUSED, const void *ptr JL_UNUSED) JL_NOTSAFEPOINT {}
+// As an optimization, the current_task is explicitly added to the remset while it is running.
+// Upon deschedule, we conservatively move the write barrier into the young generation.
+// This allows the omission of write barriers for all GC roots on the current task stack (JL_GC_PUSH_*),
+// as well as the Task's explicit fields (but only for the current task).
+// This function is a no-op that can be used to annotate that a write barrier would be required were
+// it not for this property (as opposed to somebody just having forgotten to think about write barriers).
+STATIC_INLINE void jl_gc_wb_current_task(const void *parent JL_UNUSED, const void *ptr JL_UNUSED) JL_NOTSAFEPOINT {}
 // Used to annotate that a write barrier would be required, but may be omitted because `ptr`
 // is known to be an old object.
 STATIC_INLINE void jl_gc_wb_knownold(const void *parent, const void *ptr) JL_NOTSAFEPOINT {}
diff --git a/src/interpreter.c b/src/interpreter.c
index a692cadaf9be1..55f8ec9188b19 100644
--- a/src/interpreter.c
+++ b/src/interpreter.c
@@ -539,12 +539,12 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, size_t ip,
             }
             s->locals[jl_source_nslots(s->src) + ip] = jl_box_ulong(jl_excstack_state(ct));
             if (jl_enternode_scope(stmt)) {
-                jl_value_t *scope = eval_value(jl_enternode_scope(stmt), s);
-                // GC preserve the scope, since it is not rooted in the `jl_handler_t *`
-                // and may be removed from jl_current_task by any nested block and then
-                // replaced later
-                JL_GC_PUSH1(&scope);
-                ct->scope = scope;
+                jl_value_t *old_scope = ct->scope; // Identical to __eh.scope
+                // GC preserve the old_scope, since it is not rooted in the `jl_handler_t *`;
+                // the newly entered scope is preserved through the current_task.
+                JL_GC_PUSH1(&old_scope);
+                ct->scope = eval_value(jl_enternode_scope(stmt), s);
+                jl_gc_wb_current_task(ct, ct->scope);
                 if (!jl_setjmp(__eh.eh_ctx, 0)) {
                     ct->eh = &__eh;
                     eval_body(stmts, s, next_ip, toplevel);
diff --git a/src/rtutils.c b/src/rtutils.c
index 6d4a375017bf2..77ce3c0d4adb0 100644
--- a/src/rtutils.c
+++ b/src/rtutils.c
@@ -296,6 +296,7 @@ JL_DLLEXPORT void jl_eh_restore_state(jl_task_t *ct, jl_handler_t *eh)
     ct->eh = eh->prev;
     ct->gcstack = eh->gcstack;
     ct->scope = eh->scope;
+    jl_gc_wb_current_task(ct, ct->scope);
     small_arraylist_t *locks = &ptls->locks;
     int unlocks = locks->len > eh->locks_len;
     if (unlocks) {
@@ -335,6 +336,7 @@ JL_DLLEXPORT void jl_eh_restore_state_noexcept(jl_task_t *ct, jl_handler_t *eh)
 {
     assert(ct->gcstack == eh->gcstack && "Incorrect GC usage under try catch");
     ct->scope = eh->scope;
+    jl_gc_wb_current_task(ct, ct->scope);
     ct->eh = eh->prev;
     ct->ptls->defer_signal = eh->defer_signal; // optional, but certain try-finally (in stream.jl) may be slightly harder to write without this
 }
diff --git a/src/task.c b/src/task.c
index 019a301b1f062..bb86bce48101a 100644
--- a/src/task.c
+++ b/src/task.c
@@ -203,10 +203,6 @@ static void NOINLINE save_stack(jl_ptls_t ptls, jl_task_t *lastt, jl_task_t **pt
     lastt->ctx.copy_stack = nb;
     lastt->sticky = 1;
     memcpy_stack_a16((uint64_t*)buf, (uint64_t*)frame_addr, nb);
-    // this task's stack could have been modified after
-    // it was marked by an incremental collection
-    // move the barrier back instead of walking it again here
-    jl_gc_wb_back(lastt);
 }
 
 JL_NO_ASAN static void NOINLINE JL_NORETURN restore_stack(jl_ucontext_t *t, jl_ptls_t ptls, char *p)
@@ -504,6 +500,12 @@ JL_NO_ASAN static void ctx_switch(jl_task_t *lastt)
             lastt->ctx.ctx = &lasttstate.ctx;
         }
     }
+    // this task's stack or scope field could have been modified after
+    // it was marked by an incremental collection
+    // move the barrier back instead of walking the shadow stack again here to check if that is required
+    // even if the task was killed (dropping the stack) and only the scope field matters,
+    // let the gc figure that out next time it does a quick mark
+    jl_gc_wb_back(lastt);
 
     // set up global state for new task and clear global state for old task
     t->ptls = ptls;
@@ -1108,6 +1110,7 @@ JL_DLLEXPORT jl_task_t *jl_new_task(jl_function_t *start, jl_value_t *completion
     jl_atomic_store_relaxed(&t->_isexception, 0);
     // Inherit scope from parent task
     t->scope = ct->scope;
+    jl_gc_wb_fresh(t, t->scope);
     // Fork task-local random state from parent
     jl_rng_split(t->rngState, ct->rngState);
     // there is no active exception handler available on this stack yet
@@ -1573,6 +1576,7 @@ jl_task_t *jl_init_root_task(jl_ptls_t ptls, void *stack_lo, void *stack_hi)
     ct->donenotify = jl_nothing;
     jl_atomic_store_relaxed(&ct->_isexception, 0);
     ct->scope = jl_nothing;
+    jl_gc_wb_knownold(ct, ct->scope);
     ct->eh = NULL;
     ct->gcstack = NULL;
     ct->excstack = NULL;
diff --git a/test/scopedvalues.jl b/test/scopedvalues.jl
index e9b36d80fc2c4..500b9647f8da6 100644
--- a/test/scopedvalues.jl
+++ b/test/scopedvalues.jl
@@ -195,3 +195,74 @@ nothrow_scope()
         push!(ts, 2)
     end
 end
+
+# LazyScopedValue
+global lsv_ncalled = 0
+const lsv = LazyScopedValue{Int}(OncePerProcess(() -> (global lsv_ncalled; lsv_ncalled += 1; 1)))
+@testset "LazyScopedValue" begin
+    @test (@with lsv=>2 lsv[]) == 2
+    @test lsv_ncalled == 0
+    @test lsv[] == 1
+    @test lsv_ncalled == 1
+    @test lsv[] == 1
+    @test lsv_ncalled == 1
+end
+
+@testset "ScopedThunk" begin
+    function check_svals()
+        @test sval[] == 8
+        @test sval_float[] == 8.0
+    end
+    sf = nothing
+    @with sval=>8 sval_float=>8.0 begin
+        sf = ScopedThunk(check_svals)
+    end
+    sf()
+    @with sval=>8 sval_float=>8.0 begin
+        sf2 = ScopedThunk{Function}(check_svals)
+    end
+    sf2()
+end
+
+using Base.ScopedValues: ScopedValue, with
+@noinline function test_59483()
+    sv = ScopedValue([])
+    ch = Channel{Bool}()
+
+    # Spawn a child task, which inherits the parent's Scope
+    @noinline function inner_function()
+        # Block until the parent task has left the scope.
+        take!(ch)
+        # Now, per issue 59483, this task's scope is not rooted, except by the task itself.
+
+        # Now switch to an inner scope, leaving the current scope possibly unrooted.
+        val = with(sv=>Any[2]) do
+            # Inside this new scope, when we perform GC, the parent scope can be freed.
+            # The fix for this issue made sure that the first scope in this task remains
+            # rooted.
+            GC.gc()
+            GC.gc()
+            sv[]
+        end
+        @test val == Any[2]
+        # Finally, we've returned to the original scope, but that could be a dangling
+        # pointer if the scope itself was freed by the above GCs. So these GCs could crash:
+        GC.gc()
+        GC.gc()
+    end
+    @noinline function spawn_inner()
+        # Set a new Scope in the parent task - this is the scope that could be freed.
+        with(sv=>Any[1]) do
+            return @async inner_function()
+        end
+    end
+
+    # RUN THE TEST:
+    t = spawn_inner()
+    # Exit the scope, and let the child task proceed
+    put!(ch, true)
+    wait(t)
+end
+@testset "issue 59483" begin
+    test_59483()
+end
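
For reference, the hunks above use three distinct write-barrier annotations for stores into a Task's `scope` field. The sketch below summarizes when each applies; it is illustrative only (`set_scope_examples` and its parameters are hypothetical names, not part of this patch) and assumes the internal declarations from `julia.h` and `gc-interface.h` shown in the diff.

// Illustrative sketch only: which annotation applies to each kind of store
// into `task->scope`, mirroring rtutils.c, jl_new_task, and jl_init_root_task above.
#include "julia.h" // jl_task_t, jl_current_task, jl_nothing; the wb helpers are declared in gc-interface.h

static void set_scope_examples(jl_task_t *fresh_task, jl_task_t *other_task, jl_value_t *scope)
{
    jl_task_t *ct = jl_current_task;

    // Store into the *running* task: it is kept in the remset while scheduled,
    // and ctx_switch calls jl_gc_wb_back(lastt) on deschedule, so this barrier
    // is a documented no-op (see jl_gc_wb_current_task).
    ct->scope = scope;
    jl_gc_wb_current_task(ct, ct->scope);

    // Store into a freshly allocated task that has not yet survived a GC:
    // fresh objects need no barrier until the next allocation (as in jl_new_task).
    fresh_task->scope = scope;
    jl_gc_wb_fresh(fresh_task, fresh_task->scope);

    // Store of a value known to be old (the permanent `jl_nothing`): no
    // old-to-young edge can be created, as in jl_init_root_task.
    ct->scope = jl_nothing;
    jl_gc_wb_knownold(ct, ct->scope);

    // Any other store that may create an old-to-young edge still needs the real barrier.
    other_task->scope = scope;
    jl_gc_wb(other_task, other_task->scope);
}

This mirrors the reasoning in the ctx_switch comment above: a descheduled task's stack or scope field may have changed since the task was last marked, so the barrier is moved back on every switch rather than re-walking the shadow stack to check.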