@@ -1264,9 +1264,15 @@ deferred_wait_thread_detach_sched(struct rb_thread_sched *sched)
     }
 }
 
-static void
+static bool
 deferred_wait_thread_enqueue_yield(struct rb_thread_sched *sched, rb_thread_t *th)
 {
+    if (!sched->is_running) {
+        return false;
+    }
+
+    VM_ASSERT(sched->running == th);
+
     sched->deferred_wait_th = th;
     sched->deferred_wait_seq1 += 1;
 
@@ -1275,10 +1281,16 @@ deferred_wait_thread_enqueue_yield(struct rb_thread_sched *sched, rb_thread_t *t
         rb_native_mutex_lock(&thread_deferred_wait.lock);
         // We held the sched lock while waiting for the mutex so we should not have been unlinked.
         VM_ASSERT(!ccan_node_linked(&sched->deferred_wait_link));
+        if (thread_deferred_wait.stop) {
+            // Deferred waiter is stopped. Fall back.
+            rb_native_mutex_unlock(&thread_deferred_wait.lock);
+            return false;
+        }
         ccan_list_add(&thread_deferred_wait.q_head, &sched->deferred_wait_link);
         rb_native_cond_signal(&thread_deferred_wait.cond);
         rb_native_mutex_unlock(&thread_deferred_wait.lock);
     }
+    return true;
 }
 
 static void
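
The key correctness detail in the hunk above is that `thread_deferred_wait.stop` is tested under the same mutex that guards the queue, so an enqueue can never race with the deferred waiter shutting down. Below is a minimal, self-contained sketch of that enqueue-or-decline pattern using plain pthreads; the names (`waiter_queue`, `wq_try_enqueue`, `node`) are hypothetical stand-ins for illustration, not the Ruby internals.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for the patch's thread_deferred_wait machinery. */
struct node {
    struct node *next;
};

struct waiter_queue {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    struct node    *head;
    bool            stop;   /* set (under lock) when the waiter thread shuts down */
};

/* Enqueue n and wake the waiter, or decline if the waiter is stopping.
 * Returns true on enqueue; false means the caller must fall back. */
static bool
wq_try_enqueue(struct waiter_queue *wq, struct node *n)
{
    bool queued = false;

    pthread_mutex_lock(&wq->lock);
    if (!wq->stop) {
        /* The stop flag and the queue are guarded by the same mutex,
         * so this enqueue cannot race with shutdown. */
        n->next = wq->head;
        wq->head = n;
        pthread_cond_signal(&wq->cond);
        queued = true;
    }
    pthread_mutex_unlock(&wq->lock);
    return queued;
}
```
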
@@ -1294,12 +1306,8 @@ static void
 thread_sched_blocking_region_enter(struct rb_thread_sched *sched, rb_thread_t *th)
 {
     thread_sched_lock(sched, th);
-    if (sched->is_running) {
-        VM_ASSERT(sched->running == th);
-        deferred_wait_thread_enqueue_yield(sched, th);
-    }
-    else {
-        VM_ASSERT(sched->running == NULL);
+    if (!deferred_wait_thread_enqueue_yield(sched, th)) {
+        // If we couldn't defer, then transition to waiting immediately.
         thread_sched_to_waiting_common(sched, th);
     }
     thread_sched_unlock(sched, th);
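
The call-site hunk folds both failure modes (scheduler not running, deferred waiter stopped) into the helper's boolean result, so the caller keeps a single fallback branch; returning `bool` also moves the `is_running`/`running == th` assertions into the helper, next to the code that depends on them. A sketch of the resulting caller shape, reusing the hypothetical `wq_try_enqueue` above, with `slow_path_wait` as a stand-in for `thread_sched_to_waiting_common`:

```c
static void slow_path_wait(void);   /* stand-in for thread_sched_to_waiting_common() */

static void
blocking_region_enter_sketch(struct waiter_queue *wq, struct node *n)
{
    /* Fast path: hand the wait off to the deferred waiter thread. */
    if (!wq_try_enqueue(wq, n)) {
        /* Could not defer (not running, or waiter stopped): wait now. */
        slow_path_wait();
    }
}
```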