@@ -698,8 +698,12 @@ thread_sched_readyq_contain_p(struct rb_thread_sched *sched, rb_thread_t *th)
 {
     rb_thread_t *rth;
     ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
-        if (rth == th) return true;
+        if (rth == th) {
+            VM_ASSERT(th->sched.node.is_ready);
+            return true;
+        }
     }
+    VM_ASSERT(!th->sched.node.is_ready);
     return false;
 }
 
@@ -720,6 +724,8 @@ thread_sched_deq(struct rb_thread_sched *sched)
     }
     else {
         next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);
+        VM_ASSERT(next_th->sched.node.is_ready);
+        next_th->sched.node.is_ready = false;
 
         VM_ASSERT(sched->readyq_cnt > 0);
         sched->readyq_cnt--;
@@ -753,6 +759,7 @@ thread_sched_enq(struct rb_thread_sched *sched, rb_thread_t *ready_th)
     }
 
     ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
+    ready_th->sched.node.is_ready = true;
     sched->readyq_cnt++;
 }
 
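The three hunks above keep the new is_ready flag in lock-step with readyq
membership: thread_sched_enq sets it, thread_sched_deq clears it, and the O(n)
scan in thread_sched_readyq_contain_p only asserts that the cached flag agrees
with the list. A minimal standalone sketch of that invariant, using simplified
stand-in types (toy_thread/toy_sched, a hand-rolled singly linked queue instead
of ccan_list, and assert in place of VM_ASSERT):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_thread {
    bool is_ready;            /* cached: "am I on the ready queue?" */
    struct toy_thread *next;  /* ready-queue link */
};

struct toy_sched {
    struct toy_thread *head, *tail;
    int readyq_cnt;
};

/* O(n) scan used only to validate the cached flag
 * (mirrors thread_sched_readyq_contain_p). */
static bool
readyq_contain_p(struct toy_sched *sched, struct toy_thread *th)
{
    for (struct toy_thread *rth = sched->head; rth; rth = rth->next) {
        if (rth == th) {
            assert(th->is_ready);
            return true;
        }
    }
    assert(!th->is_ready);
    return false;
}

/* Enqueue sets the flag (mirrors thread_sched_enq). */
static void
enq(struct toy_sched *sched, struct toy_thread *th)
{
    th->next = NULL;
    if (sched->tail) sched->tail->next = th;
    else sched->head = th;
    sched->tail = th;
    th->is_ready = true;
    sched->readyq_cnt++;
}

/* Dequeue clears the flag (mirrors thread_sched_deq). */
static struct toy_thread *
deq(struct toy_sched *sched)
{
    struct toy_thread *th = sched->head;
    if (th) {
        sched->head = th->next;
        if (!sched->head) sched->tail = NULL;
        assert(th->is_ready);
        th->is_ready = false;
        sched->readyq_cnt--;
    }
    return th;
}

The flag turns the membership question into an O(1) read; the list scan
survives only inside assertions, so it costs nothing in builds where VM_ASSERT
compiles away.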
@@ -836,6 +843,30 @@ thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, b
     VM_ASSERT(th == rb_ec_thread_ptr(rb_current_ec_noinline()));
 
     if (th != sched->running) {
+        // TODO: This optimization should also be made to work for MN_THREADS
+        if (th->has_dedicated_nt && th == sched->runnable_hot_th && (sched->running == NULL || sched->running->has_dedicated_nt)) {
+            RUBY_DEBUG_LOG("(nt) stealing: hot-th:%u. running:%u", rb_th_serial(th), rb_th_serial(sched->running));
+
+            // If there is a thread set to run, move it back to the front of the readyq
+            if (sched->running != NULL) {
+                rb_thread_t *running = sched->running;
+                VM_ASSERT(!thread_sched_readyq_contain_p(sched, running));
+                running->sched.node.is_ready = true;
+                ccan_list_add(&sched->readyq, &running->sched.node.readyq);
+                sched->readyq_cnt++;
+            }
+
+            // Pull off the ready queue and start running.
+            if (th->sched.node.is_ready) {
+                VM_ASSERT(thread_sched_readyq_contain_p(sched, th));
+                ccan_list_del_init(&th->sched.node.readyq);
+                th->sched.node.is_ready = false;
+                sched->readyq_cnt--;
+            }
+            thread_sched_set_running(sched, th);
+            rb_ractor_thread_switch(th->ractor, th, false);
+        }
+
         // already deleted from running threads
         // VM_ASSERT(!ractor_sched_running_threads_contain_p(th->vm, th)); // need locking
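The branch above lets a "hot" yielder that owns a dedicated native thread
reclaim the running slot instead of parking: whatever thread was already chosen
to run is pushed back to the front of the readyq (it runs next, not now), and
the hot thread unlinks itself from the queue if it was enqueued. A
self-contained sketch of that hand-off; the steal_* types and helpers are
illustrative stand-ins, not CRuby's names (the real code uses ccan_list and
thread_sched_set_running):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct steal_thread {
    bool has_dedicated_nt;
    bool is_ready;
    struct steal_thread *next;  /* ready-queue link */
};

struct steal_sched {
    struct steal_thread *readyq_head;
    struct steal_thread *running;          /* thread chosen to run next */
    struct steal_thread *runnable_hot_th;  /* recent yielder that may steal back */
    int readyq_cnt;
};

static void
push_front(struct steal_sched *s, struct steal_thread *t)
{
    t->next = s->readyq_head;
    s->readyq_head = t;
    t->is_ready = true;
    s->readyq_cnt++;
}

static void
remove_from_readyq(struct steal_sched *s, struct steal_thread *t)
{
    struct steal_thread **pp = &s->readyq_head;
    while (*pp && *pp != t) pp = &(*pp)->next;
    assert(*pp == t);
    *pp = t->next;
    t->is_ready = false;
    s->readyq_cnt--;
}

/* Mirrors the new branch in thread_sched_wait_running_turn. */
static bool
try_steal(struct steal_sched *s, struct steal_thread *th)
{
    if (!(th->has_dedicated_nt && th == s->runnable_hot_th
          && (s->running == NULL || s->running->has_dedicated_nt))) {
        return false;  /* ineligible: fall through to the normal sleep path */
    }
    if (s->running != NULL) {
        push_front(s, s->running);  /* demoted: runs next instead of now */
    }
    if (th->is_ready) {             /* the hot thread may already be queued */
        remove_from_readyq(s, th);
    }
    s->running = th;                /* take the running slot */
    return true;
}

Requeueing the displaced thread at the head rather than the tail preserves its
position: the steal delays it by one turn instead of sending it to the back of
the line.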
@@ -852,6 +883,15 @@ thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, b
             }
             thread_sched_set_locked(sched, th);
 
+            if (sched->runnable_hot_th != NULL) {
+                VM_ASSERT(sched->runnable_hot_th != th);
+                // Give the hot thread a chance to preempt, if it's actively spinning.
+                // On multicore, this reduces the rate of core-switching. On single-core it
+                // should mostly be a nop, since the other thread can't be concurrently spinning.
+                thread_sched_unlock(sched, th);
+                thread_sched_lock(sched, th);
+            }
+
             RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
             if (th == sched->running) {
                 rb_ractor_thread_switch(th->ractor, th, false);
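The unlock/lock pair above deliberately opens a window in which a spinning hot
thread can grab the scheduler lock and steal the running slot before this
thread's wakeup is finalized; the very next line re-checks sched->running
because the window may have changed it. A minimal pthread sketch of the same
pattern (names here are illustrative, not CRuby's):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;
static bool hot_peer_waiting;  /* stands in for sched->runnable_hot_th != NULL */

/* Caller holds sched_lock. Briefly release and re-take it so a peer
 * spinning on the lock can slip in and mutate shared state. */
static void
preemption_window(void)
{
    if (hot_peer_waiting) {
        pthread_mutex_unlock(&sched_lock);
        /* A contending hot thread can acquire the mutex here and, e.g.,
         * steal the running slot. On a single core this is usually a
         * no-op, since no peer can be spinning concurrently. */
        pthread_mutex_lock(&sched_lock);
    }
    /* Any state read before the window must be re-checked after it. */
}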
@@ -900,6 +940,10 @@ thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, b
         thread_sched_add_running_thread(sched, th);
     }
 
+    // Control transfer to the current thread is now complete. The original thread
+    // cannot steal control at this point.
+    sched->runnable_hot_th = NULL;
+
     // VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
     RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED, th);
 }
@@ -1000,25 +1044,28 @@ thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
 //
 // This thread will run dedicated task (th->nt->dedicated++).
 static void
-thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
+thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th, bool yield_immediately)
 {
     RUBY_DEBUG_LOG("th:%u DNT:%d", rb_th_serial(th), th->nt->dedicated);
 
     RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
 
     native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
+    if (!yield_immediately) {
+        sched->runnable_hot_th = th;
+    }
     thread_sched_wakeup_next_thread(sched, th, false);
 }
 
 // running -> waiting
 //
 // This thread will run a dedicated task.
 static void
-thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
+thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th, bool yield_immediately)
 {
     thread_sched_lock(sched, th);
     {
-        thread_sched_to_waiting_common(sched, th);
+        thread_sched_to_waiting_common(sched, th, yield_immediately);
     }
     thread_sched_unlock(sched, th);
 }
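The new yield_immediately parameter decides whether the yielder stays "hot":
passing false leaves sched->runnable_hot_th pointing at it, so it may steal its
slot back via the path above, while true hands the turn off for good. The call
sites are outside this hunk, so the caller below is a hypothetical sketch of
how one might choose the flag (enter_blocking_region and expect_long_block are
assumptions, not names from this commit):

/* Hypothetical call site: a thread expecting a long block (e.g. sleeping
 * on I/O) yields immediately and gives up its hot status; one that expects
 * to return quickly keeps steal rights by passing false. */
static void
enter_blocking_region(struct rb_thread_sched *sched, rb_thread_t *th,
                      bool expect_long_block)
{
    thread_sched_to_waiting(sched, th, /* yield_immediately = */ expect_long_block);
}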