Commit adf97a2

Author: Jacob Lacouture (committed)
briefly defer yielding the thread/gvl
1 parent 7a5688e commit adf97a2

File tree: 5 files changed (+116, -5 lines)

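In short, the change gives each rb_thread_sched a small native worker thread. When a Ruby thread enters a blocking region it no longer hands the GVL off immediately: it records a deferred-wait request and signals the worker (queue_thread_sched_to_waiting in thread.c). The worker sleeps for roughly 50 microseconds and only then performs the real deschedule; if the blocking call returns before that, blocking_region_end cancels the request by bumping a generation counter. The standalone program below is a minimal sketch of just that handshake using plain pthreads; the names (struct deferred_release, defer_release, cancel_release) are illustrative rather than CRuby API, and the actual "release" is reduced to a printf.

// Sketch of the defer-and-cancel handshake (assumed names, not CRuby code).
// A requester queues a deferred release before a blocking call; a worker waits
// ~50us and performs the release only if the request was not cancelled first.
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct deferred_release {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    bool stop;        // tells the worker to exit
    bool pending;     // a release request is queued
    int  generation;  // bumped on queue/cancel/consume to invalidate stale snapshots
};

static struct deferred_release d = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false, false, 0
};

static void *worker(void *arg) {
    (void)arg;
    pthread_mutex_lock(&d.lock);
    while (!d.stop) {
        while (!d.pending && !d.stop)
            pthread_cond_wait(&d.cond, &d.lock);
        if (d.stop) break;
        int seen = d.generation;
        pthread_mutex_unlock(&d.lock);
        usleep(50);                      // the brief deferral window
        pthread_mutex_lock(&d.lock);
        if (seen == d.generation) {      // still outstanding: perform the release
            d.pending = false;
            d.generation++;
            printf("worker: releasing on behalf of the blocked thread\n");
        }                                // else: cancelled or re-queued; drop it
    }
    pthread_mutex_unlock(&d.lock);
    return NULL;
}

// Called right before a potentially blocking operation.
static void defer_release(void) {
    pthread_mutex_lock(&d.lock);
    d.pending = true;
    d.generation++;
    pthread_cond_signal(&d.cond);
    pthread_mutex_unlock(&d.lock);
}

// Called when the blocking operation returns; true if the release never happened.
static bool cancel_release(void) {
    pthread_mutex_lock(&d.lock);
    bool cancelled = d.pending;
    if (cancelled) {
        d.pending = false;
        d.generation++;
    }
    pthread_mutex_unlock(&d.lock);
    return cancelled;
}

int main(void) {
    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);

    defer_release();
    // A "fast" blocking call: returns well inside the 50us window.
    if (cancel_release()) printf("fast path: request cancelled, no hand-off needed\n");

    defer_release();
    usleep(1000);    // a "slow" blocking call: the worker fires during the wait
    if (!cancel_release()) printf("slow path: worker already performed the release\n");

    pthread_mutex_lock(&d.lock);
    d.stop = true;
    pthread_cond_signal(&d.cond);
    pthread_mutex_unlock(&d.lock);
    pthread_join(t, NULL);
    return 0;
}

The generation counter is what makes the unlock/usleep/relock window in the worker safe: whichever side acts first bumps it, so the other side can tell its snapshot is stale and must not act on it. This mirrors the deferred_wait_th_count checks in thread.c and thread_pthread.c below.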

test/-ext-/thread/test_instrumentation_api.rb

Lines changed: 4 additions & 1 deletion
@@ -98,8 +98,11 @@ def test_io_release_gvl
     thread = nil
     full_timeline = record do
       thread = Thread.new do
-        w.write("Hello\n")
+        r.readline
       end
+      # Sleep causes the readline call to take long enough that its thread gets descheduled.
+      sleep(1)
+      w.puts("Hello")
       thread.join
     end

test/ruby/test_process.rb

Lines changed: 2 additions & 1 deletion
@@ -1956,7 +1956,8 @@ def test_daemon_no_threads
         puts Dir.entries("/proc/self/task") - %W[. ..]
       end
       bug4920 = '[ruby-dev:43873]'
-      assert_include(1..2, data.size, bug4920)
+      # On pthread builds there will be an extra thread for the deferred-thread-wait worker
+      assert_include(1..3, data.size, bug4920)
       assert_not_include(data.map(&:to_i), pid)
     end
   else # darwin

thread.c

Lines changed: 22 additions & 3 deletions
@@ -170,6 +170,7 @@ struct rb_blocking_region_buffer {
 
 static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
 static void unblock_function_clear(rb_thread_t *th);
+static void queue_thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th);
 
 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                                         rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
@@ -202,7 +203,7 @@ static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_regio
         /* Important that this is inlined into the macro, and not part of \
         * blocking_region_begin - see bug #20493 */ \
         RB_VM_SAVE_MACHINE_CONTEXT(th); \
-        thread_sched_to_waiting(TH_SCHED(th), th); \
+        queue_thread_sched_to_waiting(TH_SCHED(th), th); \
         exec; \
         blocking_region_end(th, &__region); \
     }; \
@@ -1486,6 +1487,17 @@ rb_thread_schedule(void)
     RUBY_VM_CHECK_INTS(GET_EC());
 }
 
+static void
+queue_thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th) {
+    thread_sched_lock(sched, th);
+    {
+        sched->deferred_wait_th = th;
+        sched->deferred_wait_th_count += 1;
+        rb_native_cond_signal(&sched->deferred_wait_cond);
+    }
+    thread_sched_unlock(sched, th);
+}
+
 /* blocking region */
 
 static inline int
@@ -1519,9 +1531,16 @@ blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
     /* entry to ubf_list impossible at this point, so unregister is safe: */
     unregister_ubf_list(th);
 
-    thread_sched_to_running(TH_SCHED(th), th);
-    rb_ractor_thread_switch(th->ractor, th);
+    struct rb_thread_sched *sched = TH_SCHED(th);
+    if (sched->running == th && th == sched->deferred_wait_th) {
+        // We never descheduled the thread. Cancel that request now.
+        sched->deferred_wait_th_count += 1;
+        sched->deferred_wait_th = NULL;
+    } else {
+        thread_sched_to_running(sched, th);
+    }
 
+    rb_ractor_thread_switch(th->ractor, th);
     th->blocking_region_buffer = 0;
     rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
     if (th->status == THREAD_STOPPED) {

thread_pthread.c

Lines changed: 80 additions & 0 deletions
@@ -1129,9 +1129,82 @@ thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
     thread_sched_unlock(sched, th);
 }
 
+static void
+transfer_sched_lock(struct rb_thread_sched *sched, struct rb_thread_struct *current, struct rb_thread_struct *th)
+{
+    RUBY_DEBUG_LOG("Transferring sched ownership from:%u to th:%u", rb_th_serial(current), rb_th_serial(th));
+#if VM_CHECK_MODE
+    VM_ASSERT(sched->lock_owner == current);
+    sched->lock_owner = th;
+#endif
+}
+
+static void *
+deferred_wait_thread_worker(void *arg) {
+    struct rb_thread_sched *sched = (struct rb_thread_sched *) arg;
+    struct rb_thread_struct *lock_owner = sched->deferred_wait_th_dummy;
+
+    thread_sched_lock(sched, lock_owner);
+    for (;;) {
+        if (sched->stop) {
+            break;
+        }
+        // The cond-wait will drop the lock. We'll need to update lock_owner manually.
+        transfer_sched_lock(sched, lock_owner, NULL);
+        rb_native_cond_wait(&sched->deferred_wait_cond, &sched->lock_);
+        transfer_sched_lock(sched, NULL, lock_owner);
+        for (;;) {
+            if (!sched->deferred_wait_th) {
+                break;
+            }
+            struct rb_thread_struct *th = sched->deferred_wait_th;
+            int count = sched->deferred_wait_th_count;
+            thread_sched_unlock(sched, lock_owner);
+            usleep(50);
+
+            // th is not a stable reference here. Go back to our dummy thread.
+            lock_owner = sched->deferred_wait_th_dummy;
+            thread_sched_lock(sched, lock_owner);
+
+            if (count != sched->deferred_wait_th_count) {
+                continue;
+            }
+            VM_ASSERT(th == sched->deferred_wait_th);
+            sched->deferred_wait_th = NULL;
+            sched->deferred_wait_th_count += 1;
+
+            // Before calling into the scheduler we need to transfer lock ownership (logically) from the worker
+            // thread to the target thread.
+            transfer_sched_lock(sched, lock_owner, th);
+            // We're now acting on behalf of the target thread.
+            lock_owner = th;
+            thread_sched_to_waiting_common(sched, th);
+            break;
+        }
+    }
+    thread_sched_unlock(sched, lock_owner);
+    return NULL;
+}
+
+static void start_deferred_wait_thread(struct rb_thread_sched *sched) {
+    pthread_attr_t attr;
+    int r;
+    r = pthread_attr_init(&attr);
+    if (r) {
+        rb_bug_errno("start_deferred_wait_thread - pthread_attr_init", r);
+    }
+    r = pthread_create(&sched->deferred_wait_pthread, &attr, deferred_wait_thread_worker, sched);
+    if (r) {
+        rb_bug_errno("start_deferred_wait_thread - pthread_create", r);
+    }
+    pthread_attr_destroy(&attr);
+    pthread_setname_np(sched->deferred_wait_pthread, "rb_def_wait");
+}
+
 void
 rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
 {
+    sched->stop = false;
     rb_native_mutex_initialize(&sched->lock_);
 
 #if VM_CHECK_MODE
@@ -1144,6 +1217,13 @@ rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
 #if USE_MN_THREADS
     if (!atfork) sched->enable_mn_threads = true; // MN is enabled on Ractors
 #endif
+
+    rb_native_cond_initialize(&sched->deferred_wait_cond);
+    sched->deferred_wait_th = NULL;
+    sched->deferred_wait_th_count = 0;
+    sched->deferred_wait_th_dummy = (struct rb_thread_struct *) malloc(sizeof(struct rb_thread_struct));
+    sched->deferred_wait_th_dummy->serial = 100000;
+    start_deferred_wait_thread(sched);
 }
 
 static void
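
start_deferred_wait_thread follows the usual shape for spawning a long-lived native helper: pthread_attr_init / pthread_create / pthread_attr_destroy, aborting via rb_bug_errno on failure, then naming the thread. That extra native thread is also why test_process.rb above now tolerates one more entry in /proc/self/task. Below is a standalone sketch of the same pattern (illustrative names, not CRuby code); note that the two-argument pthread_setname_np used here is the glibc form, and the name must fit in 16 bytes including the terminating NUL.

// Spawning and naming a long-lived helper thread (illustrative, non-CRuby sketch).
#define _GNU_SOURCE            // for glibc's pthread_setname_np(pthread_t, const char *)
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void *helper_main(void *arg) {
    (void)arg;
    // A real worker would loop on a condition variable here (see the earlier sketch).
    pause();                   // park indefinitely; the demo simply exits around it
    return NULL;
}

static pthread_t spawn_named_helper(const char *name) {
    pthread_attr_t attr;
    pthread_t tid;
    int r;

    r = pthread_attr_init(&attr);
    if (r != 0) { fprintf(stderr, "pthread_attr_init: %s\n", strerror(r)); exit(1); }

    r = pthread_create(&tid, &attr, helper_main, NULL);
    if (r != 0) { fprintf(stderr, "pthread_create: %s\n", strerror(r)); exit(1); }

    pthread_attr_destroy(&attr);

    // glibc-specific; the name is cosmetic, so a failure is reported but not fatal.
    r = pthread_setname_np(tid, name);
    if (r != 0) fprintf(stderr, "pthread_setname_np: %s\n", strerror(r));

    return tid;
}

int main(void) {
    spawn_named_helper("demo_helper");  // shows up in /proc/self/task/*/comm on Linux
    sleep(1);                           // let it start, then exit the whole process
    return 0;
}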

thread_pthread.h

Lines changed: 8 additions & 0 deletions
@@ -128,6 +128,14 @@ struct rb_thread_sched {
     int readyq_cnt;
     // ractor scheduling
     struct ccan_list_node grq_node;
+
+    // Deferred descheduler
+    bool stop;
+    pthread_t deferred_wait_pthread;
+    rb_nativethread_cond_t deferred_wait_cond;
+    int deferred_wait_th_count;
+    struct rb_thread_struct *deferred_wait_th;
+    struct rb_thread_struct *deferred_wait_th_dummy;
 };
 
 #ifdef RB_THREAD_LOCAL_SPECIFIER
