@@ -120,6 +120,23 @@ static struct {
     struct ccan_list_head q_head;
 } thread_deferred_wait;
 
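+/* In-memory trace of native lock activity: dump_log_base marks the start of
+ * the buffer and dump_log is the current write cursor. Both stay NULL until
+ * rb_native_mutex_initialize() lazily allocates the buffer. */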
+char *dump_log_base = NULL;
+char *dump_log = NULL;
+
+#include <unistd.h>
+
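+/* Append one record, tagged with pid and pthread id, to the trace buffer.
+ * Debug-only: neither tmp nor the trace buffer is bounds-checked, so each
+ * record must stay well under 1000 bytes. */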
+#define DUMP_LOG_REPORT(...) do { \
+    char tmp[1000]; \
+    sprintf(tmp, __VA_ARGS__); \
+    dump_log += sprintf(dump_log, "%x.%lx %s", (int)getpid(), (long)pthread_self(), tmp); \
+} while (0)
+
 // native thread wrappers
 
 #define NATIVE_MUTEX_LOCK_DEBUG 0
@@ -141,19 +158,34 @@ mutex_debug(const char *msg, void *lock)
 void
 rb_native_mutex_lock(pthread_mutex_t *lock)
 {
+    if (dump_log) {
+        DUMP_LOG_REPORT("mutex_lock: %lx\n", (unsigned long)lock);
+    }
+
     int r;
 #if NATIVE_MUTEX_LOCK_DEBUG_YIELD
     native_thread_yield();
 #endif
     mutex_debug("lock", lock);
     if ((r = pthread_mutex_lock(lock)) != 0) {
+
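+        /* Flush the accumulated trace to stderr before rb_bug_errno()
+         * aborts the process. */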
+        if (dump_log) {
+            fprintf(stderr, "%s", dump_log_base);
+        }
+
         rb_bug_errno("pthread_mutex_lock", r);
     }
 }
 
 void
 rb_native_mutex_unlock(pthread_mutex_t *lock)
 {
+    if (dump_log) {
+        DUMP_LOG_REPORT("mutex_unlock: %lx\n", (unsigned long)lock);
+    }
+
     int r;
     mutex_debug("unlock", lock);
     if ((r = pthread_mutex_unlock(lock)) != 0) {
@@ -168,18 +200,31 @@ rb_native_mutex_trylock(pthread_mutex_t *lock)
     mutex_debug("trylock", lock);
     if ((r = pthread_mutex_trylock(lock)) != 0) {
         if (r == EBUSY) {
+            if (dump_log) {
+                DUMP_LOG_REPORT("mutex_trylock_fail: %lx\n", (unsigned long)lock);
+            }
             return EBUSY;
         }
         else {
             rb_bug_errno("pthread_mutex_trylock", r);
         }
     }
+    if (dump_log) {
+        DUMP_LOG_REPORT("mutex_trylock_success: %lx\n", (unsigned long)lock);
+    }
     return 0;
 }
 
 void
 rb_native_mutex_initialize(pthread_mutex_t *lock)
 {
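+    /* Lazily allocate the 1 GB trace buffer the first time any mutex is
+     * initialized. Debug-only: the malloc result is never checked or freed. */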
+    if (!dump_log) {
+        dump_log = dump_log_base = (char *)malloc(1000000000);
+    }
+    DUMP_LOG_REPORT("mutex_init: %lx\n", (unsigned long)lock);
+
     int r = pthread_mutex_init(lock, 0);
     mutex_debug("init", lock);
     if (r != 0) {
@@ -190,6 +235,10 @@ rb_native_mutex_initialize(pthread_mutex_t *lock)
 void
 rb_native_mutex_destroy(pthread_mutex_t *lock)
 {
+    if (dump_log) {
+        DUMP_LOG_REPORT("mutex_destroy: %lx\n", (unsigned long)lock);
+    }
+
     int r = pthread_mutex_destroy(lock);
     mutex_debug("destroy", lock);
     if (r != 0) {
@@ -424,6 +473,12 @@ thread_sched_set_unlocked(struct rb_thread_sched *sched, rb_thread_t *th)
 static void
 thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
 {
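+    /* Record every scheduler-lock acquisition; together with the trace in
+     * thread_sched_unlock_(), the dump exposes lock/unlock imbalances. */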
+    if (dump_log) {
+        DUMP_LOG_REPORT("thread_sched_lock_. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+    }
+
     rb_native_mutex_lock(&sched->lock_);
 
 #if VM_CHECK_MODE
@@ -438,6 +493,10 @@ thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
 static void
 thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
 {
+    if (dump_log) {
+        DUMP_LOG_REPORT("thread_sched_unlock_. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+    }
+
     RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
 
     thread_sched_set_unlocked(sched, th);
@@ -1255,12 +1314,21 @@ deferred_wait_thread_worker(void *arg)
 static void
 deferred_wait_thread_detach_sched(struct rb_thread_sched *sched)
 {
+
     // Only unlink if we're actually linked.
     if (ccan_node_linked(&sched->deferred_wait_link)) {
+        if (dump_log) {
+            DUMP_LOG_REPORT("thread_sched_detach_real. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+        }
+
         rb_native_mutex_lock(&thread_deferred_wait.lock);
         VM_ASSERT(ccan_node_linked(&sched->deferred_wait_link));
         ccan_list_del_init(&sched->deferred_wait_link);
         rb_native_mutex_unlock(&thread_deferred_wait.lock);
+    } else {
+        if (dump_log) {
+            DUMP_LOG_REPORT("thread_sched_detach_bypass. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+        }
     }
 }
 
@@ -1305,23 +1373,47 @@ deferred_wait_thread_cancel_yield(struct rb_thread_sched *sched)
 static void
 thread_sched_blocking_region_enter(struct rb_thread_sched *sched, rb_thread_t *th)
 {
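+    /* The traces below record whether this thread handed its wait off to the
+     * deferred-wait thread (enqueue_success) or had to transition to waiting
+     * immediately (enqueue_fail). */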
+    if (dump_log) {
+        DUMP_LOG_REPORT("thread_sched_enqueue_try. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+    }
+
     thread_sched_lock(sched, th);
     if (!deferred_wait_thread_enqueue_yield(sched, th)) {
+        if (dump_log) {
+            DUMP_LOG_REPORT("thread_sched_enqueue_fail. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+        }
         // If we couldn't defer, then transition to waiting immediately.
         thread_sched_to_waiting_common(sched, th);
+    } else {
+        if (dump_log) {
+            DUMP_LOG_REPORT("thread_sched_enqueue_success. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+        }
     }
     thread_sched_unlock(sched, th);
 }
 
 static void
 thread_sched_blocking_region_exit(struct rb_thread_sched *sched, rb_thread_t *th)
 {
+    if (dump_log) {
+        DUMP_LOG_REPORT("thread_sched_blocking_region_exit. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+    }
+
     thread_sched_lock(sched, th);
     if (sched->running == th && th == sched->deferred_wait_th) {
+        if (dump_log) {
+            DUMP_LOG_REPORT("thread_sched_blocking_region_exit/cancel. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+        }
         // We never descheduled the thread. Cancel that request now.
         deferred_wait_thread_cancel_yield(sched);
     }
     else {
+        if (dump_log) {
+            DUMP_LOG_REPORT("thread_sched_blocking_region_exit/wait. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+        }
         thread_sched_to_running_common(sched, th);
     }
     thread_sched_unlock(sched, th);
@@ -1330,6 +1422,10 @@ thread_sched_blocking_region_exit(struct rb_thread_sched *sched, rb_thread_t *th)
 void
 rb_thread_start_deferred_wait_thread(void)
 {
+    if (dump_log) {
+        DUMP_LOG_REPORT("rb_thread_start_deferred_wait_thread. lock: %lx\n", (unsigned long)&thread_deferred_wait.lock);
+    }
+
     rb_native_mutex_initialize(&thread_deferred_wait.lock);
     rb_native_cond_initialize(&thread_deferred_wait.cond);
     ccan_list_head_init(&thread_deferred_wait.q_head);
@@ -1345,11 +1441,19 @@ rb_thread_start_deferred_wait_thread(void)
         rb_bug_errno("start_deferred_wait_thread - pthread_create", r);
     }
     pthread_attr_destroy(&attr);
+
+    if (dump_log) {
+        DUMP_LOG_REPORT("rb_thread_start_deferred_wait_thread/complete. lock: %lx\n", (unsigned long)&thread_deferred_wait.lock);
+    }
 }
 
 void
 rb_thread_stop_deferred_wait_thread(void)
 {
+    if (dump_log) {
+        DUMP_LOG_REPORT("rb_thread_stop_deferred_wait_thread. lock: %lx\n", (unsigned long)&thread_deferred_wait.lock);
+    }
+
     rb_native_mutex_lock(&thread_deferred_wait.lock);
     thread_deferred_wait.running = false;
     rb_native_cond_signal(&thread_deferred_wait.cond);
@@ -1358,12 +1462,20 @@ rb_thread_stop_deferred_wait_thread(void)
     VM_ASSERT(ccan_list_empty(&thread_deferred_wait.q_head));
     rb_native_cond_destroy(&thread_deferred_wait.cond);
     rb_native_mutex_destroy(&thread_deferred_wait.lock);
+
+    if (dump_log) {
+        DUMP_LOG_REPORT("rb_thread_stop_deferred_wait_thread/complete. lock: %lx\n", (unsigned long)&thread_deferred_wait.lock);
+    }
 }
 
 
 void
 rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
 {
+    if (dump_log) {
+        DUMP_LOG_REPORT("thread_sched_init. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+    }
+
     rb_native_mutex_initialize(&sched->lock_);
 
 #if VM_CHECK_MODE
@@ -1756,6 +1868,10 @@ static void clear_thread_cache_altstack(void);
 void
 rb_thread_sched_destroy(struct rb_thread_sched *sched)
 {
+    if (dump_log) {
+        DUMP_LOG_REPORT("thread_sched_destroy. sched: %lx lock: %lx\n", (unsigned long)sched, (unsigned long)&sched->lock_);
+    }
+
     deferred_wait_thread_detach_sched(sched);
 
     /*