 #include <linux/string.h>
 #include <linux/zalloc.h>
 
-struct thread_rb_node {
-	struct rb_node rb_node;
-	struct thread *thread;
-};
-
-static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
-{
-	/* Cast it to handle tid == -1 */
-	return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
-}
-
 static struct dso *machine__kernel_dso(struct machine *machine)
 {
 	return map__dso(machine->vmlinux_map);
@@ -66,18 +55,6 @@ static void dsos__init(struct dsos *dsos)
 	init_rwsem(&dsos->lock);
 }
 
-void threads__init(struct threads *threads)
-{
-	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
-		struct threads_table_entry *table = &threads->table[i];
-
-		table->entries = RB_ROOT_CACHED;
-		init_rwsem(&table->lock);
-		table->nr = 0;
-		table->last_match = NULL;
-	}
-}
-
 static int machine__set_mmap_name(struct machine *machine)
 {
 	if (machine__is_host(machine))
@@ -210,49 +187,11 @@ static void dsos__exit(struct dsos *dsos)
 	exit_rwsem(&dsos->lock);
 }
 
-static void __threads_table_entry__set_last_match(struct threads_table_entry *table,
-						  struct thread *th);
-
-void threads__remove_all_threads(struct threads *threads)
-{
-	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
-		struct threads_table_entry *table = &threads->table[i];
-		struct rb_node *nd;
-
-		down_write(&table->lock);
-		__threads_table_entry__set_last_match(table, NULL);
-		nd = rb_first_cached(&table->entries);
-		while (nd) {
-			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
-
-			nd = rb_next(nd);
-			thread__put(trb->thread);
-			rb_erase_cached(&trb->rb_node, &table->entries);
-			RB_CLEAR_NODE(&trb->rb_node);
-			--table->nr;
-
-			free(trb);
-		}
-		assert(table->nr == 0);
-		up_write(&table->lock);
-	}
-}
-
 void machine__delete_threads(struct machine *machine)
 {
 	threads__remove_all_threads(&machine->threads);
 }
 
-void threads__exit(struct threads *threads)
-{
-	threads__remove_all_threads(threads);
-	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
-		struct threads_table_entry *table = &threads->table[i];
-
-		exit_rwsem(&table->lock);
-	}
-}
-
 void machine__exit(struct machine *machine)
 {
 	if (machine == NULL)
@@ -568,121 +507,6 @@ static void machine__update_thread_pid(struct machine *machine,
 		goto out_put;
 	}
 
-/*
- * Front-end cache - TID lookups come in blocks,
- * so most of the time we don't have to look up
- * the full rbtree:
- */
-static struct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table,
-							    pid_t tid)
-{
-	struct thread *th, *res = NULL;
-
-	th = table->last_match;
-	if (th != NULL) {
-		if (thread__tid(th) == tid)
-			res = thread__get(th);
-	}
-	return res;
-}
-
-static void __threads_table_entry__set_last_match(struct threads_table_entry *table,
-						  struct thread *th)
-{
-	thread__put(table->last_match);
-	table->last_match = thread__get(th);
-}
-
-static void threads_table_entry__set_last_match(struct threads_table_entry *table,
-						struct thread *th)
-{
-	down_write(&table->lock);
-	__threads_table_entry__set_last_match(table, th);
-	up_write(&table->lock);
-}
-
-struct thread *threads__find(struct threads *threads, pid_t tid)
-{
-	struct threads_table_entry *table = threads__table(threads, tid);
-	struct rb_node **p;
-	struct thread *res = NULL;
-
-	down_read(&table->lock);
-	res = __threads_table_entry__get_last_match(table, tid);
-	if (res)
-		return res;
-
-	p = &table->entries.rb_root.rb_node;
-	while (*p != NULL) {
-		struct rb_node *parent = *p;
-		struct thread *th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
-		if (thread__tid(th) == tid) {
-			res = thread__get(th);
-			break;
-		}
-
-		if (tid < thread__tid(th))
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-	up_read(&table->lock);
-	if (res)
-		threads_table_entry__set_last_match(table, res);
-	return res;
-}
-
-struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
-{
-	struct threads_table_entry *table = threads__table(threads, tid);
-	struct rb_node **p;
-	struct rb_node *parent = NULL;
-	struct thread *res = NULL;
-	struct thread_rb_node *nd;
-	bool leftmost = true;
-
-	*created = false;
-	down_write(&table->lock);
-	p = &table->entries.rb_root.rb_node;
-	while (*p != NULL) {
-		struct thread *th;
-
-		parent = *p;
-		th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
-		if (thread__tid(th) == tid) {
-			__threads_table_entry__set_last_match(table, th);
-			res = thread__get(th);
-			goto out_unlock;
-		}
-
-		if (tid < thread__tid(th))
-			p = &(*p)->rb_left;
-		else {
-			p = &(*p)->rb_right;
-			leftmost = false;
-		}
-	}
-	nd = malloc(sizeof(*nd));
-	if (nd == NULL)
-		goto out_unlock;
-	res = thread__new(pid, tid);
-	if (!res)
-		free(nd);
-	else {
-		*created = true;
-		nd->thread = thread__get(res);
-		rb_link_node(&nd->rb_node, parent, p);
-		rb_insert_color_cached(&nd->rb_node, &table->entries, leftmost);
-		++table->nr;
-		__threads_table_entry__set_last_match(table, res);
-	}
-out_unlock:
-	up_write(&table->lock);
-	return res;
-}
-
 /*
  * Caller must eventually drop thread->refcnt returned with a successful
  * lookup/new thread inserted.
@@ -699,7 +523,6 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
 		machine__update_thread_pid(machine, th, pid);
 		return th;
 	}
-
 	if (!create)
 		return NULL;
 
@@ -1147,20 +970,6 @@ static int machine_fprintf_cb(struct thread *thread, void *data)
 	return 0;
 }
 
-size_t threads__nr(struct threads *threads)
-{
-	size_t nr = 0;
-
-	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
-		struct threads_table_entry *table = &threads->table[i];
-
-		down_read(&table->lock);
-		nr += table->nr;
-		up_read(&table->lock);
-	}
-	return nr;
-}
-
 size_t machine__fprintf(struct machine *machine, FILE *fp)
 {
 	struct machine_fprintf_cb_args args = {
@@ -2093,39 +1902,6 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
 	return 0;
 }
 
-void threads__remove(struct threads *threads, struct thread *thread)
-{
-	struct rb_node **p;
-	struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
-	pid_t tid = thread__tid(thread);
-
-	down_write(&table->lock);
-	if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
-		__threads_table_entry__set_last_match(table, NULL);
-
-	p = &table->entries.rb_root.rb_node;
-	while (*p != NULL) {
-		struct rb_node *parent = *p;
-		struct thread_rb_node *nd = rb_entry(parent, struct thread_rb_node, rb_node);
-		struct thread *th = nd->thread;
-
-		if (RC_CHK_EQUAL(th, thread)) {
-			thread__put(nd->thread);
-			rb_erase_cached(&nd->rb_node, &table->entries);
-			RB_CLEAR_NODE(&nd->rb_node);
-			--table->nr;
-			free(nd);
-			break;
-		}
-
-		if (tid < thread__tid(th))
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-	up_write(&table->lock);
-}
-
 void machine__remove_thread(struct machine *machine, struct thread *th)
 {
 	return threads__remove(&machine->threads, th);
@@ -3258,30 +3034,6 @@ int thread__resolve_callchain(struct thread *thread,
 	return ret;
 }
 
-int threads__for_each_thread(struct threads *threads,
-			     int (*fn)(struct thread *thread, void *data),
-			     void *data)
-{
-	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
-		struct threads_table_entry *table = &threads->table[i];
-		struct rb_node *nd;
-
-		down_read(&table->lock);
-		for (nd = rb_first_cached(&table->entries); nd; nd = rb_next(nd)) {
-			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
-			int rc = fn(trb->thread, data);
-
-			if (rc != 0) {
-				up_read(&table->lock);
-				return rc;
-			}
-		}
-		up_read(&table->lock);
-	}
-	return 0;
-
-}
-
 int machine__for_each_thread(struct machine *machine,
 			     int (*fn)(struct thread *thread, void *p),
 			     void *priv)
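
For context, here is a minimal usage sketch of the threads API this patch moves out of machine.c. It assumes the new util/threads.h header introduced by this series; the names threads_demo and count_cb are hypothetical and exist only for illustration, error handling is trimmed:

```c
#include <stdbool.h>
#include <stddef.h>
#include "util/threads.h"	/* assumed location of the moved API */
#include "util/thread.h"

/* Callback for threads__for_each_thread(); a non-zero return stops iteration. */
static int count_cb(struct thread *thread, void *data)
{
	size_t *n = data;

	(void)thread;
	(*n)++;
	return 0;
}

static void threads_demo(void)	/* hypothetical driver */
{
	struct threads threads;
	struct thread *t;
	bool created;
	size_t n = 0;

	threads__init(&threads);

	/* Find-or-create takes a reference; the caller must drop it. */
	t = threads__findnew(&threads, /*pid=*/1234, /*tid=*/1234, &created);
	if (t)
		thread__put(t);

	threads__for_each_thread(&threads, count_cb, &n);

	threads__exit(&threads);	/* puts all threads, releases the per-table locks */
}
```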