#include "machine.h"
#include "thread.h"

- struct thread_rb_node {
- 	struct rb_node rb_node;
- 	struct thread *thread;
- };
-
static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
{
	/* Cast it to handle tid == -1 */
	return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
}

+ static size_t key_hash(long key, void *ctx __maybe_unused)
+ {
+ 	/* The table lookup removes low bit entropy, but this is just ignored here. */
+ 	return key;
+ }
+
+ static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
+ {
+ 	return key1 == key2;
+ }
+
void threads__init(struct threads *threads)
{
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];

- 		table->entries = RB_ROOT_CACHED;
+ 		hashmap__init(&table->shard, key_hash, key_equal, NULL);
		init_rwsem(&table->lock);
- 		table->nr = 0;
		table->last_match = NULL;
	}
}
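Not shown in this diff is the matching threads.h change. Judging from the uses of `table->shard`, `table->lock` and `table->last_match` above, the per-bucket entry presumably now embeds a `struct hashmap` where the cached rbtree root used to live; a rough sketch of the assumed layout (anything not already visible in the hunks is a guess):

```c
/* Assumed shape of the header counterpart to this change, not part of the patch. */
struct threads_table_entry {
	struct hashmap		shard;		/* key: tid, value: struct thread * */
	struct rw_semaphore	lock;		/* guards shard and last_match */
	struct thread		*last_match;	/* one-entry lookup cache */
};

struct threads {
	struct threads_table_entry table[THREADS__TABLE_SIZE];
};
```

`key_hash()` can get away with returning the tid unchanged: `threads__table()` has already consumed the low bits via `% THREADS__TABLE_SIZE` when picking a shard, and, as the new comment says, the resulting loss of low-bit entropy within a shard is simply accepted rather than mixed back in.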
@@ -32,6 +37,7 @@ void threads__exit(struct threads *threads)
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];

+ 		hashmap__clear(&table->shard);
		exit_rwsem(&table->lock);
	}
}
@@ -44,7 +50,7 @@ size_t threads__nr(struct threads *threads)
		struct threads_table_entry *table = &threads->table[i];

		down_read(&table->lock);
- 		nr += table->nr;
+ 		nr += hashmap__size(&table->shard);
		up_read(&table->lock);
	}
	return nr;
@@ -86,28 +92,13 @@ static void threads_table_entry__set_last_match(struct threads_table_entry *tabl
struct thread *threads__find(struct threads *threads, pid_t tid)
{
	struct threads_table_entry *table = threads__table(threads, tid);
- 	struct rb_node **p;
- 	struct thread *res = NULL;
+ 	struct thread *res;

	down_read(&table->lock);
	res = __threads_table_entry__get_last_match(table, tid);
- 	if (res)
- 		return res;
-
- 	p = &table->entries.rb_root.rb_node;
- 	while (*p != NULL) {
- 		struct rb_node *parent = *p;
- 		struct thread *th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
- 		if (thread__tid(th) == tid) {
- 			res = thread__get(th);
- 			break;
- 		}
-
- 		if (tid < thread__tid(th))
- 			p = &(*p)->rb_left;
- 		else
- 			p = &(*p)->rb_right;
+ 	if (!res) {
+ 		if (hashmap__find(&table->shard, tid, &res))
+ 			res = thread__get(res);
	}
	up_read(&table->lock);
	if (res)
@@ -118,49 +109,25 @@ struct thread *threads__find(struct threads *threads, pid_t tid)
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
{
	struct threads_table_entry *table = threads__table(threads, tid);
- 	struct rb_node **p;
- 	struct rb_node *parent = NULL;
	struct thread *res = NULL;
- 	struct thread_rb_node *nd;
- 	bool leftmost = true;

	*created = false;
	down_write(&table->lock);
- 	p = &table->entries.rb_root.rb_node;
- 	while (*p != NULL) {
- 		struct thread *th;
-
- 		parent = *p;
- 		th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
- 		if (thread__tid(th) == tid) {
- 			__threads_table_entry__set_last_match(table, th);
- 			res = thread__get(th);
- 			goto out_unlock;
- 		}
-
- 		if (tid < thread__tid(th))
- 			p = &(*p)->rb_left;
- 		else {
- 			leftmost = false;
- 			p = &(*p)->rb_right;
- 		}
- 	}
- 	nd = malloc(sizeof(*nd));
- 	if (nd == NULL)
- 		goto out_unlock;
	res = thread__new(pid, tid);
- 	if (!res)
- 		free(nd);
- 	else {
- 		*created = true;
- 		nd->thread = thread__get(res);
- 		rb_link_node(&nd->rb_node, parent, p);
- 		rb_insert_color_cached(&nd->rb_node, &table->entries, leftmost);
- 		++table->nr;
- 		__threads_table_entry__set_last_match(table, res);
+ 	if (res) {
+ 		if (hashmap__add(&table->shard, tid, res)) {
+ 			/* Add failed. Assume a race so find other entry. */
+ 			thread__put(res);
+ 			res = NULL;
+ 			if (hashmap__find(&table->shard, tid, &res))
+ 				res = thread__get(res);
+ 		} else {
+ 			res = thread__get(res);
+ 			*created = true;
+ 		}
+ 		if (res)
+ 			__threads_table_entry__set_last_match(table, res);
	}
- out_unlock:
	up_write(&table->lock);
	return res;
}
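One thing worth spelling out about the new `threads__findnew()` body: `hashmap__add()` does not replace an existing key, so a failing add is taken to mean another caller already inserted this tid, and the code falls back to looking up and reusing that winner's thread. A minimal, hypothetical user-space sketch of the same insert-or-reuse shape against the tools/lib hashmap API (the helper name and include path are illustrative, not from this patch):

```c
#include <stdio.h>
#include "hashmap.h"	/* the tools/lib/bpf hashmap copy; adjust the path to the build */

static size_t hash_fn(long key, void *ctx) { return key; }
static bool equal_fn(long a, long b, void *ctx) { return a == b; }

/*
 * Insert "val" under "key"; if some other caller already inserted that key,
 * drop ours and return the existing value instead -- the same shape as the
 * threads__findnew() fallback above.
 */
static void *get_or_insert(struct hashmap *map, long key, void *val)
{
	void *existing = NULL;

	if (hashmap__add(map, key, val) == 0)
		return val;		/* we won: the map now holds val */
	if (hashmap__find(map, key, &existing))
		return existing;	/* lost the race: reuse the winner's value */
	return NULL;			/* add failed for another reason, e.g. no memory */
}

int main(void)
{
	struct hashmap map;
	static int a = 1, b = 2;

	hashmap__init(&map, hash_fn, equal_fn, NULL);
	printf("%p\n", get_or_insert(&map, 42, &a));	/* inserts and returns &a */
	printf("%p\n", get_or_insert(&map, 42, &b));	/* returns &a, &b is not stored */
	hashmap__clear(&map);
	return 0;
}
```

In the patch itself the surrounding `thread__get()`/`thread__put()` calls are what keep the reference counts right: the map keeps one reference, and the caller is handed its own.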
@@ -169,57 +136,32 @@ void threads__remove_all_threads(struct threads *threads)
{
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];
- 		struct rb_node *nd;
+ 		struct hashmap_entry *cur, *tmp;
+ 		size_t bkt;

		down_write(&table->lock);
		__threads_table_entry__set_last_match(table, NULL);
- 		nd = rb_first_cached(&table->entries);
- 		while (nd) {
- 			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
-
- 			nd = rb_next(nd);
- 			thread__put(trb->thread);
- 			rb_erase_cached(&trb->rb_node, &table->entries);
- 			RB_CLEAR_NODE(&trb->rb_node);
- 			--table->nr;
+ 		hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) {
+ 			struct thread *old_value;

- 			free(trb);
+ 			hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value);
+ 			thread__put(old_value);
		}
- 		assert(table->nr == 0);
		up_write(&table->lock);
	}
}

void threads__remove(struct threads *threads, struct thread *thread)
{
- 	struct rb_node **p;
	struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
- 	pid_t tid = thread__tid(thread);
+ 	struct thread *old_value;

	down_write(&table->lock);
	if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
		__threads_table_entry__set_last_match(table, NULL);

- 	p = &table->entries.rb_root.rb_node;
- 	while (*p != NULL) {
- 		struct rb_node *parent = *p;
- 		struct thread_rb_node *nd = rb_entry(parent, struct thread_rb_node, rb_node);
- 		struct thread *th = nd->thread;
-
- 		if (RC_CHK_EQUAL(th, thread)) {
- 			thread__put(nd->thread);
- 			rb_erase_cached(&nd->rb_node, &table->entries);
- 			RB_CLEAR_NODE(&nd->rb_node);
- 			--table->nr;
- 			free(nd);
- 			break;
- 		}
-
- 		if (tid < thread__tid(th))
- 			p = &(*p)->rb_left;
- 		else
- 			p = &(*p)->rb_right;
- 	}
+ 	hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value);
+ 	thread__put(old_value);
	up_write(&table->lock);
}
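Both removal paths above lean on `hashmap__delete()` handing the stored value back through its `old_value` out-parameter, so the reference the map held can be dropped with `thread__put()`. In `threads__remove_all_threads()` the walk has to use the `_safe` variant of the iterator because entries are deleted mid-iteration. A small, hypothetical sketch of that delete-while-iterating pattern in isolation (the `drain()` helper is illustrative, not from the patch):

```c
#include "hashmap.h"	/* the tools/lib/bpf hashmap copy; adjust the path to the build */

/* Remove every entry from "map", releasing each stored object through put(). */
static void drain(struct hashmap *map, void (*put)(void *obj))
{
	struct hashmap_entry *cur, *tmp;
	size_t bkt;

	/*
	 * The _safe variant stashes the next entry in "tmp" before the body
	 * runs, so deleting "cur" under the walk is fine; the plain
	 * hashmap__for_each_entry() would chase a freed next pointer here.
	 */
	hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
		void *old_value;

		hashmap__delete(map, cur->key, /*old_key=*/NULL, &old_value);
		put(old_value);
	}
}
```

`hashmap__clear()` (used in `threads__exit()`) only frees the map's internal storage and never drops the thread references, which is exactly what this put-per-entry walk is for.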
@@ -229,12 +171,12 @@ int threads__for_each_thread(struct threads *threads,
{
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];
- 		struct rb_node *nd;
+ 		struct hashmap_entry *cur;
+ 		size_t bkt;

		down_read(&table->lock);
- 		for (nd = rb_first_cached(&table->entries); nd; nd = rb_next(nd)) {
- 			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
- 			int rc = fn(trb->thread, data);
+ 		hashmap__for_each_entry((&table->shard), cur, bkt) {
+ 			int rc = fn((struct thread *)cur->pvalue, data);

			if (rc != 0) {
				up_read(&table->lock);
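For reference, the `cur->key` and `cur->pvalue` accesses in the two iteration hunks come from the entry type the tools/lib hashmap exposes: keys and values are stored as `long`-sized words, with pointer views overlaid through unions. Paraphrased from that header (not part of this patch):

```c
/* Roughly how struct hashmap_entry is laid out in the tools/lib/bpf hashmap header. */
struct hashmap_entry {
	union {
		long key;
		const void *pkey;	/* pointer view of the key */
	};
	union {
		long value;
		void *pvalue;		/* pointer view of the value */
	};
	struct hashmap_entry *next;	/* chaining within a bucket */
};
```

So the loop body can hand `cur->pvalue` straight to `fn()` after a cast, and `threads__remove_all_threads()` can pass `cur->key` straight back into `hashmap__delete()`; no second lookup is needed.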