Skip to content

Commit 412a2ff

Browse files
captain5050 authored and namhyung committed
perf threads: Switch from rbtree to hashmap
The rbtree provides a sorting on entries but this is unused. Switch to using hashmap for O(1) rather than O(log n) find/insert/remove complexity.

Signed-off-by: Ian Rogers <[email protected]>
Acked-by: Namhyung Kim <[email protected]>
Cc: Yang Jihong <[email protected]>
Cc: Oliver Upton <[email protected]>
Signed-off-by: Namhyung Kim <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 93bb5b0 commit 412a2ff

File tree

2 files changed

+47
-105
lines changed

2 files changed

+47
-105
lines changed

tools/perf/util/threads.c

Lines changed: 44 additions & 102 deletions
Original file line numberDiff line numberDiff line change
@@ -3,25 +3,30 @@
33
#include "machine.h"
44
#include "thread.h"
55

6-
struct thread_rb_node {
7-
struct rb_node rb_node;
8-
struct thread *thread;
9-
};
10-
116
static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
127
{
138
/* Cast it to handle tid == -1 */
149
return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
1510
}
1611

12+
static size_t key_hash(long key, void *ctx __maybe_unused)
13+
{
14+
/* The table lookup removes low bit entropy, but this is just ignored here. */
15+
return key;
16+
}
17+
18+
static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
19+
{
20+
return key1 == key2;
21+
}
22+
1723
void threads__init(struct threads *threads)
1824
{
1925
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
2026
struct threads_table_entry *table = &threads->table[i];
2127

22-
table->entries = RB_ROOT_CACHED;
28+
hashmap__init(&table->shard, key_hash, key_equal, NULL);
2329
init_rwsem(&table->lock);
24-
table->nr = 0;
2530
table->last_match = NULL;
2631
}
2732
}
@@ -32,6 +37,7 @@ void threads__exit(struct threads *threads)
3237
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
3338
struct threads_table_entry *table = &threads->table[i];
3439

40+
hashmap__clear(&table->shard);
3541
exit_rwsem(&table->lock);
3642
}
3743
}
@@ -44,7 +50,7 @@ size_t threads__nr(struct threads *threads)
4450
struct threads_table_entry *table = &threads->table[i];
4551

4652
down_read(&table->lock);
47-
nr += table->nr;
53+
nr += hashmap__size(&table->shard);
4854
up_read(&table->lock);
4955
}
5056
return nr;
@@ -86,28 +92,13 @@ static void threads_table_entry__set_last_match(struct threads_table_entry *tabl
8692
struct thread *threads__find(struct threads *threads, pid_t tid)
8793
{
8894
struct threads_table_entry *table = threads__table(threads, tid);
89-
struct rb_node **p;
90-
struct thread *res = NULL;
95+
struct thread *res;
9196

9297
down_read(&table->lock);
9398
res = __threads_table_entry__get_last_match(table, tid);
94-
if (res)
95-
return res;
96-
97-
p = &table->entries.rb_root.rb_node;
98-
while (*p != NULL) {
99-
struct rb_node *parent = *p;
100-
struct thread *th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
101-
102-
if (thread__tid(th) == tid) {
103-
res = thread__get(th);
104-
break;
105-
}
106-
107-
if (tid < thread__tid(th))
108-
p = &(*p)->rb_left;
109-
else
110-
p = &(*p)->rb_right;
99+
if (!res) {
100+
if (hashmap__find(&table->shard, tid, &res))
101+
res = thread__get(res);
111102
}
112103
up_read(&table->lock);
113104
if (res)
@@ -118,49 +109,25 @@ struct thread *threads__find(struct threads *threads, pid_t tid)
118109
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
119110
{
120111
struct threads_table_entry *table = threads__table(threads, tid);
121-
struct rb_node **p;
122-
struct rb_node *parent = NULL;
123112
struct thread *res = NULL;
124-
struct thread_rb_node *nd;
125-
bool leftmost = true;
126113

127114
*created = false;
128115
down_write(&table->lock);
129-
p = &table->entries.rb_root.rb_node;
130-
while (*p != NULL) {
131-
struct thread *th;
132-
133-
parent = *p;
134-
th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
135-
136-
if (thread__tid(th) == tid) {
137-
__threads_table_entry__set_last_match(table, th);
138-
res = thread__get(th);
139-
goto out_unlock;
140-
}
141-
142-
if (tid < thread__tid(th))
143-
p = &(*p)->rb_left;
144-
else {
145-
leftmost = false;
146-
p = &(*p)->rb_right;
147-
}
148-
}
149-
nd = malloc(sizeof(*nd));
150-
if (nd == NULL)
151-
goto out_unlock;
152116
res = thread__new(pid, tid);
153-
if (!res)
154-
free(nd);
155-
else {
156-
*created = true;
157-
nd->thread = thread__get(res);
158-
rb_link_node(&nd->rb_node, parent, p);
159-
rb_insert_color_cached(&nd->rb_node, &table->entries, leftmost);
160-
++table->nr;
161-
__threads_table_entry__set_last_match(table, res);
117+
if (res) {
118+
if (hashmap__add(&table->shard, tid, res)) {
119+
/* Add failed. Assume a race so find other entry. */
120+
thread__put(res);
121+
res = NULL;
122+
if (hashmap__find(&table->shard, tid, &res))
123+
res = thread__get(res);
124+
} else {
125+
res = thread__get(res);
126+
*created = true;
127+
}
128+
if (res)
129+
__threads_table_entry__set_last_match(table, res);
162130
}
163-
out_unlock:
164131
up_write(&table->lock);
165132
return res;
166133
}
@@ -169,57 +136,32 @@ void threads__remove_all_threads(struct threads *threads)
169136
{
170137
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
171138
struct threads_table_entry *table = &threads->table[i];
172-
struct rb_node *nd;
139+
struct hashmap_entry *cur, *tmp;
140+
size_t bkt;
173141

174142
down_write(&table->lock);
175143
__threads_table_entry__set_last_match(table, NULL);
176-
nd = rb_first_cached(&table->entries);
177-
while (nd) {
178-
struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
179-
180-
nd = rb_next(nd);
181-
thread__put(trb->thread);
182-
rb_erase_cached(&trb->rb_node, &table->entries);
183-
RB_CLEAR_NODE(&trb->rb_node);
184-
--table->nr;
144+
hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) {
145+
struct thread *old_value;
185146

186-
free(trb);
147+
hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value);
148+
thread__put(old_value);
187149
}
188-
assert(table->nr == 0);
189150
up_write(&table->lock);
190151
}
191152
}
192153

193154
void threads__remove(struct threads *threads, struct thread *thread)
194155
{
195-
struct rb_node **p;
196156
struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
197-
pid_t tid = thread__tid(thread);
157+
struct thread *old_value;
198158

199159
down_write(&table->lock);
200160
if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
201161
__threads_table_entry__set_last_match(table, NULL);
202162

203-
p = &table->entries.rb_root.rb_node;
204-
while (*p != NULL) {
205-
struct rb_node *parent = *p;
206-
struct thread_rb_node *nd = rb_entry(parent, struct thread_rb_node, rb_node);
207-
struct thread *th = nd->thread;
208-
209-
if (RC_CHK_EQUAL(th, thread)) {
210-
thread__put(nd->thread);
211-
rb_erase_cached(&nd->rb_node, &table->entries);
212-
RB_CLEAR_NODE(&nd->rb_node);
213-
--table->nr;
214-
free(nd);
215-
break;
216-
}
217-
218-
if (tid < thread__tid(th))
219-
p = &(*p)->rb_left;
220-
else
221-
p = &(*p)->rb_right;
222-
}
163+
hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value);
164+
thread__put(old_value);
223165
up_write(&table->lock);
224166
}
225167

@@ -229,12 +171,12 @@ int threads__for_each_thread(struct threads *threads,
229171
{
230172
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
231173
struct threads_table_entry *table = &threads->table[i];
232-
struct rb_node *nd;
174+
struct hashmap_entry *cur;
175+
size_t bkt;
233176

234177
down_read(&table->lock);
235-
for (nd = rb_first_cached(&table->entries); nd; nd = rb_next(nd)) {
236-
struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
237-
int rc = fn(trb->thread, data);
178+
hashmap__for_each_entry((&table->shard), cur, bkt) {
179+
int rc = fn((struct thread *)cur->pvalue, data);
238180

239181
if (rc != 0) {
240182
up_read(&table->lock);

tools/perf/util/threads.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
#ifndef __PERF_THREADS_H
33
#define __PERF_THREADS_H
44

5-
#include <linux/rbtree.h>
5+
#include "hashmap.h"
66
#include "rwsem.h"
77

88
struct thread;
@@ -11,9 +11,9 @@ struct thread;
1111
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
1212

1313
struct threads_table_entry {
14-
struct rb_root_cached entries;
14+
/* Key is tid, value is struct thread. */
15+
struct hashmap shard;
1516
struct rw_semaphore lock;
16-
unsigned int nr;
1717
struct thread *last_match;
1818
};
1919

0 commit comments

Comments (0)