Skip to content

Commit 75d7ba3

Browse files
sean-jc authored and bonzini committed
perf tools: Use dedicated non-atomic clear/set bit helpers
Use the dedicated non-atomic helpers for {clear,set}_bit() and their test variants, i.e. the double-underscore versions. Depsite being defined in atomic.h, and despite the kernel versions being atomic in the kernel, tools' {clear,set}_bit() helpers aren't actually atomic. Move to the double-underscore versions so that the versions that are expected to be atomic (for kernel developers) can be made atomic without affecting users that don't want atomic operations. No functional change intended. Signed-off-by: Sean Christopherson <[email protected]> Acked-by: Namhyung Kim <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 7f2b47f commit 75d7ba3

File tree

15 files changed

+27
-27
lines changed

15 files changed

+27
-27
lines changed

tools/perf/bench/find-bit-bench.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ static int do_for_each_set_bit(unsigned int num_bits)
7070
bitmap_zero(to_test, num_bits);
7171
skip = num_bits / set_bits;
7272
for (i = 0; i < num_bits; i += skip)
73-
set_bit(i, to_test);
73+
__set_bit(i, to_test);
7474

7575
for (i = 0; i < outer_iterations; i++) {
7676
old = accumulator;

tools/perf/builtin-c2c.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -230,7 +230,7 @@ static void c2c_he__set_cpu(struct c2c_hist_entry *c2c_he,
230230
"WARNING: no sample cpu value"))
231231
return;
232232

233-
set_bit(sample->cpu, c2c_he->cpuset);
233+
__set_bit(sample->cpu, c2c_he->cpuset);
234234
}
235235

236236
static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
@@ -247,7 +247,7 @@ static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
247247
if (WARN_ONCE(node < 0, "WARNING: failed to find node\n"))
248248
return;
249249

250-
set_bit(node, c2c_he->nodeset);
250+
__set_bit(node, c2c_he->nodeset);
251251

252252
if (c2c_he->paddr != sample->phys_addr) {
253253
c2c_he->paddr_cnt++;
@@ -2318,7 +2318,7 @@ static int setup_nodes(struct perf_session *session)
23182318
continue;
23192319

23202320
perf_cpu_map__for_each_cpu(cpu, idx, map) {
2321-
set_bit(cpu.cpu, set);
2321+
__set_bit(cpu.cpu, set);
23222322

23232323
if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug"))
23242324
return -EINVAL;

tools/perf/builtin-kwork.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -216,7 +216,7 @@ static struct kwork_atom *atom_new(struct perf_kwork *kwork,
216216
list_add_tail(&page->list, &kwork->atom_page_list);
217217

218218
found_atom:
219-
set_bit(i, page->bitmap);
219+
__set_bit(i, page->bitmap);
220220
atom->time = sample->time;
221221
atom->prev = NULL;
222222
atom->page_addr = page;
@@ -229,8 +229,8 @@ static void atom_free(struct kwork_atom *atom)
229229
if (atom->prev != NULL)
230230
atom_free(atom->prev);
231231

232-
clear_bit(atom->bit_inpage,
233-
((struct kwork_atom_page *)atom->page_addr)->bitmap);
232+
__clear_bit(atom->bit_inpage,
233+
((struct kwork_atom_page *)atom->page_addr)->bitmap);
234234
}
235235

236236
static void atom_del(struct kwork_atom *atom)

tools/perf/builtin-record.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3555,7 +3555,7 @@ static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cp
35553555
/* Return ENODEV is input cpu is greater than max cpu */
35563556
if ((unsigned long)cpu.cpu > mask->nbits)
35573557
return -ENODEV;
3558-
set_bit(cpu.cpu, mask->bits);
3558+
__set_bit(cpu.cpu, mask->bits);
35593559
}
35603560

35613561
return 0;
@@ -3627,8 +3627,8 @@ static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map
36273627
pr_debug("nr_threads: %d\n", rec->nr_threads);
36283628

36293629
for (t = 0; t < rec->nr_threads; t++) {
3630-
set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
3631-
set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
3630+
__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
3631+
__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
36323632
if (verbose) {
36333633
pr_debug("thread_masks[%d]: ", t);
36343634
mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");

tools/perf/builtin-sched.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1573,7 +1573,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
15731573

15741574
if (sched->map.comp) {
15751575
cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
1576-
if (!test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
1576+
if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
15771577
sched->map.comp_cpus[cpus_nr++] = this_cpu;
15781578
new_cpu = true;
15791579
}

tools/perf/tests/bitmap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
1818

1919
if (map && bm) {
2020
for (i = 0; i < perf_cpu_map__nr(map); i++)
21-
set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
21+
__set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
2222
}
2323

2424
if (map)

tools/perf/tests/mem2node.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
3333
int i;
3434

3535
perf_cpu_map__for_each_cpu(cpu, i, map)
36-
set_bit(cpu.cpu, bm);
36+
__set_bit(cpu.cpu, bm);
3737
}
3838

3939
if (map)

tools/perf/util/affinity.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -58,14 +58,14 @@ void affinity__set(struct affinity *a, int cpu)
5858
return;
5959

6060
a->changed = true;
61-
set_bit(cpu, a->sched_cpus);
61+
__set_bit(cpu, a->sched_cpus);
6262
/*
6363
* We ignore errors because affinity is just an optimization.
6464
* This could happen for example with isolated CPUs or cpusets.
6565
* In this case the IPIs inside the kernel's perf API still work.
6666
*/
6767
sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
68-
clear_bit(cpu, a->sched_cpus);
68+
__clear_bit(cpu, a->sched_cpus);
6969
}
7070

7171
static void __affinity__cleanup(struct affinity *a)

tools/perf/util/header.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -79,12 +79,12 @@ struct perf_file_attr {
7979

8080
void perf_header__set_feat(struct perf_header *header, int feat)
8181
{
82-
set_bit(feat, header->adds_features);
82+
__set_bit(feat, header->adds_features);
8383
}
8484

8585
void perf_header__clear_feat(struct perf_header *header, int feat)
8686
{
87-
clear_bit(feat, header->adds_features);
87+
__clear_bit(feat, header->adds_features);
8888
}
8989

9090
bool perf_header__has_feat(const struct perf_header *header, int feat)
@@ -1358,7 +1358,7 @@ static int memory_node__read(struct memory_node *n, unsigned long idx)
13581358
rewinddir(dir);
13591359

13601360
for_each_memory(phys, dir) {
1361-
set_bit(phys, n->set);
1361+
__set_bit(phys, n->set);
13621362
}
13631363

13641364
closedir(dir);
@@ -3952,7 +3952,7 @@ int perf_file_header__read(struct perf_file_header *header,
39523952

39533953
if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
39543954
bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3955-
set_bit(HEADER_BUILD_ID, header->adds_features);
3955+
__set_bit(HEADER_BUILD_ID, header->adds_features);
39563956
}
39573957
}
39583958

tools/perf/util/mmap.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, i
111111
pr_err("Failed to allocate node mask for mbind: error %m\n");
112112
return -1;
113113
}
114-
set_bit(node_index, node_mask);
114+
__set_bit(node_index, node_mask);
115115
if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
116116
pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
117117
data, data + mmap_len, node_index);
@@ -256,7 +256,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
256256
for (idx = 0; idx < nr_cpus; idx++) {
257257
cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
258258
if (cpu__get_node(cpu) == node)
259-
set_bit(cpu.cpu, mask->bits);
259+
__set_bit(cpu.cpu, mask->bits);
260260
}
261261
}
262262

@@ -270,7 +270,7 @@ static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *
270270
if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
271271
build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
272272
else if (mp->affinity == PERF_AFFINITY_CPU)
273-
set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
273+
__set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
274274

275275
return 0;
276276
}

0 commit comments

Comments
 (0)