
Commit 588d22b

captain5050 authored and namhyung committed
perf test: Expand user space event reading (rdpmc) tests
Test that disabling rdpmc support via /sys/bus/event_source/cpu*/rdpmc
disables reading in the mmap (libperf's read support will fall back to
using a system call). Test that all hybrid PMUs support rdpmc. Ensure
hybrid PMUs use the correct CPU to rdpmc the correct event. Previously
the test would open cycles or instructions with no extended type and
then rdpmc it on whatever CPU it ran; this could fail or skip depending
on which CPU the test was scheduled upon.

Signed-off-by: Ian Rogers <[email protected]>
Reviewed-by: Kan Liang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Namhyung Kim <[email protected]>
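For context, "reading in the mmap" refers to the lock-free self-monitoring read that the perf mmap page enables: user space samples pc->lock, pc->index and pc->offset, executes rdpmc, and retries if the sequence count changed. The sketch below follows the read sequence documented in include/uapi/linux/perf_event.h; the x86-only rdpmc() helper and the omitted pc->pmc_width sign extension are simplifications for illustration, and the test itself reads through libperf's perf_evsel__read(), which falls back to a read() system call when user space reading is unavailable.

/* Minimal sketch of a user-space counter read off the perf mmap page.
 * Assumes x86; a complete reader would also sign-extend the counter
 * value using pc->pmc_width. */
#include <linux/perf_event.h>
#include <stdint.h>

static uint64_t rdpmc(uint32_t counter)
{
	uint32_t low, high;

	__asm__ volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
	return low | ((uint64_t)high) << 32;
}

static uint64_t mmap_read_self(struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;

	do {
		seq = pc->lock;
		__asm__ volatile("" ::: "memory");	/* rmb */
		idx = pc->index;
		count = pc->offset;
		/* index == 0 means no counter is assigned (e.g. rdpmc was
		 * disabled via sysfs); callers must fall back to read(). */
		if (pc->cap_user_rdpmc && idx)
			count += rdpmc(idx - 1);
		__asm__ volatile("" ::: "memory");	/* rmb */
	} while (pc->lock != seq);

	return count;
}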
1 parent ce3d5af commit 588d22b

File tree: 4 files changed (+240 -78 lines)


tools/perf/tests/mmap-basic.c

Lines changed: 211 additions & 78 deletions
@@ -1,15 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <errno.h>
+#include <fcntl.h>
 #include <inttypes.h>
 #include <stdlib.h>
 #include <perf/cpumap.h>
 
+#include "cpumap.h"
 #include "debug.h"
 #include "event.h"
 #include "evlist.h"
 #include "evsel.h"
 #include "thread_map.h"
 #include "tests.h"
+#include "util/affinity.h"
 #include "util/mmap.h"
 #include "util/sample.h"
 #include <linux/err.h>
@@ -172,120 +175,232 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
 	return err;
 }
 
-static int test_stat_user_read(int event)
-{
-	struct perf_counts_values counts = { .val = 0 };
-	struct perf_thread_map *threads;
-	struct perf_evsel *evsel;
-	struct perf_event_mmap_page *pc;
-	struct perf_event_attr attr = {
-		.type = PERF_TYPE_HARDWARE,
-		.config = event,
-#ifdef __aarch64__
-		.config1 = 0x2, /* Request user access */
-#endif
-	};
-	int err, i, ret = TEST_FAIL;
-	bool opened = false, mapped = false;
+enum user_read_state {
+	USER_READ_ENABLED,
+	USER_READ_DISABLED,
+	USER_READ_UNKNOWN,
+};
 
-	threads = perf_thread_map__new_dummy();
-	TEST_ASSERT_VAL("failed to create threads", threads);
+static enum user_read_state set_user_read(struct perf_pmu *pmu, enum user_read_state enabled)
+{
+	char buf[2] = {0, '\n'};
+	ssize_t len;
+	int events_fd, rdpmc_fd;
+	enum user_read_state old_user_read = USER_READ_UNKNOWN;
+
+	if (enabled == USER_READ_UNKNOWN)
+		return USER_READ_UNKNOWN;
+
+	events_fd = perf_pmu__event_source_devices_fd();
+	if (events_fd < 0)
+		return USER_READ_UNKNOWN;
+
+	rdpmc_fd = perf_pmu__pathname_fd(events_fd, pmu->name, "rdpmc", O_RDWR);
+	if (rdpmc_fd < 0) {
+		close(events_fd);
+		return USER_READ_UNKNOWN;
+	}
 
-	perf_thread_map__set_pid(threads, 0, 0);
+	len = read(rdpmc_fd, buf, sizeof(buf));
+	if (len != sizeof(buf))
+		pr_debug("%s read failed\n", __func__);
 
-	evsel = perf_evsel__new(&attr);
-	TEST_ASSERT_VAL("failed to create evsel", evsel);
+	// Note, on Intel hybrid disabling on 1 PMU will implicitly disable on
+	// all the core PMUs.
+	old_user_read = (buf[0] == '1') ? USER_READ_ENABLED : USER_READ_DISABLED;
 
-	err = perf_evsel__open(evsel, NULL, threads);
-	if (err) {
-		pr_err("failed to open evsel: %s\n", strerror(-err));
-		ret = TEST_SKIP;
-		goto out;
+	if (enabled != old_user_read) {
+		buf[0] = (enabled == USER_READ_ENABLED) ? '1' : '0';
+		len = write(rdpmc_fd, buf, sizeof(buf));
+		if (len != sizeof(buf))
+			pr_debug("%s write failed\n", __func__);
 	}
-	opened = true;
+	close(rdpmc_fd);
+	close(events_fd);
+	return old_user_read;
+}
 
-	err = perf_evsel__mmap(evsel, 0);
-	if (err) {
-		pr_err("failed to mmap evsel: %s\n", strerror(-err));
-		goto out;
+static int test_stat_user_read(u64 event, enum user_read_state enabled)
+{
+	struct perf_pmu *pmu = NULL;
+	struct perf_thread_map *threads = perf_thread_map__new_dummy();
+	int ret = TEST_OK;
+
+	pr_err("User space counter reading %" PRIu64 "\n", event);
+	if (!threads) {
+		pr_err("User space counter reading [Failed to create threads]\n");
+		return TEST_FAIL;
 	}
-	mapped = true;
+	perf_thread_map__set_pid(threads, 0, 0);
 
-	pc = perf_evsel__mmap_base(evsel, 0, 0);
-	if (!pc) {
-		pr_err("failed to get mmapped address\n");
-		goto out;
-	}
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+		enum user_read_state saved_user_read_state = set_user_read(pmu, enabled);
+		struct perf_event_attr attr = {
+			.type = PERF_TYPE_HARDWARE,
+			.config = perf_pmus__supports_extended_type()
+				? event | ((u64)pmu->type << PERF_PMU_TYPE_SHIFT)
+				: event,
+#ifdef __aarch64__
+			.config1 = 0x2, /* Request user access */
+#endif
+		};
+		struct perf_evsel *evsel = NULL;
+		int err;
+		struct perf_event_mmap_page *pc;
+		bool mapped = false, opened = false, rdpmc_supported;
+		struct perf_counts_values counts = { .val = 0 };
+
+
+		pr_debug("User space counter reading for PMU %s\n", pmu->name);
+		/*
+		 * Restrict scheduling to only use the rdpmc on the CPUs the
+		 * event can be on. If the test doesn't run on the CPU of the
+		 * event then the event will be disabled and the pc->index test
+		 * will fail.
+		 */
+		if (pmu->cpus != NULL)
+			cpu_map__set_affinity(pmu->cpus);
+
+		/* Make the evsel. */
+		evsel = perf_evsel__new(&attr);
+		if (!evsel) {
+			pr_err("User space counter reading for PMU %s [Failed to allocate evsel]\n",
+			       pmu->name);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}
 
-	if (!pc->cap_user_rdpmc || !pc->index) {
-		pr_err("userspace counter access not %s\n",
-		       !pc->cap_user_rdpmc ? "supported" : "enabled");
-		ret = TEST_SKIP;
-		goto out;
-	}
-	if (pc->pmc_width < 32) {
-		pr_err("userspace counter width not set (%d)\n", pc->pmc_width);
-		goto out;
-	}
+		err = perf_evsel__open(evsel, NULL, threads);
+		if (err) {
+			pr_err("User space counter reading for PMU %s [Failed to open evsel]\n",
+			       pmu->name);
+			ret = TEST_SKIP;
+			goto cleanup;
+		}
+		opened = true;
+		err = perf_evsel__mmap(evsel, 0);
+		if (err) {
+			pr_err("User space counter reading for PMU %s [Failed to mmap evsel]\n",
+			       pmu->name);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}
+		mapped = true;
+
+		pc = perf_evsel__mmap_base(evsel, 0, 0);
+		if (!pc) {
+			pr_err("User space counter reading for PMU %s [Failed to get mmaped address]\n",
+			       pmu->name);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}
 
-	perf_evsel__read(evsel, 0, 0, &counts);
-	if (counts.val == 0) {
-		pr_err("failed to read value for evsel\n");
-		goto out;
-	}
+		if (saved_user_read_state == USER_READ_UNKNOWN)
+			rdpmc_supported = pc->cap_user_rdpmc && pc->index;
+		else
+			rdpmc_supported = (enabled == USER_READ_ENABLED);
 
-	for (i = 0; i < 5; i++) {
-		volatile int count = 0x10000 << i;
-		__u64 start, end, last = 0;
+		if (rdpmc_supported && (!pc->cap_user_rdpmc || !pc->index)) {
+			pr_err("User space counter reading for PMU %s [Failed unexpected supported counter access %d %d]\n",
+			       pmu->name, pc->cap_user_rdpmc, pc->index);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}
 
-		pr_debug("\tloop = %u, ", count);
+		if (!rdpmc_supported && pc->cap_user_rdpmc) {
+			pr_err("User space counter reading for PMU %s [Failed unexpected unsupported counter access %d]\n",
+			       pmu->name, pc->cap_user_rdpmc);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}
+
+		if (rdpmc_supported && pc->pmc_width < 32) {
+			pr_err("User space counter reading for PMU %s [Failed width not set %d]\n",
+			       pmu->name, pc->pmc_width);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}
 
 		perf_evsel__read(evsel, 0, 0, &counts);
-		start = counts.val;
+		if (counts.val == 0) {
+			pr_err("User space counter reading for PMU %s [Failed read]\n", pmu->name);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}
 
-		while (count--) ;
+		for (int i = 0; i < 5; i++) {
+			volatile int count = 0x10000 << i;
+			__u64 start, end, last = 0;
 
-		perf_evsel__read(evsel, 0, 0, &counts);
-		end = counts.val;
+			pr_debug("\tloop = %u, ", count);
 
-		if ((end - start) < last) {
-			pr_err("invalid counter data: end=%llu start=%llu last= %llu\n",
-			       end, start, last);
-			goto out;
-		}
-		last = end - start;
-		pr_debug("count = %llu\n", end - start);
-	}
-	ret = TEST_OK;
+			perf_evsel__read(evsel, 0, 0, &counts);
+			start = counts.val;
+
+			while (count--) ;
 
-out:
-	if (mapped)
-		perf_evsel__munmap(evsel);
-	if (opened)
-		perf_evsel__close(evsel);
-	perf_evsel__delete(evsel);
+			perf_evsel__read(evsel, 0, 0, &counts);
+			end = counts.val;
 
+			if ((end - start) < last) {
+				pr_err("User space counter reading for PMU %s [Failed invalid counter data: end=%llu start=%llu last= %llu]\n",
+					pmu->name, end, start, last);
+				ret = TEST_FAIL;
+				goto cleanup;
+			}
+			last = end - start;
+			pr_debug("count = %llu\n", last);
+		}
+		pr_debug("User space counter reading for PMU %s [Success]\n", pmu->name);
+cleanup:
+		if (mapped)
+			perf_evsel__munmap(evsel);
+		if (opened)
+			perf_evsel__close(evsel);
+		perf_evsel__delete(evsel);
+
+		/* If the affinity was changed, then put it back to all CPUs. */
+		if (pmu->cpus != NULL) {
+			struct perf_cpu_map *cpus = cpu_map__online();
+
+			cpu_map__set_affinity(cpus);
+			perf_cpu_map__put(cpus);
+		}
+		set_user_read(pmu, saved_user_read_state);
+	}
 	perf_thread_map__put(threads);
 	return ret;
 }
 
 static int test__mmap_user_read_instr(struct test_suite *test __maybe_unused,
 				      int subtest __maybe_unused)
 {
-	return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
+	return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS, USER_READ_ENABLED);
 }
 
 static int test__mmap_user_read_cycles(struct test_suite *test __maybe_unused,
 				       int subtest __maybe_unused)
 {
-	return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
+	return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES, USER_READ_ENABLED);
+}
+
+static int test__mmap_user_read_instr_disabled(struct test_suite *test __maybe_unused,
+					       int subtest __maybe_unused)
+{
+	return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS, USER_READ_DISABLED);
+}
+
+static int test__mmap_user_read_cycles_disabled(struct test_suite *test __maybe_unused,
+						int subtest __maybe_unused)
+{
+	return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES, USER_READ_DISABLED);
 }
 
 static struct test_case tests__basic_mmap[] = {
 	TEST_CASE_REASON("Read samples using the mmap interface",
 			 basic_mmap,
 			 "permissions"),
-	TEST_CASE_REASON("User space counter reading of instructions",
+	TEST_CASE_REASON_EXCLUSIVE("User space counter reading of instructions",
 			 mmap_user_read_instr,
 #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
 	(defined(__riscv) && __riscv_xlen == 64)
@@ -294,13 +409,31 @@ static struct test_case tests__basic_mmap[] = {
 			 "unsupported"
 #endif
 			 ),
-	TEST_CASE_REASON("User space counter reading of cycles",
+	TEST_CASE_REASON_EXCLUSIVE("User space counter reading of cycles",
 			 mmap_user_read_cycles,
 #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
 	(defined(__riscv) && __riscv_xlen == 64)
 			 "permissions"
 #else
 			 "unsupported"
+#endif
+			 ),
+	TEST_CASE_REASON_EXCLUSIVE("User space counter disabling instructions",
+			 mmap_user_read_instr_disabled,
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+	(defined(__riscv) && __riscv_xlen == 64)
+			 "permissions"
+#else
+			 "unsupported"
+#endif
+			 ),
+	TEST_CASE_REASON_EXCLUSIVE("User space counter disabling cycles",
+			 mmap_user_read_cycles_disabled,
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+	(defined(__riscv) && __riscv_xlen == 64)
+			 "permissions"
+#else
+			 "unsupported"
#endif
 			 ),
 	{ .name = NULL, }
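
Two details in the loop above are worth spelling out. First, on hybrid systems the target PMU is selected by or-ing (u64)pmu->type << PERF_PMU_TYPE_SHIFT into the hardware event config when extended types are supported, which is what lets the test open cycles or instructions on each core PMU rather than on whichever PMU is the default. Second, pc->index is only valid while the reading thread runs on a CPU the event is scheduled on, hence the cpu_map__set_affinity(pmu->cpus) call before the checks and the restore to the online CPU map afterwards. As a rough illustration of the affinity mechanism using only the libc API (pin_to_cpu() is a hypothetical helper, not part of the perf code):

/* Hypothetical helper showing what restricting affinity does under the
 * hood; the test itself uses the perf helper cpu_map__set_affinity(). */
#define _GNU_SOURCE
#include <sched.h>

static int pin_to_cpu(int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	/* pid 0 == calling thread. While pinned to one of the event's
	 * CPUs, the event stays scheduled and pc->index is non-zero. */
	return sched_setaffinity(0, sizeof(set), &set);
}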

tools/perf/tests/tests.h

Lines changed: 9 additions & 0 deletions
@@ -71,6 +71,15 @@ struct test_suite {
 		.exclusive = true, \
 	}
 
+#define TEST_CASE_REASON_EXCLUSIVE(description, _name, _reason) \
+	{ \
+		.name = #_name, \
+		.desc = description, \
+		.run_case = test__##_name, \
+		.skip_reason = _reason, \
+		.exclusive = true, \
+	}
+
 #define DEFINE_SUITE(description, _name) \
 	struct test_case tests__##_name[] = { \
 		TEST_CASE(description, _name), \
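
TEST_CASE_REASON_EXCLUSIVE mirrors the existing TEST_CASE_REASON but additionally sets .exclusive = true, so the test runner will not execute the case in parallel with other tests. That matters for the rdpmc cases above because they mutate global state: the sysfs rdpmc setting and the process's CPU affinity. A hypothetical suite entry, for illustration only (the real call sites are in mmap-basic.c above):

/* Hypothetical usage sketch; the macro pastes _name into test__##_name,
 * so a matching test__example_case function must exist. */
static int test__example_case(struct test_suite *test __maybe_unused,
			      int subtest __maybe_unused)
{
	return TEST_OK;
}

static struct test_case tests__example[] = {
	TEST_CASE_REASON_EXCLUSIVE("Example exclusive test case",
				   example_case,
				   "permissions"),
	{ .name = NULL, }
};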
