@@ -1,15 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <errno.h>
+#include <fcntl.h>
 #include <inttypes.h>
 #include <stdlib.h>
 #include <perf/cpumap.h>

+#include "cpumap.h"
 #include "debug.h"
 #include "event.h"
 #include "evlist.h"
 #include "evsel.h"
 #include "thread_map.h"
 #include "tests.h"
+#include "util/affinity.h"
 #include "util/mmap.h"
 #include "util/sample.h"
 #include <linux/err.h>
@@ -172,120 +175,232 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
 	return err;
 }

-static int test_stat_user_read(int event)
-{
-	struct perf_counts_values counts = { .val = 0 };
-	struct perf_thread_map *threads;
-	struct perf_evsel *evsel;
-	struct perf_event_mmap_page *pc;
-	struct perf_event_attr attr = {
-		.type	= PERF_TYPE_HARDWARE,
-		.config	= event,
-#ifdef __aarch64__
-		.config1 = 0x2,		/* Request user access */
-#endif
-	};
-	int err, i, ret = TEST_FAIL;
-	bool opened = false, mapped = false;
+enum user_read_state {
+	USER_READ_ENABLED,
+	USER_READ_DISABLED,
+	USER_READ_UNKNOWN,
+};

-	threads = perf_thread_map__new_dummy();
-	TEST_ASSERT_VAL("failed to create threads", threads);
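+/*
+ * Flip the PMU's sysfs "rdpmc" file (under /sys/bus/event_source/devices/)
+ * to the requested state, returning the previous state so the caller can
+ * restore it, or USER_READ_UNKNOWN if the file couldn't be accessed.
+ */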
+static enum user_read_state set_user_read(struct perf_pmu *pmu, enum user_read_state enabled)
+{
+	char buf[2] = {0, '\n'};
+	ssize_t len;
+	int events_fd, rdpmc_fd;
+	enum user_read_state old_user_read = USER_READ_UNKNOWN;
+
+	if (enabled == USER_READ_UNKNOWN)
+		return USER_READ_UNKNOWN;
+
+	events_fd = perf_pmu__event_source_devices_fd();
+	if (events_fd < 0)
+		return USER_READ_UNKNOWN;
+
+	rdpmc_fd = perf_pmu__pathname_fd(events_fd, pmu->name, "rdpmc", O_RDWR);
+	if (rdpmc_fd < 0) {
+		close(events_fd);
+		return USER_READ_UNKNOWN;
+	}

-	perf_thread_map__set_pid(threads, 0, 0);
+	len = read(rdpmc_fd, buf, sizeof(buf));
+	if (len != sizeof(buf))
+		pr_debug("%s read failed\n", __func__);

-	evsel = perf_evsel__new(&attr);
-	TEST_ASSERT_VAL("failed to create evsel", evsel);
+	// Note, on Intel hybrid disabling on 1 PMU will implicitly disable on
+	// all the core PMUs.
+	old_user_read = (buf[0] == '1') ? USER_READ_ENABLED : USER_READ_DISABLED;

-	err = perf_evsel__open(evsel, NULL, threads);
-	if (err) {
-		pr_err("failed to open evsel: %s\n", strerror(-err));
-		ret = TEST_SKIP;
-		goto out;
+	if (enabled != old_user_read) {
+		buf[0] = (enabled == USER_READ_ENABLED) ? '1' : '0';
+		len = write(rdpmc_fd, buf, sizeof(buf));
+		if (len != sizeof(buf))
+			pr_debug("%s write failed\n", __func__);
 	}
-	opened = true;
+	close(rdpmc_fd);
+	close(events_fd);
+	return old_user_read;
+}

-	err = perf_evsel__mmap(evsel, 0);
-	if (err) {
-		pr_err("failed to mmap evsel: %s\n", strerror(-err));
-		goto out;
+static int test_stat_user_read(u64 event, enum user_read_state enabled)
+{
+	struct perf_pmu *pmu = NULL;
+	struct perf_thread_map *threads = perf_thread_map__new_dummy();
+	int ret = TEST_OK;
+
+	pr_err("User space counter reading %" PRIu64 "\n", event);
+	if (!threads) {
+		pr_err("User space counter reading [Failed to create threads]\n");
+		return TEST_FAIL;
 	}
-	mapped = true;
+	perf_thread_map__set_pid(threads, 0, 0);

-	pc = perf_evsel__mmap_base(evsel, 0, 0);
-	if (!pc) {
-		pr_err("failed to get mmapped address\n");
-		goto out;
-	}
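+	/* Run the test for each core PMU; hybrid systems expose more than one. */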
+	while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
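+		/* Switch the sysfs rdpmc state to the one under test, saving the old state. */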
+		enum user_read_state saved_user_read_state = set_user_read(pmu, enabled);
+		struct perf_event_attr attr = {
+			.type = PERF_TYPE_HARDWARE,
+			.config = perf_pmus__supports_extended_type()
+				? event | ((u64)pmu->type << PERF_PMU_TYPE_SHIFT)
+				: event,
+#ifdef __aarch64__
+			.config1 = 0x2, /* Request user access */
+#endif
+		};
+		struct perf_evsel *evsel = NULL;
+		int err;
+		struct perf_event_mmap_page *pc;
+		bool mapped = false, opened = false, rdpmc_supported;
+		struct perf_counts_values counts = { .val = 0 };
+
+
+		pr_debug("User space counter reading for PMU %s\n", pmu->name);
+		/*
+		 * Restrict scheduling to only use the rdpmc on the CPUs the
+		 * event can be on. If the test doesn't run on the CPU of the
+		 * event then the event will be disabled and the pc->index test
+		 * will fail.
+		 */
+		if (pmu->cpus != NULL)
+			cpu_map__set_affinity(pmu->cpus);
+
+		/* Make the evsel. */
+		evsel = perf_evsel__new(&attr);
+		if (!evsel) {
+			pr_err("User space counter reading for PMU %s [Failed to allocate evsel]\n",
+			       pmu->name);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}

-	if (!pc->cap_user_rdpmc || !pc->index) {
-		pr_err("userspace counter access not %s\n",
-			!pc->cap_user_rdpmc ? "supported" : "enabled");
-		ret = TEST_SKIP;
-		goto out;
-	}
-	if (pc->pmc_width < 32) {
-		pr_err("userspace counter width not set (%d)\n", pc->pmc_width);
-		goto out;
-	}
+		err = perf_evsel__open(evsel, NULL, threads);
+		if (err) {
+			pr_err("User space counter reading for PMU %s [Failed to open evsel]\n",
+			       pmu->name);
+			ret = TEST_SKIP;
+			goto cleanup;
+		}
+		opened = true;
+		err = perf_evsel__mmap(evsel, 0);
+		if (err) {
+			pr_err("User space counter reading for PMU %s [Failed to mmap evsel]\n",
+			       pmu->name);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}
+		mapped = true;
+
+		pc = perf_evsel__mmap_base(evsel, 0, 0);
+		if (!pc) {
+			pr_err("User space counter reading for PMU %s [Failed to get mmaped address]\n",
+			       pmu->name);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}

-	perf_evsel__read(evsel, 0, 0, &counts);
-	if (counts.val == 0) {
-		pr_err("failed to read value for evsel\n");
-		goto out;
-	}
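+		/*
+		 * If the sysfs rdpmc state was unknown, trust whatever the mmap
+		 * page reports; otherwise expect it to match the state set above.
+		 */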
+		if (saved_user_read_state == USER_READ_UNKNOWN)
+			rdpmc_supported = pc->cap_user_rdpmc && pc->index;
+		else
+			rdpmc_supported = (enabled == USER_READ_ENABLED);

-	for (i = 0; i < 5; i++) {
-		volatile int count = 0x10000 << i;
-		__u64 start, end, last = 0;
+		if (rdpmc_supported && (!pc->cap_user_rdpmc || !pc->index)) {
+			pr_err("User space counter reading for PMU %s [Failed unexpected supported counter access %d %d]\n",
+			       pmu->name, pc->cap_user_rdpmc, pc->index);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}

-		pr_debug("\tloop = %u, ", count);
+		if (!rdpmc_supported && pc->cap_user_rdpmc) {
+			pr_err("User space counter reading for PMU %s [Failed unexpected unsupported counter access %d]\n",
+			       pmu->name, pc->cap_user_rdpmc);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}
+
+		if (rdpmc_supported && pc->pmc_width < 32) {
+			pr_err("User space counter reading for PMU %s [Failed width not set %d]\n",
+			       pmu->name, pc->pmc_width);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}

 		perf_evsel__read(evsel, 0, 0, &counts);
-		start = counts.val;
+		if (counts.val == 0) {
+			pr_err("User space counter reading for PMU %s [Failed read]\n", pmu->name);
+			ret = TEST_FAIL;
+			goto cleanup;
+		}

-		while (count--) ;
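+		/* Busy loops of doubling length; each measured delta must be >= the previous one. */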
+		for (int i = 0; i < 5; i++) {
+			volatile int count = 0x10000 << i;
+			__u64 start, end, last = 0;

-		perf_evsel__read(evsel, 0, 0, &counts);
-		end = counts.val;
+			pr_debug("\tloop = %u, ", count);

-		if ((end - start) < last) {
-			pr_err("invalid counter data: end=%llu start=%llu last= %llu\n",
-				end, start, last);
-			goto out;
-		}
-		last = end - start;
-		pr_debug("count = %llu\n", end - start);
-	}
-	ret = TEST_OK;
+			perf_evsel__read(evsel, 0, 0, &counts);
+			start = counts.val;
+
+			while (count--) ;

-out:
-	if (mapped)
-		perf_evsel__munmap(evsel);
-	if (opened)
-		perf_evsel__close(evsel);
-	perf_evsel__delete(evsel);
+			perf_evsel__read(evsel, 0, 0, &counts);
+			end = counts.val;

+			if ((end - start) < last) {
+				pr_err("User space counter reading for PMU %s [Failed invalid counter data: end=%llu start=%llu last= %llu]\n",
+				       pmu->name, end, start, last);
+				ret = TEST_FAIL;
+				goto cleanup;
+			}
+			last = end - start;
+			pr_debug("count = %llu\n", last);
+		}
+		pr_debug("User space counter reading for PMU %s [Success]\n", pmu->name);
+cleanup:
+		if (mapped)
+			perf_evsel__munmap(evsel);
+		if (opened)
+			perf_evsel__close(evsel);
+		perf_evsel__delete(evsel);
+
+		/* If the affinity was changed, then put it back to all CPUs. */
+		if (pmu->cpus != NULL) {
+			struct perf_cpu_map *cpus = cpu_map__online();
+
+			cpu_map__set_affinity(cpus);
+			perf_cpu_map__put(cpus);
+		}
+		set_user_read(pmu, saved_user_read_state);
+	}

 	perf_thread_map__put(threads);
 	return ret;
 }

 static int test__mmap_user_read_instr(struct test_suite *test __maybe_unused,
 				      int subtest __maybe_unused)
 {
-	return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
+	return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS, USER_READ_ENABLED);
 }

 static int test__mmap_user_read_cycles(struct test_suite *test __maybe_unused,
 				       int subtest __maybe_unused)
 {
-	return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
+	return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES, USER_READ_ENABLED);
+}
+
+static int test__mmap_user_read_instr_disabled(struct test_suite *test __maybe_unused,
+					       int subtest __maybe_unused)
+{
+	return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS, USER_READ_DISABLED);
+}
+
+static int test__mmap_user_read_cycles_disabled(struct test_suite *test __maybe_unused,
+						int subtest __maybe_unused)
+{
+	return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES, USER_READ_DISABLED);
 }

 static struct test_case tests__basic_mmap[] = {
 	TEST_CASE_REASON("Read samples using the mmap interface",
 			 basic_mmap,
 			 "permissions"),
-	TEST_CASE_REASON("User space counter reading of instructions",
+	TEST_CASE_REASON_EXCLUSIVE("User space counter reading of instructions",
 			 mmap_user_read_instr,
 #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
 	(defined(__riscv) && __riscv_xlen == 64)
@@ -294,13 +409,31 @@ static struct test_case tests__basic_mmap[] = {
 			 "unsupported"
 #endif
 			 ),
-	TEST_CASE_REASON("User space counter reading of cycles",
+	TEST_CASE_REASON_EXCLUSIVE("User space counter reading of cycles",
 			 mmap_user_read_cycles,
 #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
 	(defined(__riscv) && __riscv_xlen == 64)
 			 "permissions"
 #else
 			 "unsupported"
+#endif
+			 ),
+	TEST_CASE_REASON_EXCLUSIVE("User space counter disabling instructions",
+				   mmap_user_read_instr_disabled,
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+	(defined(__riscv) && __riscv_xlen == 64)
+				   "permissions"
+#else
+				   "unsupported"
+#endif
+				   ),
+	TEST_CASE_REASON_EXCLUSIVE("User space counter disabling cycles",
+				   mmap_user_read_cycles_disabled,
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+	(defined(__riscv) && __riscv_xlen == 64)
+				   "permissions"
+#else
+				   "unsupported"
 #endif
 			 ),
 	{ .name = NULL, }
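
Background note: the mmap-page fields the test checks (cap_user_rdpmc, index,
offset, pmc_width) exist so a thread can read its own counter without a system
call. A minimal sketch of that read sequence, following the seqlock pattern
documented in include/uapi/linux/perf_event.h; x86 is assumed and the helper
names here are illustrative, not part of the patch:

#include <linux/perf_event.h>
#include <stdint.h>

/* x86 rdpmc instruction; pc->index is the hardware counter number plus 1. */
static uint64_t rdpmc(uint32_t counter)
{
	uint32_t low, high;

	__asm__ volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
	return low | ((uint64_t)high) << 32;
}

/*
 * Read the event count without a syscall. pc points at the page obtained by
 * mmap()ing the perf event fd; pc->lock is a seqlock generation count, so
 * retry whenever the kernel updated the page mid-read.
 */
static uint64_t mmap_read_self(struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;

	do {
		seq = pc->lock;
		__sync_synchronize();
		idx = pc->index;
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx) {
			int shift = 64 - pc->pmc_width;
			uint64_t pmc = rdpmc(idx - 1);

			/* Sign-extend the pmc_width-bit raw value to 64 bits. */
			count += (uint64_t)((int64_t)(pmc << shift) >> shift);
		}
		__sync_synchronize();
	} while (pc->lock != seq);

	return count;
}

When cap_user_rdpmc is clear (as in the new "disabling" subtests), this path is
unavailable and a reader must fall back to read() on the event fd, which is
what libperf's perf_evsel__read() does, so the count checks above still pass.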