 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/limits.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 
@@ -1347,11 +1349,37 @@ static void pt_addr_filters_fini(struct perf_event *event)
 	event->hw.addr_filters = NULL;
 }
 
-static inline bool valid_kernel_ip(unsigned long ip)
+#ifdef CONFIG_X86_64
+static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
 {
-	return virt_addr_valid(ip) && kernel_ip(ip);
+	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
 }
 
+static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
+{
+	return canonical_address(vaddr, vaddr_bits) == vaddr;
+}
+
+/* Clamp to a canonical address greater-than-or-equal-to the address given */
+static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
+{
+	return is_canonical_address(vaddr, vaddr_bits) ?
+	       vaddr :
+	       -BIT_ULL(vaddr_bits - 1);
+}
+
+/* Clamp to a canonical address less-than-or-equal-to the address given */
+static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
+{
+	return is_canonical_address(vaddr, vaddr_bits) ?
+	       vaddr :
+	       BIT_ULL(vaddr_bits - 1) - 1;
+}
+#else
+#define clamp_to_ge_canonical_addr(x, y)	(x)
+#define clamp_to_le_canonical_addr(x, y)	(x)
+#endif
+
 static int pt_event_addr_filters_validate(struct list_head *filters)
 {
 	struct perf_addr_filter *filter;
@@ -1366,14 +1394,6 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
 		    filter->action == PERF_ADDR_FILTER_ACTION_START)
 			return -EOPNOTSUPP;
 
-		if (!filter->path.dentry) {
-			if (!valid_kernel_ip(filter->offset))
-				return -EINVAL;
-
-			if (!valid_kernel_ip(filter->offset + filter->size))
-				return -EINVAL;
-		}
-
 		if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
 			return -EOPNOTSUPP;
 	}
@@ -1397,9 +1417,26 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
 		if (filter->path.dentry && !fr[range].start) {
 			msr_a = msr_b = 0;
 		} else {
-			/* apply the offset */
-			msr_a = fr[range].start;
-			msr_b = msr_a + fr[range].size - 1;
+			unsigned long n = fr[range].size - 1;
+			unsigned long a = fr[range].start;
+			unsigned long b;
+
+			if (a > ULONG_MAX - n)
+				b = ULONG_MAX;
+			else
+				b = a + n;
+			/*
+			 * Apply the offset. 64-bit addresses written to the
+			 * MSRs must be canonical, but the range can encompass
+			 * non-canonical addresses. Since software cannot
+			 * execute at non-canonical addresses, adjusting to
+			 * canonical addresses does not affect the result of the
+			 * address filter.
+			 */
+			msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
+			msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
+			if (msr_b < msr_a)
+				msr_a = msr_b = 0;
 		}
 
 		filters->filter[range].msr_a = msr_a;
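
For reference, the clamp helpers added above are self-contained enough to exercise in user space. Below is a minimal sketch, not part of the patch: the typedefs, the BIT_ULL macro and main() are scaffolding added here for illustration, while the four helpers are copied from the hunk above. It checks the 48-bit case, where the canonical halves are 0x0..0x00007fffffffffff and 0xffff800000000000..0xffffffffffffffff. Like the kernel code it mirrors, the sign-extension in canonical_address() relies on gcc/clang arithmetic-shift behaviour for signed values.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t  s64;
typedef uint8_t  u8;

#define BIT_ULL(n) (1ULL << (n))

/* Sign-extend from bit (vaddr_bits - 1), as in the patch. */
static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return canonical_address(vaddr, vaddr_bits) == vaddr;
}

/* Lowest canonical address >= vaddr: the start of the upper half. */
static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
{
	return is_canonical_address(vaddr, vaddr_bits) ?
	       vaddr : -BIT_ULL(vaddr_bits - 1);
}

/* Highest canonical address <= vaddr: the end of the lower half. */
static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
{
	return is_canonical_address(vaddr, vaddr_bits) ?
	       vaddr : BIT_ULL(vaddr_bits - 1) - 1;
}

int main(void)
{
	/* First non-canonical address with 48-bit virtual addressing. */
	u64 hole = 0x0000800000000000ULL;

	/* A non-canonical address snaps to the nearest canonical boundary. */
	assert(clamp_to_ge_canonical_addr(hole, 48) == 0xffff800000000000ULL);
	assert(clamp_to_le_canonical_addr(hole, 48) == 0x00007fffffffffffULL);

	/* Canonical inputs pass through unchanged. */
	assert(clamp_to_ge_canonical_addr(0xffffffff81000000ULL, 48) ==
	       0xffffffff81000000ULL);

	printf("canonical clamp checks passed\n");
	return 0;
}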
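
Similarly, the end-of-range computation in pt_event_addr_filters_sync() saturates rather than wraps when fr[range].start is near the top of the address space. A small standalone sketch of the same saturating-add idiom; the helper name filter_range_end() is invented here for illustration and does not exist in the patch:

#include <assert.h>
#include <limits.h>

/*
 * Saturating a + n, mirroring how b is computed in
 * pt_event_addr_filters_sync(): clamp to ULONG_MAX instead of wrapping.
 */
static unsigned long filter_range_end(unsigned long a, unsigned long n)
{
	return (a > ULONG_MAX - n) ? ULONG_MAX : a + n;
}

int main(void)
{
	/* Normal case: start + (size - 1). */
	assert(filter_range_end(0x400000UL, 0xfffUL) == 0x400fffUL);

	/*
	 * Near the top of the address space the sum would wrap around;
	 * the filter end is clamped to ULONG_MAX instead.
	 */
	assert(filter_range_end(ULONG_MAX - 2, 10) == ULONG_MAX);

	return 0;
}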