 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
+#include <linux/sched_clock.h>
 #include <linux/smp.h>
 
 /* ARMv8 Cortex-A53 specific event types. */
@@ -1168,28 +1169,47 @@ device_initcall(armv8_pmu_driver_init)
 void arch_perf_update_userpage(struct perf_event *event,
 			       struct perf_event_mmap_page *userpg, u64 now)
 {
-	u32 freq;
-	u32 shift;
+	struct clock_read_data *rd;
+	unsigned int seq;
+	u64 ns;
 
 	/*
 	 * Internal timekeeping for enabled/running/stopped times
 	 * is always computed with the sched_clock.
 	 */
-	freq = arch_timer_get_rate();
 	userpg->cap_user_time = 1;
+	userpg->cap_user_time_zero = 1;
+
+	do {
+		rd = sched_clock_read_begin(&seq);
+
+		userpg->time_mult = rd->mult;
+		userpg->time_shift = rd->shift;
+		userpg->time_zero = rd->epoch_ns;
+
+		/*
+		 * This isn't strictly correct, the ARM64 counter can be
+		 * 'short' and then we get funnies when it wraps. The correct
+		 * thing would be to extend the perf ABI with a cycle and mask
+		 * value, but because wrapping on ARM64 is very rare in
+		 * practise this 'works'.
+		 */
+		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
+		userpg->time_zero -= ns;
+
+	} while (sched_clock_read_retry(seq));
+
+	userpg->time_offset = userpg->time_zero - now;
 
-	clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
-			NSEC_PER_SEC, 0);
 	/*
 	 * time_shift is not expected to be greater than 31 due to
 	 * the original published conversion algorithm shifting a
 	 * 32-bit value (now specifies a 64-bit value) - refer
 	 * perf_event_mmap_page documentation in perf_event.h.
 	 */
-	if (shift == 32) {
-		shift = 31;
+	if (userpg->time_shift == 32) {
+		userpg->time_shift = 31;
 		userpg->time_mult >>= 1;
 	}
-	userpg->time_shift = (u16)shift;
-	userpg->time_offset = -now;
+
 }
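Because the hunk above sets time_zero to rd->epoch_ns minus the scaled rd->epoch_cyc, a reader that scales the current counter value by the same mult/shift pair and adds it to time_zero recovers (approximately) the current sched_clock time. A minimal userspace sketch of that consumer side, following the conversion documented for perf_event_mmap_page in include/uapi/linux/perf_event.h, is given below; it is arm64-specific, and the helper names read_counter() and counter_to_ns() are hypothetical, not part of this commit.

/*
 * Hypothetical userspace sketch (not part of this patch): convert the raw
 * arm64 virtual counter into a sched_clock timestamp using the fields the
 * kernel exports above, per the perf_event_mmap_page documentation.
 */
#include <stdint.h>
#include <linux/perf_event.h>

static inline uint64_t read_counter(void)
{
	uint64_t cyc;

	/* CNTVCT_EL0 is the counter userspace can read directly on arm64. */
	asm volatile("isb; mrs %0, cntvct_el0" : "=r" (cyc));
	return cyc;
}

/* pg points at the mmap()ed first page of a perf event ring buffer. */
static uint64_t counter_to_ns(volatile struct perf_event_mmap_page *pg)
{
	uint32_t seq, mult, shift;
	uint64_t cyc, zero, quot, rem;

	/* Only usable when the kernel advertises the conversion fields. */
	if (!pg->cap_user_time || !pg->cap_user_time_zero)
		return 0;

	do {
		/* pg->lock is even and unchanged across a consistent read. */
		seq = pg->lock;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);

		mult  = pg->time_mult;
		shift = pg->time_shift;
		zero  = pg->time_zero;
		cyc   = read_counter();

		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while (pg->lock != seq || (seq & 1));

	/*
	 * Same scaling the kernel applies, split so the multiply cannot
	 * overflow 64 bits: ns = time_zero + (cyc * mult) >> shift.
	 */
	quot = cyc >> shift;
	rem  = cyc & (((uint64_t)1 << shift) - 1);
	return zero + quot * mult + ((rem * mult) >> shift);
}

As the in-kernel comment in the hunk notes, this ignores wrap of a 'short' counter, so the sketch assumes the full 64-bit counter view.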