@@ -1177,6 +1177,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
+	userpg->cap_user_time_short = 0;
 
 	do {
 		rd = sched_clock_read_begin(&seq);
@@ -1187,13 +1188,13 @@ void arch_perf_update_userpage(struct perf_event *event,
 		userpg->time_mult = rd->mult;
 		userpg->time_shift = rd->shift;
 		userpg->time_zero = rd->epoch_ns;
+		userpg->time_cycles = rd->epoch_cyc;
+		userpg->time_mask = rd->sched_clock_mask;
 
 		/*
-		 * This isn't strictly correct, the ARM64 counter can be
-		 * 'short' and then we get funnies when it wraps. The correct
-		 * thing would be to extend the perf ABI with a cycle and mask
-		 * value, but because wrapping on ARM64 is very rare in
-		 * practise this 'works'.
+		 * Subtract the cycle base, such that software that
+		 * doesn't know about cap_user_time_short still 'works'
+		 * assuming no wraps.
 		 */
 		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
 		userpg->time_zero -= ns;
@@ -1219,4 +1220,5 @@ void arch_perf_update_userpage(struct perf_event *event,
 	 */
 	userpg->cap_user_time = 1;
 	userpg->cap_user_time_zero = 1;
+	userpg->cap_user_time_short = 1;
 }