Commit d030456

ahunter6 authored and KAGA-KOKO committed
clocksource: Make watchdog and suspend-timing multiplication overflow safe
Kernel timekeeping is designed to keep the change in cycles (since the last
timer interrupt) below max_cycles, which prevents multiplication overflow
when converting cycles to nanoseconds. However, if timer interrupts stop,
the clocksource_cyc2ns() calculation will eventually overflow. Add
protection against that. Simplify by folding together clocksource_delta()
and clocksource_cyc2ns() into cycles_to_nsec_safe(). Check against
max_cycles, falling back to a slower higher precision calculation.

Suggested-by: Thomas Gleixner <[email protected]>
Signed-off-by: Adrian Hunter <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 135225a commit d030456
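
The changelog describes the overflow precisely: clocksource_cyc2ns() converts cycles to nanoseconds as (cycles * mult) >> shift with a plain 64-bit multiply, which is only safe while the cycle delta stays below max_cycles. The sketch below is an illustrative userspace reduction of that idea and is not part of the patch: the mult/shift values are made up (roughly what a ~2.4 GHz counter would use), and the GCC/Clang unsigned __int128 extension stands in for the kernel's mul_u64_u32_shr() slow path.

/*
 * Illustrative userspace sketch (not from the patch): shows why
 * "(delta * mult) >> shift" wraps in 64 bits for large deltas and how
 * a widened multiply stays correct.
 */
#include <stdint.h>
#include <stdio.h>

/* Fast path: 64-bit multiply, valid only while delta * mult fits in 64 bits
 * (the condition the kernel bounds via max_cycles). */
static uint64_t cyc2ns_fast(uint64_t delta, uint32_t mult, uint32_t shift)
{
	return (delta * mult) >> shift;
}

/* Slow path: 128-bit intermediate, safe for any 64-bit delta
 * (analogous to mul_u64_u32_shr() in the kernel). */
static uint64_t cyc2ns_safe(uint64_t delta, uint32_t mult, uint32_t shift)
{
	return (uint64_t)(((unsigned __int128)delta * mult) >> shift);
}

int main(void)
{
	/* Made-up but plausible scaling for a ~2.4 GHz counter -> ns. */
	const uint32_t mult = 6990506, shift = 24;
	/* Roughly 30 minutes worth of cycles: past the 64-bit overflow point. */
	const uint64_t delta = 2400000000ULL * 1800;

	printf("fast (wraps)   : %llu ns\n",
	       (unsigned long long)cyc2ns_fast(delta, mult, shift));
	printf("safe (128-bit) : %llu ns\n",
	       (unsigned long long)cyc2ns_safe(delta, mult, shift));
	return 0;
}

Keeping the likely() branch in cycles_to_nsec_safe() means the common watchdog case still pays only for the cheap 64-bit multiply; the wider multiply is taken only once delta exceeds cs->max_cycles, e.g. after a long stretch without timer interrupts.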

kernel/time/clocksource.c

Lines changed: 20 additions & 22 deletions
@@ -20,6 +20,16 @@
 #include "tick-internal.h"
 #include "timekeeping_internal.h"
 
+static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
+{
+	u64 delta = clocksource_delta(end, start, cs->mask);
+
+	if (likely(delta < cs->max_cycles))
+		return clocksource_cyc2ns(delta, cs->mult, cs->shift);
+
+	return mul_u64_u32_shr(delta, cs->mult, cs->shift);
+}
+
 /**
  * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
  * @mult:	pointer to mult variable
@@ -222,8 +232,8 @@ enum wd_read_status {
 static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
 {
 	unsigned int nretries, max_retries;
-	u64 wd_end, wd_end2, wd_delta;
 	int64_t wd_delay, wd_seq_delay;
+	u64 wd_end, wd_end2;
 
 	max_retries = clocksource_get_max_watchdog_retry();
 	for (nretries = 0; nretries <= max_retries; nretries++) {
@@ -234,9 +244,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
 		wd_end2 = watchdog->read(watchdog);
 		local_irq_enable();
 
-		wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
-		wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
-					      watchdog->shift);
+		wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
 		if (wd_delay <= WATCHDOG_MAX_SKEW) {
 			if (nretries > 1 || nretries >= max_retries) {
 				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
@@ -254,8 +262,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
 		 * report system busy, reinit the watchdog and skip the current
 		 * watchdog test.
 		 */
-		wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
-		wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
+		wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2);
 		if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
 			goto skip_test;
 	}
@@ -366,8 +373,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
 		delta = (csnow_end - csnow_mid) & cs->mask;
 		if (delta < 0)
 			cpumask_set_cpu(cpu, &cpus_ahead);
-		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
-		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
+		cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end);
 		if (cs_nsec > cs_nsec_max)
 			cs_nsec_max = cs_nsec;
 		if (cs_nsec < cs_nsec_min)
@@ -398,8 +404,8 @@ static inline void clocksource_reset_watchdog(void)
 
 static void clocksource_watchdog(struct timer_list *unused)
 {
-	u64 csnow, wdnow, cslast, wdlast, delta;
 	int64_t wd_nsec, cs_nsec, interval;
+	u64 csnow, wdnow, cslast, wdlast;
 	int next_cpu, reset_pending;
 	struct clocksource *cs;
 	enum wd_read_status read_ret;
@@ -456,12 +462,8 @@ static void clocksource_watchdog(struct timer_list *unused)
 			continue;
 		}
 
-		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
-		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
-					     watchdog->shift);
-
-		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
-		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
+		wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
+		cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
 		wdlast = cs->wd_last; /* save these in case we print them */
 		cslast = cs->cs_last;
 		cs->cs_last = csnow;
@@ -832,7 +834,7 @@ void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
  */
 u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
 {
-	u64 now, delta, nsec = 0;
+	u64 now, nsec = 0;
 
 	if (!suspend_clocksource)
 		return 0;
@@ -847,12 +849,8 @@ u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
 	else
 		now = suspend_clocksource->read(suspend_clocksource);
 
-	if (now > suspend_start) {
-		delta = clocksource_delta(now, suspend_start,
-					  suspend_clocksource->mask);
-		nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
-				       suspend_clocksource->shift);
-	}
+	if (now > suspend_start)
+		nsec = cycles_to_nsec_safe(suspend_clocksource, suspend_start, now);
 
 	/*
 	 * Disable the suspend timer to save power if current clocksource is
