@@ -429,6 +429,14 @@ static void update_fast_timekeeper(const struct tk_read_base *tkr,
 	memcpy(base + 1, base, sizeof(*base));
 }
 
+static __always_inline u64 fast_tk_get_delta_ns(struct tk_read_base *tkr)
+{
+	u64 delta, cycles = tk_clock_read(tkr);
+
+	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
+	return timekeeping_delta_to_ns(tkr, delta);
+}
+
 static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 {
 	struct tk_read_base *tkr;
@@ -439,12 +447,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base);
-
-		now += timekeeping_delta_to_ns(tkr,
-				clocksource_delta(
-					tk_clock_read(tkr),
-					tkr->cycle_last,
-					tkr->mask));
+		now += fast_tk_get_delta_ns(tkr);
 	} while (read_seqcount_latch_retry(&tkf->seq, seq));
 
 	return now;
@@ -560,10 +563,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
 		tkr = tkf->base + (seq & 0x01);
 		basem = ktime_to_ns(tkr->base);
 		baser = ktime_to_ns(tkr->base_real);
-
-		delta = timekeeping_delta_to_ns(tkr,
-					clocksource_delta(tk_clock_read(tkr),
-					tkr->cycle_last, tkr->mask));
+		delta = fast_tk_get_delta_ns(tkr);
 	} while (read_seqcount_latch_retry(&tkf->seq, seq));
 
 	if (mono)
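The patch deduplicates the delta computation shared by the two fast-path readers: both __ktime_get_fast_ns() and __ktime_get_real_fast() now call the new fast_tk_get_delta_ns() helper instead of open-coding clocksource_delta() plus timekeeping_delta_to_ns(). The following is a minimal userspace sketch of that pattern, not the kernel code: the struct, the clock source, and all *_sim names are simplified stand-ins, and the cycles-to-ns scaling is reduced to a plain mult/shift.

/*
 * Simplified model of the refactor: one shared helper computes the
 * wrap-safe counter delta and scales it to nanoseconds, so every
 * reader stays consistent. Names ending in _sim are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct tk_read_base_sim {
	uint64_t cycle_last;	/* counter value captured at the last update */
	uint64_t mask;		/* counter width mask for wrap handling */
	uint32_t mult;		/* cycles -> ns multiplier */
	uint32_t shift;		/* cycles -> ns shift */
	uint64_t base;		/* ns accumulated up to the last update */
};

/* Stand-in for tk_clock_read(): read a raw monotonic counter in ns. */
static uint64_t clock_read_sim(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Mirrors the shape of fast_tk_get_delta_ns(): read, delta, scale. */
static inline uint64_t fast_tk_get_delta_ns_sim(const struct tk_read_base_sim *tkr)
{
	uint64_t delta, cycles = clock_read_sim();

	delta = (cycles - tkr->cycle_last) & tkr->mask;
	return (delta * tkr->mult) >> tkr->shift;	/* no 128-bit math here */
}

int main(void)
{
	struct tk_read_base_sim tkr = {
		.cycle_last = clock_read_sim(),
		.mask = ~0ull,
		.mult = 1,	/* counter already ticks in ns in this sketch */
		.shift = 0,
		.base = 0,
	};

	/* Both "readers" in the patch now share this one helper call. */
	uint64_t mono = tkr.base + fast_tk_get_delta_ns_sim(&tkr);

	printf("ns since init: %llu\n", (unsigned long long)mono);
	return 0;
}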