@@ -644,17 +644,12 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 /*
  * Is the high resolution mode active ?
  */
-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
+static inline int hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
 {
 	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
 		cpu_base->hres_active : 0;
 }
 
-static inline int hrtimer_hres_active(void)
-{
-	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
-}
-
 static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
 				struct hrtimer *next_timer,
 				ktime_t expires_next)
@@ -678,7 +673,7 @@ static void __hrtimer_reprogram(struct hrtimer_cpu_base *cpu_base,
 	 * set. So we'd effectively block all timers until the T2 event
 	 * fires.
 	 */
-	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+	if (!hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
 		return;
 
 	tick_program_event(expires_next, 1);
@@ -789,12 +784,12 @@ static void retrigger_next_event(void *arg)
 	 * function call will take care of the reprogramming in case the
 	 * CPU was in a NOHZ idle sleep.
 	 */
-	if (!__hrtimer_hres_active(base) && !tick_nohz_active)
+	if (!hrtimer_hres_active(base) && !tick_nohz_active)
 		return;
 
 	raw_spin_lock(&base->lock);
 	hrtimer_update_base(base);
-	if (__hrtimer_hres_active(base))
+	if (hrtimer_hres_active(base))
 		hrtimer_force_reprogram(base, 0);
 	else
 		hrtimer_update_next_event(base);
@@ -951,7 +946,7 @@ void clock_was_set(unsigned int bases)
 	cpumask_var_t mask;
 	int cpu;
 
-	if (!__hrtimer_hres_active(cpu_base) && !tick_nohz_active)
+	if (!hrtimer_hres_active(cpu_base) && !tick_nohz_active)
 		goto out_timerfd;
 
 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -1491,7 +1486,7 @@ u64 hrtimer_get_next_event(void)
 
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
-	if (!__hrtimer_hres_active(cpu_base))
+	if (!hrtimer_hres_active(cpu_base))
 		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
 
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
@@ -1514,7 +1509,7 @@ u64 hrtimer_next_event_without(const struct hrtimer *exclude)
 
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
-	if (__hrtimer_hres_active(cpu_base)) {
+	if (hrtimer_hres_active(cpu_base)) {
 		unsigned int active;
 
 		if (!cpu_base->softirq_activated) {
@@ -1886,7 +1881,7 @@ void hrtimer_run_queues(void)
 	unsigned long flags;
 	ktime_t now;
 
-	if (__hrtimer_hres_active(cpu_base))
+	if (hrtimer_hres_active(cpu_base))
 		return;
 
 	/*
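
For context, a minimal sketch of how a call site that previously relied on the removed void wrapper would look after this change: the caller resolves the per-CPU base itself (the this_cpu_ptr(&hrtimer_bases) expression is taken from the removed wrapper's body) and passes it to the renamed helper. The function below is hypothetical and not part of this commit.

/* Hypothetical caller: resolve the current CPU's hrtimer base explicitly,
 * then query it with the renamed helper. */
static void example_hres_check(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);

	if (!hrtimer_hres_active(cpu_base))
		return;

	/* ... high resolution specific work ... */
}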