@@ -682,7 +682,6 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
 static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
 {
-	const struct k_clock *kc;
 	struct k_itimer *timr;
 	unsigned long flags;
 	int ret = 0;
@@ -692,11 +691,7 @@ static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
 		return -EINVAL;
 
 	memset(setting, 0, sizeof(*setting));
-	kc = timr->kclock;
-	if (WARN_ON_ONCE(!kc || !kc->timer_get))
-		ret = -EINVAL;
-	else
-		kc->timer_get(timr, setting);
+	timr->kclock->timer_get(timr, setting);
 
 	unlock_timer(timr, flags);
 	return ret;
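The hunks above and below all follow one pattern: the per-call WARN_ON_ONCE() guards are dropped and timer_get()/timer_set()/timer_wait_running() are invoked straight through timr->kclock, presumably because every posix clock is now guaranteed to register a complete k_clock callback table. A minimal userspace sketch of that ops-table idiom, with all names invented for illustration:

#include <stdio.h>

struct itimer;				/* forward declaration */

/* Invented stand-in for the kernel's struct k_clock. */
struct clock_ops {
	void (*timer_get)(struct itimer *t);
};

struct itimer {
	const struct clock_ops *kclock;
	long remaining_ns;
};

static void sample_timer_get(struct itimer *t)
{
	printf("remaining: %ld ns\n", t->remaining_ns);
}

/* Each clock registers a fully populated callback table up front ... */
static const struct clock_ops sample_clock = {
	.timer_get = sample_timer_get,
};

int main(void)
{
	struct itimer t = { .kclock = &sample_clock, .remaining_ns = 500 };

	/* ... so call sites can dispatch without a NULL check. */
	t.kclock->timer_get(&t);
	return 0;
}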
@@ -824,7 +819,6 @@ static void common_timer_wait_running(struct k_itimer *timer)
 static struct k_itimer *timer_wait_running(struct k_itimer *timer,
 					   unsigned long *flags)
 {
-	const struct k_clock *kc = READ_ONCE(timer->kclock);
 	timer_t timer_id = READ_ONCE(timer->it_id);
 
 	/* Prevent kfree(timer) after dropping the lock */
@@ -835,8 +829,7 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
 	 * kc->timer_wait_running() might drop RCU lock. So @timer
 	 * cannot be touched anymore after the function returns!
 	 */
-	if (!WARN_ON_ONCE(!kc->timer_wait_running))
-		kc->timer_wait_running(timer);
+	timer->kclock->timer_wait_running(timer);
 
 	rcu_read_unlock();
 	/* Relock the timer. It might be not longer hashed. */
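This hunk also shows a capture-and-revalidate pattern: timer_wait_running() records the timer id with READ_ONCE() while the object is still safe to touch, because the wait callback may drop the RCU read lock, after which the original pointer can refer to freed memory and the timer must be looked up again by id. A self-contained userspace sketch of that idea, with an invented lookup table standing in for the kernel's timer hash:

#include <stdio.h>
#include <stddef.h>

struct itimer {
	int id;
};

/* Invented stand-in for the kernel's timer hash. */
static struct itimer *hash[8];

static struct itimer *lookup(int id)
{
	for (size_t i = 0; i < 8; i++) {
		if (hash[i] && hash[i]->id == id)
			return hash[i];
	}
	return NULL;
}

int main(void)
{
	struct itimer t = { .id = 3 };
	hash[0] = &t;

	/* Capture a stable key while the object is still valid ... */
	int timer_id = t.id;

	/* ... another thread may delete (and free) the timer meanwhile ... */
	hash[0] = NULL;

	/* ... so relook it up by id rather than trusting the old pointer. */
	struct itimer *again = lookup(timer_id);
	printf("timer %d is %s\n", timer_id, again ? "still hashed" : "gone");
	return 0;
}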
@@ -899,7 +892,6 @@ static int do_timer_settime(timer_t timer_id, int tmr_flags,
 					    struct itimerspec64 *new_spec64,
 					    struct itimerspec64 *old_spec64)
 {
-	const struct k_clock *kc;
 	struct k_itimer *timr;
 	unsigned long flags;
 	int error;
@@ -922,11 +914,7 @@ static int do_timer_settime(timer_t timer_id, int tmr_flags,
 	/* Prevent signal delivery and rearming. */
 	timr->it_signal_seq++;
 
-	kc = timr->kclock;
-	if (WARN_ON_ONCE(!kc || !kc->timer_set))
-		error = -EINVAL;
-	else
-		error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);
+	error = timr->kclock->timer_set(timr, tmr_flags, new_spec64, old_spec64);
 
 	if (error == TIMER_RETRY) {
 		// We already got the old time...
@@ -1008,18 +996,6 @@ static inline void posix_timer_cleanup_ignored(struct k_itimer *tmr)
 	}
 }
 
-static inline int timer_delete_hook(struct k_itimer *timer)
-{
-	const struct k_clock *kc = timer->kclock;
-
-	/* Prevent signal delivery and rearming. */
-	timer->it_signal_seq++;
-
-	if (WARN_ON_ONCE(!kc || !kc->timer_del))
-		return -EINVAL;
-	return kc->timer_del(timer);
-}
-
 /* Delete a POSIX.1b interval timer. */
 SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 {
@@ -1032,7 +1008,10 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 	if (!timer)
 		return -EINVAL;
 
-	if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
+	/* Prevent signal delivery and rearming. */
+	timer->it_signal_seq++;
+
+	if (unlikely(timer->kclock->timer_del(timer) == TIMER_RETRY)) {
 		/* Unlocks and relocks the timer if it still exists */
 		timer = timer_wait_running(timer, &flags);
 		goto retry_delete;
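With timer_delete_hook() removed, its two call sites do the work inline: bump it_signal_seq to block signal delivery and rearming, call the clock's timer_del(), and retry when it raced with a running expiry. A self-contained sketch of that retry-on-TIMER_RETRY loop, where an invented "expiry running" flag stands in for the kernel's locking and timer_wait_running():

#include <stdio.h>
#include <stdbool.h>

#define TIMER_RETRY 1

struct itimer {
	int it_signal_seq;
	bool expiry_running;
};

/* Invented timer_del(): fails with TIMER_RETRY while the expiry runs. */
static int fake_timer_del(struct itimer *t)
{
	if (t->expiry_running) {
		t->expiry_running = false;	/* pretend the expiry finishes */
		return TIMER_RETRY;
	}
	return 0;
}

int main(void)
{
	struct itimer t = { .expiry_running = true };

	/* Prevent signal delivery and rearming, as in the hunk above. */
	t.it_signal_seq++;

	/* The kernel waits in timer_wait_running() between attempts. */
	while (fake_timer_del(&t) == TIMER_RETRY)
		puts("delete raced with expiry, retrying");

	puts("timer deleted");
	return 0;
}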
@@ -1078,7 +1057,7 @@ static void itimer_delete(struct k_itimer *timer)
 	 * mechanism. Worse, that timer mechanism might run the expiry
 	 * function concurrently.
 	 */
-	if (timer_delete_hook(timer) == TIMER_RETRY) {
+	if (timer->kclock->timer_del(timer) == TIMER_RETRY) {
 		/*
 		 * Timer is expired concurrently, prevent livelocks
 		 * and pointless spinning on RT.