@@ -279,7 +279,7 @@ static bool __posixtimer_deliver_signal(struct kernel_siginfo *info, struct k_it
	 * since the signal was queued. In either case, don't rearm and
	 * drop the signal.
	 */
-	if (timr->it_signal_seq != timr->it_sigqueue_seq || WARN_ON_ONCE(!timr->it_signal))
+	if (timr->it_signal_seq != timr->it_sigqueue_seq || WARN_ON_ONCE(!posixtimer_valid(timr)))
		return false;

	if (!timr->it_interval || WARN_ON_ONCE(timr->it_status != POSIX_TIMER_REQUEUE_PENDING))
@@ -324,6 +324,9 @@ void posix_timer_queue_signal(struct k_itimer *timr)
 {
	lockdep_assert_held(&timr->it_lock);

+	if (!posixtimer_valid(timr))
+		return;
+
	timr->it_status = timr->it_interval ? POSIX_TIMER_REQUEUE_PENDING : POSIX_TIMER_DISARMED;
	posixtimer_send_sigqueue(timr);
 }
@@ -553,11 +556,11 @@ static struct k_itimer *__lock_timer(timer_t timer_id)
	 * The hash lookup and the timers are RCU protected.
	 *
	 * Timers are added to the hash in invalid state where
-	 * timr::it_signal == NULL. timer::it_signal is only set after the
-	 * rest of the initialization succeeded.
+	 * timr::it_signal is marked invalid. timer::it_signal is only set
+	 * after the rest of the initialization succeeded.
	 *
	 * Timer destruction happens in steps:
-	 *  1) Set timr::it_signal to NULL with timr::it_lock held
+	 *  1) Set timr::it_signal marked invalid with timr::it_lock held
	 *  2) Release timr::it_lock
	 *  3) Remove from the hash under hash_lock
	 *  4) Put the reference count.
@@ -574,8 +577,8 @@ static struct k_itimer *__lock_timer(timer_t timer_id)
	 *
	 * The lookup validates locklessly that timr::it_signal ==
	 * current::it_signal and timr::it_id == @timer_id. timr::it_id
-	 * can't change, but timr::it_signal becomes NULL during
-	 * destruction.
+	 * can't change, but timr::it_signal can become invalid during
+	 * destruction, which makes the locked check fail.
	 */
	guard(rcu)();
	timr = posix_timer_by_id(timer_id);
@@ -811,22 +814,13 @@ static void common_timer_wait_running(struct k_itimer *timer)
 * when the task which tries to delete or disarm the timer has preempted
 * the task which runs the expiry in task work context.
 */
-static struct k_itimer *timer_wait_running(struct k_itimer *timer)
+static void timer_wait_running(struct k_itimer *timer)
 {
-	timer_t timer_id = READ_ONCE(timer->it_id);
-
-	/* Prevent kfree(timer) after dropping the lock */
-	scoped_guard (rcu) {
-		unlock_timer(timer);
-		/*
-		 * kc->timer_wait_running() might drop RCU lock. So @timer
-		 * cannot be touched anymore after the function returns!
-		 */
-		timer->kclock->timer_wait_running(timer);
-	}
-
-	/* Relock the timer. It might be not longer hashed. */
-	return lock_timer(timer_id);
+	/*
+	 * kc->timer_wait_running() might drop RCU lock. So @timer
+	 * cannot be touched anymore after the function returns!
+	 */
+	timer->kclock->timer_wait_running(timer);
 }

 /*
@@ -885,8 +879,7 @@ static int do_timer_settime(timer_t timer_id, int tmr_flags,
			    struct itimerspec64 *new_spec64,
			    struct itimerspec64 *old_spec64)
 {
-	struct k_itimer *timr;
-	int error;
+	int ret;

	if (!timespec64_valid(&new_spec64->it_interval) ||
	    !timespec64_valid(&new_spec64->it_value))
@@ -895,29 +888,36 @@ static int do_timer_settime(timer_t timer_id, int tmr_flags,
895
888
if (old_spec64 )
896
889
memset (old_spec64 , 0 , sizeof (* old_spec64 ));
897
890
898
- timr = lock_timer (timer_id );
899
- retry :
900
- if (!timr )
901
- return - EINVAL ;
891
+ for (;;) {
892
+ struct k_itimer * timr = lock_timer (timer_id );
902
893
903
- if (old_spec64 )
904
- old_spec64 -> it_interval = ktime_to_timespec64 (timr -> it_interval );
894
+ if (!timr )
895
+ return - EINVAL ;
896
+
897
+ if (old_spec64 )
898
+ old_spec64 -> it_interval = ktime_to_timespec64 (timr -> it_interval );
905
899
906
- /* Prevent signal delivery and rearming. */
907
- timr -> it_signal_seq ++ ;
900
+ /* Prevent signal delivery and rearming. */
901
+ timr -> it_signal_seq ++ ;
908
902
909
- error = timr -> kclock -> timer_set (timr , tmr_flags , new_spec64 , old_spec64 );
903
+ ret = timr -> kclock -> timer_set (timr , tmr_flags , new_spec64 , old_spec64 );
904
+ if (ret != TIMER_RETRY ) {
905
+ unlock_timer (timr );
906
+ break ;
907
+ }
910
908
911
- if (error == TIMER_RETRY ) {
912
- // We already got the old time...
909
+ /* Read the old time only once */
913
910
old_spec64 = NULL ;
914
- /* Unlocks and relocks the timer if it still exists */
915
- timr = timer_wait_running (timr );
916
- goto retry ;
911
+ /* Protect the timer from being freed after the lock is dropped */
912
+ guard (rcu )();
913
+ unlock_timer (timr );
914
+ /*
915
+ * timer_wait_running() might drop RCU read side protection
916
+ * so the timer has to be looked up again!
917
+ */
918
+ timer_wait_running (timr );
917
919
}
918
- unlock_timer (timr );
919
-
920
- return error ;
920
+ return ret ;
921
921
}
922
922
923
923
/* Set a POSIX.1b interval timer */
@@ -988,90 +988,56 @@ static inline void posix_timer_cleanup_ignored(struct k_itimer *tmr)
	}
 }

-/* Delete a POSIX.1b interval timer. */
-SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
+static void posix_timer_delete(struct k_itimer *timer)
 {
-	struct k_itimer *timer = lock_timer(timer_id);
-
-retry_delete:
-	if (!timer)
-		return -EINVAL;
-
-	/* Prevent signal delivery and rearming. */
+	/*
+	 * Invalidate the timer, remove it from the linked list and remove
+	 * it from the ignored list if pending.
+	 *
+	 * The invalidation must be written with siglock held so that the
+	 * signal code observes the invalidated timer::it_signal in
+	 * do_sigaction(), which prevents it from moving a pending signal
+	 * of a deleted timer to the ignore list.
+	 *
+	 * The invalidation also prevents signal queueing, signal delivery
+	 * and therefore rearming from the signal delivery path.
+	 *
+	 * A concurrent lookup can still find the timer in the hash, but it
+	 * will check timer::it_signal with timer::it_lock held and observe
+	 * bit 0 set, which invalidates it. That also prevents the timer ID
+	 * from being handed out before this timer is completely gone.
+	 */
	timer->it_signal_seq++;

-	if (unlikely(timer->kclock->timer_del(timer) == TIMER_RETRY)) {
-		/* Unlocks and relocks the timer if it still exists */
-		timer = timer_wait_running(timer);
-		goto retry_delete;
-	}
-
	scoped_guard (spinlock, &current->sighand->siglock) {
+		unsigned long sig = (unsigned long)timer->it_signal | 1UL;
+
+		WRITE_ONCE(timer->it_signal, (struct signal_struct *)sig);
		hlist_del(&timer->list);
		posix_timer_cleanup_ignored(timer);
-		/*
-		 * A concurrent lookup could check timer::it_signal lockless. It
-		 * will reevaluate with timer::it_lock held and observe the NULL.
-		 *
-		 * It must be written with siglock held so that the signal code
-		 * observes timer->it_signal == NULL in do_sigaction(SIG_IGN),
-		 * which prevents it from moving a pending signal of a deleted
-		 * timer to the ignore list.
-		 */
-		WRITE_ONCE(timer->it_signal, NULL);
	}

-	unlock_timer(timer);
-	posix_timer_unhash_and_free(timer);
-	return 0;
+	while (timer->kclock->timer_del(timer) == TIMER_RETRY) {
+		guard(rcu)();
+		spin_unlock_irq(&timer->it_lock);
+		timer_wait_running(timer);
+		spin_lock_irq(&timer->it_lock);
+	}
 }

-/*
- * Delete a timer if it is armed, remove it from the hash and schedule it
- * for RCU freeing.
- */
-static void itimer_delete(struct k_itimer *timer)
+/* Delete a POSIX.1b interval timer. */
+SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 {
-	spin_lock_irq(&timer->it_lock);
-
-retry_delete:
-	/*
-	 * Even if the timer is not longer accessible from other tasks
-	 * it still might be armed and queued in the underlying timer
-	 * mechanism. Worse, that timer mechanism might run the expiry
-	 * function concurrently.
-	 */
-	if (timer->kclock->timer_del(timer) == TIMER_RETRY) {
-		/*
-		 * Timer is expired concurrently, prevent livelocks
-		 * and pointless spinning on RT.
-		 *
-		 * timer_wait_running() drops timer::it_lock, which opens
-		 * the possibility for another task to delete the timer.
-		 *
-		 * That's not possible here because this is invoked from
-		 * do_exit() only for the last thread of the thread group.
-		 * So no other task can access and delete that timer.
-		 */
-		if (WARN_ON_ONCE(timer_wait_running(timer) != timer))
-			return;
-
-		goto retry_delete;
-	}
-	hlist_del(&timer->list);
-
-	posix_timer_cleanup_ignored(timer);
+	struct k_itimer *timer = lock_timer(timer_id);

-	/*
-	 * Setting timer::it_signal to NULL is technically not required
-	 * here as nothing can access the timer anymore legitimately via
-	 * the hash table. Set it to NULL nevertheless so that all deletion
-	 * paths are consistent.
-	 */
-	WRITE_ONCE(timer->it_signal, NULL);
+	if (!timer)
+		return -EINVAL;

-	spin_unlock_irq(&timer->it_lock);
+	posix_timer_delete(timer);
+	unlock_timer(timer);
+	/* Remove it from the hash, which frees up the timer ID */
	posix_timer_unhash_and_free(timer);
+	return 0;
 }

 /*
@@ -1082,6 +1048,8 @@ static void itimer_delete(struct k_itimer *timer)
 void exit_itimers(struct task_struct *tsk)
 {
	struct hlist_head timers;
+	struct hlist_node *next;
+	struct k_itimer *timer;

	if (hlist_empty(&tsk->signal->posix_timers))
		return;
@@ -1091,8 +1059,10 @@ void exit_itimers(struct task_struct *tsk)
	hlist_move_list(&tsk->signal->posix_timers, &timers);

	/* The timers are not longer accessible via tsk::signal */
-	while (!hlist_empty(&timers)) {
-		itimer_delete(hlist_entry(timers.first, struct k_itimer, list));
+	hlist_for_each_entry_safe(timer, next, &timers, list) {
+		scoped_guard (spinlock_irq, &timer->it_lock)
+			posix_timer_delete(timer);
+		posix_timer_unhash_and_free(timer);
		cond_resched();
	}
 }
0 commit comments