@@ -5721,9 +5721,6 @@ static void sched_tick_remote(struct work_struct *work)
 	struct tick_work *twork = container_of(dwork, struct tick_work, work);
 	int cpu = twork->cpu;
 	struct rq *rq = cpu_rq(cpu);
-	struct task_struct *curr;
-	struct rq_flags rf;
-	u64 delta;
 	int os;
 
 	/*
@@ -5733,30 +5730,26 @@ static void sched_tick_remote(struct work_struct *work)
 	 * statistics and checks timeslices in a time-independent way, regardless
 	 * of when exactly it is running.
 	 */
-	if (!tick_nohz_tick_stopped_cpu(cpu))
-		goto out_requeue;
+	if (tick_nohz_tick_stopped_cpu(cpu)) {
+		guard(rq_lock_irq)(rq);
+		struct task_struct *curr = rq->curr;
 
-	rq_lock_irq(rq, &rf);
-	curr = rq->curr;
-	if (cpu_is_offline(cpu))
-		goto out_unlock;
+		if (cpu_online(cpu)) {
+			update_rq_clock(rq);
 
-	update_rq_clock(rq);
+			if (!is_idle_task(curr)) {
+				/*
+				 * Make sure the next tick runs within a
+				 * reasonable amount of time.
+				 */
+				u64 delta = rq_clock_task(rq) - curr->se.exec_start;
+				WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+			}
+			curr->sched_class->task_tick(rq, curr, 0);
 
-	if (!is_idle_task(curr)) {
-		/*
-		 * Make sure the next tick runs within a reasonable
-		 * amount of time.
-		 */
-		delta = rq_clock_task(rq) - curr->se.exec_start;
-		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+			calc_load_nohz_remote(rq);
+		}
 	}
-	curr->sched_class->task_tick(rq, curr, 0);
-
-	calc_load_nohz_remote(rq);
-out_unlock:
-	rq_unlock_irq(rq, &rf);
-out_requeue:
 
 	/*
 	 * Run the remote tick once per second (1Hz). This arbitrary
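For readers unfamiliar with the pattern: the conversion above replaces the explicit rq_lock_irq()/rq_unlock_irq() pair and its goto-based unlock paths with guard(rq_lock_irq)(rq) from <linux/cleanup.h>, which drops the lock automatically when the enclosing scope ends. Below is a minimal userspace sketch of the same scope-based cleanup mechanism, built on GCC/Clang's __attribute__((cleanup)); the mutex_guard_t type and MUTEX_GUARD macro are hypothetical stand-ins for illustration, not the kernel's API.

/*
 * Userspace sketch of a scope-based lock guard. Everything here
 * (mutex_guard_t, MUTEX_GUARD) is a made-up stand-in; the kernel's
 * real helpers are guard()/DEFINE_GUARD in <linux/cleanup.h>.
 */
#include <pthread.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t *lock;
} mutex_guard_t;

/* Constructor: take the lock and remember it. */
static inline mutex_guard_t mutex_guard_init(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	return (mutex_guard_t){ .lock = lock };
}

/* Destructor: runs automatically when the guard goes out of scope. */
static inline void mutex_guard_release(mutex_guard_t *g)
{
	pthread_mutex_unlock(g->lock);
}

#define MUTEX_GUARD(var, lockp) \
	mutex_guard_t var __attribute__((cleanup(mutex_guard_release))) = \
		mutex_guard_init(lockp)

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void bump(int online)
{
	MUTEX_GUARD(g, &big_lock);

	if (!online)
		return;		/* early return: unlock still happens */
	counter++;
}				/* scope ends: mutex_guard_release() unlocks */

int main(void)
{
	bump(1);
	bump(0);
	printf("counter = %d\n", counter);	/* prints 1 */
	return 0;
}

The kernel's DEFINE_GUARD/DEFINE_LOCK_GUARD_* macros generate the constructor/destructor pair in much the same way, which is what lets the rewritten sched_tick_remote() fold the body into nested ifs and drop the out_unlock/out_requeue labels entirely.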