@@ -1110,6 +1110,7 @@ static void __resched_curr(struct rq *rq, int tif)
 
 	cpu = cpu_of(rq);
 
+	trace_sched_set_need_resched_tp(curr, cpu, tif);
 	if (cpu == smp_processor_id()) {
 		set_ti_thread_flag(cti, tif);
 		if (tif == TIF_NEED_RESCHED)
@@ -1125,6 +1126,11 @@ static void __resched_curr(struct rq *rq, int tif)
 	}
 }
 
+void __trace_set_need_resched(struct task_struct *curr, int tif)
+{
+	trace_sched_set_need_resched_tp(curr, smp_processor_id(), tif);
+}
+
 void resched_curr(struct rq *rq)
 {
 	__resched_curr(rq, TIF_NEED_RESCHED);
@@ -5329,7 +5335,7 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	 * switched the context for the first time. It is returning from
 	 * schedule for the first time in this path.
 	 */
-	trace_sched_exit_tp(true, CALLER_ADDR0);
+	trace_sched_exit_tp(true);
 	preempt_enable();
 
 	if (current->set_child_tid)
@@ -6678,7 +6684,8 @@ static void __sched notrace __schedule(int sched_mode)
 	struct rq *rq;
 	int cpu;
 
-	trace_sched_entry_tp(preempt, CALLER_ADDR0);
+	/* Trace preemptions consistently with task switches */
+	trace_sched_entry_tp(sched_mode == SM_PREEMPT);
 
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
@@ -6793,7 +6800,7 @@ static void __sched notrace __schedule(int sched_mode)
 		__balance_callbacks(rq);
 		raw_spin_rq_unlock_irq(rq);
 	}
-	trace_sched_exit_tp(is_switch, CALLER_ADDR0);
+	trace_sched_exit_tp(is_switch);
 }
 
 void __noreturn do_task_dead(void)
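For context, here is a minimal sketch of how a tracing consumer could attach a probe to the new tracepoint from a module. This is not part of the patch: the header path, the register_trace_sched_set_need_resched_tp() helper name, and the probe prototype are assumptions inferred from the standard DECLARE_TRACE() machinery and the arguments visible in the hunks above, and it further assumes the tracepoint is exported for module use.

/* Sketch only: names below are assumptions, not part of this patch. */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>	/* assumed location of the tracepoint declaration */

/* Tracepoint probes receive the private data pointer first, then the tracepoint args. */
static void probe_set_need_resched(void *data, struct task_struct *curr,
				   int cpu, int tif)
{
	pr_debug("need_resched: %s/%d on CPU %d (tif=%d)\n",
		 curr->comm, curr->pid, cpu, tif);
}

static int __init tp_sketch_init(void)
{
	/* Assumed registration helper generated by DECLARE_TRACE() for this tracepoint. */
	return register_trace_sched_set_need_resched_tp(probe_set_need_resched,
							NULL);
}

static void __exit tp_sketch_exit(void)
{
	unregister_trace_sched_set_need_resched_tp(probe_set_need_resched,
						   NULL);
	tracepoint_synchronize_unregister();
}

module_init(tp_sketch_init);
module_exit(tp_sketch_exit);
MODULE_LICENSE("GPL");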