Commit 4581bea

Vincent Donnefort authored, Peter Zijlstra committed
sched/debug: Add new tracepoints to track util_est
The util_est signals are key elements for EAS task placement and frequency selection. Having tracepoints to track these signals enables load-tracking and schedutil testing and/or debugging by a toolkit.

Signed-off-by: Vincent Donnefort <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Valentin Schneider <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 1ca2034 commit 4581bea
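These are bare tracepoints (DECLARE_TRACE with no event definition), so no event appears in tracefs; a toolkit consumes them by registering a probe from a GPL module via the generated register_trace_*() helpers. A minimal probe sketch for the per-entity tracepoint follows; the module boilerplate and trace_printk format are illustrative, not part of this commit:

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/* Probe signature: the void *data cookie first, then the TP_PROTO args. */
static void probe_util_est_se(void *data, struct sched_entity *se)
{
	trace_printk("util_est enqueued=%u ewma=%u\n",
		     se->avg.util_est.enqueued,
		     se->avg.util_est.ewma);
}

static int __init util_est_probe_init(void)
{
	return register_trace_sched_util_est_se_tp(probe_util_est_se, NULL);
}

static void __exit util_est_probe_exit(void)
{
	unregister_trace_sched_util_est_se_tp(probe_util_est_se, NULL);
	/* Wait for in-flight probe calls before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(util_est_probe_init);
module_exit(util_est_probe_exit);
MODULE_LICENSE("GPL");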

File tree

3 files changed: +16 lines, -0 lines

include/trace/events/sched.h

Lines changed: 8 additions & 0 deletions
@@ -634,6 +634,14 @@ DECLARE_TRACE(sched_overutilized_tp,
 	TP_PROTO(struct root_domain *rd, bool overutilized),
 	TP_ARGS(rd, overutilized));

+DECLARE_TRACE(sched_util_est_cfs_tp,
+	TP_PROTO(struct cfs_rq *cfs_rq),
+	TP_ARGS(cfs_rq));
+
+DECLARE_TRACE(sched_util_est_se_tp,
+	TP_PROTO(struct sched_entity *se),
+	TP_ARGS(se));
+
 #endif /* _TRACE_SCHED_H */

 /* This part must be outside protection */
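Note that DECLARE_TRACE, unlike TRACE_EVENT, creates only the tracepoint itself: there is no format string and no entry under tracefs, so the new signals are observable only through an attached probe such as the module sketch above.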

kernel/sched/core.c

Lines changed: 2 additions & 0 deletions
@@ -36,6 +36,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);

 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
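The EXPORT_TRACEPOINT_SYMBOL_GPL() lines are what make the two tracepoints reachable from a GPL module: they export the underlying __tracepoint_* symbols that register_trace_sched_util_est_*_tp() references, so without them an out-of-tree probe would fail at module load with unresolved symbols.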

kernel/sched/fair.c

Lines changed: 6 additions & 0 deletions
@@ -3922,6 +3922,8 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
 	enqueued  = cfs_rq->avg.util_est.enqueued;
 	enqueued += _task_util_est(p);
 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
+
+	trace_sched_util_est_cfs_tp(cfs_rq);
 }

 /*
@@ -3952,6 +3954,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);

+	trace_sched_util_est_cfs_tp(cfs_rq);
+
 	/*
 	 * Skip update of task's estimated utilization when the task has not
 	 * yet completed an activation, e.g. being migrated.
@@ -4017,6 +4021,8 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
 done:
 	WRITE_ONCE(p->se.avg.util_est, ue);
+
+	trace_sched_util_est_se_tp(&p->se);
 }

 static inline int task_fits_capacity(struct task_struct *p, long capacity)
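Unlike struct sched_entity, struct cfs_rq is private to kernel/sched/, so a module probe for sched_util_est_cfs_tp cannot dereference its argument directly; it has to go through the exported sched_trace_*() accessors instead. A sketch of such a probe, assuming the same module boilerplate as above (the trace_printk format is illustrative):

#include <linux/sched.h>
#include <trace/events/sched.h>

static void probe_util_est_cfs(void *data, struct cfs_rq *cfs_rq)
{
	/* struct cfs_rq is opaque here; use the exported accessors. */
	const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);

	if (avg)
		trace_printk("cpu=%d cfs util_est.enqueued=%u\n",
			     sched_trace_cfs_rq_cpu(cfs_rq),
			     avg->util_est.enqueued);
}

Registration mirrors the sched_entity example: register_trace_sched_util_est_cfs_tp(probe_util_est_cfs, NULL).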
