@@ -1164,6 +1164,14 @@ static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
 	curr->exec_start = now;
 	curr->sum_exec_runtime += delta_exec;
 
+	if (entity_is_task(curr)) {
+		struct task_struct *p = task_of(curr);
+
+		trace_sched_stat_runtime(p, delta_exec);
+		account_group_exec_runtime(p, delta_exec);
+		cgroup_account_cputime(p, delta_exec);
+	}
+
 	if (schedstat_enabled()) {
 		struct sched_statistics *stats;
 
@@ -1175,26 +1183,14 @@ static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
 	return delta_exec;
 }
 
-static inline void update_curr_task(struct task_struct *p, s64 delta_exec)
-{
-	trace_sched_stat_runtime(p, delta_exec);
-	account_group_exec_runtime(p, delta_exec);
-	cgroup_account_cputime(p, delta_exec);
-}
-
 /*
  * Used by other classes to account runtime.
  */
 s64 update_curr_common(struct rq *rq)
 {
 	struct task_struct *donor = rq->donor;
-	s64 delta_exec;
 
-	delta_exec = update_curr_se(rq, &donor->se);
-	if (likely(delta_exec > 0))
-		update_curr_task(donor, delta_exec);
-
-	return delta_exec;
+	return update_curr_se(rq, &donor->se);
 }
 
 /*
@@ -1219,10 +1215,6 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	update_min_vruntime(cfs_rq);
 
 	if (entity_is_task(curr)) {
-		struct task_struct *p = task_of(curr);
-
-		update_curr_task(p, delta_exec);
-
 		/*
 		 * If the fair_server is active, we need to account for the
 		 * fair_server time whether or not the task is running on