@@ -912,6 +912,30 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
912
912
} while (read_seqcount_retry (& vtime -> seqcount , seq ));
913
913
}
914
914
915
+ static int vtime_state_check (struct vtime * vtime , int cpu )
916
+ {
917
+ /*
918
+ * We raced against a context switch, fetch the
919
+ * kcpustat task again.
920
+ */
921
+ if (vtime -> cpu != cpu && vtime -> cpu != -1 )
922
+ return - EAGAIN ;
923
+
924
+ /*
925
+ * Two possible things here:
926
+ * 1) We are seeing the scheduling out task (prev) or any past one.
927
+ * 2) We are seeing the scheduling in task (next) but it hasn't
928
+ * passed though vtime_task_switch() yet so the pending
929
+ * cputime of the prev task may not be flushed yet.
930
+ *
931
+ * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
932
+ */
933
+ if (vtime -> state == VTIME_INACTIVE )
934
+ return - EAGAIN ;
935
+
936
+ return 0 ;
937
+ }
938
+
915
939
static u64 kcpustat_user_vtime (struct vtime * vtime )
916
940
{
917
941
if (vtime -> state == VTIME_USER )
@@ -933,26 +957,9 @@ static int kcpustat_field_vtime(u64 *cpustat,
933
957
do {
934
958
seq = read_seqcount_begin (& vtime -> seqcount );
935
959
936
- /*
937
- * We raced against context switch, fetch the
938
- * kcpustat task again.
939
- */
940
- if (vtime -> cpu != cpu && vtime -> cpu != -1 )
941
- return - EAGAIN ;
942
-
943
- /*
944
- * Two possible things here:
945
- * 1) We are seeing the scheduling out task (prev) or any past one.
946
- * 2) We are seeing the scheduling in task (next) but it hasn't
947
- * passed though vtime_task_switch() yet so the pending
948
- * cputime of the prev task may not be flushed yet.
949
- *
950
- * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
951
- */
952
- if (vtime -> state == VTIME_INACTIVE )
953
- return - EAGAIN ;
954
-
955
- err = 0 ;
960
+ err = vtime_state_check (vtime , cpu );
961
+ if (err < 0 )
962
+ return err ;
956
963
957
964
* val = cpustat [usage ];
958
965
@@ -1025,4 +1032,93 @@ u64 kcpustat_field(struct kernel_cpustat *kcpustat,
1025
1032
}
1026
1033
}
1027
1034
EXPORT_SYMBOL_GPL (kcpustat_field );
1035
+
1036
+ static int kcpustat_cpu_fetch_vtime (struct kernel_cpustat * dst ,
1037
+ const struct kernel_cpustat * src ,
1038
+ struct task_struct * tsk , int cpu )
1039
+ {
1040
+ struct vtime * vtime = & tsk -> vtime ;
1041
+ unsigned int seq ;
1042
+ int err ;
1043
+
1044
+ do {
1045
+ u64 * cpustat ;
1046
+ u64 delta ;
1047
+
1048
+ seq = read_seqcount_begin (& vtime -> seqcount );
1049
+
1050
+ err = vtime_state_check (vtime , cpu );
1051
+ if (err < 0 )
1052
+ return err ;
1053
+
1054
+ * dst = * src ;
1055
+ cpustat = dst -> cpustat ;
1056
+
1057
+ /* Task is sleeping, dead or idle, nothing to add */
1058
+ if (vtime -> state < VTIME_SYS )
1059
+ continue ;
1060
+
1061
+ delta = vtime_delta (vtime );
1062
+
1063
+ /*
1064
+ * Task runs either in user (including guest) or kernel space,
1065
+ * add pending nohz time to the right place.
1066
+ */
1067
+ if (vtime -> state == VTIME_SYS ) {
1068
+ cpustat [CPUTIME_SYSTEM ] += vtime -> stime + delta ;
1069
+ } else if (vtime -> state == VTIME_USER ) {
1070
+ if (task_nice (tsk ) > 0 )
1071
+ cpustat [CPUTIME_NICE ] += vtime -> utime + delta ;
1072
+ else
1073
+ cpustat [CPUTIME_USER ] += vtime -> utime + delta ;
1074
+ } else {
1075
+ WARN_ON_ONCE (vtime -> state != VTIME_GUEST );
1076
+ if (task_nice (tsk ) > 0 ) {
1077
+ cpustat [CPUTIME_GUEST_NICE ] += vtime -> gtime + delta ;
1078
+ cpustat [CPUTIME_NICE ] += vtime -> gtime + delta ;
1079
+ } else {
1080
+ cpustat [CPUTIME_GUEST ] += vtime -> gtime + delta ;
1081
+ cpustat [CPUTIME_USER ] += vtime -> gtime + delta ;
1082
+ }
1083
+ }
1084
+ } while (read_seqcount_retry (& vtime -> seqcount , seq ));
1085
+
1086
+ return err ;
1087
+ }
1088
+
1089
+ void kcpustat_cpu_fetch (struct kernel_cpustat * dst , int cpu )
1090
+ {
1091
+ const struct kernel_cpustat * src = & kcpustat_cpu (cpu );
1092
+ struct rq * rq ;
1093
+ int err ;
1094
+
1095
+ if (!vtime_accounting_enabled_cpu (cpu )) {
1096
+ * dst = * src ;
1097
+ return ;
1098
+ }
1099
+
1100
+ rq = cpu_rq (cpu );
1101
+
1102
+ for (;;) {
1103
+ struct task_struct * curr ;
1104
+
1105
+ rcu_read_lock ();
1106
+ curr = rcu_dereference (rq -> curr );
1107
+ if (WARN_ON_ONCE (!curr )) {
1108
+ rcu_read_unlock ();
1109
+ * dst = * src ;
1110
+ return ;
1111
+ }
1112
+
1113
+ err = kcpustat_cpu_fetch_vtime (dst , src , curr , cpu );
1114
+ rcu_read_unlock ();
1115
+
1116
+ if (!err )
1117
+ return ;
1118
+
1119
+ cpu_relax ();
1120
+ }
1121
+ }
1122
+ EXPORT_SYMBOL_GPL (kcpustat_cpu_fetch );
1123
+
1028
1124
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */