@@ -128,30 +128,30 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 	return pos;
 }
 
-static void cgroup_stat_accumulate(struct cgroup_stat *dst_stat,
-				   struct cgroup_stat *src_stat)
+static void cgroup_base_stat_accumulate(struct cgroup_base_stat *dst_bstat,
+					struct cgroup_base_stat *src_bstat)
 {
-	dst_stat->cputime.utime += src_stat->cputime.utime;
-	dst_stat->cputime.stime += src_stat->cputime.stime;
-	dst_stat->cputime.sum_exec_runtime += src_stat->cputime.sum_exec_runtime;
+	dst_bstat->cputime.utime += src_bstat->cputime.utime;
+	dst_bstat->cputime.stime += src_bstat->cputime.stime;
+	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
 }
 
-static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
+static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
 {
 	struct cgroup *parent = cgroup_parent(cgrp);
 	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
-	struct task_cputime *last_cputime = &rstatc->last_cputime;
+	struct task_cputime *last_cputime = &rstatc->last_bstat.cputime;
 	struct task_cputime cputime;
-	struct cgroup_stat delta;
+	struct cgroup_base_stat delta;
 	unsigned seq;
 
 	lockdep_assert_held(&cgroup_rstat_mutex);
 
 	/* fetch the current per-cpu values */
 	do {
-		seq = __u64_stats_fetch_begin(&rstatc->sync);
-		cputime = rstatc->cputime;
-	} while (__u64_stats_fetch_retry(&rstatc->sync, seq));
+		seq = __u64_stats_fetch_begin(&rstatc->bsync);
+		cputime = rstatc->bstat.cputime;
+	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
 
 	/* accumulate the deltas to propgate */
 	delta.cputime.utime = cputime.utime - last_cputime->utime;
@@ -161,13 +161,13 @@ static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
 	*last_cputime = cputime;
 
 	/* transfer the pending stat into delta */
-	cgroup_stat_accumulate(&delta, &cgrp->pending_stat);
-	memset(&cgrp->pending_stat, 0, sizeof(cgrp->pending_stat));
+	cgroup_base_stat_accumulate(&delta, &cgrp->pending_bstat);
+	memset(&cgrp->pending_bstat, 0, sizeof(cgrp->pending_bstat));
 
 	/* propagate delta into the global stat and the parent's pending */
-	cgroup_stat_accumulate(&cgrp->stat, &delta);
+	cgroup_base_stat_accumulate(&cgrp->bstat, &delta);
 	if (parent)
-		cgroup_stat_accumulate(&parent->pending_stat, &delta);
+		cgroup_base_stat_accumulate(&parent->pending_bstat, &delta);
 }
 
 /* see cgroup_rstat_flush() */
@@ -184,7 +184,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
 
 		raw_spin_lock_irq(cpu_lock);
 		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
-			cgroup_cpu_stat_flush_one(pos, cpu);
+			cgroup_base_stat_flush(pos, cpu);
 		raw_spin_unlock_irq(cpu_lock);
 	}
 }
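
A note on the flush path above (editorial sketch, not part of the patch): cgroup_base_stat_flush() works on deltas. Each per-cpu flush subtracts the values recorded at the previous flush, folds in whatever children already pushed into pending_bstat, adds the result to the cgroup's own bstat, and parks the same delta in the parent's pending_bstat for the parent's next flush. A minimal sketch of that propagation idea, with hypothetical names (base_stat, stat_node, flush_one), could look like:

#include <linux/types.h>

/* Hypothetical types; only the delta-propagation pattern mirrors the patch. */
struct base_stat {
	u64 usage;
};

struct stat_node {
	struct stat_node *parent;
	struct base_stat total;		/* like cgrp->bstat */
	struct base_stat pending;	/* like cgrp->pending_bstat */
	struct base_stat snap;		/* current per-cpu reading */
	struct base_stat last;		/* reading at the previous flush */
};

static void flush_one(struct stat_node *n)
{
	struct base_stat delta;

	/* delta accumulated on this cpu since the last flush */
	delta.usage = n->snap.usage - n->last.usage;
	n->last = n->snap;

	/* pick up what children already pushed into our pending slot */
	delta.usage += n->pending.usage;
	n->pending.usage = 0;

	/* account locally and hand the delta to the parent */
	n->total.usage += delta.usage;
	if (n->parent)
		n->parent->pending.usage += delta.usage;
}
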
@@ -208,19 +208,19 @@ void cgroup_rstat_flush(struct cgroup *cgrp)
 }
 
 static struct cgroup_rstat_cpu *
-cgroup_cpu_stat_account_begin(struct cgroup *cgrp)
+cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
 {
 	struct cgroup_rstat_cpu *rstatc;
 
 	rstatc = get_cpu_ptr(cgrp->rstat_cpu);
-	u64_stats_update_begin(&rstatc->sync);
+	u64_stats_update_begin(&rstatc->bsync);
 	return rstatc;
 }
 
-static void cgroup_cpu_stat_account_end(struct cgroup *cgrp,
-					struct cgroup_rstat_cpu *rstatc)
+static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
+						 struct cgroup_rstat_cpu *rstatc)
 {
-	u64_stats_update_end(&rstatc->sync);
+	u64_stats_update_end(&rstatc->bsync);
 	cgroup_rstat_cpu_updated(cgrp, smp_processor_id());
 	put_cpu_ptr(rstatc);
 }
@@ -229,36 +229,36 @@ void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
 {
 	struct cgroup_rstat_cpu *rstatc;
 
-	rstatc = cgroup_cpu_stat_account_begin(cgrp);
-	rstatc->cputime.sum_exec_runtime += delta_exec;
-	cgroup_cpu_stat_account_end(cgrp, rstatc);
+	rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
+	cgroup_base_stat_cputime_account_end(cgrp, rstatc);
 }
 
 void __cgroup_account_cputime_field(struct cgroup *cgrp,
 				    enum cpu_usage_stat index, u64 delta_exec)
 {
 	struct cgroup_rstat_cpu *rstatc;
 
-	rstatc = cgroup_cpu_stat_account_begin(cgrp);
+	rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
 
 	switch (index) {
 	case CPUTIME_USER:
 	case CPUTIME_NICE:
-		rstatc->cputime.utime += delta_exec;
+		rstatc->bstat.cputime.utime += delta_exec;
 		break;
 	case CPUTIME_SYSTEM:
 	case CPUTIME_IRQ:
 	case CPUTIME_SOFTIRQ:
-		rstatc->cputime.stime += delta_exec;
+		rstatc->bstat.cputime.stime += delta_exec;
 		break;
 	default:
 		break;
 	}
 
-	cgroup_cpu_stat_account_end(cgrp, rstatc);
+	cgroup_base_stat_cputime_account_end(cgrp, rstatc);
 }
 
-void cgroup_stat_show_cputime(struct seq_file *seq)
+void cgroup_base_stat_cputime_show(struct seq_file *seq)
 {
 	struct cgroup *cgrp = seq_css(seq)->cgroup;
 	u64 usage, utime, stime;
@@ -270,9 +270,8 @@ void cgroup_stat_show_cputime(struct seq_file *seq)
 
 	cgroup_rstat_flush_locked(cgrp);
 
-	usage = cgrp->stat.cputime.sum_exec_runtime;
-	cputime_adjust(&cgrp->stat.cputime, &cgrp->stat.prev_cputime,
-		       &utime, &stime);
+	usage = cgrp->bstat.cputime.sum_exec_runtime;
+	cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, &utime, &stime);
 
 	mutex_unlock(&cgroup_rstat_mutex);
 
@@ -302,11 +301,9 @@ int cgroup_rstat_init(struct cgroup *cgrp)
 		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
 
 		rstatc->updated_children = cgrp;
-		u64_stats_init(&rstatc->sync);
+		u64_stats_init(&rstatc->bsync);
 	}
 
-	prev_cputime_init(&cgrp->stat.prev_cputime);
-
 	return 0;
 }
 
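
For reference (not part of the commit): the bsync field renamed above is a u64_stats_sync, and the patch keeps the usual writer/reader protocol around it — writers bracket updates with u64_stats_update_begin()/u64_stats_update_end(), while readers loop on __u64_stats_fetch_begin()/__u64_stats_fetch_retry() until they observe a consistent snapshot. A stripped-down sketch of that pairing, using a hypothetical pcpu_counter type:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical counter; the begin/end and fetch/retry calls mirror the
 * ones used on rstatc->bsync in this patch. */
struct pcpu_counter {
	u64 val;
	struct u64_stats_sync bsync;
};

static void counter_init(struct pcpu_counter *c)
{
	c->val = 0;
	u64_stats_init(&c->bsync);		/* as in cgroup_rstat_init() */
}

/* writer side: cf. cgroup_base_stat_cputime_account_begin()/_end() */
static void counter_add(struct pcpu_counter *c, u64 delta)
{
	u64_stats_update_begin(&c->bsync);
	c->val += delta;
	u64_stats_update_end(&c->bsync);
}

/* reader side: cf. the fetch/retry loop in cgroup_base_stat_flush() */
static u64 counter_read(struct pcpu_counter *c)
{
	unsigned int seq;
	u64 val;

	do {
		seq = __u64_stats_fetch_begin(&c->bsync);
		val = c->val;
	} while (__u64_stats_fetch_retry(&c->bsync, seq));

	return val;
}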