Skip to content

Commit d4ff749

Browse files
committed
cgroup: Distinguish base resource stat implementation from rstat
Base resource stat accounts universal (not specific to any controller) resource consumptions on top of rstat. Currently, its implementation is intermixed with rstat implementation making the code confusing to follow. This patch clarifies the distinction by doing the following. * Encapsulate base resource stat counters, currently only cputime, in struct cgroup_base_stat. * Move prev_cputime into struct cgroup and initialize it with cgroup. * Rename the related functions so that they start with cgroup_base_stat. * Prefix the related variables and field names with b. This patch doesn't make any functional changes. Signed-off-by: Tejun Heo <[email protected]>
1 parent c58632b commit d4ff749

File tree

4 files changed

+52
-50
lines changed

4 files changed

+52
-50
lines changed

include/linux/cgroup-defs.h

Lines changed: 16 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -258,6 +258,10 @@ struct css_set {
258258
struct rcu_head rcu_head;
259259
};
260260

261+
struct cgroup_base_stat {
262+
struct task_cputime cputime;
263+
};
264+
261265
/*
262266
* rstat - cgroup scalable recursive statistics. Accounting is done
263267
* per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
@@ -273,20 +277,24 @@ struct css_set {
273277
* aren't active and stat may be read frequently. The combination can
274278
* become very expensive. By propagating selectively, increasing reading
275279
* frequency decreases the cost of each read.
280+
*
281+
* This struct hosts both the fields which implement the above -
282+
* updated_children and updated_next - and the fields which track basic
283+
* resource statistics on top of it - bsync, bstat and last_bstat.
276284
*/
277285
struct cgroup_rstat_cpu {
278286
/*
279-
* ->sync protects all the current counters. These are the only
280-
* fields which get updated in the hot path.
287+
* ->bsync protects ->bstat. These are the only fields which get
288+
* updated in the hot path.
281289
*/
282-
struct u64_stats_sync sync;
283-
struct task_cputime cputime;
290+
struct u64_stats_sync bsync;
291+
struct cgroup_base_stat bstat;
284292

285293
/*
286294
* Snapshots at the last reading. These are used to calculate the
287295
* deltas to propagate to the global counters.
288296
*/
289-
struct task_cputime last_cputime;
297+
struct cgroup_base_stat last_bstat;
290298

291299
/*
292300
* Child cgroups with stat updates on this cpu since the last read
@@ -303,12 +311,6 @@ struct cgroup_rstat_cpu {
303311
struct cgroup *updated_next; /* NULL iff not on the list */
304312
};
305313

306-
struct cgroup_stat {
307-
/* per-cpu statistics are collected into the folowing global counters */
308-
struct task_cputime cputime;
309-
struct prev_cputime prev_cputime;
310-
};
311-
312314
struct cgroup {
313315
/* self css with NULL ->ss, points back to this cgroup */
314316
struct cgroup_subsys_state self;
@@ -412,8 +414,9 @@ struct cgroup {
412414
struct cgroup_rstat_cpu __percpu *rstat_cpu;
413415

414416
/* cgroup basic resource statistics */
415-
struct cgroup_stat pending_stat; /* pending from children */
416-
struct cgroup_stat stat;
417+
struct cgroup_base_stat pending_bstat; /* pending from children */
418+
struct cgroup_base_stat bstat;
419+
struct prev_cputime prev_cputime; /* for printing out cputime */
417420

418421
/*
419422
* list of pidlists, up to two for each namespace (one for procs, one

kernel/cgroup/cgroup-internal.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,7 @@ int cgroup_task_count(const struct cgroup *cgrp);
206206
void cgroup_rstat_flush(struct cgroup *cgrp);
207207
int cgroup_rstat_init(struct cgroup *cgrp);
208208
void cgroup_rstat_exit(struct cgroup *cgrp);
209-
void cgroup_stat_show_cputime(struct seq_file *seq);
209+
void cgroup_base_stat_cputime_show(struct seq_file *seq);
210210
void cgroup_rstat_boot(void);
211211

212212
/*

kernel/cgroup/cgroup.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@
5454
#include <linux/proc_ns.h>
5555
#include <linux/nsproxy.h>
5656
#include <linux/file.h>
57+
#include <linux/sched/cputime.h>
5758
#include <net/sock.h>
5859

5960
#define CREATE_TRACE_POINTS
@@ -1859,6 +1860,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
18591860
cgrp->dom_cgrp = cgrp;
18601861
cgrp->max_descendants = INT_MAX;
18611862
cgrp->max_depth = INT_MAX;
1863+
prev_cputime_init(&cgrp->prev_cputime);
18621864

18631865
for_each_subsys(ss, ssid)
18641866
INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
@@ -3396,7 +3398,7 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
33963398
struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
33973399
int ret = 0;
33983400

3399-
cgroup_stat_show_cputime(seq);
3401+
cgroup_base_stat_cputime_show(seq);
34003402
#ifdef CONFIG_CGROUP_SCHED
34013403
ret = cgroup_extra_stat_show(seq, cgrp, cpu_cgrp_id);
34023404
#endif

kernel/cgroup/rstat.c

Lines changed: 32 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -128,30 +128,30 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
128128
return pos;
129129
}
130130

131-
static void cgroup_stat_accumulate(struct cgroup_stat *dst_stat,
132-
struct cgroup_stat *src_stat)
131+
static void cgroup_base_stat_accumulate(struct cgroup_base_stat *dst_bstat,
132+
struct cgroup_base_stat *src_bstat)
133133
{
134-
dst_stat->cputime.utime += src_stat->cputime.utime;
135-
dst_stat->cputime.stime += src_stat->cputime.stime;
136-
dst_stat->cputime.sum_exec_runtime += src_stat->cputime.sum_exec_runtime;
134+
dst_bstat->cputime.utime += src_bstat->cputime.utime;
135+
dst_bstat->cputime.stime += src_bstat->cputime.stime;
136+
dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
137137
}
138138

139-
static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
139+
static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
140140
{
141141
struct cgroup *parent = cgroup_parent(cgrp);
142142
struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
143-
struct task_cputime *last_cputime = &rstatc->last_cputime;
143+
struct task_cputime *last_cputime = &rstatc->last_bstat.cputime;
144144
struct task_cputime cputime;
145-
struct cgroup_stat delta;
145+
struct cgroup_base_stat delta;
146146
unsigned seq;
147147

148148
lockdep_assert_held(&cgroup_rstat_mutex);
149149

150150
/* fetch the current per-cpu values */
151151
do {
152-
seq = __u64_stats_fetch_begin(&rstatc->sync);
153-
cputime = rstatc->cputime;
154-
} while (__u64_stats_fetch_retry(&rstatc->sync, seq));
152+
seq = __u64_stats_fetch_begin(&rstatc->bsync);
153+
cputime = rstatc->bstat.cputime;
154+
} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
155155

156156
/* accumulate the deltas to propgate */
157157
delta.cputime.utime = cputime.utime - last_cputime->utime;
@@ -161,13 +161,13 @@ static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
161161
*last_cputime = cputime;
162162

163163
/* transfer the pending stat into delta */
164-
cgroup_stat_accumulate(&delta, &cgrp->pending_stat);
165-
memset(&cgrp->pending_stat, 0, sizeof(cgrp->pending_stat));
164+
cgroup_base_stat_accumulate(&delta, &cgrp->pending_bstat);
165+
memset(&cgrp->pending_bstat, 0, sizeof(cgrp->pending_bstat));
166166

167167
/* propagate delta into the global stat and the parent's pending */
168-
cgroup_stat_accumulate(&cgrp->stat, &delta);
168+
cgroup_base_stat_accumulate(&cgrp->bstat, &delta);
169169
if (parent)
170-
cgroup_stat_accumulate(&parent->pending_stat, &delta);
170+
cgroup_base_stat_accumulate(&parent->pending_bstat, &delta);
171171
}
172172

173173
/* see cgroup_rstat_flush() */
@@ -184,7 +184,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
184184

185185
raw_spin_lock_irq(cpu_lock);
186186
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
187-
cgroup_cpu_stat_flush_one(pos, cpu);
187+
cgroup_base_stat_flush(pos, cpu);
188188
raw_spin_unlock_irq(cpu_lock);
189189
}
190190
}
@@ -208,19 +208,19 @@ void cgroup_rstat_flush(struct cgroup *cgrp)
208208
}
209209

210210
static struct cgroup_rstat_cpu *
211-
cgroup_cpu_stat_account_begin(struct cgroup *cgrp)
211+
cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
212212
{
213213
struct cgroup_rstat_cpu *rstatc;
214214

215215
rstatc = get_cpu_ptr(cgrp->rstat_cpu);
216-
u64_stats_update_begin(&rstatc->sync);
216+
u64_stats_update_begin(&rstatc->bsync);
217217
return rstatc;
218218
}
219219

220-
static void cgroup_cpu_stat_account_end(struct cgroup *cgrp,
221-
struct cgroup_rstat_cpu *rstatc)
220+
static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
221+
struct cgroup_rstat_cpu *rstatc)
222222
{
223-
u64_stats_update_end(&rstatc->sync);
223+
u64_stats_update_end(&rstatc->bsync);
224224
cgroup_rstat_cpu_updated(cgrp, smp_processor_id());
225225
put_cpu_ptr(rstatc);
226226
}
@@ -229,36 +229,36 @@ void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
229229
{
230230
struct cgroup_rstat_cpu *rstatc;
231231

232-
rstatc = cgroup_cpu_stat_account_begin(cgrp);
233-
rstatc->cputime.sum_exec_runtime += delta_exec;
234-
cgroup_cpu_stat_account_end(cgrp, rstatc);
232+
rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
233+
rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
234+
cgroup_base_stat_cputime_account_end(cgrp, rstatc);
235235
}
236236

237237
void __cgroup_account_cputime_field(struct cgroup *cgrp,
238238
enum cpu_usage_stat index, u64 delta_exec)
239239
{
240240
struct cgroup_rstat_cpu *rstatc;
241241

242-
rstatc = cgroup_cpu_stat_account_begin(cgrp);
242+
rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
243243

244244
switch (index) {
245245
case CPUTIME_USER:
246246
case CPUTIME_NICE:
247-
rstatc->cputime.utime += delta_exec;
247+
rstatc->bstat.cputime.utime += delta_exec;
248248
break;
249249
case CPUTIME_SYSTEM:
250250
case CPUTIME_IRQ:
251251
case CPUTIME_SOFTIRQ:
252-
rstatc->cputime.stime += delta_exec;
252+
rstatc->bstat.cputime.stime += delta_exec;
253253
break;
254254
default:
255255
break;
256256
}
257257

258-
cgroup_cpu_stat_account_end(cgrp, rstatc);
258+
cgroup_base_stat_cputime_account_end(cgrp, rstatc);
259259
}
260260

261-
void cgroup_stat_show_cputime(struct seq_file *seq)
261+
void cgroup_base_stat_cputime_show(struct seq_file *seq)
262262
{
263263
struct cgroup *cgrp = seq_css(seq)->cgroup;
264264
u64 usage, utime, stime;
@@ -270,9 +270,8 @@ void cgroup_stat_show_cputime(struct seq_file *seq)
270270

271271
cgroup_rstat_flush_locked(cgrp);
272272

273-
usage = cgrp->stat.cputime.sum_exec_runtime;
274-
cputime_adjust(&cgrp->stat.cputime, &cgrp->stat.prev_cputime,
275-
&utime, &stime);
273+
usage = cgrp->bstat.cputime.sum_exec_runtime;
274+
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, &utime, &stime);
276275

277276
mutex_unlock(&cgroup_rstat_mutex);
278277

@@ -302,11 +301,9 @@ int cgroup_rstat_init(struct cgroup *cgrp)
302301
struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
303302

304303
rstatc->updated_children = cgrp;
305-
u64_stats_init(&rstatc->sync);
304+
u64_stats_init(&rstatc->bsync);
306305
}
307306

308-
prev_cputime_init(&cgrp->stat.prev_cputime);
309-
310307
return 0;
311308
}
312309

0 commit comments

Comments
 (0)