Commit c58632b

cgroup: Rename stat to rstat
stat is too generic a name and ends up causing subtle confusions. It'll
be made generic so that controllers can plug into it, which will make the
problem worse. Let's rename it to something more specific - cgroup_rstat
for cgroup recursive stat.

This patch does the following renames. No other changes.

* cpu_stat -> rstat_cpu
* stat -> rstat
* ?cstat -> ?rstatc

Note that the renames are selective. The unrenamed are the ones which
implement basic resource statistics on top of rstat. This will be further
cleaned up in the following patches.

Signed-off-by: Tejun Heo <[email protected]>
1 parent a5c2b93 commit c58632b
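
Read together, the rules rename the per-cpu struct type, the cgroup field that points at it, and the "cstat"-style local variables. A hypothetical before/after fragment showing how they compose (the variable names are only examples to illustrate the ?cstat -> ?rstatc rule; the hunks that actually touch such locals are not reproduced on this page):

        /* before: per-cpu stat struct, field and local all named after "stat" */
        struct cgroup_cpu_stat *cstat = per_cpu_ptr(cgrp->cpu_stat, cpu);

        /* after: same access through the renamed type, field and local */
        struct cgroup_rstat_cpu *rstatc = per_cpu_ptr(cgrp->rstat_cpu, cpu);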

4 files changed, +112 -108 lines changed


include/linux/cgroup-defs.h  (9 additions, 7 deletions)

@@ -259,11 +259,11 @@ struct css_set {
 };
 
 /*
- * cgroup basic resource usage statistics. Accounting is done per-cpu in
- * cgroup_cpu_stat which is then lazily propagated up the hierarchy on
- * reads.
+ * rstat - cgroup scalable recursive statistics. Accounting is done
+ * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
+ * hierarchy on reads.
  *
- * When a stat gets updated, the cgroup_cpu_stat and its ancestors are
+ * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
  * linked into the updated tree. On the following read, propagation only
  * considers and consumes the updated tree. This makes reading O(the
  * number of descendants which have been active since last read) instead of
@@ -274,7 +274,7 @@ struct css_set {
  * become very expensive. By propagating selectively, increasing reading
  * frequency decreases the cost of each read.
  */
-struct cgroup_cpu_stat {
+struct cgroup_rstat_cpu {
         /*
          * ->sync protects all the current counters. These are the only
          * fields which get updated in the hot path.
@@ -297,7 +297,7 @@ struct cgroup_cpu_stat {
          * to the cgroup makes it unnecessary for each per-cpu struct to
          * point back to the associated cgroup.
          *
-         * Protected by per-cpu cgroup_cpu_stat_lock.
+         * Protected by per-cpu cgroup_rstat_cpu_lock.
          */
         struct cgroup *updated_children;        /* terminated by self cgroup */
         struct cgroup *updated_next;            /* NULL iff not on the list */
@@ -408,8 +408,10 @@ struct cgroup {
          */
         struct cgroup *dom_cgrp;
 
+        /* per-cpu recursive resource statistics */
+        struct cgroup_rstat_cpu __percpu *rstat_cpu;
+
         /* cgroup basic resource statistics */
-        struct cgroup_cpu_stat __percpu *cpu_stat;
         struct cgroup_stat pending_stat;        /* pending from children */
         struct cgroup_stat stat;
 

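The comment added above describes the core rstat mechanism: an update links the updated cgroup and any not-yet-linked ancestors into an "updated tree", and a read consumes only that tree. Below is a minimal, single-CPU userspace sketch of that idea, for illustration only - it keeps the updated_children/updated_next names from the struct above but uses NULL-terminated lists instead of self-terminated ones, tracks a single counter, and models none of the per-cpu or locking machinery (cgroup_rstat_cpu_lock):

        /*
         * Toy model of the rstat "updated tree". Each node keeps a local
         * delta and a flushed total. node_update() links the node and its
         * ancestors into their parents' updated_children lists;
         * node_flush() walks only those lists, so a read touches just the
         * recently active part of the hierarchy.
         */
        #include <stdbool.h>
        #include <stdio.h>

        struct node {
                const char *name;
                struct node *parent;

                long delta;                     /* accumulated since last flush */
                long total;                     /* flushed recursive total */

                struct node *updated_children;  /* children with pending updates */
                struct node *updated_next;      /* sibling link on parent's list */
                bool on_updated_tree;
        };

        static void node_update(struct node *n, long v)
        {
                n->delta += v;

                /*
                 * Link @n and its ancestors into the updated tree; stop as
                 * soon as an ancestor is already linked.
                 */
                for (; n->parent; n = n->parent) {
                        if (n->on_updated_tree)
                                break;
                        n->updated_next = n->parent->updated_children;
                        n->parent->updated_children = n;
                        n->on_updated_tree = true;
                }
        }

        static void node_flush(struct node *n)
        {
                struct node *c = n->updated_children;

                n->updated_children = NULL;
                while (c) {
                        struct node *next = c->updated_next;

                        node_flush(c);          /* consume the updated subtree */
                        n->delta += c->delta;   /* propagate child's delta upward */
                        c->total += c->delta;
                        c->delta = 0;
                        c->updated_next = NULL;
                        c->on_updated_tree = false;
                        c = next;
                }
                if (!n->parent) {               /* root has nowhere to propagate to */
                        n->total += n->delta;
                        n->delta = 0;
                }
        }

        int main(void)
        {
                struct node root = { .name = "root" };
                struct node a = { .name = "a", .parent = &root };
                struct node b = { .name = "a/b", .parent = &a };

                node_update(&b, 5);     /* links b under a, and a under root */
                node_update(&b, 2);     /* ancestors already linked, stops at once */
                node_flush(&root);      /* visits only root, a and b */

                printf("root=%ld a=%ld a/b=%ld\n", root.total, a.total, b.total);
                return 0;
        }

Flushing the root after a burst of updates visits only the nodes those updates linked into the tree, which is the O(number of descendants active since the last read) cost the comment claims.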
kernel/cgroup/cgroup-internal.h  (5 additions, 5 deletions)

@@ -201,13 +201,13 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
 int cgroup_task_count(const struct cgroup *cgrp);
 
 /*
- * stat.c
+ * rstat.c
  */
-void cgroup_stat_flush(struct cgroup *cgrp);
-int cgroup_stat_init(struct cgroup *cgrp);
-void cgroup_stat_exit(struct cgroup *cgrp);
+void cgroup_rstat_flush(struct cgroup *cgrp);
+int cgroup_rstat_init(struct cgroup *cgrp);
+void cgroup_rstat_exit(struct cgroup *cgrp);
 void cgroup_stat_show_cputime(struct seq_file *seq);
-void cgroup_stat_boot(void);
+void cgroup_rstat_boot(void);
 
 /*
  * namespace.c

kernel/cgroup/cgroup.c  (7 additions, 7 deletions)

@@ -144,14 +144,14 @@ static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
 };
 #undef SUBSYS
 
-static DEFINE_PER_CPU(struct cgroup_cpu_stat, cgrp_dfl_root_cpu_stat);
+static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu);
 
 /*
  * The default hierarchy, reserved for the subsystems that are otherwise
  * unattached - it never has more than a single cgroup, and all tasks are
  * part of that cgroup.
  */
-struct cgroup_root cgrp_dfl_root = { .cgrp.cpu_stat = &cgrp_dfl_root_cpu_stat };
+struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
 EXPORT_SYMBOL_GPL(cgrp_dfl_root);
 
 /*
@@ -4592,7 +4592,7 @@ static void css_free_rwork_fn(struct work_struct *work)
                         cgroup_put(cgroup_parent(cgrp));
                         kernfs_put(cgrp->kn);
                         if (cgroup_on_dfl(cgrp))
-                                cgroup_stat_exit(cgrp);
+                                cgroup_rstat_exit(cgrp);
                         kfree(cgrp);
                 } else {
                         /*
@@ -4629,7 +4629,7 @@ static void css_release_work_fn(struct work_struct *work)
                 trace_cgroup_release(cgrp);
 
                 if (cgroup_on_dfl(cgrp))
-                        cgroup_stat_flush(cgrp);
+                        cgroup_rstat_flush(cgrp);
 
                 for (tcgrp = cgroup_parent(cgrp); tcgrp;
                      tcgrp = cgroup_parent(tcgrp))
@@ -4817,7 +4817,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
                 goto out_free_cgrp;
 
         if (cgroup_on_dfl(parent)) {
-                ret = cgroup_stat_init(cgrp);
+                ret = cgroup_rstat_init(cgrp);
                 if (ret)
                         goto out_cancel_ref;
         }
@@ -4882,7 +4882,7 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
         cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 out_stat_exit:
         if (cgroup_on_dfl(parent))
-                cgroup_stat_exit(cgrp);
+                cgroup_rstat_exit(cgrp);
 out_cancel_ref:
         percpu_ref_exit(&cgrp->self.refcnt);
 out_free_cgrp:
@@ -5275,7 +5275,7 @@ int __init cgroup_init(void)
         BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
         BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
 
-        cgroup_stat_boot();
+        cgroup_rstat_boot();
 
         /*
          * The latency of the synchronize_sched() is too high for cgroups,
