Commit a17556f

cgroup: Reorganize kernel/cgroup/rstat.c
Currently, rstat.c has the rstat and base stat implementations intermixed.
Collect the base stat implementation at the end of the file and reorder the
prototypes accordingly.

This patch doesn't make any functional changes.

Signed-off-by: Tejun Heo <[email protected]>
1 parent d4ff749 commit a17556f
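For orientation, here is a rough, declarations-only sketch of how kernel/cgroup/rstat.c is organized after this patch: the rstat core (flush, init, exit, boot) comes first and the base stat code built on top of it sits at the end, matching the reordered prototypes in cgroup-internal.h below. The outline is illustrative and not part of the patch; the struct types are forward-declared only so the sketch stands alone.

/* Illustrative outline of the post-patch layout (not part of the patch). */
struct cgroup;
struct seq_file;

/* rstat core: per-cpu updated-list walking and flushing */
void cgroup_rstat_flush(struct cgroup *cgrp);
int cgroup_rstat_init(struct cgroup *cgrp);
void cgroup_rstat_exit(struct cgroup *cgrp);
void cgroup_rstat_boot(void);

/* base resource stats implemented on top of rstat, now collected last */
void cgroup_base_stat_cputime_show(struct seq_file *seq);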

File tree: 2 files changed, 95 additions and 89 deletions


kernel/cgroup/cgroup-internal.h
Lines changed: 1 addition & 1 deletion

@@ -206,8 +206,8 @@ int cgroup_task_count(const struct cgroup *cgrp);
 void cgroup_rstat_flush(struct cgroup *cgrp);
 int cgroup_rstat_init(struct cgroup *cgrp);
 void cgroup_rstat_exit(struct cgroup *cgrp);
-void cgroup_base_stat_cputime_show(struct seq_file *seq);
 void cgroup_rstat_boot(void);
+void cgroup_base_stat_cputime_show(struct seq_file *seq);
 
 /*
  * namespace.c

kernel/cgroup/rstat.c
Lines changed: 94 additions & 88 deletions

@@ -5,6 +5,8 @@
 static DEFINE_MUTEX(cgroup_rstat_mutex);
 static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 
+static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
+
 static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
 {
 	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
@@ -128,6 +130,98 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 	return pos;
 }
 
+/* see cgroup_rstat_flush() */
+static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
+{
+	int cpu;
+
+	lockdep_assert_held(&cgroup_rstat_mutex);
+
+	for_each_possible_cpu(cpu) {
+		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
+						       cpu);
+		struct cgroup *pos = NULL;
+
+		raw_spin_lock_irq(cpu_lock);
+		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
+			cgroup_base_stat_flush(pos, cpu);
+		raw_spin_unlock_irq(cpu_lock);
+	}
+}
+
+/**
+ * cgroup_rstat_flush - flush stats in @cgrp's subtree
+ * @cgrp: target cgroup
+ *
+ * Collect all per-cpu stats in @cgrp's subtree into the global counters
+ * and propagate them upwards.  After this function returns, all cgroups in
+ * the subtree have up-to-date ->stat.
+ *
+ * This also gets all cgroups in the subtree including @cgrp off the
+ * ->updated_children lists.
+ */
+void cgroup_rstat_flush(struct cgroup *cgrp)
+{
+	mutex_lock(&cgroup_rstat_mutex);
+	cgroup_rstat_flush_locked(cgrp);
+	mutex_unlock(&cgroup_rstat_mutex);
+}
+
+int cgroup_rstat_init(struct cgroup *cgrp)
+{
+	int cpu;
+
+	/* the root cgrp has rstat_cpu preallocated */
+	if (!cgrp->rstat_cpu) {
+		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
+		if (!cgrp->rstat_cpu)
+			return -ENOMEM;
+	}
+
+	/* ->updated_children list is self terminated */
+	for_each_possible_cpu(cpu) {
+		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
+
+		rstatc->updated_children = cgrp;
+		u64_stats_init(&rstatc->bsync);
+	}
+
+	return 0;
+}
+
+void cgroup_rstat_exit(struct cgroup *cgrp)
+{
+	int cpu;
+
+	cgroup_rstat_flush(cgrp);
+
+	/* sanity check */
+	for_each_possible_cpu(cpu) {
+		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
+
+		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
+		    WARN_ON_ONCE(rstatc->updated_next))
+			return;
+	}
+
+	free_percpu(cgrp->rstat_cpu);
+	cgrp->rstat_cpu = NULL;
+}
+
+void __init cgroup_rstat_boot(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
+
+	BUG_ON(cgroup_rstat_init(&cgrp_dfl_root.cgrp));
+}
+
+/*
+ * Functions for cgroup basic resource statistics implemented on top of
+ * rstat.
+ */
 static void cgroup_base_stat_accumulate(struct cgroup_base_stat *dst_bstat,
 					struct cgroup_base_stat *src_bstat)
 {
@@ -170,43 +264,6 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
 		cgroup_base_stat_accumulate(&parent->pending_bstat, &delta);
 }
 
-/* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
-{
-	int cpu;
-
-	lockdep_assert_held(&cgroup_rstat_mutex);
-
-	for_each_possible_cpu(cpu) {
-		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
-						       cpu);
-		struct cgroup *pos = NULL;
-
-		raw_spin_lock_irq(cpu_lock);
-		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
-			cgroup_base_stat_flush(pos, cpu);
-		raw_spin_unlock_irq(cpu_lock);
-	}
-}
-
-/**
- * cgroup_rstat_flush - flush stats in @cgrp's subtree
- * @cgrp: target cgroup
- *
- * Collect all per-cpu stats in @cgrp's subtree into the global counters
- * and propagate them upwards.  After this function returns, all cgroups in
- * the subtree have up-to-date ->stat.
- *
- * This also gets all cgroups in the subtree including @cgrp off the
- * ->updated_children lists.
- */
-void cgroup_rstat_flush(struct cgroup *cgrp)
-{
-	mutex_lock(&cgroup_rstat_mutex);
-	cgroup_rstat_flush_locked(cgrp);
-	mutex_unlock(&cgroup_rstat_mutex);
-}
-
 static struct cgroup_rstat_cpu *
 cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
 {
@@ -284,54 +341,3 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
 		   "system_usec %llu\n",
 		   usage, utime, stime);
 }
-
-int cgroup_rstat_init(struct cgroup *cgrp)
-{
-	int cpu;
-
-	/* the root cgrp has rstat_cpu preallocated */
-	if (!cgrp->rstat_cpu) {
-		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
-		if (!cgrp->rstat_cpu)
-			return -ENOMEM;
-	}
-
-	/* ->updated_children list is self terminated */
-	for_each_possible_cpu(cpu) {
-		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
-
-		rstatc->updated_children = cgrp;
-		u64_stats_init(&rstatc->bsync);
-	}
-
-	return 0;
-}
-
-void cgroup_rstat_exit(struct cgroup *cgrp)
-{
-	int cpu;
-
-	cgroup_rstat_flush(cgrp);
-
-	/* sanity check */
-	for_each_possible_cpu(cpu) {
-		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
-
-		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
-		    WARN_ON_ONCE(rstatc->updated_next))
-			return;
-	}
-
-	free_percpu(cgrp->rstat_cpu);
-	cgrp->rstat_cpu = NULL;
-}
-
-void __init cgroup_rstat_boot(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
-
-	BUG_ON(cgroup_rstat_init(&cgrp_dfl_root.cgrp));
-}
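The locking pattern of the moved flush path is the main thing to notice: a single cgroup_rstat_mutex serializes flushers, while each CPU's updated list is drained under that CPU's raw spinlock with interrupts disabled. Below is a minimal userspace sketch of the same two-level pattern, assuming pthread mutexes in place of the kernel's mutex and per-CPU raw spinlocks; every name in it is made up for illustration and none of it is kernel code.

/*
 * Userspace sketch (not kernel code) of the flush pattern above:
 * one flusher mutex plus one lock per CPU bucket, taken in turn
 * while that bucket's pending delta is drained into the global total.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_mutex_t flush_mutex = PTHREAD_MUTEX_INITIALIZER; /* like cgroup_rstat_mutex */
static pthread_mutex_t cpu_lock[NR_CPUS];                       /* like cgroup_rstat_cpu_lock */
static long long pending[NR_CPUS];                              /* per-CPU deltas awaiting flush */
static long long total;                                         /* global counter */

/* updater side: account a delta under the owning bucket's lock only */
static void account(int cpu, long long delta)
{
	pthread_mutex_lock(&cpu_lock[cpu]);
	pending[cpu] += delta;
	pthread_mutex_unlock(&cpu_lock[cpu]);
}

/* flusher side: mirrors cgroup_rstat_flush()/cgroup_rstat_flush_locked() */
static void flush(void)
{
	pthread_mutex_lock(&flush_mutex);
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_mutex_lock(&cpu_lock[cpu]);
		total += pending[cpu];
		pending[cpu] = 0;
		pthread_mutex_unlock(&cpu_lock[cpu]);
	}
	pthread_mutex_unlock(&flush_mutex);
}

int main(void)
{
	/* one-time lock setup, in the spirit of cgroup_rstat_boot() */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_mutex_init(&cpu_lock[cpu], NULL);

	account(0, 10);
	account(2, 5);
	flush();
	printf("total = %lld\n", total); /* prints 15 */
	return 0;
}

The split mirrors the kernel structure: updaters only ever touch their own bucket's lock, so they never contend on the flusher mutex, and the flusher holds each per-CPU lock only long enough to drain that CPU's pending state.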
