
Commit 093c881

yosrym93 authored and htejun committed
cgroup: rstat: Cleanup flushing functions and locking
Now that the rstat lock is being re-acquired on every CPU iteration in cgroup_rstat_flush_locked(), having the caller initially acquire the lock is unnecessary and unclear. Inline cgroup_rstat_flush_locked() into cgroup_rstat_flush() and move the lock/unlock calls to the beginning and end of the loop body to make the critical section obvious.

cgroup_rstat_flush_hold/release() do not make much sense with the lock being dropped and re-acquired internally. Since they have no external callers, remove them and explicitly acquire the lock in cgroup_base_stat_cputime_show() instead.

This leaves the code with a single flushing function, cgroup_rstat_flush().

Signed-off-by: Yosry Ahmed <[email protected]>
Signed-off-by: Tejun Heo <[email protected]>
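The locking shape the patch settles on is easiest to see outside the diff. Below is a minimal user-space sketch, not kernel code; names such as flush_lock, percpu_stat and flush_all are hypothetical stand-ins for illustration only. It shows the pattern the commit adopts: the lock is acquired and released once per CPU iteration, so no single critical section spans more than one CPU's worth of flushing, and other threads (in the kernel, IRQs) are never locked out for long.

/*
 * User-space sketch of the per-CPU critical-section pattern.
 * All names are hypothetical; this is not the kernel's code.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;
static long percpu_stat[NR_CPUS] = { 1, 2, 3, 4 };
static long global_stat;

static void flush_all(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* reacquire per iteration: bounds each critical section */
		pthread_mutex_lock(&flush_lock);
		global_stat += percpu_stat[cpu];
		percpu_stat[cpu] = 0;
		pthread_mutex_unlock(&flush_lock);

		sched_yield();	/* stand-in for cond_resched() */
	}
}

int main(void)
{
	flush_all();
	printf("global_stat = %ld\n", global_stat);
	return 0;
}

In the kernel the lock also disables interrupts (spin_lock_irq()), which is why bounding each critical section matters; sched_yield() here plays the role of cond_resched() in the loop below.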
1 parent 0efc297 commit 093c881

File tree

2 files changed: +20 -61 lines changed

include/linux/cgroup.h

Lines changed: 0 additions & 2 deletions

@@ -690,8 +690,6 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
  */
 void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
 void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_hold(struct cgroup *cgrp);
-void cgroup_rstat_flush_release(struct cgroup *cgrp);
 
 /*
  * Basic resource stats.
kernel/cgroup/rstat.c

Lines changed: 20 additions & 59 deletions

@@ -299,17 +299,29 @@ static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
 	spin_unlock_irq(&cgroup_rstat_lock);
 }
 
-/* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
-	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
+/**
+ * cgroup_rstat_flush - flush stats in @cgrp's subtree
+ * @cgrp: target cgroup
+ *
+ * Collect all per-cpu stats in @cgrp's subtree into the global counters
+ * and propagate them upwards. After this function returns, all cgroups in
+ * the subtree have up-to-date ->stat.
+ *
+ * This also gets all cgroups in the subtree including @cgrp off the
+ * ->updated_children lists.
+ *
+ * This function may block.
+ */
+__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
 {
 	int cpu;
 
-	lockdep_assert_held(&cgroup_rstat_lock);
-
+	might_sleep();
 	for_each_possible_cpu(cpu) {
 		struct cgroup *pos = cgroup_rstat_updated_list(cgrp, cpu);
 
+		/* Reacquire for each CPU to avoid disabling IRQs too long */
+		__cgroup_rstat_lock(cgrp, cpu);
 		for (; pos; pos = pos->rstat_flush_next) {
 			struct cgroup_subsys_state *css;
 
@@ -322,64 +334,12 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
 				css->ss->css_rstat_flush(css, cpu);
 			rcu_read_unlock();
 		}
-
-		/* play nice and avoid disabling interrupts for a long time */
 		__cgroup_rstat_unlock(cgrp, cpu);
 		if (!cond_resched())
 			cpu_relax();
-		__cgroup_rstat_lock(cgrp, cpu);
 	}
 }
 
-/**
- * cgroup_rstat_flush - flush stats in @cgrp's subtree
- * @cgrp: target cgroup
- *
- * Collect all per-cpu stats in @cgrp's subtree into the global counters
- * and propagate them upwards. After this function returns, all cgroups in
- * the subtree have up-to-date ->stat.
- *
- * This also gets all cgroups in the subtree including @cgrp off the
- * ->updated_children lists.
- *
- * This function may block.
- */
-__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
-{
-	might_sleep();
-
-	__cgroup_rstat_lock(cgrp, -1);
-	cgroup_rstat_flush_locked(cgrp);
-	__cgroup_rstat_unlock(cgrp, -1);
-}
-
-/**
- * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
- * @cgrp: target cgroup
- *
- * Flush stats in @cgrp's subtree and prevent further flushes. Must be
- * paired with cgroup_rstat_flush_release().
- *
- * This function may block.
- */
-void cgroup_rstat_flush_hold(struct cgroup *cgrp)
-	__acquires(&cgroup_rstat_lock)
-{
-	might_sleep();
-	__cgroup_rstat_lock(cgrp, -1);
-	cgroup_rstat_flush_locked(cgrp);
-}
-
-/**
- * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
- * @cgrp: cgroup used by tracepoint
- */
-void cgroup_rstat_flush_release(struct cgroup *cgrp)
-	__releases(&cgroup_rstat_lock)
-{
-	__cgroup_rstat_unlock(cgrp, -1);
-}
-
 int cgroup_rstat_init(struct cgroup *cgrp)
 {
 	int cpu;
@@ -614,11 +574,12 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
 	struct cgroup_base_stat bstat;
 
 	if (cgroup_parent(cgrp)) {
-		cgroup_rstat_flush_hold(cgrp);
+		cgroup_rstat_flush(cgrp);
+		__cgroup_rstat_lock(cgrp, -1);
 		bstat = cgrp->bstat;
 		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
 			       &bstat.cputime.utime, &bstat.cputime.stime);
-		cgroup_rstat_flush_release(cgrp);
+		__cgroup_rstat_unlock(cgrp, -1);
 	} else {
 		root_cgroup_cputime(&bstat);
 	}

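The last hunk replaces the flush-and-hold pattern with an explicit sequence: flush first without holding the lock across the whole operation, then take the lock only long enough to copy a consistent snapshot. A hedged sketch of that shape, continuing the hypothetical user-space names from the sketch above:

/*
 * Sketch of the new show-path shape: flush outside the lock, then
 * hold the lock only to copy a consistent snapshot. Hypothetical
 * names, reusing flush_all/flush_lock/global_stat from the earlier
 * sketch; not the kernel's code.
 */
static long read_stat_snapshot(void)
{
	long snapshot;

	flush_all();				/* may block; lock not held throughout */
	pthread_mutex_lock(&flush_lock);	/* short critical section */
	snapshot = global_stat;			/* consistent copy */
	pthread_mutex_unlock(&flush_lock);
	return snapshot;
}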