Skip to content

Commit 8a4b42b

Browse files
shakeelbakpm00
authored and committed
memcg: memcg_rstat_updated re-entrant safe against irqs
Patch series "memcg: make memcg stats irq safe", v2. This series converts memcg stats to be irq safe i.e. memcg stats can be updated in any context (task, softirq or hardirq) without disabling the irqs. This is still not nmi-safe on all architectures but after this series converting memcg charging and stats nmi-safe will be easier. This patch (of 7): memcg_rstat_updated() is used to track the memcg stats updates for optimizing the flushes. At the moment, it is not re-entrant safe and the callers disabled irqs before calling. However to achieve the goal of updating memcg stats without irqs, memcg_rstat_updated() needs to be re-entrant safe against irqs. This patch makes memcg_rstat_updated() re-entrant safe using this_cpu_* ops. On archs with CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS, this patch is also making memcg_rstat_updated() nmi safe. [[email protected]: fix build] Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Shakeel Butt <[email protected]> Signed-off-by: Lorenzo Stoakes <[email protected]> Reviewed-by: Vlastimil Babka <[email protected]> Tested-by: Alexei Starovoitov <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Muchun Song <[email protected]> Cc: Roman Gushchin <[email protected]> Cc: Sebastian Andrzej Siewior <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent cc79061 commit 8a4b42b

File tree

1 file changed

+18
-11
lines changed

1 file changed

+18
-11
lines changed

mm/memcontrol.c

Lines changed: 18 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -503,8 +503,8 @@ struct memcg_vmstats_percpu {
503503
unsigned int stats_updates;
504504

505505
/* Cached pointers for fast iteration in memcg_rstat_updated() */
506-
struct memcg_vmstats_percpu *parent;
507-
struct memcg_vmstats *vmstats;
506+
struct memcg_vmstats_percpu __percpu *parent_pcpu;
507+
struct memcg_vmstats *vmstats;
508508

509509
/* The above should fit a single cacheline for memcg_rstat_updated() */
510510

@@ -586,16 +586,21 @@ static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
586586

587587
static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
588588
{
589+
struct memcg_vmstats_percpu __percpu *statc_pcpu;
589590
struct memcg_vmstats_percpu *statc;
590-
int cpu = smp_processor_id();
591+
int cpu;
591592
unsigned int stats_updates;
592593

593594
if (!val)
594595
return;
595596

597+
/* Don't assume callers have preemption disabled. */
598+
cpu = get_cpu();
599+
596600
cgroup_rstat_updated(memcg->css.cgroup, cpu);
597-
statc = this_cpu_ptr(memcg->vmstats_percpu);
598-
for (; statc; statc = statc->parent) {
601+
statc_pcpu = memcg->vmstats_percpu;
602+
for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
603+
statc = this_cpu_ptr(statc_pcpu);
599604
/*
600605
* If @memcg is already flushable then all its ancestors are
601606
* flushable as well and also there is no need to increase
@@ -604,14 +609,15 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
604609
if (memcg_vmstats_needs_flush(statc->vmstats))
605610
break;
606611

607-
stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
608-
WRITE_ONCE(statc->stats_updates, stats_updates);
612+
stats_updates = this_cpu_add_return(statc_pcpu->stats_updates,
613+
abs(val));
609614
if (stats_updates < MEMCG_CHARGE_BATCH)
610615
continue;
611616

617+
stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
612618
atomic64_add(stats_updates, &statc->vmstats->stats_updates);
613-
WRITE_ONCE(statc->stats_updates, 0);
614619
}
620+
put_cpu();
615621
}
616622

617623
static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
@@ -3689,7 +3695,8 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
36893695

36903696
static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
36913697
{
3692-
struct memcg_vmstats_percpu *statc, *pstatc;
3698+
struct memcg_vmstats_percpu *statc;
3699+
struct memcg_vmstats_percpu __percpu *pstatc_pcpu;
36933700
struct mem_cgroup *memcg;
36943701
int node, cpu;
36953702
int __maybe_unused i;
@@ -3720,9 +3727,9 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
37203727

37213728
for_each_possible_cpu(cpu) {
37223729
if (parent)
3723-
pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
3730+
pstatc_pcpu = parent->vmstats_percpu;
37243731
statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3725-
statc->parent = parent ? pstatc : NULL;
3732+
statc->parent_pcpu = parent ? pstatc_pcpu : NULL;
37263733
statc->vmstats = memcg->vmstats;
37273734
}
37283735

0 commit comments

Comments
 (0)