
Commit 31bb5fe

Merge branch 'akpm' (patches from Andrew)

Merge misc mm fixes from Andrew Morton: "7 fixes"

* emailed patches from Andrew Morton <[email protected]>:
  mm: memcontrol: fix percpu vmstats and vmevents flush
  mm, memcg: do not set reclaim_state on soft limit reclaim
  mailmap: add aliases for Dmitry Safonov
  mm/z3fold.c: fix lock/unlock imbalance in z3fold_page_isolate
  mm, memcg: partially revert "mm/memcontrol.c: keep local VM counters in sync with the hierarchical ones"
  mm/zsmalloc.c: fix build when CONFIG_COMPACTION=n
  mm: memcontrol: flush percpu slab vmstats on kmem offlining

2 parents e0f14b8 + 6c1c280

File tree: 6 files changed, +47 -22 lines


.mailmap

Lines changed: 3 additions & 0 deletions

@@ -64,6 +64,9 @@ Dengcheng Zhu <[email protected]> <[email protected]>
 Dmitry Eremin-Solenikov <[email protected]>
+Dmitry Safonov <[email protected]> <[email protected]>
+Dmitry Safonov <[email protected]> <[email protected]>
+Dmitry Safonov <[email protected]> <[email protected]>
 Domen Puncer <[email protected]>
 Douglas Gilbert <[email protected]>
 Ed L. Cashin <[email protected]>

include/linux/mmzone.h

Lines changed: 3 additions & 2 deletions

@@ -215,8 +215,9 @@ enum node_stat_item {
 	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
 	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
 	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
-	NR_SLAB_RECLAIMABLE,
-	NR_SLAB_UNRECLAIMABLE,
+	NR_SLAB_RECLAIMABLE,	/* Please do not reorder this item */
+	NR_SLAB_UNRECLAIMABLE,	/* and this one without looking at
+				 * memcg_flush_percpu_vmstats() first. */
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	WORKINGSET_NODES,
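The new comment exists because the slab-only flush added in mm/memcontrol.c below picks out the slab counters as one contiguous index range running from NR_SLAB_RECLAIMABLE to NR_SLAB_UNRECLAIMABLE. A minimal standalone C sketch of that assumption follows; the enum names, values, and flush_range() helper are illustrative stand-ins, not the kernel's real definitions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel enum; values are made up. */
enum stat_item_sketch {
	ITEM_UNEVICTABLE,
	ITEM_SLAB_RECLAIMABLE,   /* must stay adjacent ...            */
	ITEM_SLAB_UNRECLAIMABLE, /* ... to this one for a range flush */
	ITEM_ISOLATED_ANON,
	NR_ITEMS_SKETCH,
};

/* Flush only the slots inside one inclusive index range. */
static void flush_range(const long *stat, int min_idx, int max_idx)
{
	for (int i = min_idx; i <= max_idx; i++)
		printf("flushing item %d -> %ld\n", i, stat[i]);
}

int main(void)
{
	long stat[NR_ITEMS_SKETCH] = { 10, 20, 30, 40 };

	/* Works only because the two slab items are neighbours;
	 * reordering the enum would silently flush the wrong slots. */
	flush_range(stat, ITEM_SLAB_RECLAIMABLE, ITEM_SLAB_UNRECLAIMABLE);
	return 0;
}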

mm/memcontrol.c

Lines changed: 35 additions & 18 deletions

@@ -752,15 +752,13 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 	/* Update memcg */
 	__mod_memcg_state(memcg, idx, val);
 
+	/* Update lruvec */
+	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
+
 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
 		struct mem_cgroup_per_node *pi;
 
-		/*
-		 * Batch local counters to keep them in sync with
-		 * the hierarchical ones.
-		 */
-		__this_cpu_add(pn->lruvec_stat_local->count[idx], x);
 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
 			atomic_long_add(x, &pi->lruvec_stat[idx]);
 		x = 0;
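This hunk is the partial revert of "mm/memcontrol.c: keep local VM counters in sync with the hierarchical ones": the local, non-hierarchical lruvec counter now receives the raw delta on every update, while only the hierarchical counters stay batched. Below is a standalone sketch of the two behaviours under stated assumptions: plain longs stand in for the percpu and atomic counters, and the names and BATCH value are invented.

#include <stdio.h>
#include <stdlib.h>

#define BATCH 32   /* stand-in for MEMCG_CHARGE_BATCH */

static long local_stat;          /* per-memcg, non-hierarchical        */
static long hierarchical_stat;   /* propagated up the tree             */
static long pending;             /* batched delta awaiting propagation */

static void mod_stat(long val)
{
	/* After the revert: the local counter always gets the exact delta. */
	local_stat += val;

	/* Hierarchical counters are still updated in batches. */
	pending += val;
	if (labs(pending) > BATCH) {
		hierarchical_stat += pending;
		pending = 0;
	}
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		mod_stat(1);

	/* local_stat is exact; hierarchical_stat lags by at most BATCH. */
	printf("local=%ld hierarchical=%ld pending=%ld\n",
	       local_stat, hierarchical_stat, pending);
	return 0;
}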
@@ -3260,37 +3258,49 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 	}
 }
 
-static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
+static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
 {
 	unsigned long stat[MEMCG_NR_STAT];
 	struct mem_cgroup *mi;
 	int node, cpu, i;
+	int min_idx, max_idx;
 
-	for (i = 0; i < MEMCG_NR_STAT; i++)
+	if (slab_only) {
+		min_idx = NR_SLAB_RECLAIMABLE;
+		max_idx = NR_SLAB_UNRECLAIMABLE;
+	} else {
+		min_idx = 0;
+		max_idx = MEMCG_NR_STAT;
+	}
+
+	for (i = min_idx; i < max_idx; i++)
 		stat[i] = 0;
 
 	for_each_online_cpu(cpu)
-		for (i = 0; i < MEMCG_NR_STAT; i++)
-			stat[i] += raw_cpu_read(memcg->vmstats_percpu->stat[i]);
+		for (i = min_idx; i < max_idx; i++)
+			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
 
 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
-		for (i = 0; i < MEMCG_NR_STAT; i++)
+		for (i = min_idx; i < max_idx; i++)
 			atomic_long_add(stat[i], &mi->vmstats[i]);
 
+	if (!slab_only)
+		max_idx = NR_VM_NODE_STAT_ITEMS;
+
 	for_each_node(node) {
 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
 		struct mem_cgroup_per_node *pi;
 
-		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+		for (i = min_idx; i < max_idx; i++)
 			stat[i] = 0;
 
 		for_each_online_cpu(cpu)
-			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-				stat[i] += raw_cpu_read(
-						pn->lruvec_stat_cpu->count[i]);
+			for (i = min_idx; i < max_idx; i++)
+				stat[i] += per_cpu(
+					pn->lruvec_stat_cpu->count[i], cpu);
 
 		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
-			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+			for (i = min_idx; i < max_idx; i++)
 				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
 	}
 }
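The raw_cpu_read() to per_cpu(..., cpu) switch in this function is the "fix percpu vmstats and vmevents flush" patch: inside a for_each_online_cpu() loop, raw_cpu_read() keeps returning the executing CPU's copy, so other CPUs' counts never make it into the sum. A standalone sketch of the same summing bug and its fix, with an ordinary array standing in for per-CPU storage and all names invented:

#include <stdio.h>

#define NR_CPUS_SKETCH 4

/* Stand-in for a per-CPU counter: one slot per CPU. */
static long percpu_counter[NR_CPUS_SKETCH] = { 5, 7, 11, 13 };
static const int this_cpu = 0;	/* CPU the flushing task happens to run on */

int main(void)
{
	long wrong = 0, right = 0;

	for (int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++) {
		/* Bug: ignores 'cpu' and always reads the local copy,
		 * like raw_cpu_read() inside for_each_online_cpu(). */
		wrong += percpu_counter[this_cpu];

		/* Fix: read the copy of the CPU being iterated,
		 * like per_cpu(counter, cpu). */
		right += percpu_counter[cpu];
	}

	printf("wrong sum = %ld, right sum = %ld\n", wrong, right);
	return 0;
}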
@@ -3306,8 +3316,8 @@ static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
 
 	for_each_online_cpu(cpu)
 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
-			events[i] += raw_cpu_read(
-					memcg->vmstats_percpu->events[i]);
+			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
+					     cpu);
 
 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
@@ -3363,7 +3373,14 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 	if (!parent)
 		parent = root_mem_cgroup;
 
+	/*
+	 * Deactivate and reparent kmem_caches. Then flush percpu
+	 * slab statistics to have precise values at the parent and
+	 * all ancestor levels. It's required to keep slab stats
+	 * accurate after the reparenting of kmem_caches.
+	 */
 	memcg_deactivate_kmem_caches(memcg, parent);
+	memcg_flush_percpu_vmstats(memcg, true);
 
 	kmemcg_id = memcg->kmemcg_id;
 	BUG_ON(kmemcg_id < 0);
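The new comment is about ordering: kmem_caches are reparented first, and only then are the dying memcg's pending percpu slab deltas pushed up, so the parent's totals remain accurate once the child goes away. A standalone sketch of what that final flush preserves; the struct, field names, and numbers are all made up.

#include <stdio.h>

struct memcg_sketch {
	long slab_total;	/* hierarchical counter               */
	long slab_pending;	/* percpu deltas not yet propagated   */
	struct memcg_sketch *parent;
};

/* Push pending deltas to this memcg and every ancestor. */
static void flush_slab(struct memcg_sketch *memcg)
{
	for (struct memcg_sketch *mi = memcg; mi; mi = mi->parent)
		mi->slab_total += memcg->slab_pending;
	memcg->slab_pending = 0;
}

int main(void)
{
	struct memcg_sketch parent = { .slab_total = 100 };
	struct memcg_sketch child = { .slab_total = 40, .slab_pending = 8,
				      .parent = &parent };

	/* ... kmem_caches get reparented to 'parent' here ... */

	/* Without this flush, the 8 pending pages would vanish with
	 * the child and the parent's slab stats would stay too low. */
	flush_slab(&child);

	printf("parent slab_total = %ld\n", parent.slab_total); /* 108 */
	return 0;
}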
@@ -4740,7 +4757,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 	 * Flush percpu vmstats and vmevents to guarantee the value correctness
 	 * on parent's and all ancestor levels.
 	 */
-	memcg_flush_percpu_vmstats(memcg);
+	memcg_flush_percpu_vmstats(memcg, false);
 	memcg_flush_percpu_vmevents(memcg);
 	for_each_node(node)
 		free_mem_cgroup_per_node_info(memcg, node);

mm/vmscan.c

Lines changed: 3 additions & 2 deletions

@@ -3220,6 +3220,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
 #ifdef CONFIG_MEMCG
 
+/* Only used by soft limit reclaim. Do not reuse for anything else. */
 unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 						gfp_t gfp_mask, bool noswap,
 						pg_data_t *pgdat,
@@ -3235,7 +3236,8 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 	};
 	unsigned long lru_pages;
 
-	set_task_reclaim_state(current, &sc.reclaim_state);
+	WARN_ON_ONCE(!current->reclaim_state);
+
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 
@@ -3253,7 +3255,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
-	set_task_reclaim_state(current, NULL);
 	*nr_scanned = sc.nr_scanned;
 
 	return sc.nr_reclaimed;
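These hunks are the "do not set reclaim_state on soft limit reclaim" fix: mem_cgroup_shrink_node() is only reached from a path that has already installed current->reclaim_state, and overwriting it here would clobber the slab-reclaim accounting the caller set up, so the function now merely asserts that the state is present. A standalone sketch of that ownership pattern; the struct, globals, and assert() stand in loosely for the task field and WARN_ON_ONCE().

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct reclaim_state_sketch {
	unsigned long reclaimed_slab;
};

/* Stand-in for current->reclaim_state. */
static struct reclaim_state_sketch *task_reclaim_state;

/* Inner helper: rely on the caller's state instead of replacing it. */
static void shrink_node_sketch(void)
{
	assert(task_reclaim_state != NULL);	/* like WARN_ON_ONCE() */
	task_reclaim_state->reclaimed_slab += 4;
}

/* Outer soft-limit path: the only place that owns the state. */
static void soft_limit_reclaim_sketch(void)
{
	struct reclaim_state_sketch rs = { 0 };

	task_reclaim_state = &rs;
	shrink_node_sketch();
	task_reclaim_state = NULL;

	printf("slab pages reclaimed: %lu\n", rs.reclaimed_slab);
}

int main(void)
{
	soft_limit_reclaim_sketch();
	return 0;
}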

mm/z3fold.c

Lines changed: 1 addition & 0 deletions

@@ -1406,6 +1406,7 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
 		 * should freak out.
 		 */
 		WARN(1, "Z3fold is experiencing kref problems\n");
+		z3fold_page_unlock(zhdr);
 		return false;
 	}
 	z3fold_page_unlock(zhdr);
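The z3fold fix is a plain lock/unlock imbalance: the early-return path in z3fold_page_isolate() left the page lock held. A standalone pthread sketch of keeping every return path balanced; the function name and its argument are invented.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every return path must release the lock taken at the top. */
static bool isolate_sketch(bool kref_problem)
{
	pthread_mutex_lock(&page_lock);

	if (kref_problem) {
		fprintf(stderr, "kref problem detected\n");
		pthread_mutex_unlock(&page_lock);	/* the missing unlock */
		return false;
	}

	pthread_mutex_unlock(&page_lock);
	return true;
}

int main(void)
{
	printf("ok path:    %d\n", isolate_sketch(false));
	printf("error path: %d\n", isolate_sketch(true));
	return 0;
}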

mm/zsmalloc.c

Lines changed: 2 additions & 0 deletions

@@ -2412,7 +2412,9 @@ struct zs_pool *zs_create_pool(const char *name)
 	if (!pool->name)
 		goto err;
 
+#ifdef CONFIG_COMPACTION
 	init_waitqueue_head(&pool->migration_wait);
+#endif
 
 	if (create_cache(pool))
 		goto err;
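The zsmalloc fix guards the initialisation of a field that only exists when CONFIG_COMPACTION is set, so a CONFIG_COMPACTION=n build no longer touches a missing struct member. A standalone sketch of the pattern with an invented config macro:

#include <stdio.h>

/* Toggle this to mimic CONFIG_COMPACTION=y / =n builds. */
#define CONFIG_COMPACTION_SKETCH 1

struct pool_sketch {
	const char *name;
#if CONFIG_COMPACTION_SKETCH
	int migration_wait;	/* field exists only in this configuration */
#endif
};

int main(void)
{
	struct pool_sketch pool = { .name = "demo" };

#if CONFIG_COMPACTION_SKETCH
	/* The init must be guarded the same way as the declaration,
	 * otherwise the other configuration fails to compile. */
	pool.migration_wait = 0;
	printf("%s: migration_wait = %d\n", pool.name, pool.migration_wait);
#else
	printf("%s: compaction disabled\n", pool.name);
#endif
	return 0;
}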
