
Commit 8380ce4

rgushchin authored and torvalds committed
mm: fork: fix kernel_stack memcg stats for various stack implementations
Depending on CONFIG_VMAP_STACK and the THREAD_SIZE / PAGE_SIZE ratio, the space for task stacks can be allocated using __vmalloc_node_range(), alloc_pages_node() or kmem_cache_alloc_node().

In the first and second cases the page->mem_cgroup pointer is set, but in the third it's not: memcg membership of a slab page should be determined using the memcg_from_slab_page() function, which looks at page->slab_cache->memcg_params.memcg. In this case, using mod_memcg_page_state() (as in account_kernel_stack()) is incorrect: the page->mem_cgroup pointer is NULL even for pages charged to a non-root memory cgroup.

This can lead to the kernel_stack per-memcg counters permanently showing 0 on some architectures (depending on the configuration).

In order to fix it, let's introduce a mod_memcg_obj_state() helper, which takes a pointer to a kernel object as its first argument, uses mem_cgroup_from_obj() to get an RCU-protected memcg pointer, and calls mod_memcg_state(). It allows handling all possible configurations (CONFIG_VMAP_STACK and various THREAD_SIZE/PAGE_SIZE values) without spilling any memcg/kmem specifics into fork.c.

Note: this is a special version of the patch created for stable backports. It contains code from the following two patches:
  - mm: memcg/slab: introduce mem_cgroup_from_obj()
  - mm: fork: fix kernel_stack memcg stats for various stack implementations

[[email protected]: introduce mem_cgroup_from_obj()]
Link: http://lkml.kernel.org/r/[email protected]

Fixes: 4d96ba3 ("mm: memcg/slab: stop setting page->mem_cgroup pointer for slab pages")
Signed-off-by: Roman Gushchin <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Bharata B Rao <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 726b7bb commit 8380ce4
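
For orientation, the sketch below condenses how kernel/fork.c chooses among the three stack allocators named in the commit message. It is a simplified illustration, not the literal kernel source; only the slab-backed path leaves page->mem_cgroup unset.

	/*
	 * Condensed sketch of the task-stack allocation paths; simplified from
	 * kernel/fork.c for illustration only.
	 */
	#if defined(CONFIG_VMAP_STACK)
		/* Path 1: vmalloc-backed stack; the backing pages get page->mem_cgroup set. */
		stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, /* ... */);
	#elif THREAD_SIZE >= PAGE_SIZE
		/* Path 2: whole pages from the buddy allocator; page->mem_cgroup is set here too. */
		page = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
	#else
		/*
		 * Path 3: THREAD_SIZE < PAGE_SIZE, so the stack is a slab object.
		 * page->mem_cgroup stays NULL; the memcg is reachable only via
		 * page->slab_cache->memcg_params.memcg.
		 */
		stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
	#endif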

File tree

3 files changed: +52 −2 lines changed

include/linux/memcontrol.h

Lines changed: 12 additions & 0 deletions
@@ -695,6 +695,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			int val);
 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
+void mod_memcg_obj_state(void *p, int idx, int val);
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
@@ -1123,6 +1124,10 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
 	__mod_node_page_state(page_pgdat(page), idx, val);
 }
 
+static inline void mod_memcg_obj_state(void *p, int idx, int val)
+{
+}
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 					    gfp_t gfp_mask,
@@ -1427,6 +1432,8 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
+struct mem_cgroup *mem_cgroup_from_obj(void *p);
+
 #else
 
 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
@@ -1468,6 +1475,11 @@ static inline void memcg_put_cache_ids(void)
 {
 }
 
+static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_MEMCG_KMEM */
 
 #endif /* _LINUX_MEMCONTROL_H */

kernel/fork.c

Lines changed: 2 additions & 2 deletions
@@ -397,8 +397,8 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
 		mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
 				    THREAD_SIZE / 1024 * account);
 
-		mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
-				     account * (THREAD_SIZE / 1024));
+		mod_memcg_obj_state(stack, MEMCG_KERNEL_STACK_KB,
+				    account * (THREAD_SIZE / 1024));
 	}
 }
 
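
Passing stack (the object pointer) rather than first_page matters because the new helper re-derives the owning memcg from the object itself. Below is a condensed trace of the new call for a slab-backed stack, based on the helpers added in mm/memcontrol.c further down; it is an annotation for this page, not part of the patch.

	/*
	 * Condensed call trace for a stack allocated with kmem_cache_alloc_node()
	 * (THREAD_SIZE < PAGE_SIZE, !CONFIG_VMAP_STACK):
	 *
	 *   mod_memcg_obj_state(stack, MEMCG_KERNEL_STACK_KB, nr_kb)
	 *     mem_cgroup_from_obj(stack)
	 *       page = virt_to_head_page(stack);     // the slab page
	 *       PageSlab(page) is true
	 *       return memcg_from_slab_page(page);   // page->slab_cache->memcg_params.memcg
	 *     mod_memcg_state(memcg, MEMCG_KERNEL_STACK_KB, nr_kb)
	 *
	 * The old mod_memcg_page_state(first_page, ...) call looked only at
	 * first_page->mem_cgroup, which is NULL for slab pages, so the stat
	 * update was silently skipped.
	 */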

mm/memcontrol.c

Lines changed: 38 additions & 0 deletions
@@ -777,6 +777,17 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
 	rcu_read_unlock();
 }
 
+void mod_memcg_obj_state(void *p, int idx, int val)
+{
+	struct mem_cgroup *memcg;
+
+	rcu_read_lock();
+	memcg = mem_cgroup_from_obj(p);
+	if (memcg)
+		mod_memcg_state(memcg, idx, val);
+	rcu_read_unlock();
+}
+
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2661,6 +2672,33 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 }
 
 #ifdef CONFIG_MEMCG_KMEM
+/*
+ * Returns a pointer to the memory cgroup to which the kernel object is charged.
+ *
+ * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
+ * cgroup_mutex, etc.
+ */
+struct mem_cgroup *mem_cgroup_from_obj(void *p)
+{
+	struct page *page;
+
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	page = virt_to_head_page(p);
+
+	/*
+	 * Slab pages don't have page->mem_cgroup set because corresponding
+	 * kmem caches can be reparented during the lifetime. That's why
+	 * memcg_from_slab_page() should be used instead.
+	 */
+	if (PageSlab(page))
+		return memcg_from_slab_page(page);
+
+	/* All other pages use page->mem_cgroup */
+	return page->mem_cgroup;
+}
+
 static int memcg_alloc_cache_id(void)
 {
 	int id, size;
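
A minimal usage sketch (a hypothetical caller, not part of this commit) showing the lifetime rule from the comment above mem_cgroup_from_obj(): the returned pointer is only safe to use while rcu_read_lock() (or another memcg-pinning mechanism) is held.

	/* Hypothetical caller, for illustration only. */
	static bool obj_charged_to_memcg(void *obj)
	{
		struct mem_cgroup *memcg;
		bool charged;

		rcu_read_lock();
		/* NULL when memcgs are disabled or the object isn't charged
		 * to a non-root memcg. */
		memcg = mem_cgroup_from_obj(obj);
		charged = memcg != NULL;
		rcu_read_unlock();

		return charged;
	}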
