
Commit a7ebf56

Waiman-Long authored and torvalds committed
mm/memcg: relocate mod_objcg_mlstate(), get_obj_stock() and put_obj_stock()
All the calls to mod_objcg_mlstate(), get_obj_stock() and put_obj_stock()
are made by functions defined within the same "#ifdef CONFIG_MEMCG_KMEM"
compilation block.  When CONFIG_MEMCG_KMEM isn't defined, the following
compilation warnings are issued [1][2]:

  mm/memcontrol.c:785:20: warning: unused function 'mod_objcg_mlstate'
  mm/memcontrol.c:2113:33: warning: unused function 'get_obj_stock'

Fix these warnings by moving those functions under the same
CONFIG_MEMCG_KMEM compilation block.  There is no functional change.

[1] https://lore.kernel.org/lkml/[email protected]/
[2] https://lore.kernel.org/lkml/[email protected]/

Link: https://lkml.kernel.org/r/[email protected]
Fixes: 5592711 ("mm/memcg: optimize user context object stock access")
Fixes: 68ac5b3 ("mm/memcg: cache vmstat data in percpu memcg_stock_pcp")
Signed-off-by: Waiman Long <[email protected]>
Reported-by: kernel test robot <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Acked-by: Roman Gushchin <[email protected]>
Reviewed-by: Muchun Song <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Vladimir Davydov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
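The failure mode is ordinary unused-static-function detection: when the only callers of a static function sit inside an #ifdef block that is compiled out, the definition left outside that block has no users in the translation unit and clang's -Wunused-function fires. A minimal sketch of the pattern and of the fix, using hypothetical names (CONFIG_FOO, helper(), caller()) rather than anything from memcontrol.c:

/*
 * Before the fix: helper() is defined unconditionally, but its only
 * caller is compiled under CONFIG_FOO.  With CONFIG_FOO unset, the
 * static function has no users left in this file and clang emits
 * "warning: unused function 'helper'" (-Wunused-function).
 */
static inline int helper(int x)
{
        return x * 2;
}

#ifdef CONFIG_FOO
int caller(int x)                       /* external linkage; used elsewhere */
{
        return helper(x);               /* only user of helper() */
}
#endif /* CONFIG_FOO */

/*
 * After the fix: move helper() inside the same #ifdef CONFIG_FOO block
 * as caller().  When CONFIG_FOO is unset, helper() is simply not
 * compiled, so there is nothing to warn about; when it is set, the
 * generated code is unchanged.
 */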
1 parent 005a79e commit a7ebf56

File tree: 1 file changed (+53, -53 lines)


mm/memcontrol.c

Lines changed: 53 additions & 53 deletions
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
 	rcu_read_unlock();
 }
 
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
-				     struct pglist_data *pgdat,
-				     enum node_stat_item idx, int nr)
-{
-	struct mem_cgroup *memcg;
-	struct lruvec *lruvec;
-
-	rcu_read_lock();
-	memcg = obj_cgroup_memcg(objcg);
-	lruvec = mem_cgroup_lruvec(memcg, pgdat);
-	mod_memcg_lruvec_state(lruvec, idx, nr);
-	rcu_read_unlock();
-}
-
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 }
 #endif
 
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
-	struct memcg_stock_pcp *stock;
-
-	if (likely(in_task())) {
-		*pflags = 0UL;
-		preempt_disable();
-		stock = this_cpu_ptr(&memcg_stock);
-		return &stock->task_obj;
-	}
-
-	local_irq_save(*pflags);
-	stock = this_cpu_ptr(&memcg_stock);
-	return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
-	if (likely(in_task()))
-		preempt_enable();
-	else
-		local_irq_restore(flags);
-}
-
 /**
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
 * @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
  */
 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
 
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+	struct memcg_stock_pcp *stock;
+
+	if (likely(in_task())) {
+		*pflags = 0UL;
+		preempt_disable();
+		stock = this_cpu_ptr(&memcg_stock);
+		return &stock->task_obj;
+	}
+
+	local_irq_save(*pflags);
+	stock = this_cpu_ptr(&memcg_stock);
+	return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+	if (likely(in_task()))
+		preempt_enable();
+	else
+		local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+				     struct pglist_data *pgdat,
+				     enum node_stat_item idx, int nr)
+{
+	struct mem_cgroup *memcg;
+	struct lruvec *lruvec;
+
+	rcu_read_lock();
+	memcg = obj_cgroup_memcg(objcg);
+	lruvec = mem_cgroup_lruvec(memcg, pgdat);
+	mod_memcg_lruvec_state(lruvec, idx, nr);
+	rcu_read_unlock();
+}
+
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
 				 gfp_t gfp, bool new_page)
 {
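For readers following the moved code: get_obj_stock() and put_obj_stock() are used as a strictly matched pair by the objcg accounting paths inside the CONFIG_MEMCG_KMEM block, as the comment above them describes. A sketch of that pairing, with a hypothetical caller name and body (only the get/put bracketing mirrors the real call sites in memcontrol.c):

#ifdef CONFIG_MEMCG_KMEM
/* Hypothetical caller; illustrates the required pairing only. */
static void objcg_stock_example(void)
{
	unsigned long flags;
	struct obj_stock *stock;

	stock = get_obj_stock(&flags);
	/*
	 * In task context, preemption is now disabled and we hold this
	 * CPU's task_obj stock; in interrupt context, local interrupts
	 * are disabled and we hold the irq_obj stock.  Either way the
	 * stock can be read or updated without further locking.
	 */
	/* ... touch fields of *stock here ... */
	put_obj_stock(flags);	/* re-enables preemption or interrupts */
}
#endif /* CONFIG_MEMCG_KMEM */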
