Commit 200577f

shakeelb authored and akpm00 committed
memcg: objcg stock trylock without irq disabling
There is no need to disable irqs to use the objcg per-cpu stock, so stop doing that; consume_obj_stock() and refill_obj_stock() instead need to use trylock to avoid deadlocking against irq context. One consequence of this change is that a charge request from irq context may take the slowpath more often, but that should be rare.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Shakeel Butt <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 0ccf180 commit 200577f
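
The change follows the common trylock-fast-path / lock-free-slow-path shape: if the per-cpu lock cannot be taken (for example because an irq arrived while the interrupted task already holds it), the request falls back to a slower, lock-free accounting route instead of deadlocking. Below is a minimal user-space sketch of that shape, using a pthread mutex and a C11 atomic in place of the kernel's local_trylock() and per-cpu obj_stock_pcp; every name in it (try_consume, charge_slowpath, stock_bytes, slowpath_bytes) is made up for illustration and does not exist in mm/memcontrol.c.

/*
 * Illustrative user-space analogue of the trylock-or-slowpath pattern
 * described above; not kernel code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t stock_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int stock_bytes = 8192;   /* cached "stock", lock-protected */
static atomic_uint slowpath_bytes;        /* fallback counter, lock-free */

/* Fast path: consume from the cached stock only if the lock is free. */
static bool try_consume(unsigned int nr_bytes)
{
        bool ret = false;

        if (pthread_mutex_trylock(&stock_lock) != 0)
                return ret;     /* contended: caller takes the slowpath */

        if (stock_bytes >= nr_bytes) {
                stock_bytes -= nr_bytes;
                ret = true;
        }
        pthread_mutex_unlock(&stock_lock);
        return ret;
}

/* Slow path: no lock needed, just account atomically. */
static void charge_slowpath(unsigned int nr_bytes)
{
        atomic_fetch_add(&slowpath_bytes, nr_bytes);
}

int main(void)
{
        if (!try_consume(1024))
                charge_slowpath(1024);
        printf("stock left: %u, slowpath: %u\n",
               stock_bytes, atomic_load(&slowpath_bytes));
        return 0;
}

In the patch itself the slowpath is the regular objcg charge path plus an atomic_add() on objcg->nr_charged_bytes, as visible in the refill_obj_stock() hunk below.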

1 file changed: 15 additions, 10 deletions

mm/memcontrol.c

@@ -1880,18 +1880,17 @@ static void drain_local_memcg_stock(struct work_struct *dummy)
 static void drain_local_obj_stock(struct work_struct *dummy)
 {
         struct obj_stock_pcp *stock;
-        unsigned long flags;
 
         if (WARN_ONCE(!in_task(), "drain in non-task context"))
                 return;
 
-        local_lock_irqsave(&obj_stock.lock, flags);
+        local_lock(&obj_stock.lock);
 
         stock = this_cpu_ptr(&obj_stock);
         drain_obj_stock(stock);
         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-        local_unlock_irqrestore(&obj_stock.lock, flags);
+        local_unlock(&obj_stock.lock);
 }
 
 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
@@ -2874,10 +2873,10 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
                 struct pglist_data *pgdat, enum node_stat_item idx)
 {
         struct obj_stock_pcp *stock;
-        unsigned long flags;
         bool ret = false;
 
-        local_lock_irqsave(&obj_stock.lock, flags);
+        if (!local_trylock(&obj_stock.lock))
+                return ret;
 
         stock = this_cpu_ptr(&obj_stock);
         if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
@@ -2888,7 +2887,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
                 __account_obj_stock(objcg, stock, nr_bytes, pgdat, idx);
         }
 
-        local_unlock_irqrestore(&obj_stock.lock, flags);
+        local_unlock(&obj_stock.lock);
 
         return ret;
 }
@@ -2977,10 +2976,16 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
                 enum node_stat_item idx)
 {
         struct obj_stock_pcp *stock;
-        unsigned long flags;
         unsigned int nr_pages = 0;
 
-        local_lock_irqsave(&obj_stock.lock, flags);
+        if (!local_trylock(&obj_stock.lock)) {
+                if (pgdat)
+                        mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes);
+                nr_pages = nr_bytes >> PAGE_SHIFT;
+                nr_bytes = nr_bytes & (PAGE_SIZE - 1);
+                atomic_add(nr_bytes, &objcg->nr_charged_bytes);
+                goto out;
+        }
 
         stock = this_cpu_ptr(&obj_stock);
         if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
@@ -3002,8 +3007,8 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
                 stock->nr_bytes &= (PAGE_SIZE - 1);
         }
 
-        local_unlock_irqrestore(&obj_stock.lock, flags);
-
+        local_unlock(&obj_stock.lock);
+out:
         if (nr_pages)
                 obj_cgroup_uncharge_pages(objcg, nr_pages);
 }
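
For reference, the new fallback in refill_obj_stock() splits the bytes being refilled into whole pages (returned immediately via obj_cgroup_uncharge_pages()) and a sub-page remainder (parked in objcg->nr_charged_bytes). A quick stand-alone check of that split, assuming a 4 KiB page size; the constants and the main() wrapper here are illustrative, not kernel code.

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1u << PAGE_SHIFT)

int main(void)
{
        unsigned int nr_bytes = 10000;  /* hypothetical refill amount */
        unsigned int nr_pages = nr_bytes >> PAGE_SHIFT;       /* -> 2 full pages */
        unsigned int remainder = nr_bytes & (PAGE_SIZE - 1);  /* -> 1808 bytes */

        /* 2 pages would be uncharged right away; 1808 bytes are deferred. */
        printf("pages=%u remainder=%u\n", nr_pages, remainder);
        return 0;
}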
