Skip to content

Commit 8472a33

Browse files
umgwanakikbutijnettlet
authored and committed
memcontrol: Prevent scheduling while atomic in cgroup code
mm, memcg: make refill_stock() use get_cpu_light() Nikita reported the following memcg scheduling while atomic bug: Call Trace: [e22d5a90] [c0007ea8] show_stack+0x4c/0x168 (unreliable) [e22d5ad0] [c0618c04] __schedule_bug+0x94/0xb0 [e22d5ae0] [c060b9ec] __schedule+0x530/0x550 [e22d5bf0] [c060bacc] schedule+0x30/0xbc [e22d5c00] [c060ca24] rt_spin_lock_slowlock+0x180/0x27c [e22d5c70] [c00b39dc] res_counter_uncharge_until+0x40/0xc4 [e22d5ca0] [c013ca88] drain_stock.isra.20+0x54/0x98 [e22d5cc0] [c01402ac] __mem_cgroup_try_charge+0x2e8/0xbac [e22d5d70] [c01410d4] mem_cgroup_charge_common+0x3c/0x70 [e22d5d90] [c0117284] __do_fault+0x38c/0x510 [e22d5df0] [c011a5f4] handle_pte_fault+0x98/0x858 [e22d5e50] [c060ed08] do_page_fault+0x42c/0x6fc [e22d5f40] [c000f5b4] handle_page_fault+0xc/0x80 What happens: refill_stock() get_cpu_var() drain_stock() res_counter_uncharge() res_counter_uncharge_until() spin_lock() <== boom Fix it by replacing get/put_cpu_var() with get/put_cpu_light(). Reported-by: Nikita Yushchenko <[email protected]> Signed-off-by: Mike Galbraith <[email protected]> [bigeasy: use memcg_stock_ll as a locallock since it is now IRQ-off region] Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
1 parent 74dc629 commit 8472a33

File tree

1 file changed

+7
-6
lines changed

1 file changed

+7
-6
lines changed

mm/memcontrol.c

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1698,6 +1698,7 @@ struct memcg_stock_pcp {
16981698
#define FLUSHING_CACHED_CHARGE 0
16991699
};
17001700
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1701+
static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);
17011702
static DEFINE_MUTEX(percpu_charge_mutex);
17021703

17031704
/**
@@ -1720,15 +1721,15 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
17201721
if (nr_pages > CHARGE_BATCH)
17211722
return ret;
17221723

1723-
local_irq_save(flags);
1724+
local_lock_irqsave(memcg_stock_ll, flags);
17241725

17251726
stock = this_cpu_ptr(&memcg_stock);
17261727
if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
17271728
stock->nr_pages -= nr_pages;
17281729
ret = true;
17291730
}
17301731

1731-
local_irq_restore(flags);
1732+
local_unlock_irqrestore(memcg_stock_ll, flags);
17321733

17331734
return ret;
17341735
}
@@ -1755,13 +1756,13 @@ static void drain_local_stock(struct work_struct *dummy)
17551756
struct memcg_stock_pcp *stock;
17561757
unsigned long flags;
17571758

1758-
local_irq_save(flags);
1759+
local_lock_irqsave(memcg_stock_ll, flags);
17591760

17601761
stock = this_cpu_ptr(&memcg_stock);
17611762
drain_stock(stock);
17621763
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
17631764

1764-
local_irq_restore(flags);
1765+
local_unlock_irqrestore(memcg_stock_ll, flags);
17651766
}
17661767

17671768
/*
@@ -1773,7 +1774,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
17731774
struct memcg_stock_pcp *stock;
17741775
unsigned long flags;
17751776

1776-
local_irq_save(flags);
1777+
local_lock_irqsave(memcg_stock_ll, flags);
17771778

17781779
stock = this_cpu_ptr(&memcg_stock);
17791780
if (stock->cached != memcg) { /* reset if necessary */
@@ -1782,7 +1783,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
17821783
}
17831784
stock->nr_pages += nr_pages;
17841785

1785-
local_irq_restore(flags);
1786+
local_unlock_irqrestore(memcg_stock_ll, flags);
17861787
}
17871788

17881789
/*

0 commit comments

Comments
 (0)