
Commit d34b073

gormanm authored and torvalds committed
Revert "mm, page_alloc: only use per-cpu allocator for irq-safe requests"
This reverts commit 374ad05. While the patch worked great for userspace allocations, the fact that softirq loses the per-cpu allocator caused problems. It needs to be redone taking into account that either a separate list is needed for hard/soft IRQs, or a cheap way of detecting reentry due to an interrupt must be found. Both are possible but sufficiently tricky that it shouldn't be rushed.

Jesper had one method for allowing softirqs but reported that the cost was high enough that it performed similarly to a plain revert. His figures for netperf TCP_STREAM were as follows:

Baseline v4.10.0  : 60316 Mbit/s
Current 4.11.0-rc6: 47491 Mbit/s
Jesper's patch    : 60662 Mbit/s
This patch        : 60106 Mbit/s

As this is a regression, I wish to revert to the noirq allocator for now and go back to the drawing board.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Mel Gorman <[email protected]>
Reported-by: Tariq Toukan <[email protected]>
Acked-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
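For readers skimming the diff, the crux of the revert is how free_hot_cold_page() (and, symmetrically, rmqueue_pcplist()) protects the per-cpu (pcp) free lists. The following is a condensed, illustrative sketch assembled from the hunks further down; it is not the code as it appears in mm/page_alloc.c (both variants are shown under one name and the pcp-list manipulation is elided), only a side-by-side view of the two protection schemes.

/*
 * Reverted scheme (commit 374ad05): the pcp lists were protected only
 * against preemption, so any interrupt context had to bail out to the
 * buddy allocator; this is why softirq (e.g. network receive) lost the
 * per-cpu fast path.
 */
void free_hot_cold_page(struct page *page, bool cold)
{
	if (in_interrupt()) {
		__free_pages_ok(page, 0);	/* slow path, takes zone->lock */
		return;
	}
	preempt_disable();
	/* ... put the page on this_cpu_ptr(zone->pageset)->pcp lists ... */
	preempt_enable();
}

/*
 * Restored scheme (this revert): disable local IRQs instead. Each call
 * is costlier, but hard-IRQ and softirq contexts keep using the
 * per-cpu lists.
 */
void free_hot_cold_page(struct page *page, bool cold)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... put the page on this_cpu_ptr(zone->pageset)->pcp lists ... */
	local_irq_restore(flags);
}

A future re-attempt that keeps the cheap preempt_disable() path would need either separate pcp lists for hard/soft IRQ context or an inexpensive reentry check, as the message above notes.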
1 parent f61143c commit d34b073

1 file changed: +20 −23 lines

mm/page_alloc.c

Lines changed: 20 additions & 23 deletions
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1090,10 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
-	unsigned long nr_scanned, flags;
+	unsigned long nr_scanned;
 	bool isolated_pageblocks;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
 	if (nr_scanned)
@@ -1142,17 +1142,16 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			trace_mm_page_pcpu_drain(page, 0, mt);
 		} while (--count && --batch_free && !list_empty(list));
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void free_one_page(struct zone *zone,
 				struct page *page, unsigned long pfn,
 				unsigned int order,
 				int migratetype)
 {
-	unsigned long nr_scanned, flags;
-	spin_lock_irqsave(&zone->lock, flags);
-	__count_vm_events(PGFREE, 1 << order);
+	unsigned long nr_scanned;
+	spin_lock(&zone->lock);
 	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
 	if (nr_scanned)
 		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
@@ -1162,7 +1161,7 @@ static void free_one_page(struct zone *zone,
 		migratetype = get_pfnblock_migratetype(page, pfn);
 	}
 	__free_one_page(page, pfn, zone, order, migratetype);
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 }
 
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -1240,14 +1239,18 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
 
 static void __free_pages_ok(struct page *page, unsigned int order)
 {
+	unsigned long flags;
 	int migratetype;
 	unsigned long pfn = page_to_pfn(page);
 
 	if (!free_pages_prepare(page, order, true))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
+	local_irq_save(flags);
+	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
+	local_irq_restore(flags);
 }
 
 static void __init __free_pages_boot_core(struct page *page, unsigned int order)
@@ -2219,9 +2222,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			int migratetype, bool cold)
 {
 	int i, alloced = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&zone->lock, flags);
+	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
@@ -2257,7 +2259,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	 * pages added to the pcp list.
 	 */
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock(&zone->lock);
 	return alloced;
 }
 
@@ -2485,20 +2487,17 @@ void free_hot_cold_page(struct page *page, bool cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
+	unsigned long flags;
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype;
 
-	if (in_interrupt()) {
-		__free_pages_ok(page, 0);
-		return;
-	}
-
 	if (!free_pcp_prepare(page))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
 	set_pcppage_migratetype(page, migratetype);
-	preempt_disable();
+	local_irq_save(flags);
+	__count_vm_event(PGFREE);
 
 	/*
 	 * We only track unmovable, reclaimable and movable on pcp lists.
@@ -2515,7 +2514,6 @@ void free_hot_cold_page(struct page *page, bool cold)
 		migratetype = MIGRATE_MOVABLE;
 	}
 
-	__count_vm_event(PGFREE);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	if (!cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
@@ -2529,7 +2527,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	}
 
 out:
-	preempt_enable();
+	local_irq_restore(flags);
 }
 
 /*
@@ -2654,8 +2652,6 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 {
 	struct page *page;
 
-	VM_BUG_ON(in_interrupt());
-
 	do {
 		if (list_empty(list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
@@ -2686,16 +2682,17 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	struct list_head *list;
 	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 	struct page *page;
+	unsigned long flags;
 
-	preempt_disable();
+	local_irq_save(flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	list = &pcp->lists[migratetype];
 	page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 		zone_statistics(preferred_zone, zone);
 	}
-	preempt_enable();
+	local_irq_restore(flags);
 	return page;
 }
 
@@ -2711,7 +2708,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 	unsigned long flags;
 	struct page *page;
 
-	if (likely(order == 0) && !in_interrupt()) {
+	if (likely(order == 0)) {
 		page = rmqueue_pcplist(preferred_zone, zone, order,
 				gfp_flags, migratetype);
 		goto out;
