Skip to content

Commit 19fc7be

Browse files
Joonsoo Kim authored and torvalds committed
mm/migrate: introduce a standard migration target allocation function
There are some similar functions for migration target allocation. Since there is no fundamental difference, it's better to keep just one rather than keeping all variants. This patch implements base migration target allocation function. In the following patches, variants will be converted to use this function. Changes should be mechanical, but, unfortunately, there are some differences. First, some callers' nodemask is assigned to NULL since NULL nodemask will be considered as all available nodes, that is, &node_states[N_MEMORY]. Second, for hugetlb page allocation, gfp_mask is redefined as regular hugetlb allocation gfp_mask plus __GFP_THISNODE if user provided gfp_mask has it. This is because future caller of this function requires to set this node constraint. Lastly, if provided nodeid is NUMA_NO_NODE, nodeid is set up to the node where migration source lives. It helps to remove simple wrappers for setting up the nodeid. Note that PageHighmem() call in previous function is changed to open-code "is_highmem_idx()" since it provides more readability. [[email protected]: tweak patch title, per Vlastimil] [[email protected]: fix typo in comment] Signed-off-by: Joonsoo Kim <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Acked-by: Vlastimil Babka <[email protected]> Acked-by: Michal Hocko <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: Mike Kravetz <[email protected]> Cc: Naoya Horiguchi <[email protected]> Cc: Roman Gushchin <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Linus Torvalds <[email protected]>
1 parent 9933a0c commit 19fc7be

File tree

7 files changed

+61
-22
lines changed

7 files changed

+61
-22
lines changed

include/linux/hugetlb.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -703,6 +703,16 @@ static inline gfp_t htlb_alloc_mask(struct hstate *h)
703703
return GFP_HIGHUSER;
704704
}
705705

706+
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
707+
{
708+
gfp_t modified_mask = htlb_alloc_mask(h);
709+
710+
/* Some callers might want to enforce node */
711+
modified_mask |= (gfp_mask & __GFP_THISNODE);
712+
713+
return modified_mask;
714+
}
715+
706716
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
707717
struct mm_struct *mm, pte_t *pte)
708718
{
@@ -890,6 +900,11 @@ static inline gfp_t htlb_alloc_mask(struct hstate *h)
890900
return 0;
891901
}
892902

903+
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
904+
{
905+
return 0;
906+
}
907+
893908
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
894909
struct mm_struct *mm, pte_t *pte)
895910
{

include/linux/migrate.h

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@
1010
typedef struct page *new_page_t(struct page *page, unsigned long private);
1111
typedef void free_page_t(struct page *page, unsigned long private);
1212

13+
struct migration_target_control;
14+
1315
/*
1416
* Return values from addresss_space_operations.migratepage():
1517
* - negative errno on page migration failure;
@@ -39,8 +41,7 @@ extern int migrate_page(struct address_space *mapping,
3941
enum migrate_mode mode);
4042
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
4143
unsigned long private, enum migrate_mode mode, int reason);
42-
extern struct page *new_page_nodemask(struct page *page,
43-
int preferred_nid, nodemask_t *nodemask);
44+
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
4445
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
4546
extern void putback_movable_page(struct page *page);
4647

@@ -59,8 +60,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
5960
free_page_t free, unsigned long private, enum migrate_mode mode,
6061
int reason)
6162
{ return -ENOSYS; }
62-
static inline struct page *new_page_nodemask(struct page *page,
63-
int preferred_nid, nodemask_t *nodemask)
63+
static inline struct page *alloc_migration_target(struct page *page,
64+
unsigned long private)
6465
{ return NULL; }
6566
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
6667
{ return -EBUSY; }

mm/internal.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -614,4 +614,11 @@ static inline bool is_migrate_highatomic_page(struct page *page)
614614

615615
void setup_zone_pageset(struct zone *zone);
616616
extern struct page *alloc_new_node_page(struct page *page, unsigned long node);
617+
618+
struct migration_target_control {
619+
int nid; /* preferred node id */
620+
nodemask_t *nmask;
621+
gfp_t gfp_mask;
622+
};
623+
617624
#endif /* __MM_INTERNAL_H */

mm/memory-failure.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1648,9 +1648,12 @@ EXPORT_SYMBOL(unpoison_memory);
16481648

16491649
static struct page *new_page(struct page *p, unsigned long private)
16501650
{
1651-
int nid = page_to_nid(p);
1651+
struct migration_target_control mtc = {
1652+
.nid = page_to_nid(p),
1653+
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
1654+
};
16521655

1653-
return new_page_nodemask(p, nid, &node_states[N_MEMORY]);
1656+
return alloc_migration_target(p, (unsigned long)&mtc);
16541657
}
16551658

16561659
/*

mm/memory_hotplug.c

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1276,19 +1276,23 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
12761276

12771277
static struct page *new_node_page(struct page *page, unsigned long private)
12781278
{
1279-
int nid = page_to_nid(page);
12801279
nodemask_t nmask = node_states[N_MEMORY];
1280+
struct migration_target_control mtc = {
1281+
.nid = page_to_nid(page),
1282+
.nmask = &nmask,
1283+
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
1284+
};
12811285

12821286
/*
12831287
* try to allocate from a different node but reuse this node if there
12841288
* are no other online nodes to be used (e.g. we are offlining a part
12851289
* of the only existing node)
12861290
*/
1287-
node_clear(nid, nmask);
1291+
node_clear(mtc.nid, nmask);
12881292
if (nodes_empty(nmask))
1289-
node_set(nid, nmask);
1293+
node_set(mtc.nid, nmask);
12901294

1291-
return new_page_nodemask(page, nid, &nmask);
1295+
return alloc_migration_target(page, (unsigned long)&mtc);
12921296
}
12931297

12941298
static int

mm/migrate.c

Lines changed: 16 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1538,19 +1538,26 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
15381538
return rc;
15391539
}
15401540

1541-
struct page *new_page_nodemask(struct page *page,
1542-
int preferred_nid, nodemask_t *nodemask)
1541+
struct page *alloc_migration_target(struct page *page, unsigned long private)
15431542
{
1544-
gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
1543+
struct migration_target_control *mtc;
1544+
gfp_t gfp_mask;
15451545
unsigned int order = 0;
15461546
struct page *new_page = NULL;
1547+
int nid;
1548+
int zidx;
1549+
1550+
mtc = (struct migration_target_control *)private;
1551+
gfp_mask = mtc->gfp_mask;
1552+
nid = mtc->nid;
1553+
if (nid == NUMA_NO_NODE)
1554+
nid = page_to_nid(page);
15471555

15481556
if (PageHuge(page)) {
15491557
struct hstate *h = page_hstate(compound_head(page));
15501558

1551-
gfp_mask = htlb_alloc_mask(h);
1552-
return alloc_huge_page_nodemask(h, preferred_nid,
1553-
nodemask, gfp_mask);
1559+
gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1560+
return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
15541561
}
15551562

15561563
if (PageTransHuge(page)) {
@@ -1562,12 +1569,11 @@ struct page *new_page_nodemask(struct page *page,
15621569
gfp_mask |= GFP_TRANSHUGE;
15631570
order = HPAGE_PMD_ORDER;
15641571
}
1565-
1566-
if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
1572+
zidx = zone_idx(page_zone(page));
1573+
if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
15671574
gfp_mask |= __GFP_HIGHMEM;
15681575

1569-
new_page = __alloc_pages_nodemask(gfp_mask, order,
1570-
preferred_nid, nodemask);
1576+
new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
15711577

15721578
if (new_page && PageTransHuge(new_page))
15731579
prep_transhuge_page(new_page);

mm/page_isolation.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -309,7 +309,10 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
309309

310310
struct page *alloc_migrate_target(struct page *page, unsigned long private)
311311
{
312-
int nid = page_to_nid(page);
312+
struct migration_target_control mtc = {
313+
.nid = page_to_nid(page),
314+
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
315+
};
313316

314-
return new_page_nodemask(page, nid, &node_states[N_MEMORY]);
317+
return alloc_migration_target(page, (unsigned long)&mtc);
315318
}

0 commit comments

Comments
 (0)