Skip to content

Commit ed03d92

Browse files
JoonsooKim authored and torvalds committed
mm/gup: use a standard migration target allocation callback
There is a well-defined migration target allocation callback. Use it. Signed-off-by: Joonsoo Kim <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Acked-by: Vlastimil Babka <[email protected]> Acked-by: Michal Hocko <[email protected]> Cc: "Aneesh Kumar K . V" <[email protected]> Cc: Christoph Hellwig <[email protected]> Cc: Mike Kravetz <[email protected]> Cc: Naoya Horiguchi <[email protected]> Cc: Roman Gushchin <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Linus Torvalds <[email protected]>
1 parent bbe8875 commit ed03d92

File tree

1 file changed

+6
-48
lines changed

1 file changed

+6
-48
lines changed

mm/gup.c

Lines changed: 6 additions & 48 deletions
Original file line number | Diff line number | Diff line change
@@ -1609,52 +1609,6 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
16091609
}
16101610

16111611
#ifdef CONFIG_CMA
1612-
static struct page *new_non_cma_page(struct page *page, unsigned long private)
1613-
{
1614-
/*
1615-
* We want to make sure we allocate the new page from the same node
1616-
* as the source page.
1617-
*/
1618-
int nid = page_to_nid(page);
1619-
/*
1620-
* Trying to allocate a page for migration. Ignore allocation
1621-
* failure warnings. We don't force __GFP_THISNODE here because
1622-
* this node here is the node where we have CMA reservation and
1623-
* in some case these nodes will have really less non CMA
1624-
* allocation memory.
1625-
*
1626-
* Note that CMA region is prohibited by allocation scope.
1627-
*/
1628-
gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN;
1629-
1630-
if (PageHighMem(page))
1631-
gfp_mask |= __GFP_HIGHMEM;
1632-
1633-
#ifdef CONFIG_HUGETLB_PAGE
1634-
if (PageHuge(page)) {
1635-
struct hstate *h = page_hstate(page);
1636-
1637-
gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1638-
return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
1639-
}
1640-
#endif
1641-
if (PageTransHuge(page)) {
1642-
struct page *thp;
1643-
/*
1644-
* ignore allocation failure warnings
1645-
*/
1646-
gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
1647-
1648-
thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
1649-
if (!thp)
1650-
return NULL;
1651-
prep_transhuge_page(thp);
1652-
return thp;
1653-
}
1654-
1655-
return __alloc_pages_node(nid, gfp_mask, 0);
1656-
}
1657-
16581612
static long check_and_migrate_cma_pages(struct task_struct *tsk,
16591613
struct mm_struct *mm,
16601614
unsigned long start,
@@ -1669,6 +1623,10 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
16691623
bool migrate_allow = true;
16701624
LIST_HEAD(cma_page_list);
16711625
long ret = nr_pages;
1626+
struct migration_target_control mtc = {
1627+
.nid = NUMA_NO_NODE,
1628+
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
1629+
};
16721630

16731631
check_again:
16741632
for (i = 0; i < nr_pages;) {
@@ -1714,8 +1672,8 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
17141672
for (i = 0; i < nr_pages; i++)
17151673
put_page(pages[i]);
17161674

1717-
if (migrate_pages(&cma_page_list, new_non_cma_page,
1718-
NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
1675+
if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
1676+
(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
17191677
/*
17201678
* some of the pages failed migration. Do get_user_pages
17211679
* without migration.

0 commit comments

Comments (0)