@@ -1609,52 +1609,6 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
 }
 
 #ifdef CONFIG_CMA
-static struct page *new_non_cma_page(struct page *page, unsigned long private)
-{
-	/*
-	 * We want to make sure we allocate the new page from the same node
-	 * as the source page.
-	 */
-	int nid = page_to_nid(page);
-	/*
-	 * Trying to allocate a page for migration. Ignore allocation
-	 * failure warnings. We don't force __GFP_THISNODE here because
-	 * this node here is the node where we have CMA reservation and
-	 * in some case these nodes will have really less non CMA
-	 * allocation memory.
-	 *
-	 * Note that CMA region is prohibited by allocation scope.
-	 */
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN;
-
-	if (PageHighMem(page))
-		gfp_mask |= __GFP_HIGHMEM;
-
-#ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(page);
-
-		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
-		return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
-	}
-#endif
-	if (PageTransHuge(page)) {
-		struct page *thp;
-		/*
-		 * ignore allocation failure warnings
-		 */
-		gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
-
-		thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	}
-
-	return __alloc_pages_node(nid, gfp_mask, 0);
-}
-
 static long check_and_migrate_cma_pages(struct task_struct *tsk,
 					struct mm_struct *mm,
 					unsigned long start,
@@ -1669,6 +1623,10 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 	bool migrate_allow = true;
 	LIST_HEAD(cma_page_list);
 	long ret = nr_pages;
+	struct migration_target_control mtc = {
+		.nid = NUMA_NO_NODE,
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
+	};
 
 check_again:
 	for (i = 0; i < nr_pages;) {
@@ -1714,8 +1672,8 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 	for (i = 0; i < nr_pages; i++)
 		put_page(pages[i]);
 
-	if (migrate_pages(&cma_page_list, new_non_cma_page,
-			  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+	if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
+		(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
 		/*
 		 * some of the pages failed migration. Do get_user_pages
 		 * without migration.