Commit ab09243

apopple-nvidia authored and torvalds committed
mm/migrate.c: remove MIGRATE_PFN_LOCKED
MIGRATE_PFN_LOCKED is used to indicate to migrate_vma_prepare() that a
source page was already locked during migrate_vma_collect().  If it
wasn't then a second attempt is made to lock the page.  However if the
first attempt failed it's unlikely a second attempt will succeed, and
the retry adds complexity.  So clean this up by removing the retry and
the MIGRATE_PFN_LOCKED flag.

Destination pages are also meant to have the MIGRATE_PFN_LOCKED flag
set, but nothing actually checks that.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Alistair Popple <[email protected]>
Reviewed-by: Ralph Campbell <[email protected]>
Acked-by: Felix Kuehling <[email protected]>
Cc: Alex Deucher <[email protected]>
Cc: Jerome Glisse <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Zi Yan <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Ben Skeggs <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
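For context, a minimal sketch of the driver-side migrate_vma pattern after
this change, following the flow documented in Documentation/vm/hmm.rst
(updated below). The alloc_device_page() helper is a hypothetical stand-in
for a driver's device-memory allocator and error handling is elided;
migrate_vma_setup(), migrate_pfn(), migrate_vma_pages() and
migrate_vma_finalize() are the real API:

	/* Sketch: migrate one system page into device memory. */
	static int example_migrate_one_page(struct vm_area_struct *vma,
					    unsigned long addr)
	{
		unsigned long src_pfn = 0, dst_pfn = 0;
		struct migrate_vma mig = {
			.vma	= vma,
			.start	= addr,
			.end	= addr + PAGE_SIZE,
			.src	= &src_pfn,
			.dst	= &dst_pfn,
			.flags	= MIGRATE_VMA_SELECT_SYSTEM,
		};
		struct page *dpage;

		if (migrate_vma_setup(&mig))
			return -EFAULT;

		/* Source page could not be collected; nothing to do. */
		if (!(src_pfn & MIGRATE_PFN_MIGRATE))
			return 0;

		dpage = alloc_device_page();	/* hypothetical allocator */
		lock_page(dpage);		/* destination must be locked */

		/* After this commit: no MIGRATE_PFN_LOCKED ORed in. */
		dst_pfn = migrate_pfn(page_to_pfn(dpage));

		/* ... copy the source page's contents into dpage ... */

		migrate_vma_pages(&mig);
		migrate_vma_finalize(&mig);
		return 0;
	}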
1 parent 0ef0246 commit ab09243

File tree: 7 files changed (+35, -128 lines)

Documentation/vm/hmm.rst

Lines changed: 1 addition & 1 deletion
@@ -360,7 +360,7 @@ between device driver specific code and shared common code:
    system memory page, locks the page with ``lock_page()``, and fills in the
    ``dst`` array entry with::
 
-     dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+     dst[i] = migrate_pfn(page_to_pfn(dpage));
 
    Now that the driver knows that this page is being migrated, it can
    invalidate device private MMU mappings and copy device private memory

arch/powerpc/kvm/book3s_hv_uvmem.c

Lines changed: 2 additions & 2 deletions
@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
 			  gpa, 0, page_shift);
 
 	if (ret == U_SUCCESS)
-		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+		*mig.dst = migrate_pfn(pfn);
 	else {
 		unlock_page(dpage);
 		__free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
 		}
 	}
 
-	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	*mig.dst = migrate_pfn(page_to_pfn(dpage));
 	migrate_vma_pages(&mig);
 out_finalize:
 	migrate_vma_finalize(&mig);

drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

Lines changed: 0 additions & 2 deletions
@@ -317,7 +317,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
 		svm_migrate_get_vram_page(prange, migrate->dst[i]);
 		migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 		src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
 				      DMA_TO_DEVICE);
 		r = dma_mapping_error(dev, src[i]);
@@ -610,7 +609,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 			dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
 
 		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 		j++;
 	}
 

drivers/gpu/drm/nouveau/nouveau_dmem.c

Lines changed: 2 additions & 2 deletions
@@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
 		goto error_dma_unmap;
 	mutex_unlock(&svmm->mutex);
 
-	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	args->dst[0] = migrate_pfn(page_to_pfn(dpage));
 	return 0;
 
 error_dma_unmap:
@@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
 	if (src & MIGRATE_PFN_WRITE)
 		*pfn |= NVIF_VMM_PFNMAP_V0_W;
-	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	return migrate_pfn(page_to_pfn(dpage));
 
 out_dma_unmap:
 	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

include/linux/migrate.h

Lines changed: 0 additions & 1 deletion
@@ -110,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page,
  */
 #define MIGRATE_PFN_VALID	(1UL << 0)
 #define MIGRATE_PFN_MIGRATE	(1UL << 1)
-#define MIGRATE_PFN_LOCKED	(1UL << 2)
 #define MIGRATE_PFN_WRITE	(1UL << 3)
 #define MIGRATE_PFN_SHIFT	6
 
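For reference, these flags occupy the low bits of each src/dst array entry,
with the pfn packed above them, roughly as the inline helpers next to these
defines in include/linux/migrate.h encode it (bit 2 simply becomes unused
once MIGRATE_PFN_LOCKED is gone):

	/* Each migrate_vma src/dst entry packs a pfn above the flag bits. */
	static inline unsigned long migrate_pfn(unsigned long pfn)
	{
		return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
	}

	static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
	{
		if (!(mpfn & MIGRATE_PFN_VALID))
			return NULL;
		return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
	}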

lib/test_hmm.c

Lines changed: 2 additions & 3 deletions
@@ -613,8 +613,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
 		 */
 		rpage->zone_device_data = dmirror;
 
-		*dst = migrate_pfn(page_to_pfn(dpage)) |
-			MIGRATE_PFN_LOCKED;
+		*dst = migrate_pfn(page_to_pfn(dpage));
 		if ((*src & MIGRATE_PFN_WRITE) ||
 		    (!spage && args->vma->vm_flags & VM_WRITE))
 			*dst |= MIGRATE_PFN_WRITE;
@@ -1137,7 +1136,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 	lock_page(dpage);
 	xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
 	copy_highpage(dpage, spage);
-	*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	*dst = migrate_pfn(page_to_pfn(dpage));
 	if (*src & MIGRATE_PFN_WRITE)
 		*dst |= MIGRATE_PFN_WRITE;
 }

mm/migrate.c

Lines changed: 28 additions & 117 deletions
@@ -2362,7 +2362,6 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		 * can't be dropped from it).
 		 */
 		get_page(page);
-		migrate->cpages++;
 
 		/*
 		 * Optimize for the common case where page is only mapped once
@@ -2372,7 +2371,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		if (trylock_page(page)) {
 			pte_t swp_pte;
 
-			mpfn |= MIGRATE_PFN_LOCKED;
+			migrate->cpages++;
 			ptep_get_and_clear(mm, addr, ptep);
 
 			/* Setup special migration page table entry */
@@ -2406,6 +2405,9 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 
 			if (pte_present(pte))
 				unmapped++;
+		} else {
+			put_page(page);
+			mpfn = 0;
 		}
 
 next:
@@ -2510,15 +2512,17 @@ static bool migrate_vma_check_page(struct page *page)
 }
 
 /*
- * migrate_vma_prepare() - lock pages and isolate them from the lru
+ * migrate_vma_unmap() - replace page mapping with special migration pte entry
  * @migrate: migrate struct containing all migration information
  *
- * This locks pages that have been collected by migrate_vma_collect(). Once each
- * page is locked it is isolated from the lru (for non-device pages). Finally,
- * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
- * migrated by concurrent kernel threads.
+ * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
+ * special migration pte entry and check if it has been pinned. Pinned pages are
+ * restored because we cannot migrate them.
+ *
+ * This is the last step before we call the device driver callback to allocate
+ * destination memory and copy contents of original page over to new page.
  */
-static void migrate_vma_prepare(struct migrate_vma *migrate)
+static void migrate_vma_unmap(struct migrate_vma *migrate)
 {
 	const unsigned long npages = migrate->npages;
 	const unsigned long start = migrate->start;
@@ -2527,32 +2531,12 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 
 	lru_add_drain();
 
-	for (i = 0; (i < npages) && migrate->cpages; i++) {
+	for (i = 0; i < npages; i++) {
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
-		bool remap = true;
 
 		if (!page)
 			continue;
 
-		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
-			/*
-			 * Because we are migrating several pages there can be
-			 * a deadlock between 2 concurrent migration where each
-			 * are waiting on each other page lock.
-			 *
-			 * Make migrate_vma() a best effort thing and backoff
-			 * for any page we can not lock right away.
-			 */
-			if (!trylock_page(page)) {
-				migrate->src[i] = 0;
-				migrate->cpages--;
-				put_page(page);
-				continue;
-			}
-			remap = false;
-			migrate->src[i] |= MIGRATE_PFN_LOCKED;
-		}
-
 		/* ZONE_DEVICE pages are not on LRU */
 		if (!is_zone_device_page(page)) {
 			if (!PageLRU(page) && allow_drain) {
@@ -2562,97 +2546,30 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 			}
 
 			if (isolate_lru_page(page)) {
-				if (remap) {
-					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-					migrate->cpages--;
-					restore++;
-				} else {
-					migrate->src[i] = 0;
-					unlock_page(page);
-					migrate->cpages--;
-					put_page(page);
-				}
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
 				continue;
 			}
 
 			/* Drop the reference we took in collect */
 			put_page(page);
 		}
 
-		if (!migrate_vma_check_page(page)) {
-			if (remap) {
-				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-				migrate->cpages--;
-				restore++;
-
-				if (!is_zone_device_page(page)) {
-					get_page(page);
-					putback_lru_page(page);
-				}
-			} else {
-				migrate->src[i] = 0;
-				unlock_page(page);
-				migrate->cpages--;
+		if (page_mapped(page))
+			try_to_migrate(page, 0);
 
-				if (!is_zone_device_page(page))
-					putback_lru_page(page);
-				else
-					put_page(page);
+		if (page_mapped(page) || !migrate_vma_check_page(page)) {
+			if (!is_zone_device_page(page)) {
+				get_page(page);
+				putback_lru_page(page);
 			}
-		}
-	}
-
-	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
-		struct page *page = migrate_pfn_to_page(migrate->src[i]);
-
-		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
-			continue;
 
-		remove_migration_pte(page, migrate->vma, addr, page);
-
-		migrate->src[i] = 0;
-		unlock_page(page);
-		put_page(page);
-		restore--;
-	}
-}
-
-/*
- * migrate_vma_unmap() - replace page mapping with special migration pte entry
- * @migrate: migrate struct containing all migration information
- *
- * Replace page mapping (CPU page table pte) with a special migration pte entry
- * and check again if it has been pinned. Pinned pages are restored because we
- * cannot migrate them.
- *
- * This is the last step before we call the device driver callback to allocate
- * destination memory and copy contents of original page over to new page.
- */
-static void migrate_vma_unmap(struct migrate_vma *migrate)
-{
-	const unsigned long npages = migrate->npages;
-	const unsigned long start = migrate->start;
-	unsigned long addr, i, restore = 0;
-
-	for (i = 0; i < npages; i++) {
-		struct page *page = migrate_pfn_to_page(migrate->src[i]);
-
-		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
+			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+			migrate->cpages--;
+			restore++;
 			continue;
-
-		if (page_mapped(page)) {
-			try_to_migrate(page, 0);
-			if (page_mapped(page))
-				goto restore;
 		}
-
-		if (migrate_vma_check_page(page))
-			continue;
-
-restore:
-		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-		migrate->cpages--;
-		restore++;
 	}
 
 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
@@ -2665,12 +2582,8 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
 
 		migrate->src[i] = 0;
 		unlock_page(page);
+		put_page(page);
 		restore--;
-
-		if (is_zone_device_page(page))
-			put_page(page);
-		else
-			putback_lru_page(page);
 	}
 }
 
@@ -2693,8 +2606,8 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
  * flag set). Once these are allocated and copied, the caller must update each
  * corresponding entry in the dst array with the pfn value of the destination
- * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
- * (destination pages must have their struct pages locked, via lock_page()).
+ * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
+ * lock_page().
  *
  * Note that the caller does not have to migrate all the pages that are marked
  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
@@ -2763,8 +2676,6 @@ int migrate_vma_setup(struct migrate_vma *args)
 
 	migrate_vma_collect(args);
 
-	if (args->cpages)
-		migrate_vma_prepare(args);
 	if (args->cpages)
 		migrate_vma_unmap(args);
 
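Net effect of the mm/migrate.c changes: migrate_vma_collect_pmd() either
locks the page on the spot or gives up on it immediately, and the separate
migrate_vma_prepare() retry pass is gone. A condensed sketch of the new
collect-side rule (a simplification of the hunks above, not the kernel's
verbatim code; example_collect_page() is a hypothetical helper):

	/*
	 * Lock now or drop the page, never retry later. Returns the
	 * mpfn to record in the src array, or 0 to skip the page.
	 */
	static unsigned long example_collect_page(struct migrate_vma *migrate,
						  struct page *page,
						  unsigned long mpfn)
	{
		get_page(page);		/* pin; blocks other migration */

		if (!trylock_page(page)) {
			/* Lock contended: skip rather than retry later. */
			put_page(page);
			return 0;
		}

		/* Only pages locked on the first try count as collected. */
		migrate->cpages++;
		return mpfn;	/* caller installs the migration pte */
	}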
