@@ -1464,7 +1464,6 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
 	}
 }
 
-#ifdef CONFIG_SHMEM
 /* folio must be locked, and mmap_lock must be held */
 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
 			pmd_t *pmdp, struct folio *folio, struct page *page)
@@ -2353,14 +2352,6 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
 	trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
 	return result;
 }
-#else
-static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
-				    struct file *file, pgoff_t start,
-				    struct collapse_control *cc)
-{
-	BUILD_BUG();
-}
-#endif
 
 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 					    struct collapse_control *cc)
@@ -2436,7 +2427,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 			VM_BUG_ON(khugepaged_scan.address < hstart ||
 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
 				  hend);
-			if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
+			if (!vma_is_anonymous(vma)) {
 				struct file *file = get_file(vma->vm_file);
 				pgoff_t pgoff = linear_page_index(vma,
 						khugepaged_scan.address);
@@ -2782,7 +2773,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	mmap_assert_locked(mm);
 	memset(cc->node_load, 0, sizeof(cc->node_load));
 	nodes_clear(cc->alloc_nmask);
-	if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
+	if (!vma_is_anonymous(vma)) {
 		struct file *file = get_file(vma->vm_file);
 		pgoff_t pgoff = linear_page_index(vma, addr);
 
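
The idiom this patch removes pairs an IS_ENABLED(CONFIG_SHMEM) check in the caller with an #else stub whose body is BUILD_BUG(): both arms stay visible to the compiler for type checking, but the disabled arm is constant-folded away, so the stub can never actually be reached. Below is a minimal userspace sketch of that pattern, not kernel code. CONFIG_SHMEM_ENABLED, scan_file(), scan_anon(), and build_bug_stub_reached() are illustrative stand-ins, and the link-time trick only approximates what the kernel's BUILD_BUG() enforces at compile time.

#include <stdio.h>

/* Flip to 1 to mimic CONFIG_SHMEM=y; Kconfig provides this in the kernel. */
#define CONFIG_SHMEM_ENABLED 0

/*
 * Deliberately never defined: linking fails only if a call to it
 * survives constant folding, mimicking the guarantee the kernel's
 * BUILD_BUG() gives (there, the build fails at compile time instead).
 */
extern int build_bug_stub_reached(void);

#if CONFIG_SHMEM_ENABLED
static int scan_file(void)
{
	puts("file-backed scan");	/* stands in for hpage_collapse_scan_file() */
	return 0;
}
#else
static int scan_file(void)
{
	return build_bug_stub_reached();	/* stands in for BUILD_BUG() */
}
#endif

static int scan_anon(void)
{
	puts("anonymous scan");
	return 0;
}

int main(void)
{
	int file_backed = 1;

	/*
	 * Mirrors the pre-patch check: the file arm is still parsed and
	 * type-checked when the option is off, but 0 && x folds to 0,
	 * the call disappears, and the unused static stub is dropped.
	 * Build with -O1 or higher so dead-code elimination kicks in.
	 */
	if (CONFIG_SHMEM_ENABLED && file_backed)
		return scan_file();
	return scan_anon();
}

After this patch, the real hpage_collapse_scan_file() is the only definition and the !vma_is_anonymous(vma) checks run unconditionally, so neither half of the idiom is needed in these paths.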