@@ -388,8 +388,65 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
 	return 0;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
+				struct vm_area_struct *vma, unsigned long addr)
+{
+	bool referenced = false;
+	pte_t entry = huge_ptep_get(pte);
+	struct page *page = pte_page(entry);
+
+	if (!page)
+		return;
+
+	get_page(page);
+
+	if (pte_young(entry)) {
+		referenced = true;
+		entry = pte_mkold(entry);
+		huge_ptep_set_access_flags(vma, addr, pte, entry,
+					   vma->vm_flags & VM_WRITE);
+	}
+
+#ifdef CONFIG_MMU_NOTIFIER
+	if (mmu_notifier_clear_young(mm, addr,
+				     addr + huge_page_size(hstate_vma(vma))))
+		referenced = true;
+#endif /* CONFIG_MMU_NOTIFIER */
+
+	if (referenced)
+		set_page_young(page);
+
+	set_page_idle(page);
+	put_page(page);
+}
+
+static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
+				     unsigned long addr, unsigned long end,
+				     struct mm_walk *walk)
+{
+	struct hstate *h = hstate_vma(walk->vma);
+	spinlock_t *ptl;
+	pte_t entry;
+
+	ptl = huge_pte_lock(h, walk->mm, pte);
+	entry = huge_ptep_get(pte);
+	if (!pte_present(entry))
+		goto out;
+
+	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);
+
+out:
+	spin_unlock(ptl);
+	return 0;
+}
+#else
+#define damon_mkold_hugetlb_entry NULL
+#endif /* CONFIG_HUGETLB_PAGE */
+
 static const struct mm_walk_ops damon_mkold_ops = {
 	.pmd_entry = damon_mkold_pmd_entry,
+	.hugetlb_entry = damon_mkold_hugetlb_entry,
 };
 
 static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
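The new .hugetlb_entry callback is reached through the same page-table walk that already drives .pmd_entry: damon_va_mkold(), whose opening line closes this hunk, passes damon_mkold_ops to walk_page_range(), and the walker invokes the hugetlb hook once per huge page rather than descending to PTE granularity. A minimal sketch of that caller, assuming the pre-existing damon_va_mkold() body in mm/damon/vaddr.c (it is not changed by this patch):

static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
{
	/* Walk only the sampled address; hugetlb VMAs now hit the new hook. */
	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL);
	mmap_read_unlock(mm);
}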
@@ -484,8 +541,47 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
 	return 0;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
+				     unsigned long addr, unsigned long end,
+				     struct mm_walk *walk)
+{
+	struct damon_young_walk_private *priv = walk->private;
+	struct hstate *h = hstate_vma(walk->vma);
+	struct page *page;
+	spinlock_t *ptl;
+	pte_t entry;
+
+	ptl = huge_pte_lock(h, walk->mm, pte);
+	entry = huge_ptep_get(pte);
+	if (!pte_present(entry))
+		goto out;
+
+	page = pte_page(entry);
+	if (!page)
+		goto out;
+
+	get_page(page);
+
+	if (pte_young(entry) || !page_is_idle(page) ||
+	    mmu_notifier_test_young(walk->mm, addr)) {
+		*priv->page_sz = huge_page_size(h);
+		priv->young = true;
+	}
+
+	put_page(page);
+
+out:
+	spin_unlock(ptl);
+	return 0;
+}
+#else
+#define damon_young_hugetlb_entry NULL
+#endif /* CONFIG_HUGETLB_PAGE */
+
 static const struct mm_walk_ops damon_young_ops = {
 	.pmd_entry = damon_young_pmd_entry,
+	.hugetlb_entry = damon_young_hugetlb_entry,
 };
 
 static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
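On the access-check side, damon_young_hugetlb_entry() reports back through the walk's private struct: it stores the huge page size in *priv->page_sz so the caller can account for the full mapping granularity, and sets priv->young. A rough sketch, not taken from this patch, of how a caller shaped like damon_va_young() (whose opening line closes this hunk) could collect that result via walk_page_range():

static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
			   unsigned long *page_sz)
{
	struct damon_young_walk_private arg = {
		.page_sz = page_sz,	/* callbacks fill in the mapping size */
		.young = false,
	};

	mmap_read_lock(mm);
	walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg);
	mmap_read_unlock(mm);
	return arg.young;
}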