Skip to content

Commit 49f4203

Browse files
Baolin Wang authored and torvalds committed
mm/damon: add access checking for hugetlb pages
The process's VMAs can be mapped by hugetlb page, but now the DAMON did not implement the access checking for hugetlb pte, so we can not get the actual access count like below if a process VMAs were mapped by hugetlb. damon_aggregated: target_id=18446614368406014464 nr_regions=12 4194304-5476352: 0 545 damon_aggregated: target_id=18446614368406014464 nr_regions=12 140662370467840-140662372970496: 0 545 damon_aggregated: target_id=18446614368406014464 nr_regions=12 140662372970496-140662375460864: 0 545 damon_aggregated: target_id=18446614368406014464 nr_regions=12 140662375460864-140662377951232: 0 545 damon_aggregated: target_id=18446614368406014464 nr_regions=12 140662377951232-140662380449792: 0 545 damon_aggregated: target_id=18446614368406014464 nr_regions=12 140662380449792-140662382944256: 0 545 ...... Thus this patch adds hugetlb access checking support, with this patch we can see below VMA mapped by hugetlb access count. damon_aggregated: target_id=18446613056935405824 nr_regions=12 140296486649856-140296489914368: 1 3 damon_aggregated: target_id=18446613056935405824 nr_regions=12 140296489914368-140296492978176: 1 3 damon_aggregated: target_id=18446613056935405824 nr_regions=12 140296492978176-140296495439872: 1 3 damon_aggregated: target_id=18446613056935405824 nr_regions=12 140296495439872-140296498311168: 1 3 damon_aggregated: target_id=18446613056935405824 nr_regions=12 140296498311168-140296501198848: 1 3 damon_aggregated: target_id=18446613056935405824 nr_regions=12 140296501198848-140296504320000: 1 3 damon_aggregated: target_id=18446613056935405824 nr_regions=12 140296504320000-140296507568128: 1 2 ...... 
[[email protected]: fix unused var warning] Link: https://lkml.kernel.org/r/[email protected] [[email protected]: v3] Link: https://lkml.kernel.org/r/486927ecaaaecf2e3a7fbe0378ec6e1c58b50747.1640852276.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/6afcbd1fda5f9c7c24f320d26a98188c727ceec3.1639623751.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang <[email protected]> Reviewed-by: SeongJae Park <[email protected]> Cc: Mike Kravetz <[email protected]> Cc: Randy Dunlap <[email protected]> Cc: Stephen Rothwell <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent dbcb9b9 commit 49f4203

File tree

1 file changed

+96
-0
lines changed

1 file changed

+96
-0
lines changed

mm/damon/vaddr.c

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -388,8 +388,65 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
388388
return 0;
389389
}
390390

391+
#ifdef CONFIG_HUGETLB_PAGE
/*
 * damon_hugetlb_mkold() - clear the access (young) bit of a mapped hugetlb
 * page so a later check can tell whether it was re-accessed.
 * @pte:  pointer to the hugetlb pte to make old (caller holds the pte lock)
 * @mm:   mm_struct the mapping belongs to
 * @vma:  vma containing @addr
 * @addr: virtual address covered by @pte
 *
 * Mirrors the pte/pmd mkold logic for hugetlb mappings: if the hardware
 * access bit is set, record the reference, clear the bit, and mark the
 * backing page young; always mark the page idle afterwards so the next
 * access-check pass starts from a clean state.
 */
static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long addr)
{
	bool referenced = false;
	pte_t entry = huge_ptep_get(pte);
	struct page *page = pte_page(entry);

	if (!page)
		return;

	/* Pin the page so it cannot go away while we update its idle state. */
	get_page(page);

	if (pte_young(entry)) {
		referenced = true;
		/* Clear the access bit and write the updated pte back. */
		entry = pte_mkold(entry);
		huge_ptep_set_access_flags(vma, addr, pte, entry,
					   vma->vm_flags & VM_WRITE);
	}

#ifdef CONFIG_MMU_NOTIFIER
	/*
	 * Secondary MMUs (e.g. KVM) may have accessed the page too; clearing
	 * their young state over the whole hugepage range also reports any
	 * reference they saw.
	 */
	if (mmu_notifier_clear_young(mm, addr,
				     addr + huge_page_size(hstate_vma(vma))))
		referenced = true;
#endif /* CONFIG_MMU_NOTIFIER */

	if (referenced)
		set_page_young(page);

	/* Reset idle tracking; a future access will clear the idle flag. */
	set_page_idle(page);
	put_page(page);
}
423+
424+
/*
 * damon_mkold_hugetlb_entry() - page-walk (mm_walk_ops.hugetlb_entry)
 * callback that makes a present hugetlb pte "old" for access sampling.
 * @pte:   hugetlb pte for the current hugepage
 * @hmask: hugepage mask (unused here; required by the callback signature)
 * @addr:  start virtual address of the hugepage
 * @end:   end of the walked range (unused here)
 * @walk:  walk state carrying mm and vma
 *
 * Takes the hugetlb pte lock, skips non-present entries, and delegates the
 * actual access-bit clearing to damon_hugetlb_mkold().  Always returns 0 so
 * the walk continues.
 */
static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(walk->vma);
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	damon_hugetlb_mkold(pte, walk->mm, walk->vma, addr);

out:
	spin_unlock(ptl);
	return 0;
}
#else
/* Without hugetlb support there is no callback to install. */
#define damon_mkold_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
446+
391447
/*
 * Page-table walk callbacks used by damon_va_mkold() to clear access bits:
 * pmd_entry handles normal and THP mappings, hugetlb_entry handles hugetlb
 * mappings (NULL when CONFIG_HUGETLB_PAGE is disabled).
 */
static const struct mm_walk_ops damon_mkold_ops = {
	.pmd_entry = damon_mkold_pmd_entry,
	.hugetlb_entry = damon_mkold_hugetlb_entry,
};
394451

395452
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
@@ -484,8 +541,47 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
484541
return 0;
485542
}
486543

544+
#ifdef CONFIG_HUGETLB_PAGE
/*
 * damon_young_hugetlb_entry() - page-walk (mm_walk_ops.hugetlb_entry)
 * callback that checks whether a hugetlb page was accessed since the last
 * mkold pass.
 * @pte:   hugetlb pte for the current hugepage
 * @hmask: hugepage mask (unused here; required by the callback signature)
 * @addr:  start virtual address of the hugepage
 * @end:   end of the walked range (unused here)
 * @walk:  walk state; walk->private is a damon_young_walk_private
 *
 * A page counts as accessed if its pte access bit is set, its idle flag was
 * cleared, or a secondary MMU reports it young via the mmu notifier.  On a
 * hit, the result (young + hugepage size) is reported through walk->private.
 * Always returns 0 so the walk continues.
 */
static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long end,
				     struct mm_walk *walk)
{
	struct damon_young_walk_private *priv = walk->private;
	struct hstate *h = hstate_vma(walk->vma);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(h, walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto out;

	page = pte_page(entry);
	if (!page)
		goto out;

	/* Pin the page while its idle state is inspected. */
	get_page(page);

	if (pte_young(entry) || !page_is_idle(page) ||
	    mmu_notifier_test_young(walk->mm, addr)) {
		*priv->page_sz = huge_page_size(h);
		priv->young = true;
	}

	put_page(page);

out:
	spin_unlock(ptl);
	return 0;
}
#else
/* Without hugetlb support there is no callback to install. */
#define damon_young_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */
581+
487582
/*
 * Page-table walk callbacks used by damon_va_young() to test access bits:
 * pmd_entry handles normal and THP mappings, hugetlb_entry handles hugetlb
 * mappings (NULL when CONFIG_HUGETLB_PAGE is disabled).
 */
static const struct mm_walk_ops damon_young_ops = {
	.pmd_entry = damon_young_pmd_entry,
	.hugetlb_entry = damon_young_hugetlb_entry,
};
490586

491587
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,

0 commit comments

Comments
 (0)