
Commit 3b50a6e

Ralph Campbell authored and jgunthorpe committed
mm/hmm: provide the page mapping order in hmm_range_fault()
hmm_range_fault() returns an array of page frame numbers and flags for how the pages are mapped in the requested process' page tables. The PFN can be used to get the struct page with hmm_pfn_to_page() and the page size order can be determined with compound_order(page).

However, if the page is larger than order 0 (PAGE_SIZE), there is no indication that a compound page is mapped by the CPU using a larger page size. Without this information, the caller can't safely use a large device PTE to map the compound page because the CPU might be using smaller PTEs with different read/write permissions.

Add a new function hmm_pfn_to_map_order() to return the mapping size order so that callers know the pages are being mapped with consistent permissions and a large device page table mapping can be used if one is available.

This will allow devices to optimize mapping the page into HW by avoiding or batching work for huge pages. For instance the dma_map can be done with a high order directly.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Ralph Campbell <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
1 parent dcb7fd8 commit 3b50a6e
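
As a rough sketch of the intended use (illustrative only, not part of this commit: my_device, my_device_map_range() and my_device_map_chunk() are hypothetical names, and the required notifier locking is only summarized in the comments), a driver could batch its work over the hmm_range_fault() result array by the reported mapping order:

/*
 * Hypothetical caller sketch: walk the hmm_range_fault() output and batch
 * device mapping work by the CPU mapping order. Assumes range->hmm_pfns was
 * filled by a successful hmm_range_fault() and is being consumed under the
 * driver's lock after mmu_interval_read_begin(), per the usual HMM rules.
 */
static void my_device_map_range(struct my_device *dev, struct hmm_range *range)
{
        unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
        unsigned long i = 0;

        while (i < npages) {
                unsigned long hmm_pfn = range->hmm_pfns[i];
                unsigned long va = range->start + (i << PAGE_SHIFT);
                unsigned long n = 1;

                if (hmm_pfn & HMM_PFN_VALID) {
                        /* Only meaningful once HMM_PFN_VALID has been tested */
                        unsigned int order = hmm_pfn_to_map_order(hmm_pfn);

                        /*
                         * Every pfn inside this high order page carries the
                         * same flags, so one large device PTE (and one high
                         * order dma_map) can cover the rest of the CPU page.
                         * Clamp to the requested range since the CPU page may
                         * extend past range->end.
                         */
                        n = min(npages - i,
                                (ALIGN(va + 1, PAGE_SIZE << order) - va) >>
                                        PAGE_SHIFT);
                        my_device_map_chunk(dev, va, hmm_pfn_to_page(hmm_pfn),
                                            order, n);
                }
                i += n;
        }
}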

2 files changed: +35 -5 lines changed


include/linux/hmm.h

Lines changed: 22 additions & 2 deletions
@@ -37,16 +37,17 @@
  * will fail. Must be combined with HMM_PFN_REQ_FAULT.
  */
 enum hmm_pfn_flags {
-        /* Output flags */
+        /* Output fields and flags */
         HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
         HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
         HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
+        HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),
 
         /* Input flags */
         HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
         HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,
 
-        HMM_PFN_FLAGS = HMM_PFN_VALID | HMM_PFN_WRITE | HMM_PFN_ERROR,
+        HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT,
 };
 
 /*
@@ -61,6 +62,25 @@ static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
         return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
 }
 
+/*
+ * hmm_pfn_to_map_order() - return the CPU mapping size order
+ *
+ * This is optionally useful to optimize processing of the pfn result
+ * array. It indicates that the page starts at the order aligned VA and is
+ * 1<<order bytes long. Every pfn within an high order page will have the
+ * same pfn flags, both access protections and the map_order. The caller must
+ * be careful with edge cases as the start and end VA of the given page may
+ * extend past the range used with hmm_range_fault().
+ *
+ * This must be called under the caller 'user_lock' after a successful
+ * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
+ * already.
+ */
+static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
+{
+        return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
+}
+
 /*
  * struct hmm_range - track invalidation lock on virtual address range
  *
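
To make the new top-byte encoding concrete: on a 64-bit kernel, bits 63/62/61 of each hmm_pfn carry HMM_PFN_VALID/WRITE/ERROR, bits 56-60 carry the 5-bit mapping order, and HMM_PFN_FLAGS now masks the whole top byte, so hmm_pfn_to_page() still recovers the bare pfn. A minimal illustrative sketch (example_order_round_trip() is a hypothetical name; the order is encoded the same way the new hmm_pfn_flags_order() helper in mm/hmm.c does below):

/*
 * Sketch only, assuming BITS_PER_LONG == 64 so HMM_PFN_ORDER_SHIFT == 56.
 * Encode a pfn for a PMD-sized mapping (order 9 with 4 KiB base pages),
 * then decode it with the new helper.
 */
static void example_order_round_trip(unsigned long pfn)
{
        unsigned long hmm_pfn = pfn | HMM_PFN_VALID | HMM_PFN_WRITE |
                                (9UL << HMM_PFN_ORDER_SHIFT);

        /* The order comes back out of bits 56-60 ... */
        WARN_ON(hmm_pfn_to_map_order(hmm_pfn) != 9);
        /* ... and hmm_pfn_to_page() masks off the whole top byte */
        WARN_ON(hmm_pfn_to_page(hmm_pfn) != pfn_to_page(pfn));
}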

mm/hmm.c

Lines changed: 13 additions & 3 deletions
@@ -165,12 +165,19 @@ static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
         return hmm_pfns_fill(addr, end, range, 0);
 }
 
+static inline unsigned long hmm_pfn_flags_order(unsigned long order)
+{
+        return order << HMM_PFN_ORDER_SHIFT;
+}
+
 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
                                                  pmd_t pmd)
 {
         if (pmd_protnone(pmd))
                 return 0;
-        return pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
+        return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
+                                 HMM_PFN_VALID) |
+               hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -389,7 +396,9 @@ static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
 {
         if (!pud_present(pud))
                 return 0;
-        return pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
+        return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
+                                 HMM_PFN_VALID) |
+               hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
 }
 
 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
@@ -474,7 +483,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 
         i = (start - range->start) >> PAGE_SHIFT;
         pfn_req_flags = range->hmm_pfns[i];
-        cpu_flags = pte_to_hmm_pfn_flags(range, entry);
+        cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
+                    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
         required_fault =
                 hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
         if (required_fault) {
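
For a sense of the values these three paths record on a typical x86-64 configuration (4 KiB base pages, PMD_SHIFT = 21, PUD_SHIFT = 30): order 9 for a huge PMD (2 MiB), order 18 for a PUD (1 GiB), and huge_page_order() of the VMA's hstate for hugetlbfs. A small illustrative sketch of that arithmetic (example_print_orders() is a hypothetical name; the values are architecture- and config-dependent):

/* Illustrative only: print the mapping orders the walker paths would record. */
static void example_print_orders(struct vm_area_struct *vma)
{
        pr_info("PMD maps order %lu (%lu KiB)\n",
                (unsigned long)(PMD_SHIFT - PAGE_SHIFT),
                (PAGE_SIZE << (PMD_SHIFT - PAGE_SHIFT)) / SZ_1K);
        pr_info("PUD maps order %lu (%lu KiB)\n",
                (unsigned long)(PUD_SHIFT - PAGE_SHIFT),
                (PAGE_SIZE << (PUD_SHIFT - PAGE_SHIFT)) / SZ_1K);
        if (is_vm_hugetlb_page(vma))
                pr_info("hugetlb VMA maps order %u\n",
                        huge_page_order(hstate_vma(vma)));
}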
