Commit 42ad41e
mm/arm64: support large pfn mappings
jira LE-3557
Rebuild_History Non-Buildable kernel-5.14.0-570.26.1.el9_6
commit-author Peter Xu <[email protected]>
commit 3e509c9
Empty-Commit: Cherry-Pick Conflicts during history rebuild. Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-5.14.0-570.26.1.el9_6/3e509c9b.failed

Support huge pfnmaps by using bit 56 (PTE_SPECIAL) for "special" on pmds/puds. Provide the pmd/pud helpers to set/get special bit.

There's one more thing missing for arm64 which is the pxx_pgprot() for pmd/pud. Add them too, which is mostly the same as the pte version by dropping the pfn field. These helpers are essential to be used in the new follow_pfnmap*() API to report valid pgprot_t results.

Note that arm64 doesn't yet support huge PUD yet, but it's still straightforward to provide the pud helpers that we need altogether. Only PMD helpers will make an immediate benefit until arm64 will support huge PUDs first in general (e.g. in THPs).

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Peter Xu <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Alexander Gordeev <[email protected]>
Cc: Alex Williamson <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Gavin Shan <[email protected]>
Cc: Gerald Schaefer <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Niklas Schnelle <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Ryan Roberts <[email protected]>
Cc: Sean Christopherson <[email protected]>
Cc: Sven Schnelle <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
(cherry picked from commit 3e509c9)
Signed-off-by: Jonathan Maple <[email protected]>

# Conflicts:
#	arch/arm64/Kconfig
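The mechanism described above comes down to one software bit: the commit reuses PTE_SPECIAL (bit 56), which arm64 already uses to mark "special" PTEs, at the PMD/PUD level, so marking a huge pfnmap entry is a plain set of that bit and the later check is a plain test. Below is a minimal userspace model of that round trip, with a simplified 64-bit entry and made-up model_* names; it is an illustrative sketch, not kernel code.

#include <assert.h>
#include <stdint.h>

#define MODEL_PTE_SPECIAL	(1ULL << 56)	/* software bit 56, as on arm64 */

typedef uint64_t model_pmd_t;

/* Shaped like the pmd_mkspecial()/pmd_special() helpers in the diff below. */
static model_pmd_t model_pmd_mkspecial(model_pmd_t pmd)
{
	return pmd | MODEL_PTE_SPECIAL;
}

static int model_pmd_special(model_pmd_t pmd)
{
	return !!(pmd & MODEL_PTE_SPECIAL);
}

int main(void)
{
	/* Fake block entry: some output-address bits plus attribute bits. */
	model_pmd_t pmd = 0x40001000ULL | 0x711;

	assert(!model_pmd_special(pmd));	/* not special until marked */
	pmd = model_pmd_mkspecial(pmd);
	assert(model_pmd_special(pmd));		/* only bit 56 was added */
	return 0;
}

The real helpers in the pgtable.h hunk further down do the same thing through set_pmd_bit() and pmd_val().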
1 parent 37c3a64 commit 42ad41e

1 file changed: 118 additions & 0 deletions
@@ -0,0 +1,118 @@
mm/arm64: support large pfn mappings

jira LE-3557
Rebuild_History Non-Buildable kernel-5.14.0-570.26.1.el9_6
commit-author Peter Xu <[email protected]>
commit 3e509c9b03f9abc7804c80bed266a6cc4286a5a8
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-5.14.0-570.26.1.el9_6/3e509c9b.failed

Support huge pfnmaps by using bit 56 (PTE_SPECIAL) for "special" on
pmds/puds. Provide the pmd/pud helpers to set/get special bit.

There's one more thing missing for arm64 which is the pxx_pgprot() for
pmd/pud. Add them too, which is mostly the same as the pte version by
dropping the pfn field. These helpers are essential to be used in the new
follow_pfnmap*() API to report valid pgprot_t results.

Note that arm64 doesn't yet support huge PUD yet, but it's still
straightforward to provide the pud helpers that we need altogether. Only
PMD helpers will make an immediate benefit until arm64 will support huge
PUDs first in general (e.g. in THPs).

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Peter Xu <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Alexander Gordeev <[email protected]>
Cc: Alex Williamson <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Gavin Shan <[email protected]>
Cc: Gerald Schaefer <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Niklas Schnelle <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Ryan Roberts <[email protected]>
Cc: Sean Christopherson <[email protected]>
Cc: Sven Schnelle <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Cc: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
(cherry picked from commit 3e509c9b03f9abc7804c80bed266a6cc4286a5a8)
Signed-off-by: Jonathan Maple <[email protected]>

# Conflicts:
#	arch/arm64/Kconfig
diff --cc arch/arm64/Kconfig
index ca1f39e49631,6607ed8fdbb4..000000000000
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@@ -97,7 -99,7 +97,11 @@@ config ARM6
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
 	select ARCH_SUPPORTS_PER_VMA_LOCK
++<<<<<<< HEAD
+	select ARCH_SUPPORTS_RT
++=======
+	select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
++>>>>>>> 3e509c9b03f9 (mm/arm64: support large pfn mappings)
 	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 	select ARCH_WANT_DEFAULT_BPF_JIT
* Unmerged path arch/arm64/Kconfig
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e3ea0ef9673d..7fa291d3f90a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -528,6 +528,14 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 	return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
 }

+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+#define pmd_special(pte)	(!!((pmd_val(pte) & PTE_SPECIAL)))
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+	return set_pmd_bit(pmd, __pgprot(PTE_SPECIAL));
+}
+#endif
+
 #define __pmd_to_phys(pmd)	__pte_to_phys(pmd_pte(pmd))
 #define __phys_to_pmd_val(phys)	__phys_to_pte_val(phys)
 #define pmd_pfn(pmd)		((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
@@ -545,6 +553,27 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 #define pud_pfn(pud)		((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
 #define pfn_pud(pfn,prot)	__pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+#define pud_special(pte)	pte_special(pud_pte(pud))
+#define pud_mkspecial(pte)	pte_pud(pte_mkspecial(pud_pte(pud)))
+#endif
+
+#define pmd_pgprot pmd_pgprot
+static inline pgprot_t pmd_pgprot(pmd_t pmd)
+{
+	unsigned long pfn = pmd_pfn(pmd);
+
+	return __pgprot(pmd_val(pfn_pmd(pfn, __pgprot(0))) ^ pmd_val(pmd));
+}
+
+#define pud_pgprot pud_pgprot
+static inline pgprot_t pud_pgprot(pud_t pud)
+{
+	unsigned long pfn = pud_pfn(pud);
+
+	return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
+}
+
 static inline void __set_pte_at(struct mm_struct *mm,
 				unsigned long __always_unused addr,
 				pte_t *ptep, pte_t pte, unsigned int nr)
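A note on the pmd_pgprot()/pud_pgprot() shape above: the helper rebuilds an entry from its own pfn with an empty pgprot and XORs that against the original, so every bit outside the pfn field (including the special bit) survives, which is exactly what the new follow_pfnmap*() API needs in order to report a valid pgprot_t. Here is a self-contained userspace sketch of the same arithmetic, again with a simplified layout and made-up model_* names rather than the real kernel types.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT	12
#define MODEL_PFN_MASK		0x0000fffffffff000ULL	/* output-address field */
#define MODEL_PTE_SPECIAL	(1ULL << 56)

typedef uint64_t model_pmd_t;

/* Pack a pfn together with attribute ("pgprot") bits. */
static model_pmd_t model_pfn_pmd(uint64_t pfn, uint64_t prot)
{
	return ((pfn << MODEL_PAGE_SHIFT) & MODEL_PFN_MASK) | prot;
}

static uint64_t model_pmd_pfn(model_pmd_t pmd)
{
	return (pmd & MODEL_PFN_MASK) >> MODEL_PAGE_SHIFT;
}

/* Same shape as pmd_pgprot() above: XOR away the pfn field, keep the rest. */
static uint64_t model_pmd_pgprot(model_pmd_t pmd)
{
	return model_pfn_pmd(model_pmd_pfn(pmd), 0) ^ pmd;
}

int main(void)
{
	uint64_t prot = MODEL_PTE_SPECIAL | 0x711;	/* arbitrary attribute bits */
	model_pmd_t pmd = model_pfn_pmd(0x12345, prot);

	assert(model_pmd_pfn(pmd) == 0x12345);
	assert(model_pmd_pgprot(pmd) == prot);	/* attributes recovered, pfn gone */
	printf("pgprot = %#llx\n", (unsigned long long)model_pmd_pgprot(pmd));
	return 0;
}

The "#define pmd_pgprot pmd_pgprot" line in the hunk is the usual kernel convention for letting generic code detect at preprocessing time that the architecture supplies its own version of the helper.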
