16
16
17
17
extern struct range pfn_mapped [E820_MAX_ENTRIES ];
18
18
19
+ static p4d_t tmp_p4d_table [PTRS_PER_P4D ] __initdata __aligned (PAGE_SIZE );
20
+
19
21
static int __init map_range (struct range * range )
20
22
{
21
23
unsigned long start ;
@@ -31,8 +33,10 @@ static void __init clear_pgds(unsigned long start,
31
33
unsigned long end )
32
34
{
33
35
pgd_t * pgd ;
36
+ /* See comment in kasan_init() */
37
+ unsigned long pgd_end = end & PGDIR_MASK ;
34
38
35
- for (; start < end ; start += PGDIR_SIZE ) {
39
+ for (; start < pgd_end ; start += PGDIR_SIZE ) {
36
40
pgd = pgd_offset_k (start );
37
41
/*
38
42
* With folded p4d, pgd_clear() is nop, use p4d_clear()
@@ -43,29 +47,61 @@ static void __init clear_pgds(unsigned long start,
43
47
else
44
48
pgd_clear (pgd );
45
49
}
50
+
51
+ pgd = pgd_offset_k (start );
52
+ for (; start < end ; start += P4D_SIZE )
53
+ p4d_clear (p4d_offset (pgd , start ));
54
+ }
55
+
56
/*
 * Return the p4d entry covering @addr in the page table that @pgd points to.
 *
 * This runs before the final kernel mappings are in place, so the p4d page
 * referenced by *pgd cannot be reached through the usual helpers: take the
 * physical address out of the pgd entry and turn it into a virtual address
 * via the early kernel mapping (__START_KERNEL_map), compensating for
 * physical relocation with phys_base.
 *
 * Without CONFIG_X86_5LEVEL the p4d level is folded into the pgd, so the
 * pgd entry itself serves as the p4d entry.
 */
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		return (p4d_t *)pgd;

	/* Physical address of the p4d page this pgd entry points at. */
	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	/* Translate phys -> virt through the early kernel mapping. */
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}
67
+
68
/*
 * Populate the early KASAN shadow for [addr, end) at the p4d level:
 * hook kasan_zero_p4d into an empty pgd entry, then point every empty
 * p4d entry in the range at the shared kasan_zero_pud page.
 * Entries that are already populated are left untouched.
 */
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		/* Keep any mapping established earlier. */
		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		set_p4d(p4d, p4d_entry);
		/*
		 * Loop condition also bails out at the first already-populated
		 * entry after advancing, in addition to the addr == end check.
		 */
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}
47
92
48
93
/*
 * Map the whole KASAN shadow range onto the shared zero-shadow pages in
 * @pgd, one pgd entry at a time, via kasan_early_p4d_populate().
 *
 * KASAN_SHADOW_START is rounded down to a PGDIR boundary because, with the
 * shadow offset shared between 4- and 5-level paging, it is not guaranteed
 * to be PGDIR-aligned; the whole pgd entry is claimed instead.
 */
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
70
106
71
107
#ifdef CONFIG_KASAN_INLINE
@@ -102,7 +138,7 @@ void __init kasan_early_init(void)
102
138
for (i = 0 ; i < PTRS_PER_PUD ; i ++ )
103
139
kasan_zero_pud [i ] = __pud (pud_val );
104
140
105
- for (i = 0 ; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D ; i ++ )
141
+ for (i = 0 ; IS_ENABLED ( CONFIG_X86_5LEVEL ) && i < PTRS_PER_P4D ; i ++ )
106
142
kasan_zero_p4d [i ] = __p4d (p4d_val );
107
143
108
144
kasan_map_early_shadow (early_top_pgt );
@@ -118,12 +154,35 @@ void __init kasan_init(void)
118
154
#endif
119
155
120
156
memcpy (early_top_pgt , init_top_pgt , sizeof (early_top_pgt ));
157
+
158
+ /*
159
+ * We use the same shadow offset for 4- and 5-level paging to
160
+ * facilitate boot-time switching between paging modes.
161
+ * As result in 5-level paging mode KASAN_SHADOW_START and
162
+ * KASAN_SHADOW_END are not aligned to PGD boundary.
163
+ *
164
+ * KASAN_SHADOW_START doesn't share PGD with anything else.
165
+ * We claim whole PGD entry to make things easier.
166
+ *
167
+ * KASAN_SHADOW_END lands in the last PGD entry and it collides with
168
+ * bunch of things like kernel code, modules, EFI mapping, etc.
169
+ * We need to take extra steps to not overwrite them.
170
+ */
171
+ if (IS_ENABLED (CONFIG_X86_5LEVEL )) {
172
+ void * ptr ;
173
+
174
+ ptr = (void * )pgd_page_vaddr (* pgd_offset_k (KASAN_SHADOW_END ));
175
+ memcpy (tmp_p4d_table , (void * )ptr , sizeof (tmp_p4d_table ));
176
+ set_pgd (& early_top_pgt [pgd_index (KASAN_SHADOW_END )],
177
+ __pgd (__pa (tmp_p4d_table ) | _KERNPG_TABLE ));
178
+ }
179
+
121
180
load_cr3 (early_top_pgt );
122
181
__flush_tlb_all ();
123
182
124
- clear_pgds (KASAN_SHADOW_START , KASAN_SHADOW_END );
183
+ clear_pgds (KASAN_SHADOW_START & PGDIR_MASK , KASAN_SHADOW_END );
125
184
126
- kasan_populate_zero_shadow ((void * )KASAN_SHADOW_START ,
185
+ kasan_populate_zero_shadow ((void * )( KASAN_SHADOW_START & PGDIR_MASK ) ,
127
186
kasan_mem_to_shadow ((void * )PAGE_OFFSET ));
128
187
129
188
for (i = 0 ; i < E820_MAX_ENTRIES ; i ++ ) {
0 commit comments