|
16 | 16 | #include <asm/segment.h>
|
17 | 17 | #include <asm/asm.h>
|
18 | 18 | #include <asm/boot.h>
|
| 19 | +#include <asm/pgtable.h> |
19 | 20 | #include <asm/processor-flags.h>
|
20 | 21 | #include <asm/msr.h>
|
21 | 22 | #include <asm/nospec-branch.h>
|
@@ -102,8 +103,47 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
|
102 | 103 | btsl $_EFER_LME, %eax
|
103 | 104 | wrmsr
|
104 | 105 |
|
/*
 * NOTE(review): %ebp appears to hold the run-time physical address of
 * pvh_start_xen (computed earlier in this routine, outside this hunk --
 * confirm).  %ebx = run-time address minus link-time _pa(), i.e. the
 * relocation delta; a zero delta means the image runs at its link
 * address and the pre-built page tables need no fixup.
 */
| 106 | + mov %ebp, %ebx |
| 107 | + subl $_pa(pvh_start_xen), %ebx /* offset */ |
| 108 | + jz .Lpagetable_done |
| 109 | + |
/*
 * Walk all PTRS_PER_PGD top-level entries; only entries with
 * _PAGE_PRESENT set carry a physical pointer, so only those receive the
 * delta.  The 1:/2: labels are GAS numeric locals (1f/2b), so the
 * second loop below can reuse them without conflict.
 */
| 110 | + /* Fixup page-tables for relocation. */ |
| 111 | + leal rva(pvh_init_top_pgt)(%ebp), %edi |
| 112 | + movl $PTRS_PER_PGD, %ecx |
| 113 | +2: |
| 114 | + testl $_PAGE_PRESENT, 0x00(%edi) |
| 115 | + jz 1f |
| 116 | + addl %ebx, 0x00(%edi) |
| 117 | +1: |
| 118 | + addl $8, %edi |
| 119 | + decl %ecx |
| 120 | + jnz 2b |
| 121 | + |
| 122 | + /* L3 ident has a single entry. */ |
| 123 | + leal rva(pvh_level3_ident_pgt)(%ebp), %edi |
| 124 | + addl %ebx, 0x00(%edi) |
| 125 | + |
/*
 * NOTE(review): PAGE_SIZE-16 and PAGE_SIZE-8 address the last two slots
 * of the kernel L3 table (510: pointer to pvh_level2_kernel_pgt, 511:
 * the empty "no fixmap" entry declared below).  Adding the delta to the
 * empty slot leaves _PAGE_PRESENT clear, so the entry stays ignored --
 * presumably intentional; confirm.
 */
| 126 | + leal rva(pvh_level3_kernel_pgt)(%ebp), %edi |
| 127 | + addl %ebx, (PAGE_SIZE - 16)(%edi) |
| 128 | + addl %ebx, (PAGE_SIZE - 8)(%edi) |
| 129 | + |
| 130 | + /* pvh_level2_ident_pgt is fine - large pages */ |
| 131 | + |
/*
 * NOTE(review): the ident PMD maps a fixed physical range from 0 and so
 * is unaffected by relocation; these kernel PMD entries point into the
 * kernel image itself, which moved -- hence the second fixup pass.
 */
| 132 | + /* pvh_level2_kernel_pgt needs adjustment - large pages */ |
| 133 | + leal rva(pvh_level2_kernel_pgt)(%ebp), %edi |
| 134 | + movl $PTRS_PER_PMD, %ecx |
| 135 | +2: |
| 136 | + testl $_PAGE_PRESENT, 0x00(%edi) |
| 137 | + jz 1f |
| 138 | + addl %ebx, 0x00(%edi) |
| 139 | +1: |
| 140 | + addl $8, %edi |
| 141 | + decl %ecx |
| 142 | + jnz 2b |
| 143 | + |
| 144 | +.Lpagetable_done: |
105 | 145 | /* Enable pre-constructed page tables. */
|
106 |
| - leal rva(init_top_pgt)(%ebp), %eax |
| 146 | + leal rva(pvh_init_top_pgt)(%ebp), %eax |
107 | 147 | mov %eax, %cr3
|
108 | 148 | mov $(X86_CR0_PG | X86_CR0_PE), %eax
|
109 | 149 | mov %eax, %cr0
|
@@ -198,5 +238,67 @@ SYM_DATA_START_LOCAL(early_stack)
|
198 | 238 | .fill BOOT_STACK_SIZE, 1, 0
|
199 | 239 | SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
|
200 | 240 |
|
| 241 | +#ifdef CONFIG_X86_64 |
| 242 | +/* |
| 243 | + * Xen PVH needs a set of identity mapped and kernel high mapping |
| 244 | + * page tables. pvh_start_xen starts running on the identity mapped |
| 245 | + * page tables, but xen_prepare_pvh calls into the high mapping. |
| 246 | + * These page tables need to be relocatable and are only used until |
| 247 | + * startup_64 transitions to init_top_pgt. |
| 248 | + */ |
/*
 * NOTE(review): every .quad below stores a link-time *physical* address
 * (symbol - __START_KERNEL_map) plus permission bits.  When the image
 * is loaded away from its link address, the relocation fixup earlier in
 * this file (pvh_start_xen) adds the load delta to these entries before
 * %cr3 is pointed at pvh_init_top_pgt.
 */
| 249 | +SYM_DATA_START_PAGE_ALIGNED(pvh_init_top_pgt) |
| 250 | + .quad pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC |
| 251 | + .org pvh_init_top_pgt + L4_PAGE_OFFSET * 8, 0 |
| 252 | + .quad pvh_level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC |
| 253 | + .org pvh_init_top_pgt + L4_START_KERNEL * 8, 0 |
| 254 | + /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ |
| 255 | + .quad pvh_level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC |
| 256 | +SYM_DATA_END(pvh_init_top_pgt) |
| 257 | + |
/* L3 ident table: a single present entry (slot 0 -> first 1 GiB). */
| 258 | +SYM_DATA_START_PAGE_ALIGNED(pvh_level3_ident_pgt) |
| 259 | + .quad pvh_level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC |
| 260 | + .fill 511, 8, 0 |
| 261 | +SYM_DATA_END(pvh_level3_ident_pgt) |
| 262 | +SYM_DATA_START_PAGE_ALIGNED(pvh_level2_ident_pgt) |
| 263 | + /* |
| 264 | + * Since I easily can, map the first 1G. |
| 265 | + * Don't set NX because code runs from these pages. |
| 266 | + * |
| 267 | + * Note: This sets _PAGE_GLOBAL regardless of whether |
| 268 | + * the CPU supports it or has it enabled. But, |
| 269 | + * the CPU should ignore the bit. |
| 270 | + */ |
| 271 | + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) |
| 272 | +SYM_DATA_END(pvh_level2_ident_pgt) |
| 273 | +SYM_DATA_START_PAGE_ALIGNED(pvh_level3_kernel_pgt) |
| 274 | + .fill L3_START_KERNEL, 8, 0 |
| 275 | + /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */ |
| 276 | + .quad pvh_level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC |
| 277 | + .quad 0 /* no fixmap */ |
| 278 | +SYM_DATA_END(pvh_level3_kernel_pgt) |
| 279 | + |
| 280 | +SYM_DATA_START_PAGE_ALIGNED(pvh_level2_kernel_pgt) |
| 281 | + /* |
| 282 | + * Kernel high mapping. |
| 283 | + * |
| 284 | + * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in |
| 285 | + * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled, |
| 286 | + * 512 MiB otherwise. |
| 287 | + * |
| 288 | + * (NOTE: after that starts the module area, see MODULES_VADDR.) |
| 289 | + * |
| 290 | + * This table is eventually used by the kernel during normal runtime. |
| 291 | + * Care must be taken to clear out undesired bits later, like _PAGE_RW |
| 292 | + * or _PAGE_GLOBAL in some cases. |
| 293 | + */ |
| 294 | + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE / PMD_SIZE) |
| 295 | +SYM_DATA_END(pvh_level2_kernel_pgt) |
| 296 | + |
/*
 * NOTE(review): PHYS32_RELOC tells the PVH loader the image may be
 * relocated; the three fields look like alignment, preferred minimum
 * load address, and maximum address bound -- verify the field order
 * against Xen's public elfnote.h before relying on it.
 */
| 297 | + ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_RELOC, |
| 298 | + .long CONFIG_PHYSICAL_ALIGN; |
| 299 | + .long LOAD_PHYSICAL_ADDR; |
| 300 | + .long KERNEL_IMAGE_SIZE - 1) |
| 301 | +#endif |
| 302 | + |
/*
 * Advertise the 32-bit PVH entry point to the loader: _ASM_PTR of
 * (pvh_start_xen - __START_KERNEL_map) is the link-time physical
 * address of pvh_start_xen.
 */
201 | 303 | ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
|
202 | 304 | _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
|
0 commit comments