Skip to content

Commit 2b5d00b

Browse files
Tom Lendacky (tlendacky) authored and Ingo Molnar committed
x86/mm: Centralize PMD flags in sme_encrypt_kernel()
In preparation for encrypting more than just the kernel during early boot processing, centralize the use of the PMD flag settings based on the type of mapping desired. When 4KB aligned encryption is added, this will allow either PTE flags or large page PMD flags to be used without requiring the caller to adjust.

Tested-by: Gabriel Craciunescu <[email protected]>
Signed-off-by: Tom Lendacky <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brijesh Singh <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent bacf6b4 commit 2b5d00b

File tree

1 file changed

+77
-56
lines changed

1 file changed

+77
-56
lines changed

arch/x86/mm/mem_encrypt.c

Lines changed: 77 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -468,31 +468,39 @@ struct sme_populate_pgd_data {
468468
void *pgtable_area;
469469
pgd_t *pgd;
470470

471-
pmdval_t pmd_val;
471+
pmdval_t pmd_flags;
472+
unsigned long paddr;
473+
472474
unsigned long vaddr;
475+
unsigned long vaddr_end;
473476
};
474477

475-
static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
476-
unsigned long end)
478+
/*
 * Remove the PGD entries covering the virtual range described by
 * ppd->vaddr .. ppd->vaddr_end, making those mappings inaccessible.
 * Only the top-level (PGD) entries are cleared; the lower-level page
 * tables they pointed at are simply abandoned (boot-time scratch area).
 */
static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;

	/* Align both range endpoints down to their covering PGD entry. */
	pgd_start = ppd->vaddr & PGDIR_MASK;
	pgd_end = ppd->vaddr_end & PGDIR_MASK;

	/* Size in bytes of the (inclusive) run of PGD entries to clear. */
	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);

	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);

	memset(pgd_p, 0, pgd_size);
}
491492

492-
#define PGD_FLAGS _KERNPG_TABLE_NOENC
493-
#define P4D_FLAGS _KERNPG_TABLE_NOENC
494-
#define PUD_FLAGS _KERNPG_TABLE_NOENC
495-
#define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
493+
#define PGD_FLAGS _KERNPG_TABLE_NOENC
494+
#define P4D_FLAGS _KERNPG_TABLE_NOENC
495+
#define PUD_FLAGS _KERNPG_TABLE_NOENC
496+
497+
#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
498+
499+
#define PMD_FLAGS_DEC PMD_FLAGS_LARGE
500+
#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
501+
(_PAGE_PAT | _PAGE_PWT))
502+
503+
#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
496504

497505
static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
498506
{
@@ -561,7 +569,35 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
561569

562570
pmd_p += pmd_index(ppd->vaddr);
563571
if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
564-
native_set_pmd(pmd_p, native_make_pmd(ppd->pmd_val));
572+
native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
573+
}
574+
575+
/*
 * Map the range described by ppd (paddr/vaddr/vaddr_end) using large
 * (PMD-sized) pages, applying the supplied PMD flags to every entry.
 * On return, ppd->vaddr and ppd->paddr have been advanced past the range.
 */
static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags)
{
	ppd->pmd_flags = pmd_flags;

	/* One large-page PMD entry per iteration. */
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd_large(ppd);

		ppd->vaddr += PMD_PAGE_SIZE;
		ppd->paddr += PMD_PAGE_SIZE;
	}
}
587+
588+
/* Map a range with the encrypted large-page PMD flags (_PAGE_ENC set). */
static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC);
}
592+
593+
/* Map a range with the decrypted large-page PMD flags (no _PAGE_ENC). */
static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC);
}
597+
598+
/* Map a range decrypted and write-protected (PAT/PWT cache bits set). */
static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP);
}
566602

567603
static unsigned long __init sme_pgtable_calc(unsigned long len)
@@ -621,7 +657,6 @@ void __init sme_encrypt_kernel(void)
621657
unsigned long kernel_start, kernel_end, kernel_len;
622658
struct sme_populate_pgd_data ppd;
623659
unsigned long pgtable_area_len;
624-
unsigned long paddr, pmd_flags;
625660
unsigned long decrypted_base;
626661

627662
if (!sme_active())
@@ -693,14 +728,10 @@ void __init sme_encrypt_kernel(void)
693728
* addressing the workarea.
694729
*/
695730
ppd.pgd = (pgd_t *)native_read_cr3_pa();
696-
paddr = workarea_start;
697-
while (paddr < workarea_end) {
698-
ppd.pmd_val = paddr + PMD_FLAGS;
699-
ppd.vaddr = paddr;
700-
sme_populate_pgd_large(&ppd);
701-
702-
paddr += PMD_PAGE_SIZE;
703-
}
731+
ppd.paddr = workarea_start;
732+
ppd.vaddr = workarea_start;
733+
ppd.vaddr_end = workarea_end;
734+
sme_map_range_decrypted(&ppd);
704735

705736
/* Flush the TLB - no globals so cr3 is enough */
706737
native_write_cr3(__native_read_cr3());
@@ -715,17 +746,6 @@ void __init sme_encrypt_kernel(void)
715746
memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
716747
ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
717748

718-
/* Add encrypted kernel (identity) mappings */
719-
pmd_flags = PMD_FLAGS | _PAGE_ENC;
720-
paddr = kernel_start;
721-
while (paddr < kernel_end) {
722-
ppd.pmd_val = paddr + pmd_flags;
723-
ppd.vaddr = paddr;
724-
sme_populate_pgd_large(&ppd);
725-
726-
paddr += PMD_PAGE_SIZE;
727-
}
728-
729749
/*
730750
* A different PGD index/entry must be used to get different
731751
* pagetable entries for the decrypted mapping. Choose the next
@@ -735,29 +755,28 @@ void __init sme_encrypt_kernel(void)
735755
decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
736756
decrypted_base <<= PGDIR_SHIFT;
737757

758+
/* Add encrypted kernel (identity) mappings */
759+
ppd.paddr = kernel_start;
760+
ppd.vaddr = kernel_start;
761+
ppd.vaddr_end = kernel_end;
762+
sme_map_range_encrypted(&ppd);
763+
738764
/* Add decrypted, write-protected kernel (non-identity) mappings */
739-
pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
740-
paddr = kernel_start;
741-
while (paddr < kernel_end) {
742-
ppd.pmd_val = paddr + pmd_flags;
743-
ppd.vaddr = paddr + decrypted_base;
744-
sme_populate_pgd_large(&ppd);
745-
746-
paddr += PMD_PAGE_SIZE;
747-
}
765+
ppd.paddr = kernel_start;
766+
ppd.vaddr = kernel_start + decrypted_base;
767+
ppd.vaddr_end = kernel_end + decrypted_base;
768+
sme_map_range_decrypted_wp(&ppd);
748769

749770
/* Add decrypted workarea mappings to both kernel mappings */
750-
paddr = workarea_start;
751-
while (paddr < workarea_end) {
752-
ppd.pmd_val = paddr + PMD_FLAGS;
753-
ppd.vaddr = paddr;
754-
sme_populate_pgd_large(&ppd);
755-
756-
ppd.vaddr = paddr + decrypted_base;
757-
sme_populate_pgd_large(&ppd);
771+
ppd.paddr = workarea_start;
772+
ppd.vaddr = workarea_start;
773+
ppd.vaddr_end = workarea_end;
774+
sme_map_range_decrypted(&ppd);
758775

759-
paddr += PMD_PAGE_SIZE;
760-
}
776+
ppd.paddr = workarea_start;
777+
ppd.vaddr = workarea_start + decrypted_base;
778+
ppd.vaddr_end = workarea_end + decrypted_base;
779+
sme_map_range_decrypted(&ppd);
761780

762781
/* Perform the encryption */
763782
sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
@@ -768,11 +787,13 @@ void __init sme_encrypt_kernel(void)
768787
* the decrypted areas - all that is needed for this is to remove
769788
* the PGD entry/entries.
770789
*/
771-
sme_clear_pgd(ppd.pgd, kernel_start + decrypted_base,
772-
kernel_end + decrypted_base);
790+
ppd.vaddr = kernel_start + decrypted_base;
791+
ppd.vaddr_end = kernel_end + decrypted_base;
792+
sme_clear_pgd(&ppd);
773793

774-
sme_clear_pgd(ppd.pgd, workarea_start + decrypted_base,
775-
workarea_end + decrypted_base);
794+
ppd.vaddr = workarea_start + decrypted_base;
795+
ppd.vaddr_end = workarea_end + decrypted_base;
796+
sme_clear_pgd(&ppd);
776797

777798
/* Flush the TLB - no globals so cr3 is enough */
778799
native_write_cr3(__native_read_cr3());

0 commit comments

Comments
 (0)