Commit cc5f01e

tlendacky authored and Ingo Molnar committed
x86/mm: Prepare sme_encrypt_kernel() for PAGE aligned encryption
In preparation for encrypting more than just the kernel, the encryption
support in sme_encrypt_kernel() needs to support 4KB page aligned
encryption instead of just 2MB large page aligned encryption.

Update the routines that populate the PGD to support non-2MB aligned
addresses. This is done by creating PTE page tables for the start and
end portions of the address range that fall outside of the 2MB
alignment. This results in, at most, two extra pages to hold the PTE
entries for each mapping of a range.

Tested-by: Gabriel Craciunescu <[email protected]>
Signed-off-by: Tom Lendacky <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brijesh Singh <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 2b5d00b commit cc5f01e
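To make the splitting concrete: given an arbitrary range, the new __sme_map_range() (see the diff below) maps the unaligned head and tail with 4KB PTE entries and the aligned middle with 2MB PMD entries. The following minimal user-space sketch is not part of the commit; PMD_PAGE_SIZE, PMD_PAGE_MASK and ALIGN() mirror their kernel counterparts and the example range is made up. It computes the same three pieces:

	#include <stdio.h>

	#define PMD_PAGE_SIZE	0x200000UL		/* 2MB */
	#define PMD_PAGE_MASK	(~(PMD_PAGE_SIZE - 1))
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		/* Hypothetical range, not 2MB aligned at either end. */
		unsigned long vaddr = 0x1f0000UL;
		unsigned long vaddr_end = 0x620000UL;

		/* Head: 4KB PTE entries up to the first 2MB boundary. */
		printf("PTE head: 0x%lx - 0x%lx\n",
		       vaddr, ALIGN(vaddr, PMD_PAGE_SIZE));

		/* Middle: 2MB PMD entries for the aligned bulk. */
		printf("PMD body: 0x%lx - 0x%lx\n",
		       ALIGN(vaddr, PMD_PAGE_SIZE), vaddr_end & PMD_PAGE_MASK);

		/* Tail: 4KB PTE entries past the last 2MB boundary. */
		printf("PTE tail: 0x%lx - 0x%lx\n",
		       vaddr_end & PMD_PAGE_MASK, vaddr_end);

		return 0;
	}

For this range the pieces come out as 0x1f0000-0x200000 (PTE), 0x200000-0x600000 (PMD) and 0x600000-0x620000 (PTE); when an end is already 2MB aligned, the corresponding PTE piece is an empty range and the mapping loop does nothing.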

2 files changed: +121 -22 lines changed

arch/x86/mm/mem_encrypt.c

Lines changed: 107 additions & 16 deletions
@@ -469,6 +469,7 @@ struct sme_populate_pgd_data {
 	pgd_t	*pgd;
 
 	pmdval_t pmd_flags;
+	pteval_t pte_flags;
 	unsigned long paddr;
 
 	unsigned long vaddr;
@@ -493,6 +494,7 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 #define PGD_FLAGS	_KERNPG_TABLE_NOENC
 #define P4D_FLAGS	_KERNPG_TABLE_NOENC
 #define PUD_FLAGS	_KERNPG_TABLE_NOENC
+#define PMD_FLAGS	_KERNPG_TABLE_NOENC
 
 #define PMD_FLAGS_LARGE	(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
 
@@ -502,7 +504,15 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 
 #define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
 
-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
+
+#define PTE_FLAGS_DEC		PTE_FLAGS
+#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+				 (_PAGE_PAT | _PAGE_PWT))
+
+#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
+
+static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 {
 	pgd_t *pgd_p;
 	p4d_t *p4d_p;
@@ -553,7 +563,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 	pud_p += pud_index(ppd->vaddr);
 	if (native_pud_val(*pud_p)) {
 		if (native_pud_val(*pud_p) & _PAGE_PSE)
-			return;
+			return NULL;
 
 		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
 	} else {
@@ -567,16 +577,55 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 		native_set_pud(pud_p, pud);
 	}
 
+	return pmd_p;
+}
+
+static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+{
+	pmd_t *pmd_p;
+
+	pmd_p = sme_prepare_pgd(ppd);
+	if (!pmd_p)
+		return;
+
 	pmd_p += pmd_index(ppd->vaddr);
 	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
 		native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
 }
 
-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
-				   pmdval_t pmd_flags)
+static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 {
-	ppd->pmd_flags = pmd_flags;
+	pmd_t *pmd_p;
+	pte_t *pte_p;
+
+	pmd_p = sme_prepare_pgd(ppd);
+	if (!pmd_p)
+		return;
+
+	pmd_p += pmd_index(ppd->vaddr);
+	if (native_pmd_val(*pmd_p)) {
+		if (native_pmd_val(*pmd_p) & _PAGE_PSE)
+			return;
+
+		pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
+	} else {
+		pmd_t pmd;
 
+		pte_p = ppd->pgtable_area;
+		memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
+		ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
+
+		pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
+		native_set_pmd(pmd_p, pmd);
+	}
+
+	pte_p += pte_index(ppd->vaddr);
+	if (!native_pte_val(*pte_p))
+		native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
+}
+
+static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+{
 	while (ppd->vaddr < ppd->vaddr_end) {
 		sme_populate_pgd_large(ppd);
 
@@ -585,33 +634,71 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
 	}
 }
 
+static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+{
+	while (ppd->vaddr < ppd->vaddr_end) {
+		sme_populate_pgd(ppd);
+
+		ppd->vaddr += PAGE_SIZE;
+		ppd->paddr += PAGE_SIZE;
+	}
+}
+
+static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+				   pmdval_t pmd_flags, pteval_t pte_flags)
+{
+	unsigned long vaddr_end;
+
+	ppd->pmd_flags = pmd_flags;
+	ppd->pte_flags = pte_flags;
+
+	/* Save original end value since we modify the struct value */
+	vaddr_end = ppd->vaddr_end;
+
+	/* If start is not 2MB aligned, create PTE entries */
+	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
+	__sme_map_range_pte(ppd);
+
+	/* Create PMD entries */
+	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
+	__sme_map_range_pmd(ppd);
+
+	/* If end is not 2MB aligned, create PTE entries */
+	ppd->vaddr_end = vaddr_end;
+	__sme_map_range_pte(ppd);
+}
+
 static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_ENC);
+	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
 }
 
 static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_DEC);
+	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
 }
 
 static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_DEC_WP);
+	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
 }
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
 {
-	unsigned long p4d_size, pud_size, pmd_size;
+	unsigned long p4d_size, pud_size, pmd_size, pte_size;
 	unsigned long total;
 
 	/*
 	 * Perform a relatively simplistic calculation of the pagetable
-	 * entries that are needed. That mappings will be covered by 2MB
-	 * PMD entries so we can conservatively calculate the required
+	 * entries that are needed. Those mappings will be covered mostly
+	 * by 2MB PMD entries so we can conservatively calculate the required
 	 * number of P4D, PUD and PMD structures needed to perform the
-	 * mappings. Incrementing the count for each covers the case where
-	 * the addresses cross entries.
+	 * mappings. For mappings that are not 2MB aligned, PTE mappings
+	 * would be needed for the start and end portion of the address range
+	 * that fall outside of the 2MB alignment. This results in, at most,
+	 * two extra pages to hold PTE entries for each range that is mapped.
+	 * Incrementing the count for each covers the case where the addresses
	 * cross entries.
 	 */
 	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
 		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
@@ -625,8 +712,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
 	}
 	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
 	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
+	pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
 
-	total = p4d_size + pud_size + pmd_size;
+	total = p4d_size + pud_size + pmd_size + pte_size;
 
 	/*
 	 * Now calculate the added pagetable structures needed to populate
@@ -709,10 +797,13 @@ void __init sme_encrypt_kernel(void)
 
 	/*
 	 * The total workarea includes the executable encryption area and
-	 * the pagetable area.
+	 * the pagetable area. The start of the workarea is already 2MB
+	 * aligned, align the end of the workarea on a 2MB boundary so that
+	 * we don't try to create/allocate PTE entries from the workarea
+	 * before it is mapped.
 	 */
 	workarea_len = execute_len + pgtable_area_len;
-	workarea_end = workarea_start + workarea_len;
+	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
 
 	/*
 	 * Set the address to the start of where newly created pagetable
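As a quick sanity check on the new pte_size term in sme_pgtable_calc() above, this stand-alone sketch (not kernel code; it assumes the usual x86-64 values of PTRS_PER_PTE = 512 and an 8-byte pte_t) reproduces the worst-case arithmetic:

	#include <stdio.h>

	#define PTRS_PER_PTE	512		/* entries per PTE page table */
	typedef unsigned long pte_t;		/* 8 bytes on x86-64 */

	int main(void)
	{
		/* One full PTE page table: 512 * 8 = 4096 bytes, one 4KB page. */
		unsigned long one_table = sizeof(pte_t) * PTRS_PER_PTE;

		/*
		 * At most two such tables per mapped range: one for the
		 * unaligned head and one for the unaligned tail, matching
		 * pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE.
		 */
		printf("pte_size = %lu bytes (%lu pages)\n",
		       2 * one_table, 2 * one_table / 4096);

		return 0;
	}

That is, the two possible PTE page tables per mapped range cost exactly two extra 4KB pages, as the commit message states.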

arch/x86/mm/mem_encrypt_boot.S

Lines changed: 14 additions & 6 deletions
@@ -104,6 +104,7 @@ ENTRY(__enc_copy)
 	mov	%rdx, %cr4
 
 	push	%r15
+	push	%r12
 
 	movq	%rcx, %r9		/* Save kernel length */
 	movq	%rdi, %r10		/* Save encrypted kernel address */
@@ -119,21 +120,27 @@ ENTRY(__enc_copy)
 
 	wbinvd				/* Invalidate any cache entries */
 
-	/* Copy/encrypt 2MB at a time */
+	/* Copy/encrypt up to 2MB at a time */
+	movq	$PMD_PAGE_SIZE, %r12
 1:
+	cmpq	%r12, %r9
+	jnb	2f
+	movq	%r9, %r12
+
+2:
 	movq	%r11, %rsi		/* Source - decrypted kernel */
 	movq	%r8, %rdi		/* Dest - intermediate copy buffer */
-	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
+	movq	%r12, %rcx
 	rep	movsb
 
 	movq	%r8, %rsi		/* Source - intermediate copy buffer */
 	movq	%r10, %rdi		/* Dest - encrypted kernel */
-	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
+	movq	%r12, %rcx
 	rep	movsb
 
-	addq	$PMD_PAGE_SIZE, %r11
-	addq	$PMD_PAGE_SIZE, %r10
-	subq	$PMD_PAGE_SIZE, %r9	/* Kernel length decrement */
+	addq	%r12, %r11
+	addq	%r12, %r10
+	subq	%r12, %r9		/* Kernel length decrement */
 	jnz	1b			/* Kernel length not zero? */
 
 	/* Restore PAT register */
@@ -142,6 +149,7 @@ ENTRY(__enc_copy)
 	mov	%r15, %rdx		/* Restore original PAT value */
 	wrmsr
 
+	pop	%r12
 	pop	%r15
 
 	ret
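For readability, the revised copy loop is equivalent to this C sketch (illustrative only: enc_copy() is a hypothetical helper, and in the real routine the two rep movsb copies through the intermediate buffer are what perform the decryption and re-encryption, since source and destination map the same physical pages with different encryption attributes):

	#include <string.h>

	#define PMD_PAGE_SIZE	0x200000UL	/* 2MB */

	/* Hypothetical C rendering of __enc_copy's copy loop. */
	void enc_copy(unsigned char *encrypted, unsigned char *decrypted,
		      unsigned char *workarea, unsigned long len)
	{
		while (len) {
			/* %r12 in the assembly: clamp the chunk to what is left. */
			unsigned long chunk =
				len < PMD_PAGE_SIZE ? len : PMD_PAGE_SIZE;

			memcpy(workarea, decrypted, chunk);	/* first rep movsb */
			memcpy(encrypted, workarea, chunk);	/* second rep movsb */

			decrypted += chunk;
			encrypted += chunk;
			len -= chunk;
		}
	}

Clamping the chunk to the remaining length is what lets __enc_copy() handle a kernel length that is no longer a 2MB multiple: every full iteration moves 2MB, and the final iteration moves whatever remains.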
