Skip to content

Commit bacf6b4

Browse files
tlendacky authored and Ingo Molnar committed
x86/mm: Use a struct to reduce parameters for SME PGD mapping
In preparation for follow-on patches, combine the PGD mapping parameters into a struct to reduce the number of function arguments and allow for direct updating of the next pagetable mapping area pointer.

Tested-by: Gabriel Craciunescu <[email protected]>
Signed-off-by: Tom Lendacky <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brijesh Singh <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 1303880 commit bacf6b4

File tree

1 file changed

+46
-44
lines changed

1 file changed

+46
-44
lines changed

arch/x86/mm/mem_encrypt.c

Lines changed: 46 additions & 44 deletions
Original file line number | Diff line number | Diff line change
@@ -464,6 +464,14 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
464464
set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
465465
}
466466

467+
struct sme_populate_pgd_data {
468+
void *pgtable_area;
469+
pgd_t *pgd;
470+
471+
pmdval_t pmd_val;
472+
unsigned long vaddr;
473+
};
474+
467475
static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
468476
unsigned long end)
469477
{
@@ -486,15 +494,14 @@ static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
486494
#define PUD_FLAGS _KERNPG_TABLE_NOENC
487495
#define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
488496

489-
static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
490-
unsigned long vaddr, pmdval_t pmd_val)
497+
static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
491498
{
492499
pgd_t *pgd_p;
493500
p4d_t *p4d_p;
494501
pud_t *pud_p;
495502
pmd_t *pmd_p;
496503

497-
pgd_p = pgd_base + pgd_index(vaddr);
504+
pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
498505
if (native_pgd_val(*pgd_p)) {
499506
if (IS_ENABLED(CONFIG_X86_5LEVEL))
500507
p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
@@ -504,60 +511,57 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
504511
pgd_t pgd;
505512

506513
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
507-
p4d_p = pgtable_area;
514+
p4d_p = ppd->pgtable_area;
508515
memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
509-
pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
516+
ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
510517

511518
pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
512519
} else {
513-
pud_p = pgtable_area;
520+
pud_p = ppd->pgtable_area;
514521
memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
515-
pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
522+
ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
516523

517524
pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
518525
}
519526
native_set_pgd(pgd_p, pgd);
520527
}
521528

522529
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
523-
p4d_p += p4d_index(vaddr);
530+
p4d_p += p4d_index(ppd->vaddr);
524531
if (native_p4d_val(*p4d_p)) {
525532
pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
526533
} else {
527534
p4d_t p4d;
528535

529-
pud_p = pgtable_area;
536+
pud_p = ppd->pgtable_area;
530537
memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
531-
pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
538+
ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
532539

533540
p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
534541
native_set_p4d(p4d_p, p4d);
535542
}
536543
}
537544

538-
pud_p += pud_index(vaddr);
545+
pud_p += pud_index(ppd->vaddr);
539546
if (native_pud_val(*pud_p)) {
540547
if (native_pud_val(*pud_p) & _PAGE_PSE)
541-
goto out;
548+
return;
542549

543550
pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
544551
} else {
545552
pud_t pud;
546553

547-
pmd_p = pgtable_area;
554+
pmd_p = ppd->pgtable_area;
548555
memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
549-
pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
556+
ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
550557

551558
pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
552559
native_set_pud(pud_p, pud);
553560
}
554561

555-
pmd_p += pmd_index(vaddr);
562+
pmd_p += pmd_index(ppd->vaddr);
556563
if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
557-
native_set_pmd(pmd_p, native_make_pmd(pmd_val));
558-
559-
out:
560-
return pgtable_area;
564+
native_set_pmd(pmd_p, native_make_pmd(ppd->pmd_val));
561565
}
562566

563567
static unsigned long __init sme_pgtable_calc(unsigned long len)
@@ -615,11 +619,10 @@ void __init sme_encrypt_kernel(void)
615619
unsigned long workarea_start, workarea_end, workarea_len;
616620
unsigned long execute_start, execute_end, execute_len;
617621
unsigned long kernel_start, kernel_end, kernel_len;
622+
struct sme_populate_pgd_data ppd;
618623
unsigned long pgtable_area_len;
619624
unsigned long paddr, pmd_flags;
620625
unsigned long decrypted_base;
621-
void *pgtable_area;
622-
pgd_t *pgd;
623626

624627
if (!sme_active())
625628
return;
@@ -683,18 +686,18 @@ void __init sme_encrypt_kernel(void)
683686
* pagetables and when the new encrypted and decrypted kernel
684687
* mappings are populated.
685688
*/
686-
pgtable_area = (void *)execute_end;
689+
ppd.pgtable_area = (void *)execute_end;
687690

688691
/*
689692
* Make sure the current pagetable structure has entries for
690693
* addressing the workarea.
691694
*/
692-
pgd = (pgd_t *)native_read_cr3_pa();
695+
ppd.pgd = (pgd_t *)native_read_cr3_pa();
693696
paddr = workarea_start;
694697
while (paddr < workarea_end) {
695-
pgtable_area = sme_populate_pgd(pgd, pgtable_area,
696-
paddr,
697-
paddr + PMD_FLAGS);
698+
ppd.pmd_val = paddr + PMD_FLAGS;
699+
ppd.vaddr = paddr;
700+
sme_populate_pgd_large(&ppd);
698701

699702
paddr += PMD_PAGE_SIZE;
700703
}
@@ -708,17 +711,17 @@ void __init sme_encrypt_kernel(void)
708711
* populated with new PUDs and PMDs as the encrypted and decrypted
709712
* kernel mappings are created.
710713
*/
711-
pgd = pgtable_area;
712-
memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD);
713-
pgtable_area += sizeof(*pgd) * PTRS_PER_PGD;
714+
ppd.pgd = ppd.pgtable_area;
715+
memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
716+
ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
714717

715718
/* Add encrypted kernel (identity) mappings */
716719
pmd_flags = PMD_FLAGS | _PAGE_ENC;
717720
paddr = kernel_start;
718721
while (paddr < kernel_end) {
719-
pgtable_area = sme_populate_pgd(pgd, pgtable_area,
720-
paddr,
721-
paddr + pmd_flags);
722+
ppd.pmd_val = paddr + pmd_flags;
723+
ppd.vaddr = paddr;
724+
sme_populate_pgd_large(&ppd);
722725

723726
paddr += PMD_PAGE_SIZE;
724727
}
@@ -736,40 +739,39 @@ void __init sme_encrypt_kernel(void)
736739
pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
737740
paddr = kernel_start;
738741
while (paddr < kernel_end) {
739-
pgtable_area = sme_populate_pgd(pgd, pgtable_area,
740-
paddr + decrypted_base,
741-
paddr + pmd_flags);
742+
ppd.pmd_val = paddr + pmd_flags;
743+
ppd.vaddr = paddr + decrypted_base;
744+
sme_populate_pgd_large(&ppd);
742745

743746
paddr += PMD_PAGE_SIZE;
744747
}
745748

746749
/* Add decrypted workarea mappings to both kernel mappings */
747750
paddr = workarea_start;
748751
while (paddr < workarea_end) {
749-
pgtable_area = sme_populate_pgd(pgd, pgtable_area,
750-
paddr,
751-
paddr + PMD_FLAGS);
752+
ppd.pmd_val = paddr + PMD_FLAGS;
753+
ppd.vaddr = paddr;
754+
sme_populate_pgd_large(&ppd);
752755

753-
pgtable_area = sme_populate_pgd(pgd, pgtable_area,
754-
paddr + decrypted_base,
755-
paddr + PMD_FLAGS);
756+
ppd.vaddr = paddr + decrypted_base;
757+
sme_populate_pgd_large(&ppd);
756758

757759
paddr += PMD_PAGE_SIZE;
758760
}
759761

760762
/* Perform the encryption */
761763
sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
762-
kernel_len, workarea_start, (unsigned long)pgd);
764+
kernel_len, workarea_start, (unsigned long)ppd.pgd);
763765

764766
/*
765767
* At this point we are running encrypted. Remove the mappings for
766768
* the decrypted areas - all that is needed for this is to remove
767769
* the PGD entry/entries.
768770
*/
769-
sme_clear_pgd(pgd, kernel_start + decrypted_base,
771+
sme_clear_pgd(ppd.pgd, kernel_start + decrypted_base,
770772
kernel_end + decrypted_base);
771773

772-
sme_clear_pgd(pgd, workarea_start + decrypted_base,
774+
sme_clear_pgd(ppd.pgd, workarea_start + decrypted_base,
773775
workarea_end + decrypted_base);
774776

775777
/* Flush the TLB - no globals so cr3 is enough */

0 commit comments

Comments (0)