@@ -469,6 +469,7 @@ struct sme_populate_pgd_data {
 	pgd_t	*pgd;
 
 	pmdval_t pmd_flags;
+	pteval_t pte_flags;
 	unsigned long paddr;
 
 	unsigned long vaddr;
@@ -493,6 +494,7 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 #define PGD_FLAGS		_KERNPG_TABLE_NOENC
 #define P4D_FLAGS		_KERNPG_TABLE_NOENC
 #define PUD_FLAGS		_KERNPG_TABLE_NOENC
+#define PMD_FLAGS		_KERNPG_TABLE_NOENC
 
 #define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
 
@@ -502,7 +504,15 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 
 #define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
 
-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
+
+#define PTE_FLAGS_DEC		PTE_FLAGS
+#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+				 (_PAGE_PAT | _PAGE_PWT))
+
+#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
+
+static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 {
 	pgd_t *pgd_p;
 	p4d_t *p4d_p;
@@ -553,7 +563,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 	pud_p += pud_index(ppd->vaddr);
 	if (native_pud_val(*pud_p)) {
 		if (native_pud_val(*pud_p) & _PAGE_PSE)
-			return;
+			return NULL;
 
 		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
 	} else {
@@ -567,16 +577,55 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 		native_set_pud(pud_p, pud);
 	}
 
+	return pmd_p;
+}
+
+static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+{
+	pmd_t *pmd_p;
+
+	pmd_p = sme_prepare_pgd(ppd);
+	if (!pmd_p)
+		return;
+
 	pmd_p += pmd_index(ppd->vaddr);
 	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
 		native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
 }
 
-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
-				   pmdval_t pmd_flags)
+static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 {
-	ppd->pmd_flags = pmd_flags;
+	pmd_t *pmd_p;
+	pte_t *pte_p;
+
+	pmd_p = sme_prepare_pgd(ppd);
+	if (!pmd_p)
+		return;
+
+	pmd_p += pmd_index(ppd->vaddr);
+	if (native_pmd_val(*pmd_p)) {
+		if (native_pmd_val(*pmd_p) & _PAGE_PSE)
+			return;
+
+		pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
+	} else {
+		pmd_t pmd;
 
+		pte_p = ppd->pgtable_area;
+		memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
+		ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
+
+		pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
+		native_set_pmd(pmd_p, pmd);
+	}
+
+	pte_p += pte_index(ppd->vaddr);
+	if (!native_pte_val(*pte_p))
+		native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
+}
+
+static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+{
 	while (ppd->vaddr < ppd->vaddr_end) {
 		sme_populate_pgd_large(ppd);
 
@@ -585,33 +634,71 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
 	}
 }
 
+static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+{
+	while (ppd->vaddr < ppd->vaddr_end) {
+		sme_populate_pgd(ppd);
+
+		ppd->vaddr += PAGE_SIZE;
+		ppd->paddr += PAGE_SIZE;
+	}
+}
+
+static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+				   pmdval_t pmd_flags, pteval_t pte_flags)
+{
+	unsigned long vaddr_end;
+
+	ppd->pmd_flags = pmd_flags;
+	ppd->pte_flags = pte_flags;
+
+	/* Save original end value since we modify the struct value */
+	vaddr_end = ppd->vaddr_end;
+
+	/* If start is not 2MB aligned, create PTE entries */
+	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
+	__sme_map_range_pte(ppd);
+
+	/* Create PMD entries */
+	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
+	__sme_map_range_pmd(ppd);
+
+	/* If end is not 2MB aligned, create PTE entries */
+	ppd->vaddr_end = vaddr_end;
+	__sme_map_range_pte(ppd);
+}
+
 static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_ENC);
+	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
 }
 
 static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_DEC);
+	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
 }
 
 static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_DEC_WP);
+	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
 }
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
 {
-	unsigned long p4d_size, pud_size, pmd_size;
+	unsigned long p4d_size, pud_size, pmd_size, pte_size;
 	unsigned long total;
 
 	/*
 	 * Perform a relatively simplistic calculation of the pagetable
-	 * entries that are needed. That mappings will be covered by 2MB
-	 * PMD entries so we can conservatively calculate the required
+	 * entries that are needed. Those mappings will be covered mostly
+	 * by 2MB PMD entries so we can conservatively calculate the required
 	 * number of P4D, PUD and PMD structures needed to perform the
-	 * mappings. Incrementing the count for each covers the case where
-	 * the addresses cross entries.
+	 * mappings. For mappings that are not 2MB aligned, PTE mappings
+	 * would be needed for the start and end portion of the address range
+	 * that fall outside of the 2MB alignment. This results in, at most,
+	 * two extra pages to hold PTE entries for each range that is mapped.
+	 * Incrementing the count for each covers the case where the addresses
+	 * cross entries.
 	 */
 	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
 		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
@@ -625,8 +712,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
 	}
 	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
 	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
+	pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
 
-	total = p4d_size + pud_size + pmd_size;
+	total = p4d_size + pud_size + pmd_size + pte_size;
 
 	/*
 	 * Now calculate the added pagetable structures needed to populate
@@ -709,10 +797,13 @@ void __init sme_encrypt_kernel(void)
 
 	/*
 	 * The total workarea includes the executable encryption area and
-	 * the pagetable area.
+	 * the pagetable area. The start of the workarea is already 2MB
+	 * aligned, align the end of the workarea on a 2MB boundary so that
+	 * we don't try to create/allocate PTE entries from the workarea
+	 * before it is mapped.
 	 */
 	workarea_len = execute_len + pgtable_area_len;
-	workarea_end = workarea_start + workarea_len;
+	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
 
 	/*
 	 * Set the address to the start of where newly created pagetable
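
Note on the mapping split (not part of the patch): the new __sme_map_range() maps an arbitrary range in three pieces, PTE entries up to the first 2MB boundary, 2MB PMD entries for the aligned middle, and PTE entries for any unaligned tail. The small standalone C sketch below only illustrates that boundary arithmetic. PAGE_SIZE, PMD_PAGE_SIZE, PMD_PAGE_MASK and ALIGN() are local stand-ins assumed to mirror the x86 definitions (4KB pages, 2MB PMD pages), and the example addresses are made up for illustration.

#include <stdio.h>

/* Local stand-ins for the kernel macros, assumed to match x86. */
#define PAGE_SIZE	0x1000UL			/* 4KB */
#define PMD_PAGE_SIZE	0x200000UL			/* 2MB */
#define PMD_PAGE_MASK	(~(PMD_PAGE_SIZE - 1))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* A deliberately unaligned example range. */
	unsigned long vaddr     = 0x1fff000UL;	/* 4KB short of a 2MB boundary */
	unsigned long vaddr_end = 0x2402000UL;	/* 8KB past a 2MB boundary */

	/* Head: PTE mappings up to the first 2MB boundary. */
	unsigned long head_end = ALIGN(vaddr, PMD_PAGE_SIZE);
	/* Body: 2MB PMD mappings covering the aligned middle. */
	unsigned long body_end = vaddr_end & PMD_PAGE_MASK;
	/* Tail: PTE mappings for whatever is left past the last 2MB boundary. */

	printf("PTE head: 0x%lx - 0x%lx (%lu 4KB pages)\n",
	       vaddr, head_end, (head_end - vaddr) / PAGE_SIZE);
	printf("PMD body: 0x%lx - 0x%lx (%lu 2MB pages)\n",
	       head_end, body_end, (body_end - head_end) / PMD_PAGE_SIZE);
	printf("PTE tail: 0x%lx - 0x%lx (%lu 4KB pages)\n",
	       body_end, vaddr_end, (vaddr_end - body_end) / PAGE_SIZE);

	return 0;
}

Because only the unaligned head and tail fall back to 4KB mappings, at most two pages' worth of PTE tables are needed per mapped range, which is what the new pte_size term in sme_pgtable_calc() reserves.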