
Commit 107cd25

tlendacky authored and Ingo Molnar committed
x86/mm: Encrypt the initrd earlier for BSP microcode update
Currently the BSP microcode update code examines the initrd very early in the boot process. If SME is active, the initrd is treated as being encrypted but it has not been encrypted (in place) yet. Update the early boot code that encrypts the kernel to also encrypt the initrd so that early BSP microcode updates work.

Tested-by: Gabriel Craciunescu <[email protected]>
Signed-off-by: Tom Lendacky <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brijesh Singh <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent cc5f01e commit 107cd25
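The patch locates the initrd from the boot_params structure the bootloader hands to the kernel: the 64-bit address and size are split across 32-bit fields (hdr.ramdisk_image/hdr.ramdisk_size plus the ext_ramdisk_* high halves). The standalone C sketch below only illustrates that packing; the struct is a simplified stand-in, not the real boot_params layout, and the values are made up.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the relevant boot_params fields (illustration only). */
struct fake_boot_params {
	uint32_t ramdisk_image;		/* low 32 bits of initrd address (hdr.ramdisk_image) */
	uint32_t ramdisk_size;		/* low 32 bits of initrd size (hdr.ramdisk_size) */
	uint32_t ext_ramdisk_image;	/* high 32 bits of initrd address */
	uint32_t ext_ramdisk_size;	/* high 32 bits of initrd size */
};

int main(void)
{
	struct fake_boot_params bp = {
		.ramdisk_image     = 0x7f000000,	/* example values only */
		.ramdisk_size      = 0x00800000,
		.ext_ramdisk_image = 0x00000001,	/* initrd loaded above 4 GB */
		.ext_ramdisk_size  = 0,
	};

	/* Same OR-with-shift packing as the arch/x86/mm/mem_encrypt.c hunk below. */
	uint64_t initrd_start = (uint64_t)bp.ramdisk_image |
				((uint64_t)bp.ext_ramdisk_image << 32);
	uint64_t initrd_len   = (uint64_t)bp.ramdisk_size |
				((uint64_t)bp.ext_ramdisk_size << 32);

	printf("initrd: start=0x%llx len=0x%llx\n",
	       (unsigned long long)initrd_start,
	       (unsigned long long)initrd_len);
	return 0;
}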

File tree

5 files changed, +85 -45 lines changed


arch/x86/include/asm/mem_encrypt.h

Lines changed: 2 additions & 2 deletions
@@ -39,7 +39,7 @@ void __init sme_unmap_bootdata(char *real_mode_data);
 
 void __init sme_early_init(void);
 
-void __init sme_encrypt_kernel(void);
+void __init sme_encrypt_kernel(struct boot_params *bp);
 void __init sme_enable(struct boot_params *bp);
 
 int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
@@ -67,7 +67,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
 
 static inline void __init sme_early_init(void) { }
 
-static inline void __init sme_encrypt_kernel(void) { }
+static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
 
 static inline bool sme_active(void) { return false; }

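The header keeps the usual kernel pattern: a real prototype when CONFIG_AMD_MEM_ENCRYPT is enabled and an empty static inline stub when it is not, so call sites such as __startup_64() need no #ifdef; only the signature changes in this commit. A minimal standalone sketch of that pattern, using a plain macro as a stand-in for Kconfig and a dummy body:

#include <stdio.h>

struct boot_params;			/* opaque here; only a pointer is passed */

#define HAVE_AMD_MEM_ENCRYPT 1		/* stand-in for CONFIG_AMD_MEM_ENCRYPT; set to 0 for the stub */

#if HAVE_AMD_MEM_ENCRYPT
static void sme_encrypt_kernel(struct boot_params *bp)
{
	(void)bp;
	printf("would encrypt the kernel and initrd in place\n");
}
#else
static inline void sme_encrypt_kernel(struct boot_params *bp) { (void)bp; }
#endif

int main(void)
{
	sme_encrypt_kernel(NULL);	/* caller never needs an #ifdef */
	return 0;
}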
arch/x86/kernel/head64.c

Lines changed: 2 additions & 2 deletions
@@ -157,8 +157,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	p = fixup_pointer(&phys_base, physaddr);
 	*p += load_delta - sme_get_me_mask();
 
-	/* Encrypt the kernel (if SME is active) */
-	sme_encrypt_kernel();
+	/* Encrypt the kernel and related (if SME is active) */
+	sme_encrypt_kernel(bp);
 
 	/*
 	 * Return the SME encryption mask (if SME is active) to be used as a

arch/x86/kernel/setup.c

Lines changed: 0 additions & 10 deletions
@@ -364,16 +364,6 @@ static void __init reserve_initrd(void)
 	    !ramdisk_image || !ramdisk_size)
 		return;		/* No initrd provided by bootloader */
 
-	/*
-	 * If SME is active, this memory will be marked encrypted by the
-	 * kernel when it is accessed (including relocation). However, the
-	 * ramdisk image was loaded decrypted by the bootloader, so make
-	 * sure that it is encrypted before accessing it. For SEV the
-	 * ramdisk will already be encrypted, so only do this for SME.
-	 */
-	if (sme_active())
-		sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
-
 	initrd_start = 0;
 
 	mapped_size = memblock_mem_size(max_pfn_mapped);

arch/x86/mm/mem_encrypt.c

Lines changed: 58 additions & 8 deletions
@@ -738,11 +738,12 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
 	return total;
 }
 
-void __init sme_encrypt_kernel(void)
+void __init sme_encrypt_kernel(struct boot_params *bp)
 {
 	unsigned long workarea_start, workarea_end, workarea_len;
 	unsigned long execute_start, execute_end, execute_len;
 	unsigned long kernel_start, kernel_end, kernel_len;
+	unsigned long initrd_start, initrd_end, initrd_len;
 	struct sme_populate_pgd_data ppd;
 	unsigned long pgtable_area_len;
 	unsigned long decrypted_base;
@@ -751,14 +752,15 @@ void __init sme_encrypt_kernel(void)
 		return;
 
 	/*
-	 * Prepare for encrypting the kernel by building new pagetables with
-	 * the necessary attributes needed to encrypt the kernel in place.
+	 * Prepare for encrypting the kernel and initrd by building new
+	 * pagetables with the necessary attributes needed to encrypt the
+	 * kernel in place.
 	 *
 	 * One range of virtual addresses will map the memory occupied
-	 * by the kernel as encrypted.
+	 * by the kernel and initrd as encrypted.
 	 *
 	 * Another range of virtual addresses will map the memory occupied
-	 * by the kernel as decrypted and write-protected.
+	 * by the kernel and initrd as decrypted and write-protected.
 	 *
 	 * The use of write-protect attribute will prevent any of the
 	 * memory from being cached.
@@ -769,6 +771,20 @@ void __init sme_encrypt_kernel(void)
 	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
 	kernel_len = kernel_end - kernel_start;
 
+	initrd_start = 0;
+	initrd_end = 0;
+	initrd_len = 0;
+#ifdef CONFIG_BLK_DEV_INITRD
+	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
+		     ((unsigned long)bp->ext_ramdisk_size << 32);
+	if (initrd_len) {
+		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
+			       ((unsigned long)bp->ext_ramdisk_image << 32);
+		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
+		initrd_len = initrd_end - initrd_start;
+	}
+#endif
+
 	/* Set the encryption workarea to be immediately after the kernel */
 	workarea_start = kernel_end;
 
@@ -791,6 +807,8 @@ void __init sme_encrypt_kernel(void)
 	 */
 	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
 	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
+	if (initrd_len)
+		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
 
 	/* PUDs and PMDs needed in the current pagetables for the workarea */
 	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
@@ -829,9 +847,9 @@ void __init sme_encrypt_kernel(void)
 
 	/*
 	 * A new pagetable structure is being built to allow for the kernel
-	 * to be encrypted. It starts with an empty PGD that will then be
-	 * populated with new PUDs and PMDs as the encrypted and decrypted
-	 * kernel mappings are created.
+	 * and initrd to be encrypted. It starts with an empty PGD that will
+	 * then be populated with new PUDs and PMDs as the encrypted and
+	 * decrypted kernel mappings are created.
 	 */
 	ppd.pgd = ppd.pgtable_area;
 	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
@@ -844,6 +862,12 @@ void __init sme_encrypt_kernel(void)
 	 * the base of the mapping.
 	 */
 	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
+	if (initrd_len) {
+		unsigned long check_base;
+
+		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
+		decrypted_base = max(decrypted_base, check_base);
+	}
 	decrypted_base <<= PGDIR_SHIFT;
 
 	/* Add encrypted kernel (identity) mappings */
@@ -858,6 +882,21 @@ void __init sme_encrypt_kernel(void)
 	ppd.vaddr_end = kernel_end + decrypted_base;
 	sme_map_range_decrypted_wp(&ppd);
 
+	if (initrd_len) {
+		/* Add encrypted initrd (identity) mappings */
+		ppd.paddr = initrd_start;
+		ppd.vaddr = initrd_start;
+		ppd.vaddr_end = initrd_end;
+		sme_map_range_encrypted(&ppd);
+		/*
+		 * Add decrypted, write-protected initrd (non-identity) mappings
+		 */
+		ppd.paddr = initrd_start;
+		ppd.vaddr = initrd_start + decrypted_base;
+		ppd.vaddr_end = initrd_end + decrypted_base;
+		sme_map_range_decrypted_wp(&ppd);
+	}
+
 	/* Add decrypted workarea mappings to both kernel mappings */
 	ppd.paddr = workarea_start;
 	ppd.vaddr = workarea_start;
@@ -873,6 +912,11 @@ void __init sme_encrypt_kernel(void)
 	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
 			    kernel_len, workarea_start, (unsigned long)ppd.pgd);
 
+	if (initrd_len)
+		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
+				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);
+
 	/*
 	 * At this point we are running encrypted. Remove the mappings for
 	 * the decrypted areas - all that is needed for this is to remove
@@ -882,6 +926,12 @@ void __init sme_encrypt_kernel(void)
 	ppd.vaddr_end = kernel_end + decrypted_base;
 	sme_clear_pgd(&ppd);
 
+	if (initrd_len) {
+		ppd.vaddr = initrd_start + decrypted_base;
+		ppd.vaddr_end = initrd_end + decrypted_base;
+		sme_clear_pgd(&ppd);
+	}
+
 	ppd.vaddr = workarea_start + decrypted_base;
 	ppd.vaddr_end = workarea_end + decrypted_base;
 	sme_clear_pgd(&ppd);

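One detail worth spelling out from the hunks above: the decrypted, write-protected alias is placed at a PGD entry past every identity-mapped range (kernel, workarea, and now initrd), so the aliased mappings cannot collide with the identity mappings, which is why the new code takes the larger of the "one past workarea_end" and "one past initrd_end" PGD indices before shifting by PGDIR_SHIFT. Below is a standalone sketch of that calculation, using typical 4-level x86-64 paging constants (PGDIR_SHIFT = 39, PTRS_PER_PGD = 512) and deliberately contrived example addresses so the max() actually matters; none of these values come from this commit.

#include <stdint.h>
#include <stdio.h>

#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512UL

static unsigned long pgd_index(uint64_t addr)
{
	return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	/* Contrived example addresses: the initrd sits above 512 GB so it
	 * occupies a different PGD slot than the kernel/workarea. */
	uint64_t workarea_end = 0x0000000004000000ULL;	/* ~64 MB */
	uint64_t initrd_end   = 0x0000008040000000ULL;	/* ~513 GB */

	unsigned long decrypted_base =
		(pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	unsigned long check_base =
		(pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);

	if (check_base > decrypted_base)
		decrypted_base = check_base;	/* mirrors the max() in the patch */

	printf("decrypted alias starts at PGD entry %lu (offset 0x%llx)\n",
	       decrypted_base,
	       (unsigned long long)decrypted_base << PGDIR_SHIFT);
	return 0;
}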
arch/x86/mm/mem_encrypt_boot.S

Lines changed: 23 additions & 23 deletions
@@ -22,9 +22,9 @@ ENTRY(sme_encrypt_execute)
 
 	/*
 	 * Entry parameters:
-	 *   RDI - virtual address for the encrypted kernel mapping
-	 *   RSI - virtual address for the decrypted kernel mapping
-	 *   RDX - length of kernel
+	 *   RDI - virtual address for the encrypted mapping
+	 *   RSI - virtual address for the decrypted mapping
+	 *   RDX - length to encrypt
 	 *   RCX - virtual address of the encryption workarea, including:
 	 *     - stack page (PAGE_SIZE)
 	 *     - encryption routine page (PAGE_SIZE)
@@ -41,9 +41,9 @@ ENTRY(sme_encrypt_execute)
 	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */
 
 	push	%r12
-	movq	%rdi, %r10		/* Encrypted kernel */
-	movq	%rsi, %r11		/* Decrypted kernel */
-	movq	%rdx, %r12		/* Kernel length */
+	movq	%rdi, %r10		/* Encrypted area */
+	movq	%rsi, %r11		/* Decrypted area */
+	movq	%rdx, %r12		/* Area length */
 
 	/* Copy encryption routine into the workarea */
 	movq	%rax, %rdi		/* Workarea encryption routine */
@@ -52,10 +52,10 @@ ENTRY(sme_encrypt_execute)
 	rep	movsb
 
 	/* Setup registers for call */
-	movq	%r10, %rdi		/* Encrypted kernel */
-	movq	%r11, %rsi		/* Decrypted kernel */
+	movq	%r10, %rdi		/* Encrypted area */
+	movq	%r11, %rsi		/* Decrypted area */
 	movq	%r8, %rdx		/* Pagetables used for encryption */
-	movq	%r12, %rcx		/* Kernel length */
+	movq	%r12, %rcx		/* Area length */
 	movq	%rax, %r8		/* Workarea encryption routine */
 	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */
 
@@ -71,27 +71,27 @@ ENDPROC(sme_encrypt_execute)
 
 ENTRY(__enc_copy)
 /*
- * Routine used to encrypt kernel.
+ * Routine used to encrypt memory in place.
  * This routine must be run outside of the kernel proper since
 * the kernel will be encrypted during the process. So this
 * routine is defined here and then copied to an area outside
 * of the kernel where it will remain and run decrypted
 * during execution.
 *
 * On entry the registers must be:
- *   RDI - virtual address for the encrypted kernel mapping
- *   RSI - virtual address for the decrypted kernel mapping
+ *   RDI - virtual address for the encrypted mapping
+ *   RSI - virtual address for the decrypted mapping
 *   RDX - address of the pagetables to use for encryption
- *   RCX - length of kernel
+ *   RCX - length of area
 *    R8 - intermediate copy buffer
 *
 *   RAX - points to this routine
 *
- * The kernel will be encrypted by copying from the non-encrypted
- * kernel space to an intermediate buffer and then copying from the
- * intermediate buffer back to the encrypted kernel space. The physical
- * addresses of the two kernel space mappings are the same which
- * results in the kernel being encrypted "in place".
+ * The area will be encrypted by copying from the non-encrypted
+ * memory space to an intermediate buffer and then copying from the
+ * intermediate buffer back to the encrypted memory space. The physical
+ * addresses of the two mappings are the same which results in the area
+ * being encrypted "in place".
 */
 	/* Enable the new page tables */
 	mov	%rdx, %cr3
@@ -106,9 +106,9 @@ ENTRY(__enc_copy)
 	push	%r15
 	push	%r12
 
-	movq	%rcx, %r9		/* Save kernel length */
-	movq	%rdi, %r10		/* Save encrypted kernel address */
-	movq	%rsi, %r11		/* Save decrypted kernel address */
+	movq	%rcx, %r9		/* Save area length */
+	movq	%rdi, %r10		/* Save encrypted area address */
+	movq	%rsi, %r11		/* Save decrypted area address */
 
 	/* Set the PAT register PA5 entry to write-protect */
 	movl	$MSR_IA32_CR_PAT, %ecx
@@ -128,13 +128,13 @@ ENTRY(__enc_copy)
 	movq	%r9, %r12
 
 2:
-	movq	%r11, %rsi		/* Source - decrypted kernel */
+	movq	%r11, %rsi		/* Source - decrypted area */
 	movq	%r8, %rdi		/* Dest - intermediate copy buffer */
 	movq	%r12, %rcx
 	rep	movsb
 
 	movq	%r8, %rsi		/* Source - intermediate copy buffer */
-	movq	%r10, %rdi		/* Dest - encrypted kernel */
+	movq	%r10, %rdi		/* Dest - encrypted area */
 	movq	%r12, %rcx
 	rep	movsb
 
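The __enc_copy comment block above describes the whole mechanism: data is read through the decrypted (write-protected) alias, bounced through an intermediate buffer in the workarea, and written back through the encrypted alias of the same physical pages, which encrypts it in place. A C rendering of that copy loop, as a sketch only; the kernel does this in assembly, in fixed-size chunks, with caching constrained through a write-protect PAT entry, and the aliases below are simply the same array rather than two mappings of the same physical memory.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch of the __enc_copy loop: read through the "decrypted" alias into a
 * bounce buffer, then write back through the "encrypted" alias. With SME the
 * two virtual aliases reference the same physical pages, so the write-back
 * leaves the data encrypted in place.
 */
static void enc_copy_sketch(unsigned char *encrypted,		/* encrypted alias */
			    const unsigned char *decrypted,	/* decrypted alias */
			    size_t len,
			    unsigned char *buf, size_t buf_size)/* intermediate buffer */
{
	while (len) {
		size_t chunk = len < buf_size ? len : buf_size;

		memcpy(buf, decrypted, chunk);	/* read via decrypted mapping */
		memcpy(encrypted, buf, chunk);	/* write via encrypted mapping */

		decrypted += chunk;
		encrypted += chunk;
		len -= chunk;
	}
}

int main(void)
{
	unsigned char area[32];		/* stands in for the initrd/kernel pages */
	unsigned char buf[8];		/* stands in for the workarea bounce buffer */

	memset(area, 0xAB, sizeof(area));

	/* In this sketch both aliases are simply the same array. */
	enc_copy_sketch(area, area, sizeof(area), buf, sizeof(buf));
	printf("copied %zu bytes through the bounce buffer\n", sizeof(area));
	return 0;
}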