Skip to content

Commit 01d5e78

Browse files
committed
Merge tag 'x86_sev_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 SEV updates from Borislav Petkov: "The accumulated pile of x86/sev generalizations and cleanups: - Share the SEV string unrolling logic with TDX as TDX guests need it too - Cleanups and generalization of code shared by SEV and TDX" * tag 'x86_sev_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/sev: Move common memory encryption code to mem_encrypt.c x86/sev: Rename mem_encrypt.c to mem_encrypt_amd.c x86/sev: Use CC_ATTR attribute to generalize string I/O unroll x86/sev: Remove do_early_exception() forward declarations x86/head64: Carve out the guest encryption postprocessing into a helper x86/sev: Get rid of excessive use of defines x86/sev: Shorten GHCB terminate macro names
2 parents cd36722 + 20f07a0 commit 01d5e78

File tree

12 files changed

+545
-524
lines changed

12 files changed

+545
-524
lines changed

arch/x86/Kconfig

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1523,16 +1523,20 @@ config X86_CPA_STATISTICS
15231523
helps to determine the effectiveness of preserving large and huge
15241524
page mappings when mapping protections are changed.
15251525

1526+
config X86_MEM_ENCRYPT
1527+
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
1528+
select DYNAMIC_PHYSICAL_MASK
1529+
select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
1530+
def_bool n
1531+
15261532
config AMD_MEM_ENCRYPT
15271533
bool "AMD Secure Memory Encryption (SME) support"
15281534
depends on X86_64 && CPU_SUP_AMD
15291535
select DMA_COHERENT_POOL
1530-
select DYNAMIC_PHYSICAL_MASK
15311536
select ARCH_USE_MEMREMAP_PROT
1532-
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
15331537
select INSTRUCTION_DECODER
1534-
select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
15351538
select ARCH_HAS_CC_PLATFORM
1539+
select X86_MEM_ENCRYPT
15361540
help
15371541
Say yes to enable support for the encryption of system memory.
15381542
This requires an AMD processor that supports Secure Memory

arch/x86/boot/compressed/sev.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
122122
static bool early_setup_sev_es(void)
123123
{
124124
if (!sev_es_negotiate_protocol())
125-
sev_es_terminate(GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED);
125+
sev_es_terminate(GHCB_SEV_ES_PROT_UNSUPPORTED);
126126

127127
if (set_page_decrypted((unsigned long)&boot_ghcb_page))
128128
return false;
@@ -175,7 +175,7 @@ void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
175175
enum es_result result;
176176

177177
if (!boot_ghcb && !early_setup_sev_es())
178-
sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
178+
sev_es_terminate(GHCB_SEV_ES_GEN_REQ);
179179

180180
vc_ghcb_invalidate(boot_ghcb);
181181
result = vc_init_em_ctxt(&ctxt, regs, exit_code);
@@ -202,5 +202,5 @@ void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
202202
if (result == ES_OK)
203203
vc_finish_insn(&ctxt);
204204
else if (result != ES_RETRY)
205-
sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
205+
sev_es_terminate(GHCB_SEV_ES_GEN_REQ);
206206
}

arch/x86/include/asm/io.h

Lines changed: 3 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@
4040

4141
#include <linux/string.h>
4242
#include <linux/compiler.h>
43+
#include <linux/cc_platform.h>
4344
#include <asm/page.h>
4445
#include <asm/early_ioremap.h>
4546
#include <asm/pgtable_types.h>
@@ -256,21 +257,6 @@ static inline void slow_down_io(void)
256257

257258
#endif
258259

259-
#ifdef CONFIG_AMD_MEM_ENCRYPT
260-
#include <linux/jump_label.h>
261-
262-
extern struct static_key_false sev_enable_key;
263-
static inline bool sev_key_active(void)
264-
{
265-
return static_branch_unlikely(&sev_enable_key);
266-
}
267-
268-
#else /* !CONFIG_AMD_MEM_ENCRYPT */
269-
270-
static inline bool sev_key_active(void) { return false; }
271-
272-
#endif /* CONFIG_AMD_MEM_ENCRYPT */
273-
274260
#define BUILDIO(bwl, bw, type) \
275261
static inline void out##bwl(unsigned type value, int port) \
276262
{ \
@@ -301,7 +287,7 @@ static inline unsigned type in##bwl##_p(int port) \
301287
\
302288
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
303289
{ \
304-
if (sev_key_active()) { \
290+
if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) { \
305291
unsigned type *value = (unsigned type *)addr; \
306292
while (count) { \
307293
out##bwl(*value, port); \
@@ -317,7 +303,7 @@ static inline void outs##bwl(int port, const void *addr, unsigned long count) \
317303
\
318304
static inline void ins##bwl(int port, void *addr, unsigned long count) \
319305
{ \
320-
if (sev_key_active()) { \
306+
if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) { \
321307
unsigned type *value = (unsigned type *)addr; \
322308
while (count) { \
323309
*value = in##bwl(port); \

arch/x86/include/asm/sev-common.h

Lines changed: 30 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -18,20 +18,19 @@
1818
/* SEV Information Request/Response */
1919
#define GHCB_MSR_SEV_INFO_RESP 0x001
2020
#define GHCB_MSR_SEV_INFO_REQ 0x002
21-
#define GHCB_MSR_VER_MAX_POS 48
22-
#define GHCB_MSR_VER_MAX_MASK 0xffff
23-
#define GHCB_MSR_VER_MIN_POS 32
24-
#define GHCB_MSR_VER_MIN_MASK 0xffff
25-
#define GHCB_MSR_CBIT_POS 24
26-
#define GHCB_MSR_CBIT_MASK 0xff
27-
#define GHCB_MSR_SEV_INFO(_max, _min, _cbit) \
28-
((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) | \
29-
(((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) | \
30-
(((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) | \
21+
22+
#define GHCB_MSR_SEV_INFO(_max, _min, _cbit) \
23+
/* GHCBData[63:48] */ \
24+
((((_max) & 0xffff) << 48) | \
25+
/* GHCBData[47:32] */ \
26+
(((_min) & 0xffff) << 32) | \
27+
/* GHCBData[31:24] */ \
28+
(((_cbit) & 0xff) << 24) | \
3129
GHCB_MSR_SEV_INFO_RESP)
30+
3231
#define GHCB_MSR_INFO(v) ((v) & 0xfffUL)
33-
#define GHCB_MSR_PROTO_MAX(v) (((v) >> GHCB_MSR_VER_MAX_POS) & GHCB_MSR_VER_MAX_MASK)
34-
#define GHCB_MSR_PROTO_MIN(v) (((v) >> GHCB_MSR_VER_MIN_POS) & GHCB_MSR_VER_MIN_MASK)
32+
#define GHCB_MSR_PROTO_MAX(v) (((v) >> 48) & 0xffff)
33+
#define GHCB_MSR_PROTO_MIN(v) (((v) >> 32) & 0xffff)
3534

3635
/* CPUID Request/Response */
3736
#define GHCB_MSR_CPUID_REQ 0x004
@@ -46,30 +45,36 @@
4645
#define GHCB_CPUID_REQ_EBX 1
4746
#define GHCB_CPUID_REQ_ECX 2
4847
#define GHCB_CPUID_REQ_EDX 3
49-
#define GHCB_CPUID_REQ(fn, reg) \
50-
(GHCB_MSR_CPUID_REQ | \
51-
(((unsigned long)reg & GHCB_MSR_CPUID_REG_MASK) << GHCB_MSR_CPUID_REG_POS) | \
52-
(((unsigned long)fn) << GHCB_MSR_CPUID_FUNC_POS))
48+
#define GHCB_CPUID_REQ(fn, reg) \
49+
/* GHCBData[11:0] */ \
50+
(GHCB_MSR_CPUID_REQ | \
51+
/* GHCBData[31:12] */ \
52+
(((unsigned long)(reg) & 0x3) << 30) | \
53+
/* GHCBData[63:32] */ \
54+
(((unsigned long)fn) << 32))
5355

5456
/* AP Reset Hold */
55-
#define GHCB_MSR_AP_RESET_HOLD_REQ 0x006
56-
#define GHCB_MSR_AP_RESET_HOLD_RESP 0x007
57+
#define GHCB_MSR_AP_RESET_HOLD_REQ 0x006
58+
#define GHCB_MSR_AP_RESET_HOLD_RESP 0x007
5759

5860
/* GHCB Hypervisor Feature Request/Response */
59-
#define GHCB_MSR_HV_FT_REQ 0x080
60-
#define GHCB_MSR_HV_FT_RESP 0x081
61+
#define GHCB_MSR_HV_FT_REQ 0x080
62+
#define GHCB_MSR_HV_FT_RESP 0x081
6163

6264
#define GHCB_MSR_TERM_REQ 0x100
6365
#define GHCB_MSR_TERM_REASON_SET_POS 12
6466
#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
6567
#define GHCB_MSR_TERM_REASON_POS 16
6668
#define GHCB_MSR_TERM_REASON_MASK 0xff
67-
#define GHCB_SEV_TERM_REASON(reason_set, reason_val) \
68-
(((((u64)reason_set) & GHCB_MSR_TERM_REASON_SET_MASK) << GHCB_MSR_TERM_REASON_SET_POS) | \
69-
((((u64)reason_val) & GHCB_MSR_TERM_REASON_MASK) << GHCB_MSR_TERM_REASON_POS))
7069

71-
#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0
72-
#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
70+
#define GHCB_SEV_TERM_REASON(reason_set, reason_val) \
71+
/* GHCBData[15:12] */ \
72+
(((((u64)reason_set) & 0xf) << 12) | \
73+
/* GHCBData[23:16] */ \
74+
((((u64)reason_val) & 0xff) << 16))
75+
76+
#define GHCB_SEV_ES_GEN_REQ 0
77+
#define GHCB_SEV_ES_PROT_UNSUPPORTED 1
7378

7479
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
7580

arch/x86/kernel/cc_platform.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,14 @@ static bool amd_cc_platform_has(enum cc_attr attr)
5050
case CC_ATTR_GUEST_STATE_ENCRYPT:
5151
return sev_status & MSR_AMD64_SEV_ES_ENABLED;
5252

53+
/*
54+
* With SEV, the rep string I/O instructions need to be unrolled
55+
* but SEV-ES supports them through the #VC handler.
56+
*/
57+
case CC_ATTR_GUEST_UNROLL_STRING_IO:
58+
return (sev_status & MSR_AMD64_SEV_ENABLED) &&
59+
!(sev_status & MSR_AMD64_SEV_ES_ENABLED);
60+
5361
default:
5462
return false;
5563
}

arch/x86/kernel/head64.c

Lines changed: 31 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -126,6 +126,36 @@ static bool __head check_la57_support(unsigned long physaddr)
126126
}
127127
#endif
128128

129+
static unsigned long sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
130+
{
131+
unsigned long vaddr, vaddr_end;
132+
int i;
133+
134+
/* Encrypt the kernel and related (if SME is active) */
135+
sme_encrypt_kernel(bp);
136+
137+
/*
138+
* Clear the memory encryption mask from the .bss..decrypted section.
139+
* The bss section will be memset to zero later in the initialization so
140+
* there is no need to zero it after changing the memory encryption
141+
* attribute.
142+
*/
143+
if (sme_get_me_mask()) {
144+
vaddr = (unsigned long)__start_bss_decrypted;
145+
vaddr_end = (unsigned long)__end_bss_decrypted;
146+
for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
147+
i = pmd_index(vaddr);
148+
pmd[i] -= sme_get_me_mask();
149+
}
150+
}
151+
152+
/*
153+
* Return the SME encryption mask (if SME is active) to be used as a
154+
* modifier for the initial pgdir entry programmed into CR3.
155+
*/
156+
return sme_get_me_mask();
157+
}
158+
129159
/* Code in __startup_64() can be relocated during execution, but the compiler
130160
* doesn't have to generate PC-relative relocations when accessing globals from
131161
* that function. Clang actually does not generate them, which leads to
@@ -135,7 +165,6 @@ static bool __head check_la57_support(unsigned long physaddr)
135165
unsigned long __head __startup_64(unsigned long physaddr,
136166
struct boot_params *bp)
137167
{
138-
unsigned long vaddr, vaddr_end;
139168
unsigned long load_delta, *p;
140169
unsigned long pgtable_flags;
141170
pgdval_t *pgd;
@@ -276,34 +305,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
276305
*/
277306
*fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();
278307

279-
/* Encrypt the kernel and related (if SME is active) */
280-
sme_encrypt_kernel(bp);
281-
282-
/*
283-
* Clear the memory encryption mask from the .bss..decrypted section.
284-
* The bss section will be memset to zero later in the initialization so
285-
* there is no need to zero it after changing the memory encryption
286-
* attribute.
287-
*
288-
* This is early code, use an open coded check for SME instead of
289-
* using cc_platform_has(). This eliminates worries about removing
290-
* instrumentation or checking boot_cpu_data in the cc_platform_has()
291-
* function.
292-
*/
293-
if (sme_get_me_mask()) {
294-
vaddr = (unsigned long)__start_bss_decrypted;
295-
vaddr_end = (unsigned long)__end_bss_decrypted;
296-
for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
297-
i = pmd_index(vaddr);
298-
pmd[i] -= sme_get_me_mask();
299-
}
300-
}
301-
302-
/*
303-
* Return the SME encryption mask (if SME is active) to be used as a
304-
* modifier for the initial pgdir entry programmed into CR3.
305-
*/
306-
return sme_get_me_mask();
308+
return sme_postprocess_startup(bp, pmd);
307309
}
308310

309311
unsigned long __startup_secondary_64(void)

arch/x86/kernel/sev-shared.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -221,7 +221,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
221221

222222
fail:
223223
/* Terminate the guest */
224-
sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
224+
sev_es_terminate(GHCB_SEV_ES_GEN_REQ);
225225
}
226226

227227
static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,

arch/x86/kernel/sev.c

Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
#include <asm/fpu/xcr.h>
2727
#include <asm/processor.h>
2828
#include <asm/realmode.h>
29+
#include <asm/setup.h>
2930
#include <asm/traps.h>
3031
#include <asm/svm.h>
3132
#include <asm/smp.h>
@@ -86,9 +87,6 @@ struct ghcb_state {
8687
static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
8788
DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);
8889

89-
/* Needed in vc_early_forward_exception */
90-
void do_early_exception(struct pt_regs *regs, int trapnr);
91-
9290
static __always_inline bool on_vc_stack(struct pt_regs *regs)
9391
{
9492
unsigned long sp = regs->sp;
@@ -209,9 +207,6 @@ static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
209207
return ghcb;
210208
}
211209

212-
/* Needed in vc_early_forward_exception */
213-
void do_early_exception(struct pt_regs *regs, int trapnr);
214-
215210
static inline u64 sev_es_rd_ghcb_msr(void)
216211
{
217212
return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
@@ -1432,7 +1427,7 @@ DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
14321427
show_regs(regs);
14331428

14341429
/* Ask hypervisor to sev_es_terminate */
1435-
sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
1430+
sev_es_terminate(GHCB_SEV_ES_GEN_REQ);
14361431

14371432
/* If that fails and we get here - just panic */
14381433
panic("Returned from Terminate-Request to Hypervisor\n");
@@ -1480,7 +1475,7 @@ bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
14801475

14811476
/* Do initial setup or terminate the guest */
14821477
if (unlikely(boot_ghcb == NULL && !sev_es_setup_ghcb()))
1483-
sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
1478+
sev_es_terminate(GHCB_SEV_ES_GEN_REQ);
14841479

14851480
vc_ghcb_invalidate(boot_ghcb);
14861481

arch/x86/mm/Makefile

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,11 @@
22
# Kernel does not boot with instrumentation of tlb.c and mem_encrypt*.c
33
KCOV_INSTRUMENT_tlb.o := n
44
KCOV_INSTRUMENT_mem_encrypt.o := n
5+
KCOV_INSTRUMENT_mem_encrypt_amd.o := n
56
KCOV_INSTRUMENT_mem_encrypt_identity.o := n
67

78
KASAN_SANITIZE_mem_encrypt.o := n
9+
KASAN_SANITIZE_mem_encrypt_amd.o := n
810
KASAN_SANITIZE_mem_encrypt_identity.o := n
911

1012
# Disable KCSAN entirely, because otherwise we get warnings that some functions
@@ -13,6 +15,7 @@ KCSAN_SANITIZE := n
1315

1416
ifdef CONFIG_FUNCTION_TRACER
1517
CFLAGS_REMOVE_mem_encrypt.o = -pg
18+
CFLAGS_REMOVE_mem_encrypt_amd.o = -pg
1619
CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
1720
endif
1821

@@ -52,6 +55,8 @@ obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
5255
obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
5356
obj-$(CONFIG_PAGE_TABLE_ISOLATION) += pti.o
5457

55-
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o
58+
obj-$(CONFIG_X86_MEM_ENCRYPT) += mem_encrypt.o
59+
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o
60+
5661
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o
5762
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o

0 commit comments

Comments
 (0)