Skip to content

Commit 20273d2

Browse files
committed
Merge tag 'x86_sev_for_v5.16_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 SEV updates from Borislav Petkov:

 - Export sev_es_ghcb_hv_call() so that HyperV Isolation VMs can use it too

 - Non-urgent fixes and cleanups

* tag 'x86_sev_for_v5.16_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sev: Expose sev_es_ghcb_hv_call() for use by HyperV
  x86/sev: Allow #VC exceptions on the VC2 stack
  x86/sev: Fix stack type check in vc_switch_off_ist()
  x86/sme: Use #define USE_EARLY_PGTABLE_L5 in mem_encrypt_identity.c
  x86/sev: Carve out HV call's return value verification
2 parents 160729a + 007faec commit 20273d2

File tree

5 files changed

+80
-39
lines changed

5 files changed

+80
-39
lines changed

arch/x86/include/asm/sev.h

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -53,6 +53,7 @@ static inline u64 lower_bits(u64 val, unsigned int bits)
5353

5454
struct real_mode_header;
5555
enum stack_type;
56+
struct ghcb;
5657

5758
/* Early IDT entry points for #VC handler */
5859
extern void vc_no_ghcb(void);
@@ -81,6 +82,11 @@ static __always_inline void sev_es_nmi_complete(void)
8182
__sev_es_nmi_complete();
8283
}
8384
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
85+
extern enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
86+
bool set_ghcb_msr,
87+
struct es_em_ctxt *ctxt,
88+
u64 exit_code, u64 exit_info_1,
89+
u64 exit_info_2);
8490
#else
8591
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
8692
static inline void sev_es_ist_exit(void) { }

arch/x86/kernel/sev-shared.c

Lines changed: 40 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -94,25 +94,15 @@ static void vc_finish_insn(struct es_em_ctxt *ctxt)
9494
ctxt->regs->ip += ctxt->insn.length;
9595
}
9696

97-
static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
98-
struct es_em_ctxt *ctxt,
99-
u64 exit_code, u64 exit_info_1,
100-
u64 exit_info_2)
97+
static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
10198
{
102-
enum es_result ret;
103-
104-
/* Fill in protocol and format specifiers */
105-
ghcb->protocol_version = GHCB_PROTOCOL_MAX;
106-
ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
107-
108-
ghcb_set_sw_exit_code(ghcb, exit_code);
109-
ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
110-
ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
99+
u32 ret;
111100

112-
sev_es_wr_ghcb_msr(__pa(ghcb));
113-
VMGEXIT();
101+
ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0);
102+
if (!ret)
103+
return ES_OK;
114104

115-
if ((ghcb->save.sw_exit_info_1 & 0xffffffff) == 1) {
105+
if (ret == 1) {
116106
u64 info = ghcb->save.sw_exit_info_2;
117107
unsigned long v;
118108

@@ -124,19 +114,40 @@ static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
124114
((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) &&
125115
((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
126116
ctxt->fi.vector = v;
117+
127118
if (info & SVM_EVTINJ_VALID_ERR)
128119
ctxt->fi.error_code = info >> 32;
129-
ret = ES_EXCEPTION;
130-
} else {
131-
ret = ES_VMM_ERROR;
120+
121+
return ES_EXCEPTION;
132122
}
133-
} else if (ghcb->save.sw_exit_info_1 & 0xffffffff) {
134-
ret = ES_VMM_ERROR;
135-
} else {
136-
ret = ES_OK;
137123
}
138124

139-
return ret;
125+
return ES_VMM_ERROR;
126+
}
127+
128+
enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb, bool set_ghcb_msr,
129+
struct es_em_ctxt *ctxt, u64 exit_code,
130+
u64 exit_info_1, u64 exit_info_2)
131+
{
132+
/* Fill in protocol and format specifiers */
133+
ghcb->protocol_version = GHCB_PROTOCOL_MAX;
134+
ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
135+
136+
ghcb_set_sw_exit_code(ghcb, exit_code);
137+
ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
138+
ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
139+
140+
/*
141+
* Hyper-V unenlightened guests use a paravisor for communicating and
142+
* GHCB pages are being allocated and set up by that paravisor. Linux
143+
* should not change the GHCB page's physical address.
144+
*/
145+
if (set_ghcb_msr)
146+
sev_es_wr_ghcb_msr(__pa(ghcb));
147+
148+
VMGEXIT();
149+
150+
return verify_exception_info(ghcb, ctxt);
140151
}
141152

142153
/*
@@ -413,7 +424,7 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
413424
*/
414425
sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
415426
ghcb_set_sw_scratch(ghcb, sw_scratch);
416-
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
427+
ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_IOIO,
417428
exit_info_1, exit_info_2);
418429
if (ret != ES_OK)
419430
return ret;
@@ -455,7 +466,8 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
455466

456467
ghcb_set_rax(ghcb, rax);
457468

458-
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
469+
ret = sev_es_ghcb_hv_call(ghcb, true, ctxt,
470+
SVM_EXIT_IOIO, exit_info_1, 0);
459471
if (ret != ES_OK)
460472
return ret;
461473

@@ -486,7 +498,7 @@ static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
486498
/* xgetbv will cause #GP - use reset value for xcr0 */
487499
ghcb_set_xcr0(ghcb, 1);
488500

489-
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
501+
ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_CPUID, 0, 0);
490502
if (ret != ES_OK)
491503
return ret;
492504

@@ -511,7 +523,7 @@ static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
511523
bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
512524
enum es_result ret;
513525

514-
ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
526+
ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, 0, 0);
515527
if (ret != ES_OK)
516528
return ret;
517529

arch/x86/kernel/sev.c

Lines changed: 24 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -648,7 +648,8 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
648648
ghcb_set_rdx(ghcb, regs->dx);
649649
}
650650

651-
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
651+
ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_MSR,
652+
exit_info_1, 0);
652653

653654
if ((ret == ES_OK) && (!exit_info_1)) {
654655
regs->ax = ghcb->save.rax;
@@ -867,7 +868,7 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
867868

868869
ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
869870

870-
return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
871+
return sev_es_ghcb_hv_call(ghcb, true, ctxt, exit_code, exit_info_1, exit_info_2);
871872
}
872873

873874
static enum es_result vc_handle_mmio_twobyte_ops(struct ghcb *ghcb,
@@ -1117,7 +1118,7 @@ static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
11171118

11181119
/* Using a value of 0 for ExitInfo1 means RAX holds the value */
11191120
ghcb_set_rax(ghcb, val);
1120-
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
1121+
ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
11211122
if (ret != ES_OK)
11221123
return ret;
11231124

@@ -1147,7 +1148,7 @@ static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
11471148
static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
11481149
struct es_em_ctxt *ctxt)
11491150
{
1150-
return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
1151+
return sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_WBINVD, 0, 0);
11511152
}
11521153

11531154
static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
@@ -1156,7 +1157,7 @@ static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt
11561157

11571158
ghcb_set_rcx(ghcb, ctxt->regs->cx);
11581159

1159-
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
1160+
ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_RDPMC, 0, 0);
11601161
if (ret != ES_OK)
11611162
return ret;
11621163

@@ -1197,7 +1198,7 @@ static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
11971198
if (x86_platform.hyper.sev_es_hcall_prepare)
11981199
x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
11991200

1200-
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
1201+
ret = sev_es_ghcb_hv_call(ghcb, true, ctxt, SVM_EXIT_VMMCALL, 0, 0);
12011202
if (ret != ES_OK)
12021203
return ret;
12031204

@@ -1319,13 +1320,26 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
13191320
}
13201321
}
13211322

1322-
static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
1323+
static __always_inline bool is_vc2_stack(unsigned long sp)
13231324
{
1324-
unsigned long sp = (unsigned long)regs;
1325-
13261325
return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
13271326
}
13281327

1328+
static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
1329+
{
1330+
unsigned long sp, prev_sp;
1331+
1332+
sp = (unsigned long)regs;
1333+
prev_sp = regs->sp;
1334+
1335+
/*
1336+
* If the code was already executing on the VC2 stack when the #VC
1337+
* happened, let it proceed to the normal handling routine. This way the
1338+
* code executing on the VC2 stack can cause #VC exceptions to get handled.
1339+
*/
1340+
return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
1341+
}
1342+
13291343
static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
13301344
{
13311345
struct ghcb_state state;
@@ -1406,7 +1420,7 @@ DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
14061420
* But keep this here in case the noinstr annotations are violated due
14071421
* to bug elsewhere.
14081422
*/
1409-
if (unlikely(on_vc_fallback_stack(regs))) {
1423+
if (unlikely(vc_from_invalid_context(regs))) {
14101424
instrumentation_begin();
14111425
panic("Can't handle #VC exception from unsupported context\n");
14121426
instrumentation_end();

arch/x86/kernel/traps.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -709,7 +709,7 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r
709709
stack = (unsigned long *)sp;
710710

711711
if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
712-
info.type >= STACK_TYPE_EXCEPTION_LAST)
712+
info.type > STACK_TYPE_EXCEPTION_LAST)
713713
sp = __this_cpu_ist_top_va(VC2);
714714

715715
sync:

arch/x86/mm/mem_encrypt_identity.c

Lines changed: 9 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -27,6 +27,15 @@
2727
#undef CONFIG_PARAVIRT_XXL
2828
#undef CONFIG_PARAVIRT_SPINLOCKS
2929

30+
/*
31+
* This code runs before CPU feature bits are set. By default, the
32+
* pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if
33+
* 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5
34+
* is provided to handle this situation and, instead, use a variable that
35+
* has been set by the early boot code.
36+
*/
37+
#define USE_EARLY_PGTABLE_L5
38+
3039
#include <linux/kernel.h>
3140
#include <linux/mm.h>
3241
#include <linux/mem_encrypt.h>

0 commit comments

Comments (0)