Skip to content

Commit d951b22

Browse files
vittyvk authored and bonzini committed
KVM: selftests: smm_test: Test SMM enter from L2
Two additional tests are added: - SMM triggered from L2 does not corrupt L1 host state. - Save/restore during SMM triggered from L2 does not corrupt guest/host state. Signed-off-by: Vitaly Kuznetsov <[email protected]> Message-Id: <[email protected]> Reviewed-by: Maxim Levitsky <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent bb00bd9 commit d951b22

File tree

1 file changed

+64
-6
lines changed

1 file changed

+64
-6
lines changed

tools/testing/selftests/kvm/x86_64/smm_test.c

Lines changed: 64 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -53,15 +53,28 @@ static inline void sync_with_host(uint64_t phase)
5353
: "+a" (phase));
5454
}
5555

56-
void self_smi(void)
56+
static void self_smi(void)
5757
{
5858
x2apic_write_reg(APIC_ICR,
5959
APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
6060
}
6161

62-
void guest_code(void *arg)
62+
static void l2_guest_code(void)
6363
{
64+
sync_with_host(8);
65+
66+
sync_with_host(10);
67+
68+
vmcall();
69+
}
70+
71+
static void guest_code(void *arg)
72+
{
73+
#define L2_GUEST_STACK_SIZE 64
74+
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
6475
uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
76+
struct svm_test_data *svm = arg;
77+
struct vmx_pages *vmx_pages = arg;
6578

6679
sync_with_host(1);
6780

@@ -74,21 +87,50 @@ void guest_code(void *arg)
7487
sync_with_host(4);
7588

7689
if (arg) {
77-
if (cpu_has_svm())
78-
generic_svm_setup(arg, NULL, NULL);
79-
else
80-
GUEST_ASSERT(prepare_for_vmx_operation(arg));
90+
if (cpu_has_svm()) {
91+
generic_svm_setup(svm, l2_guest_code,
92+
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
93+
} else {
94+
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
95+
GUEST_ASSERT(load_vmcs(vmx_pages));
96+
prepare_vmcs(vmx_pages, l2_guest_code,
97+
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
98+
}
8199

82100
sync_with_host(5);
83101

84102
self_smi();
85103

86104
sync_with_host(7);
105+
106+
if (cpu_has_svm()) {
107+
run_guest(svm->vmcb, svm->vmcb_gpa);
108+
svm->vmcb->save.rip += 3;
109+
run_guest(svm->vmcb, svm->vmcb_gpa);
110+
} else {
111+
vmlaunch();
112+
vmresume();
113+
}
114+
115+
/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
116+
sync_with_host(12);
87117
}
88118

89119
sync_with_host(DONE);
90120
}
91121

122+
void inject_smi(struct kvm_vm *vm)
123+
{
124+
struct kvm_vcpu_events events;
125+
126+
vcpu_events_get(vm, VCPU_ID, &events);
127+
128+
events.smi.pending = 1;
129+
events.flags |= KVM_VCPUEVENT_VALID_SMM;
130+
131+
vcpu_events_set(vm, VCPU_ID, &events);
132+
}
133+
92134
int main(int argc, char *argv[])
93135
{
94136
vm_vaddr_t nested_gva = 0;
@@ -147,6 +189,22 @@ int main(int argc, char *argv[])
147189
"Unexpected stage: #%x, got %x",
148190
stage, stage_reported);
149191

192+
/*
193+
* Enter SMM during L2 execution and check that we correctly
194+
* return from it. Do not perform save/restore while in SMM yet.
195+
*/
196+
if (stage == 8) {
197+
inject_smi(vm);
198+
continue;
199+
}
200+
201+
/*
202+
* Perform save/restore while the guest is in SMM triggered
203+
* during L2 execution.
204+
*/
205+
if (stage == 10)
206+
inject_smi(vm);
207+
150208
state = vcpu_save_state(vm, VCPU_ID);
151209
kvm_vm_release(vm);
152210
kvm_vm_restart(vm, O_RDWR);

0 commit comments

Comments
 (0)