Skip to content

Commit 136a55c

Browse files
Maxim Levitsky authored and Paolo Bonzini committed
KVM: x86: nSVM: refactor svm_leave_smm and smm_enter_smm
Use return statements instead of nested if, and fix error path to free all the maps that were allocated.

Suggested-by: Sean Christopherson <[email protected]>
Signed-off-by: Maxim Levitsky <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent e85d3e7 commit 136a55c

File tree

1 file changed

+69
-66
lines changed

1 file changed

+69
-66
lines changed

arch/x86/kvm/svm/svm.c

Lines changed: 69 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -4285,96 +4285,99 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
42854285
struct kvm_host_map map_save;
42864286
int ret;
42874287

4288-
if (is_guest_mode(vcpu)) {
4289-
/* FED8h - SVM Guest */
4290-
put_smstate(u64, smstate, 0x7ed8, 1);
4291-
/* FEE0h - SVM Guest VMCB Physical Address */
4292-
put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
4288+
if (!is_guest_mode(vcpu))
4289+
return 0;
42934290

4294-
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4295-
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4296-
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4291+
/* FED8h - SVM Guest */
4292+
put_smstate(u64, smstate, 0x7ed8, 1);
4293+
/* FEE0h - SVM Guest VMCB Physical Address */
4294+
put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
42974295

4298-
ret = nested_svm_vmexit(svm);
4299-
if (ret)
4300-
return ret;
4296+
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4297+
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4298+
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
43014299

4302-
/*
4303-
* KVM uses VMCB01 to store L1 host state while L2 runs but
4304-
* VMCB01 is going to be used during SMM and thus the state will
4305-
* be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
4306-
* area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
4307-
* format of the area is identical to guest save area offsetted
4308-
* by 0x400 (matches the offset of 'struct vmcb_save_area'
4309-
* within 'struct vmcb'). Note: HSAVE area may also be used by
4310-
* L1 hypervisor to save additional host context (e.g. KVM does
4311-
* that, see svm_prepare_guest_switch()) which must be
4312-
* preserved.
4313-
*/
4314-
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
4315-
&map_save) == -EINVAL)
4316-
return 1;
4300+
ret = nested_svm_vmexit(svm);
4301+
if (ret)
4302+
return ret;
4303+
4304+
/*
4305+
* KVM uses VMCB01 to store L1 host state while L2 runs but
4306+
* VMCB01 is going to be used during SMM and thus the state will
4307+
* be lost. Temporary save non-VMLOAD/VMSAVE state to the host save
4308+
* area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the
4309+
* format of the area is identical to guest save area offsetted
4310+
* by 0x400 (matches the offset of 'struct vmcb_save_area'
4311+
* within 'struct vmcb'). Note: HSAVE area may also be used by
4312+
* L1 hypervisor to save additional host context (e.g. KVM does
4313+
* that, see svm_prepare_guest_switch()) which must be
4314+
* preserved.
4315+
*/
4316+
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
4317+
&map_save) == -EINVAL)
4318+
return 1;
43174319

4318-
BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
4320+
BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
43194321

4320-
svm_copy_vmrun_state(map_save.hva + 0x400,
4321-
&svm->vmcb01.ptr->save);
4322+
svm_copy_vmrun_state(map_save.hva + 0x400,
4323+
&svm->vmcb01.ptr->save);
43224324

4323-
kvm_vcpu_unmap(vcpu, &map_save, true);
4324-
}
4325+
kvm_vcpu_unmap(vcpu, &map_save, true);
43254326
return 0;
43264327
}
43274328

43284329
static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
43294330
{
43304331
struct vcpu_svm *svm = to_svm(vcpu);
43314332
struct kvm_host_map map, map_save;
4332-
int ret = 0;
4333+
u64 saved_efer, vmcb12_gpa;
4334+
struct vmcb *vmcb12;
4335+
int ret;
43334336

4334-
if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
4335-
u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
4336-
u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
4337-
u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
4338-
struct vmcb *vmcb12;
4337+
if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
4338+
return 0;
43394339

4340-
if (guest) {
4341-
if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
4342-
return 1;
4340+
/* Non-zero if SMI arrived while vCPU was in guest mode. */
4341+
if (!GET_SMSTATE(u64, smstate, 0x7ed8))
4342+
return 0;
43434343

4344-
if (!(saved_efer & EFER_SVME))
4345-
return 1;
4344+
if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
4345+
return 1;
43464346

4347-
if (kvm_vcpu_map(vcpu,
4348-
gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
4349-
return 1;
4347+
saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
4348+
if (!(saved_efer & EFER_SVME))
4349+
return 1;
43504350

4351-
if (svm_allocate_nested(svm))
4352-
return 1;
4351+
vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
4352+
if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
4353+
return 1;
43534354

4354-
kvm_vcpu_unmap(vcpu, &map, true);
4355+
ret = 1;
4356+
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
4357+
goto unmap_map;
43554358

4356-
/*
4357-
* Restore L1 host state from L1 HSAVE area as VMCB01 was
4358-
* used during SMM (see svm_enter_smm())
4359-
*/
4360-
if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
4361-
&map_save) == -EINVAL)
4362-
return 1;
4359+
if (svm_allocate_nested(svm))
4360+
goto unmap_save;
43634361

4364-
svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
4365-
map_save.hva + 0x400);
4362+
/*
4363+
* Restore L1 host state from L1 HSAVE area as VMCB01 was
4364+
* used during SMM (see svm_enter_smm())
4365+
*/
43664366

4367-
/*
4368-
* Enter the nested guest now
4369-
*/
4370-
vmcb12 = map.hva;
4371-
nested_load_control_from_vmcb12(svm, &vmcb12->control);
4372-
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
4367+
svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
43734368

4374-
kvm_vcpu_unmap(vcpu, &map_save, true);
4375-
}
4376-
}
4369+
/*
4370+
* Enter the nested guest now
4371+
*/
43774372

4373+
vmcb12 = map.hva;
4374+
nested_load_control_from_vmcb12(svm, &vmcb12->control);
4375+
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
4376+
4377+
unmap_save:
4378+
kvm_vcpu_unmap(vcpu, &map_save, true);
4379+
unmap_map:
4380+
kvm_vcpu_unmap(vcpu, &map, true);
43784381
return ret;
43794382
}
43804383

0 commit comments

Comments
 (0)