Skip to content

Commit e85d3e7

Browse files
Maxim Levitsky authored and bonzini (Paolo Bonzini) committed
KVM: x86: SVM: call KVM_REQ_GET_NESTED_STATE_PAGES on exit from SMM mode
Currently, KVM_REQ_GET_NESTED_STATE_PAGES on SVM only reloads the PDPTRs and the MSR bitmap. The former is not really needed for SMM, as the SMM exit code reloads them again from SMRAM's CR3, and the latter happens to work because the MSR bitmap isn't modified while in SMM. Still, it is better to be consistent with VMX. Signed-off-by: Maxim Levitsky <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 37687c4 commit e85d3e7

File tree

3 files changed

+9
-5
lines changed

3 files changed

+9
-5
lines changed

arch/x86/kvm/svm/nested.c

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -579,7 +579,7 @@ static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to
579579
}
580580

581581
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
582-
struct vmcb *vmcb12)
582+
struct vmcb *vmcb12, bool from_vmrun)
583583
{
584584
struct vcpu_svm *svm = to_svm(vcpu);
585585
int ret;
@@ -609,13 +609,16 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
609609
nested_vmcb02_prepare_save(svm, vmcb12);
610610

611611
ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
612-
nested_npt_enabled(svm), true);
612+
nested_npt_enabled(svm), from_vmrun);
613613
if (ret)
614614
return ret;
615615

616616
if (!npt_enabled)
617617
vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
618618

619+
if (!from_vmrun)
620+
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
621+
619622
svm_set_gif(svm, true);
620623

621624
return 0;
@@ -681,7 +684,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
681684

682685
svm->nested.nested_run_pending = 1;
683686

684-
if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
687+
if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
685688
goto out_exit_err;
686689

687690
if (nested_svm_vmrun_msrpm(svm))

arch/x86/kvm/svm/svm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4369,7 +4369,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
43694369
*/
43704370
vmcb12 = map.hva;
43714371
nested_load_control_from_vmcb12(svm, &vmcb12->control);
4372-
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
4372+
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
43734373

43744374
kvm_vcpu_unmap(vcpu, &map_save, true);
43754375
}

arch/x86/kvm/svm/svm.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -459,7 +459,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
459459
return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
460460
}
461461

462-
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
462+
int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
463+
u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
463464
void svm_leave_nested(struct vcpu_svm *svm);
464465
void svm_free_nested(struct vcpu_svm *svm);
465466
int svm_allocate_nested(struct vcpu_svm *svm);

0 commit comments

Comments
 (0)