@@ -336,6 +336,21 @@ static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
336
336
nested_vmcb -> control .exit_int_info = exit_int_info ;
337
337
}
338
338
339
+ static inline bool nested_npt_enabled (struct vcpu_svm * svm )
340
+ {
341
+ return svm -> nested .ctl .nested_ctl & SVM_NESTED_CTL_NP_ENABLE ;
342
+ }
343
+
344
/*
 * Load guest's cr3 at nested entry. @nested_npt is true if we are
 * emulating VM-Entry into a guest with NPT enabled.
 *
 * NOTE(review): @nested_npt is currently unused and CR3 is always
 * routed through the common kvm_set_cr3() path -- this looks like a
 * transitional helper introduced so NPT-specific CR3 handling can be
 * added later; confirm against follow-up changes.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	return kvm_set_cr3(vcpu, cr3);
}
353
+
339
354
static void nested_prepare_vmcb_save (struct vcpu_svm * svm , struct vmcb * nested_vmcb )
340
355
{
341
356
/* Load the nested guest state */
@@ -349,7 +364,8 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
349
364
svm_set_efer (& svm -> vcpu , nested_vmcb -> save .efer );
350
365
svm_set_cr0 (& svm -> vcpu , nested_vmcb -> save .cr0 );
351
366
svm_set_cr4 (& svm -> vcpu , nested_vmcb -> save .cr4 );
352
- (void )kvm_set_cr3 (& svm -> vcpu , nested_vmcb -> save .cr3 );
367
+ (void )nested_svm_load_cr3 (& svm -> vcpu , nested_vmcb -> save .cr3 ,
368
+ nested_npt_enabled (svm ));
353
369
354
370
svm -> vmcb -> save .cr2 = svm -> vcpu .arch .cr2 = nested_vmcb -> save .cr2 ;
355
371
kvm_rax_write (& svm -> vcpu , nested_vmcb -> save .rax );
@@ -368,7 +384,8 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_v
368
384
static void nested_prepare_vmcb_control (struct vcpu_svm * svm )
369
385
{
370
386
const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK ;
371
- if (svm -> nested .ctl .nested_ctl & SVM_NESTED_CTL_NP_ENABLE )
387
+
388
+ if (nested_npt_enabled (svm ))
372
389
nested_svm_init_mmu_context (& svm -> vcpu );
373
390
374
391
/* Guest paging mode is active - reset mmu */
0 commit comments