@@ -3543,11 +3543,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
 	if (!VALID_PAGE(*root_hpa))
 		return;

-	/*
-	 * The "root" may be a special root, e.g. a PAE entry, treat it as a
-	 * SPTE to ensure any non-PA bits are dropped.
-	 */
-	sp = spte_to_child_sp(*root_hpa);
+	sp = root_to_sp(*root_hpa);
 	if (WARN_ON_ONCE(!sp))
 		return;

@@ -3593,7 +3589,7 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
 					   &invalid_list);

 	if (free_active_root) {
-		if (to_shadow_page(mmu->root.hpa)) {
+		if (root_to_sp(mmu->root.hpa)) {
 			mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
 		} else if (mmu->pae_root) {
 			for (i = 0; i < 4; ++i) {
@@ -3617,6 +3613,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 {
 	unsigned long roots_to_free = 0;
+	struct kvm_mmu_page *sp;
 	hpa_t root_hpa;
 	int i;

@@ -3631,8 +3628,8 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 		if (!VALID_PAGE(root_hpa))
 			continue;

-		if (!to_shadow_page(root_hpa) ||
-		    to_shadow_page(root_hpa)->role.guest_mode)
+		sp = root_to_sp(root_hpa);
+		if (!sp || sp->role.guest_mode)
 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
 	}

@@ -3987,7 +3984,7 @@ static bool is_unsync_root(hpa_t root)
 	 * requirement isn't satisfied.
 	 */
 	smp_rmb();
-	sp = to_shadow_page(root);
+	sp = root_to_sp(root);

 	/*
 	 * PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
@@ -4017,11 +4014,12 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)

 	if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu->root.hpa;
-		sp = to_shadow_page(root);

 		if (!is_unsync_root(root))
 			return;

+		sp = root_to_sp(root);
+
 		write_lock(&vcpu->kvm->mmu_lock);
 		mmu_sync_children(vcpu, sp, true);
 		write_unlock(&vcpu->kvm->mmu_lock);
@@ -4351,7 +4349,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 				struct kvm_page_fault *fault)
 {
-	struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
+	struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);

 	/* Special roots, e.g. pae_root, are not backed by shadow pages. */
 	if (sp && is_obsolete_sp(vcpu->kvm, sp))
@@ -4531,7 +4529,7 @@ static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
 {
 	return (role.direct || pgd == root->pgd) &&
 	       VALID_PAGE(root->hpa) &&
-	       role.word == to_shadow_page(root->hpa)->role.word;
+	       role.word == root_to_sp(root->hpa)->role.word;
 }

 /*
@@ -4605,7 +4603,7 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
 	 * having to deal with PDPTEs.  We may add support for 32-bit hosts/VMs
 	 * later if necessary.
 	 */
-	if (VALID_PAGE(mmu->root.hpa) && !to_shadow_page(mmu->root.hpa))
+	if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
 		kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);

 	if (VALID_PAGE(mmu->root.hpa))
@@ -4653,7 +4651,7 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
 	 */
 	if (!new_role.direct)
 		__clear_sp_write_flooding_count(
-				to_shadow_page(vcpu->arch.mmu->root.hpa));
+				root_to_sp(vcpu->arch.mmu->root.hpa));
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);

@@ -5508,7 +5506,7 @@ static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
 	 * (c) KVM doesn't track previous roots for PAE paging, and the guest
 	 * is unlikely to zap an in-use PGD.
 	 */
-	sp = to_shadow_page(root_hpa);
+	sp = root_to_sp(root_hpa);
 	return !sp || is_obsolete_sp(kvm, sp);
 }

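The root_to_sp() helper itself is defined outside this diff. A minimal sketch of its plausible shape, inferred from the comment deleted in the first hunk and from how callers use the result (the upstream definition may differ, e.g. it may also filter out other roots that have no backing shadow page):

static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
{
	/*
	 * The "root" may be a special root, e.g. a PAE entry; treat it as a
	 * SPTE so that any non-PA bits are dropped before looking up the
	 * shadow page.  Special roots such as pae_root are not backed by a
	 * shadow page, in which case NULL is returned, which is why the
	 * callers above check for !sp.
	 */
	return spte_to_child_sp(root);
}

Centralizing the SPTE masking in one helper is what lets the call sites above drop the duplicated to_shadow_page()/spte_to_child_sp() calls and the explanatory comment at each of them.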