
Commit 9ecc1c1

Hou Wenlong authored and sean-jc committed
KVM: x86/mmu: Only allocate shadowed translation cache for sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL
Only indirect SPs with sp->role.level <= KVM_MAX_HUGEPAGE_LEVEL can have leaf gptes, so the shadowed translation cache needs to be allocated only for those SPs. Callers can then check sp->shadowed_translation itself to decide whether the cached information is usable. Also extend the WARN in FNAME(sync_spte)() to ensure this doesn't break shadow_mmu_get_sp_for_split().

Suggested-by: Lai Jiangshan <[email protected]>
Signed-off-by: Hou Wenlong <[email protected]>
Link: https://lore.kernel.org/r/5b0cda8a7456cda476b14fca36414a56f921dd52.1715398655.git.houwenlong.hwl@antgroup.com
Signed-off-by: Sean Christopherson <[email protected]>
1 parent 4f8973e commit 9ecc1c1
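
In short, the change makes the presence of sp->shadowed_translation itself the signal for whether cached gfn/access information exists. Below is a minimal, self-contained C sketch of that policy (hypothetical names and simplified constants, not KVM code), kept deliberately close to the hunks shown further down:

/*
 * Hypothetical sketch of the allocation policy in this commit: the
 * shadowed translation cache is allocated only for indirect shadow pages
 * whose level can contain leaf gptes, and readers key off the pointer
 * being non-NULL instead of re-deriving the condition from role bits.
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define MAX_HUGEPAGE_LEVEL 3	/* stand-in for KVM_MAX_HUGEPAGE_LEVEL */
#define ENTRIES_PER_SP     512
#define PAGE_SHIFT         12
#define ACC_ALL            0x7ULL

struct sketch_sp {
	int level;			/* sp->role.level */
	int direct;			/* sp->role.direct */
	uint64_t gfn;
	uint64_t *shadowed_translation;	/* NULL when no leaf gptes are possible */
};

static struct sketch_sp *sketch_alloc_sp(int level, int direct, uint64_t gfn)
{
	struct sketch_sp *sp = calloc(1, sizeof(*sp));

	sp->level = level;
	sp->direct = direct;
	sp->gfn = gfn;

	/* Mirrors the new condition in kvm_mmu_alloc_shadow_page(). */
	if (!direct && level <= MAX_HUGEPAGE_LEVEL)
		sp->shadowed_translation = calloc(ENTRIES_PER_SP, sizeof(uint64_t));

	return sp;
}

static uint64_t sketch_get_gfn(struct sketch_sp *sp, int index)
{
	/* Presence of the cache, not the role, decides which path to use. */
	if (sp->shadowed_translation)
		return sp->shadowed_translation[index] >> PAGE_SHIFT;

	/* Derived gfn for direct SPs and indirect SPs above the hugepage levels. */
	return sp->gfn + index;		/* simplified: real code scales by level */
}

static void sketch_free_sp(struct sketch_sp *sp)
{
	/* free(NULL) is a no-op, so no role check is needed, as in the patch. */
	free(sp->shadowed_translation);
	free(sp);
}

int main(void)
{
	struct sketch_sp *leafy = sketch_alloc_sp(1, 0, 0x1000);
	struct sketch_sp *high  = sketch_alloc_sp(5, 0, 0x2000);

	leafy->shadowed_translation[0] = (0xabcULL << PAGE_SHIFT) | ACC_ALL;

	printf("leafy gfn[0] = 0x%llx (from cache)\n",
	       (unsigned long long)sketch_get_gfn(leafy, 0));
	printf("high  gfn[0] = 0x%llx (derived, no cache)\n",
	       (unsigned long long)sketch_get_gfn(high, 0));

	sketch_free_sp(leafy);
	sketch_free_sp(high);
	return 0;
}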

2 files changed, 7 insertions(+), 7 deletions(-)

arch/x86/kvm/mmu/mmu.c (5 additions, 6 deletions)

@@ -719,7 +719,7 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
 	if (sp->role.passthrough)
 		return sp->gfn;
 
-	if (!sp->role.direct)
+	if (sp->shadowed_translation)
 		return sp->shadowed_translation[index] >> PAGE_SHIFT;
 
 	return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
@@ -733,7 +733,7 @@ static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
  */
 static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
 {
-	if (sp_has_gptes(sp))
+	if (sp->shadowed_translation)
 		return sp->shadowed_translation[index] & ACC_ALL;
 
 	/*
@@ -754,7 +754,7 @@ static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
 static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index,
 					 gfn_t gfn, unsigned int access)
 {
-	if (sp_has_gptes(sp)) {
+	if (sp->shadowed_translation) {
 		sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
 		return;
 	}
@@ -1697,8 +1697,7 @@ static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
 	hlist_del(&sp->hash_link);
 	list_del(&sp->link);
 	free_page((unsigned long)sp->spt);
-	if (!sp->role.direct)
-		free_page((unsigned long)sp->shadowed_translation);
+	free_page((unsigned long)sp->shadowed_translation);
 	kmem_cache_free(mmu_page_header_cache, sp);
 }
 
@@ -2200,7 +2199,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
 
 	sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
 	sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
-	if (!role.direct)
+	if (!role.direct && role.level <= KVM_MAX_HUGEPAGE_LEVEL)
 		sp->shadowed_translation = kvm_mmu_memory_cache_alloc(caches->shadowed_info_cache);
 
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

arch/x86/kvm/mmu/paging_tmpl.h (2 additions, 1 deletion)

@@ -911,7 +911,8 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
 	gpa_t pte_gpa;
 	gfn_t gfn;
 
-	if (WARN_ON_ONCE(sp->spt[i] == SHADOW_NONPRESENT_VALUE))
+	if (WARN_ON_ONCE(sp->spt[i] == SHADOW_NONPRESENT_VALUE ||
+			 !sp->shadowed_translation))
 		return 0;
 
 	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
