Skip to content

Commit efaa5b9

Browse files
author
Marc Zyngier
committed
KVM: arm64: Use TTL hint when invalidating stage-2 translations
Since we often have a precise idea of the level we're dealing with when invalidating TLBs, we can provide it as a hint to our invalidation helper. Reviewed-by: James Morse <[email protected]> Reviewed-by: Alexandru Elisei <[email protected]> Signed-off-by: Marc Zyngier <[email protected]>
1 parent a0e50aa commit efaa5b9

File tree

4 files changed

+23
-19
lines changed

4 files changed

+23
-19
lines changed

arch/arm64/include/asm/kvm_asm.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,8 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
9191
#endif
9292

9393
extern void __kvm_flush_vm_context(void);
94-
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa);
94+
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
95+
int level);
9596
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
9697
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
9798

arch/arm64/kvm/hyp/nvhe/tlb.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,8 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
4646
}
4747
}
4848

49-
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
49+
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
50+
phys_addr_t ipa, int level)
5051
{
5152
struct tlb_inv_context cxt;
5253

@@ -62,7 +63,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
6263
* whole of Stage-1. Weep...
6364
*/
6465
ipa >>= 12;
65-
__tlbi(ipas2e1is, ipa);
66+
__tlbi_level(ipas2e1is, ipa, level);
6667

6768
/*
6869
* We have to ensure completion of the invalidation at Stage-2,

arch/arm64/kvm/hyp/vhe/tlb.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,8 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
7979
local_irq_restore(cxt->flags);
8080
}
8181

82-
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
82+
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
83+
phys_addr_t ipa, int level)
8384
{
8485
struct tlb_inv_context cxt;
8586

@@ -94,7 +95,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
9495
* whole of Stage-1. Weep...
9596
*/
9697
ipa >>= 12;
97-
__tlbi(ipas2e1is, ipa);
98+
__tlbi_level(ipas2e1is, ipa, level);
9899

99100
/*
100101
* We have to ensure completion of the invalidation at Stage-2,

arch/arm64/kvm/mmu.c

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -58,9 +58,10 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
5858
kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
5959
}
6060

61-
static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
61+
static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
62+
int level)
6263
{
63-
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa);
64+
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
6465
}
6566

6667
/*
@@ -102,7 +103,7 @@ static void stage2_dissolve_pmd(struct kvm_s2_mmu *mmu, phys_addr_t addr, pmd_t
102103
return;
103104

104105
pmd_clear(pmd);
105-
kvm_tlb_flush_vmid_ipa(mmu, addr);
106+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
106107
put_page(virt_to_page(pmd));
107108
}
108109

@@ -122,7 +123,7 @@ static void stage2_dissolve_pud(struct kvm_s2_mmu *mmu, phys_addr_t addr, pud_t
122123
return;
123124

124125
stage2_pud_clear(kvm, pudp);
125-
kvm_tlb_flush_vmid_ipa(mmu, addr);
126+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
126127
put_page(virt_to_page(pudp));
127128
}
128129

@@ -163,7 +164,7 @@ static void clear_stage2_pgd_entry(struct kvm_s2_mmu *mmu, pgd_t *pgd, phys_addr
163164
struct kvm *kvm = mmu->kvm;
164165
p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
165166
stage2_pgd_clear(kvm, pgd);
166-
kvm_tlb_flush_vmid_ipa(mmu, addr);
167+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
167168
stage2_p4d_free(kvm, p4d_table);
168169
put_page(virt_to_page(pgd));
169170
}
@@ -173,7 +174,7 @@ static void clear_stage2_p4d_entry(struct kvm_s2_mmu *mmu, p4d_t *p4d, phys_addr
173174
struct kvm *kvm = mmu->kvm;
174175
pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);
175176
stage2_p4d_clear(kvm, p4d);
176-
kvm_tlb_flush_vmid_ipa(mmu, addr);
177+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
177178
stage2_pud_free(kvm, pud_table);
178179
put_page(virt_to_page(p4d));
179180
}
@@ -185,7 +186,7 @@ static void clear_stage2_pud_entry(struct kvm_s2_mmu *mmu, pud_t *pud, phys_addr
185186

186187
VM_BUG_ON(stage2_pud_huge(kvm, *pud));
187188
stage2_pud_clear(kvm, pud);
188-
kvm_tlb_flush_vmid_ipa(mmu, addr);
189+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
189190
stage2_pmd_free(kvm, pmd_table);
190191
put_page(virt_to_page(pud));
191192
}
@@ -195,7 +196,7 @@ static void clear_stage2_pmd_entry(struct kvm_s2_mmu *mmu, pmd_t *pmd, phys_addr
195196
pte_t *pte_table = pte_offset_kernel(pmd, 0);
196197
VM_BUG_ON(pmd_thp_or_huge(*pmd));
197198
pmd_clear(pmd);
198-
kvm_tlb_flush_vmid_ipa(mmu, addr);
199+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
199200
free_page((unsigned long)pte_table);
200201
put_page(virt_to_page(pmd));
201202
}
@@ -273,7 +274,7 @@ static void unmap_stage2_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
273274
pte_t old_pte = *pte;
274275

275276
kvm_set_pte(pte, __pte(0));
276-
kvm_tlb_flush_vmid_ipa(mmu, addr);
277+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
277278

278279
/* No need to invalidate the cache for device mappings */
279280
if (!kvm_is_device_pfn(pte_pfn(old_pte)))
@@ -302,7 +303,7 @@ static void unmap_stage2_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
302303
pmd_t old_pmd = *pmd;
303304

304305
pmd_clear(pmd);
305-
kvm_tlb_flush_vmid_ipa(mmu, addr);
306+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
306307

307308
kvm_flush_dcache_pmd(old_pmd);
308309

@@ -332,7 +333,7 @@ static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
332333
pud_t old_pud = *pud;
333334

334335
stage2_pud_clear(kvm, pud);
335-
kvm_tlb_flush_vmid_ipa(mmu, addr);
336+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
336337
kvm_flush_dcache_pud(old_pud);
337338
put_page(virt_to_page(pud));
338339
} else {
@@ -1260,7 +1261,7 @@ static int stage2_set_pmd_huge(struct kvm_s2_mmu *mmu,
12601261
*/
12611262
WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
12621263
pmd_clear(pmd);
1263-
kvm_tlb_flush_vmid_ipa(mmu, addr);
1264+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
12641265
} else {
12651266
get_page(virt_to_page(pmd));
12661267
}
@@ -1302,7 +1303,7 @@ static int stage2_set_pud_huge(struct kvm_s2_mmu *mmu,
13021303

13031304
WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
13041305
stage2_pud_clear(kvm, pudp);
1305-
kvm_tlb_flush_vmid_ipa(mmu, addr);
1306+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
13061307
} else {
13071308
get_page(virt_to_page(pudp));
13081309
}
@@ -1451,7 +1452,7 @@ static int stage2_set_pte(struct kvm_s2_mmu *mmu,
14511452
return 0;
14521453

14531454
kvm_set_pte(pte, __pte(0));
1454-
kvm_tlb_flush_vmid_ipa(mmu, addr);
1455+
kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
14551456
} else {
14561457
get_page(virt_to_page(pte));
14571458
}

0 commit comments

Comments
 (0)