@@ -58,9 +58,10 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
 }
 
-static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
+static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+				   int level)
 {
-	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa);
+	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
 }
 
 /*
@@ -102,7 +103,7 @@ static void stage2_dissolve_pmd(struct kvm_s2_mmu *mmu, phys_addr_t addr, pmd_t
 		return;
 
 	pmd_clear(pmd);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 	put_page(virt_to_page(pmd));
 }
 
@@ -122,7 +123,7 @@ static void stage2_dissolve_pud(struct kvm_s2_mmu *mmu, phys_addr_t addr, pud_t
 		return;
 
 	stage2_pud_clear(kvm, pudp);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
 	put_page(virt_to_page(pudp));
 }
 
@@ -163,7 +164,7 @@ static void clear_stage2_pgd_entry(struct kvm_s2_mmu *mmu, pgd_t *pgd, phys_addr
 	struct kvm *kvm = mmu->kvm;
 	p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
 	stage2_pgd_clear(kvm, pgd);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	stage2_p4d_free(kvm, p4d_table);
 	put_page(virt_to_page(pgd));
 }
@@ -173,7 +174,7 @@ static void clear_stage2_p4d_entry(struct kvm_s2_mmu *mmu, p4d_t *p4d, phys_addr
 	struct kvm *kvm = mmu->kvm;
 	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);
 	stage2_p4d_clear(kvm, p4d);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	stage2_pud_free(kvm, pud_table);
 	put_page(virt_to_page(p4d));
 }
@@ -185,7 +186,7 @@ static void clear_stage2_pud_entry(struct kvm_s2_mmu *mmu, pud_t *pud, phys_addr
 
 	VM_BUG_ON(stage2_pud_huge(kvm, *pud));
 	stage2_pud_clear(kvm, pud);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	stage2_pmd_free(kvm, pmd_table);
 	put_page(virt_to_page(pud));
 }
@@ -195,7 +196,7 @@ static void clear_stage2_pmd_entry(struct kvm_s2_mmu *mmu, pmd_t *pmd, phys_addr
 	pte_t *pte_table = pte_offset_kernel(pmd, 0);
 	VM_BUG_ON(pmd_thp_or_huge(*pmd));
 	pmd_clear(pmd);
-	kvm_tlb_flush_vmid_ipa(mmu, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	free_page((unsigned long)pte_table);
 	put_page(virt_to_page(pmd));
 }
@@ -273,7 +274,7 @@ static void unmap_stage2_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
 			pte_t old_pte = *pte;
 
 			kvm_set_pte(pte, __pte(0));
-			kvm_tlb_flush_vmid_ipa(mmu, addr);
+			kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
 
 			/* No need to invalidate the cache for device mappings */
 			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
@@ -302,7 +303,7 @@ static void unmap_stage2_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
 				pmd_t old_pmd = *pmd;
 
 				pmd_clear(pmd);
-				kvm_tlb_flush_vmid_ipa(mmu, addr);
+				kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 
 				kvm_flush_dcache_pmd(old_pmd);
 
@@ -332,7 +333,7 @@ static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
 				pud_t old_pud = *pud;
 
 				stage2_pud_clear(kvm, pud);
-				kvm_tlb_flush_vmid_ipa(mmu, addr);
+				kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
 				kvm_flush_dcache_pud(old_pud);
 				put_page(virt_to_page(pud));
 			} else {
@@ -1260,7 +1261,7 @@ static int stage2_set_pmd_huge(struct kvm_s2_mmu *mmu,
 		 */
 		WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
 		pmd_clear(pmd);
-		kvm_tlb_flush_vmid_ipa(mmu, addr);
+		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 	} else {
 		get_page(virt_to_page(pmd));
 	}
@@ -1302,7 +1303,7 @@ static int stage2_set_pud_huge(struct kvm_s2_mmu *mmu,
 
 		WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
 		stage2_pud_clear(kvm, pudp);
-		kvm_tlb_flush_vmid_ipa(mmu, addr);
+		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
 	} else {
 		get_page(virt_to_page(pudp));
 	}
@@ -1451,7 +1452,7 @@ static int stage2_set_pte(struct kvm_s2_mmu *mmu,
 			return 0;
 
 		kvm_set_pte(pte, __pte(0));
-		kvm_tlb_flush_vmid_ipa(mmu, addr);
+		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
 	} else {
 		get_page(virt_to_page(pte));
 	}
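
The hunks above pass four level-hint constants (S2_NO_LEVEL_HINT, S2_PTE_LEVEL, S2_PMD_LEVEL, S2_PUD_LEVEL) whose definitions are not part of this excerpt. A minimal sketch of what they might look like, assuming they follow the ARMv8.4-TTL encoding, where a hint of 1-3 names the translation table level holding the leaf entry and 0 means no level information is provided:

/* Hypothetical definitions: values assume the ARMv8.4-TTL encoding. */
#define S2_NO_LEVEL_HINT	0	/* level unknown: invalidate at any level */
#define S2_PUD_LEVEL		1	/* leaf entry at level 1 (PUD) */
#define S2_PMD_LEVEL		2	/* leaf entry at level 2 (PMD) */
#define S2_PTE_LEVEL		3	/* leaf entry at level 3 (PTE) */

This matches how the hunks use them: the leaf paths (dissolve, unmap, set) know exactly which level the mapping lived at and can pass it so the hardware may restrict the invalidation to entries of that size, while the table-teardown paths (clear_stage2_*_entry) may have TLB entries cached from any level below the cleared entry and therefore pass no hint.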