 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
 #include <asm/kvm_ras.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -31,6 +32,8 @@ static phys_addr_t __ro_after_init hyp_idmap_vector;
 
 static unsigned long __ro_after_init io_map_base;
 
+#define KVM_PGT_FN(fn)		(!is_protected_kvm_enabled() ? fn : p ## fn)
+
 static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
					    phys_addr_t size)
 {
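The KVM_PGT_FN() helper added above relies on C token pasting: wrapping a kvm_pgtable_* call routes it to the pkvm_* variant of the same name whenever protected KVM is enabled. Below is a minimal standalone sketch of how a wrapped call expands; the *_demo functions and the hard-coded is_protected_kvm_enabled() stub are illustrative stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins: the real callers wrap kvm_pgtable_* functions,
 * whose pkvm_* counterparts share the same prototypes. */
static int kvm_pgtable_stage2_demo(int x)  { puts("host pgtable path"); return x; }
static int pkvm_pgtable_stage2_demo(int x) { puts("pKVM pgtable path"); return x; }

/* Stubbed for the demo; the kernel helper reports whether pKVM is enabled. */
static bool is_protected_kvm_enabled(void) { return true; }

/* Same shape as the macro in the diff: 'p ## fn' pastes a leading 'p' onto
 * the wrapped identifier, and the ternary picks one of the two functions. */
#define KVM_PGT_FN(fn)	(!is_protected_kvm_enabled() ? fn : p ## fn)

int main(void)
{
	/* Expands to:
	 * (!is_protected_kvm_enabled() ? kvm_pgtable_stage2_demo
	 *				: pkvm_pgtable_stage2_demo)(42);
	 */
	return KVM_PGT_FN(kvm_pgtable_stage2_demo)(42);
}

Because the ternary evaluates to an ordinary function pointer, the wrapper also works where the wrapped function is passed as a callback, as in the stage2_apply_range() hunks below, provided both variants keep identical signatures.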
@@ -147,7 +150,7 @@ static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr,
 			return -EINVAL;
 
 		next = __stage2_range_addr_end(addr, end, chunk_size);
-		ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
+		ret = KVM_PGT_FN(kvm_pgtable_stage2_split)(pgt, addr, next - addr, cache);
 		if (ret)
 			break;
 	} while (addr = next, addr != end);
@@ -168,15 +171,23 @@ static bool memslot_is_logging(struct kvm_memory_slot *memslot)
  */
 int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
 {
-	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
+	if (is_protected_kvm_enabled())
+		kvm_call_hyp_nvhe(__pkvm_tlb_flush_vmid, kvm->arch.pkvm.handle);
+	else
+		kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
 	return 0;
 }
 
 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
				      gfn_t gfn, u64 nr_pages)
 {
-	kvm_tlb_flush_vmid_range(&kvm->arch.mmu,
-				 gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
+	u64 size = nr_pages << PAGE_SHIFT;
+	u64 addr = gfn << PAGE_SHIFT;
+
+	if (is_protected_kvm_enabled())
+		kvm_call_hyp_nvhe(__pkvm_tlb_flush_vmid, kvm->arch.pkvm.handle);
+	else
+		kvm_tlb_flush_vmid_range(&kvm->arch.mmu, addr, size);
 	return 0;
 }
 
@@ -225,7 +236,7 @@ static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head)
 	void *pgtable = page_to_virt(page);
 	s8 level = page_private(page);
 
-	kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level);
+	KVM_PGT_FN(kvm_pgtable_stage2_free_unlinked)(&kvm_s2_mm_ops, pgtable, level);
 }
 
 static void stage2_free_unlinked_table(void *addr, s8 level)
@@ -324,7 +335,7 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
 	WARN_ON(size & ~PAGE_MASK);
-	WARN_ON(stage2_apply_range(mmu, start, end, kvm_pgtable_stage2_unmap,
+	WARN_ON(stage2_apply_range(mmu, start, end, KVM_PGT_FN(kvm_pgtable_stage2_unmap),
				   may_block));
 }
 
@@ -336,7 +347,7 @@ void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
 
 void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
 {
-	stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_flush);
+	stage2_apply_range_resched(mmu, addr, end, KVM_PGT_FN(kvm_pgtable_stage2_flush));
 }
 
 static void stage2_flush_memslot(struct kvm *kvm,
@@ -942,10 +953,14 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
 		return -ENOMEM;
 
 	mmu->arch = &kvm->arch;
-	err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
+	err = KVM_PGT_FN(kvm_pgtable_stage2_init)(pgt, mmu, &kvm_s2_mm_ops);
 	if (err)
 		goto out_free_pgtable;
 
+	mmu->pgt = pgt;
+	if (is_protected_kvm_enabled())
+		return 0;
+
 	mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
 	if (!mmu->last_vcpu_ran) {
 		err = -ENOMEM;
@@ -959,7 +974,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
 	mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
 	mmu->split_page_cache.gfp_zero = __GFP_ZERO;
 
-	mmu->pgt = pgt;
 	mmu->pgd_phys = __pa(pgt->pgd);
 
 	if (kvm_is_nested_s2_mmu(kvm, mmu))
@@ -968,7 +982,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
 	return 0;
 
 out_destroy_pgtable:
-	kvm_pgtable_stage2_destroy(pgt);
+	KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
 out_free_pgtable:
 	kfree(pgt);
 	return err;
@@ -1065,7 +1079,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
 	write_unlock(&kvm->mmu_lock);
 
 	if (pgt) {
-		kvm_pgtable_stage2_destroy(pgt);
+		KVM_PGT_FN(kvm_pgtable_stage2_destroy)(pgt);
 		kfree(pgt);
 	}
 }
@@ -1082,16 +1096,24 @@ static void *hyp_mc_alloc_fn(void *unused)
 
 void free_hyp_memcache(struct kvm_hyp_memcache *mc)
 {
-	if (is_protected_kvm_enabled())
-		__free_hyp_memcache(mc, hyp_mc_free_fn,
-				    kvm_host_va, NULL);
+	if (!is_protected_kvm_enabled())
+		return;
+
+	kfree(mc->mapping);
+	__free_hyp_memcache(mc, hyp_mc_free_fn, kvm_host_va, NULL);
 }
 
 int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
 {
 	if (!is_protected_kvm_enabled())
 		return 0;
 
+	if (!mc->mapping) {
+		mc->mapping = kzalloc(sizeof(struct pkvm_mapping), GFP_KERNEL_ACCOUNT);
+		if (!mc->mapping)
+			return -ENOMEM;
+	}
+
 	return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
				     kvm_host_pa, NULL);
 }
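For orientation, a hedged usage sketch of the new memcache pairing when pKVM is enabled: topup_hyp_memcache() now lazily allocates the mc->mapping cookie on the first top-up, and free_hyp_memcache() releases it together with the cached pages. The caller, the min_pages value, and the error handling below are illustrative, not taken from the commit.

/* Illustrative fragment only -- not code from this commit. */
struct kvm_hyp_memcache mc = {};
unsigned long min_pages = 4;			/* assumed value for the sketch */
int ret;

ret = topup_hyp_memcache(&mc, min_pages);	/* allocates mc.mapping if still NULL */
if (ret)
	return ret;				/* -ENOMEM if the mapping or pages can't be allocated */

/* ... mc is handed to the hypervisor to back stage-2 page-table allocations ... */

free_hyp_memcache(&mc);				/* kfree(mc.mapping) + frees the cached pages */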
@@ -1130,8 +1152,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			break;
 
 		write_lock(&kvm->mmu_lock);
-		ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
-					     &cache, 0);
+		ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, addr, PAGE_SIZE,
+							 pa, prot, &cache, 0);
 		write_unlock(&kvm->mmu_lock);
 		if (ret)
 			break;
@@ -1151,7 +1173,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
  */
 void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
 {
-	stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_wrprotect);
+	stage2_apply_range_resched(mmu, addr, end, KVM_PGT_FN(kvm_pgtable_stage2_wrprotect));
 }
 
 /**
@@ -1442,9 +1464,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	unsigned long mmu_seq;
 	phys_addr_t ipa = fault_ipa;
 	struct kvm *kvm = vcpu->kvm;
-	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
 	struct vm_area_struct *vma;
 	short vma_shift;
+	void *memcache;
 	gfn_t gfn;
 	kvm_pfn_t pfn;
 	bool logging_active = memslot_is_logging(memslot);
@@ -1472,8 +1494,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * and a write fault needs to collapse a block entry into a table.
 	 */
 	if (!fault_is_perm || (logging_active && write_fault)) {
-		ret = kvm_mmu_topup_memory_cache(memcache,
-						 kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
+		int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
+
+		if (!is_protected_kvm_enabled()) {
+			memcache = &vcpu->arch.mmu_page_cache;
+			ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
+		} else {
+			memcache = &vcpu->arch.pkvm_memcache;
+			ret = topup_hyp_memcache(memcache, min_pages);
+		}
 		if (ret)
 			return ret;
 	}
@@ -1494,7 +1523,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * logging_active is guaranteed to never be true for VM_PFNMAP
 	 * memslots.
 	 */
-	if (logging_active) {
+	if (logging_active || is_protected_kvm_enabled()) {
 		force_pte = true;
 		vma_shift = PAGE_SHIFT;
 	} else {
@@ -1634,7 +1663,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		prot |= kvm_encode_nested_level(nested);
 	}
 
-	read_lock(&kvm->mmu_lock);
+	kvm_fault_lock(kvm);
 	pgt = vcpu->arch.hw_mmu->pgt;
 	if (mmu_invalidate_retry(kvm, mmu_seq)) {
 		ret = -EAGAIN;
@@ -1696,16 +1725,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		 * PTE, which will be preserved.
 		 */
 		prot &= ~KVM_NV_GUEST_MAP_SZ;
-		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot, flags);
+		ret = KVM_PGT_FN(kvm_pgtable_stage2_relax_perms)(pgt, fault_ipa, prot, flags);
 	} else {
-		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
+		ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault_ipa, vma_pagesize,
					     __pfn_to_phys(pfn), prot,
					     memcache, flags);
 	}
 
 out_unlock:
 	kvm_release_faultin_page(kvm, page, !!ret, writable);
-	read_unlock(&kvm->mmu_lock);
+	kvm_fault_unlock(kvm);
 
 	/* Mark the page dirty only if the fault is handled successfully */
 	if (writable && !ret)
@@ -1724,7 +1753,7 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 
 	read_lock(&vcpu->kvm->mmu_lock);
 	mmu = vcpu->arch.hw_mmu;
-	kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa, flags);
+	KVM_PGT_FN(kvm_pgtable_stage2_mkyoung)(mmu->pgt, fault_ipa, flags);
 	read_unlock(&vcpu->kvm->mmu_lock);
 }
 
@@ -1764,7 +1793,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	}
 
 	/* Falls between the IPA range and the PARange? */
-	if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+	if (fault_ipa >= BIT_ULL(VTCR_EL2_IPA(vcpu->arch.hw_mmu->vtcr))) {
 		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
 
 		if (is_iabt)
@@ -1930,7 +1959,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	if (!kvm->arch.mmu.pgt)
 		return false;
 
-	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+	return KVM_PGT_FN(kvm_pgtable_stage2_test_clear_young)(kvm->arch.mmu.pgt,
						   range->start << PAGE_SHIFT,
						   size, true);
	/*
@@ -1946,7 +1975,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	if (!kvm->arch.mmu.pgt)
 		return false;
 
-	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+	return KVM_PGT_FN(kvm_pgtable_stage2_test_clear_young)(kvm->arch.mmu.pgt,
						   range->start << PAGE_SHIFT,
						   size, false);
 }