@@ -1212,7 +1212,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
         VM_BUG_ON(write_fault && exec_fault);
 
-        if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
+        if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
                 kvm_err("Unexpected L2 read permission error\n");
                 return -EFAULT;
         }
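
For reference, the constants involved (not part of this diff) as they are defined in arch/arm64/include/asm/esr.h; the legacy FSC_* macros in asm/kvm_arm.h were plain aliases for these, which is why this series is a mechanical rename with no functional change. Values quoted from memory and worth double-checking against the tree:

    /* From arch/arm64/include/asm/esr.h (believed current as of this series) */
    #define ESR_ELx_FSC        (0x3F)  /* full fault status code field */
    #define ESR_ELx_FSC_TYPE   (0x3C)  /* FSC with the level bits masked off */
    #define ESR_ELx_FSC_FAULT  (0x04)  /* translation fault */
    #define ESR_ELx_FSC_ACCESS (0x08)  /* access flag fault */
    #define ESR_ELx_FSC_PERM   (0x0C)  /* permission fault */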
@@ -1277,7 +1277,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
          * only exception to this is when dirty logging is enabled at runtime
          * and a write fault needs to collapse a block entry into a table.
          */
-        if (fault_status != FSC_PERM || (logging_active && write_fault)) {
+        if (fault_status != ESR_ELx_FSC_PERM ||
+            (logging_active && write_fault)) {
                 ret = kvm_mmu_topup_memory_cache(memcache,
                                                  kvm_mmu_cache_min_pages(kvm));
                 if (ret)
@@ -1342,15 +1343,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
          * backed by a THP and thus use block mapping if possible.
          */
         if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
-                if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
+                if (fault_status == ESR_ELx_FSC_PERM &&
+                    fault_granule > PAGE_SIZE)
                         vma_pagesize = fault_granule;
                 else
                         vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
                                                                    hva, &pfn,
                                                                    &fault_ipa);
         }
 
-        if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
+        if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
                 /* Check the VMM hasn't introduced a new disallowed VMA */
                 if (kvm_vma_mte_allowed(vma)) {
                         sanitise_mte_tags(kvm, pfn, vma_pagesize);
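
A note on fault_granule, which the hunk above compares against PAGE_SIZE: it is derived earlier in user_mem_abort() from the same ESR level bits that ESR_ELx_FSC_TYPE masks off. Roughly (a sketch from the surrounding mmu.c, not part of this diff):

    fault_level = kvm_vcpu_trap_get_fault_level(vcpu);  /* low FSC bits */
    fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);

A permission fault with fault_granule > PAGE_SIZE therefore means the IPA is already mapped by a block entry, so keeping vma_pagesize at that granule avoids needlessly splitting the block.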
@@ -1376,7 +1378,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
          * permissions only if vma_pagesize equals fault_granule. Otherwise,
          * kvm_pgtable_stage2_map() should be called to change block size.
          */
-        if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
+        if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
                 ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
         else
                 ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
@@ -1441,7 +1443,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
         fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
         is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
-        if (fault_status == FSC_FAULT) {
+        if (fault_status == ESR_ELx_FSC_FAULT) {
                 /* Beyond sanitised PARange (which is the IPA limit) */
                 if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
                         kvm_inject_size_fault(vcpu);
@@ -1476,8 +1478,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
                               kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
         /* Check the stage-2 fault is trans. fault or write fault */
-        if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
-            fault_status != FSC_ACCESS) {
+        if (fault_status != ESR_ELx_FSC_FAULT &&
+            fault_status != ESR_ELx_FSC_PERM &&
+            fault_status != ESR_ELx_FSC_ACCESS) {
                 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                         kvm_vcpu_trap_get_class(vcpu),
                         (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1539,7 +1542,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
         /* Userspace should not be able to register out-of-bounds IPAs */
         VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
 
-        if (fault_status == FSC_ACCESS) {
+        if (fault_status == ESR_ELx_FSC_ACCESS) {
                 handle_access_fault(vcpu, fault_ipa);
                 ret = 1;
                 goto out_unlock;
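
All of the equality checks above compare fault_status against the base ESR_ELx_FSC_* values even though the architectural FSC also encodes the faulting translation level in its low bits. That works because fault_status comes from kvm_vcpu_trap_get_fault_type(), which (per asm/kvm_emulate.h, quoted from memory and worth double-checking) strips those level bits:

    static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
    {
            return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;  /* mask off level bits */
    }

For example, a level-2 permission fault (FSC 0b001110) and a level-3 one (0b001111) both reduce to ESR_ELx_FSC_PERM (0b001100).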