Skip to content

Commit b0803ba

Browse files
author
Marc Zyngier
committed
KVM: arm64: Convert FSC_* over to ESR_ELx_FSC_*
The former is an AArch32 legacy, so let's move over to the verbose (and strictly identical) version. This involves moving some of the #defines that were private to KVM into the more generic esr.h.

Signed-off-by: Marc Zyngier <[email protected]>
1 parent b8f8d19 commit b0803ba

File tree

6 files changed

+33
-36
lines changed

6 files changed

+33
-36
lines changed

arch/arm64/include/asm/esr.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,15 @@
114114
#define ESR_ELx_FSC_ACCESS (0x08)
115115
#define ESR_ELx_FSC_FAULT (0x04)
116116
#define ESR_ELx_FSC_PERM (0x0C)
117+
#define ESR_ELx_FSC_SEA_TTW0 (0x14)
118+
#define ESR_ELx_FSC_SEA_TTW1 (0x15)
119+
#define ESR_ELx_FSC_SEA_TTW2 (0x16)
120+
#define ESR_ELx_FSC_SEA_TTW3 (0x17)
121+
#define ESR_ELx_FSC_SECC (0x18)
122+
#define ESR_ELx_FSC_SECC_TTW0 (0x1c)
123+
#define ESR_ELx_FSC_SECC_TTW1 (0x1d)
124+
#define ESR_ELx_FSC_SECC_TTW2 (0x1e)
125+
#define ESR_ELx_FSC_SECC_TTW3 (0x1f)
117126

118127
/* ISS field definitions for Data Aborts */
119128
#define ESR_ELx_ISV_SHIFT (24)

arch/arm64/include/asm/kvm_arm.h

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -319,21 +319,6 @@
319319
BIT(18) | \
320320
GENMASK(16, 15))
321321

322-
/* For compatibility with fault code shared with 32-bit */
323-
#define FSC_FAULT ESR_ELx_FSC_FAULT
324-
#define FSC_ACCESS ESR_ELx_FSC_ACCESS
325-
#define FSC_PERM ESR_ELx_FSC_PERM
326-
#define FSC_SEA ESR_ELx_FSC_EXTABT
327-
#define FSC_SEA_TTW0 (0x14)
328-
#define FSC_SEA_TTW1 (0x15)
329-
#define FSC_SEA_TTW2 (0x16)
330-
#define FSC_SEA_TTW3 (0x17)
331-
#define FSC_SECC (0x18)
332-
#define FSC_SECC_TTW0 (0x1c)
333-
#define FSC_SECC_TTW1 (0x1d)
334-
#define FSC_SECC_TTW2 (0x1e)
335-
#define FSC_SECC_TTW3 (0x1f)
336-
337322
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
338323
#define HPFAR_MASK (~UL(0xf))
339324
/*

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -349,16 +349,16 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *v
349349
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
350350
{
351351
switch (kvm_vcpu_trap_get_fault(vcpu)) {
352-
case FSC_SEA:
353-
case FSC_SEA_TTW0:
354-
case FSC_SEA_TTW1:
355-
case FSC_SEA_TTW2:
356-
case FSC_SEA_TTW3:
357-
case FSC_SECC:
358-
case FSC_SECC_TTW0:
359-
case FSC_SECC_TTW1:
360-
case FSC_SECC_TTW2:
361-
case FSC_SECC_TTW3:
352+
case ESR_ELx_FSC_EXTABT:
353+
case ESR_ELx_FSC_SEA_TTW0:
354+
case ESR_ELx_FSC_SEA_TTW1:
355+
case ESR_ELx_FSC_SEA_TTW2:
356+
case ESR_ELx_FSC_SEA_TTW3:
357+
case ESR_ELx_FSC_SECC:
358+
case ESR_ELx_FSC_SECC_TTW0:
359+
case ESR_ELx_FSC_SECC_TTW1:
360+
case ESR_ELx_FSC_SECC_TTW2:
361+
case ESR_ELx_FSC_SECC_TTW3:
362362
return true;
363363
default:
364364
return false;

arch/arm64/kvm/hyp/include/hyp/fault.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
6060
*/
6161
if (!(esr & ESR_ELx_S1PTW) &&
6262
(cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
63-
(esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
63+
(esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
6464
if (!__translate_far_to_hpfar(far, &hpfar))
6565
return false;
6666
} else {

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -367,7 +367,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
367367
if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
368368
bool valid;
369369

370-
valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
370+
valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
371371
kvm_vcpu_dabt_isvalid(vcpu) &&
372372
!kvm_vcpu_abt_issea(vcpu) &&
373373
!kvm_vcpu_abt_iss1tw(vcpu);

arch/arm64/kvm/mmu.c

Lines changed: 12 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1212,7 +1212,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
12121212
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
12131213
VM_BUG_ON(write_fault && exec_fault);
12141214

1215-
if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
1215+
if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
12161216
kvm_err("Unexpected L2 read permission error\n");
12171217
return -EFAULT;
12181218
}
@@ -1277,7 +1277,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
12771277
* only exception to this is when dirty logging is enabled at runtime
12781278
* and a write fault needs to collapse a block entry into a table.
12791279
*/
1280-
if (fault_status != FSC_PERM || (logging_active && write_fault)) {
1280+
if (fault_status != ESR_ELx_FSC_PERM ||
1281+
(logging_active && write_fault)) {
12811282
ret = kvm_mmu_topup_memory_cache(memcache,
12821283
kvm_mmu_cache_min_pages(kvm));
12831284
if (ret)
@@ -1342,15 +1343,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
13421343
* backed by a THP and thus use block mapping if possible.
13431344
*/
13441345
if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
1345-
if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
1346+
if (fault_status == ESR_ELx_FSC_PERM &&
1347+
fault_granule > PAGE_SIZE)
13461348
vma_pagesize = fault_granule;
13471349
else
13481350
vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
13491351
hva, &pfn,
13501352
&fault_ipa);
13511353
}
13521354

1353-
if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
1355+
if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
13541356
/* Check the VMM hasn't introduced a new disallowed VMA */
13551357
if (kvm_vma_mte_allowed(vma)) {
13561358
sanitise_mte_tags(kvm, pfn, vma_pagesize);
@@ -1376,7 +1378,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
13761378
* permissions only if vma_pagesize equals fault_granule. Otherwise,
13771379
* kvm_pgtable_stage2_map() should be called to change block size.
13781380
*/
1379-
if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
1381+
if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
13801382
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
13811383
else
13821384
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
@@ -1441,7 +1443,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
14411443
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
14421444
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
14431445

1444-
if (fault_status == FSC_FAULT) {
1446+
if (fault_status == ESR_ELx_FSC_FAULT) {
14451447
/* Beyond sanitised PARange (which is the IPA limit) */
14461448
if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
14471449
kvm_inject_size_fault(vcpu);
@@ -1476,8 +1478,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
14761478
kvm_vcpu_get_hfar(vcpu), fault_ipa);
14771479

14781480
/* Check the stage-2 fault is trans. fault or write fault */
1479-
if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1480-
fault_status != FSC_ACCESS) {
1481+
if (fault_status != ESR_ELx_FSC_FAULT &&
1482+
fault_status != ESR_ELx_FSC_PERM &&
1483+
fault_status != ESR_ELx_FSC_ACCESS) {
14811484
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
14821485
kvm_vcpu_trap_get_class(vcpu),
14831486
(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1539,7 +1542,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
15391542
/* Userspace should not be able to register out-of-bounds IPAs */
15401543
VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
15411544

1542-
if (fault_status == FSC_ACCESS) {
1545+
if (fault_status == ESR_ELx_FSC_ACCESS) {
15431546
handle_access_fault(vcpu, fault_ipa);
15441547
ret = 1;
15451548
goto out_unlock;

0 commit comments

Comments (0)