
Commit 0843e0c

Marc Zyngier authored and oupton committed
KVM: arm64: Get rid of ARM64_FEATURE_MASK()
The ARM64_FEATURE_MASK() macro was a hack introduced while the automatic generation of the sysreg encodings was being put in place, and was too unreliable to be entirely trusted. We are in a better place now, and we can really do without this macro. Get rid of it altogether.

Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
1 parent 7a765aa commit 0843e0c
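
To make the mechanics concrete, here is a minimal, self-contained userspace sketch of what the conversion means at a call site. The mask value and the local field_get() helper are illustrative stand-ins for the generated sysreg definitions and for FIELD_GET() from <linux/bitfield.h>, and defining the bare field name as an alias of its _MASK definition is an assumption made only for this sketch; only the shape of the change is meant to be exact.

#include <stdint.h>
#include <stdio.h>

/* Generated sysreg headers provide a mask per field; CSV2 is bits [59:56]. */
#define ID_AA64PFR0_EL1_CSV2_MASK   (0xfULL << 56)
/* Assumption for this sketch only: the bare field name is usable as that mask. */
#define ID_AA64PFR0_EL1_CSV2        ID_AA64PFR0_EL1_CSV2_MASK

/* What the removed compatibility macro did: paste _MASK onto its argument. */
#define ARM64_FEATURE_MASK(x)       (x##_MASK)

/* Local stand-in for FIELD_GET(): extract the field described by a mask. */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
        return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
        uint64_t pfr0 = 2ULL << 56;     /* pretend ID_AA64PFR0_EL1.CSV2 == 2 */

        /* Old style: go through the compatibility macro... */
        printf("CSV2 = %llu\n", (unsigned long long)
               field_get(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), pfr0));
        /* ...new style: use the generated definition directly. */
        printf("CSV2 = %llu\n", (unsigned long long)
               field_get(ID_AA64PFR0_EL1_CSV2, pfr0));
        return 0;
}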

File tree

11 files changed: +44 -50 lines changed


arch/arm64/include/asm/sysreg.h

Lines changed: 0 additions & 3 deletions
@@ -1146,9 +1146,6 @@
 
 #define ARM64_FEATURE_FIELD_BITS        4
 
-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x)   (x##_MASK)
-
 #ifdef __ASSEMBLY__
 
 .macro  mrs_s, rt, sreg

arch/arm64/kvm/arm.c

Lines changed: 4 additions & 4 deletions
@@ -2404,12 +2404,12 @@ static u64 get_hyp_id_aa64pfr0_el1(void)
          */
         u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
-        val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
-                 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+        val &= ~(ID_AA64PFR0_EL1_CSV2 |
+                 ID_AA64PFR0_EL1_CSV3);
 
-        val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+        val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV2,
                           arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
-        val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+        val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV3,
                           arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
 
         return val;
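
The get_hyp_id_aa64pfr0_el1() hunk above is the usual two-step rewrite of an ID-register field: clear the field via its mask, then insert the new value with FIELD_PREP(). A compressed sketch of that pattern, using a hypothetical 4-bit field mask in place of the generated ID_AA64PFR0_EL1_CSV2 definition and a local macro in place of the kernel's FIELD_PREP():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 4-bit field at bits [59:56], standing in for ID_AA64PFR0_EL1_CSV2. */
#define EXAMPLE_CSV2    (0xfULL << 56)

/* Same idea as FIELD_PREP(): shift a value into the field described by mask. */
#define EXAMPLE_FIELD_PREP(mask, val) \
        ((((uint64_t)(val)) << __builtin_ctzll(mask)) & (mask))

/* Clear the field, then advertise 0 or 1 in it, mirroring the hunk above. */
static uint64_t set_csv2(uint64_t reg, int unaffected)
{
        reg &= ~EXAMPLE_CSV2;
        reg |= EXAMPLE_FIELD_PREP(EXAMPLE_CSV2, unaffected ? 1 : 0);
        return reg;
}

int main(void)
{
        printf("0x%llx\n", (unsigned long long)set_csv2(0, 1));
        return 0;
}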

arch/arm64/kvm/sys_regs.c

Lines changed: 20 additions & 20 deletions
@@ -1615,18 +1615,18 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                 break;
         case SYS_ID_AA64ISAR1_EL1:
                 if (!vcpu_has_ptrauth(vcpu))
-                        val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
-                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
-                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
-                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
+                        val &= ~(ID_AA64ISAR1_EL1_APA |
+                                 ID_AA64ISAR1_EL1_API |
+                                 ID_AA64ISAR1_EL1_GPA |
+                                 ID_AA64ISAR1_EL1_GPI);
                 break;
         case SYS_ID_AA64ISAR2_EL1:
                 if (!vcpu_has_ptrauth(vcpu))
-                        val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
-                                 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
+                        val &= ~(ID_AA64ISAR2_EL1_APA3 |
+                                 ID_AA64ISAR2_EL1_GPA3);
                 if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
                     has_broken_cntvoff())
-                        val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
+                        val &= ~ID_AA64ISAR2_EL1_WFxT;
                 break;
         case SYS_ID_AA64ISAR3_EL1:
                 val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;
@@ -1642,7 +1642,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                         ID_AA64MMFR3_EL1_S1PIE;
                 break;
         case SYS_ID_MMFR4_EL1:
-                val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
+                val &= ~ID_MMFR4_EL1_CCIDX;
                 break;
         }
 
@@ -1828,22 +1828,22 @@ static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
         u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 
         if (!kvm_has_mte(vcpu->kvm)) {
-                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
-                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
+                val &= ~ID_AA64PFR1_EL1_MTE;
+                val &= ~ID_AA64PFR1_EL1_MTE_frac;
         }
 
         if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
               SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
-                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RAS_frac);
-
-        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
-        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
-        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
-        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
-        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
-        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
-        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
-        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
+                val &= ~ID_AA64PFR1_EL1_RAS_frac;
+
+        val &= ~ID_AA64PFR1_EL1_SME;
+        val &= ~ID_AA64PFR1_EL1_RNDR_trap;
+        val &= ~ID_AA64PFR1_EL1_NMI;
+        val &= ~ID_AA64PFR1_EL1_GCS;
+        val &= ~ID_AA64PFR1_EL1_THE;
+        val &= ~ID_AA64PFR1_EL1_MTEX;
+        val &= ~ID_AA64PFR1_EL1_PFAR;
+        val &= ~ID_AA64PFR1_EL1_MPAM_frac;
 
         return val;
 }

tools/arch/arm64/include/asm/sysreg.h

Lines changed: 0 additions & 3 deletions
@@ -1080,9 +1080,6 @@
 
 #define ARM64_FEATURE_FIELD_BITS        4
 
-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x)   (x##_MASK)
-
 #ifdef __ASSEMBLY__
 
 .macro  mrs_s, rt, sreg

tools/testing/selftests/kvm/arm64/aarch32_id_regs.c

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
 
         val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 
-        el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+        el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
         return el0 == ID_AA64PFR0_EL1_EL0_IMP;
 }
 

tools/testing/selftests/kvm/arm64/debug-exceptions.c

Lines changed: 6 additions & 6 deletions
@@ -116,12 +116,12 @@ static void reset_debug_state(void)
 
         /* Reset all bcr/bvr/wcr/wvr registers */
         dfr0 = read_sysreg(id_aa64dfr0_el1);
-        brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0);
+        brps = FIELD_GET(ID_AA64DFR0_EL1_BRPs, dfr0);
         for (i = 0; i <= brps; i++) {
                 write_dbgbcr(i, 0);
                 write_dbgbvr(i, 0);
         }
-        wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0);
+        wrps = FIELD_GET(ID_AA64DFR0_EL1_WRPs, dfr0);
         for (i = 0; i <= wrps; i++) {
                 write_dbgwcr(i, 0);
                 write_dbgwvr(i, 0);
@@ -418,7 +418,7 @@ static void guest_code_ss(int test_cnt)
 
 static int debug_version(uint64_t id_aa64dfr0)
 {
-        return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0);
+        return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0);
 }
 
 static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn)
@@ -539,14 +539,14 @@ void test_guest_debug_exceptions_all(uint64_t aa64dfr0)
         int b, w, c;
 
         /* Number of breakpoints */
-        brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1;
+        brp_num = FIELD_GET(ID_AA64DFR0_EL1_BRPs, aa64dfr0) + 1;
         __TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required");
 
         /* Number of watchpoints */
-        wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1;
+        wrp_num = FIELD_GET(ID_AA64DFR0_EL1_WRPs, aa64dfr0) + 1;
 
         /* Number of context aware breakpoints */
-        ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1;
+        ctx_brp_num = FIELD_GET(ID_AA64DFR0_EL1_CTX_CMPs, aa64dfr0) + 1;
 
         pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__,
                  brp_num, wrp_num, ctx_brp_num);

tools/testing/selftests/kvm/arm64/no-vgic-v3.c

Lines changed: 2 additions & 2 deletions
@@ -54,7 +54,7 @@ static void guest_code(void)
          * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having
          * hidden the feature at runtime without any other userspace action.
          */
-        __GUEST_ASSERT(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC),
+        __GUEST_ASSERT(FIELD_GET(ID_AA64PFR0_EL1_GIC,
                                  read_sysreg(id_aa64pfr0_el1)) == 0,
                         "GICv3 wrongly advertised");
 
@@ -165,7 +165,7 @@ int main(int argc, char *argv[])
 
         vm = vm_create_with_one_vcpu(&vcpu, NULL);
         pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
-        __TEST_REQUIRE(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), pfr0),
+        __TEST_REQUIRE(FIELD_GET(ID_AA64PFR0_EL1_GIC, pfr0),
                        "GICv3 not supported.");
         kvm_vm_free(vm);
 

tools/testing/selftests/kvm/arm64/page_fault_test.c

Lines changed: 3 additions & 3 deletions
@@ -95,14 +95,14 @@ static bool guest_check_lse(void)
         uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
         uint64_t atomic;
 
-        atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC), isar0);
+        atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0);
         return atomic >= 2;
 }
 
 static bool guest_check_dc_zva(void)
 {
         uint64_t dczid = read_sysreg(dczid_el0);
-        uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_DZP), dczid);
+        uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid);
 
         return dzp == 0;
 }
@@ -195,7 +195,7 @@ static bool guest_set_ha(void)
         uint64_t hadbs, tcr;
 
         /* Skip if HA is not supported. */
-        hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS), mmfr1);
+        hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1);
         if (hadbs == 0)
                 return false;
 

tools/testing/selftests/kvm/arm64/set_id_regs.c

Lines changed: 4 additions & 4 deletions
@@ -594,8 +594,8 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
          */
         val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
 
-        mte = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), val);
-        mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+        mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val);
+        mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
         if (mte != ID_AA64PFR1_EL1_MTE_MTE2 ||
             mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) {
                 ksft_test_result_skip("MTE_ASYNC or MTE_ASYMM are supported, nothing to test\n");
@@ -612,7 +612,7 @@ static void test_user_set_mte_reg(struct kvm_vcpu *vcpu)
         }
 
         val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
-        mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val);
+        mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val);
         if (mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI)
                 ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac=0 accepted and still 0xF\n");
         else
@@ -774,7 +774,7 @@ int main(void)
 
         /* Check for AARCH64 only system */
         val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
-        el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+        el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val);
         aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP);
 
         ksft_print_header();

tools/testing/selftests/kvm/arm64/vpmu_counter_access.c

Lines changed: 1 addition & 1 deletion
@@ -441,7 +441,7 @@ static void create_vpmu_vm(void *guest_code)
 
         /* Make sure that PMUv3 support is indicated in the ID register */
         dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
-        pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
+        pmuver = FIELD_GET(ID_AA64DFR0_EL1_PMUVer, dfr0);
         TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
                     pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
                     "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
