Skip to content

Commit b04b331

Browse files
author
Marc Zyngier
committed
Merge remote-tracking branch 'arm64/for-next/sysreg' into kvmarm-master/next
Merge arm64/for-next/sysreg in order to avoid upstream conflicts due to the never ending sysreg repainting... Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents c317c6d + 10453bf commit b04b331

30 files changed

+774
-490
lines changed

arch/arm64/include/asm/assembler.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -384,8 +384,8 @@ alternative_cb_end
384384
.macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1
385385
mrs \tmp0, ID_AA64MMFR0_EL1
386386
// Narrow PARange to fit the PS field in TCR_ELx
387-
ubfx \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
388-
mov \tmp1, #ID_AA64MMFR0_PARANGE_MAX
387+
ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
388+
mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
389389
cmp \tmp0, \tmp1
390390
csel \tmp0, \tmp1, \tmp0, hi
391391
bfi \tcr, \tmp0, \pos, #3
@@ -512,7 +512,7 @@ alternative_endif
512512
*/
513513
.macro reset_pmuserenr_el0, tmpreg
514514
mrs \tmpreg, id_aa64dfr0_el1
515-
sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
515+
sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
516516
cmp \tmpreg, #1 // Skip if no PMU present
517517
b.lt 9000f
518518
msr pmuserenr_el0, xzr // Disable PMU access from EL0
@@ -524,7 +524,7 @@ alternative_endif
524524
*/
525525
.macro reset_amuserenr_el0, tmpreg
526526
mrs \tmpreg, id_aa64pfr0_el1 // Check ID_AA64PFR0_EL1
527-
ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
527+
ubfx \tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
528528
cbz \tmpreg, .Lskip_\@ // Skip if no AMU present
529529
msr_s SYS_AMUSERENR_EL0, xzr // Disable AMU access from EL0
530530
.Lskip_\@:
@@ -612,7 +612,7 @@ alternative_endif
612612
.macro offset_ttbr1, ttbr, tmp
613613
#ifdef CONFIG_ARM64_VA_BITS_52
614614
mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
615-
and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
615+
and \tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
616616
cbnz \tmp, .Lskipoffs_\@
617617
orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
618618
.Lskipoffs_\@ :

arch/arm64/include/asm/cache.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -45,10 +45,6 @@ static inline unsigned int arch_slab_minalign(void)
4545
#define arch_slab_minalign() arch_slab_minalign()
4646
#endif
4747

48-
#define CTR_CACHE_MINLINE_MASK \
49-
(0xf << CTR_EL0_DMINLINE_SHIFT | \
50-
CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT)
51-
5248
#define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
5349

5450
#define ICACHEF_ALIASING 0

arch/arm64/include/asm/cpufeature.h

Lines changed: 33 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -553,7 +553,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
553553
u64 mask = GENMASK_ULL(field + 3, field);
554554

555555
/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
556-
if (val == ID_AA64DFR0_PMUVER_IMP_DEF)
556+
if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
557557
val = 0;
558558

559559
if (val > cap) {
@@ -597,43 +597,43 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
597597

598598
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
599599
{
600-
return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
601-
cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
600+
return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGEND_SHIFT) == 0x1 ||
601+
cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
602602
}
603603

604604
static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
605605
{
606-
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
606+
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL1_SHIFT);
607607

608-
return val == ID_AA64PFR0_ELx_32BIT_64BIT;
608+
return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
609609
}
610610

611611
static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
612612
{
613-
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
613+
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_EL0_SHIFT);
614614

615-
return val == ID_AA64PFR0_ELx_32BIT_64BIT;
615+
return val == ID_AA64PFR0_EL1_ELx_32BIT_64BIT;
616616
}
617617

618618
static inline bool id_aa64pfr0_sve(u64 pfr0)
619619
{
620-
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
620+
u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SVE_SHIFT);
621621

622622
return val > 0;
623623
}
624624

625625
static inline bool id_aa64pfr1_sme(u64 pfr1)
626626
{
627-
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT);
627+
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_SME_SHIFT);
628628

629629
return val > 0;
630630
}
631631

632632
static inline bool id_aa64pfr1_mte(u64 pfr1)
633633
{
634-
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
634+
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);
635635

636-
return val >= ID_AA64PFR1_MTE;
636+
return val >= ID_AA64PFR1_EL1_MTE_MTE2;
637637
}
638638

639639
void __init setup_cpu_features(void);
@@ -659,7 +659,7 @@ static inline bool supports_csv2p3(int scope)
659659
pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
660660

661661
csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
662-
ID_AA64PFR0_CSV2_SHIFT);
662+
ID_AA64PFR0_EL1_CSV2_SHIFT);
663663
return csv2_val == 3;
664664
}
665665

@@ -694,10 +694,10 @@ static inline bool system_supports_4kb_granule(void)
694694

695695
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
696696
val = cpuid_feature_extract_unsigned_field(mmfr0,
697-
ID_AA64MMFR0_TGRAN4_SHIFT);
697+
ID_AA64MMFR0_EL1_TGRAN4_SHIFT);
698698

699-
return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
700-
(val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
699+
return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
700+
(val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
701701
}
702702

703703
static inline bool system_supports_64kb_granule(void)
@@ -707,10 +707,10 @@ static inline bool system_supports_64kb_granule(void)
707707

708708
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
709709
val = cpuid_feature_extract_unsigned_field(mmfr0,
710-
ID_AA64MMFR0_TGRAN64_SHIFT);
710+
ID_AA64MMFR0_EL1_TGRAN64_SHIFT);
711711

712-
return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
713-
(val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
712+
return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
713+
(val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
714714
}
715715

716716
static inline bool system_supports_16kb_granule(void)
@@ -720,10 +720,10 @@ static inline bool system_supports_16kb_granule(void)
720720

721721
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
722722
val = cpuid_feature_extract_unsigned_field(mmfr0,
723-
ID_AA64MMFR0_TGRAN16_SHIFT);
723+
ID_AA64MMFR0_EL1_TGRAN16_SHIFT);
724724

725-
return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
726-
(val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
725+
return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
726+
(val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
727727
}
728728

729729
static inline bool system_supports_mixed_endian_el0(void)
@@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void)
738738

739739
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
740740
val = cpuid_feature_extract_unsigned_field(mmfr0,
741-
ID_AA64MMFR0_BIGENDEL_SHIFT);
741+
ID_AA64MMFR0_EL1_BIGEND_SHIFT);
742742

743743
return val == 0x1;
744744
}
@@ -840,13 +840,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
840840
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
841841
{
842842
switch (parange) {
843-
case ID_AA64MMFR0_PARANGE_32: return 32;
844-
case ID_AA64MMFR0_PARANGE_36: return 36;
845-
case ID_AA64MMFR0_PARANGE_40: return 40;
846-
case ID_AA64MMFR0_PARANGE_42: return 42;
847-
case ID_AA64MMFR0_PARANGE_44: return 44;
848-
case ID_AA64MMFR0_PARANGE_48: return 48;
849-
case ID_AA64MMFR0_PARANGE_52: return 52;
843+
case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
844+
case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
845+
case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
846+
case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
847+
case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
848+
case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
849+
case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
850850
/*
851851
* A future PE could use a value unknown to the kernel.
852852
* However, by the "D10.1.4 Principles of the ID scheme
@@ -868,14 +868,14 @@ static inline bool cpu_has_hw_af(void)
868868

869869
mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
870870
return cpuid_feature_extract_unsigned_field(mmfr1,
871-
ID_AA64MMFR1_HADBS_SHIFT);
871+
ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
872872
}
873873

874874
static inline bool cpu_has_pan(void)
875875
{
876876
u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
877877
return cpuid_feature_extract_unsigned_field(mmfr1,
878-
ID_AA64MMFR1_PAN_SHIFT);
878+
ID_AA64MMFR1_EL1_PAN_SHIFT);
879879
}
880880

881881
#ifdef CONFIG_ARM64_AMU_EXTN
@@ -896,8 +896,8 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
896896
int vmid_bits;
897897

898898
vmid_bits = cpuid_feature_extract_unsigned_field(mmfr1,
899-
ID_AA64MMFR1_VMIDBITS_SHIFT);
900-
if (vmid_bits == ID_AA64MMFR1_VMIDBITS_16)
899+
ID_AA64MMFR1_EL1_VMIDBits_SHIFT);
900+
if (vmid_bits == ID_AA64MMFR1_EL1_VMIDBits_16)
901901
return 16;
902902

903903
/*

arch/arm64/include/asm/el2_setup.h

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@
4040

4141
.macro __init_el2_debug
4242
mrs x1, id_aa64dfr0_el1
43-
sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
43+
sbfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
4444
cmp x0, #1
4545
b.lt .Lskip_pmu_\@ // Skip if no PMU present
4646
mrs x0, pmcr_el0 // Disable debug access traps
@@ -49,7 +49,7 @@
4949
csel x2, xzr, x0, lt // all PMU counters from EL1
5050

5151
/* Statistical profiling */
52-
ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
52+
ubfx x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
5353
cbz x0, .Lskip_spe_\@ // Skip if SPE not present
5454

5555
mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2,
@@ -65,7 +65,7 @@
6565

6666
.Lskip_spe_\@:
6767
/* Trace buffer */
68-
ubfx x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
68+
ubfx x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
6969
cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present
7070

7171
mrs_s x0, SYS_TRBIDR_EL1
@@ -83,7 +83,7 @@
8383
/* LORegions */
8484
.macro __init_el2_lor
8585
mrs x1, id_aa64mmfr1_el1
86-
ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
86+
ubfx x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
8787
cbz x0, .Lskip_lor_\@
8888
msr_s SYS_LORC_EL1, xzr
8989
.Lskip_lor_\@:
@@ -97,7 +97,7 @@
9797
/* GICv3 system register access */
9898
.macro __init_el2_gicv3
9999
mrs x0, id_aa64pfr0_el1
100-
ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
100+
ubfx x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
101101
cbz x0, .Lskip_gicv3_\@
102102

103103
mrs_s x0, SYS_ICC_SRE_EL2
@@ -132,12 +132,12 @@
132132
/* Disable any fine grained traps */
133133
.macro __init_el2_fgt
134134
mrs x1, id_aa64mmfr0_el1
135-
ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
135+
ubfx x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
136136
cbz x1, .Lskip_fgt_\@
137137

138138
mov x0, xzr
139139
mrs x1, id_aa64dfr0_el1
140-
ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
140+
ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
141141
cmp x1, #3
142142
b.lt .Lset_debug_fgt_\@
143143
/* Disable PMSNEVFR_EL1 read and write traps */
@@ -149,7 +149,7 @@
149149

150150
mov x0, xzr
151151
mrs x1, id_aa64pfr1_el1
152-
ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
152+
ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
153153
cbz x1, .Lset_fgt_\@
154154

155155
/* Disable nVHE traps of TPIDR2 and SMPRI */
@@ -162,7 +162,7 @@
162162
msr_s SYS_HFGITR_EL2, xzr
163163

164164
mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
165-
ubfx x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
165+
ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
166166
cbz x1, .Lskip_fgt_\@
167167

168168
msr_s SYS_HAFGRTR_EL2, xzr

arch/arm64/include/asm/hw_breakpoint.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -142,7 +142,7 @@ static inline int get_num_brps(void)
142142
u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
143143
return 1 +
144144
cpuid_feature_extract_unsigned_field(dfr0,
145-
ID_AA64DFR0_BRPS_SHIFT);
145+
ID_AA64DFR0_EL1_BRPs_SHIFT);
146146
}
147147

148148
/* Determine number of WRP registers available. */
@@ -151,7 +151,7 @@ static inline int get_num_wrps(void)
151151
u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
152152
return 1 +
153153
cpuid_feature_extract_unsigned_field(dfr0,
154-
ID_AA64DFR0_WRPS_SHIFT);
154+
ID_AA64DFR0_EL1_WRPs_SHIFT);
155155
}
156156

157157
#endif /* __ASM_BREAKPOINT_H */

arch/arm64/include/asm/kvm_pgtable.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,9 @@
1616
static inline u64 kvm_get_parange(u64 mmfr0)
1717
{
1818
u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
19-
ID_AA64MMFR0_PARANGE_SHIFT);
20-
if (parange > ID_AA64MMFR0_PARANGE_MAX)
21-
parange = ID_AA64MMFR0_PARANGE_MAX;
19+
ID_AA64MMFR0_EL1_PARANGE_SHIFT);
20+
if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
21+
parange = ID_AA64MMFR0_EL1_PARANGE_MAX;
2222

2323
return parange;
2424
}

0 commit comments

Comments
 (0)