
Commit 71c3c77

Marc Zyngier authored and committed
KVM: arm64: vgic-v3: Convert userspace accessors over to FIELD_GET/FIELD_PREP
The GICv3 userspace accessors are all about dealing with conversion between fields from architectural registers and internal representations. However, and owing to the age of this code, the accessors use a combination of shift/mask that is hard to read.

It is nonetheless easy to make it better by using the FIELD_{GET,PREP} macros that solely rely on a mask. This results in somewhat nicer looking code, and is probably easier to maintain.

Reviewed-by: Reiji Watanabe <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
1 parent cbcf14d commit 71c3c77
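
As a point of reference for the diff below, here is a minimal sketch (not part of the commit) of the transformation being applied, using a PRIbits-style field as the example. The EXAMPLE_PRI_BITS_* constants are hypothetical stand-ins for the kernel's ICC_CTLR_EL1 PRIbits definitions (assumed to cover bits [10:8]); FIELD_GET() and FIELD_PREP() are the <linux/bitfield.h> helpers the commit switches to.

/*
 * Illustration only: how FIELD_GET()/FIELD_PREP() replace an
 * open-coded shift/mask pair. The constants are local stand-ins for
 * the kernel's ICC_CTLR_EL1 PRIbits definitions (assumed bits [10:8]).
 */
#include <linux/bitfield.h>
#include <linux/types.h>

#define EXAMPLE_PRI_BITS_SHIFT	8
#define EXAMPLE_PRI_BITS_MASK	(0x7UL << EXAMPLE_PRI_BITS_SHIFT)

/* Before: mask out the field, then shift it down by hand. */
static inline unsigned int pri_bits_shift_mask(u64 val)
{
	return ((val & EXAMPLE_PRI_BITS_MASK) >> EXAMPLE_PRI_BITS_SHIFT) + 1;
}

/* After: the mask alone describes the field; no separate shift needed. */
static inline unsigned int pri_bits_field_get(u64 val)
{
	return FIELD_GET(EXAMPLE_PRI_BITS_MASK, val) + 1;
}

/* FIELD_PREP() is the inverse: place a value back into the field. */
static inline u64 pri_bits_field_prep(unsigned int pri_bits)
{
	return FIELD_PREP(EXAMPLE_PRI_BITS_MASK, pri_bits - 1);
}

The hunks that follow apply exactly this pattern, field by field, to the vgic-v3 userspace accessors.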

File tree

1 file changed, 27 insertions(+), 33 deletions(-)


arch/arm64/kvm/vgic-sys-reg-v3.c

Lines changed: 27 additions & 33 deletions
@@ -23,39 +23,34 @@ static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	 * Disallow restoring VM state if not supported by this
 	 * hardware.
 	 */
-	host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
-			 ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
+	host_pri_bits = FIELD_GET(ICC_CTLR_EL1_PRI_BITS_MASK, val) + 1;
 	if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
 		return -EINVAL;
 
 	vgic_v3_cpu->num_pri_bits = host_pri_bits;
 
-	host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
-		       ICC_CTLR_EL1_ID_BITS_SHIFT;
+	host_id_bits = FIELD_GET(ICC_CTLR_EL1_ID_BITS_MASK, val);
 	if (host_id_bits > vgic_v3_cpu->num_id_bits)
 		return -EINVAL;
 
 	vgic_v3_cpu->num_id_bits = host_id_bits;
 
-	host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
-		      ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
-	seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
-		ICC_CTLR_EL1_SEIS_SHIFT;
+	host_seis = FIELD_GET(ICH_VTR_SEIS_MASK, kvm_vgic_global_state.ich_vtr_el2);
+	seis = FIELD_GET(ICC_CTLR_EL1_SEIS_MASK, val);
 	if (host_seis != seis)
 		return -EINVAL;
 
-	host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
-		     ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
-	a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
+	host_a3v = FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2);
+	a3v = FIELD_GET(ICC_CTLR_EL1_A3V_MASK, val);
 	if (host_a3v != a3v)
 		return -EINVAL;
 
 	/*
 	 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
 	 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
 	 */
-	vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
-	vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
+	vmcr.cbpr = FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val);
+	vmcr.eoim = FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val);
 	vgic_set_vmcr(vcpu, &vmcr);
 
 	return 0;
@@ -70,20 +65,19 @@ static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 
 	vgic_get_vmcr(vcpu, &vmcr);
 	val = 0;
-	val |= (vgic_v3_cpu->num_pri_bits - 1) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
-	val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
-	val |= ((kvm_vgic_global_state.ich_vtr_el2 &
-		ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
-		ICC_CTLR_EL1_SEIS_SHIFT;
-	val |= ((kvm_vgic_global_state.ich_vtr_el2 &
-		ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
-		ICC_CTLR_EL1_A3V_SHIFT;
+	val |= FIELD_PREP(ICC_CTLR_EL1_PRI_BITS_MASK, vgic_v3_cpu->num_pri_bits - 1);
+	val |= FIELD_PREP(ICC_CTLR_EL1_ID_BITS_MASK, vgic_v3_cpu->num_id_bits);
+	val |= FIELD_PREP(ICC_CTLR_EL1_SEIS_MASK,
+			  FIELD_GET(ICH_VTR_SEIS_MASK,
+				    kvm_vgic_global_state.ich_vtr_el2));
+	val |= FIELD_PREP(ICC_CTLR_EL1_A3V_MASK,
+			  FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2));
 	/*
 	 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
 	 * Extract it directly using ICC_CTLR_EL1 reg definitions.
 	 */
-	val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
-	val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
+	val |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK, vmcr.cbpr);
+	val |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK, vmcr.eoim);
 
 	*valp = val;
 
@@ -96,7 +90,7 @@ static int set_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	vmcr.pmr = (val & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
+	vmcr.pmr = FIELD_GET(ICC_PMR_EL1_MASK, val);
 	vgic_set_vmcr(vcpu, &vmcr);
 
 	return 0;
@@ -108,7 +102,7 @@ static int get_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	*val = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
+	*val = FIELD_PREP(ICC_PMR_EL1_MASK, vmcr.pmr);
 
 	return 0;
 }
@@ -119,7 +113,7 @@ static int set_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	vmcr.bpr = (val & ICC_BPR0_EL1_MASK) >> ICC_BPR0_EL1_SHIFT;
+	vmcr.bpr = FIELD_GET(ICC_BPR0_EL1_MASK, val);
 	vgic_set_vmcr(vcpu, &vmcr);
 
 	return 0;
@@ -131,7 +125,7 @@ static int get_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	*val = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) & ICC_BPR0_EL1_MASK;
+	*val = FIELD_PREP(ICC_BPR0_EL1_MASK, vmcr.bpr);
 
 	return 0;
 }
@@ -143,7 +137,7 @@ static int set_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 
 	vgic_get_vmcr(vcpu, &vmcr);
 	if (!vmcr.cbpr) {
-		vmcr.abpr = (val & ICC_BPR1_EL1_MASK) >> ICC_BPR1_EL1_SHIFT;
+		vmcr.abpr = FIELD_GET(ICC_BPR1_EL1_MASK, val);
 		vgic_set_vmcr(vcpu, &vmcr);
 	}
 
@@ -157,7 +151,7 @@ static int get_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 
 	vgic_get_vmcr(vcpu, &vmcr);
 	if (!vmcr.cbpr)
-		*val = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) & ICC_BPR1_EL1_MASK;
+		*val = FIELD_PREP(ICC_BPR1_EL1_MASK, vmcr.abpr);
 	else
 		*val = min((vmcr.bpr + 1), 7U);
 
@@ -171,7 +165,7 @@ static int set_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	vmcr.grpen0 = (val & ICC_IGRPEN0_EL1_MASK) >> ICC_IGRPEN0_EL1_SHIFT;
+	vmcr.grpen0 = FIELD_GET(ICC_IGRPEN0_EL1_MASK, val);
 	vgic_set_vmcr(vcpu, &vmcr);
 
 	return 0;
@@ -183,7 +177,7 @@ static int get_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	*val = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) & ICC_IGRPEN0_EL1_MASK;
+	*val = FIELD_PREP(ICC_IGRPEN0_EL1_MASK, vmcr.grpen0);
 
 	return 0;
 }
@@ -194,7 +188,7 @@ static int set_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	vmcr.grpen1 = (val & ICC_IGRPEN1_EL1_MASK) >> ICC_IGRPEN1_EL1_SHIFT;
+	vmcr.grpen1 = FIELD_GET(ICC_IGRPEN1_EL1_MASK, val);
 	vgic_set_vmcr(vcpu, &vmcr);
 
 	return 0;
@@ -206,7 +200,7 @@ static int get_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 	struct vgic_vmcr vmcr;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	*val = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) & ICC_IGRPEN1_EL1_MASK;
+	*val = FIELD_GET(ICC_IGRPEN1_EL1_MASK, vmcr.grpen1);
 
 	return 0;
 }
