Skip to content

Commit 8800b7c

Browse files
author
Marc Zyngier
committed
KVM: arm64: Add RMW specific sysreg accessor
In a number of cases, we perform a Read-Modify-Write operation on a system register, meaning that we would apply the RESx masks twice. Instead, provide a new accessor that performs this RMW operation, allowing the masks to be applied exactly once per operation.

Reviewed-by: Miguel Luis <[email protected]>
Reviewed-by: Oliver Upton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Marc Zyngier <[email protected]>
1 parent 6678791 commit 8800b7c

File tree

6 files changed

+32
-21
lines changed

6 files changed

+32
-21
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1118,6 +1118,17 @@ u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
11181118
ctxt_sys_reg(ctxt, (r)) = __v; \
11191119
} while (0)
11201120

1121+
#define __vcpu_rmw_sys_reg(v, r, op, val) \
1122+
do { \
1123+
const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
1124+
u64 __v = ctxt_sys_reg(ctxt, (r)); \
1125+
__v op (val); \
1126+
if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
1127+
__v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
1128+
\
1129+
ctxt_sys_reg(ctxt, (r)) = __v; \
1130+
} while (0)
1131+
11211132
#define __vcpu_sys_reg(v,r) \
11221133
(*({ \
11231134
const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \

arch/arm64/kvm/debug.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -216,9 +216,9 @@ void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu)
216216
void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
217217
{
218218
if (val & OSLAR_EL1_OSLK)
219-
__vcpu_sys_reg(vcpu, OSLSR_EL1) |= OSLSR_EL1_OSLK;
219+
__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, |=, OSLSR_EL1_OSLK);
220220
else
221-
__vcpu_sys_reg(vcpu, OSLSR_EL1) &= ~OSLSR_EL1_OSLK;
221+
__vcpu_rmw_sys_reg(vcpu, OSLSR_EL1, &=, ~OSLSR_EL1_OSLK);
222222

223223
preempt_disable();
224224
kvm_arch_vcpu_put(vcpu);

arch/arm64/kvm/hyp/vhe/sysreg-sr.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,8 +70,8 @@ static void __sysreg_save_vel2_state(struct kvm_vcpu *vcpu)
7070
*/
7171
val = read_sysreg_el1(SYS_CNTKCTL);
7272
val &= CNTKCTL_VALID_BITS;
73-
__vcpu_sys_reg(vcpu, CNTHCTL_EL2) &= ~CNTKCTL_VALID_BITS;
74-
__vcpu_sys_reg(vcpu, CNTHCTL_EL2) |= val;
73+
__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, &=, ~CNTKCTL_VALID_BITS);
74+
__vcpu_rmw_sys_reg(vcpu, CNTHCTL_EL2, |=, val);
7575
}
7676

7777
__vcpu_assign_sys_reg(vcpu, SP_EL2, read_sysreg(sp_el1));

arch/arm64/kvm/nested.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1757,7 +1757,7 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
17571757

17581758
out:
17591759
for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
1760-
(void)__vcpu_sys_reg(vcpu, sr);
1760+
__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);
17611761

17621762
return 0;
17631763
}

arch/arm64/kvm/pmu-emul.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -510,7 +510,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
510510
continue;
511511

512512
/* Mark overflow */
513-
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
513+
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(i));
514514

515515
if (kvm_pmu_counter_can_chain(pmc))
516516
kvm_pmu_counter_increment(vcpu, BIT(i + 1),
@@ -556,7 +556,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
556556
perf_event->attr.sample_period = period;
557557
perf_event->hw.sample_period = period;
558558

559-
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
559+
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, BIT(idx));
560560

561561
if (kvm_pmu_counter_can_chain(pmc))
562562
kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
@@ -914,9 +914,9 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
914914
{
915915
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
916916

917-
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
918-
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
919-
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
917+
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, mask);
918+
__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, mask);
919+
__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, mask);
920920

921921
kvm_pmu_reprogram_counter_mask(vcpu, mask);
922922
}

arch/arm64/kvm/sys_regs.c

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -791,15 +791,15 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
791791
mask |= GENMASK(n - 1, 0);
792792

793793
reset_unknown(vcpu, r);
794-
__vcpu_sys_reg(vcpu, r->reg) &= mask;
794+
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
795795

796796
return __vcpu_sys_reg(vcpu, r->reg);
797797
}
798798

799799
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
800800
{
801801
reset_unknown(vcpu, r);
802-
__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
802+
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
803803

804804
return __vcpu_sys_reg(vcpu, r->reg);
805805
}
@@ -811,15 +811,15 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
811811
return 0;
812812

813813
reset_unknown(vcpu, r);
814-
__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
814+
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
815815

816816
return __vcpu_sys_reg(vcpu, r->reg);
817817
}
818818

819819
static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
820820
{
821821
reset_unknown(vcpu, r);
822-
__vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
822+
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
823823

824824
return __vcpu_sys_reg(vcpu, r->reg);
825825
}
@@ -1103,10 +1103,10 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
11031103
val = p->regval & mask;
11041104
if (r->Op2 & 0x1)
11051105
/* accessing PMCNTENSET_EL0 */
1106-
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
1106+
__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
11071107
else
11081108
/* accessing PMCNTENCLR_EL0 */
1109-
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
1109+
__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
11101110

11111111
kvm_pmu_reprogram_counter_mask(vcpu, val);
11121112
} else {
@@ -1129,10 +1129,10 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
11291129

11301130
if (r->Op2 & 0x1)
11311131
/* accessing PMINTENSET_EL1 */
1132-
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
1132+
__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
11331133
else
11341134
/* accessing PMINTENCLR_EL1 */
1135-
__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
1135+
__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
11361136
} else {
11371137
p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
11381138
}
@@ -1151,10 +1151,10 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
11511151
if (p->is_write) {
11521152
if (r->CRm & 0x2)
11531153
/* accessing PMOVSSET_EL0 */
1154-
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
1154+
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
11551155
else
11561156
/* accessing PMOVSCLR_EL0 */
1157-
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
1157+
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
11581158
} else {
11591159
p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
11601160
}
@@ -4786,7 +4786,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
47864786
r->reset(vcpu, r);
47874787

47884788
if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
4789-
(void)__vcpu_sys_reg(vcpu, r->reg);
4789+
__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
47904790
}
47914791

47924792
set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);

0 commit comments

Comments (0)