Commit 8a8ff06

Author: Marc Zyngier
KVM: arm64: nv: Fix tracking of shadow list registers
Wei-Lin reports that the tracking of shadow list registers is majorly
broken when resync'ing the L2 state after a run, as we confuse the
guest's LR index with the host's, potentially losing the interrupt
state.

While this could be fixed by adding yet another side index to track it
(Wei-Lin's fix), it may be better to refactor this code to avoid having
a side index altogether, limiting the risk of introducing this class of
bugs.

A key observation is that the shadow index of a given LR is always the
number of bits set below its position in the lr_map bitmap. With that,
the parallel indexing scheme can be completely dropped.

While doing this, introduce a couple of helpers that abstract the index
conversion and some of the LR repainting, making the whole exercise
much simpler.

Reported-by: Wei-Lin Chang <[email protected]>
Reviewed-by: Wei-Lin Chang <[email protected]>
Reviewed-by: Oliver Upton <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Link: https://lore.kernel.org/r/[email protected]
Parent: e04c78d
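The bitmap arithmetic that replaces the side index is worth seeing in
isolation. Below is a minimal userspace sketch, not taken from the patch:
hweight16() is modeled with __builtin_popcount() and the bitmap value is
made up. It shows that the shadow slot of a guest LR is the count of set
bits below it, and that the total population count doubles as used_lrs:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's hweight16(): number of set bits in a 16-bit word. */
static int hweight16(uint16_t x)
{
	return __builtin_popcount(x);
}

int main(void)
{
	/* Guest LRs 1, 4 and 5 are live: lr_map = 0b110010. */
	uint16_t lr_map = (1u << 1) | (1u << 4) | (1u << 5);

	/*
	 * The shadow slot for guest LR i is the number of set bits below
	 * bit i, so the shadow array stays densely packed:
	 * guest LR 1 -> slot 0, LR 4 -> slot 1, LR 5 -> slot 2.
	 */
	for (int i = 0; i < 16; i++) {
		if (lr_map & (1u << i))
			printf("guest LR %d -> shadow slot %d\n",
			       i, hweight16(lr_map & ((1u << i) - 1)));
	}

	/* The total population count doubles as used_lrs. */
	printf("used_lrs = %d\n", hweight16(lr_map));
	return 0;
}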

1 file changed (+42, −39)

arch/arm64/kvm/vgic/vgic-v3-nested.c

Lines changed: 42 additions & 39 deletions
@@ -36,6 +36,11 @@ struct shadow_if {
 
 static DEFINE_PER_CPU(struct shadow_if, shadow_if);
 
+static int lr_map_idx_to_shadow_idx(struct shadow_if *shadow_if, int idx)
+{
+	return hweight16(shadow_if->lr_map & (BIT(idx) - 1));
+}
+
 /*
  * Nesting GICv3 support
  *
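As a quick confidence check on this helper (again a standalone sketch,
not part of the commit), one can verify exhaustively that the
popcount-based slot always matches the running side index the old code
maintained, for every possible 16-bit lr_map:

#include <assert.h>
#include <stdint.h>

static int hweight16(uint16_t x)
{
	return __builtin_popcount(x);
}

/* Same computation as the new kernel helper, on a plain uint16_t. */
static int lr_map_idx_to_shadow_idx(uint16_t lr_map, int idx)
{
	return hweight16(lr_map & ((1u << idx) - 1));
}

int main(void)
{
	/* Walk every possible bitmap of live guest LRs. */
	for (uint32_t final_map = 0; final_map < (1u << 16); final_map++) {
		uint16_t lr_map = 0;
		int index = 0;	/* the side index the old code carried around */

		for (int i = 0; i < 16; i++) {
			if (!(final_map & (1u << i)))
				continue;	/* dead LR: skipped by both schemes */

			/* Slot chosen at create time == old running index... */
			assert(hweight16(lr_map) == index);
			lr_map |= 1u << i;
			index++;

			/* ...and the helper recovers it from the final bitmap. */
			assert(lr_map_idx_to_shadow_idx((uint16_t)final_map, i) ==
			       index - 1);
		}
		assert(hweight16(lr_map) == index);	/* used_lrs agrees too */
	}
	return 0;
}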
@@ -209,6 +214,29 @@ u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu)
 	return reg;
 }
 
+static u64 translate_lr_pintid(struct kvm_vcpu *vcpu, u64 lr)
+{
+	struct vgic_irq *irq;
+
+	if (!(lr & ICH_LR_HW))
+		return lr;
+
+	/* We have the HW bit set, check for validity of pINTID */
+	irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
+	/* If there was no real mapping, nuke the HW bit */
+	if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI)
+		lr &= ~ICH_LR_HW;
+
+	/* Translate the virtual mapping to the real one, even if invalid */
+	if (irq) {
+		lr &= ~ICH_LR_PHYS_ID_MASK;
+		lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid);
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+
+	return lr;
+}
+
 /*
  * For LRs which have HW bit set such as timer interrupts, we modify them to
  * have the host hardware interrupt number instead of the virtual one programmed
@@ -217,58 +245,37 @@ u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu)
 static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu,
 				     struct vgic_v3_cpu_if *s_cpu_if)
 {
-	unsigned long lr_map = 0;
-	int index = 0;
+	struct shadow_if *shadow_if;
+
+	shadow_if = container_of(s_cpu_if, struct shadow_if, cpuif);
+	shadow_if->lr_map = 0;
 
 	for (int i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
 		u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));
-		struct vgic_irq *irq;
 
 		if (!(lr & ICH_LR_STATE))
-			lr = 0;
-
-		if (!(lr & ICH_LR_HW))
-			goto next;
-
-		/* We have the HW bit set, check for validity of pINTID */
-		irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
-		if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI) {
-			/* There was no real mapping, so nuke the HW bit */
-			lr &= ~ICH_LR_HW;
-			if (irq)
-				vgic_put_irq(vcpu->kvm, irq);
-			goto next;
-		}
-
-		/* Translate the virtual mapping to the real one */
-		lr &= ~ICH_LR_PHYS_ID_MASK;
-		lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid);
+			continue;
 
-		vgic_put_irq(vcpu->kvm, irq);
+		lr = translate_lr_pintid(vcpu, lr);
 
-next:
-		s_cpu_if->vgic_lr[index] = lr;
-		if (lr) {
-			lr_map |= BIT(i);
-			index++;
-		}
+		s_cpu_if->vgic_lr[hweight16(shadow_if->lr_map)] = lr;
+		shadow_if->lr_map |= BIT(i);
 	}
 
-	container_of(s_cpu_if, struct shadow_if, cpuif)->lr_map = lr_map;
-	s_cpu_if->used_lrs = index;
+	s_cpu_if->used_lrs = hweight16(shadow_if->lr_map);
 }
 
 void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
 {
 	struct shadow_if *shadow_if = get_shadow_if();
-	int i, index = 0;
+	int i;
 
 	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
 		u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));
 		struct vgic_irq *irq;
 
 		if (!(lr & ICH_LR_HW) || !(lr & ICH_LR_STATE))
-			goto next;
+			continue;
 
 		/*
 		 * If we had a HW lr programmed by the guest hypervisor, we
@@ -277,15 +284,13 @@ void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
 		 */
 		irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
 		if (WARN_ON(!irq)) /* Shouldn't happen as we check on load */
-			goto next;
+			continue;
 
-		lr = __gic_v3_get_lr(index);
+		lr = __gic_v3_get_lr(lr_map_idx_to_shadow_idx(shadow_if, i));
 		if (!(lr & ICH_LR_STATE))
 			irq->active = false;
 
 		vgic_put_irq(vcpu->kvm, irq);
-next:
-		index++;
 	}
 }
 
@@ -368,13 +373,11 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
 		val = __vcpu_sys_reg(vcpu, ICH_LRN(i));
 
 		val &= ~ICH_LR_STATE;
-		val |= s_cpu_if->vgic_lr[i] & ICH_LR_STATE;
+		val |= s_cpu_if->vgic_lr[lr_map_idx_to_shadow_idx(shadow_if, i)] & ICH_LR_STATE;
 
 		__vcpu_assign_sys_reg(vcpu, ICH_LRN(i), val);
-		s_cpu_if->vgic_lr[i] = 0;
 	}
 
-	shadow_if->lr_map = 0;
 	vcpu->arch.vgic_cpu.vgic_v3.used_lrs = 0;
 }
 
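Finally, to see why the resync path can now index the shadow LRs without
any side state, here is a toy round trip: a userspace sketch with made-up
LR values standing in for real ICH_LR state. It compacts the live guest
LRs the way vgic_v3_create_shadow_lr() does, then recovers each one
through the same arithmetic vgic_v3_sync_nested() and
vgic_v3_put_nested() now use:

#include <assert.h>
#include <stdint.h>

#define NR_LR 16

static int hweight16(uint16_t x)
{
	return __builtin_popcount(x);
}

static int lr_map_idx_to_shadow_idx(uint16_t lr_map, int idx)
{
	return hweight16(lr_map & ((1u << idx) - 1));
}

int main(void)
{
	/* Guest-visible LRs: only 2, 7 and 9 hold live state (toy values). */
	uint64_t guest_lr[NR_LR] = { [2] = 0x222, [7] = 0x777, [9] = 0x999 };
	uint64_t shadow_lr[NR_LR] = { 0 };
	uint16_t lr_map = 0;

	/* Create: compact the live guest LRs into the shadow array. */
	for (int i = 0; i < NR_LR; i++) {
		if (!guest_lr[i])
			continue;
		shadow_lr[hweight16(lr_map)] = guest_lr[i];
		lr_map |= 1u << i;
	}

	/* Resync: each live guest LR finds its own shadow slot again. */
	for (int i = 0; i < NR_LR; i++) {
		if (!(lr_map & (1u << i)))
			continue;
		assert(shadow_lr[lr_map_idx_to_shadow_idx(lr_map, i)] ==
		       guest_lr[i]);
	}
	return 0;
}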