Commit fc5d1f1

Christoffer Dall authored and Marc Zyngier committed
KVM: arm64: vgic-v3: Take cpu_if pointer directly instead of vcpu
If we move the used_lrs field to the version-specific cpu interface
structure, the following functions only operate on the struct
vgic_v3_cpu_if and not the full vcpu:

  __vgic_v3_save_state
  __vgic_v3_restore_state
  __vgic_v3_activate_traps
  __vgic_v3_deactivate_traps
  __vgic_v3_save_aprs
  __vgic_v3_restore_aprs

This is going to be very useful for nested virt, so move the used_lrs
field and change the prototypes and implementations of these functions
to take the cpu_if parameter directly.

No functional change.

Reviewed-by: James Morse <[email protected]>
Signed-off-by: Christoffer Dall <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
1 parent 0a78791 commit fc5d1f1
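
For orientation, a condensed sketch of the new calling convention, stitched together from the switch.c and vgic-v3.c hunks below (surrounding code omitted; not a drop-in excerpt):

/* Condensed from vgic_v3_load()/vgic_v3_put() after this patch. */
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

/* Hyp calls pass the pointer through kern_hyp_va() so non-VHE EL2 can dereference it. */
kvm_call_hyp(__vgic_v3_restore_aprs, kern_hyp_va(cpu_if));

/* VHE callers hand the kernel pointer over directly. */
if (has_vhe())
        __vgic_v3_activate_traps(cpu_if);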

File tree

7 files changed (+54, -53):

  arch/arm64/include/asm/kvm_hyp.h
  arch/arm64/kvm/hyp/switch.c
  arch/arm64/kvm/hyp/vgic-v3-sr.c
  arch/arm64/kvm/vgic/vgic-v2.c
  arch/arm64/kvm/vgic/vgic-v3.c
  arch/arm64/kvm/vgic/vgic.c
  include/kvm/arm_vgic.h

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 6 additions & 6 deletions

@@ -56,12 +56,12 @@
 
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
+void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __timer_enable_traps(struct kvm_vcpu *vcpu);

arch/arm64/kvm/hyp/switch.c

Lines changed: 4 additions & 4 deletions

@@ -270,17 +270,17 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
 static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 {
         if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
-                __vgic_v3_save_state(vcpu);
-                __vgic_v3_deactivate_traps(vcpu);
+                __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
+                __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
         }
 }
 
 /* Restore VGICv3 state on non_VEH systems */
 static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 {
         if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
-                __vgic_v3_activate_traps(vcpu);
-                __vgic_v3_restore_state(vcpu);
+                __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
+                __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
         }
 }
 
arch/arm64/kvm/hyp/vgic-v3-sr.c

Lines changed: 10 additions & 23 deletions

@@ -194,10 +194,9 @@ static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
         return val;
 }
 
-void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        u64 used_lrs = cpu_if->used_lrs;
 
         /*
          * Make sure stores to the GIC via the memory mapped interface
@@ -230,10 +229,9 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
         }
 }
 
-void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        u64 used_lrs = cpu_if->used_lrs;
         int i;
 
         if (used_lrs || cpu_if->its_vpe.its_vm) {
@@ -257,10 +255,8 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
         }
 }
 
-void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
         /*
          * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
          * Group0 interrupt (as generated in GICv2 mode) to be
@@ -306,9 +302,8 @@ void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
         write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 }
 
-void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
         u64 val;
 
         if (!cpu_if->vgic_sre) {
@@ -333,15 +328,11 @@ void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
         write_gicreg(0, ICH_HCR_EL2);
 }
 
-void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if;
         u64 val;
         u32 nr_pre_bits;
 
-        vcpu = kern_hyp_va(vcpu);
-        cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
         val = read_gicreg(ICH_VTR_EL2);
         nr_pre_bits = vtr_to_nr_pre_bits(val);
 
@@ -370,15 +361,11 @@ void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
         }
 }
 
-void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if;
         u64 val;
         u32 nr_pre_bits;
 
-        vcpu = kern_hyp_va(vcpu);
-        cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
         val = read_gicreg(ICH_VTR_EL2);
         nr_pre_bits = vtr_to_nr_pre_bits(val);
 
@@ -451,7 +438,7 @@ static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
                                                     u32 vmcr,
                                                     u64 *lr_val)
 {
-        unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
         u8 priority = GICv3_IDLE_PRIORITY;
         int i, lr = -1;
 
@@ -490,7 +477,7 @@ static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
 static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
                                                int intid, u64 *lr_val)
 {
-        unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
         int i;
 
         for (i = 0; i < used_lrs; i++) {

arch/arm64/kvm/vgic/vgic-v2.c

Lines changed: 5 additions & 5 deletions

@@ -56,7 +56,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 
         cpuif->vgic_hcr &= ~GICH_HCR_UIE;
 
-        for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
+        for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
                 u32 val = cpuif->vgic_lr[lr];
                 u32 cpuid, intid = val & GICH_LR_VIRTUALID;
                 struct vgic_irq *irq;
@@ -120,7 +120,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
                 vgic_put_irq(vcpu->kvm, irq);
         }
 
-        vgic_cpu->used_lrs = 0;
+        cpuif->used_lrs = 0;
 }
 
 /*
@@ -427,7 +427,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
 static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 {
         struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        u64 used_lrs = cpu_if->used_lrs;
         u64 elrsr;
         int i;
 
@@ -448,7 +448,7 @@ static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 void vgic_v2_save_state(struct kvm_vcpu *vcpu)
 {
         void __iomem *base = kvm_vgic_global_state.vctrl_base;
-        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
 
         if (!base)
                 return;
@@ -463,7 +463,7 @@ void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
 {
         struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
         void __iomem *base = kvm_vgic_global_state.vctrl_base;
-        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        u64 used_lrs = cpu_if->used_lrs;
         int i;
 
         if (!base)

arch/arm64/kvm/vgic/vgic-v3.c

Lines changed: 8 additions & 6 deletions

@@ -39,7 +39,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 
         cpuif->vgic_hcr &= ~ICH_HCR_UIE;
 
-        for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
+        for (lr = 0; lr < cpuif->used_lrs; lr++) {
                 u64 val = cpuif->vgic_lr[lr];
                 u32 intid, cpuid;
                 struct vgic_irq *irq;
@@ -111,7 +111,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                 vgic_put_irq(vcpu->kvm, irq);
         }
 
-        vgic_cpu->used_lrs = 0;
+        cpuif->used_lrs = 0;
 }
 
 /* Requires the irq to be locked already */
@@ -662,10 +662,10 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
         if (likely(cpu_if->vgic_sre))
                 kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
 
-        kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);
+        kvm_call_hyp(__vgic_v3_restore_aprs, kern_hyp_va(cpu_if));
 
         if (has_vhe())
-                __vgic_v3_activate_traps(vcpu);
+                __vgic_v3_activate_traps(cpu_if);
 
         WARN_ON(vgic_v4_load(vcpu));
 }
@@ -680,12 +680,14 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
 
 void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
+        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
         WARN_ON(vgic_v4_put(vcpu, false));
 
         vgic_v3_vmcr_sync(vcpu);
 
-        kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
+        kvm_call_hyp(__vgic_v3_save_aprs, kern_hyp_va(cpu_if));
 
         if (has_vhe())
-                __vgic_v3_deactivate_traps(vcpu);
+                __vgic_v3_deactivate_traps(cpu_if);
 }

arch/arm64/kvm/vgic/vgic.c

Lines changed: 17 additions & 8 deletions

@@ -786,6 +786,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
         int count;
         bool multi_sgi;
         u8 prio = 0xff;
+        int i = 0;
 
         lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
@@ -827,11 +828,14 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
                 }
         }
 
-        vcpu->arch.vgic_cpu.used_lrs = count;
-
         /* Nuke remaining LRs */
-        for ( ; count < kvm_vgic_global_state.nr_lr; count++)
-                vgic_clear_lr(vcpu, count);
+        for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
+                vgic_clear_lr(vcpu, i);
+
+        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+                vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
+        else
+                vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
 }
 
 static inline bool can_access_vgic_from_kernel(void)
@@ -849,13 +853,13 @@ static inline void vgic_save_state(struct kvm_vcpu *vcpu)
         if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                 vgic_v2_save_state(vcpu);
         else
-                __vgic_v3_save_state(vcpu);
+                __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
 }
 
 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+        int used_lrs;
 
         /* An empty ap_list_head implies used_lrs == 0 */
         if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
@@ -864,7 +868,12 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
         if (can_access_vgic_from_kernel())
                 vgic_save_state(vcpu);
 
-        if (vgic_cpu->used_lrs)
+        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+                used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
+        else
+                used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
+
+        if (used_lrs)
                 vgic_fold_lr_state(vcpu);
         vgic_prune_ap_list(vcpu);
 }
@@ -874,7 +883,7 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
         if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                 vgic_v2_restore_state(vcpu);
         else
-                __vgic_v3_restore_state(vcpu);
+                __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
 }
 
 /* Flush our emulation state into the GIC hardware before entering the guest. */

include/kvm/arm_vgic.h

Lines changed: 4 additions & 1 deletion

@@ -274,6 +274,8 @@ struct vgic_v2_cpu_if {
         u32             vgic_vmcr;
         u32             vgic_apr;
         u32             vgic_lr[VGIC_V2_MAX_LRS];
+
+        unsigned int used_lrs;
 };
 
 struct vgic_v3_cpu_if {
@@ -291,6 +293,8 @@ struct vgic_v3_cpu_if {
          * linking the Linux IRQ subsystem and the ITS together.
          */
         struct its_vpe its_vpe;
+
+        unsigned int used_lrs;
 };
 
 struct vgic_cpu {
@@ -300,7 +304,6 @@ struct vgic_cpu {
                 struct vgic_v3_cpu_if vgic_v3;
         };
 
-        unsigned int used_lrs;
         struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
 
         raw_spinlock_t ap_list_lock;    /* Protects the ap_list */

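Taken together, the data-structure change condenses to the sketch below, abridged from the include/kvm/arm_vgic.h hunks above; untouched fields are elided, and the anonymous union wrapper comes from the surrounding header rather than from this diff:

struct vgic_v2_cpu_if {
        /* ... */
        u32 vgic_lr[VGIC_V2_MAX_LRS];

        unsigned int used_lrs;          /* moved here from struct vgic_cpu */
};

struct vgic_v3_cpu_if {
        /* ... */
        struct its_vpe its_vpe;

        unsigned int used_lrs;          /* moved here from struct vgic_cpu */
};

struct vgic_cpu {
        union {
                struct vgic_v2_cpu_if vgic_v2;
                struct vgic_v3_cpu_if vgic_v3;
        };

        /* used_lrs no longer lives here */
        struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
        /* ... */
};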