@@ -66,6 +66,7 @@ typedef enum {
66
66
KVM_SET_LEVEL_INFO_HIGH ,
67
67
KVM_INJECT_IRQFD ,
68
68
KVM_WRITE_ISPENDR ,
69
+ KVM_WRITE_ISACTIVER ,
69
70
} kvm_inject_cmd ;
70
71
71
72
struct kvm_inject_args {
@@ -96,6 +97,9 @@ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
96
97
#define KVM_INJECT (cmd , intid ) \
97
98
_KVM_INJECT_MULTI(cmd, intid, 1, false)
98
99
100
/*
 * Ask userspace to mark a single IRQ (intid) as active via cmd.
 * Mirrors KVM_INJECT but goes through kvm_inject_call() directly with
 * num = 1; presumably the extra argument is the level — TODO confirm
 * against kvm_inject_call()'s parameter list.
 *
 * Note: no trailing semicolon in the expansion — callers supply their
 * own, so the macro composes safely with brace-less if/else.
 */
#define KVM_ACTIVATE(cmd, intid)	\
	kvm_inject_call(cmd, intid, 1, 1, false)
99
103
struct kvm_inject_desc {
100
104
kvm_inject_cmd cmd ;
101
105
/* can inject SGIs, PPIs, and/or SPIs. */
@@ -119,13 +123,22 @@ static struct kvm_inject_desc inject_level_fns[] = {
119
123
{ 0 , },
120
124
};
121
125
126
+ static struct kvm_inject_desc set_active_fns [] = {
127
+ /* sgi ppi spi */
128
+ { KVM_WRITE_ISACTIVER , true, true, true },
129
+ { 0 , },
130
+ };
131
+
122
132
#define for_each_inject_fn (t , f ) \
123
133
for ((f) = (t); (f)->cmd; (f)++)
124
134
125
135
#define for_each_supported_inject_fn (args , t , f ) \
126
136
for_each_inject_fn(t, f) \
127
137
if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)
128
138
139
/*
 * Iterate over the activate descriptors supported by this setup.
 * The inject gating (skipping KVM_INJECT_IRQFD when irqfd is not
 * supported) is a superset of what activation needs, so simply reuse
 * for_each_supported_inject_fn() unchanged.
 */
#define for_each_supported_activate_fn(args, t, f)	\
	for_each_supported_inject_fn((args), (t), (f))
129
142
/* Shared between the guest main thread and the IRQ handlers. */
130
143
volatile uint64_t irq_handled ;
131
144
volatile uint32_t irqnr_received [MAX_SPI + 1 ];
@@ -147,6 +160,12 @@ static uint64_t gic_read_ap1r0(void)
147
160
return reg ;
148
161
}
149
162
163
/*
 * Write the group-1 active-priorities register (ICV_AP1R0_EL1).
 * The isb() forces the sysreg write to take effect before any
 * subsequent instruction; keep the two statements in this order.
 */
static void gic_write_ap1r0(uint64_t val)
{
	write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
	isb();
}
168
+
150
169
static void guest_set_irq_line (uint32_t intid , uint32_t level );
151
170
152
171
static void guest_irq_generic_handler (bool eoi_split , bool level_sensitive )
@@ -274,6 +293,55 @@ static void guest_inject(struct test_args *args,
274
293
reset_priorities (args );
275
294
}
276
295
296
+ /*
297
+ * Restore the active state of multiple concurrent IRQs (given by
298
+ * concurrent_irqs). This does what a live-migration would do on the
299
+ * destination side assuming there are some active IRQs that were not
300
+ * deactivated yet.
301
+ */
302
+ static void guest_restore_active (struct test_args * args ,
303
+ uint32_t first_intid , uint32_t num ,
304
+ kvm_inject_cmd cmd )
305
+ {
306
+ uint32_t prio , intid , ap1r ;
307
+ int i ;
308
+
309
+ /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
310
+ * in descending order, so intid+1 can preempt intid.
311
+ */
312
+ for (i = 0 , prio = (num - 1 ) * 8 ; i < num ; i ++ , prio -= 8 ) {
313
+ GUEST_ASSERT (prio >= 0 );
314
+ intid = i + first_intid ;
315
+ gic_set_priority (intid , prio );
316
+ }
317
+
318
+ /* In a real migration, KVM would restore all GIC state before running
319
+ * guest code.
320
+ */
321
+ for (i = 0 ; i < num ; i ++ ) {
322
+ intid = i + first_intid ;
323
+ KVM_ACTIVATE (cmd , intid );
324
+ ap1r = gic_read_ap1r0 ();
325
+ ap1r |= 1U << i ;
326
+ gic_write_ap1r0 (ap1r );
327
+ }
328
+
329
+ /* This is where the "migration" would occur. */
330
+
331
+ /* finish handling the IRQs starting with the highest priority one. */
332
+ for (i = 0 ; i < num ; i ++ ) {
333
+ intid = num - i - 1 + first_intid ;
334
+ gic_set_eoi (intid );
335
+ if (args -> eoi_split )
336
+ gic_set_dir (intid );
337
+ }
338
+
339
+ for (i = 0 ; i < num ; i ++ )
340
+ GUEST_ASSERT (!gic_irq_get_active (i + first_intid ));
341
+ GUEST_ASSERT_EQ (gic_read_ap1r0 (), 0 );
342
+ GUEST_ASSERT_IAR_EMPTY ();
343
+ }
344
+
277
345
/*
278
346
* Polls the IAR until it's not a spurious interrupt.
279
347
*
@@ -391,6 +459,19 @@ static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
391
459
test_inject_preemption (args , MIN_SPI , 4 , f -> cmd );
392
460
}
393
461
462
+ static void test_restore_active (struct test_args * args , struct kvm_inject_desc * f )
463
+ {
464
+ /* Test up to 4 active IRQs. Same reason as in test_preemption. */
465
+ if (f -> sgi )
466
+ guest_restore_active (args , MIN_SGI , 4 , f -> cmd );
467
+
468
+ if (f -> ppi )
469
+ guest_restore_active (args , MIN_PPI , 4 , f -> cmd );
470
+
471
+ if (f -> spi )
472
+ guest_restore_active (args , MIN_SPI , 4 , f -> cmd );
473
+ }
474
+
394
475
static void guest_code (struct test_args args )
395
476
{
396
477
uint32_t i , nr_irqs = args .nr_irqs ;
@@ -422,6 +503,12 @@ static void guest_code(struct test_args args)
422
503
test_injection_failure (& args , f );
423
504
}
424
505
506
+ /* Restore the active state of IRQs. This would happen when live
507
+ * migrating IRQs in the middle of being handled.
508
+ */
509
+ for_each_supported_activate_fn (& args , set_active_fns , f )
510
+ test_restore_active (& args , f );
511
+
425
512
GUEST_DONE ();
426
513
}
427
514
@@ -619,6 +706,10 @@ static void run_guest_cmd(struct kvm_vm *vm, int gic_fd,
619
706
kvm_irq_write_ispendr_check (gic_fd , i ,
620
707
VCPU_ID , expect_failure );
621
708
break ;
709
+ case KVM_WRITE_ISACTIVER :
710
+ for (i = intid ; i < intid + num ; i ++ )
711
+ kvm_irq_write_isactiver (gic_fd , i , VCPU_ID );
712
+ break ;
622
713
default :
623
714
break ;
624
715
}
0 commit comments