@@ -41,6 +41,7 @@ struct test_args {
41
41
*/
42
42
/* GICv3 priority layout used by KVM: 32 usable levels in steps of 8. */
#define KVM_NUM_PRIOS		32
#define KVM_PRIO_SHIFT		3 /* steps of 8 = 1 << 3 */
#define KVM_PRIO_STEPS		(1 << KVM_PRIO_SHIFT) /* 8 */
/* Lower numeric value means higher priority; 31 is the lowest level. */
#define LOWEST_PRIO		(KVM_NUM_PRIOS - 1)
#define CPU_PRIO_MASK		(LOWEST_PRIO << KVM_PRIO_SHIFT)	/* 0xf8 */
#define IRQ_DEFAULT_PRIO	(LOWEST_PRIO - 1)
@@ -212,6 +213,74 @@ static void guest_inject(struct test_args *args,
212
213
reset_priorities (args );
213
214
}
214
215
216
+ /*
217
+ * Polls the IAR until it's not a spurious interrupt.
218
+ *
219
+ * This function should only be used in test_inject_preemption (with IRQs
220
+ * masked).
221
+ */
222
+ static uint32_t wait_for_and_activate_irq (void )
223
+ {
224
+ uint32_t intid ;
225
+
226
+ do {
227
+ asm volatile ("wfi" : : : "memory" );
228
+ intid = gic_get_and_ack_irq ();
229
+ } while (intid == IAR_SPURIOUS );
230
+
231
+ return intid ;
232
+ }
233
+
234
+ /*
235
+ * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
236
+ * handle them without handling the actual exceptions. This is done by masking
237
+ * interrupts for the whole test.
238
+ */
239
+ static void test_inject_preemption (struct test_args * args ,
240
+ uint32_t first_intid , int num ,
241
+ kvm_inject_cmd cmd )
242
+ {
243
+ uint32_t intid , prio , step = KVM_PRIO_STEPS ;
244
+ int i ;
245
+
246
+ /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
247
+ * in descending order, so intid+1 can preempt intid.
248
+ */
249
+ for (i = 0 , prio = (num - 1 ) * step ; i < num ; i ++ , prio -= step ) {
250
+ GUEST_ASSERT (prio >= 0 );
251
+ intid = i + first_intid ;
252
+ gic_set_priority (intid , prio );
253
+ }
254
+
255
+ local_irq_disable ();
256
+
257
+ for (i = 0 ; i < num ; i ++ ) {
258
+ uint32_t tmp ;
259
+ intid = i + first_intid ;
260
+ kvm_inject_call (cmd , intid , 1 );
261
+ /* Each successive IRQ will preempt the previous one. */
262
+ tmp = wait_for_and_activate_irq ();
263
+ GUEST_ASSERT_EQ (tmp , intid );
264
+ }
265
+
266
+ /* finish handling the IRQs starting with the highest priority one. */
267
+ for (i = 0 ; i < num ; i ++ ) {
268
+ intid = num - i - 1 + first_intid ;
269
+ gic_set_eoi (intid );
270
+ if (args -> eoi_split )
271
+ gic_set_dir (intid );
272
+ }
273
+
274
+ local_irq_enable ();
275
+
276
+ for (i = 0 ; i < num ; i ++ )
277
+ GUEST_ASSERT (!gic_irq_get_active (i + first_intid ));
278
+ GUEST_ASSERT_EQ (gic_read_ap1r0 (), 0 );
279
+ GUEST_ASSERT_IAR_EMPTY ();
280
+
281
+ reset_priorities (args );
282
+ }
283
+
215
284
static void test_injection (struct test_args * args , struct kvm_inject_desc * f )
216
285
{
217
286
uint32_t nr_irqs = args -> nr_irqs ;
@@ -231,6 +300,24 @@ static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
231
300
}
232
301
}
233
302
303
+ static void test_preemption (struct test_args * args , struct kvm_inject_desc * f )
304
+ {
305
+ /*
306
+ * Test up to 4 levels of preemption. The reason is that KVM doesn't
307
+ * currently implement the ability to have more than the number-of-LRs
308
+ * number of concurrently active IRQs. The number of LRs implemented is
309
+ * IMPLEMENTATION DEFINED, however, it seems that most implement 4.
310
+ */
311
+ if (f -> sgi )
312
+ test_inject_preemption (args , MIN_SGI , 4 , f -> cmd );
313
+
314
+ if (f -> ppi )
315
+ test_inject_preemption (args , MIN_PPI , 4 , f -> cmd );
316
+
317
+ if (f -> spi )
318
+ test_inject_preemption (args , MIN_SPI , 4 , f -> cmd );
319
+ }
320
+
234
321
static void guest_code (struct test_args args )
235
322
{
236
323
uint32_t i , nr_irqs = args .nr_irqs ;
@@ -249,8 +336,10 @@ static void guest_code(struct test_args args)
249
336
local_irq_enable ();
250
337
251
338
/* Start the tests. */
252
- for_each_inject_fn (inject_edge_fns , f )
339
+ for_each_inject_fn (inject_edge_fns , f ) {
253
340
test_injection (& args , f );
341
+ test_preemption (& args , f );
342
+ }
254
343
255
344
GUEST_DONE ();
256
345
}
0 commit comments