#include <zephyr/zsr.h>
#include <zephyr/arch/common/exc_handle.h>

- #ifdef CONFIG_XTENSA_GEN_HANDLERS
- #include <xtensa_handlers.h>
- #else
- #include <_soc_inthandlers.h>
- #endif
-
#include <kernel_internal.h>
#include <xtensa_internal.h>
#include <xtensa_stack.h>
@@ -370,123 +364,36 @@ void arch_ipi_lazy_coprocessors_save(void)
#endif
}

- /* The wrapper code lives here instead of in the python script that
- * generates _xtensa_handle_one_int*(). Seems cleaner, still kind of
- * ugly.
- *
- * This may be unused depending on number of interrupt levels
- * supported by the SoC.
- */

#if XCHAL_NUM_INTERRUPTS <= 32
- #define DEF_INT_C_HANDLER(l) \
- __unused void *xtensa_int##l##_c(void *interrupted_stack) \
- { \
- 	uint32_t irqs, intenable, m; \
- 	usage_stop(); \
- 	__asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
- 	__asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \
- 	irqs &= intenable; \
- 	while ((m = _xtensa_handle_one_int##l(0, irqs))) { \
- 		irqs ^= m; \
- 		__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
- 	} \
- 	return return_to(interrupted_stack); \
- }
- #endif /* XCHAL_NUM_INTERRUPTS <= 32 */
-
- #if XCHAL_NUM_INTERRUPTS > 32 && XCHAL_NUM_INTERRUPTS <= 64
- #define DEF_INT_C_HANDLER(l) \
- __unused void *xtensa_int##l##_c(void *interrupted_stack) \
- { \
- 	uint32_t irqs, intenable, m; \
- 	usage_stop(); \
- 	__asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
- 	__asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \
- 	irqs &= intenable; \
- 	while ((m = _xtensa_handle_one_int##l(0, irqs))) { \
- 		irqs ^= m; \
- 		__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
- 	} \
- 	__asm__ volatile("rsr.interrupt1 %0" : "=r"(irqs)); \
- 	__asm__ volatile("rsr.intenable1 %0" : "=r"(intenable)); \
- 	irqs &= intenable; \
- 	while ((m = _xtensa_handle_one_int##l(1, irqs))) { \
- 		irqs ^= m; \
- 		__asm__ volatile("wsr.intclear1 %0" : : "r"(m)); \
- 	} \
- 	return return_to(interrupted_stack); \
- }
- #endif /* XCHAL_NUM_INTERRUPTS > 32 && XCHAL_NUM_INTERRUPTS <= 64 */
-
- #if XCHAL_NUM_INTERRUPTS > 64 && XCHAL_NUM_INTERRUPTS <= 96
- #define DEF_INT_C_HANDLER(l) \
- __unused void *xtensa_int##l##_c(void *interrupted_stack) \
- { \
- 	uint32_t irqs, intenable, m; \
- 	usage_stop(); \
- 	__asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
- 	__asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \
- 	irqs &= intenable; \
- 	while ((m = _xtensa_handle_one_int##l(0, irqs))) { \
- 		irqs ^= m; \
- 		__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
- 	} \
- 	__asm__ volatile("rsr.interrupt1 %0" : "=r"(irqs)); \
- 	__asm__ volatile("rsr.intenable1 %0" : "=r"(intenable)); \
- 	irqs &= intenable; \
- 	while ((m = _xtensa_handle_one_int##l(1, irqs))) { \
- 		irqs ^= m; \
- 		__asm__ volatile("wsr.intclear1 %0" : : "r"(m)); \
- 	} \
- 	__asm__ volatile("rsr.interrupt2 %0" : "=r"(irqs)); \
- 	__asm__ volatile("rsr.intenable2 %0" : "=r"(intenable)); \
- 	irqs &= intenable; \
- 	while ((m = _xtensa_handle_one_int##l(2, irqs))) { \
- 		irqs ^= m; \
- 		__asm__ volatile("wsr.intclear2 %0" : : "r"(m)); \
- 	} \
- 	return return_to(interrupted_stack); \
- }
- #endif /* XCHAL_NUM_INTERRUPTS > 64 && XCHAL_NUM_INTERRUPTS <= 96 */
-
- #if XCHAL_NUM_INTERRUPTS > 96
- #define DEF_INT_C_HANDLER(l) \
- __unused void *xtensa_int##l##_c(void *interrupted_stack) \
- { \
- 	uint32_t irqs, intenable, m; \
- 	usage_stop(); \
- 	__asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
- 	__asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \
- 	irqs &= intenable; \
- 	while ((m = _xtensa_handle_one_int##l(0, irqs))) { \
- 		irqs ^= m; \
- 		__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
- 	} \
- 	__asm__ volatile("rsr.interrupt1 %0" : "=r"(irqs)); \
- 	__asm__ volatile("rsr.intenable1 %0" : "=r"(intenable)); \
- 	irqs &= intenable; \
- 	while ((m = _xtensa_handle_one_int##l(1, irqs))) { \
- 		irqs ^= m; \
- 		__asm__ volatile("wsr.intclear1 %0" : : "r"(m)); \
- 	} \
- 	__asm__ volatile("rsr.interrupt2 %0" : "=r"(irqs)); \
- 	__asm__ volatile("rsr.intenable2 %0" : "=r"(intenable)); \
- 	irqs &= intenable; \
- 	while ((m = _xtensa_handle_one_int##l(2, irqs))) { \
- 		irqs ^= m; \
- 		__asm__ volatile("wsr.intclear2 %0" : : "r"(m)); \
- 	} \
- 	__asm__ volatile("rsr.interrupt3 %0" : "=r"(irqs)); \
- 	__asm__ volatile("rsr.intenable3 %0" : "=r"(intenable)); \
- 	irqs &= intenable; \
- 	while ((m = _xtensa_handle_one_int##l(3, irqs))) { \
- 		irqs ^= m; \
- 		__asm__ volatile("wsr.intclear3 %0" : : "r"(m)); \
- 	} \
- 	return return_to(interrupted_stack); \
- }
- #endif /* XCHAL_NUM_INTERRUPTS > 96 */
+ #define DECLARE_IRQ(lvl) \
+ 	{ \
+ 		XCHAL_INTLEVEL##lvl##_MASK, \
+ 	}
+ #elif XCHAL_NUM_INTERRUPTS <= 64
+ #define DECLARE_IRQ(lvl) \
+ 	{ \
+ 		XCHAL_INTLEVEL##lvl##_MASK, \
+ 		XCHAL_INTLEVEL##lvl##_MASK1, \
+ 	}
+ #elif XCHAL_NUM_INTERRUPTS <= 96
+ #define DECLARE_IRQ(lvl) \
+ 	{ \
+ 		XCHAL_INTLEVEL##lvl##_MASK, \
+ 		XCHAL_INTLEVEL##lvl##_MASK1, \
+ 		XCHAL_INTLEVEL##lvl##_MASK2, \
+ 	}
+ #elif XCHAL_NUM_INTERRUPTS <= 128
+ #define DECLARE_IRQ(lvl) \
+ 	{ \
+ 		XCHAL_INTLEVEL##lvl##_MASK, \
+ 		XCHAL_INTLEVEL##lvl##_MASK1, \
+ 		XCHAL_INTLEVEL##lvl##_MASK2, \
+ 		XCHAL_INTLEVEL##lvl##_MASK3, \
+ 	}
+ #else
+ #error "xtensa supports up to 128 interrupts"
+ #endif
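+
+ /* Illustrative expansion (a sketch, assuming a SoC with 33..64
+  * interrupts, i.e. two 32-bit groups): DECLARE_IRQ(2) produces the row
+  * initializer
+  *
+  *     { XCHAL_INTLEVEL2_MASK, XCHAL_INTLEVEL2_MASK1, }
+  *
+  * one bitmask per 32-interrupt group, where bit n of group g is set when
+  * interrupt (32 * g + n) is routed at level 2.
+  */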

#if XCHAL_HAVE_NMI
#define MAX_INTR_LEVEL XCHAL_NMILEVEL
@@ -497,6 +404,102 @@ __unused void *xtensa_int##l##_c(void *interrupted_stack) \
#define MAX_INTR_LEVEL 0
#endif

+ #define GRP_COUNT (ROUND_UP(XCHAL_NUM_INTERRUPTS, 32) / 32)
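+ /* For example (hypothetical configuration): XCHAL_NUM_INTERRUPTS == 40
+  * rounds up to 64, giving GRP_COUNT == 2 groups of 32 interrupts each.
+  */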
+
+ static const uint32_t xtensa_lvl_mask[MAX_INTR_LEVEL][GRP_COUNT] = {
+ #if MAX_INTR_LEVEL >= 1
+ 	DECLARE_IRQ(1),
+ #endif
+ #if MAX_INTR_LEVEL >= 2
+ 	DECLARE_IRQ(2),
+ #endif
+ #if MAX_INTR_LEVEL >= 3
+ 	DECLARE_IRQ(3),
+ #endif
+ #if MAX_INTR_LEVEL >= 4
+ 	DECLARE_IRQ(4),
+ #endif
+ #if MAX_INTR_LEVEL >= 5
+ 	DECLARE_IRQ(5),
+ #endif
+ #if MAX_INTR_LEVEL >= 6
+ 	DECLARE_IRQ(6),
+ #endif
+ #if MAX_INTR_LEVEL >= 7
+ 	DECLARE_IRQ(7),
+ #endif
+ };
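+
+ /* Usage sketch (illustrative): the level-3 interrupts among interrupts
+  * 32..63 (group 1) are selected by xtensa_lvl_mask[3 - 1][1], which is
+  * how xtensa_handle_irq_lvl() below indexes this table.
+  */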
+
+ /* Handles all pending interrupts at the given IRQ level.
+ * - Supports up to 128 interrupts (the maximum supported by Xtensa)
+ * - Supports all IRQ levels
+ * - Uses __builtin_ctz(), which on most Xtensa configurations is
+ *   optimized to use the NSAU instruction
+ */
+ __unused static void xtensa_handle_irq_lvl(int irq_lvl)
+ {
+ 	int irq;
+ 	uint32_t irq_mask;
+ 	uint32_t intenable;
+ #if XCHAL_NUM_INTERRUPTS > 0
+ 	/* Group 0: interrupts 0..31 */
+ 	__asm__ volatile("rsr.interrupt %0" : "=r"(irq_mask));
+ 	__asm__ volatile("rsr.intenable %0" : "=r"(intenable));
+ 	irq_mask &= intenable;
+ 	irq_mask &= xtensa_lvl_mask[irq_lvl - 1][0];
+ 	while (irq_mask) {
+ 		irq = __builtin_ctz(irq_mask);
+ 		_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
+ 		__asm__ volatile("wsr.intclear %0" : : "r"(BIT(irq)));
+ 		irq_mask ^= BIT(irq);
+ 	}
+ #endif
+ #if XCHAL_NUM_INTERRUPTS > 32
+ 	/* Group 1: interrupts 32..63 */
+ 	__asm__ volatile("rsr.interrupt1 %0" : "=r"(irq_mask));
+ 	__asm__ volatile("rsr.intenable1 %0" : "=r"(intenable));
+ 	irq_mask &= intenable;
+ 	irq_mask &= xtensa_lvl_mask[irq_lvl - 1][1];
+ 	while (irq_mask) {
+ 		irq = __builtin_ctz(irq_mask);
+ 		_sw_isr_table[irq + 32].isr(_sw_isr_table[irq + 32].arg);
+ 		__asm__ volatile("wsr.intclear1 %0" : : "r"(BIT(irq)));
+ 		irq_mask ^= BIT(irq);
+ 	}
+ #endif
+
+ #if XCHAL_NUM_INTERRUPTS > 64
+ 	/* Group 2: interrupts 64..95 */
+ 	__asm__ volatile("rsr.interrupt2 %0" : "=r"(irq_mask));
+ 	__asm__ volatile("rsr.intenable2 %0" : "=r"(intenable));
+ 	irq_mask &= intenable;
+ 	irq_mask &= xtensa_lvl_mask[irq_lvl - 1][2];
+ 	while (irq_mask) {
+ 		irq = __builtin_ctz(irq_mask);
+ 		_sw_isr_table[irq + 64].isr(_sw_isr_table[irq + 64].arg);
+ 		__asm__ volatile("wsr.intclear2 %0" : : "r"(BIT(irq)));
+ 		irq_mask ^= BIT(irq);
+ 	}
+ #endif
+ #if XCHAL_NUM_INTERRUPTS > 96
+ 	/* Group 3: interrupts 96..127 */
+ 	__asm__ volatile("rsr.interrupt3 %0" : "=r"(irq_mask));
+ 	__asm__ volatile("rsr.intenable3 %0" : "=r"(intenable));
+ 	irq_mask &= intenable;
+ 	irq_mask &= xtensa_lvl_mask[irq_lvl - 1][3];
+ 	while (irq_mask) {
+ 		irq = __builtin_ctz(irq_mask);
+ 		_sw_isr_table[irq + 96].isr(_sw_isr_table[irq + 96].arg);
+ 		__asm__ volatile("wsr.intclear3 %0" : : "r"(BIT(irq)));
+ 		irq_mask ^= BIT(irq);
+ 	}
+ #endif
+ }
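+
+ /* Worked example of the scan loops above (illustrative values): if the
+  * pending-and-enabled mask for this level is 0x14 (interrupts 2 and 4),
+  * __builtin_ctz() returns 2, so _sw_isr_table[2].isr() runs and bit 2 is
+  * cleared; the next pass returns 4 and runs _sw_isr_table[4].isr(); the
+  * loop exits once the mask reaches zero.
+  */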
+
+ #define DEF_INT_C_HANDLER(l) \
+ __unused void *xtensa_int##l##_c(void *interrupted_stack) \
+ { \
+ 	usage_stop(); \
+ 	xtensa_handle_irq_lvl(l); \
+ 	return return_to(interrupted_stack); \
+ }
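+
+ /* Illustrative expansion (not generated code): DEF_INT_C_HANDLER(2)
+  * defines
+  *
+  *     __unused void *xtensa_int2_c(void *interrupted_stack)
+  *     {
+  *         usage_stop();
+  *         xtensa_handle_irq_lvl(2);
+  *         return return_to(interrupted_stack);
+  *     }
+  */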
+

#if MAX_INTR_LEVEL >= 2
DEF_INT_C_HANDLER(2)
#endif