Skip to content

Commit df40dff

Browse files
rysiof authored and henrikbrixandersen committed
arch: xtensa: clean up interrupt handling
Simplifying flow of handling interrupts: - removing all _soc_inthandlers.h - removing xtensa_intgen* - removing XTENSA_GEN_HANDLERS Kconfig - keeping optimized irq detection - single handler with irq level as parameter Signed-off-by: Maciej Kusio <[email protected]>
1 parent 885329c commit df40dff

File tree

22 files changed

+124
-3249
lines changed

22 files changed

+124
-3249
lines changed

arch/xtensa/Kconfig

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -22,16 +22,6 @@ config XTENSA_RESET_VECTOR
2222
This is always needed for the simulator. Real boards may already
2323
implement this in boot ROM.
2424

25-
config XTENSA_GEN_HANDLERS
26-
bool "Automatically generate interrupt handlers"
27-
default n
28-
help
29-
When set, an "xtensa_handlers.h" file is generated
30-
containing definitions for the interrupt entry code of the
31-
target Xtensa core, based automatically on the details in
32-
the core-isa.h file. This replaces the previous scheme
33-
where a _soc_inthandlers.h file would be generated offline.
34-
3525
config XTENSA_USE_CORE_CRT1
3626
bool "Use crt1.S from core"
3727
default y

arch/xtensa/core/CMakeLists.txt

Lines changed: 0 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -108,25 +108,6 @@ add_dependencies(zephyr_interface zsr_h)
108108

109109
unset(MAY_NEED_SYSCALL_SCRATCH_REG)
110110

111-
# Similar: auto-generate interrupt handlers
112-
set(HANDLERS ${CMAKE_BINARY_DIR}/zephyr/include/generated/xtensa_handlers)
113-
114-
add_custom_command(
115-
OUTPUT ${HANDLERS}_tmp.c
116-
COMMAND ${CMAKE_C_COMPILER} -E -U__XCC__
117-
${XTENSA_CONFIG_HAL_INCLUDE_DIR}
118-
-o ${HANDLERS}_tmp.c
119-
- < ${CMAKE_CURRENT_SOURCE_DIR}/xtensa_intgen.tmpl)
120-
121-
add_custom_command(
122-
OUTPUT ${HANDLERS}.h
123-
DEPENDS ${HANDLERS}_tmp.c
124-
COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/xtensa_intgen.py
125-
${HANDLERS}_tmp.c > ${HANDLERS}.h)
126-
127-
add_custom_target(xtensa_handlers_h DEPENDS ${HANDLERS}.h)
128-
add_dependencies(zephyr_interface xtensa_handlers_h)
129-
130111
# Auto-generate interrupt vector entry
131112
set(VECS_LD ${CMAKE_BINARY_DIR}/zephyr/include/generated/xtensa_vectors.ld)
132113
add_custom_command(OUTPUT ${VECS_LD} DEPENDS ${CORE_ISA_DM}

arch/xtensa/core/vector_handlers.c

Lines changed: 124 additions & 121 deletions
Original file line numberDiff line numberDiff line change
@@ -15,12 +15,6 @@
1515
#include <zephyr/zsr.h>
1616
#include <zephyr/arch/common/exc_handle.h>
1717

18-
#ifdef CONFIG_XTENSA_GEN_HANDLERS
19-
#include <xtensa_handlers.h>
20-
#else
21-
#include <_soc_inthandlers.h>
22-
#endif
23-
2418
#include <kernel_internal.h>
2519
#include <xtensa_internal.h>
2620
#include <xtensa_stack.h>
@@ -370,123 +364,36 @@ void arch_ipi_lazy_coprocessors_save(void)
370364
#endif
371365
}
372366

373-
/* The wrapper code lives here instead of in the python script that
374-
* generates _xtensa_handle_one_int*(). Seems cleaner, still kind of
375-
* ugly.
376-
*
377-
* This may be unused depending on number of interrupt levels
378-
* supported by the SoC.
379-
*/
380367

381368
#if XCHAL_NUM_INTERRUPTS <= 32
382-
#define DEF_INT_C_HANDLER(l) \
383-
__unused void *xtensa_int##l##_c(void *interrupted_stack) \
384-
{ \
385-
uint32_t irqs, intenable, m; \
386-
usage_stop(); \
387-
__asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
388-
__asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \
389-
irqs &= intenable; \
390-
while ((m = _xtensa_handle_one_int##l(0, irqs))) { \
391-
irqs ^= m; \
392-
__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
393-
} \
394-
return return_to(interrupted_stack); \
395-
}
396-
#endif /* XCHAL_NUM_INTERRUPTS <= 32 */
397-
398-
#if XCHAL_NUM_INTERRUPTS > 32 && XCHAL_NUM_INTERRUPTS <= 64
399-
#define DEF_INT_C_HANDLER(l) \
400-
__unused void *xtensa_int##l##_c(void *interrupted_stack) \
401-
{ \
402-
uint32_t irqs, intenable, m; \
403-
usage_stop(); \
404-
__asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
405-
__asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \
406-
irqs &= intenable; \
407-
while ((m = _xtensa_handle_one_int##l(0, irqs))) { \
408-
irqs ^= m; \
409-
__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
410-
} \
411-
__asm__ volatile("rsr.interrupt1 %0" : "=r"(irqs)); \
412-
__asm__ volatile("rsr.intenable1 %0" : "=r"(intenable)); \
413-
irqs &= intenable; \
414-
while ((m = _xtensa_handle_one_int##l(1, irqs))) { \
415-
irqs ^= m; \
416-
__asm__ volatile("wsr.intclear1 %0" : : "r"(m)); \
417-
} \
418-
return return_to(interrupted_stack); \
419-
}
420-
#endif /* XCHAL_NUM_INTERRUPTS > 32 && XCHAL_NUM_INTERRUPTS <= 64 */
421-
422-
#if XCHAL_NUM_INTERRUPTS > 64 && XCHAL_NUM_INTERRUPTS <= 96
423-
#define DEF_INT_C_HANDLER(l) \
424-
__unused void *xtensa_int##l##_c(void *interrupted_stack) \
425-
{ \
426-
uint32_t irqs, intenable, m; \
427-
usage_stop(); \
428-
__asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
429-
__asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \
430-
irqs &= intenable; \
431-
while ((m = _xtensa_handle_one_int##l(0, irqs))) { \
432-
irqs ^= m; \
433-
__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
434-
} \
435-
__asm__ volatile("rsr.interrupt1 %0" : "=r"(irqs)); \
436-
__asm__ volatile("rsr.intenable1 %0" : "=r"(intenable)); \
437-
irqs &= intenable; \
438-
while ((m = _xtensa_handle_one_int##l(1, irqs))) { \
439-
irqs ^= m; \
440-
__asm__ volatile("wsr.intclear1 %0" : : "r"(m)); \
441-
} \
442-
__asm__ volatile("rsr.interrupt2 %0" : "=r"(irqs)); \
443-
__asm__ volatile("rsr.intenable2 %0" : "=r"(intenable)); \
444-
irqs &= intenable; \
445-
while ((m = _xtensa_handle_one_int##l(2, irqs))) { \
446-
irqs ^= m; \
447-
__asm__ volatile("wsr.intclear2 %0" : : "r"(m)); \
448-
} \
449-
return return_to(interrupted_stack); \
450-
}
451-
#endif /* XCHAL_NUM_INTERRUPTS > 64 && XCHAL_NUM_INTERRUPTS <= 96 */
452-
453-
#if XCHAL_NUM_INTERRUPTS > 96
454-
#define DEF_INT_C_HANDLER(l) \
455-
__unused void *xtensa_int##l##_c(void *interrupted_stack) \
456-
{ \
457-
uint32_t irqs, intenable, m; \
458-
usage_stop(); \
459-
__asm__ volatile("rsr.interrupt %0" : "=r"(irqs)); \
460-
__asm__ volatile("rsr.intenable %0" : "=r"(intenable)); \
461-
irqs &= intenable; \
462-
while ((m = _xtensa_handle_one_int##l(0, irqs))) { \
463-
irqs ^= m; \
464-
__asm__ volatile("wsr.intclear %0" : : "r"(m)); \
465-
} \
466-
__asm__ volatile("rsr.interrupt1 %0" : "=r"(irqs)); \
467-
__asm__ volatile("rsr.intenable1 %0" : "=r"(intenable)); \
468-
irqs &= intenable; \
469-
while ((m = _xtensa_handle_one_int##l(1, irqs))) { \
470-
irqs ^= m; \
471-
__asm__ volatile("wsr.intclear1 %0" : : "r"(m)); \
472-
} \
473-
__asm__ volatile("rsr.interrupt2 %0" : "=r"(irqs)); \
474-
__asm__ volatile("rsr.intenable2 %0" : "=r"(intenable)); \
475-
irqs &= intenable; \
476-
while ((m = _xtensa_handle_one_int##l(2, irqs))) { \
477-
irqs ^= m; \
478-
__asm__ volatile("wsr.intclear2 %0" : : "r"(m)); \
479-
} \
480-
__asm__ volatile("rsr.interrupt3 %0" : "=r"(irqs)); \
481-
__asm__ volatile("rsr.intenable3 %0" : "=r"(intenable)); \
482-
irqs &= intenable; \
483-
while ((m = _xtensa_handle_one_int##l(3, irqs))) { \
484-
irqs ^= m; \
485-
__asm__ volatile("wsr.intclear3 %0" : : "r"(m)); \
486-
} \
487-
return return_to(interrupted_stack); \
488-
}
489-
#endif /* XCHAL_NUM_INTERRUPTS > 96 */
369+
#define DECLARE_IRQ(lvl) \
370+
{ \
371+
XCHAL_INTLEVEL##lvl##_MASK, \
372+
}
373+
#elif XCHAL_NUM_INTERRUPTS <= 64
374+
#define DECLARE_IRQ(lvl) \
375+
{ \
376+
XCHAL_INTLEVEL##lvl##_MASK, \
377+
XCHAL_INTLEVEL##lvl##_MASK1, \
378+
}
379+
#elif XCHAL_NUM_INTERRUPTS <= 96
380+
#define DECLARE_IRQ(lvl) \
381+
{ \
382+
XCHAL_INTLEVEL##lvl##_MASK, \
383+
XCHAL_INTLEVEL##lvl##_MASK1, \
384+
XCHAL_INTLEVEL##lvl##_MASK2, \
385+
}
386+
#elif XCHAL_NUM_INTERRUPTS <= 128
387+
#define DECLARE_IRQ(lvl) \
388+
{ \
389+
XCHAL_INTLEVEL##lvl##_MASK, \
390+
XCHAL_INTLEVEL##lvl##_MASK1, \
391+
XCHAL_INTLEVEL##lvl##_MASK2, \
392+
XCHAL_INTLEVEL##lvl##_MASK3, \
393+
}
394+
#else
395+
#error "xtensa supports up to 128 interrupts"
396+
#endif
490397

491398
#if XCHAL_HAVE_NMI
492399
#define MAX_INTR_LEVEL XCHAL_NMILEVEL
@@ -497,6 +404,102 @@ __unused void *xtensa_int##l##_c(void *interrupted_stack) \
497404
#define MAX_INTR_LEVEL 0
498405
#endif
499406

407+
#define GRP_COUNT (ROUND_UP(XCHAL_NUM_INTERRUPTS, 32) / 32)
408+
409+
static const uint32_t xtensa_lvl_mask[MAX_INTR_LEVEL][GRP_COUNT] = {
410+
#if MAX_INTR_LEVEL >= 1
411+
DECLARE_IRQ(1),
412+
#endif
413+
#if MAX_INTR_LEVEL >= 2
414+
DECLARE_IRQ(2),
415+
#endif
416+
#if MAX_INTR_LEVEL >= 3
417+
DECLARE_IRQ(3),
418+
#endif
419+
#if MAX_INTR_LEVEL >= 4
420+
DECLARE_IRQ(4),
421+
#endif
422+
#if MAX_INTR_LEVEL >= 5
423+
DECLARE_IRQ(5),
424+
#endif
425+
#if MAX_INTR_LEVEL >= 6
426+
DECLARE_IRQ(6),
427+
#endif
428+
#if MAX_INTR_LEVEL >= 7
429+
DECLARE_IRQ(7),
430+
#endif
431+
};
432+
433+
/* Handles all interrupts for given IRQ Level.
434+
* - Supports up to 128 interrupts (max supported by Xtensa)
435+
* - Supports all IRQ levels
436+
* - Uses __builtin_ctz that for most xtensa configurations will be optimized using nsau instruction
437+
*/
438+
__unused static void xtensa_handle_irq_lvl(int irq_lvl)
439+
{
440+
int irq;
441+
uint32_t irq_mask;
442+
uint32_t intenable;
443+
#if XCHAL_NUM_INTERRUPTS > 0
444+
__asm__ volatile("rsr.interrupt %0" : "=r"(irq_mask));
445+
__asm__ volatile("rsr.intenable %0" : "=r"(intenable));
446+
irq_mask &= intenable;
447+
irq_mask &= xtensa_lvl_mask[irq_lvl - 1][0];
448+
while (irq_mask) {
449+
irq = __builtin_ctz(irq_mask);
450+
_sw_isr_table[irq].isr(_sw_isr_table[irq].arg);
451+
__asm__ volatile("wsr.intclear %0" : : "r"(BIT(irq)));
452+
irq_mask ^= BIT(irq);
453+
}
454+
#endif
455+
#if XCHAL_NUM_INTERRUPTS > 32
456+
__asm__ volatile("rsr.interrupt1 %0" : "=r"(irq_mask));
457+
__asm__ volatile("rsr.intenable1 %0" : "=r"(intenable));
458+
irq_mask &= intenable;
459+
irq_mask &= xtensa_lvl_mask[irq_lvl - 1][1];
460+
while (irq_mask) {
461+
irq = __builtin_ctz(irq_mask);
462+
_sw_isr_table[irq + 32].isr(_sw_isr_table[irq + 32].arg);
463+
__asm__ volatile("wsr.intclear1 %0" : : "r"(BIT(irq)));
464+
irq_mask ^= BIT(irq);
465+
}
466+
#endif
467+
468+
#if XCHAL_NUM_INTERRUPTS > 64
469+
__asm__ volatile("rsr.interrupt2 %0" : "=r"(irq_mask));
470+
__asm__ volatile("rsr.intenable2 %0" : "=r"(intenable));
471+
irq_mask &= intenable;
472+
irq_mask &= xtensa_lvl_mask[irq_lvl - 1][2];
473+
while (irq_mask) {
474+
irq = __builtin_ctz(irq_mask);
475+
_sw_isr_table[irq + 64].isr(_sw_isr_table[irq + 64].arg);
476+
__asm__ volatile("wsr.intclear2 %0" : : "r"(BIT(irq)));
477+
irq_mask ^= BIT(irq);
478+
}
479+
#endif
480+
#if XCHAL_NUM_INTERRUPTS > 96
481+
__asm__ volatile("rsr.interrupt3 %0" : "=r"(irq_mask));
482+
__asm__ volatile("rsr.intenable3 %0" : "=r"(intenable));
483+
irq_mask &= intenable;
484+
irq_mask &= xtensa_lvl_mask[irq_lvl - 1][3];
485+
486+
while (irq_mask) {
487+
irq = __builtin_ctz(irq_mask);
488+
_sw_isr_table[irq + 96].isr(_sw_isr_table[irq + 96].arg);
489+
__asm__ volatile("wsr.intclear3 %0" : : "r"(BIT(irq)));
490+
irq_mask ^= BIT(irq);
491+
}
492+
#endif
493+
}
494+
495+
#define DEF_INT_C_HANDLER(l) \
496+
__unused void *xtensa_int##l##_c(void *interrupted_stack) \
497+
{ \
498+
usage_stop(); \
499+
xtensa_handle_irq_lvl(l); \
500+
return return_to(interrupted_stack); \
501+
}
502+
500503
#if MAX_INTR_LEVEL >= 2
501504
DEF_INT_C_HANDLER(2)
502505
#endif

0 commit comments

Comments
 (0)