 * @file
 * @brief Xtensa specific kernel interface header
 * This header contains the Xtensa specific kernel interface. It is included
- * by the generic kernel interface header (include/arch/cpu.h)
+ * by the generic kernel interface header (include/zephyr/arch/cpu.h)
 */

#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_
#include <zephyr/arch/xtensa/thread_stack.h>
#include <zephyr/sys/slist.h>

+#include <zephyr/drivers/timer/system_timer.h>
+
#include <zephyr/arch/xtensa/xtensa_mmu.h>

/**
@@ -61,7 +63,23 @@ struct arch_mem_domain {
	sys_snode_t node;
};

+/**
+ * @brief Generate hardware exception.
+ *
+ * This generates a hardware exception which is used by ARCH_EXCEPT().
+ *
+ * @param reason_p Reason for exception.
+ */
extern void xtensa_arch_except(int reason_p);
+
+/**
+ * @brief Generate kernel oops.
+ *
+ * This generates a kernel oops which is used by arch_syscall_oops().
+ *
+ * @param reason_p Reason for exception.
+ * @param ssf Stack pointer.
+ */
extern void xtensa_arch_kernel_oops(int reason_p, void *ssf);

#ifdef CONFIG_USERSPACE
@@ -79,9 +97,9 @@ extern void xtensa_arch_kernel_oops(int reason_p, void *ssf);
#else

#define ARCH_EXCEPT(reason_p) do { \
-	xtensa_arch_except(reason_p); \
-	CODE_UNREACHABLE; \
-} while (false)
+		xtensa_arch_except(reason_p); \
+		CODE_UNREACHABLE; \
+	} while (false)

#endif

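As a usage sketch (not part of this change): generic code reaches the Xtensa path above through `ARCH_EXCEPT()`. `K_ERR_KERNEL_PANIC` is one of the standard reason codes from `zephyr/fatal_types.h`; the helper below is hypothetical.

```c
#include <zephyr/kernel.h>

/* Hypothetical helper: abort on an unrecoverable internal error.
 * On Xtensa, ARCH_EXCEPT() expands to xtensa_arch_except(), which
 * raises a hardware exception that is routed to the fatal handler.
 */
static void die_on_corruption(void)
{
	ARCH_EXCEPT(K_ERR_KERNEL_PANIC);
	/* Not reached: the macro ends with CODE_UNREACHABLE. */
}
```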
@@ -93,44 +111,47 @@ __syscall void xtensa_user_fault(unsigned int reason);
extern void z_irq_priority_set(uint32_t irq, uint32_t prio, uint32_t flags);

#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
-	{ \
-		Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
-	}
-
-extern uint32_t sys_clock_cycle_get_32(void);
+	{ \
+		Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
+	}

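Portable code reaches `ARCH_IRQ_CONNECT()` through the generic `IRQ_CONNECT()` wrapper in `zephyr/irq.h`. A minimal sketch, with a made-up interrupt line and priority:

```c
#include <zephyr/kernel.h>
#include <zephyr/irq.h>

#define MY_IRQ      5	/* hypothetical interrupt number */
#define MY_IRQ_PRIO 1	/* hypothetical priority */

static void my_isr(const void *arg)
{
	ARG_UNUSED(arg);
	/* Service the device here. */
}

static void wire_up_irq(void)
{
	/* Expands to ARCH_IRQ_CONNECT(), i.e. a Z_ISR_DECLARE() entry. */
	IRQ_CONNECT(MY_IRQ, MY_IRQ_PRIO, my_isr, NULL, 0);
	irq_enable(MY_IRQ);
}
```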
+/** Implementation of @ref arch_k_cycle_get_32. */
static inline uint32_t arch_k_cycle_get_32(void)
{
	return sys_clock_cycle_get_32();
}

-extern uint64_t sys_clock_cycle_get_64(void);
-
+/** Implementation of @ref arch_k_cycle_get_64. */
static inline uint64_t arch_k_cycle_get_64(void)
{
	return sys_clock_cycle_get_64();
}

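These two are the backends of the portable `k_cycle_get_32()`/`k_cycle_get_64()` APIs. A small sketch of the usual delta-measurement pattern:

```c
#include <zephyr/kernel.h>

static uint32_t cycles_spent(void (*fn)(void))
{
	uint32_t start = k_cycle_get_32(); /* -> arch_k_cycle_get_32() */

	fn();

	/* Unsigned subtraction stays correct across one counter wrap. */
	return k_cycle_get_32() - start;
}
```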
+/** Implementation of @ref arch_nop. */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile ("nop");
}

+/**
+ * @brief Lock VECBASE if supported by hardware.
+ *
+ * Bit 0 of VECBASE acts as a lock bit on hardware supporting
+ * this feature. When this bit is set, VECBASE cannot be changed
+ * until it is cleared by a hardware reset. When the hardware does
+ * not support this bit, it is hardwired to 0.
+ */
static ALWAYS_INLINE void xtensa_vecbase_lock(void)
{
	int vecbase;

	__asm__ volatile ("rsr.vecbase %0" : "=r"(vecbase));
-
-	/* In some targets the bit 0 of VECBASE works as lock bit.
-	 * When this bit set, VECBASE can't be changed until it is cleared by
-	 * reset. When the target does not have it, it is hardwired to 0.
-	 **/
	__asm__ volatile ("wsr.vecbase %0; rsync" : : "r"(vecbase | 1));
}

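Since the function reads VECBASE back before setting bit 0, a caller can also probe the lock state. A sketch assuming the lock bit reads back as written on parts that implement it; on parts without the feature, bit 0 is hardwired to 0 and this always returns false. The helper name is hypothetical.

```c
static inline bool xtensa_vecbase_locked(void)
{
	uint32_t vecbase;

	__asm__ volatile ("rsr.vecbase %0" : "=r"(vecbase));

	return (vecbase & 1U) != 0U;
}
```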
-#if defined(CONFIG_XTENSA_RPO_CACHE)
-#if defined(CONFIG_ARCH_HAS_COHERENCE)
+#if defined(CONFIG_XTENSA_RPO_CACHE) || defined(__DOXYGEN__)
+#if defined(CONFIG_ARCH_HAS_COHERENCE) || defined(__DOXYGEN__)
+/** Implementation of @ref arch_mem_coherent. */
static inline bool arch_mem_coherent(void *ptr)
{
	size_t addr = (size_t) ptr;
@@ -139,13 +160,39 @@ static inline bool arch_mem_coherent(void *ptr)
}
#endif

+/**
+ * @brief Test if a pointer is in the cached region.
+ *
+ * Some hardware may map the same physical memory twice
+ * so that it can be seen in both (incoherent) cached mappings
+ * and a coherent "shared" area. This tests if a particular
+ * pointer is within the cached, incoherent area.
+ *
+ * @param ptr Pointer
+ *
+ * @retval True if pointer is in the cached region.
+ * @retval False if pointer is not in the cached region.
+ */
static inline bool arch_xtensa_is_ptr_cached(void *ptr)
{
	size_t addr = (size_t) ptr;

	return (addr >> 29) == CONFIG_XTENSA_CACHED_REGION;
}

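A typical use is picking the coherent alias of a buffer before handing it to another, non-coherent agent. A sketch using `arch_xtensa_uncached_ptr()`, which is declared further down in this header (the `__sparse_cache` annotation it carries compiles away outside sparse runs):

```c
/* Sketch: return an alias of p suitable for sharing. */
static void *shareable(void *p)
{
	if (arch_xtensa_is_ptr_cached(p)) {
		return arch_xtensa_uncached_ptr(p);
	}

	return p;
}
```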
+/**
+ * @brief Test if a pointer is in the un-cached region.
+ *
+ * Some hardware may map the same physical memory twice
+ * so that it can be seen in both (incoherent) cached mappings
+ * and a coherent "shared" area. This tests if a particular
+ * pointer is within the un-cached, coherent area.
+ *
+ * @param ptr Pointer
+ *
+ * @retval True if pointer is not in the cached region.
+ * @retval False if pointer is in the cached region.
+ */
static inline bool arch_xtensa_is_ptr_uncached(void *ptr)
{
	size_t addr = (size_t) ptr;
@@ -173,6 +220,7 @@ static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t
		return (addr & ~(7U << 29)) | rto;
	}
}
+
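A worked example of the fall-through branch above, with illustrative values: replacing the top three address bits moves an offset from one 512 MiB region to another.

```c
/* With rto == (5U << 29):
 *
 *   (0x80001000 & ~(7U << 29)) | (5U << 29) == 0xA0001000
 *
 * i.e. an offset in region 4 re-expressed in region 5.
 */
```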
/**
 * @brief Return cached pointer to a RAM address
 *
@@ -271,10 +319,14 @@ static inline void *arch_xtensa_uncached_ptr(void __sparse_cache *ptr)
	addr += addrincr; \
} while (0)

-#define ARCH_XTENSA_SET_RPO_TLB() do { \
-	register uint32_t addr = 0, addrincr = 0x20000000; \
-	FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
-} while (0)
+/**
+ * @brief Set up RPO TLB registers.
+ */
+#define ARCH_XTENSA_SET_RPO_TLB() \
+	do { \
+		register uint32_t addr = 0, addrincr = 0x20000000; \
+		FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
+	} while (0)

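For orientation, the expansion amounts to one `_SET_ONE_TLB()` step per 512 MiB region, with `addr` advancing by `addrincr`. A conceptual sketch only, since the per-entry TLB writes inside `_SET_ONE_TLB()` are not shown in this hunk:

```c
/* Conceptual equivalent of ARCH_XTENSA_SET_RPO_TLB() (sketch only). */
for (uint32_t region = 0, base = 0; region < 8;
     region++, base += 0x20000000) {
	/* _SET_ONE_TLB(region): program the TLB entry covering
	 * [base, base + 0x20000000).
	 */
}
```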
#else /* CONFIG_XTENSA_RPO_CACHE */

@@ -304,7 +356,17 @@ static inline void *arch_xtensa_uncached_ptr(void *ptr)

#endif /* CONFIG_XTENSA_RPO_CACHE */

-#ifdef CONFIG_XTENSA_MMU
+#if defined(CONFIG_XTENSA_MMU) || defined(__DOXYGEN__)
+/**
+ * @brief Perform additional steps after MMU initialization.
+ *
+ * This performs additional steps related to memory management
+ * after the main MMU initialization code. This needs to be defined
+ * in the SoC layer. The default is to do nothing.
+ *
+ * @param is_core0 True if this is called while executing on
+ *                 CPU core #0.
+ */
extern void arch_xtensa_mmu_post_init(bool is_core0);
#endif

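A sketch of what an SoC-layer definition could look like; the body and the core-0 gating are illustrative only:

```c
/* Hypothetical SoC-layer definition: runs after the common Xtensa MMU
 * bring-up has finished on each core.
 */
void arch_xtensa_mmu_post_init(bool is_core0)
{
	if (is_core0) {
		/* One-time setup, e.g. extra SoC-specific mappings. */
	}

	/* Per-core steps, if any, go here. */
}
```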