Skip to content

Commit 0ee1e28

Browse files
dcpleung authored and nashif committed
xtensa: polish doxygen and add to missing doc
This polishes doxygen to, hopefully, make it better looking on the API doc. Also adds missing doc to various functions and macros. Signed-off-by: Daniel Leung <[email protected]>
1 parent 035c8d8 commit 0ee1e28

File tree

6 files changed

+196
-45
lines changed

6 files changed

+196
-45
lines changed

include/zephyr/arch/xtensa/arch.h

Lines changed: 85 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
* @file
88
* @brief Xtensa specific kernel interface header
99
* This header contains the Xtensa specific kernel interface. It is included
10-
* by the generic kernel interface header (include/arch/cpu.h)
10+
* by the generic kernel interface header (include/zephyr/arch/cpu.h)
1111
*/
1212

1313
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_ARCH_H_
@@ -33,6 +33,8 @@
3333
#include <zephyr/arch/xtensa/thread_stack.h>
3434
#include <zephyr/sys/slist.h>
3535

36+
#include <zephyr/drivers/timer/system_timer.h>
37+
3638
#include <zephyr/arch/xtensa/xtensa_mmu.h>
3739

3840
/**
@@ -61,7 +63,23 @@ struct arch_mem_domain {
6163
sys_snode_t node;
6264
};
6365

66+
/**
67+
* @brief Generate hardware exception.
68+
*
69+
* This generates hardware exception which is used by ARCH_EXCEPT().
70+
*
71+
* @param reason_p Reason for exception.
72+
*/
6473
extern void xtensa_arch_except(int reason_p);
74+
75+
/**
76+
* @brief Generate kernel oops.
77+
*
78+
* This generates kernel oops which is used by arch_syscall_oops().
79+
*
80+
* @param reason_p Reason for exception.
81+
* @param ssf Stack pointer.
82+
*/
6583
extern void xtensa_arch_kernel_oops(int reason_p, void *ssf);
6684

6785
#ifdef CONFIG_USERSPACE
@@ -79,9 +97,9 @@ extern void xtensa_arch_kernel_oops(int reason_p, void *ssf);
7997
#else
8098

8199
#define ARCH_EXCEPT(reason_p) do { \
82-
xtensa_arch_except(reason_p); \
83-
CODE_UNREACHABLE; \
84-
} while (false)
100+
xtensa_arch_except(reason_p); \
101+
CODE_UNREACHABLE; \
102+
} while (false)
85103

86104
#endif
87105

@@ -93,44 +111,47 @@ __syscall void xtensa_user_fault(unsigned int reason);
93111
extern void z_irq_priority_set(uint32_t irq, uint32_t prio, uint32_t flags);
94112

95113
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
96-
{ \
97-
Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
98-
}
99-
100-
extern uint32_t sys_clock_cycle_get_32(void);
114+
{ \
115+
Z_ISR_DECLARE(irq_p, flags_p, isr_p, isr_param_p); \
116+
}
101117

118+
/** Implementation of @ref arch_k_cycle_get_32. */
102119
static inline uint32_t arch_k_cycle_get_32(void)
103120
{
104121
return sys_clock_cycle_get_32();
105122
}
106123

107-
extern uint64_t sys_clock_cycle_get_64(void);
108-
124+
/** Implementation of @ref arch_k_cycle_get_64. */
109125
static inline uint64_t arch_k_cycle_get_64(void)
110126
{
111127
return sys_clock_cycle_get_64();
112128
}
113129

130+
/** Implementation of @ref arch_nop. */
114131
static ALWAYS_INLINE void arch_nop(void)
115132
{
116133
__asm__ volatile("nop");
117134
}
118135

136+
/**
137+
* @brief Lock VECBASE if supported by hardware.
138+
*
139+
* The bit 0 of VECBASE acts as a lock bit on hardware supporting
140+
* this feature. When this bit is set, VECBASE cannot be changed
141+
* until it is cleared by hardware reset. When the hardware does not
142+
* support this bit, it is hardwired to 0.
143+
*/
119144
static ALWAYS_INLINE void xtensa_vecbase_lock(void)
120145
{
121146
int vecbase;
122147

123148
__asm__ volatile("rsr.vecbase %0" : "=r" (vecbase));
124-
125-
/* In some targets the bit 0 of VECBASE works as lock bit.
126-
* When this bit set, VECBASE can't be changed until it is cleared by
127-
* reset. When the target does not have it, it is hardwired to 0.
128-
**/
129149
__asm__ volatile("wsr.vecbase %0; rsync" : : "r" (vecbase | 1));
130150
}
131151

132-
#if defined(CONFIG_XTENSA_RPO_CACHE)
133-
#if defined(CONFIG_ARCH_HAS_COHERENCE)
152+
#if defined(CONFIG_XTENSA_RPO_CACHE) || defined(__DOXYGEN__)
153+
#if defined(CONFIG_ARCH_HAS_COHERENCE) || defined(__DOXYGEN__)
154+
/** Implementation of @ref arch_mem_coherent. */
134155
static inline bool arch_mem_coherent(void *ptr)
135156
{
136157
size_t addr = (size_t) ptr;
@@ -139,13 +160,39 @@ static inline bool arch_mem_coherent(void *ptr)
139160
}
140161
#endif
141162

163+
/**
164+
* @brief Test if a pointer is in cached region.
165+
*
166+
* Some hardware may map the same physical memory twice
167+
* so that it can be seen in both (incoherent) cached mappings
168+
* and a coherent "shared" area. This tests if a particular
169+
* pointer is within the cached, coherent area.
170+
*
171+
* @param ptr Pointer
172+
*
173+
* @retval True if pointer is in cached region.
174+
* @retval False if pointer is not in cached region.
175+
*/
142176
static inline bool arch_xtensa_is_ptr_cached(void *ptr)
143177
{
144178
size_t addr = (size_t) ptr;
145179

146180
return (addr >> 29) == CONFIG_XTENSA_CACHED_REGION;
147181
}
148182

183+
/**
184+
* @brief Test if a pointer is in un-cached region.
185+
*
186+
* Some hardware may map the same physical memory twice
187+
* so that it can be seen in both (incoherent) cached mappings
188+
* and a coherent "shared" area. This tests if a particular
189+
* pointer is within the un-cached, incoherent area.
190+
*
191+
* @param ptr Pointer
192+
*
193+
* @retval True if pointer is not in cached region.
194+
* @retval False if pointer is in cached region.
195+
*/
149196
static inline bool arch_xtensa_is_ptr_uncached(void *ptr)
150197
{
151198
size_t addr = (size_t) ptr;
@@ -173,6 +220,7 @@ static ALWAYS_INLINE uint32_t z_xtrpoflip(uint32_t addr, uint32_t rto, uint32_t
173220
return (addr & ~(7U << 29)) | rto;
174221
}
175222
}
223+
176224
/**
177225
* @brief Return cached pointer to a RAM address
178226
*
@@ -271,10 +319,14 @@ static inline void *arch_xtensa_uncached_ptr(void __sparse_cache *ptr)
271319
addr += addrincr; \
272320
} while (0)
273321

274-
#define ARCH_XTENSA_SET_RPO_TLB() do { \
275-
register uint32_t addr = 0, addrincr = 0x20000000; \
276-
FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
277-
} while (0)
322+
/**
323+
* @brief Setup RPO TLB registers.
324+
*/
325+
#define ARCH_XTENSA_SET_RPO_TLB() \
326+
do { \
327+
register uint32_t addr = 0, addrincr = 0x20000000; \
328+
FOR_EACH(_SET_ONE_TLB, (;), 0, 1, 2, 3, 4, 5, 6, 7); \
329+
} while (0)
278330

279331
#else /* CONFIG_XTENSA_RPO_CACHE */
280332

@@ -304,7 +356,17 @@ static inline void *arch_xtensa_uncached_ptr(void *ptr)
304356

305357
#endif /* CONFIG_XTENSA_RPO_CACHE */
306358

307-
#ifdef CONFIG_XTENSA_MMU
359+
#if defined(CONFIG_XTENSA_MMU) || defined(__DOXYGEN__)
360+
/**
361+
* @brief Perform additional steps after MMU initialization.
362+
*
363+
* This performs additional steps related to memory management
364+
* after the main MMU initialization code. This needs to be defined
365+
* in the SoC layer. Default is to do nothing.
366+
*
367+
* @param is_core0 True if this is called while executing on
368+
* CPU core #0.
369+
*/
308370
extern void arch_xtensa_mmu_post_init(bool is_core0);
309371
#endif
310372

include/zephyr/arch/xtensa/arch_inlines.h

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,26 +13,53 @@
1313
#include <zephyr/kernel_structs.h>
1414
#include <zsr.h>
1515

16+
/**
17+
* @brief Read a special register.
18+
*
19+
* @param sr Name of special register.
20+
*
21+
* @return Value of special register.
22+
*/
1623
#define XTENSA_RSR(sr) \
1724
({uint32_t v; \
1825
__asm__ volatile ("rsr." sr " %0" : "=a"(v)); \
1926
v; })
2027

28+
/**
29+
* @brief Write to a special register.
30+
*
31+
* @param sr Name of special register.
32+
* @param v Value to be written to special register.
33+
*/
2134
#define XTENSA_WSR(sr, v) \
2235
do { \
2336
__asm__ volatile ("wsr." sr " %0" : : "r"(v)); \
2437
} while (false)
2538

39+
/**
40+
* @brief Read a user register.
41+
*
42+
* @param ur Name of user register.
43+
*
44+
* @return Value of user register.
45+
*/
2646
#define XTENSA_RUR(ur) \
2747
({uint32_t v; \
2848
__asm__ volatile ("rur." ur " %0" : "=a"(v)); \
2949
v; })
3050

51+
/**
52+
* @brief Write to a user register.
53+
*
54+
* @param ur Name of user register.
55+
* @param v Value to be written to user register.
56+
*/
3157
#define XTENSA_WUR(ur, v) \
3258
do { \
3359
__asm__ volatile ("wur." ur " %0" : : "r"(v)); \
3460
} while (false)
3561

62+
/** Implementation of @ref arch_curr_cpu. */
3663
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
3764
{
3865
_cpu_t *cpu;
@@ -42,6 +69,7 @@ static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
4269
return cpu;
4370
}
4471

72+
/** Implementation of @ref arch_proc_id. */
4573
static ALWAYS_INLINE uint32_t arch_proc_id(void)
4674
{
4775
uint32_t prid;
@@ -54,6 +82,7 @@ static ALWAYS_INLINE uint32_t arch_proc_id(void)
5482
extern unsigned int soc_num_cpus;
5583
#endif
5684

85+
/** Implementation of @ref arch_num_cpus. */
5786
static ALWAYS_INLINE unsigned int arch_num_cpus(void)
5887
{
5988
#ifdef CONFIG_SOC_HAS_RUNTIME_NUM_CPUS

0 commit comments

Comments
 (0)