@@ -10,23 +10,17 @@
 
 /* There's a spinlock validation framework available when asserts are
  * enabled. It adds a relatively hefty overhead (about 3k or so) to
- * kernel code size, don't use on platforms known to be small. (Note
- * we're using the kconfig value here. This isn't defined for every
- * board, but the default of zero works well as an "infinity"
- * fallback. There is a DT_FLASH_SIZE parameter too, but that seems
- * even more poorly supported.
+ * kernel code size, don't use on platforms known to be small.
  */
-#if (CONFIG_FLASH_SIZE == 0) || (CONFIG_FLASH_SIZE > 32)
-#if defined(CONFIG_ASSERT) && (CONFIG_MP_NUM_CPUS < 4)
+#ifdef CONFIG_SPIN_VALIDATE
 #include <sys/__assert.h>
 #include <stdbool.h>
 struct k_spinlock;
 bool z_spin_lock_valid(struct k_spinlock *l);
 bool z_spin_unlock_valid(struct k_spinlock *l);
 void z_spin_lock_set_owner(struct k_spinlock *l);
-#define SPIN_VALIDATE
-#endif
-#endif
+BUILD_ASSERT_MSG(CONFIG_MP_NUM_CPUS < 4, "Too many CPUs for mask");
+#endif /* CONFIG_SPIN_VALIDATE */
 
 struct k_spinlock_key {
 	int key;
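The new BUILD_ASSERT_MSG() line turns the four-CPU limit into a hard compile-time check instead of a silent #if guard. In C11 terms it behaves like _Static_assert; a minimal sketch of the equivalent check (the exact macro expansion is an assumption, Zephyr's real definition lives in its toolchain headers):

/* Assumed C11 equivalent of the BUILD_ASSERT_MSG() in the hunk above:
 * fail the build, not the boot, if the CPU id can't fit in two bits.
 * CONFIG_MP_NUM_CPUS comes from Kconfig.
 */
_Static_assert(CONFIG_MP_NUM_CPUS < 4, "Too many CPUs for mask");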
@@ -39,15 +33,16 @@ struct k_spinlock {
 	atomic_t locked;
 #endif
 
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	/* Stores the thread that holds the lock with the locking CPU
 	 * ID in the bottom two bits.
 	 */
 	uintptr_t thread_cpu;
 #endif
 
-#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && !defined(SPIN_VALIDATE)
-/* If CONFIG_SMP and SPIN_VALIDATE are both not defined
+#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
+	!defined(CONFIG_SPIN_VALIDATE)
+/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
  * the k_spinlock struct will have no members. The result
  * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
  *
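The thread_cpu field packs two values into one word, relying on thread struct alignment to keep the low two bits free. A minimal standalone sketch of that encoding, with hypothetical pack_owner()/owner_cpu()/owner_thread() helpers (the kernel's real logic lives in z_spin_lock_set_owner() and the z_spin_*_valid() functions):

#include <stdint.h>
#include <assert.h>

/* Hypothetical helpers illustrating the thread_cpu encoding:
 * pointers to thread structs are at least 4-byte aligned, so a
 * CPU id of 0..3 fits in the two low bits alongside the pointer.
 */
static uintptr_t pack_owner(const void *thread, unsigned int cpu_id)
{
	return (uintptr_t)thread | (cpu_id & 3u);
}

static unsigned int owner_cpu(uintptr_t thread_cpu)
{
	return (unsigned int)(thread_cpu & 3u);
}

static const void *owner_thread(uintptr_t thread_cpu)
{
	return (const void *)(thread_cpu & ~(uintptr_t)3u);
}

int main(void)
{
	static uint32_t thread_stub;	/* 4-byte-aligned stand-in */
	uintptr_t tc = pack_owner(&thread_stub, 2);

	assert(owner_cpu(tc) == 2);
	assert(owner_thread(tc) == (const void *)&thread_stub);
	return 0;
}

That two-bit budget is exactly why the earlier hunk asserts CONFIG_MP_NUM_CPUS < 4.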
@@ -75,7 +70,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	 */
 	k.key = arch_irq_lock();
 
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
 #endif
 
@@ -84,7 +79,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	}
 #endif
 
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	z_spin_lock_set_owner(l);
 #endif
 	return k;
@@ -94,7 +89,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 					k_spinlock_key_t key)
 {
 	ARG_UNUSED(l);
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif
 
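Taken together, the lock/unlock hunks mean that with CONFIG_SPIN_VALIDATE enabled the kernel asserts on re-locking a lock the caller already holds and on unlocking a lock the caller doesn't hold. A usage sketch, assuming the include paths of this Zephyr era (the balanced key is what k_spin_unlock() needs to restore the interrupt state):

#include <zephyr.h>

static struct k_spinlock lock;
static int shared_counter;

void bump_counter(void)
{
	/* k_spin_lock() masks interrupts and (on SMP) spins for the lock */
	k_spinlock_key_t key = k_spin_lock(&lock);

	shared_counter++;	/* critical section */

	/* Must pass back the same key. With CONFIG_SPIN_VALIDATE=y,
	 * unlocking a lock this context doesn't hold trips the
	 * "Not my spinlock!" assertion, and calling k_spin_lock()
	 * again on `lock` before unlocking trips "Recursive spinlock".
	 */
	k_spin_unlock(&lock, key);
}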
@@ -117,7 +112,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
 {
 	ARG_UNUSED(l);
-#ifdef SPIN_VALIDATE
+#ifdef CONFIG_SPIN_VALIDATE
 	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif
 #ifdef CONFIG_SMP
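The last hunk covers k_spin_release(), which takes no key and therefore cannot restore the interrupt state: it only drops the lock, and gets the same ownership assertion as k_spin_unlock(). A hedged sketch of the hand-off pattern that motivates it (do_context_switch() is a hypothetical stand-in; the real consumers are internal scheduler paths):

#include <zephyr.h>

void do_context_switch(int key);	/* hypothetical */

void handoff(struct k_spinlock *l)
{
	k_spinlock_key_t key = k_spin_lock(l);

	/* Release the lock while keeping interrupts masked, because
	 * the saved key is consumed by a later operation rather than
	 * by k_spin_unlock().
	 */
	k_spin_release(l);		/* lock dropped; IRQs still masked */
	do_context_switch(key.key);	/* key consumed here, not by unlock */
}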