Skip to content

Commit c9d7840

Browse files
daor-otinashif
authored and committed
spinlock: Make SPIN_VALIDATE a Kconfig option.
SPIN_VALIDATE is, as it was previously, enabled by default when having fewer than 4 CPUs and either having no flash or a flash size greater than 32kB. Small targets, which need to have asserts enabled, can choose to have the spinlock validation enabled or not and thereby decide whether the overhead added is acceptable or not. Signed-off-by: Danny Oerndrup <[email protected]>
1 parent e181e1b commit c9d7840

File tree

4 files changed

+25
-21
lines changed

4 files changed

+25
-21
lines changed

include/spinlock.h

Lines changed: 12 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -10,23 +10,17 @@
1010

1111
/* There's a spinlock validation framework available when asserts are
1212
* enabled. It adds a relatively hefty overhead (about 3k or so) to
13-
* kernel code size, don't use on platforms known to be small. (Note
14-
* we're using the kconfig value here. This isn't defined for every
15-
* board, but the default of zero works well as an "infinity"
16-
* fallback. There is a DT_FLASH_SIZE parameter too, but that seems
17-
* even more poorly supported.
13+
* kernel code size, don't use on platforms known to be small.
1814
*/
19-
#if (CONFIG_FLASH_SIZE == 0) || (CONFIG_FLASH_SIZE > 32)
20-
#if defined(CONFIG_ASSERT) && (CONFIG_MP_NUM_CPUS < 4)
15+
#ifdef CONFIG_SPIN_VALIDATE
2116
#include <sys/__assert.h>
2217
#include <stdbool.h>
2318
struct k_spinlock;
2419
bool z_spin_lock_valid(struct k_spinlock *l);
2520
bool z_spin_unlock_valid(struct k_spinlock *l);
2621
void z_spin_lock_set_owner(struct k_spinlock *l);
27-
#define SPIN_VALIDATE
28-
#endif
29-
#endif
22+
BUILD_ASSERT_MSG(CONFIG_MP_NUM_CPUS < 4, "Too many CPUs for mask");
23+
#endif /* CONFIG_SPIN_VALIDATE */
3024

3125
struct k_spinlock_key {
3226
int key;
@@ -39,15 +33,16 @@ struct k_spinlock {
3933
atomic_t locked;
4034
#endif
4135

42-
#ifdef SPIN_VALIDATE
36+
#ifdef CONFIG_SPIN_VALIDATE
4337
/* Stores the thread that holds the lock with the locking CPU
4438
* ID in the bottom two bits.
4539
*/
4640
uintptr_t thread_cpu;
4741
#endif
4842

49-
#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && !defined(SPIN_VALIDATE)
50-
/* If CONFIG_SMP and SPIN_VALIDATE are both not defined
43+
#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
44+
!defined(CONFIG_SPIN_VALIDATE)
45+
/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
5146
* the k_spinlock struct will have no members. The result
5247
* is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
5348
*
@@ -75,7 +70,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
7570
*/
7671
k.key = arch_irq_lock();
7772

78-
#ifdef SPIN_VALIDATE
73+
#ifdef CONFIG_SPIN_VALIDATE
7974
__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
8075
#endif
8176

@@ -84,7 +79,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
8479
}
8580
#endif
8681

87-
#ifdef SPIN_VALIDATE
82+
#ifdef CONFIG_SPIN_VALIDATE
8883
z_spin_lock_set_owner(l);
8984
#endif
9085
return k;
@@ -94,7 +89,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
9489
k_spinlock_key_t key)
9590
{
9691
ARG_UNUSED(l);
97-
#ifdef SPIN_VALIDATE
92+
#ifdef CONFIG_SPIN_VALIDATE
9893
__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
9994
#endif
10095

@@ -117,7 +112,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
117112
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
118113
{
119114
ARG_UNUSED(l);
120-
#ifdef SPIN_VALIDATE
115+
#ifdef CONFIG_SPIN_VALIDATE
121116
__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
122117
#endif
123118
#ifdef CONFIG_SMP

kernel/sched.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -650,7 +650,7 @@ void *z_get_next_switch_handle(void *interrupted)
650650
#endif
651651
_current_cpu->swap_ok = 0;
652652
set_current(th);
653-
#ifdef SPIN_VALIDATE
653+
#ifdef CONFIG_SPIN_VALIDATE
654654
/* Changed _current! Update the spinlock
655655
* bookeeping so the validation doesn't get
656656
* confused when the "wrong" thread tries to

kernel/thread.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -853,7 +853,7 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
853853
/* These spinlock assertion predicates are defined here because having
854854
* them in spinlock.h is a giant header ordering headache.
855855
*/
856-
#ifdef SPIN_VALIDATE
856+
#ifdef CONFIG_SPIN_VALIDATE
857857
bool z_spin_lock_valid(struct k_spinlock *l)
858858
{
859859
uintptr_t thread_cpu = l->thread_cpu;
@@ -879,8 +879,7 @@ void z_spin_lock_set_owner(struct k_spinlock *l)
879879
{
880880
l->thread_cpu = _current_cpu->id | (uintptr_t)_current;
881881
}
882-
883-
#endif
882+
#endif /* CONFIG_SPIN_VALIDATE */
884883

885884
int z_impl_k_float_disable(struct k_thread *thread)
886885
{

subsys/debug/Kconfig

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -163,6 +163,16 @@ config ASSERT_LEVEL
163163
Level 1: on + warning in every file that includes __assert.h
164164
Level 2: on + no warning
165165

166+
config SPIN_VALIDATE
167+
bool "Enable spinlock validation"
168+
depends on ASSERT
169+
depends on MP_NUM_CPUS < 4
170+
default y if !FLASH || FLASH_SIZE > 32
171+
help
172+
There's a spinlock validation framework available when asserts are
173+
enabled. It adds a relatively hefty overhead (about 3k or so) to
174+
kernel code size, don't use on platforms known to be small.
175+
166176
config FORCE_NO_ASSERT
167177
bool "Force-disable no assertions"
168178
help

0 commit comments

Comments
 (0)