Skip to content

Commit 05b0342

Browse files
committed
Replace interrupt masking with spinlock in semaphore for SMP support
The original semaphore implementation used NOSCHED_ENTER() and NOSCHED_LEAVE() to protect critical sections by disabling interrupts, which was sufficient in single-core environments. To support SMP, we replace these macros with a spinlock based on RV32A atomic instructions. This ensures safe access to shared semaphore state, including the count and wait queue, when multiple harts operate concurrently. This change is necessary to avoid race conditions during mo_sem_wait(), mo_sem_signal(), and other semaphore operations under multi-hart scheduling.
1 parent 779dc0b commit 05b0342

File tree

1 file changed

+16
-12
lines changed

1 file changed

+16
-12
lines changed

kernel/semaphore.c

Lines changed: 16 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -8,6 +8,7 @@
88
*/
99

1010
#include <hal.h>
11+
#include <spinlock.h>
1112
#include <sys/semaphore.h>
1213
#include <sys/task.h>
1314

@@ -25,6 +26,9 @@ struct sem_t {
2526
/* Magic number for semaphore validation */
2627
#define SEM_MAGIC 0x53454D00 /* "SEM\0" */
2728

29+
static spinlock_t semaphore_lock = SPINLOCK_INITIALIZER;
30+
static uint32_t semaphore_flags = 0;
31+
2832
static inline bool sem_is_valid(const sem_t *s)
2933
{
3034
return s && s->magic == SEM_MAGIC && s->wait_q && s->max_waiters > 0 &&
@@ -80,11 +84,11 @@ int32_t mo_sem_destroy(sem_t *s)
8084
if (unlikely(!sem_is_valid(s)))
8185
return ERR_FAIL;
8286

83-
NOSCHED_ENTER();
87+
spin_lock_irqsave(&semaphore_lock, &semaphore_flags);
8488

8589
/* Check if any tasks are waiting - unsafe to destroy if so */
8690
if (unlikely(queue_count(s->wait_q) > 0)) {
87-
NOSCHED_LEAVE();
91+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
8892
return ERR_TASK_BUSY;
8993
}
9094

@@ -93,7 +97,7 @@ int32_t mo_sem_destroy(sem_t *s)
9397
queue_t *wait_q = s->wait_q;
9498
s->wait_q = NULL;
9599

96-
NOSCHED_LEAVE();
100+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
97101

98102
/* Clean up resources outside critical section */
99103
queue_destroy(wait_q);
@@ -108,19 +112,19 @@ void mo_sem_wait(sem_t *s)
108112
panic(ERR_SEM_OPERATION);
109113
}
110114

111-
NOSCHED_ENTER();
115+
spin_lock_irqsave(&semaphore_lock, &semaphore_flags);
112116

113117
/* Fast path: resource available and no waiters (preserves FIFO ordering) */
114118
if (likely(s->count > 0 && queue_count(s->wait_q) == 0)) {
115119
s->count--;
116-
NOSCHED_LEAVE();
120+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
117121
return;
118122
}
119123

120124
/* Slow path: must wait for resource */
121125
/* Verify wait queue has capacity before attempting to block */
122126
if (unlikely(queue_count(s->wait_q) >= s->max_waiters)) {
123-
NOSCHED_LEAVE();
127+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
124128
panic(ERR_SEM_OPERATION); /* Queue overflow - system error */
125129
}
126130

@@ -145,15 +149,15 @@ int32_t mo_sem_trywait(sem_t *s)
145149

146150
int32_t result = ERR_FAIL;
147151

148-
NOSCHED_ENTER();
152+
spin_lock_irqsave(&semaphore_lock, &semaphore_flags);
149153

150154
/* Only succeed if resource available AND no waiters (preserves FIFO) */
151155
if (s->count > 0 && queue_count(s->wait_q) == 0) {
152156
s->count--;
153157
result = ERR_OK;
154158
}
155159

156-
NOSCHED_LEAVE();
160+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
157161
return result;
158162
}
159163

@@ -167,7 +171,7 @@ void mo_sem_signal(sem_t *s)
167171
bool should_yield = false;
168172
tcb_t *awakened_task = NULL;
169173

170-
NOSCHED_ENTER();
174+
spin_lock_irqsave(&semaphore_lock, &semaphore_flags);
171175

172176
/* Check if any tasks are waiting for resources */
173177
if (queue_count(s->wait_q) > 0) {
@@ -198,7 +202,7 @@ void mo_sem_signal(sem_t *s)
198202
*/
199203
}
200204

201-
NOSCHED_LEAVE();
205+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
202206

203207
/* Yield outside critical section if we awakened a task.
204208
* This improves system responsiveness by allowing the awakened task to run
@@ -228,9 +232,9 @@ int32_t mo_sem_waiting_count(sem_t *s)
228232

229233
int32_t count;
230234

231-
NOSCHED_ENTER();
235+
spin_lock_irqsave(&semaphore_lock, &semaphore_flags);
232236
count = queue_count(s->wait_q);
233-
NOSCHED_LEAVE();
237+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
234238

235239
return count;
236240
}

0 commit comments

Comments (0)