Commit 8ba911a
Replace interrupt masking with spinlock in mutex for SMP support
The mutex and condition variable implementation previously relied on NOSCHED_ENTER() and NOSCHED_LEAVE() to protect critical sections by disabling interrupts. That is sufficient on a single core, but it breaks down under SMP: masking interrupts on one hart does nothing to stop another hart from entering the same critical section, so races between harts remain possible.

This patch replaces those macros with a spinlock built on RV32A atomic instructions. The spinlock guards all shared state, including mutex ownership, waiter lists, and condition-variable wait queues. This ensures correct mutual exclusion and atomicity when multiple harts concurrently lock and unlock mutexes or signal condition variables.
1 parent 7ea07a7 commit 8ba911a
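
The diff consumes a spin_lock_irqsave()/spin_unlock_irqrestore() pair from the new <spinlock.h>, which is not itself part of this commit. Below is a minimal sketch of what such primitives can look like on RV32A, assuming GCC/Clang atomic builtins (which lower to amoswap.w with acquire/release ordering) and mstatus.MIE for local interrupt masking; all names and details here are illustrative, not the kernel's actual header:

#include <stdint.h>

/* Illustrative only: the real <spinlock.h> may differ. */
typedef struct {
    volatile uint32_t locked; /* 0 = free, 1 = held */
} spinlock_t;

#define SPINLOCK_INITIALIZER {0}

#define MSTATUS_MIE 0x8u /* mstatus bit 3: machine interrupt enable */

static inline void spin_lock(spinlock_t *s)
{
    /* Atomic swap (amoswap.w.aq on RV32A): spin while the old value was 1.
     * Acquire ordering keeps the critical section from being reordered
     * before the lock is actually held. */
    while (__atomic_exchange_n(&s->locked, 1u, __ATOMIC_ACQUIRE))
        ;
}

static inline void spin_unlock(spinlock_t *s)
{
    /* Release store: writes made inside the critical section become
     * visible to the next hart that acquires the lock. */
    __atomic_store_n(&s->locked, 0u, __ATOMIC_RELEASE);
}

static inline void spin_lock_irqsave(spinlock_t *s, uint32_t *flags)
{
    uint32_t mstatus;
    /* csrrc reads mstatus and clears MIE in one instruction, so no
     * interrupt can slip in between the read and the clear. */
    asm volatile("csrrc %0, mstatus, %1"
                 : "=r"(mstatus)
                 : "r"(MSTATUS_MIE)
                 : "memory");
    *flags = mstatus & MSTATUS_MIE; /* remember the prior interrupt state */
    spin_lock(s);
}

static inline void spin_unlock_irqrestore(spinlock_t *s, uint32_t flags)
{
    spin_unlock(s);
    if (flags & MSTATUS_MIE) /* re-enable only if previously enabled */
        asm volatile("csrrs zero, mstatus, %0" : : "r"(MSTATUS_MIE) : "memory");
}

Masking local interrupts before spinning matters because the same shared state is also touched from scheduler/interrupt context: a handler that tried to take a lock already held by the task it interrupted would otherwise spin forever on that hart.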

File tree

1 file changed: kernel/mutex.c (+47, -42)

kernel/mutex.c

Lines changed: 47 additions & 42 deletions
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -4,13 +4,17 @@
  * that are independent of the semaphore module.
  */
 
+#include <spinlock.h>
 #include <lib/libc.h>
 #include <sys/mutex.h>
 #include <sys/task.h>
 
 #include "private/error.h"
 #include "private/utils.h"
 
+static spinlock_t mutex_lock = SPINLOCK_INITIALIZER;
+static uint32_t mutex_flags = 0;
+
 /* Validate mutex pointer and structure integrity */
 static inline bool mutex_is_valid(const mutex_t *m)
 {
@@ -112,17 +116,17 @@ int32_t mo_mutex_destroy(mutex_t *m)
     if (unlikely(!mutex_is_valid(m)))
         return ERR_FAIL;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Check if any tasks are waiting */
     if (unlikely(!list_is_empty(m->waiters))) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }
 
     /* Check if mutex is still owned */
     if (unlikely(m->owner_tid != 0)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }
 
@@ -132,7 +136,7 @@ int32_t mo_mutex_destroy(mutex_t *m)
     m->waiters = NULL;
     m->owner_tid = 0;
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     /* Clean up resources outside critical section */
     list_destroy(waiters);
@@ -146,18 +150,18 @@ int32_t mo_mutex_lock(mutex_t *m)
 
     uint16_t self_tid = mo_task_id();
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Non-recursive: reject if caller already owns it */
     if (unlikely(m->owner_tid == self_tid)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }
 
     /* Fast path: mutex is free, acquire immediately */
     if (likely(m->owner_tid == 0)) {
         m->owner_tid = self_tid;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_OK;
     }
 
@@ -177,7 +181,7 @@ int32_t mo_mutex_trylock(mutex_t *m)
     uint16_t self_tid = mo_task_id();
     int32_t result = ERR_TASK_BUSY;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     if (unlikely(m->owner_tid == self_tid)) {
         /* Already owned by caller (non-recursive) */
@@ -189,7 +193,7 @@ int32_t mo_mutex_trylock(mutex_t *m)
     }
     /* else: owned by someone else, return ERR_TASK_BUSY */
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return result;
 }
 
@@ -203,41 +207,42 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
 
     uint16_t self_tid = mo_task_id();
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Non-recursive check */
     if (unlikely(m->owner_tid == self_tid)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }
 
     /* Fast path: mutex is free */
     if (m->owner_tid == 0) {
         m->owner_tid = self_tid;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_OK;
     }
 
     /* Slow path: must block with timeout using delay mechanism */
     tcb_t *self = kcb->task_current->data;
     if (unlikely(!list_pushback(m->waiters, self))) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
     }
 
     /* Set up timeout using task delay mechanism */
     self->delay = ticks;
     self->state = TASK_BLOCKED;
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     /* Yield and let the scheduler handle timeout via delay mechanism */
     mo_task_yield();
 
     /* Check result after waking up */
     int32_t result;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
+
     if (self->state == TASK_BLOCKED) {
         /* We woke up due to timeout, not mutex unlock */
         if (remove_self_from_waiters(m->waiters)) {
@@ -252,7 +257,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
         /* We were woken by mutex unlock - check ownership */
         result = (m->owner_tid == self_tid) ? ERR_OK : ERR_FAIL;
     }
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     return result;
 }
@@ -264,11 +269,11 @@ int32_t mo_mutex_unlock(mutex_t *m)
 
     uint16_t self_tid = mo_task_id();
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Verify caller owns the mutex */
     if (unlikely(m->owner_tid != self_tid)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_NOT_OWNER;
     }
 
@@ -296,7 +301,7 @@ int32_t mo_mutex_unlock(mutex_t *m)
         }
     }
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return ERR_OK;
 }
 
@@ -314,9 +319,9 @@ int32_t mo_mutex_waiting_count(mutex_t *m)
         return -1;
 
     int32_t count;
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     count = m->waiters ? (int32_t) m->waiters->length : 0;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     return count;
 }
@@ -348,11 +353,11 @@ int32_t mo_cond_destroy(cond_t *c)
     if (unlikely(!cond_is_valid(c)))
         return ERR_FAIL;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Check if any tasks are waiting */
     if (unlikely(!list_is_empty(c->waiters))) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }
 
@@ -361,7 +366,7 @@ int32_t mo_cond_destroy(cond_t *c)
     list_t *waiters = c->waiters;
     c->waiters = NULL;
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     /* Clean up resources outside critical section */
     list_destroy(waiters);
@@ -382,22 +387,22 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m)
     tcb_t *self = kcb->task_current->data;
 
     /* Atomically add to wait list */
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     if (unlikely(!list_pushback(c->waiters, self))) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
        panic(ERR_SEM_OPERATION);
     }
     self->state = TASK_BLOCKED;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     /* Release mutex */
     int32_t unlock_result = mo_mutex_unlock(m);
     if (unlikely(unlock_result != ERR_OK)) {
         /* Failed to unlock - remove from wait list and restore state */
-        NOSCHED_ENTER();
+        spin_lock_irqsave(&mutex_lock, &mutex_flags);
         remove_self_from_waiters(c->waiters);
         self->state = TASK_READY;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return unlock_result;
     }
 
@@ -424,24 +429,24 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
     tcb_t *self = kcb->task_current->data;
 
     /* Atomically add to wait list with timeout */
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     if (unlikely(!list_pushback(c->waiters, self))) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
     }
     self->delay = ticks;
     self->state = TASK_BLOCKED;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     /* Release mutex */
     int32_t unlock_result = mo_mutex_unlock(m);
     if (unlikely(unlock_result != ERR_OK)) {
         /* Failed to unlock - cleanup and restore */
-        NOSCHED_ENTER();
+        spin_lock_irqsave(&mutex_lock, &mutex_flags);
         remove_self_from_waiters(c->waiters);
         self->state = TASK_READY;
         self->delay = 0;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return unlock_result;
     }
 
@@ -450,7 +455,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
 
     /* Determine why we woke up */
     int32_t wait_status;
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     if (self->state == TASK_BLOCKED) {
         /* Timeout occurred - remove from wait list */
@@ -463,7 +468,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
         wait_status = ERR_OK;
     }
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     /* Re-acquire mutex regardless of timeout status */
     int32_t lock_result = mo_mutex_lock(m);
@@ -477,7 +482,7 @@ int32_t mo_cond_signal(cond_t *c)
     if (unlikely(!cond_is_valid(c)))
         return ERR_FAIL;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     if (!list_is_empty(c->waiters)) {
         tcb_t *waiter = (tcb_t *) list_pop(c->waiters);
@@ -494,7 +499,7 @@ int32_t mo_cond_signal(cond_t *c)
         }
     }
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return ERR_OK;
 }
 
@@ -503,7 +508,7 @@ int32_t mo_cond_broadcast(cond_t *c)
     if (unlikely(!cond_is_valid(c)))
         return ERR_FAIL;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Wake all waiting tasks */
     while (!list_is_empty(c->waiters)) {
@@ -521,7 +526,7 @@ int32_t mo_cond_broadcast(cond_t *c)
         }
     }
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return ERR_OK;
 }
 
@@ -531,9 +536,9 @@ int32_t mo_cond_waiting_count(cond_t *c)
         return -1;
 
     int32_t count;
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     count = c->waiters ? (int32_t) c->waiters->length : 0;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     return count;
 }
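
For context, here is a hedged sketch of how the now-SMP-safe primitives are meant to be used together. The queue state and its initialization are stand-ins (illustrative only; init routines are not part of this diff), while the mo_* calls and ERR_OK come from the code above:

#include <sys/mutex.h>

/* Assume q_lock and q_nonempty were initialized elsewhere during boot. */
static mutex_t q_lock;
static cond_t q_nonempty;
static int q_len; /* stand-in for a real queue */

void producer_put(void)
{
    mo_mutex_lock(&q_lock);
    q_len++;
    /* Wake one waiter. Under SMP, the spinlock inside mo_cond_signal()
     * serializes this against a concurrent mo_cond_wait() on another hart. */
    mo_cond_signal(&q_nonempty);
    mo_mutex_unlock(&q_lock);
}

void consumer_get(void)
{
    mo_mutex_lock(&q_lock);
    /* Re-check the predicate in a loop: a waiter can be woken and still
     * find the queue drained by another consumer. */
    while (q_len == 0) {
        if (mo_cond_wait(&q_nonempty, &q_lock) != ERR_OK)
            break; /* a real caller would handle the error */
    }
    if (q_len > 0)
        q_len--;
    mo_mutex_unlock(&q_lock);
}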
