  * that are independent of the semaphore module.
  */

+#include <spinlock.h>
 #include <lib/libc.h>
 #include <sys/mutex.h>
 #include <sys/task.h>

 #include "private/error.h"
 #include "private/utils.h"

+static spinlock_t mutex_lock = SPINLOCK_INITIALIZER;
+static uint32_t mutex_flags = 0;
+
 /* Validate mutex pointer and structure integrity */
 static inline bool mutex_is_valid(const mutex_t *m)
 {
@@ -112,17 +116,17 @@ int32_t mo_mutex_destroy(mutex_t *m)
     if (unlikely(!mutex_is_valid(m)))
         return ERR_FAIL;

-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);

     /* Check if any tasks are waiting */
     if (unlikely(!list_is_empty(m->waiters))) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }

     /* Check if mutex is still owned */
     if (unlikely(m->owner_tid != 0)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }

@@ -132,7 +136,7 @@ int32_t mo_mutex_destroy(mutex_t *m)
     m->waiters = NULL;
     m->owner_tid = 0;

-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);

     /* Clean up resources outside critical section */
     list_destroy(waiters);
@@ -146,18 +150,18 @@ int32_t mo_mutex_lock(mutex_t *m)

     uint16_t self_tid = mo_task_id();

-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);

     /* Non-recursive: reject if caller already owns it */
     if (unlikely(m->owner_tid == self_tid)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }

     /* Fast path: mutex is free, acquire immediately */
     if (likely(m->owner_tid == 0)) {
         m->owner_tid = self_tid;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_OK;
     }

@@ -177,7 +181,7 @@ int32_t mo_mutex_trylock(mutex_t *m)
     uint16_t self_tid = mo_task_id();
     int32_t result = ERR_TASK_BUSY;

-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);

     if (unlikely(m->owner_tid == self_tid)) {
         /* Already owned by caller (non-recursive) */
@@ -189,7 +193,7 @@ int32_t mo_mutex_trylock(mutex_t *m)
     }
     /* else: owned by someone else, return ERR_TASK_BUSY */

-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return result;
 }

@@ -203,41 +207,42 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)

     uint16_t self_tid = mo_task_id();

-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);

     /* Non-recursive check */
     if (unlikely(m->owner_tid == self_tid)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }

     /* Fast path: mutex is free */
     if (m->owner_tid == 0) {
         m->owner_tid = self_tid;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_OK;
     }

     /* Slow path: must block with timeout using delay mechanism */
     tcb_t *self = kcb->task_current->data;
     if (unlikely(!list_pushback(m->waiters, self))) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
     }

     /* Set up timeout using task delay mechanism */
     self->delay = ticks;
     self->state = TASK_BLOCKED;

-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);

     /* Yield and let the scheduler handle timeout via delay mechanism */
     mo_task_yield();

     /* Check result after waking up */
     int32_t result;

-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
+
     if (self->state == TASK_BLOCKED) {
         /* We woke up due to timeout, not mutex unlock */
         if (remove_self_from_waiters(m->waiters)) {
@@ -252,7 +257,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
         /* We were woken by mutex unlock - check ownership */
         result = (m->owner_tid == self_tid) ? ERR_OK : ERR_FAIL;
     }
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);

     return result;
 }
@@ -264,11 +269,11 @@ int32_t mo_mutex_unlock(mutex_t *m)

     uint16_t self_tid = mo_task_id();

-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);

     /* Verify caller owns the mutex */
     if (unlikely(m->owner_tid != self_tid)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_NOT_OWNER;
     }

@@ -296,7 +301,7 @@ int32_t mo_mutex_unlock(mutex_t *m)
         }
     }

-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return ERR_OK;
 }

@@ -314,9 +319,9 @@ int32_t mo_mutex_waiting_count(mutex_t *m)
         return -1;

     int32_t count;
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     count = m->waiters ? (int32_t) m->waiters->length : 0;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);

     return count;
 }
@@ -348,11 +353,11 @@ int32_t mo_cond_destroy(cond_t *c)
     if (unlikely(!cond_is_valid(c)))
         return ERR_FAIL;

-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);

     /* Check if any tasks are waiting */
     if (unlikely(!list_is_empty(c->waiters))) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }

@@ -361,7 +366,7 @@ int32_t mo_cond_destroy(cond_t *c)
     list_t *waiters = c->waiters;
     c->waiters = NULL;

-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);

     /* Clean up resources outside critical section */
     list_destroy(waiters);
@@ -382,22 +387,22 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m)
     tcb_t *self = kcb->task_current->data;

     /* Atomically add to wait list */
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     if (unlikely(!list_pushback(c->waiters, self))) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
     }
     self->state = TASK_BLOCKED;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);

     /* Release mutex */
     int32_t unlock_result = mo_mutex_unlock(m);
     if (unlikely(unlock_result != ERR_OK)) {
         /* Failed to unlock - remove from wait list and restore state */
-        NOSCHED_ENTER();
+        spin_lock_irqsave(&mutex_lock, &mutex_flags);
         remove_self_from_waiters(c->waiters);
         self->state = TASK_READY;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return unlock_result;
     }

@@ -424,24 +429,24 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
     tcb_t *self = kcb->task_current->data;

     /* Atomically add to wait list with timeout */
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     if (unlikely(!list_pushback(c->waiters, self))) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
     }
     self->delay = ticks;
     self->state = TASK_BLOCKED;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);

     /* Release mutex */
     int32_t unlock_result = mo_mutex_unlock(m);
     if (unlikely(unlock_result != ERR_OK)) {
         /* Failed to unlock - cleanup and restore */
-        NOSCHED_ENTER();
+        spin_lock_irqsave(&mutex_lock, &mutex_flags);
         remove_self_from_waiters(c->waiters);
         self->state = TASK_READY;
         self->delay = 0;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return unlock_result;
     }

@@ -450,7 +455,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)

     /* Determine why we woke up */
     int32_t wait_status;
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);

     if (self->state == TASK_BLOCKED) {
         /* Timeout occurred - remove from wait list */
@@ -463,7 +468,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
         wait_status = ERR_OK;
     }

-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);

     /* Re-acquire mutex regardless of timeout status */
     int32_t lock_result = mo_mutex_lock(m);
@@ -477,7 +482,7 @@ int32_t mo_cond_signal(cond_t *c)
     if (unlikely(!cond_is_valid(c)))
         return ERR_FAIL;

-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);

     if (!list_is_empty(c->waiters)) {
         tcb_t *waiter = (tcb_t *) list_pop(c->waiters);
@@ -494,7 +499,7 @@ int32_t mo_cond_signal(cond_t *c)
         }
     }

-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return ERR_OK;
 }

@@ -503,7 +508,7 @@ int32_t mo_cond_broadcast(cond_t *c)
     if (unlikely(!cond_is_valid(c)))
         return ERR_FAIL;

-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);

     /* Wake all waiting tasks */
     while (!list_is_empty(c->waiters)) {
@@ -521,7 +526,7 @@ int32_t mo_cond_broadcast(cond_t *c)
         }
     }

-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return ERR_OK;
 }

@@ -531,9 +536,9 @@ int32_t mo_cond_waiting_count(cond_t *c)
         return -1;

     int32_t count;
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     count = c->waiters ? (int32_t) c->waiters->length : 0;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);

     return count;
 }
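For context, the pattern this commit introduces is sketched below: every former NOSCHED_ENTER()/NOSCHED_LEAVE() pair becomes a spin_lock_irqsave()/spin_unlock_irqrestore() pair on a single file-scope spinlock. This is a minimal sketch, assuming only the <spinlock.h> API visible in the diff (spinlock_t, SPINLOCK_INITIALIZER, spin_lock_irqsave, spin_unlock_irqrestore) and the kernel's integer types; demo_lock, shared_count, and the local flags variable are illustrative names, not part of the change.

```c
#include <spinlock.h>

/* Hypothetical shared state guarded by its own lock, for illustration only */
static spinlock_t demo_lock = SPINLOCK_INITIALIZER;
static uint32_t shared_count;

void shared_count_increment(void)
{
    /* Sketch keeps the saved IRQ state in a local; the commit itself stores
     * it in the file-scope mutex_flags shared by all critical sections. */
    uint32_t flags;

    /* Disable interrupts, take the lock, and remember the prior IRQ state */
    spin_lock_irqsave(&demo_lock, &flags);
    shared_count++;
    /* Release the lock and restore the saved IRQ state */
    spin_unlock_irqrestore(&demo_lock, flags);
}
```

Keeping the saved flags in one file-scope variable, as the commit does, relies on these critical sections never nesting, since a nested save would overwrite the outer one.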