Skip to content

Commit 30330c2

Browse files
committed
Replace interrupt masking with spinlock in mqueue for SMP support
The original message queue implementation used CRITICAL_ENTER() and CRITICAL_LEAVE() to protect critical sections by disabling interrupts. This was sufficient for single-core systems, where only one hart could execute tasks. To support SMP, we replace these macros with a proper spinlock using RV32A atomic instructions. This ensures safe access to the internal queue structures when multiple harts concurrently interact with message queues. This change eliminates potential race conditions in message queue operations as we move toward multi-hart scheduling.
1 parent e26b7c9 commit 30330c2

File tree

1 file changed

+15
-10
lines changed

1 file changed

+15
-10
lines changed

kernel/mqueue.c

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,14 @@
66
#include <sys/mqueue.h>
77
#include <sys/task.h>
88

9+
#include <spinlock.h>
10+
911
#include "private/error.h"
1012
#include "private/utils.h"
1113

14+
static spinlock_t queue_lock = SPINLOCK_INITIALIZER;
15+
static uint32_t queue_flags = 0;
16+
1217
mq_t *mo_mq_create(uint16_t max_items)
1318
{
1419
mq_t *mq = malloc(sizeof *mq);
@@ -31,15 +36,15 @@ int32_t mo_mq_destroy(mq_t *mq)
3136
if (unlikely(!mq->q))
3237
return ERR_FAIL; /* Invalid mqueue state */
3338

34-
CRITICAL_ENTER();
39+
spin_lock_irqsave(&queue_lock, &queue_flags);
3540

36-
if (unlikely(queue_count(mq->q) != 0)) { /* refuse to destroy non-empty q */
37-
CRITICAL_LEAVE();
41+
if (queue_count(mq->q) != 0) { /* refuse to destroy non-empty q */
42+
spin_unlock_irqrestore(&queue_lock, queue_flags);
3843
return ERR_MQ_NOTEMPTY;
3944
}
4045

4146
/* Safe to destroy now - no need to hold critical section */
42-
CRITICAL_LEAVE();
47+
spin_unlock_irqrestore(&queue_lock, queue_flags);
4348

4449
queue_destroy(mq->q);
4550
free(mq);
@@ -54,9 +59,9 @@ int32_t mo_mq_enqueue(mq_t *mq, message_t *msg)
5459

5560
int32_t rc;
5661

57-
CRITICAL_ENTER();
62+
spin_lock_irqsave(&queue_lock, &queue_flags);
5863
rc = queue_enqueue(mq->q, msg);
59-
CRITICAL_LEAVE();
64+
spin_unlock_irqrestore(&queue_lock, queue_flags);
6065

6166
return rc; /* 0 on success, -1 on full */
6267
}
@@ -69,9 +74,9 @@ message_t *mo_mq_dequeue(mq_t *mq)
6974

7075
message_t *msg;
7176

72-
CRITICAL_ENTER();
77+
spin_lock_irqsave(&queue_lock, &queue_flags);
7378
msg = queue_dequeue(mq->q);
74-
CRITICAL_LEAVE();
79+
spin_unlock_irqrestore(&queue_lock, queue_flags);
7580

7681
return msg; /* NULL when queue is empty */
7782
}
@@ -84,9 +89,9 @@ message_t *mo_mq_peek(mq_t *mq)
8489

8590
message_t *msg;
8691

87-
CRITICAL_ENTER();
92+
spin_lock_irqsave(&queue_lock, &queue_flags);
8893
msg = queue_peek(mq->q);
89-
CRITICAL_LEAVE();
94+
spin_unlock_irqrestore(&queue_lock, queue_flags);
9095

9196
return msg; /* NULL when queue is empty */
9297
}

0 commit comments

Comments (0)