Commit 2bed37e

Nicolas Pitre authored and nashif committed
mem_slab: move global lock to per slab lock

This avoids contention between unrelated slabs and allows for userspace-accessible slabs when located in memory partitions.

Signed-off-by: Nicolas Pitre <[email protected]>
1 parent 2b32e47 commit 2bed37e

File tree: 2 files changed (+9, -8 lines)
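In short: kernel/mem_slab.c previously guarded every slab operation with one file-scope spinlock; this commit moves that lock into struct k_mem_slab. A condensed before/after sketch of the locking pattern (the helper function names are illustrative, not from the commit):

#include <zephyr.h>

/* Before this commit: one file-scope lock in kernel/mem_slab.c
 * serialized operations on every slab in the system. */
static struct k_spinlock global_lock;

static void alloc_path_before(struct k_mem_slab *slab)
{
	k_spinlock_key_t key = k_spin_lock(&global_lock);
	/* ... manipulate slab->free_list ... */
	k_spin_unlock(&global_lock, key);
}

/* After: the lock lives inside struct k_mem_slab, so threads touching
 * different slabs never contend for the same lock. */
static void alloc_path_after(struct k_mem_slab *slab)
{
	k_spinlock_key_t key = k_spin_lock(&slab->lock);
	/* ... manipulate slab->free_list ... */
	k_spin_unlock(&slab->lock, key);
}

Besides scalability, this matters for userspace: per the commit message, a slab placed in a memory partition now carries its lock with it instead of depending on a kernel-private global.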

include/kernel.h

Lines changed: 2 additions & 0 deletions
@@ -4724,6 +4724,7 @@ __syscall size_t k_pipe_write_avail(struct k_pipe *pipe);
 
 struct k_mem_slab {
 	_wait_q_t wait_q;
+	struct k_spinlock lock;
 	uint32_t num_blocks;
 	size_t block_size;
 	char *buffer;
@@ -4740,6 +4741,7 @@ struct k_mem_slab {
 #define Z_MEM_SLAB_INITIALIZER(obj, slab_buffer, slab_block_size, \
 			       slab_num_blocks) \
 	{ \
+	.lock = {}, \
 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.num_blocks = slab_num_blocks, \
 	.block_size = slab_block_size, \
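For context, statically defined slabs get the new .lock member zero-initialized through this macro. A minimal sketch, assuming the public K_MEM_SLAB_DEFINE macro (which expands to Z_MEM_SLAB_INITIALIZER; the slab name and sizes are illustrative):

#include <zephyr.h>

/* Defines the backing buffer plus a struct k_mem_slab whose static
 * initializer now also zeroes the per-slab spinlock:
 * 16 blocks of 32 bytes, 4-byte aligned (illustrative values). */
K_MEM_SLAB_DEFINE(my_slab, 32, 16, 4);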

kernel/mem_slab.c

Lines changed: 7 additions & 8 deletions
@@ -15,8 +15,6 @@
 #include <init.h>
 #include <sys/check.h>
 
-static struct k_spinlock lock;
-
 #ifdef CONFIG_OBJECT_TRACING
 struct k_mem_slab *_trace_list_k_mem_slab;
 #endif /* CONFIG_OBJECT_TRACING */
@@ -88,6 +86,7 @@ int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
 	slab->block_size = block_size;
 	slab->buffer = buffer;
 	slab->num_used = 0U;
+	slab->lock = (struct k_spinlock) {};
 
 #ifdef CONFIG_MEM_SLAB_TRACE_MAX_UTILIZATION
 	slab->max_used = 0U;
@@ -108,7 +107,7 @@ int k_mem_slab_init(struct k_mem_slab *slab, void *buffer,
 
 int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
+	k_spinlock_key_t key = k_spin_lock(&slab->lock);
 	int result;
 
 	if (slab->free_list != NULL) {
@@ -128,34 +127,34 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
 		result = -ENOMEM;
 	} else {
 		/* wait for a free block or timeout */
-		result = z_pend_curr(&lock, key, &slab->wait_q, timeout);
+		result = z_pend_curr(&slab->lock, key, &slab->wait_q, timeout);
 		if (result == 0) {
 			*mem = _current->base.swap_data;
 		}
 		return result;
 	}
 
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&slab->lock, key);
 
 	return result;
 }
 
 void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock);
+	k_spinlock_key_t key = k_spin_lock(&slab->lock);
 
 	if (slab->free_list == NULL) {
 		struct k_thread *pending_thread = z_unpend_first_thread(&slab->wait_q);
 
 		if (pending_thread != NULL) {
 			z_thread_return_value_set_with_data(pending_thread, 0, *mem);
 			z_ready_thread(pending_thread);
-			z_reschedule(&lock, key);
+			z_reschedule(&slab->lock, key);
 			return;
 		}
 	}
 	**(char ***) mem = slab->free_list;
 	slab->free_list = *(char **) mem;
 	slab->num_used--;
-	k_spin_unlock(&lock, key);
+	k_spin_unlock(&slab->lock, key);
 }
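The public API is unchanged; what the commit buys is that concurrent operations on unrelated slabs no longer serialize on a shared lock. A usage sketch (slab names and sizes are illustrative):

#include <zephyr.h>

K_MEM_SLAB_DEFINE(slab_a, 64, 8, 4);   /* 8 blocks of 64 bytes */
K_MEM_SLAB_DEFINE(slab_b, 128, 4, 4);  /* 4 blocks of 128 bytes */

void worker_a(void)
{
	void *block;

	/* Waits up to 100 ms for a free block; a thread blocked here
	 * pends on slab_a's wait_q and, after this commit, only takes
	 * slab_a's own spinlock, never interfering with slab_b users. */
	if (k_mem_slab_alloc(&slab_a, &block, K_MSEC(100)) == 0) {
		/* ... use block ... */
		k_mem_slab_free(&slab_a, &block);
	}
}

Note that k_mem_slab_free() at this point in the API's history still takes a void ** (a pointer to the block pointer), as the diff shows; under the slab's own lock, the free path either hands the block straight to the first pending waiter or pushes it back onto the free list.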
