Commit 4b40dce

ycsin authored and cfriedt committed
net: mgmt: Use mutex for net_mgmt_lock
Conceptually the net_mgmt_lock should be a mutex instead of a semaphore. It is easier to identify the owner of a mutex and to debug when a deadlock happens, so convert it.

Signed-off-by: Yong Cong Sin <[email protected]>
1 parent 5a71377 commit 4b40dce
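As background for the rationale above, here is a minimal, hypothetical sketch (the names demo_lock, demo_counter, demo_increment and demo_increment_with_owner_report are invented for illustration, not taken from net_mgmt.c) of the locking pattern this commit adopts: a K_MUTEX_DEFINE'd lock records the holding thread in its owner field, which is what makes a stuck lock easier to attribute than a binary K_SEM_DEFINE(lock, 1, 1) semaphore.

#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

/* Hypothetical state guarded the same way net_mgmt_lock now is. */
static K_MUTEX_DEFINE(demo_lock);
static int demo_counter;

void demo_increment(void)
{
	/* k_mutex_lock() records the calling thread in demo_lock.owner,
	 * so a debugger can see who holds the lock if things deadlock.
	 */
	(void)k_mutex_lock(&demo_lock, K_FOREVER);
	demo_counter++;
	(void)k_mutex_unlock(&demo_lock);
}

/* Hypothetical debug variant: if the lock is not free within 100 ms,
 * report the current owner before blocking indefinitely.
 */
void demo_increment_with_owner_report(void)
{
	if (k_mutex_lock(&demo_lock, K_MSEC(100)) != 0) {
		struct k_thread *owner = demo_lock.owner;
		const char *name = owner ? k_thread_name_get(owner) : NULL;

		printk("demo_lock held by %s\n", name ? name : "<unknown>");
		(void)k_mutex_lock(&demo_lock, K_FOREVER);
	}
	demo_counter++;
	(void)k_mutex_unlock(&demo_lock);
}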

File tree: 1 file changed (+11, -11 lines)


subsys/net/ip/net_mgmt.c

Lines changed: 11 additions & 11 deletions
@@ -34,7 +34,7 @@ struct mgmt_event_wait {
 };
 
 static K_SEM_DEFINE(network_event, 0, K_SEM_MAX_LIMIT);
-static K_SEM_DEFINE(net_mgmt_lock, 1, 1);
+static K_MUTEX_DEFINE(net_mgmt_lock);
 
 K_KERNEL_STACK_DEFINE(mgmt_stack, CONFIG_NET_MGMT_EVENT_STACK_SIZE);
 static struct k_thread mgmt_thread_data;
@@ -54,7 +54,7 @@ static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
 	ARG_UNUSED(length);
 #endif /* CONFIG_NET_MGMT_EVENT_INFO */
 
-	k_sem_take(&net_mgmt_lock, K_FOREVER);
+	(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
 
 	i_idx = in_event + 1;
 	if (i_idx == CONFIG_NET_MGMT_EVENT_QUEUE_SIZE) {
@@ -69,7 +69,7 @@ static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
 		} else {
 			NET_ERR("Event info length %zu > max size %zu",
 				length, NET_EVENT_INFO_MAX_SIZE);
-			k_sem_give(&net_mgmt_lock);
+			(void)k_mutex_unlock(&net_mgmt_lock);
 
 			return;
 		}
@@ -97,7 +97,7 @@ static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
 
 	in_event = i_idx;
 
-	k_sem_give(&net_mgmt_lock);
+	(void)k_mutex_unlock(&net_mgmt_lock);
 }
 
 static inline struct mgmt_event_entry *mgmt_pop_event(void)
@@ -226,7 +226,7 @@ static void mgmt_thread(void)
 
 	while (1) {
 		k_sem_take(&network_event, K_FOREVER);
-		k_sem_take(&net_mgmt_lock, K_FOREVER);
+		(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
 
 		NET_DBG("Handling events, forwarding it relevantly");
 
@@ -240,7 +240,7 @@ static void mgmt_thread(void)
 				 k_sem_count_get(&network_event));
 
 			k_sem_init(&network_event, 0, K_SEM_MAX_LIMIT);
-			k_sem_give(&net_mgmt_lock);
+			(void)k_mutex_unlock(&net_mgmt_lock);
 
 			continue;
 		}
@@ -249,7 +249,7 @@ static void mgmt_thread(void)
 
 		mgmt_clean_event(mgmt_event);
 
-		k_sem_give(&net_mgmt_lock);
+		(void)k_mutex_unlock(&net_mgmt_lock);
 
 		k_yield();
 	}
@@ -312,26 +312,26 @@ void net_mgmt_add_event_callback(struct net_mgmt_event_callback *cb)
 {
 	NET_DBG("Adding event callback %p", cb);
 
-	k_sem_take(&net_mgmt_lock, K_FOREVER);
+	(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
 
 	sys_slist_prepend(&event_callbacks, &cb->node);
 
 	mgmt_add_event_mask(cb->event_mask);
 
-	k_sem_give(&net_mgmt_lock);
+	(void)k_mutex_unlock(&net_mgmt_lock);
 }
 
 void net_mgmt_del_event_callback(struct net_mgmt_event_callback *cb)
 {
 	NET_DBG("Deleting event callback %p", cb);
 
-	k_sem_take(&net_mgmt_lock, K_FOREVER);
+	(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
 
 	sys_slist_find_and_remove(&event_callbacks, &cb->node);
 
 	mgmt_rebuild_global_event_mask();
 
-	k_sem_give(&net_mgmt_lock);
+	(void)k_mutex_unlock(&net_mgmt_lock);
 }
 
 void net_mgmt_event_notify_with_info(uint32_t mgmt_event, struct net_if *iface,
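A behavioral note on the conversion (an observation, not part of the commit): unlike a binary semaphore, a Zephyr k_mutex can only be released by the thread that locked it, so the change relies on every lock/unlock pair above staying on one thread, as they do in net_mgmt.c. A toy sketch with hypothetical functions owner_path() and foreign_unlock() illustrates the difference:

#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static K_MUTEX_DEFINE(example_lock);

/* Normal path: lock and unlock from the same thread. */
void owner_path(void)
{
	(void)k_mutex_lock(&example_lock, K_FOREVER);
	/* ... critical section ... */
	(void)k_mutex_unlock(&example_lock);	/* returns 0: we are the owner */
}

/* Called from a different thread while owner_path() holds the lock:
 * unlike k_sem_give(), k_mutex_unlock() refuses and returns -EPERM.
 */
void foreign_unlock(void)
{
	int ret = k_mutex_unlock(&example_lock);

	if (ret == -EPERM) {
		printk("not the owner, cannot unlock\n");
	}
}

The (void) casts the commit adds on k_mutex_lock()/k_mutex_unlock() simply make the ignored return values explicit, since the semaphore calls they replace returned void-like results that were never checked here either.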
