@@ -34,7 +34,7 @@ struct mgmt_event_wait {
 };
 
 static K_SEM_DEFINE(network_event, 0, K_SEM_MAX_LIMIT);
-static K_SEM_DEFINE(net_mgmt_lock, 1, 1);
+static K_MUTEX_DEFINE(net_mgmt_lock);
 
 K_KERNEL_STACK_DEFINE(mgmt_stack, CONFIG_NET_MGMT_EVENT_STACK_SIZE);
 static struct k_thread mgmt_thread_data;
@@ -54,7 +54,7 @@ static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
 	ARG_UNUSED(length);
 #endif /* CONFIG_NET_MGMT_EVENT_INFO */
 
-	k_sem_take(&net_mgmt_lock, K_FOREVER);
+	(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
 
 	i_idx = in_event + 1;
 	if (i_idx == CONFIG_NET_MGMT_EVENT_QUEUE_SIZE) {
@@ -69,7 +69,7 @@ static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
 		} else {
 			NET_ERR("Event info length %zu > max size %zu",
 				length, NET_EVENT_INFO_MAX_SIZE);
-			k_sem_give(&net_mgmt_lock);
+			(void)k_mutex_unlock(&net_mgmt_lock);
 
 			return;
 		}
@@ -97,7 +97,7 @@ static inline void mgmt_push_event(uint32_t mgmt_event, struct net_if *iface,
 
 	in_event = i_idx;
 
-	k_sem_give(&net_mgmt_lock);
+	(void)k_mutex_unlock(&net_mgmt_lock);
 }
 
 static inline struct mgmt_event_entry *mgmt_pop_event(void)
@@ -226,7 +226,7 @@ static void mgmt_thread(void)
 
 	while (1) {
 		k_sem_take(&network_event, K_FOREVER);
-		k_sem_take(&net_mgmt_lock, K_FOREVER);
+		(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
 
 		NET_DBG("Handling events, forwarding it relevantly");
 
@@ -240,7 +240,7 @@ static void mgmt_thread(void)
 				k_sem_count_get(&network_event));
 
 			k_sem_init(&network_event, 0, K_SEM_MAX_LIMIT);
-			k_sem_give(&net_mgmt_lock);
+			(void)k_mutex_unlock(&net_mgmt_lock);
 
 			continue;
 		}
@@ -249,7 +249,7 @@ static void mgmt_thread(void)
 
 		mgmt_clean_event(mgmt_event);
 
-		k_sem_give(&net_mgmt_lock);
+		(void)k_mutex_unlock(&net_mgmt_lock);
 
 		k_yield();
 	}
@@ -312,26 +312,26 @@ void net_mgmt_add_event_callback(struct net_mgmt_event_callback *cb)
 {
 	NET_DBG("Adding event callback %p", cb);
 
-	k_sem_take(&net_mgmt_lock, K_FOREVER);
+	(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
 
 	sys_slist_prepend(&event_callbacks, &cb->node);
 
 	mgmt_add_event_mask(cb->event_mask);
 
-	k_sem_give(&net_mgmt_lock);
+	(void)k_mutex_unlock(&net_mgmt_lock);
 }
 
 void net_mgmt_del_event_callback(struct net_mgmt_event_callback *cb)
 {
 	NET_DBG("Deleting event callback %p", cb);
 
-	k_sem_take(&net_mgmt_lock, K_FOREVER);
+	(void)k_mutex_lock(&net_mgmt_lock, K_FOREVER);
 
 	sys_slist_find_and_remove(&event_callbacks, &cb->node);
 
 	mgmt_rebuild_global_event_mask();
 
-	k_sem_give(&net_mgmt_lock);
+	(void)k_mutex_unlock(&net_mgmt_lock);
 }
 
 void net_mgmt_event_notify_with_info(uint32_t mgmt_event, struct net_if *iface,
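For reference, the change above swaps the binary semaphore that guarded the event queue and callback list (K_SEM_DEFINE(net_mgmt_lock, 1, 1) paired with k_sem_take()/k_sem_give()) for a kernel mutex. Below is a minimal, self-contained sketch of the same locking pattern using the standard Zephyr k_mutex API; the lock name, shared counter, and function here are illustrative only and are not taken from net_mgmt.c:

/* Sketch of the k_mutex pattern adopted in the diff; names are hypothetical. */
#include <zephyr/kernel.h>

static K_MUTEX_DEFINE(example_lock);   /* statically initialized, like net_mgmt_lock */
static int shared_counter;

static void bump_counter(void)
{
	/* K_FOREVER blocks until the mutex is available; the int return value
	 * is deliberately ignored, mirroring the (void) casts in the diff. */
	(void)k_mutex_lock(&example_lock, K_FOREVER);

	shared_counter++;                  /* critical section */

	(void)k_mutex_unlock(&example_lock);
}

Unlike a semaphore used as a lock, a k_mutex tracks its owning thread, supports priority inheritance, and can be re-locked by the owner, which is the usual motivation for this kind of conversion.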