Skip to content

Commit 7d01c5e

Browse files
Vudentz authored and Anas Nashif committed
poll: Enable multiple threads to use k_poll in the same object
This is necessary in order for k_queue_get to work properly, since that is used with buffer pools which might be used by multiple threads asking for buffers.

Jira: ZEP-2553

Signed-off-by: Luiz Augusto von Dentz <[email protected]>
1 parent 7725fc0 commit 7d01c5e

File tree

9 files changed

+113
-115
lines changed

9 files changed

+113
-115
lines changed

include/kernel.h

Lines changed: 16 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -98,11 +98,11 @@ typedef sys_dlist_t _wait_q_t;
9898
#endif
9999

100100
#ifdef CONFIG_POLL
101-
#define _POLL_EVENT_OBJ_INIT \
102-
.poll_event = NULL,
103-
#define _POLL_EVENT struct k_poll_event *poll_event
101+
#define _POLL_EVENT_OBJ_INIT(obj) \
102+
.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events),
103+
#define _POLL_EVENT sys_dlist_t poll_events
104104
#else
105-
#define _POLL_EVENT_OBJ_INIT
105+
#define _POLL_EVENT_OBJ_INIT(obj)
106106
#define _POLL_EVENT
107107
#endif
108108

@@ -1327,7 +1327,7 @@ struct k_queue {
13271327
{ \
13281328
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
13291329
.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
1330-
_POLL_EVENT_OBJ_INIT \
1330+
_POLL_EVENT_OBJ_INIT(obj) \
13311331
_OBJECT_TRACING_INIT \
13321332
}
13331333

@@ -2360,7 +2360,7 @@ struct k_sem {
23602360
.wait_q = SYS_DLIST_STATIC_INIT(&obj.wait_q), \
23612361
.count = initial_count, \
23622362
.limit = count_limit, \
2363-
_POLL_EVENT_OBJ_INIT \
2363+
_POLL_EVENT_OBJ_INIT(obj) \
23642364
_OBJECT_TRACING_INIT \
23652365
}
23662366

@@ -3544,9 +3544,6 @@ enum _poll_states_bits {
35443544
/* default state when creating event */
35453545
_POLL_STATE_NOT_READY,
35463546

3547-
/* there was another poller already on the object */
3548-
_POLL_STATE_EADDRINUSE,
3549-
35503547
/* signaled by k_poll_signal() */
35513548
_POLL_STATE_SIGNALED,
35523549

@@ -3601,7 +3598,6 @@ enum k_poll_modes {
36013598

36023599
/* public - values for k_poll_event.state bitfield */
36033600
#define K_POLL_STATE_NOT_READY 0
3604-
#define K_POLL_STATE_EADDRINUSE _POLL_STATE_BIT(_POLL_STATE_EADDRINUSE)
36053601
#define K_POLL_STATE_SIGNALED _POLL_STATE_BIT(_POLL_STATE_SIGNALED)
36063602
#define K_POLL_STATE_SEM_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_SEM_AVAILABLE)
36073603
#define K_POLL_STATE_DATA_AVAILABLE _POLL_STATE_BIT(_POLL_STATE_DATA_AVAILABLE)
@@ -3610,7 +3606,7 @@ enum k_poll_modes {
36103606
/* public - poll signal object */
36113607
struct k_poll_signal {
36123608
/* PRIVATE - DO NOT TOUCH */
3613-
struct k_poll_event *poll_event;
3609+
sys_dlist_t poll_events;
36143610

36153611
/*
36163612
* 1 if the event has been signaled, 0 otherwise. Stays set to 1 until
@@ -3622,14 +3618,17 @@ struct k_poll_signal {
36223618
int result;
36233619
};
36243620

3625-
#define K_POLL_SIGNAL_INITIALIZER() \
3621+
#define K_POLL_SIGNAL_INITIALIZER(obj) \
36263622
{ \
3627-
.poll_event = NULL, \
3623+
.poll_events = SYS_DLIST_STATIC_INIT(&obj.poll_events), \
36283624
.signaled = 0, \
36293625
.result = 0, \
36303626
}
36313627

36323628
struct k_poll_event {
3629+
/* PRIVATE - DO NOT TOUCH */
3630+
sys_dnode_t _node;
3631+
36333632
/* PRIVATE - DO NOT TOUCH */
36343633
struct _poller *poller;
36353634

@@ -3716,16 +3715,9 @@ extern void k_poll_event_init(struct k_poll_event *event, u32_t type,
37163715
* reason, the k_poll() call is more effective when the objects being polled
37173716
* only have one thread, the polling thread, trying to acquire them.
37183717
*
3719-
* Only one thread can be polling for a particular object at a given time. If
3720-
* another thread tries to poll on it, the k_poll() call returns -EADDRINUSE
3721-
* and returns as soon as it has finished handling the other events. This means
3722-
* that k_poll() can return -EADDRINUSE and have the state value of some events
3723-
* be non-K_POLL_STATE_NOT_READY. When this condition occurs, the @a timeout
3724-
* parameter is ignored.
3725-
*
3726-
* When k_poll() returns 0 or -EADDRINUSE, the caller should loop on all the
3727-
* events that were passed to k_poll() and check the state field for the values
3728-
* that were expected and take the associated actions.
3718+
* When k_poll() returns 0, the caller should loop on all the events that were
3719+
* passed to k_poll() and check the state field for the values that were
3720+
* expected and take the associated actions.
37293721
*
37303722
* Before being reused for another call to k_poll(), the user has to reset the
37313723
* state field to K_POLL_STATE_NOT_READY.
@@ -3736,7 +3728,6 @@ extern void k_poll_event_init(struct k_poll_event *event, u32_t type,
37363728
* or one of the special values K_NO_WAIT and K_FOREVER.
37373729
*
37383730
* @retval 0 One or more events are ready.
3739-
* @retval -EADDRINUSE One or more objects already had a poller.
37403731
* @retval -EAGAIN Waiting period timed out.
37413732
*/
37423733

@@ -3777,8 +3768,7 @@ extern void k_poll_signal_init(struct k_poll_signal *signal);
37773768
extern int k_poll_signal(struct k_poll_signal *signal, int result);
37783769

37793770
/* private internal function */
3780-
extern int _handle_obj_poll_event(struct k_poll_event **obj_poll_event,
3781-
u32_t state);
3771+
extern int _handle_obj_poll_events(sys_dlist_t *events, u32_t state);
37823772

37833773
/**
37843774
* @} end defgroup poll_apis

kernel/poll.c

Lines changed: 52 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -89,30 +89,46 @@ static inline int is_condition_met(struct k_poll_event *event, u32_t *state)
8989
return 0;
9090
}
9191

92+
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
93+
struct _poller *poller)
94+
{
95+
struct k_poll_event *pending;
96+
97+
pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
98+
if (!pending || _is_t1_higher_prio_than_t2(pending->poller->thread,
99+
poller->thread)) {
100+
sys_dlist_append(events, &event->_node);
101+
return;
102+
}
103+
104+
SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
105+
if (_is_t1_higher_prio_than_t2(poller->thread,
106+
pending->poller->thread)) {
107+
sys_dlist_insert_before(events, &pending->_node,
108+
&event->_node);
109+
return;
110+
}
111+
}
112+
113+
sys_dlist_append(events, &event->_node);
114+
}
115+
92116
/* must be called with interrupts locked */
93-
static inline int register_event(struct k_poll_event *event)
117+
static inline int register_event(struct k_poll_event *event,
118+
struct _poller *poller)
94119
{
95120
switch (event->type) {
96121
case K_POLL_TYPE_SEM_AVAILABLE:
97122
__ASSERT(event->sem, "invalid semaphore\n");
98-
if (event->sem->poll_event) {
99-
return -EADDRINUSE;
100-
}
101-
event->sem->poll_event = event;
123+
add_event(&event->sem->poll_events, event, poller);
102124
break;
103125
case K_POLL_TYPE_DATA_AVAILABLE:
104126
__ASSERT(event->queue, "invalid queue\n");
105-
if (event->queue->poll_event) {
106-
return -EADDRINUSE;
107-
}
108-
event->queue->poll_event = event;
127+
add_event(&event->queue->poll_events, event, poller);
109128
break;
110129
case K_POLL_TYPE_SIGNAL:
111-
__ASSERT(event->queue, "invalid poll signal\n");
112-
if (event->signal->poll_event) {
113-
return -EADDRINUSE;
114-
}
115-
event->signal->poll_event = event;
130+
__ASSERT(event->signal, "invalid poll signal\n");
131+
add_event(&event->signal->poll_events, event, poller);
116132
break;
117133
case K_POLL_TYPE_IGNORE:
118134
/* nothing to do */
@@ -122,6 +138,8 @@ static inline int register_event(struct k_poll_event *event)
122138
break;
123139
}
124140

141+
event->poller = poller;
142+
125143
return 0;
126144
}
127145

@@ -133,15 +151,15 @@ static inline void clear_event_registration(struct k_poll_event *event)
133151
switch (event->type) {
134152
case K_POLL_TYPE_SEM_AVAILABLE:
135153
__ASSERT(event->sem, "invalid semaphore\n");
136-
event->sem->poll_event = NULL;
154+
sys_dlist_remove(&event->_node);
137155
break;
138156
case K_POLL_TYPE_DATA_AVAILABLE:
139157
__ASSERT(event->queue, "invalid queue\n");
140-
event->queue->poll_event = NULL;
158+
sys_dlist_remove(&event->_node);
141159
break;
142160
case K_POLL_TYPE_SIGNAL:
143161
__ASSERT(event->signal, "invalid poll signal\n");
144-
event->signal->poll_event = NULL;
162+
sys_dlist_remove(&event->_node);
145163
break;
146164
case K_POLL_TYPE_IGNORE:
147165
/* nothing to do */
@@ -176,18 +194,13 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
176194
__ASSERT(events, "NULL events\n");
177195
__ASSERT(num_events > 0, "zero events\n");
178196

179-
int last_registered = -1, in_use = 0, rc;
197+
int last_registered = -1, rc;
180198
unsigned int key;
181199

182200
key = irq_lock();
183201
set_polling_state(_current);
184202
irq_unlock(key);
185203

186-
/*
187-
* We can get by with one poller structure for all events for now:
188-
* if/when we allow multiple threads to poll on the same object, we
189-
* will need one per poll event associated with an object.
190-
*/
191204
struct _poller poller = { .thread = _current };
192205

193206
/* find events whose condition is already fulfilled */
@@ -198,18 +211,10 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
198211
if (is_condition_met(&events[ii], &state)) {
199212
set_event_ready(&events[ii], state);
200213
clear_polling_state(_current);
201-
} else if (timeout != K_NO_WAIT && is_polling() && !in_use) {
202-
rc = register_event(&events[ii]);
214+
} else if (timeout != K_NO_WAIT && is_polling()) {
215+
rc = register_event(&events[ii], &poller);
203216
if (rc == 0) {
204-
events[ii].poller = &poller;
205217
++last_registered;
206-
} else if (rc == -EADDRINUSE) {
207-
/* setting in_use also prevents any further
208-
* registrations by the current thread
209-
*/
210-
in_use = -EADDRINUSE;
211-
events[ii].state = K_POLL_STATE_EADDRINUSE;
212-
clear_polling_state(_current);
213218
} else {
214219
__ASSERT(0, "unexpected return code\n");
215220
}
@@ -224,16 +229,12 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
224229
* condition is met, either when looping through the events here or
225230
* because one of the events registered has had its state changed, or
226231
* that one of the objects we wanted to poll on already had a thread
227-
* polling on it. We can remove all registrations and return either
228-
* success or a -EADDRINUSE error. In the case of a -EADDRINUSE error,
229-
* the events that were available are still flagged as such, and it is
230-
* valid for the caller to consider them available, as if this function
231-
* returned success.
232+
* polling on it.
232233
*/
233234
if (!is_polling()) {
234235
clear_event_registrations(events, last_registered, key);
235236
irq_unlock(key);
236-
return in_use;
237+
return 0;
237238
}
238239

239240
clear_polling_state(_current);
@@ -306,38 +307,43 @@ static int _signal_poll_event(struct k_poll_event *event, u32_t state,
306307
}
307308

308309
/* returns 1 if a reschedule must take place, 0 otherwise */
309-
/* *obj_poll_event is guaranteed to not be NULL */
310-
int _handle_obj_poll_event(struct k_poll_event **obj_poll_event, u32_t state)
310+
int _handle_obj_poll_events(sys_dlist_t *events, u32_t state)
311311
{
312-
struct k_poll_event *poll_event = *obj_poll_event;
312+
struct k_poll_event *poll_event;
313313
int must_reschedule;
314314

315-
*obj_poll_event = NULL;
315+
poll_event = (struct k_poll_event *)sys_dlist_get(events);
316+
if (!poll_event) {
317+
return 0;
318+
}
319+
316320
(void)_signal_poll_event(poll_event, state, &must_reschedule);
317321
return must_reschedule;
318322
}
319323

320324
void k_poll_signal_init(struct k_poll_signal *signal)
321325
{
322-
signal->poll_event = NULL;
326+
sys_dlist_init(&signal->poll_events);
323327
signal->signaled = 0;
324328
/* signal->result is left unitialized */
325329
}
326330

327331
int k_poll_signal(struct k_poll_signal *signal, int result)
328332
{
329333
unsigned int key = irq_lock();
334+
struct k_poll_event *poll_event;
330335
int must_reschedule;
331336

332337
signal->result = result;
333338
signal->signaled = 1;
334339

335-
if (!signal->poll_event) {
340+
poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
341+
if (!poll_event) {
336342
irq_unlock(key);
337343
return 0;
338344
}
339345

340-
int rc = _signal_poll_event(signal->poll_event, K_POLL_STATE_SIGNALED,
346+
int rc = _signal_poll_event(poll_event, K_POLL_STATE_SIGNALED,
341347
&must_reschedule);
342348

343349
if (must_reschedule) {

kernel/queue.c

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,9 @@ void k_queue_init(struct k_queue *queue)
5151
{
5252
sys_slist_init(&queue->data_q);
5353
sys_dlist_init(&queue->wait_q);
54-
55-
_INIT_OBJ_POLL_EVENT(queue);
54+
#if defined(CONFIG_POLL)
55+
sys_dlist_init(&queue->poll_events);
56+
#endif
5657

5758
SYS_TRACING_OBJ_INIT(k_queue, queue);
5859
}
@@ -67,13 +68,12 @@ static void prepare_thread_to_run(struct k_thread *thread, void *data)
6768
#endif /* CONFIG_POLL */
6869

6970
/* returns 1 if a reschedule must take place, 0 otherwise */
70-
static inline int handle_poll_event(struct k_queue *queue)
71+
static inline int handle_poll_events(struct k_queue *queue)
7172
{
7273
#ifdef CONFIG_POLL
7374
u32_t state = K_POLL_STATE_DATA_AVAILABLE;
7475

75-
return queue->poll_event ?
76-
_handle_obj_poll_event(&queue->poll_event, state) : 0;
76+
return _handle_obj_poll_events(&queue->poll_events, state);
7777
#else
7878
return 0;
7979
#endif
@@ -95,7 +95,7 @@ void k_queue_cancel_wait(struct k_queue *queue)
9595
}
9696
}
9797
#else
98-
if (handle_poll_event(queue)) {
98+
if (handle_poll_events(queue)) {
9999
(void)_Swap(key);
100100
return;
101101
}
@@ -126,7 +126,7 @@ void k_queue_insert(struct k_queue *queue, void *prev, void *data)
126126
sys_slist_insert(&queue->data_q, prev, data);
127127

128128
#if defined(CONFIG_POLL)
129-
if (handle_poll_event(queue)) {
129+
if (handle_poll_events(queue)) {
130130
(void)_Swap(key);
131131
return;
132132
}
@@ -171,7 +171,7 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
171171
}
172172
#else
173173
sys_slist_append_list(&queue->data_q, head, tail);
174-
if (handle_poll_event(queue)) {
174+
if (handle_poll_events(queue)) {
175175
(void)_Swap(key);
176176
return;
177177
}
@@ -206,11 +206,10 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
206206
event.state = K_POLL_STATE_NOT_READY;
207207

208208
err = k_poll(&event, 1, timeout);
209-
if (err == -EAGAIN) {
209+
if (err) {
210210
return NULL;
211211
}
212212

213-
__ASSERT_NO_MSG(err == 0);
214213
__ASSERT_NO_MSG(event.state == K_POLL_STATE_FIFO_DATA_AVAILABLE);
215214

216215
return sys_slist_get(&queue->data_q);

0 commit comments

Comments (0)