@@ -89,30 +89,46 @@ static inline int is_condition_met(struct k_poll_event *event, u32_t *state)
 	return 0;
 }
 
+static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
+			     struct _poller *poller)
+{
+	struct k_poll_event *pending;
+
+	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
+	if (!pending || _is_t1_higher_prio_than_t2(pending->poller->thread,
+						   poller->thread)) {
+		sys_dlist_append(events, &event->_node);
+		return;
+	}
+
+	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
+		if (_is_t1_higher_prio_than_t2(poller->thread,
+					       pending->poller->thread)) {
+			sys_dlist_insert_before(events, &pending->_node,
+						&event->_node);
+			return;
+		}
+	}
+
+	sys_dlist_append(events, &event->_node);
+}
+
 /* must be called with interrupts locked */
-static inline int register_event(struct k_poll_event *event)
+static inline int register_event(struct k_poll_event *event,
+				 struct _poller *poller)
 {
 	switch (event->type) {
 	case K_POLL_TYPE_SEM_AVAILABLE:
 		__ASSERT(event->sem, "invalid semaphore\n");
-		if (event->sem->poll_event) {
-			return -EADDRINUSE;
-		}
-		event->sem->poll_event = event;
+		add_event(&event->sem->poll_events, event, poller);
 		break;
 	case K_POLL_TYPE_DATA_AVAILABLE:
 		__ASSERT(event->queue, "invalid queue\n");
-		if (event->queue->poll_event) {
-			return -EADDRINUSE;
-		}
-		event->queue->poll_event = event;
+		add_event(&event->queue->poll_events, event, poller);
 		break;
 	case K_POLL_TYPE_SIGNAL:
-		__ASSERT(event->queue, "invalid poll signal\n");
-		if (event->signal->poll_event) {
-			return -EADDRINUSE;
-		}
-		event->signal->poll_event = event;
+		__ASSERT(event->signal, "invalid poll signal\n");
+		add_event(&event->signal->poll_events, event, poller);
 		break;
 	case K_POLL_TYPE_IGNORE:
 		/* nothing to do */
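The hunk above introduces add_event(), which keeps each object's list of pending poll events sorted by the priority of the polling thread: the highest-priority waiter ends up at the head, and equal-priority waiters queue in FIFO order. Below is a minimal, self-contained sketch of that insertion policy on a plain singly linked list; the waiter type and its prio field are invented for illustration, while the real code operates on sys_dlist_t nodes and compares threads with _is_t1_higher_prio_than_t2().

#include <stdio.h>

struct waiter {
	int prio;		/* lower value = higher priority, as in Zephyr */
	struct waiter *next;
};

/* Insert before the first waiter of strictly lower priority; if there is
 * none, append at the tail. Equal priorities therefore queue in FIFO
 * order, the same ordering add_event() produces. */
static void insert_by_prio(struct waiter **head, struct waiter *w)
{
	while (*head && (*head)->prio <= w->prio) {
		head = &(*head)->next;
	}
	w->next = *head;
	*head = w;
}

int main(void)
{
	struct waiter a = { .prio = 5 }, b = { .prio = 1 }, c = { .prio = 5 };
	struct waiter *head = NULL;

	insert_by_prio(&head, &a);
	insert_by_prio(&head, &b);	/* jumps ahead of a */
	insert_by_prio(&head, &c);	/* lands after a: FIFO among equals */

	for (struct waiter *w = head; w; w = w->next) {
		printf("prio %d\n", w->prio);	/* prints 1, 5, 5 */
	}
	return 0;
}

Keeping the list sorted at insertion time makes the wake-up path trivial: the signaling code simply pops the head.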
@@ -122,6 +138,8 @@ static inline int register_event(struct k_poll_event *event)
 		break;
 	}
 
+	event->poller = poller;
+
 	return 0;
 }
 
@@ -133,15 +151,15 @@ static inline void clear_event_registration(struct k_poll_event *event)
 	switch (event->type) {
 	case K_POLL_TYPE_SEM_AVAILABLE:
 		__ASSERT(event->sem, "invalid semaphore\n");
-		event->sem->poll_event = NULL;
+		sys_dlist_remove(&event->_node);
 		break;
 	case K_POLL_TYPE_DATA_AVAILABLE:
 		__ASSERT(event->queue, "invalid queue\n");
-		event->queue->poll_event = NULL;
+		sys_dlist_remove(&event->_node);
 		break;
 	case K_POLL_TYPE_SIGNAL:
 		__ASSERT(event->signal, "invalid poll signal\n");
-		event->signal->poll_event = NULL;
+		sys_dlist_remove(&event->_node);
 		break;
 	case K_POLL_TYPE_IGNORE:
 		/* nothing to do */
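Deregistration now reduces to unlinking the event's own node, which takes constant time no matter which object's list the event sits on; all three cases collapse into the same sys_dlist_remove() call, and the event needs no back-pointer to the kernel object. The unlink is roughly the following sketch (not Zephyr's actual implementation; Zephyr's dlists are circular with the list head as a sentinel, so prev and next are always valid for a linked node):

struct dnode {
	struct dnode *next, *prev;
};

/* Unlink n from whatever list currently contains it, in O(1). */
static void dnode_remove(struct dnode *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}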
@@ -176,18 +194,13 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	__ASSERT(events, "NULL events\n");
 	__ASSERT(num_events > 0, "zero events\n");
 
-	int last_registered = -1, in_use = 0, rc;
+	int last_registered = -1, rc;
 	unsigned int key;
 
 	key = irq_lock();
 	set_polling_state(_current);
 	irq_unlock(key);
 
-	/*
-	 * We can get by with one poller structure for all events for now:
-	 * if/when we allow multiple threads to poll on the same object, we
-	 * will need one per poll event associated with an object.
-	 */
 	struct _poller poller = { .thread = _current };
 
 	/* find events whose condition is already fulfilled */
@@ -198,18 +211,10 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 		if (is_condition_met(&events[ii], &state)) {
 			set_event_ready(&events[ii], state);
 			clear_polling_state(_current);
-		} else if (timeout != K_NO_WAIT && is_polling() && !in_use) {
-			rc = register_event(&events[ii]);
+		} else if (timeout != K_NO_WAIT && is_polling()) {
+			rc = register_event(&events[ii], &poller);
 			if (rc == 0) {
-				events[ii].poller = &poller;
 				++last_registered;
-			} else if (rc == -EADDRINUSE) {
-				/* setting in_use also prevents any further
-				 * registrations by the current thread
-				 */
-				in_use = -EADDRINUSE;
-				events[ii].state = K_POLL_STATE_EADDRINUSE;
-				clear_polling_state(_current);
 			} else {
 				__ASSERT(0, "unexpected return code\n");
 			}
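Since registration can no longer fail, all of the -EADDRINUSE bookkeeping (the in_use flag, the K_POLL_STATE_EADDRINUSE event state) goes away: any number of threads may now register against the same object. A hedged usage sketch, assuming a semaphore named my_sem invented for illustration; k_poll_event_init() and k_poll() are the public API this file backs:

#include <kernel.h>

K_SEM_DEFINE(my_sem, 0, 1);

/* Any number of threads may run this concurrently; before this change,
 * the second caller's event would have come back flagged
 * K_POLL_STATE_EADDRINUSE. */
void wait_for_sem(void)
{
	struct k_poll_event event;

	k_poll_event_init(&event, K_POLL_TYPE_SEM_AVAILABLE,
			  K_POLL_MODE_NOTIFY_ONLY, &my_sem);

	if (k_poll(&event, 1, K_FOREVER) == 0 &&
	    event.state == K_POLL_STATE_SEM_AVAILABLE) {
		/* k_poll() only reports availability; take the semaphore */
		k_sem_take(&my_sem, K_NO_WAIT);
	}
}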
@@ -224,16 +229,12 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	 * condition is met, either when looping through the events here or
 	 * because one of the events registered has had its state changed, or
 	 * that one of the objects we wanted to poll on already had a thread
-	 * polling on it. We can remove all registrations and return either
-	 * success or a -EADDRINUSE error. In the case of a -EADDRINUSE error,
-	 * the events that were available are still flagged as such, and it is
-	 * valid for the caller to consider them available, as if this function
-	 * returned success.
+	 * polling on it.
 	 */
 	if (!is_polling()) {
 		clear_event_registrations(events, last_registered, key);
 		irq_unlock(key);
-		return in_use;
+		return 0;
 	}
 
 	clear_polling_state(_current);
@@ -306,38 +307,43 @@ static int _signal_poll_event(struct k_poll_event *event, u32_t state,
 }
 
 /* returns 1 if a reschedule must take place, 0 otherwise */
-/* *obj_poll_event is guaranteed to not be NULL */
-int _handle_obj_poll_event(struct k_poll_event **obj_poll_event, u32_t state)
+int _handle_obj_poll_events(sys_dlist_t *events, u32_t state)
 {
-	struct k_poll_event *poll_event = *obj_poll_event;
+	struct k_poll_event *poll_event;
 	int must_reschedule;
 
-	*obj_poll_event = NULL;
+	poll_event = (struct k_poll_event *)sys_dlist_get(events);
+	if (!poll_event) {
+		return 0;
+	}
+
 	(void)_signal_poll_event(poll_event, state, &must_reschedule);
 	return must_reschedule;
 }
 
 void k_poll_signal_init(struct k_poll_signal *signal)
 {
-	signal->poll_event = NULL;
+	sys_dlist_init(&signal->poll_events);
 	signal->signaled = 0;
 	/* signal->result is left unitialized */
 }
 
 int k_poll_signal(struct k_poll_signal *signal, int result)
 {
 	unsigned int key = irq_lock();
+	struct k_poll_event *poll_event;
 	int must_reschedule;
 
 	signal->result = result;
 	signal->signaled = 1;
 
-	if (!signal->poll_event) {
+	poll_event = (struct k_poll_event *)sys_dlist_get(&signal->poll_events);
+	if (!poll_event) {
 		irq_unlock(key);
 		return 0;
 	}
 
-	int rc = _signal_poll_event(signal->poll_event, K_POLL_STATE_SIGNALED,
+	int rc = _signal_poll_event(poll_event, K_POLL_STATE_SIGNALED,
 				    &must_reschedule);
 
 	if (must_reschedule) {
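Both _handle_obj_poll_events() and k_poll_signal() now dequeue a single waiter from the head of the per-object list with sys_dlist_get(), which returns NULL when the list is empty (hence the new early returns). Combined with the ordering add_event() establishes, each signal wakes exactly one poller: the highest-priority thread registered. A self-contained sketch of the pop semantics, with invented, simplified types (Zephyr's actual sys_dlist is circular and sentinel-based):

struct dnode {
	struct dnode *next, *prev;
};

struct dlist {
	struct dnode *head;	/* NULL when the list is empty */
};

/* Pop the head, or return NULL if the list is empty; this mirrors the
 * sys_dlist_get() contract the hunk above relies on. */
static struct dnode *dlist_get(struct dlist *l)
{
	struct dnode *n = l->head;

	if (n != NULL) {
		l->head = n->next;
		if (l->head != NULL) {
			l->head->prev = NULL;
		}
	}
	return n;
}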