21
21
#include <stdint.h>
22
22
#include <string.h>
23
23
24
// Check whether an event was allocated by the user rather than from the
// queue's internal buffer: a user-allocated event lies outside the
// [q->buffer, q->slab.data] address range.
// NOTE: the macro expands an implicit reference to a local `equeue_t *q`
// at the call site; it is only valid inside functions with such a `q`.
// Fix: removed the stray space after the macro name — `#define NAME (e)`
// defines an object-like macro whose expansion starts with `(e)`, which
// breaks every use site; `#define NAME(e)` is the intended function-like form.
#define EQUEUE_IS_USER_ALLOCATED_EVENT(e) (((uintptr_t)(e) < (uintptr_t)q->buffer) || ((uintptr_t)(e) > (uintptr_t)q->slab.data))
26
+
24
27
// calculate the relative-difference between absolute times while
25
28
// correctly handling overflow conditions
26
29
static inline int equeue_tickdiff (unsigned a , unsigned b )
@@ -64,9 +67,15 @@ int equeue_create_inplace(equeue_t *q, size_t size, void *buffer)
64
67
{
65
68
// setup queue around provided buffer
66
69
// ensure buffer and size are aligned
67
- q -> buffer = (void * )(((uintptr_t ) buffer + sizeof (void * ) -1 ) & ~(sizeof (void * ) -1 ));
68
- size -= (char * ) q -> buffer - (char * ) buffer ;
69
- size &= ~(sizeof (void * ) -1 );
70
+ if (size >= sizeof (void * )) {
71
+ q -> buffer = (void * )(((uintptr_t ) buffer + sizeof (void * ) -1 ) & ~(sizeof (void * ) -1 ));
72
+ size -= (char * ) q -> buffer - (char * ) buffer ;
73
+ size &= ~(sizeof (void * ) -1 );
74
+ } else {
75
+ // don't align when size is less than the pointer size
76
+ // e.g. static queue (size == 1)
77
+ q -> buffer = buffer ;
78
+ }
70
79
71
80
q -> allocated = 0 ;
72
81
@@ -220,15 +229,13 @@ void equeue_dealloc(equeue_t *q, void *p)
220
229
e -> dtor (e + 1 );
221
230
}
222
231
223
- equeue_mem_dealloc (q , e );
232
+ if (!EQUEUE_IS_USER_ALLOCATED_EVENT (e )) {
233
+ equeue_mem_dealloc (q , e );
234
+ }
224
235
}
225
236
226
-
227
- // equeue scheduling functions
228
- static int equeue_enqueue (equeue_t * q , struct equeue_event * e , unsigned tick )
237
+ void equeue_enqueue (equeue_t * q , struct equeue_event * e , unsigned tick )
229
238
{
230
- // setup event and hash local id with buffer offset for unique id
231
- int id = (e -> id << q -> npw2 ) | ((unsigned char * )e - q -> buffer );
232
239
e -> target = tick + equeue_clampdiff (e -> target , tick );
233
240
e -> generation = q -> generation ;
234
241
@@ -254,7 +261,6 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
254
261
if (e -> next ) {
255
262
e -> next -> ref = & e -> next ;
256
263
}
257
-
258
264
e -> sibling = 0 ;
259
265
}
260
266
@@ -267,24 +273,19 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
267
273
q -> background .update (q -> background .timer ,
268
274
equeue_clampdiff (e -> target , tick ));
269
275
}
270
-
271
276
equeue_mutex_unlock (& q -> queuelock );
272
-
273
- return id ;
274
277
}
275
278
276
- static struct equeue_event * equeue_unqueue (equeue_t * q , int id )
279
+ // equeue scheduling functions
280
+ static int equeue_event_id (equeue_t * q , struct equeue_event * e )
277
281
{
278
- // decode event from unique id and check that the local id matches
279
- struct equeue_event * e = ( struct equeue_event * )
280
- & q -> buffer [ id & (( 1 << q -> npw2 ) - 1 )];
282
+ // setup event and hash local id with buffer offset for unique id
283
+ return (( e -> id << q -> npw2 ) | (( unsigned char * )e - q -> buffer ));
284
+ }
281
285
286
+ static struct equeue_event * equeue_unqueue_by_address (equeue_t * q , struct equeue_event * e )
287
+ {
282
288
equeue_mutex_lock (& q -> queuelock );
283
- if (e -> id != id >> q -> npw2 ) {
284
- equeue_mutex_unlock (& q -> queuelock );
285
- return 0 ;
286
- }
287
-
288
289
// clear the event and check if already in-flight
289
290
e -> cb = 0 ;
290
291
e -> period = -1 ;
@@ -310,6 +311,26 @@ static struct equeue_event *equeue_unqueue(equeue_t *q, int id)
310
311
e -> next -> ref = e -> ref ;
311
312
}
312
313
}
314
+ equeue_mutex_unlock (& q -> queuelock );
315
+ return e ;
316
+ }
317
+
318
+ static struct equeue_event * equeue_unqueue_by_id (equeue_t * q , int id )
319
+ {
320
+ // decode event from unique id and check that the local id matches
321
+ struct equeue_event * e = (struct equeue_event * )
322
+ & q -> buffer [id & ((1 << q -> npw2 ) - 1 )];
323
+
324
+ equeue_mutex_lock (& q -> queuelock );
325
+ if (e -> id != id >> q -> npw2 ) {
326
+ equeue_mutex_unlock (& q -> queuelock );
327
+ return 0 ;
328
+ }
329
+
330
+ if (0 == equeue_unqueue_by_address (q , e )) {
331
+ equeue_mutex_unlock (& q -> queuelock );
332
+ return 0 ;
333
+ }
313
334
314
335
equeue_incid (q , e );
315
336
equeue_mutex_unlock (& q -> queuelock );
@@ -369,18 +390,30 @@ int equeue_post(equeue_t *q, void (*cb)(void *), void *p)
369
390
e -> cb = cb ;
370
391
e -> target = tick + e -> target ;
371
392
372
- int id = equeue_enqueue (q , e , tick );
393
+ equeue_enqueue (q , e , tick );
394
+ int id = equeue_event_id (q , e );
373
395
equeue_sema_signal (& q -> eventsema );
374
396
return id ;
375
397
}
376
398
399
+ void equeue_post_user_allocated (equeue_t * q , void (* cb )(void * ), void * p )
400
+ {
401
+ struct equeue_event * e = (struct equeue_event * )p ;
402
+ unsigned tick = equeue_tick ();
403
+ e -> cb = cb ;
404
+ e -> target = tick + e -> target ;
405
+
406
+ equeue_enqueue (q , e , tick );
407
+ equeue_sema_signal (& q -> eventsema );
408
+ }
409
+
377
410
bool equeue_cancel (equeue_t * q , int id )
378
411
{
379
412
if (!id ) {
380
413
return false;
381
414
}
382
415
383
- struct equeue_event * e = equeue_unqueue (q , id );
416
+ struct equeue_event * e = equeue_unqueue_by_id (q , id );
384
417
if (e ) {
385
418
equeue_dealloc (q , e + 1 );
386
419
return true;
@@ -389,6 +422,21 @@ bool equeue_cancel(equeue_t *q, int id)
389
422
}
390
423
}
391
424
425
+ bool equeue_cancel_user_allocated (equeue_t * q , void * e )
426
+ {
427
+ if (!e ) {
428
+ return false;
429
+ }
430
+
431
+ struct equeue_event * _e = equeue_unqueue_by_address (q , e );
432
+ if (_e ) {
433
+ equeue_dealloc (q , _e + 1 );
434
+ return true;
435
+ } else {
436
+ return false;
437
+ }
438
+ }
439
+
392
440
int equeue_timeleft (equeue_t * q , int id )
393
441
{
394
442
int ret = -1 ;
@@ -409,6 +457,21 @@ int equeue_timeleft(equeue_t *q, int id)
409
457
return ret ;
410
458
}
411
459
460
+ int equeue_timeleft_user_allocated (equeue_t * q , void * e )
461
+ {
462
+ int ret = -1 ;
463
+
464
+ if (!e ) {
465
+ return -1 ;
466
+ }
467
+
468
+ struct equeue_event * _e = (struct equeue_event * )e ;
469
+ equeue_mutex_lock (& q -> queuelock );
470
+ ret = equeue_clampdiff (_e -> target , equeue_tick ());
471
+ equeue_mutex_unlock (& q -> queuelock );
472
+ return ret ;
473
+ }
474
+
412
475
void equeue_break (equeue_t * q )
413
476
{
414
477
equeue_mutex_lock (& q -> queuelock );
0 commit comments