Commit 66da694

equeue: add user allocated event support

Allow posting events allocated outside queue memory

1 parent 95fc8cf commit 66da694

File tree: 2 files changed, +121 -24 lines changed

events/equeue.h (34 additions, 0 deletions)
```diff
@@ -176,6 +176,17 @@ void equeue_event_dtor(void *event, void (*dtor)(void *));
 // be passed to equeue_cancel.
 int equeue_post(equeue_t *queue, void (*cb)(void *), void *event);
 
+// Post a user allocated event onto the event queue
+//
+// The equeue_post_user_allocated function takes a callback and a pointer
+// to an event allocated by the user. The specified callback will be executed
+// in the context of the event queue's dispatch loop with the allocated
+// event as its argument.
+//
+// The equeue_post_user_allocated function is irq safe and can act as
+// a mechanism for moving events out of irq contexts.
+void equeue_post_user_allocated(equeue_t *queue, void (*cb)(void *), void *event);
+
 // Cancel an in-flight event
 //
 // Attempts to cancel an event referenced by the unique id returned from
@@ -191,6 +202,20 @@ int equeue_post(equeue_t *queue, void (*cb)(void *), void *event);
 // Returning false if invalid id or already started executing.
 bool equeue_cancel(equeue_t *queue, int id);
 
+// Cancel an in-flight user allocated event
+//
+// Attempts to cancel an event referenced by its address.
+// It is safe to call equeue_cancel_user_allocated after an event
+// has already been dispatched.
+//
+// The equeue_cancel_user_allocated function is irq safe.
+//
+// If called while the event queue's dispatch loop is active,
+// equeue_cancel_user_allocated does not guarantee that the event
+// will not execute after it returns, as the event may have
+// already begun executing.
+bool equeue_cancel_user_allocated(equeue_t *queue, void *event);
+
 // Query how much time is left for delayed event
 //
 // If event is delayed, this function can be used to query how much time
@@ -200,6 +225,15 @@ bool equeue_cancel(equeue_t *queue, int id);
 //
 int equeue_timeleft(equeue_t *q, int id);
 
+// Query how much time is left for a delayed user allocated event
+//
+// If event is delayed, this function can be used to query how much time
+// is left until the event is due to be dispatched.
+//
+// This function is irq safe.
+//
+int equeue_timeleft_user_allocated(equeue_t *q, void *event);
+
 // Background an event queue onto a single-shot timer
 //
 // The provided update function will be called to indicate when the queue
```
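
Taken together, these declarations let callers own the event storage end to end. A minimal usage sketch (not part of the commit): it assumes the user block starts with a `struct equeue_event` header, as the queue's internal events do, and that the dispatch loop invokes the callback with a pointer just past that header (`e + 1`); `user_event`, `handler`, and `post_example` are illustrative names.

```c
#include <string.h>
#include "equeue.h"

// User-owned event storage: the equeue_event header must come first; the
// callback is handed a pointer just past it (e + 1), so it sees `payload`.
struct user_event {
    struct equeue_event e;
    int payload;
};

static void handler(void *p)
{
    int *payload = p;   // points at user_event::payload
    (void)payload;      // ... handle the event ...
}

static struct user_event ue;    // must outlive its dispatch

void post_example(equeue_t *q)
{
    memset(&ue, 0, sizeof ue);
    ue.e.target = 100;  // delay in ticks (milliseconds on common ports)
    ue.e.period = -1;   // one-shot; a value >= 0 would make it periodic
    ue.payload = 42;

    equeue_post_user_allocated(q, handler, &ue.e);

    // the event is referenced by its address rather than by an id:
    int left = equeue_timeleft_user_allocated(q, &ue.e);
    (void)left;                                // ticks until dispatch
    equeue_cancel_user_allocated(q, &ue.e);    // true if not yet started
}
```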

events/source/equeue.c (87 additions, 24 deletions)
```diff
@@ -21,6 +21,9 @@
 #include <stdint.h>
 #include <string.h>
 
+// check if the event is allocated by the user - the event address is outside the queue's internal buffer address range
+#define EQUEUE_IS_USER_ALLOCATED_EVENT(e) (((uintptr_t)(e) < (uintptr_t)q->buffer) || ((uintptr_t)(e) > ((uintptr_t)q->slab.data)))
+
 // calculate the relative-difference between absolute times while
 // correctly handling overflow conditions
 static inline int equeue_tickdiff(unsigned a, unsigned b)
```
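
In this diff the macro's one call site is `equeue_dealloc` below, whose queue parameter is named `q`; the macro reads that `q` from its expansion scope. The test itself is a plain address-range check. A standalone sketch of the same logic with hypothetical names, for illustration only:

```c
#include <stdbool.h>
#include <stdint.h>

// Hypothetical stand-in for the macro (illustration only). An event is
// internal only if its address lies inside the queue's chunk region, which
// runs from the start of the aligned buffer (q->buffer) up to the slab
// frontier (q->slab.data); anything outside is treated as user allocated.
static bool is_user_allocated(const void *buffer, const void *slab_data,
                              const void *event)
{
    uintptr_t lo = (uintptr_t)buffer;
    uintptr_t hi = (uintptr_t)slab_data;
    uintptr_t e = (uintptr_t)event;
    return (e < lo) || (e > hi);
}
```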
```diff
@@ -64,9 +67,15 @@ int equeue_create_inplace(equeue_t *q, size_t size, void *buffer)
 {
     // setup queue around provided buffer
     // ensure buffer and size are aligned
-    q->buffer = (void *)(((uintptr_t) buffer + sizeof(void *) -1) & ~(sizeof(void *) -1));
-    size -= (char *) q->buffer - (char *) buffer;
-    size &= ~(sizeof(void *) -1);
+    if (size >= sizeof(void *)) {
+        q->buffer = (void *)(((uintptr_t) buffer + sizeof(void *) -1) & ~(sizeof(void *) -1));
+        size -= (char *) q->buffer - (char *) buffer;
+        size &= ~(sizeof(void *) -1);
+    } else {
+        // don't align when size is less than pointer size
+        // e.g. static queue (size == 1)
+        q->buffer = buffer;
+    }
 
     q->allocated = 0;
```
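
The aligned branch rounds the buffer start up and the usable size down to pointer-size multiples. A worked check of that arithmetic, assuming 8-byte pointers and hypothetical values (illustration only):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    // stand-in for sizeof(void *) on a 64-bit port (assumption, not from the commit)
    const uintptr_t align = 8;

    uintptr_t buffer = 0x1003;  // hypothetical unaligned buffer start
    uintptr_t size = 64;        // hypothetical byte count

    // round the start up to the next 8-byte boundary, as the if-branch does
    uintptr_t aligned = (buffer + align - 1) & ~(align - 1);
    assert(aligned == 0x1008);

    // drop the bytes skipped by alignment (5), then round down to a multiple of 8
    size -= aligned - buffer;   // 64 - 5 = 59
    size &= ~(align - 1);       // 59 -> 56
    assert(size == 56);
    return 0;
}
```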

```diff
@@ -220,15 +229,13 @@ void equeue_dealloc(equeue_t *q, void *p)
         e->dtor(e + 1);
     }
 
-    equeue_mem_dealloc(q, e);
+    if (!EQUEUE_IS_USER_ALLOCATED_EVENT(e)) {
+        equeue_mem_dealloc(q, e);
+    }
 }
 
-
-// equeue scheduling functions
-static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
+void equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
 {
-    // setup event and hash local id with buffer offset for unique id
-    int id = (e->id << q->npw2) | ((unsigned char *)e - q->buffer);
     e->target = tick + equeue_clampdiff(e->target, tick);
     e->generation = q->generation;
 
@@ -254,7 +261,6 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
         if (e->next) {
             e->next->ref = &e->next;
         }
-
         e->sibling = 0;
     }
 
```
```diff
@@ -267,24 +273,19 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
         q->background.update(q->background.timer,
                              equeue_clampdiff(e->target, tick));
     }
-
     equeue_mutex_unlock(&q->queuelock);
-
-    return id;
 }
 
-static struct equeue_event *equeue_unqueue(equeue_t *q, int id)
+// equeue scheduling functions
+static int equeue_event_id(equeue_t *q, struct equeue_event *e)
 {
-    // decode event from unique id and check that the local id matches
-    struct equeue_event *e = (struct equeue_event *)
-                             &q->buffer[id & ((1 << q->npw2) - 1)];
+    // setup event and hash local id with buffer offset for unique id
+    return ((e->id << q->npw2) | ((unsigned char *)e - q->buffer));
+}
 
+static struct equeue_event *equeue_unqueue_by_address(equeue_t *q, struct equeue_event *e)
+{
     equeue_mutex_lock(&q->queuelock);
-    if (e->id != id >> q->npw2) {
-        equeue_mutex_unlock(&q->queuelock);
-        return 0;
-    }
-
     // clear the event and check if already in-flight
     e->cb = 0;
     e->period = -1;
@@ -310,6 +311,26 @@ static struct equeue_event *equeue_unqueue(equeue_t *q, int id)
             e->next->ref = e->ref;
         }
     }
+    equeue_mutex_unlock(&q->queuelock);
+    return e;
+}
+
+static struct equeue_event *equeue_unqueue_by_id(equeue_t *q, int id)
+{
+    // decode event from unique id and check that the local id matches
+    struct equeue_event *e = (struct equeue_event *)
+                             &q->buffer[id & ((1 << q->npw2) - 1)];
+
+    equeue_mutex_lock(&q->queuelock);
+    if (e->id != id >> q->npw2) {
+        equeue_mutex_unlock(&q->queuelock);
+        return 0;
+    }
+
+    if (0 == equeue_unqueue_by_address(q, e)) {
+        equeue_mutex_unlock(&q->queuelock);
+        return 0;
+    }
 
     equeue_incid(q, e);
     equeue_mutex_unlock(&q->queuelock);
```
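
The refactor separates id construction (`equeue_event_id`) from dequeuing, and splits lookup into by-address and by-id paths. The id still packs the slot's reuse counter `e->id` above the event's byte offset into the internal buffer, which occupies the low `npw2` bits. A worked round-trip with hypothetical values (illustration only):

```c
#include <assert.h>

int main(void)
{
    // hypothetical queue with a 256-byte buffer, so npw2 = 8 and offsets
    // occupy the low 8 bits of an id
    unsigned npw2 = 8;

    unsigned local_id = 3;   // e->id: bumped each time the slot is reused
    unsigned offset = 0x40;  // (unsigned char *)e - q->buffer

    // encode, as equeue_event_id does
    int id = (int)((local_id << npw2) | offset);
    assert(id == 0x340);

    // decode, as equeue_unqueue_by_id does
    assert((unsigned)(id & ((1 << npw2) - 1)) == offset);  // recovers the event address
    assert((unsigned)(id >> npw2) == local_id);            // stale ids fail this check
    return 0;
}
```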
```diff
@@ -369,18 +390,30 @@ int equeue_post(equeue_t *q, void (*cb)(void *), void *p)
     e->cb = cb;
     e->target = tick + e->target;
 
-    int id = equeue_enqueue(q, e, tick);
+    equeue_enqueue(q, e, tick);
+    int id = equeue_event_id(q, e);
     equeue_sema_signal(&q->eventsema);
     return id;
 }
 
+void equeue_post_user_allocated(equeue_t *q, void (*cb)(void *), void *p)
+{
+    struct equeue_event *e = (struct equeue_event *)p;
+    unsigned tick = equeue_tick();
+    e->cb = cb;
+    e->target = tick + e->target;
+
+    equeue_enqueue(q, e, tick);
+    equeue_sema_signal(&q->eventsema);
+}
+
 bool equeue_cancel(equeue_t *q, int id)
 {
     if (!id) {
         return false;
     }
 
-    struct equeue_event *e = equeue_unqueue(q, id);
+    struct equeue_event *e = equeue_unqueue_by_id(q, id);
     if (e) {
         equeue_dealloc(q, e + 1);
         return true;
```
```diff
@@ -389,6 +422,21 @@ bool equeue_cancel(equeue_t *q, int id)
         return false;
     }
 }
 
+bool equeue_cancel_user_allocated(equeue_t *q, void *e)
+{
+    if (!e) {
+        return false;
+    }
+
+    struct equeue_event *_e = equeue_unqueue_by_address(q, e);
+    if (_e) {
+        equeue_dealloc(q, _e + 1);
+        return true;
+    } else {
+        return false;
+    }
+}
+
 int equeue_timeleft(equeue_t *q, int id)
 {
     int ret = -1;
```
```diff
@@ -409,6 +457,21 @@ int equeue_timeleft(equeue_t *q, int id)
     return ret;
 }
 
+int equeue_timeleft_user_allocated(equeue_t *q, void *e)
+{
+    int ret = -1;
+
+    if (!e) {
+        return -1;
+    }
+
+    struct equeue_event *_e = (struct equeue_event *)e;
+    equeue_mutex_lock(&q->queuelock);
+    ret = equeue_clampdiff(_e->target, equeue_tick());
+    equeue_mutex_unlock(&q->queuelock);
+    return ret;
+}
+
 void equeue_break(equeue_t *q)
 {
     equeue_mutex_lock(&q->queuelock);
```
