@@ -13,10 +13,10 @@ namespace impl
 {
 class IAsyncQueueDispatcherBase
 {
-    public:
+    protected:
        struct future_base_t;
        // don't want to play around with relaxed memory ordering yet
-       struct request_base_t
+       struct request_base_t // TODO: protect from anyone but inheritors
        {
            public:
                enum class STATE : uint32_t
@@ -92,6 +92,8 @@ class IAsyncQueueDispatcherBase
                    LOCKED=4
                };

+           protected:
+               friend struct request_base_t;
                //! REQUESTING THREAD: done as part of filling out the request
                virtual inline void associate_request(request_base_t* req)
                {
@@ -111,7 +113,6 @@ class IAsyncQueueDispatcherBase
                    state.exchangeNotify<true>(STATE::READY,STATE::EXECUTING);
                }

-           protected:
                // the base class is not directly usable
                virtual inline ~future_base_t()
                {
@@ -129,7 +130,7 @@ class IAsyncQueueDispatcherBase
                atomic_state_t<STATE,STATE::INITIAL> state = {};
        };

-    protected:
+       // not meant for direct usage
        IAsyncQueueDispatcherBase() = default;
        ~IAsyncQueueDispatcherBase() = default;

@@ -145,7 +146,7 @@ class IAsyncQueueDispatcherBase
                }

            public:
-               inline future_t() = default;
+               inline future_t() {}
                inline ~future_t()
                {
                    discard();
@@ -230,7 +231,7 @@ class IAsyncQueueDispatcherBase
                }

                //! Can only be called once! If it returns false, the future has been cancelled and nothing happened.
-               inline bool move_into(T& dst)
+               [[nodiscard]] inline bool move_into(T& dst)
                {
                    T* pSrc = acquire();
                    if (!pSrc)
@@ -240,6 +241,15 @@ class IAsyncQueueDispatcherBase
                    return true;
                }

+               //! Utility to write less code. WILL ASSERT IF IT FAILS TO ACQUIRE! So don't use it on futures that might be cancelled or fail.
+               inline T copy_out()
+               {
+                   T retval;
+                   const bool success = move_into(retval);
+                   assert(success);
+                   return retval;
+               }
+
                //!
                virtual inline void discard()
                {
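For illustration, here is a caller-side sketch of the two ways to consume a future under the new API; the dispatcher type and request arguments are hypothetical stand-ins, only `future_t`, `move_into()` and `copy_out()` come from this header:

    MyDispatcher dispatcher; // hypothetical CRTP subclass of IAsyncQueueDispatcher

    // checked consumption: `[[nodiscard]]` forces the caller to handle a cancelled or failed future
    MyDispatcher::future_t<size_t> fut;
    dispatcher.request(&fut/*,...metadata constructor args...*/);
    size_t bytesRead;
    if (fut.move_into(bytesRead))
    {
        // safe to use `bytesRead` here
    }

    // terse consumption: `copy_out()` asserts on failure, so only use it on
    // futures that can neither fail nor get cancelled
    MyDispatcher::future_t<size_t> fut2;
    dispatcher.request(&fut2/*,...metadata constructor args...*/);
    const size_t moreBytes = fut2.copy_out();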
@@ -250,14 +260,13 @@ class IAsyncQueueDispatcherBase
            protected:
                // construct the retval element
                template<typename... Args>
-               inline void notify(Args&&... args)
+               inline void construct(Args&&... args)
                {
                    storage_t::construct(std::forward<Args>(args)...);
-                   future_base_t::notify();
                }
        };
        template<typename T>
-       struct cancellable_future_t final : public future_t<T>
+       struct cancellable_future_t : public future_t<T>
        {
            using base_t = future_t<T>;
            std::atomic<request_base_t*> request = nullptr;
@@ -267,7 +276,7 @@ class IAsyncQueueDispatcherBase
            inline bool cancel()
            {
                auto expected = base_t::STATE::ASSOCIATED;
-               if (state.tryTransition(STATE::EXECUTING,expected))
+               if (base_t::state.tryTransition(base_t::STATE::EXECUTING,expected))
                {
                    // Since we're here, we've managed to move from ASSOCIATED to a fake "EXECUTING", which means the Request is either:
                    // 1. RECORDING but after returning from `base_t::associate_request`
@@ -278,7 +287,7 @@ class IAsyncQueueDispatcherBase
278
287
request.exchange (nullptr )->cancel ();
279
288
280
289
// after doing everything, we can mark ourselves as cleaned up
281
- state.exchangeNotify <false >(STATE::INITIAL,STATE::EXECUTING);
290
+ base_t :: state.exchangeNotify <false >(base_t :: STATE::INITIAL, base_t :: STATE::EXECUTING);
282
291
return true ;
283
292
}
284
293
// we're here because either:
@@ -288,7 +297,7 @@ class IAsyncQueueDispatcherBase
                // - request is ready
                // - storage is locked/acquired
                // sanity check (there's a tiny gap between transitioning to EXECUTING and disassociating request)
-               assert(expected==STATE::EXECUTING || request==nullptr);
+               assert(expected==base_t::STATE::EXECUTING || request==nullptr);
                return false;
            }

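A sketch of how the requester is meant to use the cancellation race; the dispatcher and payload type are again hypothetical:

    MyDispatcher::cancellable_future_t<int> fut;
    dispatcher.request(&fut/*,...*/);

    if (fut.cancel())
    {
        // we won the ASSOCIATED -> "EXECUTING" race: the worker will skip the
        // request and the future is back in its INITIAL state, free for reuse
    }
    else
    {
        // too late to cancel, the request is executing or already done,
        // so the result must be consumed as usual
        int value;
        if (fut.move_into(value))
        {
            // ...
        }
    }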
@@ -303,14 +312,14 @@ class IAsyncQueueDispatcherBase
                }

            private:
-               inline void associate_request(request_base_t* req) override
+               inline void associate_request(request_base_t* req) override final
                {
                    base_t::associate_request(req);
                    request_base_t* prev = request.exchange(req);
                    // sanity check
                    assert(prev==nullptr);
                }
-               inline bool disassociate_request() override
+               inline bool disassociate_request() override final
                {
                    if (base_t::disassociate_request())
                    {
@@ -357,44 +366,39 @@ inline void IAsyncQueueDispatcherBase::request_base_t::notify()
* void init(internal_state_t* state); // required only in case of custom internal state
*
* void exit(internal_state_t* state); // optional, no `state` parameter in case of no internal state
-*
-* void request_impl(request_t& req, ...); // `...` are parameteres forwarded from request(), the request's state is locked with a mutex during the call
-* void process_request(request_t& req, internal_state_t& state); // no `state` parameter in case of no internal state
-* void background_work() // optional, does nothing if not provided
*
+* // no `state` parameter in case of no internal state
+* void process_request(request_metadata_t& req, internal_state_t& state);
+*
+* void background_work() // optional, does nothing if not provided
*
-* Provided RequestType shall define 5 methods:
-* void start();
-* void finalize();
-* bool wait();
-* void notify();
-* TODO: [outdated docs] lock() will be called just before processing the request, and unlock() will be called just after processing the request.
-* Those are to enable safe external write access to the request struct for user-defined purposes.
-*
-* wait_for_result() will wait until the Async queue completes processing the request and notifies us that the request is ready,
-* the request will remain locked upon return (so nothing overwrites its address on the circular buffer)
*
-* notify_all_ready() takes an r-value reference to an already locked mutex and notifies any waiters then releases the lock
+* The `lock()` will be called just before calling into `background_work()` and processing any requests via `process_request()`,
+* and `unlock()` will be called just after processing the request (if any).
*/
-template<typename CRTP, typename RequestType, uint32_t BufferSize = 256u, typename InternalStateType = void>
+template<typename CRTP, typename request_metadata_t, uint32_t BufferSize=256u, typename InternalStateType=void>
class IAsyncQueueDispatcher : public IThreadHandler<CRTP,InternalStateType>, protected impl::IAsyncQueueDispatcherBase
{
-       static_assert(std::is_base_of_v<impl::IAsyncQueueDispatcherBase::request_base_t,RequestType>, "Request type must derive from request_base_t!");
        static_assert(BufferSize>0u, "BufferSize must not be 0!");
        static_assert(core::isPoT(BufferSize), "BufferSize must be power of two!");

    protected:
        using base_t = IThreadHandler<CRTP,InternalStateType>;
        friend base_t; // TODO: remove, some functions should just be protected

+       struct request_t : public request_base_t
+       {
+           request_metadata_t m_metadata;
+       };
+
    private:
        constexpr static inline uint32_t MaxRequestCount = BufferSize;

        // maybe one day we'll abstract this into a lockless queue
        using atomic_counter_t = std::atomic_uint64_t;
        using counter_t = atomic_counter_t::value_type;

-       RequestType request_pool[MaxRequestCount];
+       request_t request_pool[MaxRequestCount];
        atomic_counter_t cb_begin = 0u;
        atomic_counter_t cb_end = 0u;

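Putting the documented interface together, a minimal dispatcher against the new template signature could look roughly as follows; the metadata type and the work performed are invented for illustration, the diff itself only prescribes the method names and parameters:

    // hypothetical payload, passed as the new `request_metadata_t` template parameter
    struct LoadRequest
    {
        std::string path;
    };

    class CFileLoader final : public IAsyncQueueDispatcher<CFileLoader,LoadRequest,256u>
    {
        public:
            void init() {} // no custom internal state, hence no `state` parameter
            void exit() {} // optional

            // worker thread: called once per non-cancelled request popped off the circular buffer
            void process_request(LoadRequest& req)
            {
                // do the blocking work here; how the produced value reaches the
                // associated future's storage is not shown in these hunks
            }

            // `background_work()` is optional, the inherited no-op is used if absent
    };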
@@ -413,39 +417,41 @@ class IAsyncQueueDispatcher : public IThreadHandler<CRTP,InternalStateType>, pro
        using cvar_t = typename base_t::cvar_t;
        using internal_state_t = typename base_t::internal_state_t;

-       using request_t = RequestType;
+       template<typename T>
+       using future_t = impl::IAsyncQueueDispatcherBase::future_t<T>;
+       template<typename T>
+       using cancellable_future_t = impl::IAsyncQueueDispatcherBase::cancellable_future_t<T>;

-       // Returns a reference to a request's storage in the circular buffer after processing the moved arguments
-       // YOU MUST CONSUME THE REQUEST by calling `discard_storage()` on it EXACTLY ONCE!
-       // YOU MUST CALL IT EVEN IF THERE'S NO DATA YOU WISH TO GET BACK FROM IT!
-       // (if you don't the queue will deadlock because of an unresolved overflow)
-       template<typename... Args>
-       request_t& request(Args&&... args)
+       //! Constructs a request's metadata from `args` on the circular buffer once there's enough space to accommodate it,
+       //! then associates the request with the future passed in as the first argument.
+       template<typename T, typename... Args>
+       void request(future_t<T>* _future, Args&&... args)
        {
-           auto virtualIx = cb_end++;
-           auto safe_begin = virtualIx<MaxRequestCount ? static_cast<counter_t>(0) : (virtualIx-MaxRequestCount+1u);
-
-           for (counter_t old_begin; (old_begin = cb_begin.load()) < safe_begin; )
+           // get next output index
+           const auto virtualIx = cb_end++;
+           // protect against overflow by waiting for the worker to catch up
+           const auto safe_begin = virtualIx<MaxRequestCount ? static_cast<counter_t>(0) : (virtualIx-MaxRequestCount+1u);
+           for (counter_t old_begin; (old_begin=cb_begin.load())<safe_begin; )
                cb_begin.wait(old_begin);

+           // get actual storage index now
            const auto r_id = wrapAround(virtualIx);

            request_t& req = request_pool[r_id];
            req.start();
-           static_cast<CRTP*>(this)->request_impl(req,std::forward<Args>(args)...);
-           req.finalize();
+           req.m_metadata = request_metadata_t(std::forward<Args>(args)...);
+           req.finalize(_future);

            {
                auto global_lk = base_t::createLock();
                // wake up queue thread (needs to happen under a lock to not miss a wakeup)
                base_t::m_cvar.notify_one();
            }
-           return req;
        }

    protected:
        inline ~IAsyncQueueDispatcher() {}
-       void background_work() {}
+       inline void background_work() {}

    private:
        template<typename... Args>
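The overflow guard above only works together with `wrapAround` turning the monotonically growing 64-bit counter into a pool index; its definition is outside these hunks, but the `core::isPoT(BufferSize)` static_assert suggests a plain mask, roughly:

    // hypothetical reconstruction, the real definition is not part of this diff
    static inline counter_t wrapAround(counter_t virtualIx)
    {
        // only valid because BufferSize (== MaxRequestCount) is a power of two
        return virtualIx&static_cast<counter_t>(MaxRequestCount-1u);
    }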
@@ -463,17 +469,16 @@ class IAsyncQueueDispatcher : public IThreadHandler<CRTP,InternalStateType>, pro

            request_t& req = request_pool[r_id];
            // do NOT allow cancelling or modification of the request while working on it
-           if (req.wait_for_work())
+           if (req.wait())
            {
-               // if the request supports cancelling and got cancelled, the wait_for_work function may return false
-               static_cast<CRTP*>(this)->process_request(req,optional_internal_state...);
+               // if the request supports cancelling and got cancelled, the `wait()` function may return false
+               static_cast<CRTP*>(this)->process_request(req.m_metadata,optional_internal_state...);
+               req.notify();
            }
            // wake the waiter up
-           req.notify_ready();
            cb_begin++;
-           #if __cplusplus >= 202002L
-           cb_begin.notify_one();
-           #endif
+           // this does not need to happen under a lock, because it's not a condvar
+           cb_begin.notify_one();
        }
        lock.lock();
    }
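Taken together, the two sides of the change give the request lifecycle roughly this shape (a plain-text summary of the hunks above, not code from the file):

    // requesting thread                        worker thread
    // ------------------                       -------------
    // future_t<T> fut;
    // request(&fut,args...):
    //   req.start();
    //   req.m_metadata = request_metadata_t(args...);
    //   req.finalize(&fut); // associates the future
    //                                           if (req.wait()) // false if cancelled
    //                                           {
    //                                               process_request(req.m_metadata,...);
    //                                               req.notify(); // signals completion to the future
    //                                           }
    //                                           cb_begin++; cb_begin.notify_one();
    // fut.move_into(dst); // or fut.copy_out()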