#include "swift/ABI/MetadataValues.h"
#include "swift/Runtime/Config.h"
#include "swift/Basic/STLExtras.h"
- #include "TaskQueues.h"
#include <bitset>
#include <string>
+ #include <queue>

namespace swift {
class AsyncTask;
@@ -43,11 +43,11 @@ class alignas(2 * alignof(void*)) Job {
enum {
/// The next waiting task link, an AsyncTask that is waiting on a future.
NextWaitingTaskIndex = 0,
- /// The next completed task link, an AsyncTask that is completed however
- /// has not been polled yet (by `group.next()`), so the channel task keeps
- /// the list in completion order, such that they can be polled out one by
- /// one.
- NextChannelCompletedTaskIndex = 1,
+ // /// The next completed task link, an AsyncTask that is completed however
+ // /// has not been polled yet (by `group.next()`), so the channel task keeps
+ // /// the list in completion order, such that they can be polled out one by
+ // /// one.
+ // NextChannelCompletedTaskIndex = 1,
};

public:
@@ -223,7 +223,7 @@ class AsyncTask : public HeapObject, public Job {
class GroupFragment {
public:
/// Describes the status of the channel.
- enum class ReadyQueueStatus : uintptr_t {
+ enum class ReadyStatus : uintptr_t {
/// The channel is empty, no tasks are pending.
/// Return immediately, there is no point in suspending.
///
@@ -238,23 +238,12 @@ class AsyncTask : public HeapObject, public Job {
Error = 0b11,
};

- /// Describes the status of the future.
- ///
- /// Futures always begin in the "Executing" state, and will always
- /// make a single state change to either Success or Error.
+ /// Describes the status of the waiting task that is suspended on `next()`.
enum class WaitStatus : uintptr_t {
- /// The storage is not accessible.
- Executing = 0,
-
- /// The future has completed with result (of type \c resultType).
- Success = 1,
-
- /// The future has completed by throwing an error (an \c Error
- /// existential).
- Error = 2,
+ Waiting = 0,
};

- enum class ChannelPollStatus : uintptr_t {
+ enum class GroupPollStatus : uintptr_t {
/// The channel is known to be empty and we can immediately return nil.
Empty = 0,

@@ -271,7 +260,7 @@ class AsyncTask : public HeapObject, public Job {

/// The result of waiting on a Channel (TaskGroup).
struct GroupPollResult {
- ChannelPollStatus status; // TODO: pack it into storage pointer or not worth it?
+ GroupPollStatus status; // TODO: pack it into storage pointer or not worth it?

/// Storage for the result of the future.
///
@@ -292,18 +281,18 @@ class AsyncTask : public HeapObject, public Job {
AsyncTask *retainedTask;

bool isStorageAccessible() {
- return status == ChannelPollStatus::Success ||
- status == ChannelPollStatus::Error ||
- status == ChannelPollStatus::Empty;
+ return status == GroupPollStatus::Success ||
+ status == GroupPollStatus::Error ||
+ status == GroupPollStatus::Empty;
}

static GroupPollResult get(AsyncTask *asyncTask, bool hadErrorResult,
bool needsSwiftRelease) {
auto fragment = asyncTask->futureFragment();
return GroupPollResult{
/*status*/ hadErrorResult ?
- GroupFragment::ChannelPollStatus::Error :
- GroupFragment::ChannelPollStatus::Success,
+ GroupFragment::GroupPollStatus::Error :
+ GroupFragment::GroupPollStatus::Success,
/*storage*/ hadErrorResult ?
reinterpret_cast<OpaqueValue *>(fragment->getError()) :
fragment->getStoragePtr(),
@@ -321,15 +310,15 @@ class AsyncTask : public HeapObject, public Job {

uintptr_t storage;

- ReadyQueueStatus getStatus() const {
- return static_cast<ReadyQueueStatus>(storage & statusMask);
+ ReadyStatus getStatus() const {
+ return static_cast<ReadyStatus>(storage & statusMask);
}

AsyncTask *getTask() const {
return reinterpret_cast<AsyncTask *>(storage & ~statusMask);
}

- static ReadyQueueItem get(ReadyQueueStatus status, AsyncTask *task) {
+ static ReadyQueueItem get(ReadyStatus status, AsyncTask *task) {
assert(task == nullptr || task->isFuture());
return ReadyQueueItem{
reinterpret_cast<uintptr_t>(task) | static_cast<uintptr_t>(status)};
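ReadyQueueItem above relies on pointer alignment to pack a small status value into the low bits of an AsyncTask pointer. Below is a minimal self-contained sketch of that packing trick; `Task`, `Status`, and `Item` are hypothetical placeholders, not the runtime's types.

```cpp
#include <cassert>
#include <cstdint>

struct alignas(8) Task { int payload; }; // 8-byte alignment leaves the low bits free

enum class Status : uintptr_t { Empty = 0, Waiting = 1, Success = 2, Error = 3 };

struct Item {
  static constexpr uintptr_t statusMask = 0x3; // low two bits hold the status
  uintptr_t storage;

  Status getStatus() const { return static_cast<Status>(storage & statusMask); }
  Task *getTask() const { return reinterpret_cast<Task *>(storage & ~statusMask); }

  static Item get(Status status, Task *task) {
    // Valid because the pointer's low two bits are guaranteed to be zero.
    return Item{reinterpret_cast<uintptr_t>(task) |
                static_cast<uintptr_t>(status)};
  }
};

int main() {
  Task task{42};
  Item item = Item::get(Status::Success, &task);
  assert(item.getStatus() == Status::Success);
  assert(item.getTask() == &task);
  return 0;
}
```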
@@ -379,7 +368,7 @@ class AsyncTask : public HeapObject, public Job {
}

unsigned int waitingTasks() {
- return status & maskWaiting; // consider only `maskWaiting` bits
+ return status & maskWaiting;
}

bool isEmpty() {
@@ -416,20 +405,55 @@ class AsyncTask : public HeapObject, public Job {
};
};

+ template <typename T>
+ class NaiveQueue {
+ std::queue<T> queue;
+
+ public:
+ NaiveQueue() = default;
+ NaiveQueue(const NaiveQueue<T> &) = delete;
+ NaiveQueue &operator=(const NaiveQueue<T> &) = delete;
+
+ NaiveQueue(NaiveQueue<T> &&other) {
+ queue = std::move(other.queue);
+ }
+
+ virtual ~NaiveQueue() { }
+
+ bool dequeue(T &output) {
+ if (queue.empty()) {
+ return false;
+ }
+ output = queue.front();
+ queue.pop();
+ return true;
+ }
+
+ void enqueue(const T item) {
+ queue.push(item);
+ }
+
+ };
+
private:

+ // TODO: move to lockless via the status atomic
+ mutable std::mutex mutex;
+
/// Used for queue management, counting number of waiting and ready tasks
- // TODO: we likely can collapse these into the wait queue if we try hard enough?
- // but we'd lose the ability to get counts I think.
std::atomic<unsigned long> status;

/// Queue containing completed tasks offered into this channel.
///
/// The low bits contain the status, the rest of the pointer is the
/// AsyncTask.
- // mpsc_queue_t<ReadyQueueItem> readyQueue;
- MutexQueue<ReadyQueueItem> readyQueue;
- // TODO: Try the same queue strategy as the waitQueue
+ NaiveQueue<ReadyQueueItem> readyQueue;
+ // mpsc_queue_t<ReadyQueueItem> readyQueue; // TODO: can we get away with an MPSC queue here once actor executors land?
+
+ // NOTE: this style of "queue" is not very nice for the group,
+ // because it acts more like a stack, and we really want completion order
+ // for the task group, thus not using this style (which the wait queue does)
+ // std::atomic<ReadyQueueItem> readyQueue;

/// Queue containing all of the tasks that are waiting in `get()`.
///
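For illustration, here is one way the mutex-plus-NaiveQueue pair added above could be driven by an offer/poll pair. `Item` and `Group` are hypothetical stand-ins for ReadyQueueItem and GroupFragment, and the NaiveQueue body is a trimmed copy of the one in this diff; this is a sketch of the locking pattern, not the runtime's actual code.

```cpp
#include <mutex>
#include <queue>

struct Item { int value; }; // placeholder for ReadyQueueItem

// Trimmed-down copy of the NaiveQueue added above.
template <typename T>
class NaiveQueue {
  std::queue<T> queue;
public:
  bool dequeue(T &output) {
    if (queue.empty())
      return false;
    output = queue.front();
    queue.pop();
    return true;
  }
  void enqueue(const T item) { queue.push(item); }
};

// Hypothetical owner of the queue, mirroring how the fragment guards
// readyQueue with its mutex.
struct Group {
  mutable std::mutex mutex;
  NaiveQueue<Item> readyQueue;

  void offer(Item item) {
    std::lock_guard<std::mutex> lock(mutex);
    readyQueue.enqueue(item); // completed results are kept in completion order
  }

  bool poll(Item &out) {
    std::lock_guard<std::mutex> lock(mutex);
    return readyQueue.dequeue(out); // false when nothing is ready yet
  }
};

int main() {
  Group group;
  group.offer(Item{1});
  Item out;
  bool gotOne = group.poll(out); // true, out.value == 1
  return gotOne ? 0 : 1;
}
```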
@@ -440,15 +464,14 @@ class AsyncTask : public HeapObject, public Job {
/// AsyncTask.
std::atomic<WaitQueueItem> waitQueue;

- // Trailing storage for the result itself. The storage will be uninitialized.
- // Use the `readyQueue` to poll for values from the channel instead.
friend class AsyncTask;

public:
explicit GroupFragment()
: status(GroupStatus::initial().status),
readyQueue(),
- waitQueue(WaitQueueItem::get(WaitStatus::Executing, nullptr)) {}
+ // readyQueue(ReadyQueueItem::get(ReadyStatus::Empty, nullptr)),
+ waitQueue(WaitQueueItem::get(WaitStatus::Waiting, nullptr)) {}

/// Destroy the storage associated with the channel.
void destroy();
@@ -460,35 +483,29 @@ class AsyncTask : public HeapObject, public Job {

GroupStatus statusLoad() {
return GroupStatus{
- // status.load(std::memory_order_relaxed)
- status.load(std::memory_order_acquire)
+ // status.load(std::memory_order_acquire)
+ status.load(std::memory_order_seq_cst) // TODO: acquire instead
};
}

/// Returns *assumed* new status, including the just performed +1.
- GroupStatus statusAddReadyTaskLoad() {
- auto s = GroupStatus{
- // status.fetch_add(GroupStatus::oneReadyTask, std::memory_order_relaxed)
- status.fetch_add(GroupStatus::oneReadyTask, std::memory_order_release) + GroupStatus::oneReadyTask
- };
+ GroupStatus statusAddReadyTaskAcquire() {
+ auto old = status.fetch_add(GroupStatus::oneReadyTask, std::memory_order_acquire);
+ auto s = GroupStatus{old + GroupStatus::oneReadyTask};
assert(s.readyTasks() <= s.pendingTasks());
return s;
}

/// Returns *assumed* new status, including the just performed +1.
- GroupStatus statusAddPendingTaskLoad() {
- return GroupStatus{
- // status.fetch_add(GroupStatus::onePendingTask, std::memory_order_relaxed)
- status.fetch_add(GroupStatus::onePendingTask, std::memory_order_release) + GroupStatus::onePendingTask
- };
+ GroupStatus statusAddPendingTaskRelaxed() {
+ auto old = status.fetch_add(GroupStatus::onePendingTask, std::memory_order_relaxed);
+ return GroupStatus{old + GroupStatus::onePendingTask};
}

/// Returns *assumed* new status, including the just performed +1.
- GroupStatus statusAddWaitingTaskLoad() {
- return GroupStatus{
- // status.fetch_add(GroupStatus::oneWaitingTask, std::memory_order_relaxed)
- status.fetch_add(GroupStatus::oneWaitingTask, std::memory_order_release) + GroupStatus::oneWaitingTask
- };
+ GroupStatus statusAddWaitingTaskAcquire() {
+ auto old = status.fetch_add(GroupStatus::oneWaitingTask, std::memory_order_acquire);
+ return GroupStatus{old + GroupStatus::oneWaitingTask};
}

/// Remove waiting task, without taking any pending task.
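The accessors above share one pattern: the ready, pending, and waiting counters are packed into bit fields of the single atomic `status` word, and because `fetch_add` returns the previous value, the "assumed new" status is that old value plus the increment just applied. The sketch below shows the pattern in isolation; the field widths and constant names are illustrative assumptions, not the actual `GroupStatus` layout.

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

// Assumed layout: waiting count in bits 0-19, ready count in bits 20-39,
// pending count in bits 40-59. The real GroupStatus masks differ.
constexpr uint64_t maskWaiting    = (1ull << 20) - 1;
constexpr uint64_t oneWaitingTask = 1ull;
constexpr uint64_t maskReady      = ((1ull << 20) - 1) << 20;
constexpr uint64_t oneReadyTask   = 1ull << 20;
constexpr uint64_t onePendingTask = 1ull << 40;

std::atomic<uint64_t> status{0};

// fetch_add returns the *previous* value, so the assumed-new status is the
// old value plus the increment that was just applied.
uint64_t addReadyTask() {
  uint64_t old = status.fetch_add(oneReadyTask, std::memory_order_acquire);
  return old + oneReadyTask;
}

uint64_t readyTasks(uint64_t s)   { return (s & maskReady) >> 20; }
uint64_t waitingTasks(uint64_t s) { return s & maskWaiting; }

int main() {
  status.fetch_add(onePendingTask, std::memory_order_relaxed); // one task spawned
  uint64_t s = addReadyTask();                                 // that task completed
  assert(readyTasks(s) == 1);
  assert(waitingTasks(s) == 0);
  return 0;
}
```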
@@ -526,22 +543,16 @@ class AsyncTask : public HeapObject, public Job {

/// Offer result of a task into this channel.
/// The value is enqueued at the end of the channel.
- ///
- /// Upon enqueue, any waiting tasks will be scheduled on the given executor. // TODO: not precisely right
- void
- groupOffer(AsyncTask *completed, AsyncContext *context, ExecutorRef executor);
-
- // FutureFragment::Status
- // AsyncTask::waitGroupNext(AsyncTask *waitingTask);
+ void groupOffer(AsyncTask *completed, AsyncContext *context, ExecutorRef executor);

/// Attempt to dequeue ready tasks and complete the waitingTask.
///
/// If unable to complete the waiting task immediately (with a readily
- /// available completed task), either return ChannelPollStatus::Empty if it is known
- /// that no tasks are in flight, or ChannelPollStatus::Waiting if there are
- /// tasks in flight and we'll eventually be woken up by a completion.
- GroupFragment::GroupPollResult
- groupPoll(AsyncTask *waitingTask);
+ /// available completed task), either returns a `GroupPollStatus::Empty`
+ /// result if it is known that there are no pending tasks in the group,
+ /// or a `GroupPollStatus::Waiting` result if there are tasks in flight
+ /// and the waitingTask will eventually be woken up by a completion.
+ GroupFragment::GroupPollResult groupPoll(AsyncTask *waitingTask);

// ==== TaskGroup Child ------------------------------------------------------

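A hedged sketch of the polling decision that the `groupPoll` doc comment describes: hand out a completed result if one is already queued, report Empty when nothing is pending, otherwise leave the caller waiting for a completion. Types, fields, and the `int` result are simplified placeholders, not the runtime's actual interfaces.

```cpp
#include <mutex>
#include <queue>

enum class GroupPollStatus { Empty, Waiting, Success, Error };

struct PollResult {
  GroupPollStatus status;
  int value; // stand-in for the real task's result storage
};

struct GroupSketch {
  std::mutex mutex;
  std::queue<int> readyQueue; // completed results, kept in completion order
  unsigned pendingTasks = 0;  // tasks still in flight

  PollResult poll() {
    std::lock_guard<std::mutex> lock(mutex);
    if (!readyQueue.empty()) {
      // A completed task is already waiting to be handed out.
      int value = readyQueue.front();
      readyQueue.pop();
      --pendingTasks;
      return {GroupPollStatus::Success, value};
    }
    if (pendingTasks == 0) {
      // Nothing pending: the caller can return nil immediately.
      return {GroupPollStatus::Empty, 0};
    }
    // Tasks are still in flight; the caller suspends and is woken up
    // by a later completion.
    return {GroupPollStatus::Waiting, 0};
  }
};
```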
@@ -695,13 +706,12 @@ class AsyncTask : public HeapObject, public Job {
SchedulerPrivate[NextWaitingTaskIndex]);
}

- /// Access the next completed task, which establishes a singly linked list of
- /// tasks that are waiting to be polled from a task group channel.
- // FIXME: remove and replace with a fifo queue in the Channel task itself.
- AsyncTask *&getNextChannelCompletedTask() {
- return reinterpret_cast<AsyncTask *&>(
- SchedulerPrivate[NextChannelCompletedTaskIndex]);
- }
+ // /// Access the next completed task, which establishes a singly linked list of
+ // /// tasks that are waiting to be polled from a task group channel.
+ // AsyncTask *&getNextChannelReadyTask() {
+ // return reinterpret_cast<AsyncTask *&>(
+ // SchedulerPrivate[NextChannelCompletedTaskIndex]);
+ // }
};

// The compiler will eventually assume these.
@@ -807,10 +817,6 @@ class FutureAsyncContext : public AsyncContext {
SwiftError *errorResult = nullptr;
OpaqueValue *indirectResult;

-
- // TODO: this is to support "offer into queue on complete"
- AsyncContext *parentChannel = nullptr; // TODO: no idea if we need this or not
-
using AsyncContext::AsyncContext;
};
