
Commit 1d46b80

Revert "Future-proof the layout of AsyncTask."
This reverts commit 7eb1052.
1 parent 837a95d commit 1d46b80

11 files changed (+162, -248 lines)


include/swift/ABI/MetadataValues.h

Lines changed: 0 additions & 3 deletions
@@ -47,9 +47,6 @@ enum {
   /// in a default actor.
   NumWords_DefaultActor = 10,

-  /// The number of words in a task.
-  NumWords_AsyncTask = 16,
-
   /// The number of words in a task group.
   NumWords_TaskGroup = 32,

include/swift/ABI/Task.h

Lines changed: 66 additions & 51 deletions
@@ -126,7 +126,7 @@ class alignas(2 * alignof(void*)) Job :
 };

 // The compiler will eventually assume these.
-#if SWIFT_POINTER_IS_8_BYTES
+#if defined(__LP64__) || defined(_WIN64)
 static_assert(sizeof(Job) == 6 * sizeof(void*),
               "Job size is wrong");
 #else
@@ -136,6 +136,46 @@ static_assert(sizeof(Job) == 8 * sizeof(void*),
 static_assert(alignof(Job) == 2 * alignof(void*),
               "Job alignment is wrong");

+/// The current state of a task's status records.
+class ActiveTaskStatus {
+  enum : uintptr_t {
+    IsCancelled = 0x1,
+    IsLocked = 0x2,
+    RecordMask = ~uintptr_t(IsCancelled | IsLocked)
+  };
+
+  uintptr_t Value;
+
+public:
+  constexpr ActiveTaskStatus() : Value(0) {}
+  ActiveTaskStatus(TaskStatusRecord *innermostRecord,
+                   bool cancelled, bool locked)
+    : Value(reinterpret_cast<uintptr_t>(innermostRecord)
+                + (locked ? IsLocked : 0)
+                + (cancelled ? IsCancelled : 0)) {}
+
+  /// Is the task currently cancelled?
+  bool isCancelled() const { return Value & IsCancelled; }
+
+  /// Is there an active lock on the cancellation information?
+  bool isLocked() const { return Value & IsLocked; }
+
+  /// Return the innermost cancellation record. Code running
+  /// asynchronously with this task should not access this record
+  /// without having first locked it; see swift_taskCancel.
+  TaskStatusRecord *getInnermostRecord() const {
+    return reinterpret_cast<TaskStatusRecord*>(Value & RecordMask);
+  }
+
+  static TaskStatusRecord *getStatusRecordParent(TaskStatusRecord *ptr);
+
+  using record_iterator =
+    LinkedListIterator<TaskStatusRecord, getStatusRecordParent>;
+  llvm::iterator_range<record_iterator> records() const {
+    return record_iterator::rangeBeginning(getInnermostRecord());
+  }
+};
+
 class NullaryContinuationJob : public Job {

 private:
@@ -172,16 +212,6 @@ class NullaryContinuationJob : public Job {
 /// it can hold, and thus must be the *last* fragment.
 class AsyncTask : public Job {
 public:
-  // On 32-bit targets, there is a word of tail padding remaining
-  // in Job, and ResumeContext will fit into that, at offset 28.
-  // Private then has offset 32.
-  // On 64-bit targets, there is no tail padding in Job, and so
-  // ResumeContext has offset 48. There is therefore another word
-  // of reserved storage prior to Private (which needs to have
-  // double-word alignment), which has offset 64.
-  // We therefore converge and end up with 16 words of storage on
-  // all platforms.
-
   /// The context for resuming the job. When a task is scheduled
   /// as a job, the next continuation should be installed as the
   /// ResumeTask pointer in the job header, with this serving as
@@ -192,57 +222,36 @@ class AsyncTask : public Job {
   /// prevent it from being corrupted in flight.
   AsyncContext * __ptrauth_swift_task_resume_context ResumeContext;

-#if SWIFT_POINTER_IS_8_BYTES
-  void *Reserved64;
-#endif
+  /// The currently-active information about cancellation.
+  std::atomic<ActiveTaskStatus> Status;

-  struct PrivateStorage;
+  /// Reserved for the use of the task-local stack allocator.
+  void *AllocatorPrivate[4];

-  /// Private storage for the use of the runtime.
-  struct alignas(2 * alignof(void*)) OpaquePrivateStorage {
-    void *Storage[8];
+  /// Task local values storage container.
+  TaskLocal::Storage Local;

-    /// Initialize this storage during the creation of a task.
-    void initialize(AsyncTask *task);
-    void initializeWithSlab(AsyncTask *task,
-                            void *slab, size_t slabCapacity);
-
-    /// React to the completion of the enclosing task's execution.
-    void complete(AsyncTask *task);
-
-    /// React to the final destruction of the enclosing task.
-    void destroy();
-
-    PrivateStorage &get();
-    const PrivateStorage &get() const;
-  };
-  PrivateStorage &_private();
-  const PrivateStorage &_private() const;
-
-  OpaquePrivateStorage Private;
-
-  /// Create a task.
-  /// This does not initialize Private; callers must call
-  /// Private.initialize separately.
   AsyncTask(const HeapMetadata *metadata, JobFlags flags,
             TaskContinuationFunction *run,
             AsyncContext *initialContext)
     : Job(flags, run, metadata),
-      ResumeContext(initialContext) {
+      ResumeContext(initialContext),
+      Status(ActiveTaskStatus()),
+      Local(TaskLocal::Storage()) {
     assert(flags.isAsyncTask());
     Id = getNextTaskId();
   }

   /// Create a task with "immortal" reference counts.
   /// Used for async let tasks.
-  /// This does not initialize Private; callers must call
-  /// Private.initialize separately.
   AsyncTask(const HeapMetadata *metadata, InlineRefCounts::Immortal_t immortal,
             JobFlags flags,
             TaskContinuationFunction *run,
             AsyncContext *initialContext)
     : Job(flags, run, metadata, immortal),
-      ResumeContext(initialContext) {
+      ResumeContext(initialContext),
+      Status(ActiveTaskStatus()),
+      Local(TaskLocal::Storage()) {
     assert(flags.isAsyncTask());
     Id = getNextTaskId();
   }
@@ -260,18 +269,24 @@ class AsyncTask : public Job {

   /// Check whether this task has been cancelled.
   /// Checking this is, of course, inherently race-prone on its own.
-  bool isCancelled() const;
+  bool isCancelled() const {
+    return Status.load(std::memory_order_relaxed).isCancelled();
+  }

   // ==== Task Local Values ----------------------------------------------------

   void localValuePush(const HeapObject *key,
-                      /* +1 */ OpaqueValue *value,
-                      const Metadata *valueType);
+                      /* +1 */ OpaqueValue *value, const Metadata *valueType) {
+    Local.pushValue(this, key, value, valueType);
+  }

-  OpaqueValue *localValueGet(const HeapObject *key);
+  OpaqueValue* localValueGet(const HeapObject *key) {
+    return Local.getValue(this, key);
+  }

-  /// Returns true if storage has still more bindings.
-  void localValuePop();
+  void localValuePop() {
+    Local.popValue(this);
+  }

   // ==== Child Fragment -------------------------------------------------------

@@ -513,7 +528,7 @@ class AsyncTask : public Job {
 };

 // The compiler will eventually assume these.
-static_assert(sizeof(AsyncTask) == NumWords_AsyncTask * sizeof(void*),
+static_assert(sizeof(AsyncTask) == 14 * sizeof(void*),
               "AsyncTask size is wrong");
 static_assert(alignof(AsyncTask) == 2 * alignof(void*),
               "AsyncTask alignment is wrong");

include/swift/ABI/TaskStatus.h

Lines changed: 5 additions & 0 deletions
@@ -82,6 +82,11 @@ class TaskStatusRecord {
   }
 };

+inline TaskStatusRecord *
+ActiveTaskStatus::getStatusRecordParent(TaskStatusRecord *ptr) {
+  return ptr->getParent();
+}
+
 /// A deadline for the task. If this is reached, the task will be
 /// automatically cancelled. The deadline can also be queried and used
 /// in other ways.
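This inline definition is what lets ActiveTaskStatus::records() iterate the status records through their parent links. Hand-written, the walk looks roughly like the sketch below; it assumes, as the iterator does, that the outermost record's parent link is null, and the function name is hypothetical.

#include "swift/ABI/Task.h"
#include "swift/ABI/TaskStatus.h"

// Sketch only: count a task's active status records by walking the
// parent-linked list from innermost to outermost.
static unsigned countStatusRecords(const swift::ActiveTaskStatus &status) {
  unsigned count = 0;
  for (auto *record = status.getInnermostRecord(); record != nullptr;
       record = record->getParent())
    ++count;
  return count;
}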

include/swift/Basic/Compiler.h

Lines changed: 0 additions & 9 deletions
@@ -128,13 +128,4 @@
 #define SWIFT_ASSERT_ONLY(...) do { __VA_ARGS__; } while (false)
 #endif

-#if defined(__LP64__) || defined(_WIN64)
-#define SWIFT_POINTER_IS_8_BYTES 1
-#define SWIFT_POINTER_IS_4_BYTES 0
-#else
-// TODO: consider supporting 16-bit targets
-#define SWIFT_POINTER_IS_8_BYTES 0
-#define SWIFT_POINTER_IS_4_BYTES 1
-#endif
-
 #endif // SWIFT_BASIC_COMPILER_H

stdlib/public/Concurrency/AsyncLet.cpp

Lines changed: 5 additions & 0 deletions
@@ -86,6 +86,10 @@ static AsyncLetImpl *asImpl(const AsyncLet *alet) {
       const_cast<AsyncLet*>(alet));
 }

+static AsyncLet *asAbstract(AsyncLetImpl *alet) {
+  return reinterpret_cast<AsyncLet*>(alet);
+}
+
 // =============================================================================
 // ==== start ------------------------------------------------------------------

@@ -130,6 +134,7 @@ SWIFT_CC(swiftasync)
 static void swift_asyncLet_waitImpl(
     OpaqueValue *result, SWIFT_ASYNC_CONTEXT AsyncContext *rawContext,
     AsyncLet *alet, Metadata *T) {
+  auto waitingTask = swift_task_getCurrent();
   auto task = alet->getTask();
   swift_task_future_wait(result, rawContext, task, T);
 }

stdlib/public/Concurrency/Task.cpp

Lines changed: 29 additions & 18 deletions
@@ -193,7 +193,8 @@ AsyncTask::~AsyncTask() {
     futureFragment()->destroy();
   }

-  Private.destroy();
+  // Release any objects potentially held as task local values.
+  Local.destroy(this);
 }

 SWIFT_CC(swift)
@@ -282,7 +283,13 @@ static void completeTaskImpl(AsyncTask *task,
       reinterpret_cast<char *>(context) - sizeof(AsyncContextPrefix));
   asyncContextPrefix->errorResult = error;

-  task->Private.complete(task);
+  // Destroy and deallocate any remaining task local items.
+  // We need to do this before we destroy the task local deallocator.
+  task->Local.destroy(task);
+
+  // Tear down the task-local allocator immediately;
+  // there's no need to wait for the object to be destroyed.
+  _swift_task_alloc_destroy(task);

 #if SWIFT_TASK_PRINTF_DEBUG
   fprintf(stderr, "[%p] task %p completed\n", pthread_self(), task);
@@ -514,19 +521,6 @@ static AsyncTaskAndContext swift_task_create_group_future_commonImpl(
   fprintf(stderr, "[%p] creating task %p with parent %p\n", pthread_self(), task, parent);
 #endif

-  // Initialize the task-local allocator.
-  if (isAsyncLetTask) {
-    initialContext->ResumeParent = reinterpret_cast<TaskContinuationFunction *>(
-        &completeTask);
-    assert(parent);
-    void *initialSlab = (char*)allocation + amountToAllocate;
-    task->Private.initializeWithSlab(task, initialSlab, initialSlabSize);
-  } else {
-    initialContext->ResumeParent = reinterpret_cast<TaskContinuationFunction *>(
-        closureContext ? &completeTaskWithClosure : &completeTaskAndRelease);
-    task->Private.initialize(task);
-  }
-
   // Perform additional linking between parent and child task.
   if (parent) {
     // If the parent was already cancelled, we carry this flag forward to the child.
@@ -536,9 +530,6 @@ static AsyncTaskAndContext swift_task_create_group_future_commonImpl(
     // so they may have been spawned in any case still.
     if (swift_task_isCancelled(parent))
       swift_task_cancel(task);
-
-    // Initialize task locals with a link to the parent task.
-    task->_private().Local.initializeLinkParent(task, parent);
   }

   // Configure the initial context.
@@ -551,6 +542,26 @@ static AsyncTaskAndContext swift_task_create_group_future_commonImpl(
   initialContext->Flags = AsyncContextKind::Ordinary;
   initialContext->Flags.setShouldNotDeallocateInCallee(true);

+  // Initialize the task-local allocator.
+  if (isAsyncLetTask) {
+    initialContext->ResumeParent = reinterpret_cast<TaskContinuationFunction *>(
+        &completeTask);
+    assert(parent);
+    void *initialSlab = (char*)allocation + amountToAllocate;
+    _swift_task_alloc_initialize_with_slab(task, initialSlab, initialSlabSize);
+  } else {
+    initialContext->ResumeParent = reinterpret_cast<TaskContinuationFunction *>(
+        closureContext ? &completeTaskWithClosure : &completeTaskAndRelease);
+    _swift_task_alloc_initialize(task);
+  }
+
+  // TODO: if the allocator would be prepared earlier we could do this in some
+  // other existing if-parent if rather than adding another one here.
+  if (parent) {
+    // Initialize task locals with a link to the parent task.
+    task->Local.initializeLinkParent(task, parent);
+  }
+
   return {task, initialContext};
 }
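The reordering above is not just cosmetic: the parent link for task-local values is now created only after the task-local allocator has been set up, presumably because creating that link allocates its Item from the task's own allocator (the _swift_task_alloc_specific call is visible in the TaskLocal.cpp hunk further down). A condensed sketch of the required order follows; the wrapper function is hypothetical, and it assumes the runtime's internal TaskPrivate.h declares the allocator helpers as this diff implies.

#include "swift/ABI/Task.h"
#include "TaskPrivate.h"

// Hypothetical wrapper, not runtime code; it exists only to show the
// ordering constraint between the two calls taken from the diff above.
static void setUpTaskStorage(swift::AsyncTask *task, swift::AsyncTask *parent) {
  // 1. Construct the task-local stack allocator first.
  swift::_swift_task_alloc_initialize(task);

  // 2. Only then link to the parent's task-local storage; creating the
  //    parent-link Item draws memory from the allocator set up in step 1.
  if (parent)
    task->Local.initializeLinkParent(task, parent);
}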

stdlib/public/Concurrency/TaskAlloc.cpp

Lines changed: 27 additions & 2 deletions
@@ -20,25 +20,46 @@
 #include "swift/Runtime/Concurrency.h"
 #include "swift/ABI/Task.h"
 #include "TaskPrivate.h"
-
+#include "../runtime/StackAllocator.h"
 #include <stdlib.h>

 using namespace swift;

 namespace {

+/// The size of an allocator slab.
+///
+/// TODO: find the optimal value by experiment.
+static constexpr size_t SlabCapacity = 1024;
+
+using TaskAllocator = StackAllocator<SlabCapacity>;
+
 struct GlobalAllocator {
   TaskAllocator allocator;
   void *spaceForFirstSlab[64];

   GlobalAllocator() : allocator(spaceForFirstSlab, sizeof(spaceForFirstSlab)) {}
 };

+static_assert(alignof(TaskAllocator) <= alignof(decltype(AsyncTask::AllocatorPrivate)),
+              "task allocator must not be more aligned than "
+              "allocator-private slot");
+
 } // end anonymous namespace

+void swift::_swift_task_alloc_initialize(AsyncTask *task) {
+  new (task->AllocatorPrivate) TaskAllocator();
+}
+
+void swift::_swift_task_alloc_initialize_with_slab(AsyncTask *task,
+                                                   void *firstSlabBuffer,
+                                                   size_t bufferCapacity) {
+  new (task->AllocatorPrivate) TaskAllocator(firstSlabBuffer, bufferCapacity);
+}
+
 static TaskAllocator &allocator(AsyncTask *task) {
   if (task)
-    return task->Private.get().Allocator;
+    return reinterpret_cast<TaskAllocator &>(task->AllocatorPrivate);

   // FIXME: this fall-back shouldn't be necessary, but it's useful
   // for now, since the current execution tests aren't setting up a task
@@ -47,6 +68,10 @@ static TaskAllocator &allocator(AsyncTask *task) {
   return global.allocator;
 }

+void swift::_swift_task_alloc_destroy(AsyncTask *task) {
+  allocator(task).~TaskAllocator();
+}
+
 void *swift::swift_task_alloc(size_t size) {
   return allocator(swift_task_getCurrent()).alloc(size);
 }
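With the revert, the task allocator again lives directly inside the AsyncTask object: _swift_task_alloc_initialize placement-constructs a StackAllocator into the AllocatorPrivate words, the allocator() accessor reinterprets those words, and _swift_task_alloc_destroy runs the destructor by hand since nothing will ever delete it. Below is a self-contained sketch of that placement-new lifecycle using stand-in types rather than the runtime's.

#include <cstddef>
#include <new>

// Stand-in for the runtime's StackAllocator<SlabCapacity>; only the pieces
// needed to show the lifecycle are modeled.
struct DemoAllocator {
  void *firstSlab = nullptr;
  std::size_t capacity = 0;
  DemoAllocator() = default;
  DemoAllocator(void *slab, std::size_t cap) : firstSlab(slab), capacity(cap) {}
};

// Stand-in for AsyncTask: a few reserved words, like AsyncTask::AllocatorPrivate.
struct DemoTask {
  void *AllocatorPrivate[4];
};

static_assert(sizeof(DemoAllocator) <= sizeof(DemoTask::AllocatorPrivate),
              "allocator must fit in the reserved words");
static_assert(alignof(DemoAllocator) <= alignof(decltype(DemoTask::AllocatorPrivate)),
              "allocator must not be more aligned than the reserved words");

// Mirrors _swift_task_alloc_initialize: construct in place, no heap allocation.
void demoAllocInitialize(DemoTask *task) {
  new (task->AllocatorPrivate) DemoAllocator();
}

// Mirrors the allocator() accessor: the reserved words *are* the object.
DemoAllocator &demoAllocator(DemoTask *task) {
  return reinterpret_cast<DemoAllocator &>(task->AllocatorPrivate);
}

// Mirrors _swift_task_alloc_destroy: run the destructor explicitly.
void demoAllocDestroy(DemoTask *task) {
  demoAllocator(task).~DemoAllocator();
}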

stdlib/public/Concurrency/TaskLocal.cpp

Lines changed: 2 additions & 2 deletions
@@ -72,7 +72,7 @@ TaskLocal::Item::createParentLink(AsyncTask *task, AsyncTask *parent) {
   Item *item = new(allocation) Item();

   // FIXME: parent pointer must to be the parent STORAGE not just the next item.
-  auto parentHead = parent->_private().Local.head;
+  auto parentHead = parent->Local.head;
   if (parentHead) {
     if (parentHead->isEmpty()) {
       switch (parentHead->getNextLinkType()) {
@@ -189,7 +189,7 @@ TaskLocal::Item::createLink(AsyncTask *task,
   void *allocation = _swift_task_alloc_specific(task, amountToAllocate);
   Item *item = new(allocation) Item(key, valueType);

-  auto next = task->_private().Local.head;
+  auto next = task->Local.head;
   item->next = reinterpret_cast<uintptr_t>(next) |
                static_cast<uintptr_t>(NextLinkType::IsNext);
