
Commit 6397e30

[Concurrency] Eliminate StatusRecordLockRecord.
Move to a recursive lock inline in the Task. This avoids the need to allocate a lock record and simplifies the code somewhat.

Change Task's OpaquePrivateStorage to compute its size at build time based on the sizes of its components, rather than having it be a fixed size. It appears that the fixed size was intended to be part of the ABI, but that didn't happen and we're free to change this size. We need to expand it slightly when using pthread_mutex as the recursive lock, as pthread_mutex is pretty big. Other recursive locks allow it to shrink slightly.

We don't have a recursive mutex in our Threading support code, so add a RecursiveMutex type.

rdar://113898653
1 parent 498d7cc commit 6397e30
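For illustration, a minimal sketch of the locking shape this commit moves to. The helper name and the record mutation below are hypothetical placeholders, not the exact runtime entry points; only the statusLock field and AsyncTask::_private() come from the change itself. Instead of allocating a StatusRecordLockRecord and splicing it into the status record chain, the task keeps a RecursiveMutex inline in its private storage and simply locks it.

// Hypothetical sketch, assuming the swift runtime's TaskPrivate.h context.
void updateStatusRecords(AsyncTask *task) {
  auto &priv = task->_private();

  // The lock now lives inline in the task, so no lock record is allocated.
  priv.statusLock.lock();

  // ... walk or mutate the status record list; because the mutex is
  // recursive, code that re-enters on the same thread (for example while
  // running a cancellation handler) can lock() again without deadlocking ...

  priv.statusLock.unlock();
}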

11 files changed (+396 -409 lines)

include/swift/ABI/MetadataValues.h

Lines changed: 0 additions & 3 deletions
@@ -62,9 +62,6 @@ enum {
   /// in a non-default distributed actor.
   NumWords_NonDefaultDistributedActor = 12,
 
-  /// The number of words in a task.
-  NumWords_AsyncTask = 24,
-
   /// The number of words in a task group.
   NumWords_TaskGroup = 32,

include/swift/ABI/Task.h

Lines changed: 12 additions & 3 deletions
@@ -298,7 +298,18 @@ class AsyncTask : public Job {
 
   /// Private storage for the use of the runtime.
   struct alignas(2 * alignof(void*)) OpaquePrivateStorage {
-    void *Storage[14];
+#if SWIFT_CONCURRENCY_ENABLE_PRIORITY_ESCALATION && SWIFT_POINTER_IS_4_BYTES
+    static constexpr size_t ActiveTaskStatusSize = 4 * sizeof(void *);
+#else
+    static constexpr size_t ActiveTaskStatusSize = 2 * sizeof(void *);
+#endif
+
+    // Private storage is currently 6 pointers, 16 bytes of non-pointer data,
+    // the ActiveTaskStatus, and a RecursiveMutex.
+    static constexpr size_t PrivateStorageSize =
+        6 * sizeof(void *) + 16 + ActiveTaskStatusSize + sizeof(RecursiveMutex);
+
+    char Storage[PrivateStorageSize];
 
     /// Initialize this storage during the creation of a task.
     void initialize(JobPriority basePri);
@@ -755,8 +766,6 @@ class AsyncTask : public Job {
 };
 
 // The compiler will eventually assume these.
-static_assert(sizeof(AsyncTask) == NumWords_AsyncTask * sizeof(void*),
-              "AsyncTask size is wrong");
 static_assert(alignof(AsyncTask) == 2 * alignof(void*),
               "AsyncTask alignment is wrong");
 #pragma clang diagnostic push
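Since PrivateStorageSize is now computed from its components rather than being a fixed word count, the intended invariant is that the runtime's concrete PrivateStorage fits inside this opaque buffer. Below is a hedged sketch of the kind of compile-time check that would enforce this; it is not part of this diff, and the concrete assertion lives in the runtime sources rather than here.

// Illustrative check, assuming the runtime-internal AsyncTask::PrivateStorage
// type from stdlib/public/Concurrency/TaskPrivate.h is visible at this point.
static_assert(sizeof(AsyncTask::PrivateStorage) <=
                  sizeof(AsyncTask::OpaquePrivateStorage),
              "PrivateStorage must fit in OpaquePrivateStorage");
static_assert(alignof(AsyncTask::PrivateStorage) <=
                  alignof(AsyncTask::OpaquePrivateStorage),
              "PrivateStorage must not be over-aligned");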

include/swift/Threading/Impl/C11.h

Lines changed: 19 additions & 0 deletions
@@ -147,6 +147,25 @@ inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {
   (void)::mtx_unlock(&handle.mutex);
 }
 
+// .. Recursive mutex support .................................................
+
+using recursive_mutex_handle = ::mtx_t;
+
+inline void recursive_mutex_init(recursive_mutex_handle &handle,
+                                 bool checked = false) {
+  SWIFT_C11THREADS_CHECK(::mtx_init(&handle, ::mtx_plain | ::mtx_recursive));
+}
+inline void recursive_mutex_destroy(recursive_mutex_handle &handle) {
+  ::mtx_destroy(&handle);
+}
+
+inline void recursive_mutex_lock(recursive_mutex_handle &handle) {
+  SWIFT_C11THREADS_CHECK(::mtx_lock(&handle));
+}
+inline void recursive_mutex_unlock(recursive_mutex_handle &handle) {
+  SWIFT_C11THREADS_CHECK(::mtx_unlock(&handle));
+}
+
 // .. ConditionVariable support ..............................................
 
 struct cond_handle {

include/swift/Threading/Impl/Darwin.h

Lines changed: 36 additions & 0 deletions
@@ -129,6 +129,42 @@ inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {
   ::os_unfair_lock_unlock(&handle);
 }
 
+// .. Recursive mutex support .................................................
+
+// The os_unfair_recursive_lock interface is stable, but not in the SDK. Bring
+// our own definitions for what we need.
+
+#define OS_UNFAIR_RECURSIVE_LOCK_INIT                                          \
+  (os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
+
+typedef struct os_unfair_recursive_lock_s {
+  os_unfair_lock ourl_lock;
+  uint32_t ourl_count;
+} os_unfair_recursive_lock, *os_unfair_recursive_lock_t;
+
+using recursive_mutex_handle = os_unfair_recursive_lock;
+
+extern "C" void
+os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
+                                           uint32_t options);
+
+extern "C" void
+os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
+
+inline void recursive_mutex_init(recursive_mutex_handle &handle,
+                                 bool checked = false) {
+  handle = OS_UNFAIR_RECURSIVE_LOCK_INIT;
+}
+inline void recursive_mutex_destroy(recursive_mutex_handle &handle) {}
+
+inline void recursive_mutex_lock(recursive_mutex_handle &handle) {
+  os_unfair_recursive_lock_lock_with_options(&handle, 0);
+}
+
+inline void recursive_mutex_unlock(recursive_mutex_handle &handle) {
+  os_unfair_recursive_lock_unlock(&handle);
+}
+
 // .. ConditionVariable support ..............................................
 
 struct cond_handle {

include/swift/Threading/Impl/Linux.h

Lines changed: 23 additions & 0 deletions
@@ -134,6 +134,29 @@ inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {
   (void)::pthread_mutex_unlock(&handle);
 }
 
+// .. Recursive mutex support ................................................
+
+using recursive_mutex_handle = ::pthread_mutex_t;
+
+inline void recursive_mutex_init(mutex_handle &handle, bool checked = false) {
+  ::pthread_mutexattr_t attr;
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutexattr_init(&attr));
+  SWIFT_LINUXTHREADS_CHECK(
+      ::pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutex_init(&handle, &attr));
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutexattr_destroy(&attr));
+}
+inline void recursive_mutex_destroy(mutex_handle &handle) {
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutex_destroy(&handle));
+}
+
+inline void recursive_mutex_lock(mutex_handle &handle) {
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutex_lock(&handle));
+}
+inline void recursive_mutex_unlock(mutex_handle &handle) {
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutex_unlock(&handle));
+}
+
 // .. ConditionVariable support ..............................................
 
 struct cond_handle {

include/swift/Threading/Impl/Nothreads.h

Lines changed: 10 additions & 0 deletions
@@ -60,6 +60,16 @@ inline bool lazy_mutex_try_lock(lazy_mutex_handle &handle) { return true; }
 inline void lazy_mutex_unsafe_lock(lazy_mutex_handle &handle) {}
 inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {}
 
+// .. Recursive mutex support .................................................
+
+using recursive_mutex_handle = unsigned;
+
+inline void recursive_mutex_init(recursive_mutex_handle &handle,
+                                 bool checked = false) {}
+inline void recursive_mutex_destroy(recursive_mutex_handle &handle) {}
+inline void recursive_mutex_lock(recursive_mutex_handle &handle) {}
+inline void recursive_mutex_unlock(recursive_mutex_handle &handle) {}
+
 // .. ConditionVariable support ..............................................
 
 using cond_handle = unsigned;

include/swift/Threading/Impl/Pthreads.h

Lines changed: 23 additions & 0 deletions
@@ -130,6 +130,29 @@ inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {
   (void)::pthread_mutex_unlock(&handle);
 }
 
+// .. Recursive mutex support ................................................
+
+using recursive_mutex_handle = ::pthread_mutex_t;
+
+inline void recursive_mutex_init(mutex_handle &handle, bool checked = false) {
+  ::pthread_mutexattr_t attr;
+  SWIFT_PTHREADS_CHECK(::pthread_mutexattr_init(&attr));
+  SWIFT_PTHREADS_CHECK(
+      ::pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
+  SWIFT_PTHREADS_CHECK(::pthread_mutex_init(&handle, &attr));
+  SWIFT_PTHREADS_CHECK(::pthread_mutexattr_destroy(&attr));
+}
+inline void recursive_mutex_destroy(mutex_handle &handle) {
+  SWIFT_PTHREADS_CHECK(::pthread_mutex_destroy(&handle));
+}
+
+inline void recursive_mutex_lock(mutex_handle &handle) {
+  SWIFT_PTHREADS_CHECK(::pthread_mutex_lock(&handle));
+}
+inline void recursive_mutex_unlock(mutex_handle &handle) {
+  SWIFT_PTHREADS_CHECK(::pthread_mutex_unlock(&handle));
+}
+
 // .. ConditionVariable support ..............................................
 
 struct cond_handle {

include/swift/Threading/Mutex.h

Lines changed: 42 additions & 0 deletions
@@ -241,6 +241,48 @@ class IndirectMutex {
 using SmallMutex =
     std::conditional_t<sizeof(Mutex) <= sizeof(void *), Mutex, IndirectMutex>;
 
+/// A recursive variant of Mutex.
+class RecursiveMutex {
+
+  RecursiveMutex(const RecursiveMutex &) = delete;
+  RecursiveMutex &operator=(const RecursiveMutex &) = delete;
+  RecursiveMutex(RecursiveMutex &&) = delete;
+  RecursiveMutex &operator=(RecursiveMutex &&) = delete;
+
+public:
+  /// Constructs a recursive mutex.
+  ///
+  /// If `checked` is true the mutex will attempt to check for misuse and
+  /// fatalError when detected. If `checked` is false (the default) the
+  /// mutex will make little to no effort to check for misuse (more efficient).
+  explicit RecursiveMutex(bool checked = false) {
+    threading_impl::recursive_mutex_init(Handle, checked);
+  }
+  ~RecursiveMutex() { threading_impl::recursive_mutex_destroy(Handle); }
+
+  /// The lock() method has the following properties:
+  /// - Behaves as an atomic operation.
+  /// - Blocks the calling thread until exclusive ownership of the mutex
+  ///   can be obtained.
+  /// - Prior m.unlock() operations on the same mutex synchronize-with
+  ///   this lock operation.
+  /// - Does not throw exceptions but will halt on error (fatalError).
+  void lock() { threading_impl::recursive_mutex_lock(Handle); }
+
+  /// The unlock() method has the following properties:
+  /// - Behaves as an atomic operation.
+  /// - Releases the calling thread's ownership of the mutex and
+  ///   synchronizes-with the subsequent successful lock operations on
+  ///   the same object.
+  /// - The behavior is undefined if the calling thread does not own
+  ///   the mutex.
+  /// - Does not throw exceptions but will halt on error (fatalError).
+  void unlock() { threading_impl::recursive_mutex_unlock(Handle); }
+
+protected:
+  threading_impl::recursive_mutex_handle Handle;
+};
+
 } // namespace swift
 
 #endif // SWIFT_THREADING_MUTEX_H
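As a usage note, the point of RecursiveMutex over the existing swift::Mutex is that a thread may re-acquire a lock it already holds, provided every lock() is balanced by an unlock(). A minimal sketch follows; the lock name and functions are illustrative, not part of the commit.

#include "swift/Threading/Mutex.h"

static swift::RecursiveMutex recordsLock; // illustrative name

static void touchRecords() {
  recordsLock.lock();   // re-entrant: fine even if the caller already holds it
  // ... read or mutate shared state ...
  recordsLock.unlock();
}

static void updateRecords() {
  recordsLock.lock();
  touchRecords();       // with a non-recursive swift::Mutex this would deadlock
  recordsLock.unlock();
}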

stdlib/public/Concurrency/TaskPrivate.h

Lines changed: 13 additions & 11 deletions
@@ -29,6 +29,7 @@
 #include "swift/Runtime/Error.h"
 #include "swift/Runtime/Exclusivity.h"
 #include "swift/Runtime/HeapObject.h"
+#include "swift/Threading/Mutex.h"
 #include "swift/Threading/Thread.h"
 #include "swift/Threading/ThreadSanitizer.h"
 #include <atomic>
@@ -598,27 +599,25 @@ class alignas(2 * sizeof(void*)) ActiveTaskStatus {
 #endif
   }
 
-  /// Is there a lock on the linked list of status records?
+  /// Does some thread hold the status record lock?
   bool isStatusRecordLocked() const { return Flags & IsStatusRecordLocked; }
-  ActiveTaskStatus withLockingRecord(TaskStatusRecord *lockRecord) const {
+  ActiveTaskStatus withStatusRecordLocked() const {
     assert(!isStatusRecordLocked());
-    assert(lockRecord->Parent == Record);
 #if SWIFT_CONCURRENCY_ENABLE_PRIORITY_ESCALATION
-    return ActiveTaskStatus(lockRecord, Flags | IsStatusRecordLocked, ExecutionLock);
+    return ActiveTaskStatus(Record, Flags | IsStatusRecordLocked,
+                            ExecutionLock);
 #else
-    return ActiveTaskStatus(lockRecord, Flags | IsStatusRecordLocked);
+    return ActiveTaskStatus(Record, Flags | IsStatusRecordLocked);
 #endif
   }
-  ActiveTaskStatus withoutLockingRecord() const {
+  ActiveTaskStatus withoutStatusRecordLocked() const {
     assert(isStatusRecordLocked());
-    assert(Record->getKind() == TaskStatusRecordKind::Private_RecordLock);
 
-    // Remove the lock record, and put the next one as the head
-    auto newRecord = Record->Parent;
 #if SWIFT_CONCURRENCY_ENABLE_PRIORITY_ESCALATION
-    return ActiveTaskStatus(newRecord, Flags & ~IsStatusRecordLocked, ExecutionLock);
+    return ActiveTaskStatus(Record, Flags & ~IsStatusRecordLocked,
+                            ExecutionLock);
 #else
-    return ActiveTaskStatus(newRecord, Flags & ~IsStatusRecordLocked);
+    return ActiveTaskStatus(Record, Flags & ~IsStatusRecordLocked);
 #endif
   }
 
@@ -779,6 +778,9 @@ struct AsyncTask::PrivateStorage {
   /// async task stack when it is needed.
   TaskDependencyStatusRecord *dependencyRecord = nullptr;
 
+  // The lock used to protect more complicated operations on the task status.
+  RecursiveMutex statusLock;
+
   // Always create an async task with max priority in ActiveTaskStatus = base
   // priority. It will be updated later if needed.
   PrivateStorage(JobPriority basePri)