Commit ecdcb82

Merge pull request #80316 from mikeash/lock-record-begone
[Concurrency] Eliminate StatusRecordLockRecord.
2 parents f332937 + 45727af

14 files changed (+470, -413 lines)
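For orientation before the per-file diffs: the commit removes the synthetic StatusRecordLockRecord that used to be spliced into a task's status-record list to mark it as locked, and instead pairs an IsStatusRecordLocked flag bit with a recursive mutex kept in the task's private storage. The C++ stand-ins below are invented for illustration (std::recursive_mutex plays the role of the runtime's RecursiveMutex); they are not the runtime's real types.

#include <mutex>

struct TaskStatusRecord {
  TaskStatusRecord *Parent = nullptr;   // singly linked list of status records
};

// Old scheme (removed by this commit): a dedicated lock record was pushed onto
// the list head to signal "locked", and had to be spliced in and out on every
// lock/unlock.
struct StatusRecordLockRecordSketch : TaskStatusRecord {};

// New scheme: the record list is left untouched; the status word only carries
// a flag bit, and the real lock lives out of band in the task's private
// storage.
struct PrivateStorageSketch {
  static constexpr unsigned IsStatusRecordLocked = 1;  // bit in the status word
  unsigned statusFlags = 0;
  std::recursive_mutex statusLock;      // corresponds to the new statusLock
  TaskStatusRecord *records = nullptr;  // head of the status-record list
};

int main() {
  PrivateStorageSketch storage;         // per-task state in the new scheme
  (void)storage;
  return 0;
}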

include/swift/ABI/MetadataValues.h

Lines changed: 0 additions & 3 deletions
@@ -62,9 +62,6 @@ enum {
   /// in a non-default distributed actor.
   NumWords_NonDefaultDistributedActor = 12,
 
-  /// The number of words in a task.
-  NumWords_AsyncTask = 24,
-
   /// The number of words in a task group.
   NumWords_TaskGroup = 32,
 

include/swift/ABI/Task.h

Lines changed: 12 additions & 3 deletions
@@ -298,7 +298,18 @@ class AsyncTask : public Job {
 
   /// Private storage for the use of the runtime.
   struct alignas(2 * alignof(void*)) OpaquePrivateStorage {
-    void *Storage[14];
+#if SWIFT_CONCURRENCY_ENABLE_PRIORITY_ESCALATION && SWIFT_POINTER_IS_4_BYTES
+    static constexpr size_t ActiveTaskStatusSize = 4 * sizeof(void *);
+#else
+    static constexpr size_t ActiveTaskStatusSize = 4 * sizeof(void *);
+#endif
+
+    // Private storage is currently 6 pointers, 16 bytes of non-pointer data,
+    // the ActiveTaskStatus, and a RecursiveMutex.
+    static constexpr size_t PrivateStorageSize =
+        6 * sizeof(void *) + 16 + ActiveTaskStatusSize + sizeof(RecursiveMutex);
+
+    void *Storage[PrivateStorageSize];
 
     /// Initialize this storage during the creation of a task.
     void initialize(JobPriority basePri);
@@ -755,8 +766,6 @@ class AsyncTask : public Job {
 };
 
 // The compiler will eventually assume these.
-static_assert(sizeof(AsyncTask) == NumWords_AsyncTask * sizeof(void*),
-              "AsyncTask size is wrong");
 static_assert(alignof(AsyncTask) == 2 * alignof(void*),
               "AsyncTask alignment is wrong");
 #pragma clang diagnostic push
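Because the fixed void *Storage[14] above becomes a computed PrivateStorageSize, the task's private storage now scales with the platform's recursive-mutex size, which is also why the sizeof(AsyncTask) static_assert is dropped in this file. Below is a rough worked example of that arithmetic, assuming 8-byte pointers and an 8-byte recursive-mutex handle (roughly Darwin's os_unfair_recursive_lock); the concrete numbers are illustrative only and not part of the commit.

#include <cstddef>
#include <cstdio>

int main() {
  constexpr size_t PointerSize = 8;           // assumption: 64-bit target
  constexpr size_t ActiveTaskStatusSize = 4 * PointerSize;
  constexpr size_t RecursiveMutexSize = 8;    // assumption: os_unfair_recursive_lock
  constexpr size_t PrivateStorageSize =
      6 * PointerSize + 16 + ActiveTaskStatusSize + RecursiveMutexSize;
  // 48 + 16 + 32 + 8 = 104 bytes under these assumptions; a 40-byte
  // pthread_mutex_t (glibc x86-64) would give 136 instead.
  std::printf("PrivateStorageSize = %zu bytes\n", PrivateStorageSize);
  return 0;
}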

include/swift/Threading/Impl/C11.h

Lines changed: 19 additions & 0 deletions
@@ -147,6 +147,25 @@ inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {
   (void)::mtx_unlock(&handle.mutex);
 }
 
+// .. Recursive mutex support .................................................
+
+using recursive_mutex_handle = ::mtx_t;
+
+inline void recursive_mutex_init(recursive_mutex_handle &handle,
+                                 bool checked = false) {
+  SWIFT_C11THREADS_CHECK(::mtx_init(&handle, ::mtx_recursive));
+}
+inline void recursive_mutex_destroy(recursive_mutex_handle &handle) {
+  ::mtx_destroy(&handle);
+}
+
+inline void recursive_mutex_lock(recursive_mutex_handle &handle) {
+  SWIFT_C11THREADS_CHECK(::mtx_lock(&handle));
+}
+inline void recursive_mutex_unlock(recursive_mutex_handle &handle) {
+  SWIFT_C11THREADS_CHECK(::mtx_unlock(&handle));
+}
+
 // .. ConditionVariable support ..............................................
 
 struct cond_handle {

include/swift/Threading/Impl/Darwin.h

Lines changed: 36 additions & 0 deletions
@@ -129,6 +129,42 @@ inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {
   ::os_unfair_lock_unlock(&handle);
 }
 
+// .. Recursive mutex support .................................................
+
+// The os_unfair_recursive_lock interface is stable, but not in the SDK. Bring
+// our own definitions for what we need.
+
+#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
+  (os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
+
+typedef struct os_unfair_recursive_lock_s {
+  os_unfair_lock ourl_lock;
+  uint32_t ourl_count;
+} os_unfair_recursive_lock, *os_unfair_recursive_lock_t;
+
+using recursive_mutex_handle = os_unfair_recursive_lock;
+
+extern "C" void
+os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
+                                           uint32_t options);
+
+extern "C" void
+os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
+
+inline void recursive_mutex_init(recursive_mutex_handle &handle,
+                                 bool checked = false) {
+  handle = OS_UNFAIR_RECURSIVE_LOCK_INIT;
+}
+inline void recursive_mutex_destroy(recursive_mutex_handle &handle) {}
+
+inline void recursive_mutex_lock(recursive_mutex_handle &handle) {
+  os_unfair_recursive_lock_lock_with_options(&handle, 0);
+}
+
+inline void recursive_mutex_unlock(recursive_mutex_handle &handle) {
+  os_unfair_recursive_lock_unlock(&handle);
+}
+
 // .. ConditionVariable support ..............................................
 
 struct cond_handle {

include/swift/Threading/Impl/Linux.h

Lines changed: 23 additions & 0 deletions
@@ -134,6 +134,29 @@ inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {
   (void)::pthread_mutex_unlock(&handle);
 }
 
+// .. Recursive mutex support ................................................
+
+using recursive_mutex_handle = ::pthread_mutex_t;
+
+inline void recursive_mutex_init(recursive_mutex_handle &handle, bool checked = false) {
+  ::pthread_mutexattr_t attr;
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutexattr_init(&attr));
+  SWIFT_LINUXTHREADS_CHECK(
+      ::pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutex_init(&handle, &attr));
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutexattr_destroy(&attr));
+}
+inline void recursive_mutex_destroy(recursive_mutex_handle &handle) {
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutex_destroy(&handle));
+}
+
+inline void recursive_mutex_lock(recursive_mutex_handle &handle) {
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutex_lock(&handle));
+}
+inline void recursive_mutex_unlock(recursive_mutex_handle &handle) {
+  SWIFT_LINUXTHREADS_CHECK(::pthread_mutex_unlock(&handle));
+}
+
 // .. ConditionVariable support ..............................................
 
 struct cond_handle {

include/swift/Threading/Impl/Nothreads.h

Lines changed: 10 additions & 0 deletions
@@ -60,6 +60,16 @@ inline bool lazy_mutex_try_lock(lazy_mutex_handle &handle) { return true; }
 inline void lazy_mutex_unsafe_lock(lazy_mutex_handle &handle) {}
 inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {}
 
+// .. Recursive mutex support .................................................
+
+using recursive_mutex_handle = unsigned;
+
+inline void recursive_mutex_init(recursive_mutex_handle &handle,
+                                 bool checked = false) {}
+inline void recursive_mutex_destroy(recursive_mutex_handle &handle) {}
+inline void recursive_mutex_lock(recursive_mutex_handle &handle) {}
+inline void recursive_mutex_unlock(recursive_mutex_handle &handle) {}
+
 // .. ConditionVariable support ..............................................
 
 using cond_handle = unsigned;

include/swift/Threading/Impl/Pthreads.h

Lines changed: 23 additions & 0 deletions
@@ -130,6 +130,29 @@ inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {
   (void)::pthread_mutex_unlock(&handle);
 }
 
+// .. Recursive mutex support ................................................
+
+using recursive_mutex_handle = ::pthread_mutex_t;
+
+inline void recursive_mutex_init(recursive_mutex_handle &handle, bool checked = false) {
+  ::pthread_mutexattr_t attr;
+  SWIFT_PTHREADS_CHECK(::pthread_mutexattr_init(&attr));
+  SWIFT_PTHREADS_CHECK(
+      ::pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
+  SWIFT_PTHREADS_CHECK(::pthread_mutex_init(&handle, &attr));
+  SWIFT_PTHREADS_CHECK(::pthread_mutexattr_destroy(&attr));
+}
+inline void recursive_mutex_destroy(recursive_mutex_handle &handle) {
+  SWIFT_PTHREADS_CHECK(::pthread_mutex_destroy(&handle));
+}
+
+inline void recursive_mutex_lock(recursive_mutex_handle &handle) {
+  SWIFT_PTHREADS_CHECK(::pthread_mutex_lock(&handle));
+}
+inline void recursive_mutex_unlock(recursive_mutex_handle &handle) {
+  SWIFT_PTHREADS_CHECK(::pthread_mutex_unlock(&handle));
+}
+
 // .. ConditionVariable support ..............................................
 
 struct cond_handle {
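The Linux and Pthreads shims above are identical apart from the CHECK macro: both configure a PTHREAD_MUTEX_RECURSIVE mutex so the owning thread may re-acquire it. Below is a standalone POSIX sketch of that behavior, mirroring the init/lock/unlock sequence from the diff without pulling in the Swift headers (the main function and the printed message are invented for illustration).

#include <pthread.h>
#include <cstdio>

int main() {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);

  pthread_mutex_t mutex;
  pthread_mutex_init(&mutex, &attr);
  pthread_mutexattr_destroy(&attr);

  // Nested acquisition from the same thread: would deadlock with a plain
  // (non-recursive) mutex, but succeeds with PTHREAD_MUTEX_RECURSIVE.
  pthread_mutex_lock(&mutex);
  pthread_mutex_lock(&mutex);
  std::puts("recursive acquisition succeeded");
  pthread_mutex_unlock(&mutex);
  pthread_mutex_unlock(&mutex);

  pthread_mutex_destroy(&mutex);
  return 0;
}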

include/swift/Threading/Impl/Win32.h

Lines changed: 20 additions & 0 deletions
@@ -87,6 +87,26 @@ inline void lazy_mutex_unsafe_unlock(lazy_mutex_handle &handle) {
   ReleaseSRWLockExclusive(&handle);
 }
 
+// .. Recursive mutex support ................................................
+
+using recursive_mutex_handle = SWIFT_CRITICAL_SECTION;
+
+inline void recursive_mutex_init(recursive_mutex_handle &handle,
+                                 bool checked = false) {
+  InitializeCriticalSection(&handle);
+}
+
+inline void recursive_mutex_destroy(recursive_mutex_handle &handle) {
+  DeleteCriticalSection(&handle);
+}
+
+inline void recursive_mutex_lock(recursive_mutex_handle &handle) {
+  EnterCriticalSection(&handle);
+}
+inline void recursive_mutex_unlock(recursive_mutex_handle &handle) {
+  LeaveCriticalSection(&handle);
+}
+
 // .. ConditionVariable support ..............................................
 
 struct cond_handle {

include/swift/Threading/Impl/Win32/Win32Defs.h

Lines changed: 53 additions & 0 deletions
@@ -36,7 +36,14 @@ typedef unsigned char BYTE;
 typedef BYTE BOOLEAN;
 typedef int BOOL;
 typedef unsigned long DWORD;
+typedef long LONG;
 typedef unsigned long ULONG;
+#if defined(_WIN64)
+typedef unsigned __int64 ULONG_PTR;
+#else
+typedef unsigned long ULONG_PTR;
+#endif
+typedef PVOID HANDLE;
 
 typedef VOID(NTAPI *PFLS_CALLBACK_FUNCTION)(PVOID lpFlsData);
 
@@ -46,6 +53,11 @@ typedef PRTL_SRWLOCK PSRWLOCK;
 typedef struct _RTL_CONDITION_VARIABLE *PRTL_CONDITION_VARIABLE;
 typedef PRTL_CONDITION_VARIABLE PCONDITION_VARIABLE;
 
+typedef struct _RTL_CRITICAL_SECTION_DEBUG *PRTL_CRITICAL_SECTION_DEBUG;
+typedef struct _RTL_CRITICAL_SECTION *PRTL_CRITICAL_SECTION;
+typedef PRTL_CRITICAL_SECTION PCRITICAL_SECTION;
+typedef PRTL_CRITICAL_SECTION LPCRITICAL_SECTION;
+
 // These have to be #defines, to avoid problems with <windows.h>
 #define RTL_SRWLOCK_INIT {0}
 #define SRWLOCK_INIT RTL_SRWLOCK_INIT
@@ -83,6 +95,19 @@ WINBASEAPI BOOL WINAPI SleepConditionVariableSRW(
   ULONG Flags
 );
 
+WINBASEAPI VOID WINAPI InitializeCriticalSection(
+  LPCRITICAL_SECTION lpCriticalSection
+);
+WINBASEAPI VOID WINAPI DeleteCriticalSection(
+  LPCRITICAL_SECTION lpCriticalSection
+);
+WINBASEAPI VOID WINAPI EnterCriticalSection(
+  LPCRITICAL_SECTION lpCriticalSection
+);
+WINBASEAPI VOID WINAPI LeaveCriticalSection(
+  LPCRITICAL_SECTION lpCriticalSection
+);
+
 WINBASEAPI DWORD WINAPI FlsAlloc(PFLS_CALLBACK_FUNCTION lpCallback);
 WINBASEAPI PVOID WINAPI FlsGetValue(DWORD dwFlsIndex);
 WINBASEAPI BOOL WINAPI FlsSetValue(DWORD dwFlsIndex, PVOID lpFlsData);
@@ -140,6 +165,34 @@ inline BOOL SleepConditionVariableSRW(PSWIFT_CONDITION_VARIABLE CondVar,
                                         Flags);
 }
 
+// And with CRITICAL_SECTION
+#pragma pack(push, 8)
+typedef struct SWIFT_CRITICAL_SECTION {
+  PRTL_CRITICAL_SECTION_DEBUG DebugInfo;
+  LONG LockCount;
+  LONG RecursionCount;
+  HANDLE OwningThread;
+  HANDLE LockSemaphore;
+  ULONG_PTR SpinCount;
+} SWIFT_CRITICAL_SECTION, *PSWIFT_CRITICAL_SECTION;
+#pragma pack(pop)
+
+inline VOID InitializeCriticalSection(PSWIFT_CRITICAL_SECTION CritSec) {
+  ::InitializeCriticalSection(reinterpret_cast<LPCRITICAL_SECTION>(CritSec));
+}
+
+inline VOID DeleteCriticalSection(PSWIFT_CRITICAL_SECTION CritSec) {
+  ::DeleteCriticalSection(reinterpret_cast<LPCRITICAL_SECTION>(CritSec));
+}
+
+inline VOID EnterCriticalSection(PSWIFT_CRITICAL_SECTION CritSec) {
+  ::EnterCriticalSection(reinterpret_cast<LPCRITICAL_SECTION>(CritSec));
+}
+
+inline VOID LeaveCriticalSection(PSWIFT_CRITICAL_SECTION CritSec) {
+  ::LeaveCriticalSection(reinterpret_cast<LPCRITICAL_SECTION>(CritSec));
+}
+
 } // namespace threading_impl
 } // namespace swift
 
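The inline wrappers above cast the hand-rolled SWIFT_CRITICAL_SECTION to a real LPCRITICAL_SECTION, which is only sound if the mirrored layout matches what <windows.h> declares. The Windows-only sketch below checks that assumption in isolation; SwiftCriticalSectionCheck is a name invented for this sketch, and no such check is added by the commit itself.

// Standalone layout check (Windows only): compile against the real Windows
// headers and confirm the mirrored struct has the same size and alignment as
// CRITICAL_SECTION, which is what makes the reinterpret_casts legitimate.
#include <windows.h>

#pragma pack(push, 8)
struct SwiftCriticalSectionCheck {
  PRTL_CRITICAL_SECTION_DEBUG DebugInfo;
  LONG LockCount;
  LONG RecursionCount;
  HANDLE OwningThread;
  HANDLE LockSemaphore;
  ULONG_PTR SpinCount;
};
#pragma pack(pop)

static_assert(sizeof(SwiftCriticalSectionCheck) == sizeof(CRITICAL_SECTION),
              "mirrored layout must match CRITICAL_SECTION");
static_assert(alignof(SwiftCriticalSectionCheck) == alignof(CRITICAL_SECTION),
              "mirrored layout must match CRITICAL_SECTION");

int main() { return 0; }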

include/swift/Threading/Mutex.h

Lines changed: 42 additions & 0 deletions
@@ -241,6 +241,48 @@ class IndirectMutex {
 using SmallMutex =
     std::conditional_t<sizeof(Mutex) <= sizeof(void *), Mutex, IndirectMutex>;
 
+/// A recursive variant of Mutex.
+class RecursiveMutex {
+
+  RecursiveMutex(const RecursiveMutex &) = delete;
+  RecursiveMutex &operator=(const RecursiveMutex &) = delete;
+  RecursiveMutex(RecursiveMutex &&) = delete;
+  RecursiveMutex &operator=(RecursiveMutex &&) = delete;
+
+public:
+  /// Constructs a non-recursive mutex.
+  ///
+  /// If `checked` is true the mutex will attempt to check for misuse and
+  /// fatalError when detected. If `checked` is false (the default) the
+  /// mutex will make little to no effort to check for misuse (more efficient).
+  explicit RecursiveMutex(bool checked = false) {
+    threading_impl::recursive_mutex_init(Handle, checked);
+  }
+  ~RecursiveMutex() { threading_impl::recursive_mutex_destroy(Handle); }
+
+  /// The lock() method has the following properties:
+  /// - Behaves as an atomic operation.
+  /// - Blocks the calling thread until exclusive ownership of the mutex
+  ///   can be obtained.
+  /// - Prior m.unlock() operations on the same mutex synchronize-with
+  ///   this lock operation.
+  /// - Does not throw exceptions but will halt on error (fatalError).
+  void lock() { threading_impl::recursive_mutex_lock(Handle); }
+
+  /// The unlock() method has the following properties:
+  /// - Behaves as an atomic operation.
+  /// - Releases the calling thread's ownership of the mutex and
+  ///   synchronizes-with the subsequent successful lock operations on
+  ///   the same object.
+  /// - The behavior is undefined if the calling thread does not own
+  ///   the mutex.
+  /// - Does not throw exceptions but will halt on error (fatalError).
+  void unlock() { threading_impl::recursive_mutex_unlock(Handle); }
+
+protected:
+  threading_impl::recursive_mutex_handle Handle;
+};
+
 } // namespace swift
 
 #endif // SWIFT_THREADING_MUTEX_H
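RecursiveMutex follows the existing Mutex API (lock/unlock, halt on error) but allows nested acquisition by the owning thread. A minimal usage sketch, assuming it is built inside the Swift tree where swift/Threading/Mutex.h is on the include path; updateRecords and updateRecordsLocked are invented names.

#include "swift/Threading/Mutex.h"

static swift::RecursiveMutex statusLock;

static void updateRecordsLocked() {
  statusLock.lock();       // safe even if the caller already holds the lock
  // ... mutate the protected state ...
  statusLock.unlock();
}

static void updateRecords() {
  statusLock.lock();
  updateRecordsLocked();   // nested acquisition, no deadlock
  statusLock.unlock();
}

int main() {
  updateRecords();
  return 0;
}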

stdlib/public/Concurrency/TaskPrivate.h

Lines changed: 13 additions & 11 deletions
@@ -29,6 +29,7 @@
 #include "swift/Runtime/Error.h"
 #include "swift/Runtime/Exclusivity.h"
 #include "swift/Runtime/HeapObject.h"
+#include "swift/Threading/Mutex.h"
 #include "swift/Threading/Thread.h"
 #include "swift/Threading/ThreadSanitizer.h"
 #include <atomic>
@@ -598,27 +599,25 @@ class alignas(2 * sizeof(void*)) ActiveTaskStatus {
 #endif
   }
 
-  /// Is there a lock on the linked list of status records?
+  /// Does some thread hold the status record lock?
   bool isStatusRecordLocked() const { return Flags & IsStatusRecordLocked; }
-  ActiveTaskStatus withLockingRecord(TaskStatusRecord *lockRecord) const {
+  ActiveTaskStatus withStatusRecordLocked() const {
     assert(!isStatusRecordLocked());
-    assert(lockRecord->Parent == Record);
 #if SWIFT_CONCURRENCY_ENABLE_PRIORITY_ESCALATION
-    return ActiveTaskStatus(lockRecord, Flags | IsStatusRecordLocked, ExecutionLock);
+    return ActiveTaskStatus(Record, Flags | IsStatusRecordLocked,
+                            ExecutionLock);
 #else
-    return ActiveTaskStatus(lockRecord, Flags | IsStatusRecordLocked);
+    return ActiveTaskStatus(Record, Flags | IsStatusRecordLocked);
 #endif
   }
-  ActiveTaskStatus withoutLockingRecord() const {
+  ActiveTaskStatus withoutStatusRecordLocked() const {
     assert(isStatusRecordLocked());
-    assert(Record->getKind() == TaskStatusRecordKind::Private_RecordLock);
 
-    // Remove the lock record, and put the next one as the head
-    auto newRecord = Record->Parent;
 #if SWIFT_CONCURRENCY_ENABLE_PRIORITY_ESCALATION
-    return ActiveTaskStatus(newRecord, Flags & ~IsStatusRecordLocked, ExecutionLock);
+    return ActiveTaskStatus(Record, Flags & ~IsStatusRecordLocked,
+                            ExecutionLock);
 #else
-    return ActiveTaskStatus(newRecord, Flags & ~IsStatusRecordLocked);
+    return ActiveTaskStatus(Record, Flags & ~IsStatusRecordLocked);
 #endif
   }
 
@@ -779,6 +778,9 @@ struct AsyncTask::PrivateStorage {
   /// async task stack when it is needed.
   TaskDependencyStatusRecord *dependencyRecord = nullptr;
 
+  // The lock used to protect more complicated operations on the task status.
+  RecursiveMutex statusLock;
+
   // Always create an async task with max priority in ActiveTaskStatus = base
   // priority. It will be updated later if needed.
   PrivateStorage(JobPriority basePri)
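With the lock record gone, withStatusRecordLocked() and withoutStatusRecordLocked() simply toggle the flag bit while leaving the record chain in place, and the new statusLock in PrivateStorage is what actually serializes the threads doing so. A hedged sketch of how those pieces might combine; the real locking routine is not part of this excerpt, and TaskStateSketch plus withStatusRecordLock are stand-ins invented here, with std::recursive_mutex playing the role of the new statusLock.

#include <atomic>
#include <cstdint>
#include <mutex>

struct TaskStateSketch {
  static constexpr uintptr_t IsStatusRecordLocked = 1;
  std::atomic<uintptr_t> statusFlags{0};  // the flag bit kept in the status word
  std::recursive_mutex statusLock;        // serializes "complicated" status work
};

// Take the mutex first, then publish the flag so lock-free readers of the
// status word can tell that a writer currently owns the record list.
template <class Fn>
void withStatusRecordLock(TaskStateSketch &task, Fn &&body) {
  std::lock_guard<std::recursive_mutex> guard(task.statusLock);
  task.statusFlags.fetch_or(TaskStateSketch::IsStatusRecordLocked,
                            std::memory_order_acquire);
  body();
  task.statusFlags.fetch_and(~TaskStateSketch::IsStatusRecordLocked,
                             std::memory_order_release);
}

int main() {
  TaskStateSketch task;
  withStatusRecordLock(task, [] { /* walk or mutate the record list here */ });
  return 0;
}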
