
Commit 4d29740

Use process-lifetime unique thread IDs for ReentrantLock
This means that thread IDs are now 64-bit on all platforms; on 32-bit platforms, a simple seqlock is used to emulate 64-bit atomics.
1 parent 48becf3
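
As context for the change, here is a small stand-alone illustration (stable std APIs only, independent of this patch) of the property the new identifier relies on: a ThreadId is never reused within a process, whereas the address of a thread-local (the old identifier) can be recycled by the allocator once its thread exits.

    use std::thread;

    fn main() {
        // Every spawned thread gets a fresh ThreadId; IDs are never reused,
        // even after the thread that owned one has exited and been joined.
        let main_id = thread::current().id();
        let child_id = thread::spawn(|| thread::current().id()).join().unwrap();
        assert_ne!(main_id, child_id);
        println!("main: {main_id:?}, child: {child_id:?}");
    }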

File tree

1 file changed: library/std/src/sync/reentrant_lock.rs (+105 −18 lines)
@@ -1,11 +1,12 @@
 #[cfg(all(test, not(target_os = "emscripten")))]
 mod tests;
 
+use cfg_if::cfg_if;
+
 use crate::cell::UnsafeCell;
 use crate::fmt;
 use crate::ops::Deref;
 use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
 use crate::sys::sync as sys;
 
 /// A re-entrant mutual exclusion lock
@@ -53,8 +54,8 @@ use crate::sys::sync as sys;
 //
 // The 'owner' field tracks which thread has locked the mutex.
 //
-// We use current_thread_unique_ptr() as the thread identifier,
-// which is just the address of a thread local variable.
+// We use current_thread_id() as the thread identifier, which is just the
+// current thread's ThreadId, so it's unique across the process lifetime.
 //
 // If `owner` is set to the identifier of the current thread,
 // we assume the mutex is already locked and instead of locking it again,
@@ -72,14 +73,95 @@ use crate::sys::sync as sys;
 // since we're not dealing with multiple threads. If it's not equal,
 // synchronization is left to the mutex, making relaxed memory ordering for
 // the `owner` field fine in all cases.
+//
+// On systems without 64 bit atomics we use a simple seqlock to emulate a 64 bit Tid using
+// 32 bit atomics (which should be supported on all platforms with `std`). This works
+// because only one thread at a time (the one holding the mutex) writes to it.
 #[unstable(feature = "reentrant_lock", issue = "121440")]
 pub struct ReentrantLock<T: ?Sized> {
     mutex: sys::Mutex,
-    owner: AtomicUsize,
+    owner: Tid,
     lock_count: UnsafeCell<u32>,
     data: T,
 }
 
+cfg_if!(
+    if #[cfg(target_has_atomic = "64")] {
+        use crate::sync::atomic::{AtomicU64, Ordering::Relaxed};
+
+        struct Tid(AtomicU64);
+
+        impl Tid {
+            const fn new(tid: u64) -> Self {
+                Self(AtomicU64::new(tid))
+            }
+
+            #[inline]
+            fn get(&self) -> u64 {
+                self.0.load(Relaxed)
+            }
+
+            #[inline]
+            fn set(&self, tid: u64) {
+                self.0.store(tid, Relaxed)
+            }
+        }
+    } else if #[cfg(target_has_atomic = "32")] {
+        use crate::sync::atomic::{AtomicU32, Ordering::{Acquire, Relaxed, Release}};
+
+        struct Tid {
+            seq: AtomicU32,
+            low: AtomicU32,
+            high: AtomicU32,
+        }
+
+        impl Tid {
+            const fn new(tid: u64) -> Self {
+                Self {
+                    seq: AtomicU32::new(0),
+                    low: AtomicU32::new(tid as u32),
+                    high: AtomicU32::new((tid >> 32) as u32),
+                }
+            }
+
+            #[inline]
+            fn get(&self) -> u64 {
+                // Synchronizes with the release-increment in `set()` to ensure
+                // we only read the data after it's been fully written.
+                let mut seq = self.seq.load(Acquire);
+                loop {
+                    if seq % 2 == 0 {
+                        let low = self.low.load(Relaxed);
+                        let high = self.high.load(Relaxed);
+                        // The acquire-increment in `set()` synchronizes with this release
+                        // store to ensure that `get()` doesn't see data from a subsequent
+                        // `set()` call.
+                        match self.seq.compare_exchange_weak(seq, seq, Release, Acquire) {
+                            Ok(_) => return u64::from(low) | (u64::from(high) << 32),
+                            Err(new) => seq = new,
+                        }
+                    } else {
+                        crate::hint::spin_loop();
+                        seq = self.seq.load(Acquire);
+                    }
+                }
+            }
+
+            #[inline]
+            // This may only be called from one thread at a time, otherwise
+            // concurrent `get()` calls may return torn data.
+            fn set(&self, tid: u64) {
+                self.seq.fetch_add(1, Acquire);
+                self.low.store(tid as u32, Relaxed);
+                self.high.store((tid >> 32) as u32, Relaxed);
+                self.seq.fetch_add(1, Release);
+            }
+        }
+    } else {
+        compile_error!("`ReentrantLock` requires at least 32 bit atomics!");
+    }
+);
+
 #[unstable(feature = "reentrant_lock", issue = "121440")]
 unsafe impl<T: Send + ?Sized> Send for ReentrantLock<T> {}
 #[unstable(feature = "reentrant_lock", issue = "121440")]
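
The 32-bit fallback above is a writer-exclusive seqlock. Below is a self-contained sketch of the same protocol against the public `std::sync::atomic` API, adapted from the diff for experimenting outside the libstd tree (the name `SeqCell` and the tearing test in `main` are invented for illustration; in the real code this is `Tid` and the single writer is the thread holding the mutex):

    use std::sync::atomic::{
        AtomicU32,
        Ordering::{Acquire, Relaxed, Release},
    };

    /// A 64-bit cell with many readers but at most one writer at a time,
    /// built from 32-bit atomics (a writer-exclusive seqlock).
    struct SeqCell {
        seq: AtomicU32, // odd while a write is in progress
        low: AtomicU32,
        high: AtomicU32,
    }

    impl SeqCell {
        const fn new(v: u64) -> Self {
            Self {
                seq: AtomicU32::new(0),
                low: AtomicU32::new(v as u32),
                high: AtomicU32::new((v >> 32) as u32),
            }
        }

        fn get(&self) -> u64 {
            let mut seq = self.seq.load(Acquire);
            loop {
                if seq % 2 == 0 {
                    let low = self.low.load(Relaxed);
                    let high = self.high.load(Relaxed);
                    // Validate with a release-CAS rather than a plain re-load,
                    // pairing with the acquire-increment in `set()` so a reader
                    // cannot observe halves written by a subsequent `set()`.
                    match self.seq.compare_exchange_weak(seq, seq, Release, Acquire) {
                        Ok(_) => return u64::from(low) | (u64::from(high) << 32),
                        Err(new) => seq = new,
                    }
                } else {
                    std::hint::spin_loop();
                    seq = self.seq.load(Acquire);
                }
            }
        }

        /// Must never be called concurrently with itself (single-writer protocol).
        fn set(&self, v: u64) {
            self.seq.fetch_add(1, Acquire); // seq becomes odd: readers back off
            self.low.store(v as u32, Relaxed);
            self.high.store((v >> 32) as u32, Relaxed);
            self.seq.fetch_add(1, Release); // seq even again: publish both halves
        }
    }

    fn main() {
        static CELL: SeqCell = SeqCell::new(0);
        let reader = std::thread::spawn(|| {
            for _ in 0..1_000_000 {
                let v = CELL.get();
                // Every stored value has equal halves, so any torn read
                // (half old value, half new) would trip this assertion.
                assert_eq!(v as u32, (v >> 32) as u32);
            }
        });
        for i in 0..1_000_000u64 {
            CELL.set(i | (i << 32)); // equal halves make tearing detectable
        }
        reader.join().unwrap();
    }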
@@ -131,7 +213,7 @@ impl<T> ReentrantLock<T> {
     pub const fn new(t: T) -> ReentrantLock<T> {
         ReentrantLock {
             mutex: sys::Mutex::new(),
-            owner: AtomicUsize::new(0),
+            owner: Tid::new(0),
             lock_count: UnsafeCell::new(0),
             data: t,
         }
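
Note that `Tid::new` is `const` in both cfg branches, so `ReentrantLock::new` stays usable in constant contexts. A minimal nightly-only sketch (the feature gate matches the `#[unstable]` attribute above):

    #![feature(reentrant_lock)]

    use std::sync::ReentrantLock;

    // `ReentrantLock::new` is const, so it can initialize a static directly;
    // the owner starts as 0, the "no owner" sentinel.
    static LOCK: ReentrantLock<i32> = ReentrantLock::new(0);

    fn main() {
        let guard = LOCK.lock();
        assert_eq!(*guard, 0);
    }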
@@ -181,14 +263,14 @@ impl<T: ?Sized> ReentrantLock<T> {
     /// assert_eq!(lock.lock().get(), 10);
     /// ```
     pub fn lock(&self) -> ReentrantLockGuard<'_, T> {
-        let this_thread = current_thread_unique_ptr();
+        let this_thread = current_thread_id();
         // Safety: We only touch lock_count when we own the lock.
         unsafe {
-            if self.owner.load(Relaxed) == this_thread {
+            if self.owner.get() == this_thread {
                 self.increment_lock_count().expect("lock count overflow in reentrant mutex");
             } else {
                 self.mutex.lock();
-                self.owner.store(this_thread, Relaxed);
+                self.owner.set(this_thread);
                 debug_assert_eq!(*self.lock_count.get(), 0);
                 *self.lock_count.get() = 1;
             }
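
For reference, a minimal usage sketch of the re-entrant path that `lock()` implements; `ReentrantLock` is unstable (feature `reentrant_lock`, tracking issue #121440), so this only builds on nightly:

    #![feature(reentrant_lock)]

    use std::cell::Cell;
    use std::sync::ReentrantLock;

    fn main() {
        let lock = ReentrantLock::new(Cell::new(0));
        let outer = lock.lock();    // owner := current_thread_id(), lock_count = 1
        let inner = lock.lock();    // same owner: only lock_count is bumped to 2
        inner.set(inner.get() + 1); // guards give shared access only, hence the Cell
        drop(inner);                // lock_count back to 1, mutex still held
        assert_eq!(outer.get(), 1);
    }

Because locking again on the owning thread must not deadlock, the guard only hands out `&T` (it implements `Deref` but not `DerefMut`), which is why mutation goes through the interior `Cell`.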
@@ -223,14 +305,14 @@ impl<T: ?Sized> ReentrantLock<T> {
     ///
     /// This function does not block.
     pub(crate) fn try_lock(&self) -> Option<ReentrantLockGuard<'_, T>> {
-        let this_thread = current_thread_unique_ptr();
+        let this_thread = current_thread_id();
         // Safety: We only touch lock_count when we own the lock.
         unsafe {
-            if self.owner.load(Relaxed) == this_thread {
+            if self.owner.get() == this_thread {
                 self.increment_lock_count()?;
                 Some(ReentrantLockGuard { lock: self })
             } else if self.mutex.try_lock() {
-                self.owner.store(this_thread, Relaxed);
+                self.owner.set(this_thread);
                 debug_assert_eq!(*self.lock_count.get(), 0);
                 *self.lock_count.get() = 1;
                 Some(ReentrantLockGuard { lock: self })
@@ -303,18 +385,23 @@ impl<T: ?Sized> Drop for ReentrantLockGuard<'_, T> {
         unsafe {
             *self.lock.lock_count.get() -= 1;
             if *self.lock.lock_count.get() == 0 {
-                self.lock.owner.store(0, Relaxed);
+                self.lock.owner.set(0);
                 self.lock.mutex.unlock();
             }
         }
     }
 }
 
-/// Get an address that is unique per running thread.
+/// Returns the current thread's ThreadId value, which is guaranteed
+/// to be unique across the lifetime of the process.
 ///
-/// This can be used as a non-null usize-sized ID.
-pub(crate) fn current_thread_unique_ptr() -> usize {
-    // Use a non-drop type to make sure it's still available during thread destruction.
-    thread_local! { static X: u8 = const { 0 } }
-    X.with(|x| <*const _>::addr(x))
+/// Panics if called during a TLS destructor on a thread that hasn't
+/// been assigned an ID.
+pub(crate) fn current_thread_id() -> u64 {
+    #[cold]
+    fn no_tid() -> ! {
+        panic!("Thread hasn't been assigned an ID!")
+    }
+
+    crate::thread::try_current_id().map_or_else(|| no_tid(), |tid| tid.as_u64().get())
 }