#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;

+use cfg_if::cfg_if;
+
use crate::cell::UnsafeCell;
use crate::fmt;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use crate::sys::sync as sys;

/// A re-entrant mutual exclusion lock
@@ -53,8 +54,8 @@ use crate::sys::sync as sys;
//
// The 'owner' field tracks which thread has locked the mutex.
//
-// We use current_thread_unique_ptr() as the thread identifier,
-// which is just the address of a thread local variable.
+// We use current_thread_id() as the thread identifier, which is just the
+// current thread's ThreadId, so it's unique across the process lifetime.
//
// If `owner` is set to the identifier of the current thread,
// we assume the mutex is already locked and instead of locking it again,
@@ -72,14 +73,95 @@ use crate::sys::sync as sys;
// since we're not dealing with multiple threads. If it's not equal,
// synchronization is left to the mutex, making relaxed memory ordering for
// the `owner` field fine in all cases.
+//
+// On systems without 64-bit atomics we use a simple seqlock to emulate a 64-bit Tid using
+// 32-bit atomics (which should be supported on all platforms with `std`). This works
+// because only one thread at a time (the one holding the mutex) writes to it.
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLock<T: ?Sized> {
    mutex: sys::Mutex,
-    owner: AtomicUsize,
+    owner: Tid,
    lock_count: UnsafeCell<u32>,
    data: T,
}

+cfg_if!(
+    if #[cfg(target_has_atomic = "64")] {
+        use crate::sync::atomic::{AtomicU64, Ordering::Relaxed};
+
+        struct Tid(AtomicU64);
+
+        impl Tid {
+            const fn new(tid: u64) -> Self {
+                Self(AtomicU64::new(tid))
+            }
+
+            #[inline]
+            fn get(&self) -> u64 {
+                self.0.load(Relaxed)
+            }
+
+            #[inline]
+            fn set(&self, tid: u64) {
+                self.0.store(tid, Relaxed)
+            }
+        }
+    } else if #[cfg(target_has_atomic = "32")] {
+        use crate::sync::atomic::{AtomicU32, Ordering::{Acquire, Relaxed, Release}};
+
+        struct Tid {
+            seq: AtomicU32,
+            low: AtomicU32,
+            high: AtomicU32,
+        }
+
+        impl Tid {
+            const fn new(tid: u64) -> Self {
+                Self {
+                    seq: AtomicU32::new(0),
+                    low: AtomicU32::new(tid as u32),
+                    high: AtomicU32::new((tid >> 32) as u32),
+                }
+            }
+
+            #[inline]
+            fn get(&self) -> u64 {
+                // Synchronizes with the release-increment in `set()` to ensure
+                // we only read the data after it's been fully written.
+                let mut seq = self.seq.load(Acquire);
+                loop {
+                    if seq % 2 == 0 {
+                        let low = self.low.load(Relaxed);
+                        let high = self.high.load(Relaxed);
+                        // The acquire-increment in `set()` synchronizes with this release
+                        // store to ensure that `get()` doesn't see data from a subsequent
+                        // `set()` call.
+                        match self.seq.compare_exchange_weak(seq, seq, Release, Acquire) {
+                            Ok(_) => return u64::from(low) | (u64::from(high) << 32),
+                            Err(new) => seq = new,
+                        }
+                    } else {
+                        crate::hint::spin_loop();
+                        seq = self.seq.load(Acquire);
+                    }
+                }
+            }
+
+            #[inline]
+            // This may only be called from one thread at a time, otherwise
+            // concurrent `get()` calls may return torn data.
+            fn set(&self, tid: u64) {
+                self.seq.fetch_add(1, Acquire);
+                self.low.store(tid as u32, Relaxed);
+                self.high.store((tid >> 32) as u32, Relaxed);
+                self.seq.fetch_add(1, Release);
+            }
+        }
+    } else {
+        compile_error!("`ReentrantLock` requires at least 32 bit atomics!");
+    }
+);
+
#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: Send + ?Sized> Send for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
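For readers new to the seqlock pattern, here is a minimal standalone sketch of the same protocol, written against the public std::sync::atomic API so it compiles outside of std. The SeqTid name and the main() harness are illustrative only, not part of this patch:

// Illustration (not part of the patch): a hypothetical standalone version of
// the 32-bit fallback above. An even sequence number means no write is in
// progress; a write makes it odd, updates both halves, then makes it even again.
use std::hint;
use std::sync::atomic::{AtomicU32, Ordering::{Acquire, Relaxed, Release}};

struct SeqTid {
    seq: AtomicU32,
    low: AtomicU32,
    high: AtomicU32,
}

impl SeqTid {
    const fn new(tid: u64) -> Self {
        Self {
            seq: AtomicU32::new(0),
            low: AtomicU32::new(tid as u32),
            high: AtomicU32::new((tid >> 32) as u32),
        }
    }

    fn get(&self) -> u64 {
        let mut seq = self.seq.load(Acquire);
        loop {
            if seq % 2 == 0 {
                // No write in progress: the halves read below form a consistent
                // value if the sequence number is still unchanged afterwards.
                let low = self.low.load(Relaxed);
                let high = self.high.load(Relaxed);
                match self.seq.compare_exchange_weak(seq, seq, Release, Acquire) {
                    Ok(_) => return u64::from(low) | (u64::from(high) << 32),
                    Err(new) => seq = new, // a writer intervened, retry
                }
            } else {
                // A write is in progress, wait for it to finish.
                hint::spin_loop();
                seq = self.seq.load(Acquire);
            }
        }
    }

    // Requires external mutual exclusion among writers (in ReentrantLock, the
    // thread holding the mutex); otherwise readers could observe torn values.
    fn set(&self, tid: u64) {
        self.seq.fetch_add(1, Acquire); // seq becomes odd: write in progress
        self.low.store(tid as u32, Relaxed);
        self.high.store((tid >> 32) as u32, Relaxed);
        self.seq.fetch_add(1, Release); // seq becomes even: publish
    }
}

fn main() {
    let tid = SeqTid::new(0);
    tid.set(0x1234_5678_9abc_def0);
    assert_eq!(tid.get(), 0x1234_5678_9abc_def0);
}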
@@ -131,7 +213,7 @@ impl<T> ReentrantLock<T> {
    pub const fn new(t: T) -> ReentrantLock<T> {
        ReentrantLock {
            mutex: sys::Mutex::new(),
-            owner: AtomicUsize::new(0),
+            owner: Tid::new(0),
            lock_count: UnsafeCell::new(0),
            data: t,
        }
@@ -181,14 +263,14 @@ impl<T: ?Sized> ReentrantLock<T> {
    /// assert_eq!(lock.lock().get(), 10);
    /// ```
    pub fn lock(&self) -> ReentrantLockGuard<'_, T> {
-        let this_thread = current_thread_unique_ptr();
+        let this_thread = current_thread_id();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
-            if self.owner.load(Relaxed) == this_thread {
+            if self.owner.get() == this_thread {
                self.increment_lock_count().expect("lock count overflow in reentrant mutex");
            } else {
                self.mutex.lock();
-                self.owner.store(this_thread, Relaxed);
+                self.owner.set(this_thread);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
            }
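increment_lock_count() itself is not shown in this hunk; conceptually it is a checked increment along these lines (a sketch, not the verbatim std source):

fn increment_lock_count(count: &mut u32) -> Option<()> {
    // `None` on overflow becomes the "lock count overflow in reentrant mutex"
    // panic in lock() above, and is propagated as-is by the `?` in try_lock().
    *count = count.checked_add(1)?;
    Some(())
}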
@@ -223,14 +305,14 @@ impl<T: ?Sized> ReentrantLock<T> {
    ///
    /// This function does not block.
    pub(crate) fn try_lock(&self) -> Option<ReentrantLockGuard<'_, T>> {
-        let this_thread = current_thread_unique_ptr();
+        let this_thread = current_thread_id();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
-            if self.owner.load(Relaxed) == this_thread {
+            if self.owner.get() == this_thread {
                self.increment_lock_count()?;
                Some(ReentrantLockGuard { lock: self })
            } else if self.mutex.try_lock() {
-                self.owner.store(this_thread, Relaxed);
+                self.owner.set(this_thread);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantLockGuard { lock: self })
@@ -303,18 +385,23 @@ impl<T: ?Sized> Drop for ReentrantLockGuard<'_, T> {
        unsafe {
            *self.lock.lock_count.get() -= 1;
            if *self.lock.lock_count.get() == 0 {
-                self.lock.owner.store(0, Relaxed);
+                self.lock.owner.set(0);
                self.lock.mutex.unlock();
            }
        }
    }
}

-/// Get an address that is unique per running thread.
+/// Returns the current thread's ThreadId value, which is guaranteed
+/// to be unique across the lifetime of the process.
///
-/// This can be used as a non-null usize-sized ID.
-pub(crate) fn current_thread_unique_ptr() -> usize {
-    // Use a non-drop type to make sure it's still available during thread destruction.
-    thread_local! { static X: u8 = const { 0 } }
-    X.with(|x| <*const _>::addr(x))
+/// Panics if called during a TLS destructor on a thread that hasn't
+/// been assigned an ID.
+pub(crate) fn current_thread_id() -> u64 {
+    #[cold]
+    fn no_tid() -> ! {
+        panic!("Thread hasn't been assigned an ID!")
+    }
+
+    crate::thread::try_current_id().map_or_else(|| no_tid(), |tid| tid.as_u64().get())
}
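A minimal usage sketch showing the reentrancy this machinery enables; it assumes a nightly toolchain, since ReentrantLock is still unstable behind the reentrant_lock feature (tracking issue #121440):

#![feature(reentrant_lock)] // unstable, nightly-only

use std::sync::ReentrantLock;

fn main() {
    let lock = ReentrantLock::new(5);
    let first = lock.lock();
    // Locking again on the same thread does not deadlock: lock() compares the
    // stored owner Tid against current_thread_id() and, on a match, merely
    // bumps `lock_count` instead of touching the OS mutex again.
    let second = lock.lock();
    assert_eq!(*first + *second, 10);
}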