@@ -5,6 +5,7 @@ use core::time::Duration;
 use std::collections::HashSet;
 use std::cell::RefCell;
 
+#[cfg(not(feature = "backtrace"))]
 use std::sync::atomic::{AtomicUsize, Ordering};
 
 use std::sync::Mutex as StdMutex;
@@ -15,7 +16,12 @@ use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
 use std::sync::Condvar as StdCondvar;
 
 #[cfg(feature = "backtrace")]
-use backtrace::Backtrace;
+use {prelude::HashMap, backtrace::Backtrace, std::sync::Once};
+
+#[cfg(not(feature = "backtrace"))]
+struct Backtrace{}
+#[cfg(not(feature = "backtrace"))]
+impl Backtrace { fn new() -> Backtrace { Backtrace{} } }
 
 pub type LockResult<Guard> = Result<Guard, ()>;
 
@@ -46,14 +52,19 @@ thread_local! {
 	/// We track the set of locks currently held by a reference to their `LockMetadata`
 	static LOCKS_HELD: RefCell<HashSet<Arc<LockMetadata>>> = RefCell::new(HashSet::new());
 }
+#[cfg(not(feature = "backtrace"))]
 static LOCK_IDX: AtomicUsize = AtomicUsize::new(0);
 
+#[cfg(feature = "backtrace")]
+static mut LOCKS: Option<StdMutex<HashMap<u64, Arc<LockMetadata>>>> = None;
+#[cfg(feature = "backtrace")]
+static LOCKS_INIT: Once = Once::new();
+
 /// Metadata about a single lock, by id, the set of things locked-before it, and the backtrace of
 /// when the Mutex itself was constructed.
 struct LockMetadata {
 	lock_idx: u64,
-	locked_before: StdMutex<HashSet<Arc<LockMetadata>>>,
-	#[cfg(feature = "backtrace")]
+	locked_before: StdMutex<HashSet<LockDep>>,
 	lock_construction_bt: Backtrace,
 }
 impl PartialEq for LockMetadata {
@@ -64,14 +75,57 @@ impl std::hash::Hash for LockMetadata {
 	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock_idx); }
 }
 
+struct LockDep {
+	lock: Arc<LockMetadata>,
+	lockdep_trace: Option<Backtrace>,
+}
+impl LockDep {
+	/// Note that `Backtrace::new()` is rather expensive so we rely on the caller to fill in the
+	/// `lockdep_backtrace` field after ensuring we need it.
+	fn new_without_bt(lock: &Arc<LockMetadata>) -> Self {
+		Self { lock: Arc::clone(lock), lockdep_trace: None }
+	}
+}
+impl PartialEq for LockDep {
+	fn eq(&self, o: &LockDep) -> bool { self.lock.lock_idx == o.lock.lock_idx }
+}
+impl Eq for LockDep {}
+impl std::hash::Hash for LockDep {
+	fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { hasher.write_u64(self.lock.lock_idx); }
+}
+
 impl LockMetadata {
-	fn new() -> LockMetadata {
-		LockMetadata {
-			locked_before: StdMutex::new(HashSet::new()),
-			lock_idx: LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64,
-			#[cfg(feature = "backtrace")]
-			lock_construction_bt: Backtrace::new(),
+	fn new() -> Arc<LockMetadata> {
+		let lock_idx;
+		let backtrace = Backtrace::new();
+
+		#[cfg(not(feature = "backtrace"))]
+		{ lock_idx = LOCK_IDX.fetch_add(1, Ordering::Relaxed) as u64; }
+
+		#[cfg(feature = "backtrace")]
+		{
+			let mut idx = None;
+			// Find the first frame which was *not* in debug_sync (or which is in our tests) and
+			// use that as the mutex construction site.
+			for frame in backtrace.frames() {
+				let symbol_name = frame.symbols().last().unwrap().name().unwrap().as_str().unwrap();
+				if !symbol_name.contains("lightning::debug_sync::") || symbol_name.contains("lightning::debug_sync::tests") {
+					idx = Some(frame.ip() as usize as u64);
+					break;
+				}
+			}
+			lock_idx = idx.unwrap();
+			LOCKS_INIT.call_once(|| { unsafe { LOCKS = Some(StdMutex::new(HashMap::new())); } });
+			if let Some(metadata) = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap().get(&lock_idx) {
+				return Arc::clone(&metadata);
+			}
 		}
+
+		Arc::new(LockMetadata {
+			locked_before: StdMutex::new(HashSet::new()),
+			lock_idx,
+			lock_construction_bt: backtrace,
+		})
 	}
 
 	// Returns whether we were a recursive lock (only relevant for read)
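
The `LOCKS`/`LOCKS_INIT` pair used above is a lazy-initialized global built from `std::sync::Once` plus a `static mut`, in lieu of `lazy_static` or a `OnceLock`. A minimal, self-contained sketch of that pattern, with hypothetical names that are not part of the patch:

	use std::collections::HashMap;
	use std::sync::{Mutex, Once};

	// Initialize the global exactly once, then hand out a shared reference.
	static mut REGISTRY: Option<Mutex<HashMap<u64, String>>> = None;
	static REGISTRY_INIT: Once = Once::new();

	fn registry() -> &'static Mutex<HashMap<u64, String>> {
		REGISTRY_INIT.call_once(|| unsafe { REGISTRY = Some(Mutex::new(HashMap::new())) });
		// Safe to read here: call_once guarantees initialization completed.
		unsafe { REGISTRY.as_ref() }.unwrap()
	}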
@@ -89,18 +143,25 @@ impl LockMetadata {
 			}
 			for locked in held.borrow().iter() {
 				if !read && *locked == *this {
-					panic!("Tried to lock a lock while it was held!");
+					// With `feature = "backtrace"` set, we may be looking at different instances
+					// of the same lock.
+					debug_assert!(cfg!(feature = "backtrace"), "Tried to lock a lock while it was held!");
 				}
 				for locked_dep in locked.locked_before.lock().unwrap().iter() {
-					if *locked_dep == *this {
+					if locked_dep.lock == *this && locked_dep.lock != *locked {
 						#[cfg(feature = "backtrace")]
-						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\n{:?}", locked.lock_construction_bt);
+						panic!("Tried to violate existing lockorder.\nMutex that should be locked after the current lock was created at the following backtrace.\nNote that to get a backtrace for the lockorder violation, you should set RUST_BACKTRACE=1\nLock constructed at:\n{:?}\n\nLock dep created at:\n{:?}\n\n", locked.lock_construction_bt, locked_dep.lockdep_trace);
 						#[cfg(not(feature = "backtrace"))]
 						panic!("Tried to violate existing lockorder. Build with the backtrace feature for more info.");
 					}
 				}
 				// Insert any already-held locks in our locked-before set.
-				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+				let mut locked_before = this.locked_before.lock().unwrap();
+				let mut lockdep = LockDep::new_without_bt(locked);
+				if !locked_before.contains(&lockdep) {
+					lockdep.lockdep_trace = Some(Backtrace::new());
+					locked_before.insert(lockdep);
+				}
 			}
 			held.borrow_mut().insert(Arc::clone(this));
 			inserted = true;
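
The relaxed `debug_assert!` above covers a subtlety of call-site identity: with `feature = "backtrace"`, two distinct Mutex instances constructed at the same line share one `lock_idx` and so compare equal. A sketch of the legal pattern that would otherwise trip the held-lock panic (hypothetical code, not from the patch):

	fn make_lock() -> Mutex<()> {
		Mutex::new(()) // every call shares this construction site, hence one identity
	}

	fn two_instances_one_identity() {
		let a = make_lock();
		let b = make_lock();
		let _a = a.lock().unwrap();
		// `b` is a different Mutex, but `*locked == *this` can hold because the
		// LockMetadata ids match; hence a debug_assert rather than a hard panic.
		let _b = b.lock().unwrap();
	}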
@@ -116,10 +177,15 @@ impl LockMetadata {
 			// Since a try-lock will simply fail if the lock is held already, we do not
 			// consider try-locks to ever generate lockorder inversions. However, if a try-lock
 			// succeeds, we do consider it to have created lockorder dependencies.
+			held.borrow_mut().insert(Arc::clone(this));
+			let mut locked_before = this.locked_before.lock().unwrap();
 			for locked in held.borrow().iter() {
-				this.locked_before.lock().unwrap().insert(Arc::clone(locked));
+				let mut lockdep = LockDep::new_without_bt(locked);
+				if !locked_before.contains(&lockdep) {
+					lockdep.lockdep_trace = Some(Backtrace::new());
+					locked_before.insert(lockdep);
+				}
 			}
-			held.borrow_mut().insert(Arc::clone(this));
 		});
 	}
 }
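
For contrast, a quick sketch of the try-lock path handled above: a failed try-lock is never treated as a lockorder event, while a successful one records the same `LockDep` edges as a normal lock. This assumes the wrapper exposes a `try_lock` that calls into `try_locked`; the usage below is illustrative, not from the patch:

	let a = Mutex::new(());
	let b = Mutex::new(());
	let _a = a.lock().unwrap();
	if let Ok(_b) = b.try_lock() {
		// Success: the "a locked before b" edge is recorded, with a backtrace
		// captured only the first time this particular edge is seen.
	} // Failure simply returns Err(()) without flagging an inversion.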
@@ -170,7 +236,7 @@ impl<T: Sized> DerefMut for MutexGuard<'_, T> {
 
 impl<T> Mutex<T> {
 	pub fn new(inner: T) -> Mutex<T> {
-		Mutex { inner: StdMutex::new(inner), deps: Arc::new(LockMetadata::new()) }
+		Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() }
 	}
 
 	pub fn lock<'a>(&'a self) -> LockResult<MutexGuard<'a, T>> {
@@ -249,7 +315,7 @@ impl<T: Sized> DerefMut for RwLockWriteGuard<'_, T> {
 
 impl<T> RwLock<T> {
 	pub fn new(inner: T) -> RwLock<T> {
-		RwLock { inner: StdRwLock::new(inner), deps: Arc::new(LockMetadata::new()) }
+		RwLock { inner: StdRwLock::new(inner), deps: LockMetadata::new() }
 	}
 
 	pub fn read<'a>(&'a self) -> LockResult<RwLockReadGuard<'a, T>> {
@@ -271,96 +337,100 @@ impl<T> RwLock<T> {
 	}
 }
 
-#[test]
-#[should_panic]
-fn recursive_lock_fail() {
-	let mutex = Mutex::new(());
-	let _a = mutex.lock().unwrap();
-	let _b = mutex.lock().unwrap();
-}
+pub type FairRwLock<T> = RwLock<T>;
 
-#[test]
-fn recursive_read() {
-	let lock = RwLock::new(());
-	let _a = lock.read().unwrap();
-	let _b = lock.read().unwrap();
-}
+mod tests {
+	use super::{RwLock, Mutex};
 
-#[test]
-#[should_panic]
-fn lockorder_fail() {
-	let a = Mutex::new(());
-	let b = Mutex::new(());
-	{
-		let _a = a.lock().unwrap();
-		let _b = b.lock().unwrap();
-	}
-	{
-		let _b = b.lock().unwrap();
-		let _a = a.lock().unwrap();
+	#[test]
+	#[should_panic]
+	fn recursive_lock_fail() {
+		let mutex = Mutex::new(());
+		let _a = mutex.lock().unwrap();
+		let _b = mutex.lock().unwrap();
 	}
-}
 
-#[test]
-#[should_panic]
-fn write_lockorder_fail() {
-	let a = RwLock::new(());
-	let b = RwLock::new(());
-	{
-		let _a = a.write().unwrap();
-		let _b = b.write().unwrap();
+	#[test]
+	fn recursive_read() {
+		let lock = RwLock::new(());
+		let _a = lock.read().unwrap();
+		let _b = lock.read().unwrap();
 	}
-	{
-		let _b = b.write().unwrap();
-		let _a = a.write().unwrap();
-	}
-}
 
-#[test]
-#[should_panic]
-fn read_lockorder_fail() {
-	let a = RwLock::new(());
-	let b = RwLock::new(());
-	{
-		let _a = a.read().unwrap();
-		let _b = b.read().unwrap();
-	}
-	{
-		let _b = b.read().unwrap();
-		let _a = a.read().unwrap();
+	#[test]
+	#[should_panic]
+	fn lockorder_fail() {
+		let a = Mutex::new(());
+		let b = Mutex::new(());
+		{
+			let _a = a.lock().unwrap();
+			let _b = b.lock().unwrap();
+		}
+		{
+			let _b = b.lock().unwrap();
+			let _a = a.lock().unwrap();
+		}
 	}
-}
 
-#[test]
-fn read_recurisve_no_lockorder() {
-	// Like the above, but note that no lockorder is implied when we recursively read-lock a
-	// RwLock, causing this to pass just fine.
-	let a = RwLock::new(());
-	let b = RwLock::new(());
-	let _outer = a.read().unwrap();
-	{
-		let _a = a.read().unwrap();
-		let _b = b.read().unwrap();
+	#[test]
+	#[should_panic]
+	fn write_lockorder_fail() {
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		{
+			let _a = a.write().unwrap();
+			let _b = b.write().unwrap();
+		}
+		{
+			let _b = b.write().unwrap();
+			let _a = a.write().unwrap();
+		}
 	}
-	{
-		let _b = b.read().unwrap();
-		let _a = a.read().unwrap();
+
+	#[test]
+	#[should_panic]
+	fn read_lockorder_fail() {
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		{
+			let _a = a.read().unwrap();
+			let _b = b.read().unwrap();
+		}
+		{
+			let _b = b.read().unwrap();
+			let _a = a.read().unwrap();
+		}
 	}
-}
 
-#[test]
-#[should_panic]
-fn read_write_lockorder_fail() {
-	let a = RwLock::new(());
-	let b = RwLock::new(());
-	{
-		let _a = a.write().unwrap();
-		let _b = b.read().unwrap();
+	#[test]
+	fn read_recurisve_no_lockorder() {
+		// Like the above, but note that no lockorder is implied when we recursively read-lock a
+		// RwLock, causing this to pass just fine.
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		let _outer = a.read().unwrap();
+		{
+			let _a = a.read().unwrap();
+			let _b = b.read().unwrap();
+		}
+		{
+			let _b = b.read().unwrap();
+			let _a = a.read().unwrap();
+		}
 	}
-	{
-		let _b = b.read().unwrap();
-		let _a = a.write().unwrap();
+
+	#[test]
+	#[should_panic]
+	fn read_write_lockorder_fail() {
+		let a = RwLock::new(());
+		let b = RwLock::new(());
+		{
+			let _a = a.write().unwrap();
+			let _b = b.read().unwrap();
+		}
+		{
+			let _b = b.read().unwrap();
+			let _a = a.write().unwrap();
+		}
 	}
 }
-
-pub type FairRwLock<T> = RwLock<T>;