@@ -13,7 +13,9 @@ use prelude::v1::*;
13
13
use cell:: UnsafeCell ;
14
14
use fmt;
15
15
use marker;
16
+ use mem;
16
17
use ops:: { Deref , DerefMut } ;
18
+ use ptr;
17
19
use sys_common:: mutex as sys;
18
20
use sys_common:: poison:: { self , TryLockError , TryLockResult , LockResult } ;
19
21
@@ -243,6 +245,50 @@ impl<T: ?Sized> Mutex<T> {
243
245
    /// Determines whether the lock is poisoned.
    ///
    /// A mutex becomes poisoned when another user panics while holding it;
    /// `lock`/`into_inner`/`get_mut` then report an error to their callers.
    pub fn is_poisoned(&self) -> bool {
        // Plain flag read; no need to acquire the lock just to inspect it.
        self.inner.poison.get()
    }
248
+
249
    /// Consumes this mutex, returning the underlying data.
    ///
    /// # Failure
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return an error instead.
    #[unstable(feature = "mutex_into_inner", reason = "recently added", issue = "28968")]
    pub fn into_inner(self) -> LockResult<T> where T: Sized {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock the inner StaticMutex.
        //
        // To get the inner value, we'd like to call `data.into_inner()`,
        // but because `Mutex` impl-s `Drop`, we can't move out of it, so
        // we'll have to destructure it manually instead.
        unsafe {
            // SAFETY: `ptr::read` makes bitwise copies of both fields while
            // `self` still owns them; the `mem::forget(self)` immediately
            // below suppresses `Mutex`'s `Drop`, so each field ends up owned
            // exactly once and the OS lock is destroyed exactly once (here,
            // not in `Drop`).
            // Like `let Mutex { inner, data } = self`.
            let (inner, data) = {
                let Mutex { ref inner, ref data } = self;
                (ptr::read(inner), ptr::read(data))
            };
            mem::forget(self);
            inner.lock.destroy(); // Keep in sync with the `Drop` impl.

            // Propagate poison: an `Err` still carries the data so callers
            // can recover it explicitly.
            poison::map_result(inner.poison.borrow(), |_| data.into_inner())
        }
    }
275
+
276
+ /// Returns a mutable reference to the underlying data.
277
+ ///
278
+ /// Since this call borrows the `Mutex` mutably, no actual locking needs to
279
+ /// take place---the mutable borrow statically guarantees no locks exist.
280
+ ///
281
+ /// # Failure
282
+ ///
283
+ /// If another user of this mutex panicked while holding the mutex, then
284
+ /// this call will return an error instead.
285
+ #[ unstable( feature = "mutex_get_mut" , reason = "recently added" , issue = "28968" ) ]
286
+ pub fn get_mut ( & mut self ) -> LockResult < & mut T > {
287
+ // We know statically that there are no other references to `self`, so
288
+ // there's no need to lock the inner StaticMutex.
289
+ let data = unsafe { & mut * self . data . get ( ) } ;
290
+ poison:: map_result ( self . inner . poison . borrow ( ) , |_| data )
291
+ }
246
292
}
247
293
248
294
#[ stable( feature = "rust1" , since = "1.0.0" ) ]
@@ -251,6 +297,8 @@ impl<T: ?Sized> Drop for Mutex<T> {
251
297
// This is actually safe b/c we know that there is no further usage of
252
298
// this mutex (it's up to the user to arrange for a mutex to get
253
299
// dropped, that's not our job)
300
+ //
301
+ // IMPORTANT: This code must be kept in sync with `Mutex::into_inner`.
254
302
unsafe { self . inner . lock . destroy ( ) }
255
303
}
256
304
}
@@ -371,10 +419,14 @@ mod tests {
371
419
372
420
use sync:: mpsc:: channel;
373
421
use sync:: { Arc , Mutex , StaticMutex , Condvar } ;
422
+ use sync:: atomic:: { AtomicUsize , Ordering } ;
374
423
use thread;
375
424
376
425
    // Test fixture: a mutex-guarded value paired with a condvar, shared via `Arc`.
    struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
377
426
427
    // Deliberately non-`Copy` payload so tests observe real moves out of the
    // mutex (via `into_inner`) rather than implicit copies.
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);
429
+
378
430
    // NOTE(review): test-only marker impls. Packet wraps an Arc of sync
    // primitives, so these presumably hold for T: Send; the unconditional
    // `Sync` impl bypasses auto-trait inference — confirm against upstream.
    unsafe impl<T: Send> Send for Packet<T> {}
    unsafe impl<T> Sync for Packet<T> {}
380
432
@@ -435,6 +487,69 @@ mod tests {
435
487
* m. try_lock ( ) . unwrap ( ) = ( ) ;
436
488
}
437
489
490
+ #[ test]
491
+ fn test_into_inner ( ) {
492
+ let m = Mutex :: new ( NonCopy ( 10 ) ) ;
493
+ assert_eq ! ( m. into_inner( ) . unwrap( ) , NonCopy ( 10 ) ) ;
494
+ }
495
+
496
+ #[ test]
497
+ fn test_into_inner_drop ( ) {
498
+ struct Foo ( Arc < AtomicUsize > ) ;
499
+ impl Drop for Foo {
500
+ fn drop ( & mut self ) {
501
+ self . 0 . fetch_add ( 1 , Ordering :: SeqCst ) ;
502
+ }
503
+ }
504
+ let num_drops = Arc :: new ( AtomicUsize :: new ( 0 ) ) ;
505
+ let m = Mutex :: new ( Foo ( num_drops. clone ( ) ) ) ;
506
+ assert_eq ! ( num_drops. load( Ordering :: SeqCst ) , 0 ) ;
507
+ {
508
+ let _inner = m. into_inner ( ) . unwrap ( ) ;
509
+ assert_eq ! ( num_drops. load( Ordering :: SeqCst ) , 0 ) ;
510
+ }
511
+ assert_eq ! ( num_drops. load( Ordering :: SeqCst ) , 1 ) ;
512
+ }
513
+
514
+ #[ test]
515
+ fn test_into_inner_poison ( ) {
516
+ let m = Arc :: new ( Mutex :: new ( NonCopy ( 10 ) ) ) ;
517
+ let m2 = m. clone ( ) ;
518
+ let _ = thread:: spawn ( move || {
519
+ let _lock = m2. lock ( ) . unwrap ( ) ;
520
+ panic ! ( "test panic in inner thread to poison mutex" ) ;
521
+ } ) . join ( ) ;
522
+
523
+ assert ! ( m. is_poisoned( ) ) ;
524
+ match Arc :: try_unwrap ( m) . unwrap ( ) . into_inner ( ) {
525
+ Err ( e) => assert_eq ! ( e. into_inner( ) , NonCopy ( 10 ) ) ,
526
+ Ok ( x) => panic ! ( "into_inner of poisoned Mutex is Ok: {:?}" , x) ,
527
+ }
528
+ }
529
+
530
+ #[ test]
531
+ fn test_get_mut ( ) {
532
+ let mut m = Mutex :: new ( NonCopy ( 10 ) ) ;
533
+ * m. get_mut ( ) . unwrap ( ) = NonCopy ( 20 ) ;
534
+ assert_eq ! ( m. into_inner( ) . unwrap( ) , NonCopy ( 20 ) ) ;
535
+ }
536
+
537
+ #[ test]
538
+ fn test_get_mut_poison ( ) {
539
+ let m = Arc :: new ( Mutex :: new ( NonCopy ( 10 ) ) ) ;
540
+ let m2 = m. clone ( ) ;
541
+ let _ = thread:: spawn ( move || {
542
+ let _lock = m2. lock ( ) . unwrap ( ) ;
543
+ panic ! ( "test panic in inner thread to poison mutex" ) ;
544
+ } ) . join ( ) ;
545
+
546
+ assert ! ( m. is_poisoned( ) ) ;
547
+ match Arc :: try_unwrap ( m) . unwrap ( ) . get_mut ( ) {
548
+ Err ( e) => assert_eq ! ( * e. into_inner( ) , NonCopy ( 10 ) ) ,
549
+ Ok ( x) => panic ! ( "get_mut of poisoned Mutex is Ok: {:?}" , x) ,
550
+ }
551
+ }
552
+
438
553
#[ test]
439
554
fn test_mutex_arc_condvar ( ) {
440
555
let packet = Packet ( Arc :: new ( ( Mutex :: new ( false ) , Condvar :: new ( ) ) ) ) ;
0 commit comments