Skip to content

Commit 6cdf31b

Browse files
committed
Auto merge of #29031 - cristicbz:mtx_inner, r=alexcrichton
The implementation for `into_inner` was a bit more complex than I had hoped for---is there any simpler, less unsafe way of getting around the fact that one can't move out of a `Drop` struct? See #28968 and rust-lang/rfcs#1269 .
2 parents be3d390 + 90ccefd commit 6cdf31b

File tree

2 files changed

+233
-0
lines changed

2 files changed

+233
-0
lines changed

src/libstd/sync/mutex.rs

+115
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,9 @@ use prelude::v1::*;
1313
use cell::UnsafeCell;
1414
use fmt;
1515
use marker;
16+
use mem;
1617
use ops::{Deref, DerefMut};
18+
use ptr;
1719
use sys_common::mutex as sys;
1820
use sys_common::poison::{self, TryLockError, TryLockResult, LockResult};
1921

@@ -243,6 +245,50 @@ impl<T: ?Sized> Mutex<T> {
243245
pub fn is_poisoned(&self) -> bool {
244246
self.inner.poison.get()
245247
}
248+
249+
/// Consumes this mutex, returning the underlying data.
250+
///
251+
/// # Failure
252+
///
253+
/// If another user of this mutex panicked while holding the mutex, then
254+
/// this call will return an error instead.
255+
#[unstable(feature = "mutex_into_inner", reason = "recently added", issue = "28968")]
256+
pub fn into_inner(self) -> LockResult<T> where T: Sized {
257+
// We know statically that there are no outstanding references to
258+
// `self` so there's no need to lock the inner StaticMutex.
259+
//
260+
// To get the inner value, we'd like to call `data.into_inner()`,
261+
// but because `Mutex` impl-s `Drop`, we can't move out of it, so
262+
// we'll have to destructure it manually instead.
263+
unsafe {
264+
// Like `let Mutex { inner, data } = self`.
265+
let (inner, data) = {
266+
let Mutex { ref inner, ref data } = self;
267+
(ptr::read(inner), ptr::read(data))
268+
};
269+
mem::forget(self);
270+
inner.lock.destroy(); // Keep in sync with the `Drop` impl.
271+
272+
poison::map_result(inner.poison.borrow(), |_| data.into_inner())
273+
}
274+
}
275+
276+
/// Returns a mutable reference to the underlying data.
277+
///
278+
/// Since this call borrows the `Mutex` mutably, no actual locking needs to
279+
/// take place---the mutable borrow statically guarantees no locks exist.
280+
///
281+
/// # Failure
282+
///
283+
/// If another user of this mutex panicked while holding the mutex, then
284+
/// this call will return an error instead.
285+
#[unstable(feature = "mutex_get_mut", reason = "recently added", issue = "28968")]
286+
pub fn get_mut(&mut self) -> LockResult<&mut T> {
287+
// We know statically that there are no other references to `self`, so
288+
// there's no need to lock the inner StaticMutex.
289+
let data = unsafe { &mut *self.data.get() };
290+
poison::map_result(self.inner.poison.borrow(), |_| data )
291+
}
246292
}
247293

248294
#[stable(feature = "rust1", since = "1.0.0")]
@@ -251,6 +297,8 @@ impl<T: ?Sized> Drop for Mutex<T> {
251297
// This is actually safe b/c we know that there is no further usage of
252298
// this mutex (it's up to the user to arrange for a mutex to get
253299
// dropped, that's not our job)
300+
//
301+
// IMPORTANT: This code must be kept in sync with `Mutex::into_inner`.
254302
unsafe { self.inner.lock.destroy() }
255303
}
256304
}
@@ -371,10 +419,14 @@ mod tests {
371419

372420
use sync::mpsc::channel;
373421
use sync::{Arc, Mutex, StaticMutex, Condvar};
422+
use sync::atomic::{AtomicUsize, Ordering};
374423
use thread;
375424

376425
struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
377426

427+
// Test helper: a move-only (non-`Copy`) wrapper that is comparable and
// printable, so `into_inner`/`get_mut` results can be asserted on.
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
429+
378430
unsafe impl<T: Send> Send for Packet<T> {}
379431
unsafe impl<T> Sync for Packet<T> {}
380432

@@ -435,6 +487,69 @@ mod tests {
435487
*m.try_lock().unwrap() = ();
436488
}
437489

490+
#[test]
fn test_into_inner() {
    // Consuming an unpoisoned mutex hands back the original value.
    let mutex = Mutex::new(NonCopy(10));
    let recovered = mutex.into_inner().unwrap();
    assert_eq!(recovered, NonCopy(10));
}
495+
496+
#[test]
fn test_into_inner_drop() {
    // A counter-bumping guard type lets the test observe exactly when the
    // contained value is dropped.
    struct Foo(Arc<AtomicUsize>);
    impl Drop for Foo {
        fn drop(&mut self) {
            self.0.fetch_add(1, Ordering::SeqCst);
        }
    }

    let num_drops = Arc::new(AtomicUsize::new(0));
    let mutex = Mutex::new(Foo(num_drops.clone()));
    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    {
        // `into_inner` must move the value out without dropping it...
        let _moved_out = mutex.into_inner().unwrap();
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    }
    // ...and the value must be dropped exactly once when it leaves scope.
    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
513+
514+
#[test]
fn test_into_inner_poison() {
    let mutex = Arc::new(Mutex::new(NonCopy(10)));
    let remote = mutex.clone();
    // Poison the mutex by panicking on another thread while it is held.
    let _ = thread::spawn(move || {
        let _lock = remote.lock().unwrap();
        panic!("test panic in inner thread to poison mutex");
    }).join();

    assert!(mutex.is_poisoned());
    // Even when poisoned, the data must still be recoverable via the error.
    match Arc::try_unwrap(mutex).unwrap().into_inner() {
        Err(err) => assert_eq!(err.into_inner(), NonCopy(10)),
        Ok(v) => panic!("into_inner of poisoned Mutex is Ok: {:?}", v),
    }
}
529+
530+
#[test]
fn test_get_mut() {
    // A write made through `get_mut` must be visible to `into_inner`.
    let mut mutex = Mutex::new(NonCopy(10));
    *mutex.get_mut().unwrap() = NonCopy(20);
    assert_eq!(mutex.into_inner().unwrap(), NonCopy(20));
}
536+
537+
#[test]
fn test_get_mut_poison() {
    let mutex = Arc::new(Mutex::new(NonCopy(10)));
    let remote = mutex.clone();
    // Poison the mutex by panicking on another thread while it is held.
    let _ = thread::spawn(move || {
        let _lock = remote.lock().unwrap();
        panic!("test panic in inner thread to poison mutex");
    }).join();

    assert!(mutex.is_poisoned());
    // `get_mut` on a poisoned mutex errors, but the data stays reachable.
    match Arc::try_unwrap(mutex).unwrap().get_mut() {
        Err(err) => assert_eq!(*err.into_inner(), NonCopy(10)),
        Ok(v) => panic!("get_mut of poisoned Mutex is Ok: {:?}", v),
    }
}
552+
438553
#[test]
439554
fn test_mutex_arc_condvar() {
440555
let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));

src/libstd/sync/rwlock.rs

+118
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,9 @@ use prelude::v1::*;
1313
use cell::UnsafeCell;
1414
use fmt;
1515
use marker;
16+
use mem;
1617
use ops::{Deref, DerefMut};
18+
use ptr;
1719
use sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
1820
use sys_common::rwlock as sys;
1921

@@ -260,11 +262,60 @@ impl<T: ?Sized> RwLock<T> {
260262
pub fn is_poisoned(&self) -> bool {
261263
self.inner.poison.get()
262264
}
265+
266+
/// Consumes this `RwLock`, returning the underlying data.
267+
///
268+
/// # Failure
269+
///
270+
/// This function will return an error if the RwLock is poisoned. An RwLock
271+
/// is poisoned whenever a writer panics while holding an exclusive lock. An
272+
/// error will only be returned if the lock would have otherwise been
273+
/// acquired.
274+
#[unstable(feature = "rwlock_into_inner", reason = "recently added", issue = "28968")]
275+
pub fn into_inner(self) -> LockResult<T> where T: Sized {
276+
// We know statically that there are no outstanding references to
277+
// `self` so there's no need to lock the inner StaticRwLock.
278+
//
279+
// To get the inner value, we'd like to call `data.into_inner()`,
280+
// but because `RwLock` impl-s `Drop`, we can't move out of it, so
281+
// we'll have to destructure it manually instead.
282+
unsafe {
283+
// Like `let RwLock { inner, data } = self`.
284+
let (inner, data) = {
285+
let RwLock { ref inner, ref data } = self;
286+
(ptr::read(inner), ptr::read(data))
287+
};
288+
mem::forget(self);
289+
inner.lock.destroy(); // Keep in sync with the `Drop` impl.
290+
291+
poison::map_result(inner.poison.borrow(), |_| data.into_inner())
292+
}
293+
}
294+
295+
/// Returns a mutable reference to the underlying data.
296+
///
297+
/// Since this call borrows the `RwLock` mutably, no actual locking needs to
298+
/// take place---the mutable borrow statically guarantees no locks exist.
299+
///
300+
/// # Failure
301+
///
302+
/// This function will return an error if the RwLock is poisoned. An RwLock
303+
/// is poisoned whenever a writer panics while holding an exclusive lock. An
304+
/// error will only be returned if the lock would have otherwise been
305+
/// acquired.
306+
#[unstable(feature = "rwlock_get_mut", reason = "recently added", issue = "28968")]
307+
pub fn get_mut(&mut self) -> LockResult<&mut T> {
308+
// We know statically that there are no other references to `self`, so
309+
// there's no need to lock the inner StaticRwLock.
310+
let data = unsafe { &mut *self.data.get() };
311+
poison::map_result(self.inner.poison.borrow(), |_| data )
312+
}
263313
}
264314

265315
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Drop for RwLock<T> {
    // Tears down the raw OS lock when the `RwLock` itself is dropped;
    // arranging that no users remain at this point is the caller's job.
    fn drop(&mut self) {
        // IMPORTANT: This code needs to be kept in sync with `RwLock::into_inner`.
        unsafe { self.inner.lock.destroy() }
    }
}
@@ -426,6 +477,10 @@ mod tests {
426477
use sync::mpsc::channel;
427478
use thread;
428479
use sync::{Arc, RwLock, StaticRwLock, TryLockError};
480+
use sync::atomic::{AtomicUsize, Ordering};
481+
482+
// Test helper: a move-only (non-`Copy`) wrapper that is comparable and
// printable, so `into_inner`/`get_mut` results can be asserted on.
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
429484

430485
#[test]
431486
fn smoke() {
@@ -606,4 +661,67 @@ mod tests {
606661

607662
drop(read_guard);
608663
}
664+
665+
#[test]
fn test_into_inner() {
    // Consuming an unpoisoned lock hands back the original value.
    let lock = RwLock::new(NonCopy(10));
    let recovered = lock.into_inner().unwrap();
    assert_eq!(recovered, NonCopy(10));
}
670+
671+
#[test]
fn test_into_inner_drop() {
    // A counter-bumping guard type lets the test observe exactly when the
    // contained value is dropped.
    struct Foo(Arc<AtomicUsize>);
    impl Drop for Foo {
        fn drop(&mut self) {
            self.0.fetch_add(1, Ordering::SeqCst);
        }
    }

    let num_drops = Arc::new(AtomicUsize::new(0));
    let lock = RwLock::new(Foo(num_drops.clone()));
    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    {
        // `into_inner` must move the value out without dropping it...
        let _moved_out = lock.into_inner().unwrap();
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    }
    // ...and the value must be dropped exactly once when it leaves scope.
    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
688+
689+
#[test]
fn test_into_inner_poison() {
    let lock = Arc::new(RwLock::new(NonCopy(10)));
    let remote = lock.clone();
    // Poison the lock by panicking on another thread while writing.
    let _ = thread::spawn(move || {
        let _lock = remote.write().unwrap();
        panic!("test panic in inner thread to poison RwLock");
    }).join();

    assert!(lock.is_poisoned());
    // Even when poisoned, the data must still be recoverable via the error.
    match Arc::try_unwrap(lock).unwrap().into_inner() {
        Err(err) => assert_eq!(err.into_inner(), NonCopy(10)),
        Ok(v) => panic!("into_inner of poisoned RwLock is Ok: {:?}", v),
    }
}
704+
705+
#[test]
fn test_get_mut() {
    // A write made through `get_mut` must be visible to `into_inner`.
    let mut lock = RwLock::new(NonCopy(10));
    *lock.get_mut().unwrap() = NonCopy(20);
    assert_eq!(lock.into_inner().unwrap(), NonCopy(20));
}
711+
712+
#[test]
fn test_get_mut_poison() {
    let lock = Arc::new(RwLock::new(NonCopy(10)));
    let remote = lock.clone();
    // Poison the lock by panicking on another thread while writing.
    let _ = thread::spawn(move || {
        let _lock = remote.write().unwrap();
        panic!("test panic in inner thread to poison RwLock");
    }).join();

    assert!(lock.is_poisoned());
    // `get_mut` on a poisoned lock errors, but the data stays reachable.
    match Arc::try_unwrap(lock).unwrap().get_mut() {
        Err(err) => assert_eq!(*err.into_inner(), NonCopy(10)),
        Ok(v) => panic!("get_mut of poisoned RwLock is Ok: {:?}", v),
    }
}
609727
}

0 commit comments

Comments
 (0)