
Commit a795755

std: use ArcData rather than c_void in UnsafeArc.
This means that fewer `transmute`s are required, so there is less chance of a `transmute` not having the corresponding `forget` (possibly leading to use-after-free, etc).
1 parent 71448d7 commit a795755
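
To make the hazard concrete, here is a minimal sketch in present-day Rust (`Box<ArcData<T>>` and `std::mem::forget` stand in for `~ArcData<T>` and `cast::forget`; the type and method names only mirror the diff below and are not a real API). The old accessors had to rebuild the owning box from the untyped pointer and then remember to `forget` it; the new ones simply read through the typed `*mut ArcData<T>`.

use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative stand-ins for the types in the diff (modern names, not the 2013 API).
struct ArcData<T> {
    count: AtomicUsize,
    data: Option<T>,
}

struct UnsafeArc<T> {
    data: *mut ArcData<T>, // after this commit: a typed pointer, not *mut c_void
}

impl<T> UnsafeArc<T> {
    // Old style: reconstruct the owning box from the raw pointer, read through
    // it, then `forget` it so its destructor does not free the allocation.
    // Omitting the `forget` would free `ArcData` here and leave every other
    // handle dangling -- the use-after-free the commit message warns about.
    fn get_old_style(&self) -> *mut T {
        unsafe {
            let mut data: Box<ArcData<T>> = Box::from_raw(self.data);
            assert!(data.count.load(Ordering::Relaxed) > 0);
            let r: *mut T = data.data.as_mut().unwrap();
            std::mem::forget(data); // easy to miss; nothing enforces it
            r
        }
    }

    // New style: dereference the typed pointer directly. No owning box is ever
    // materialized, so there is no matching `forget` to get wrong.
    fn get(&self) -> *mut T {
        unsafe {
            assert!((*self.data).count.load(Ordering::Relaxed) > 0);
            (*self.data).data.as_mut().unwrap()
        }
    }
}

With the typed field, the only code paths that still build a `~ArcData<T>` are `unwrap` and `try_unwrap`, where ownership of the allocation really is being reclaimed (see the note after the diff).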

1 file changed: +12 -17

src/libstd/unstable/sync.rs

@@ -27,7 +27,7 @@ use vec;
 ///
 /// Enforces no shared-memory safety.
 pub struct UnsafeArc<T> {
-    data: *mut libc::c_void,
+    data: *mut ArcData<T>,
 }

 struct ArcData<T> {
@@ -41,7 +41,7 @@ struct ArcData<T> {
     data: Option<T>,
 }

-unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut libc::c_void {
+unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
     let data = ~ArcData { count: AtomicUint::new(refcount),
                           unwrapper: AtomicOption::empty(),
                           data: Some(data) };
@@ -79,12 +79,11 @@ impl<T: Send> UnsafeArc<T> {
             ~[] // The "num_handles - 1" trick (below) fails in the 0 case.
         } else {
             unsafe {
-                let mut data: ~ArcData<T> = cast::transmute(self.data);
                 // Minus one because we are recycling the given handle's refcount.
-                let old_count = data.count.fetch_add(num_handles - 1, Acquire);
-                // let old_count = data.count.fetch_add(num_handles, Acquire);
+                let old_count = (*self.data).count.fetch_add(num_handles - 1, Acquire);
+                // let old_count = (*self.data).count.fetch_add(num_handles, Acquire);
                 assert!(old_count >= 1);
-                let ptr = cast::transmute(data);
+                let ptr = self.data;
                 cast::forget(self); // Don't run the destructor on this handle.
                 vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
             }
@@ -94,21 +93,17 @@ impl<T: Send> UnsafeArc<T> {
     #[inline]
     pub fn get(&self) -> *mut T {
         unsafe {
-            let mut data: ~ArcData<T> = cast::transmute(self.data);
-            assert!(data.count.load(Relaxed) > 0);
-            let r: *mut T = data.data.get_mut_ref();
-            cast::forget(data);
+            assert!((*self.data).count.load(Relaxed) > 0);
+            let r: *mut T = (*self.data).data.get_mut_ref();
             return r;
         }
     }

     #[inline]
     pub fn get_immut(&self) -> *T {
         unsafe {
-            let data: ~ArcData<T> = cast::transmute(self.data);
-            assert!(data.count.load(Relaxed) > 0);
-            let r: *T = data.data.get_ref();
-            cast::forget(data);
+            assert!((*self.data).count.load(Relaxed) > 0);
+            let r: *T = (*self.data).data.get_ref();
             return r;
         }
     }
@@ -122,6 +117,7 @@ impl<T: Send> UnsafeArc<T> {
         do task::unkillable {
             unsafe {
                 let mut this = this.take();
+                // The ~ dtor needs to run if this code succeeds.
                 let mut data: ~ArcData<T> = cast::transmute(this.data);
                 // Set up the unwrap protocol.
                 let (p1,c1) = comm::oneshot(); // ()
@@ -186,6 +182,7 @@ impl<T: Send> UnsafeArc<T> {
     pub fn try_unwrap(self) -> Either<UnsafeArc<T>, T> {
         unsafe {
             let mut this = self; // FIXME(#4330) mutable self
+            // The ~ dtor needs to run if this code succeeds.
             let mut data: ~ArcData<T> = cast::transmute(this.data);
             // This can of course race with anybody else who has a handle, but in
             // such a case, the returned count will always be at least 2. If we
@@ -212,11 +209,9 @@ impl<T: Send> UnsafeArc<T> {
 impl<T: Send> Clone for UnsafeArc<T> {
     fn clone(&self) -> UnsafeArc<T> {
         unsafe {
-            let mut data: ~ArcData<T> = cast::transmute(self.data);
             // This barrier might be unnecessary, but I'm not sure...
-            let old_count = data.count.fetch_add(1, Acquire);
+            let old_count = (*self.data).count.fetch_add(1, Acquire);
             assert!(old_count >= 1);
-            cast::forget(data);
             return UnsafeArc { data: self.data };
         }
     }
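
The comment added in both `unwrap` and `try_unwrap` ("The ~ dtor needs to run if this code succeeds.") marks the one place the owning-box transmute is kept on purpose: a successful unwrap reclaims the allocation, so the box destructor should run. Below is a much-simplified sketch of that shape in present-day Rust, using the same illustrative types as the sketch above (`Box::from_raw` stands in for the transmute, `Result` for `Either`; the real unwrap protocol, blocking, and task machinery are omitted).

use std::sync::atomic::{AtomicUsize, Ordering};

// Same illustrative types as in the earlier sketch (not the real 2013 API).
struct ArcData<T> {
    count: AtomicUsize,
    data: Option<T>,
}

struct UnsafeArc<T> {
    data: *mut ArcData<T>,
}

impl<T> UnsafeArc<T> {
    // Much-simplified try_unwrap: a single compare_exchange replaces the
    // original's count check and unwrap protocol.
    fn try_unwrap(self) -> Result<T, UnsafeArc<T>> {
        unsafe {
            // Rebuild the owning box up front. If we turn out to be the sole
            // owner, letting `data` drop in the Ok branch is exactly the
            // "~ dtor" that the new comment says must run.
            let mut data: Box<ArcData<T>> = Box::from_raw(self.data);
            // Suppress this consumed handle's own destructor (the same job
            // cast::forget(self) does elsewhere in the diff).
            std::mem::forget(self);
            if data
                .count
                .compare_exchange(1, 0, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                // Sole owner: move the payload out, then `data` drops and the
                // ArcData allocation is freed.
                Ok(data.data.take().unwrap())
            } else {
                // Other handles exist: hand the allocation back untouched.
                Err(UnsafeArc { data: Box::into_raw(data) })
            }
        }
    }
}

Only the success path lets `data` drop; the failure path converts the box straight back into a raw pointer, so the allocation is freed exactly once either way.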
