|
| 1 | +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT |
| 2 | +// file at the top-level directory of this distribution and at |
| 3 | +// http://rust-lang.org/COPYRIGHT. |
| 4 | +// |
| 5 | +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or |
| 6 | +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
| 7 | +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your |
| 8 | +// option. This file may not be copied, modified, or distributed |
| 9 | +// except according to those terms. |
| 10 | + |
| 11 | +//! A native mutex and condition variable type |
| 12 | +//! |
| 13 | +//! This module contains bindings to the platform's native mutex/condition |
| 14 | +//! variable primitives. It provides a single type, `Mutex`, which can be |
| 15 | +//! statically initialized via the `MUTEX_INIT` value. This object serves as both a |
| 16 | +//! mutex and a condition variable simultaneously. |
| 17 | +//! |
| 18 | +//! The lock is lazily initialized, but it can only be unsafely destroyed. A |
| 19 | +//! statically initialized lock doesn't necessarily have a time at which it can |
| 20 | +//! get deallocated. For this reason, there is no `Drop` implementation of the |
| 21 | +//! mutex, but rather the `destroy()` method must be invoked manually if |
| 22 | +//! destruction of the mutex is desired. |
| 23 | +//! |
| 24 | +//! It is not recommended to use this type for idiomatic rust use. This type is |
| 25 | +//! appropriate where no other options are available, but other rust concurrency |
| 26 | +//! primitives should be used before this type. |
| 27 | +//! |
| 28 | +//! # Example |
| 29 | +//! |
| 30 | +//! use std::unstable::mutex::{Mutex, MUTEX_INIT}; |
| 31 | +//! |
| 32 | +//! // Use a statically initialized mutex |
| 33 | +//! static mut lock: Mutex = MUTEX_INIT; |
| 34 | +//! |
| 35 | +//! unsafe { |
| 36 | +//! lock.lock(); |
| 37 | +//! lock.unlock(); |
| 38 | +//! } |
| 39 | +//! |
| 40 | +//! // Use a normally initialized mutex
| 41 | +//! let mut lock = Mutex::new(); |
| 42 | +//! unsafe { |
| 43 | +//! lock.lock(); |
| 44 | +//! lock.unlock(); |
| 45 | +//! lock.destroy(); |
| 46 | +//! } |
| 47 | +
|
| 48 | +#[allow(non_camel_case_types)]; |
| 49 | + |
| 50 | +use libc::c_void; |
| 51 | +use unstable::atomics; |
| 52 | + |
pub struct Mutex {
    // pointers for the lock/cond handles, atomically updated.
    // Stored as uint addresses rather than raw pointers so they can live in
    // a `static` and be lazily initialized; a value of 0 means the handle
    // has not been created yet (see `getlock`/`getcond`).
    priv lock: atomics::AtomicUint,
    priv cond: atomics::AtomicUint,
}
| 58 | + |
/// Static initializer for `Mutex`. Both handles start as 0, so the
/// underlying OS objects are allocated lazily on first use.
pub static MUTEX_INIT: Mutex = Mutex {
    lock: atomics::INIT_ATOMIC_UINT,
    cond: atomics::INIT_ATOMIC_UINT,
};
| 63 | + |
impl Mutex {
    /// Creates a new mutex, with the lock/condition variable pre-initialized
    pub unsafe fn new() -> Mutex {
        Mutex {
            lock: atomics::AtomicUint::new(imp::init_lock() as uint),
            cond: atomics::AtomicUint::new(imp::init_cond() as uint),
        }
    }

    /// Creates a new copy of this mutex. This is an unsafe operation because
    /// there is no reference counting performed on this type.
    ///
    /// This function may only be called on mutexes which have had both the
    /// internal condition variable and lock initialized. This means that the
    /// mutex must have been created via `new`, or usage of it has already
    /// initialized the internal handles.
    ///
    /// This is a dangerous function to call as both this mutex and the returned
    /// mutex will share the same handles to the underlying mutex/condition
    /// variable. Care must be taken to ensure that deallocation happens
    /// accordingly.
    pub unsafe fn clone(&self) -> Mutex {
        let lock = self.lock.load(atomics::Relaxed);
        let cond = self.cond.load(atomics::Relaxed);
        // A handle of 0 means "never initialized" — cloning such a mutex
        // would give the copy independent (and thus different) handles.
        assert!(lock != 0);
        assert!(cond != 0);
        Mutex {
            lock: atomics::AtomicUint::new(lock),
            cond: atomics::AtomicUint::new(cond),
        }
    }

    /// Acquires this lock. This assumes that the current thread does not
    /// already hold the lock.
    pub unsafe fn lock(&mut self) { imp::lock(self.getlock()) }

    /// Attempts to acquire the lock. The value returned is whether the lock was
    /// acquired or not
    pub unsafe fn trylock(&mut self) -> bool { imp::trylock(self.getlock()) }

    /// Unlocks the lock. This assumes that the current thread already holds the
    /// lock.
    pub unsafe fn unlock(&mut self) { imp::unlock(self.getlock()) }

    /// Block on the internal condition variable.
    ///
    /// This function assumes that the lock is already held
    pub unsafe fn wait(&mut self) { imp::wait(self.getcond(), self.getlock()) }

    /// Signals a thread in `wait` to wake up
    pub unsafe fn signal(&mut self) { imp::signal(self.getcond()) }

    /// This function is especially unsafe because there are no guarantees made
    /// that no other thread is currently holding the lock or waiting on the
    /// condition variable contained inside.
    pub unsafe fn destroy(&mut self) {
        // swap(0) both frees the handles and resets the fields to the
        // "uninitialized" state, so a later use would lazily re-create them.
        imp::free_lock(self.lock.swap(0, atomics::Relaxed));
        imp::free_cond(self.cond.swap(0, atomics::Relaxed));
    }

    // Returns the OS lock handle, lazily creating it on first use.
    unsafe fn getlock(&mut self) -> *c_void {
        // Fast path: already initialized.
        match self.lock.load(atomics::Relaxed) {
            0 => {}
            n => return n as *c_void
        }
        // Slow path: create a handle and try to install it. If another
        // thread raced us and won the compare_and_swap, free ours and use
        // the winner's handle instead.
        let lock = imp::init_lock();
        match self.lock.compare_and_swap(0, lock, atomics::SeqCst) {
            0 => return lock as *c_void,
            _ => {}
        }
        imp::free_lock(lock);
        return self.lock.load(atomics::Relaxed) as *c_void;
    }

    // Returns the OS condition-variable handle, lazily creating it on first
    // use. Same race-resolution scheme as `getlock`.
    unsafe fn getcond(&mut self) -> *c_void {
        match self.cond.load(atomics::Relaxed) {
            0 => {}
            n => return n as *c_void
        }
        let cond = imp::init_cond();
        match self.cond.compare_and_swap(0, cond, atomics::SeqCst) {
            0 => return cond as *c_void,
            _ => {}
        }
        // Lost the race: another thread installed a handle first.
        imp::free_cond(cond);
        return self.cond.load(atomics::Relaxed) as *c_void;
    }
}
| 152 | + |
#[cfg(unix)]
mod imp {
    use libc::c_void;
    use libc;
    use ptr;

    // pthread types are opaque to Rust: their size is platform-dependent,
    // so they are treated as c_void and heap-allocated with sizes obtained
    // from the C shims below.
    type pthread_mutex_t = libc::c_void;
    type pthread_mutexattr_t = libc::c_void;
    type pthread_cond_t = libc::c_void;
    type pthread_condattr_t = libc::c_void;

    // Allocates and initializes a pthread mutex; returns its address.
    pub unsafe fn init_lock() -> uint {
        let block = libc::malloc(rust_pthread_mutex_t_size() as libc::size_t);
        assert!(!block.is_null());
        // null attr => default (non-recursive) mutex attributes.
        let n = pthread_mutex_init(block, ptr::null());
        assert_eq!(n, 0);
        return block as uint;
    }

    // Allocates and initializes a pthread condition variable; returns its
    // address.
    pub unsafe fn init_cond() -> uint {
        let block = libc::malloc(rust_pthread_cond_t_size() as libc::size_t);
        assert!(!block.is_null());
        let n = pthread_cond_init(block, ptr::null());
        assert_eq!(n, 0);
        return block as uint;
    }

    // Destroys and frees a mutex created by `init_lock`. Must not be
    // called while the mutex is locked (pthread_mutex_destroy would fail).
    pub unsafe fn free_lock(h: uint) {
        let block = h as *c_void;
        assert_eq!(pthread_mutex_destroy(block), 0);
        libc::free(block);
    }

    // Destroys and frees a condvar created by `init_cond`. Must not be
    // called while threads are waiting on it.
    pub unsafe fn free_cond(h: uint) {
        let block = h as *c_void;
        assert_eq!(pthread_cond_destroy(block), 0);
        libc::free(block);
    }

    pub unsafe fn lock(l: *pthread_mutex_t) {
        assert_eq!(pthread_mutex_lock(l), 0);
    }

    // Returns true if the lock was acquired; nonzero (e.g. EBUSY) means it
    // was already held.
    pub unsafe fn trylock(l: *c_void) -> bool {
        pthread_mutex_trylock(l) == 0
    }

    pub unsafe fn unlock(l: *pthread_mutex_t) {
        assert_eq!(pthread_mutex_unlock(l), 0);
    }

    // Atomically releases `m` and blocks on `cond`; `m` is re-acquired
    // before returning. The caller must hold `m`.
    pub unsafe fn wait(cond: *pthread_cond_t, m: *pthread_mutex_t) {
        assert_eq!(pthread_cond_wait(cond, m), 0);
    }

    pub unsafe fn signal(cond: *pthread_cond_t) {
        assert_eq!(pthread_cond_signal(cond), 0);
    }

    // C shims exposing sizeof(pthread_mutex_t) / sizeof(pthread_cond_t),
    // which are not knowable from Rust.
    extern {
        fn rust_pthread_mutex_t_size() -> libc::c_int;
        fn rust_pthread_cond_t_size() -> libc::c_int;
    }

    extern {
        fn pthread_mutex_init(lock: *pthread_mutex_t,
                              attr: *pthread_mutexattr_t) -> libc::c_int;
        fn pthread_mutex_destroy(lock: *pthread_mutex_t) -> libc::c_int;
        fn pthread_cond_init(cond: *pthread_cond_t,
                             attr: *pthread_condattr_t) -> libc::c_int;
        fn pthread_cond_destroy(cond: *pthread_cond_t) -> libc::c_int;
        fn pthread_mutex_lock(lock: *pthread_mutex_t) -> libc::c_int;
        fn pthread_mutex_trylock(lock: *pthread_mutex_t) -> libc::c_int;
        fn pthread_mutex_unlock(lock: *pthread_mutex_t) -> libc::c_int;

        fn pthread_cond_wait(cond: *pthread_cond_t,
                             lock: *pthread_mutex_t) -> libc::c_int;
        fn pthread_cond_signal(cond: *pthread_cond_t) -> libc::c_int;
    }
}
| 233 | + |
| 234 | +#[cfg(windows)] |
| 235 | +mod imp { |
| 236 | + use libc; |
| 237 | + use libc::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES, c_void, DWORD, LPCSTR}; |
| 238 | + use ptr; |
| 239 | + type LPCRITICAL_SECTION = *c_void; |
| 240 | + static SPIN_COUNT: DWORD = 4000; |
| 241 | + |
| 242 | + pub unsafe fn init_lock() -> uint { |
| 243 | + let block = libc::malloc(rust_crit_section_size() as libc::size_t); |
| 244 | + assert!(!block.is_null()); |
| 245 | + InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT); |
| 246 | + return block as uint; |
| 247 | + } |
| 248 | + |
| 249 | + pub unsafe fn init_cond() -> uint { |
| 250 | + return CreateEventA(ptr::mut_null(), libc::FALSE, libc::FALSE, |
| 251 | + ptr::null()) as uint; |
| 252 | + } |
| 253 | + |
| 254 | + pub unsafe fn free_lock(h: uint) { |
| 255 | + DeleteCriticalSection(h as LPCRITICAL_SECTION); |
| 256 | + libc::free(h as *c_void); |
| 257 | + } |
| 258 | + |
| 259 | + pub unsafe fn free_cond(h: uint) { |
| 260 | + let block = h as HANDLE; |
| 261 | + libc::CloseHandle(block); |
| 262 | + } |
| 263 | + |
| 264 | + pub unsafe fn lock(l: *c_void) { |
| 265 | + EnterCriticalSection(l as LPCRITICAL_SECTION) |
| 266 | + } |
| 267 | + |
| 268 | + pub unsafe fn trylock(l: *c_void) -> bool { |
| 269 | + TryEnterCriticalSection(l as LPCRITICAL_SECTION) != 0 |
| 270 | + } |
| 271 | + |
| 272 | + pub unsafe fn unlock(l: *c_void) { |
| 273 | + LeaveCriticalSection(l as LPCRITICAL_SECTION) |
| 274 | + } |
| 275 | + |
| 276 | + pub unsafe fn wait(cond: *c_void, m: *c_void) { |
| 277 | + unlock(m); |
| 278 | + WaitForSingleObject(cond as HANDLE, 0); |
| 279 | + lock(m); |
| 280 | + } |
| 281 | + |
| 282 | + pub unsafe fn signal(cond: *c_void) { |
| 283 | + assert!(SetEvent(cond as HANDLE) != 0); |
| 284 | + } |
| 285 | + |
| 286 | + extern { |
| 287 | + fn rust_crit_section_size() -> libc::c_int; |
| 288 | + } |
| 289 | + |
| 290 | + extern "system" { |
| 291 | + fn CreateEventA(lpSecurityAttributes: LPSECURITY_ATTRIBUTES, |
| 292 | + bManualReset: BOOL, |
| 293 | + bInitialState: BOOL, |
| 294 | + lpName: LPCSTR) -> HANDLE; |
| 295 | + fn InitializeCriticalSectionAndSpinCount( |
| 296 | + lpCriticalSection: LPCRITICAL_SECTION, |
| 297 | + dwSpinCount: DWORD) -> BOOL; |
| 298 | + fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION); |
| 299 | + fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION); |
| 300 | + fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION); |
| 301 | + fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL; |
| 302 | + fn SetEvent(hEvent: HANDLE) -> BOOL; |
| 303 | + fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD; |
| 304 | + } |
| 305 | +} |
| 306 | + |
| 307 | +#[cfg(test)] |
| 308 | +mod test { |
| 309 | + use super::{Mutex, MUTEX_INIT}; |
| 310 | + use rt::thread::Thread; |
| 311 | + |
| 312 | + #[test] |
| 313 | + fn somke_lock() { |
| 314 | + static mut lock: Mutex = MUTEX_INIT; |
| 315 | + unsafe { |
| 316 | + lock.lock(); |
| 317 | + lock.unlock(); |
| 318 | + } |
| 319 | + } |
| 320 | + |
| 321 | + #[test] |
| 322 | + fn somke_cond() { |
| 323 | + static mut lock: Mutex = MUTEX_INIT; |
| 324 | + unsafe { |
| 325 | + let t = do Thread::start { |
| 326 | + lock.lock(); |
| 327 | + lock.signal(); |
| 328 | + lock.unlock(); |
| 329 | + }; |
| 330 | + lock.lock(); |
| 331 | + lock.wait(); |
| 332 | + lock.unlock(); |
| 333 | + t.join(); |
| 334 | + } |
| 335 | + } |
| 336 | +} |
0 commit comments