diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs
index d410faca30d9e..0ac928817184d 100644
--- a/src/libstd/io/stdio.rs
+++ b/src/libstd/io/stdio.rs
@@ -6,7 +6,7 @@ use crate::cell::RefCell;
 use crate::fmt;
 use crate::io::lazy::Lazy;
 use crate::io::{self, BufReader, Initializer, IoSlice, IoSliceMut, LineWriter};
-use crate::sync::{Arc, Mutex, MutexGuard};
+use crate::sync::{Arc, Mutex, MutexGuard, Once};
 use crate::sys::stdio;
 use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
 use crate::thread::LocalKey;
@@ -493,7 +493,11 @@ pub fn stdout() -> Stdout {
             Ok(stdout) => Maybe::Real(stdout),
             _ => Maybe::Fake,
         };
-        Arc::new(ReentrantMutex::new(RefCell::new(LineWriter::new(stdout))))
+        unsafe {
+            let ret = Arc::new(ReentrantMutex::new(RefCell::new(LineWriter::new(stdout))));
+            ret.init();
+            return ret;
+        }
     }
 }

@@ -520,7 +524,7 @@ impl Stdout {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn lock(&self) -> StdoutLock<'_> {
-        StdoutLock { inner: self.inner.lock().unwrap_or_else(|e| e.into_inner()) }
+        StdoutLock { inner: self.inner.lock() }
     }
 }

@@ -581,7 +585,7 @@ impl fmt::Debug for StdoutLock<'_> {
 /// an error.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Stderr {
-    inner: Arc<ReentrantMutex<RefCell<Maybe<StderrRaw>>>>,
+    inner: &'static ReentrantMutex<RefCell<Maybe<StderrRaw>>>,
 }

 /// A locked reference to the `Stderr` handle.
@@ -639,19 +643,28 @@ pub struct StderrLock<'a> {
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn stderr() -> Stderr {
-    static INSTANCE: Lazy<ReentrantMutex<RefCell<Maybe<StderrRaw>>>> = Lazy::new();
-    return Stderr {
-        inner: unsafe { INSTANCE.get(stderr_init).expect("cannot access stderr during shutdown") },
-    };
-
-    fn stderr_init() -> Arc<ReentrantMutex<RefCell<Maybe<StderrRaw>>>> {
-        // This must not reentrantly access `INSTANCE`
-        let stderr = match stderr_raw() {
-            Ok(stderr) => Maybe::Real(stderr),
-            _ => Maybe::Fake,
-        };
-        Arc::new(ReentrantMutex::new(RefCell::new(stderr)))
-    }
+    // Note that unlike `stdout()` we don't use `Lazy` here which registers a
+    // destructor. Stderr is not buffered nor does the `stderr_raw` type consume
+    // any owned resources, so there's no need to run any destructors at some
+    // point in the future.
+    //
+    // This has the added benefit of allowing `stderr` to be usable during
+    // process shutdown as well!
+    static INSTANCE: ReentrantMutex<RefCell<Maybe<StderrRaw>>> =
+        unsafe { ReentrantMutex::new(RefCell::new(Maybe::Fake)) };
+
+    // When accessing stderr we need one-time initialization of the reentrant
+    // mutex, followed by one-time detection of whether we actually have a
+    // stderr handle or not. Afterwards we can just always use the now-filled-in
+    // `INSTANCE` value.
+    static INIT: Once = Once::new();
+    INIT.call_once(|| unsafe {
+        INSTANCE.init();
+        if let Ok(stderr) = stderr_raw() {
+            *INSTANCE.lock().borrow_mut() = Maybe::Real(stderr);
+        }
+    });
+    return Stderr { inner: &INSTANCE };
 }
@@ -677,7 +690,7 @@ impl Stderr {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn lock(&self) -> StderrLock<'_> {
-        StderrLock { inner: self.inner.lock().unwrap_or_else(|e| e.into_inner()) }
+        StderrLock { inner: self.inner.lock() }
     }
 }
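
The `stderr()` rewrite above boils down to one pattern: a `static` that is const-constructed in a not-yet-usable state, plus a `Once` that performs the one-time, in-place initialization, after which every caller simply borrows the static and no exit-time destructor is ever registered. A minimal stand-alone sketch of that pattern, assuming nothing beyond `std::sync::Once` (the `Handle`/`handle` names are invented for illustration; this is not std's internal code):

use std::sync::Once;
use std::sync::atomic::{AtomicBool, Ordering};

// Stand-in for the lazily detected stderr handle.
struct Handle {
    ready: AtomicBool,
}

// Const-constructed in a "not yet usable" state, like `INSTANCE` in the diff.
static INSTANCE: Handle = Handle { ready: AtomicBool::new(false) };
static INIT: Once = Once::new();

fn handle() -> &'static Handle {
    // The closure runs at most once across all threads; afterwards every
    // caller just borrows the already-initialized static.
    INIT.call_once(|| {
        INSTANCE.ready.store(true, Ordering::Release);
    });
    &INSTANCE
}

fn main() {
    assert!(handle().ready.load(Ordering::Acquire));
}

Because nothing here registers a destructor, the static stays usable for as long as the process runs, which is what lets `stderr()` keep working during shutdown.
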
diff --git a/src/libstd/sys/cloudabi/mutex.rs b/src/libstd/sys/cloudabi/mutex.rs
index 4aa25e2505271..580ab0e8ad863 100644
--- a/src/libstd/sys/cloudabi/mutex.rs
+++ b/src/libstd/sys/cloudabi/mutex.rs
@@ -53,16 +53,16 @@ pub struct ReentrantMutex {
 }

 impl ReentrantMutex {
-    pub unsafe fn uninitialized() -> ReentrantMutex {
+    pub const unsafe fn uninitialized() -> ReentrantMutex {
         ReentrantMutex {
             lock: UnsafeCell::new(MaybeUninit::uninit()),
             recursion: UnsafeCell::new(MaybeUninit::uninit()),
         }
     }

-    pub unsafe fn init(&mut self) {
-        self.lock = UnsafeCell::new(MaybeUninit::new(AtomicU32::new(abi::LOCK_UNLOCKED.0)));
-        self.recursion = UnsafeCell::new(MaybeUninit::new(0));
+    pub unsafe fn init(&self) {
+        *self.lock.get() = MaybeUninit::new(AtomicU32::new(abi::LOCK_UNLOCKED.0));
+        *self.recursion.get() = MaybeUninit::new(0);
     }

     pub unsafe fn try_lock(&self) -> bool {
diff --git a/src/libstd/sys/hermit/mutex.rs b/src/libstd/sys/hermit/mutex.rs
index b5c75f738d228..3d4813209cbc4 100644
--- a/src/libstd/sys/hermit/mutex.rs
+++ b/src/libstd/sys/hermit/mutex.rs
@@ -46,13 +46,13 @@ pub struct ReentrantMutex {
 }

 impl ReentrantMutex {
-    pub unsafe fn uninitialized() -> ReentrantMutex {
+    pub const unsafe fn uninitialized() -> ReentrantMutex {
         ReentrantMutex { inner: ptr::null() }
     }

     #[inline]
-    pub unsafe fn init(&mut self) {
-        let _ = abi::recmutex_init(&mut self.inner as *mut *const c_void);
+    pub unsafe fn init(&self) {
+        let _ = abi::recmutex_init(&self.inner as *const *const c_void as *mut _);
     }

     #[inline]
diff --git a/src/libstd/sys/sgx/mutex.rs b/src/libstd/sys/sgx/mutex.rs
index eebbea1b285ba..4911c2f538769 100644
--- a/src/libstd/sys/sgx/mutex.rs
+++ b/src/libstd/sys/sgx/mutex.rs
@@ -75,7 +75,7 @@ impl ReentrantMutex {
     }

     #[inline]
-    pub unsafe fn init(&mut self) {}
+    pub unsafe fn init(&self) {}

     #[inline]
     pub unsafe fn lock(&self) {
diff --git a/src/libstd/sys/unix/mutex.rs b/src/libstd/sys/unix/mutex.rs
index b38375a2e03c5..103d87e3d2f91 100644
--- a/src/libstd/sys/unix/mutex.rs
+++ b/src/libstd/sys/unix/mutex.rs
@@ -92,11 +92,11 @@ unsafe impl Send for ReentrantMutex {}
 unsafe impl Sync for ReentrantMutex {}

 impl ReentrantMutex {
-    pub unsafe fn uninitialized() -> ReentrantMutex {
+    pub const unsafe fn uninitialized() -> ReentrantMutex {
         ReentrantMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
     }

-    pub unsafe fn init(&mut self) {
+    pub unsafe fn init(&self) {
         let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
         let result = libc::pthread_mutexattr_init(attr.as_mut_ptr());
         debug_assert_eq!(result, 0);
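
Across the platform backends above the signature change is uniform: `uninitialized()` becomes `const` so the value can be built inside a `static` initializer, and `init()` takes `&self` rather than `&mut self`, because once the mutex lives in a `static` (or behind an `Arc`) no unique reference to it exists, so initialization has to go through interior mutability. A rough sketch of that shape, with a plain integer standing in for the real OS mutex object (illustrative only, not the actual `sys::ReentrantMutex`):

use std::cell::UnsafeCell;
use std::mem::MaybeUninit;

struct RawReentrantMutex {
    // Placeholder for whatever OS object the platform actually wraps.
    inner: UnsafeCell<MaybeUninit<u32>>,
}

// Needed so the type can be placed in a `static`.
unsafe impl Sync for RawReentrantMutex {}

impl RawReentrantMutex {
    // `const` so it can be evaluated when the `static` is created.
    const fn uninitialized() -> RawReentrantMutex {
        RawReentrantMutex { inner: UnsafeCell::new(MaybeUninit::uninit()) }
    }

    // Mirrors the diff's contract: call exactly once, after the value has
    // reached its final address; the write goes through the `UnsafeCell`
    // because no `&mut self` is available for a `static`.
    unsafe fn init(&self) {
        unsafe { *self.inner.get() = MaybeUninit::new(0) }; // pretend 0 means "unlocked"
    }
}

static LOCK: RawReentrantMutex = RawReentrantMutex::uninitialized();

fn main() {
    unsafe { LOCK.init() };
}
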
diff --git a/src/libstd/sys/vxworks/mutex.rs b/src/libstd/sys/vxworks/mutex.rs
index b38375a2e03c5..103d87e3d2f91 100644
--- a/src/libstd/sys/vxworks/mutex.rs
+++ b/src/libstd/sys/vxworks/mutex.rs
@@ -92,11 +92,11 @@ unsafe impl Send for ReentrantMutex {}
 unsafe impl Sync for ReentrantMutex {}

 impl ReentrantMutex {
-    pub unsafe fn uninitialized() -> ReentrantMutex {
+    pub const unsafe fn uninitialized() -> ReentrantMutex {
         ReentrantMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
     }

-    pub unsafe fn init(&mut self) {
+    pub unsafe fn init(&self) {
         let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
         let result = libc::pthread_mutexattr_init(attr.as_mut_ptr());
         debug_assert_eq!(result, 0);
diff --git a/src/libstd/sys/wasm/mutex.rs b/src/libstd/sys/wasm/mutex.rs
index 07238d087308f..7aaf1b3a343b6 100644
--- a/src/libstd/sys/wasm/mutex.rs
+++ b/src/libstd/sys/wasm/mutex.rs
@@ -47,11 +47,11 @@ impl Mutex {
 pub struct ReentrantMutex {}

 impl ReentrantMutex {
-    pub unsafe fn uninitialized() -> ReentrantMutex {
+    pub const unsafe fn uninitialized() -> ReentrantMutex {
         ReentrantMutex {}
     }

-    pub unsafe fn init(&mut self) {}
+    pub unsafe fn init(&self) {}

     pub unsafe fn lock(&self) {}

diff --git a/src/libstd/sys/wasm/mutex_atomics.rs b/src/libstd/sys/wasm/mutex_atomics.rs
index 90c628a19c22e..268a53bb5641c 100644
--- a/src/libstd/sys/wasm/mutex_atomics.rs
+++ b/src/libstd/sys/wasm/mutex_atomics.rs
@@ -80,11 +80,11 @@ unsafe impl Sync for ReentrantMutex {}
 // released when this recursion counter reaches 0.

 impl ReentrantMutex {
-    pub unsafe fn uninitialized() -> ReentrantMutex {
+    pub const unsafe fn uninitialized() -> ReentrantMutex {
         ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) }
     }

-    pub unsafe fn init(&mut self) {
+    pub unsafe fn init(&self) {
         // nothing to do...
     }

diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs
index 281eb294c65d8..63dfc640908e9 100644
--- a/src/libstd/sys/windows/mutex.rs
+++ b/src/libstd/sys/windows/mutex.rs
@@ -109,7 +109,7 @@ impl Mutex {
             0 => {}
             n => return n as *mut _,
         }
-        let mut re = box ReentrantMutex::uninitialized();
+        let re = box ReentrantMutex::uninitialized();
         re.init();
         let re = Box::into_raw(re);
         match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
@@ -157,11 +157,11 @@ unsafe impl Send for ReentrantMutex {}
 unsafe impl Sync for ReentrantMutex {}

 impl ReentrantMutex {
-    pub fn uninitialized() -> ReentrantMutex {
+    pub const fn uninitialized() -> ReentrantMutex {
         ReentrantMutex { inner: UnsafeCell::new(MaybeUninit::uninit()) }
     }

-    pub unsafe fn init(&mut self) {
+    pub unsafe fn init(&self) {
         c::InitializeCriticalSection((&mut *self.inner.get()).as_mut_ptr());
     }
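
The Windows hunk above also shows how the heap-allocated case satisfies the new contract: the value is boxed so its address is stable, `init()` runs while the mutex already sits at that final address, and only then is the raw pointer published with a compare-and-swap so racing threads agree on a single instance. A simplified sketch of that publish-after-init idiom (`Slot` and `get_or_init` are invented names, and `compare_exchange` stands in for the `compare_and_swap` call in the diff):

use std::sync::atomic::{AtomicUsize, Ordering};

struct Slot {
    ptr: AtomicUsize, // 0 means "not initialized yet"
}

impl Slot {
    const fn new() -> Slot {
        Slot { ptr: AtomicUsize::new(0) }
    }

    fn get_or_init(&self) -> *const u32 {
        // Fast path: someone already published an initialized value.
        match self.ptr.load(Ordering::SeqCst) {
            0 => {}
            n => return n as *const u32,
        }
        // Box it so the address is stable, initialize it in place, then publish.
        let raw = Box::into_raw(Box::new(42u32)); // stand-in for uninitialized() + init()
        match self.ptr.compare_exchange(0, raw as usize, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(_) => raw as *const u32,
            Err(winner) => {
                // Another thread won the race: free our copy and use theirs.
                unsafe { drop(Box::from_raw(raw)) };
                winner as *const u32
            }
        }
    }
}

static SLOT: Slot = Slot::new();

fn main() {
    assert_eq!(unsafe { *SLOT.get_or_init() }, 42);
}

Publishing the pointer only after `init()` has run is exactly what the "call `init` once the value is in its final resting place" rule in the `sys_common::remutex` changes that follow requires.
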
diff --git a/src/libstd/sys_common/remutex.rs b/src/libstd/sys_common/remutex.rs
index a1ad44a3666ed..4f19bbc467f33 100644
--- a/src/libstd/sys_common/remutex.rs
+++ b/src/libstd/sys_common/remutex.rs
@@ -3,7 +3,6 @@ use crate::marker;
 use crate::ops::Deref;
 use crate::panic::{RefUnwindSafe, UnwindSafe};
 use crate::sys::mutex as sys;
-use crate::sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
 /// A re-entrant mutual exclusion
 ///
 /// This mutex will block *other* threads waiting for the lock to become
@@ -11,8 +10,7 @@ use crate::sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
 /// available. The thread which has already locked the mutex can lock it
 /// multiple times without blocking, preventing a common source of deadlocks.
 pub struct ReentrantMutex<T> {
-    inner: Box<sys::ReentrantMutex>,
-    poison: poison::Flag,
+    inner: sys::ReentrantMutex,
     data: T,
 }
@@ -39,23 +37,30 @@ pub struct ReentrantMutexGuard<'a, T: 'a> {
     // funny underscores due to how Deref currently works (it disregards field
     // privacy).
     __lock: &'a ReentrantMutex<T>,
-    __poison: poison::Guard,
 }

 impl<T> !marker::Send for ReentrantMutexGuard<'_, T> {}

 impl<T> ReentrantMutex<T> {
     /// Creates a new reentrant mutex in an unlocked state.
-    pub fn new(t: T) -> ReentrantMutex<T> {
-        unsafe {
-            let mut mutex = ReentrantMutex {
-                inner: box sys::ReentrantMutex::uninitialized(),
-                poison: poison::Flag::new(),
-                data: t,
-            };
-            mutex.inner.init();
-            mutex
-        }
+    ///
+    /// # Unsafety
+    ///
+    /// This function is unsafe because it is required that `init` is called
+    /// once this mutex is in its final resting place, and only then are the
+    /// lock/unlock methods safe.
+    pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
+        ReentrantMutex { inner: sys::ReentrantMutex::uninitialized(), data: t }
+    }
+
+    /// Initializes this mutex so it's ready for use.
+    ///
+    /// # Unsafety
+    ///
+    /// Unsafe to call more than once, and must be called after this will no
+    /// longer move in memory.
+    pub unsafe fn init(&self) {
+        self.inner.init();
     }

     /// Acquires a mutex, blocking the current thread until it is able to do so.
@@ -70,7 +75,7 @@ impl<T> ReentrantMutex<T> {
     /// If another user of this mutex panicked while holding the mutex, then
     /// this call will return failure if the mutex would otherwise be
     /// acquired.
-    pub fn lock(&self) -> LockResult<ReentrantMutexGuard<'_, T>> {
+    pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
         unsafe { self.inner.lock() }
         ReentrantMutexGuard::new(&self)
     }
@@ -87,12 +92,8 @@ impl<T> ReentrantMutex<T> {
     /// If another user of this mutex panicked while holding the mutex, then
     /// this call will return failure if the mutex would otherwise be
     /// acquired.
-    pub fn try_lock(&self) -> TryLockResult<ReentrantMutexGuard<'_, T>> {
-        if unsafe { self.inner.try_lock() } {
-            Ok(ReentrantMutexGuard::new(&self)?)
-        } else {
-            Err(TryLockError::WouldBlock)
-        }
+    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
+        if unsafe { self.inner.try_lock() } { Some(ReentrantMutexGuard::new(&self)) } else { None }
     }
 }

@@ -108,11 +109,8 @@ impl<T> Drop for ReentrantMutex<T> {
 impl<T: fmt::Debug + 'static> fmt::Debug for ReentrantMutex<T> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self.try_lock() {
-            Ok(guard) => f.debug_struct("ReentrantMutex").field("data", &*guard).finish(),
-            Err(TryLockError::Poisoned(err)) => {
-                f.debug_struct("ReentrantMutex").field("data", &**err.get_ref()).finish()
-            }
-            Err(TryLockError::WouldBlock) => {
+            Some(guard) => f.debug_struct("ReentrantMutex").field("data", &*guard).finish(),
+            None => {
                 struct LockedPlaceholder;
                 impl fmt::Debug for LockedPlaceholder {
                     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -127,11 +125,8 @@ impl<T: fmt::Debug + 'static> fmt::Debug for ReentrantMutex<T> {
 }

 impl<'mutex, T> ReentrantMutexGuard<'mutex, T> {
-    fn new(lock: &'mutex ReentrantMutex<T>) -> LockResult<ReentrantMutexGuard<'mutex, T>> {
-        poison::map_result(lock.poison.borrow(), |guard| ReentrantMutexGuard {
-            __lock: lock,
-            __poison: guard,
-        })
+    fn new(lock: &'mutex ReentrantMutex<T>) -> ReentrantMutexGuard<'mutex, T> {
+        ReentrantMutexGuard { __lock: lock }
     }
 }

@@ -147,7 +142,6 @@ impl<T> Drop for ReentrantMutexGuard<'_, T> {
     #[inline]
     fn drop(&mut self) {
         unsafe {
-            self.__lock.poison.done(&self.__poison);
             self.__lock.inner.unlock();
         }
     }
@@ -162,13 +156,17 @@ mod tests {

     #[test]
     fn smoke() {
-        let m = ReentrantMutex::new(());
+        let m = unsafe {
+            let m = ReentrantMutex::new(());
+            m.init();
+            m
+        };
         {
-            let a = m.lock().unwrap();
+            let a = m.lock();
             {
-                let b = m.lock().unwrap();
+                let b = m.lock();
                 {
-                    let c = m.lock().unwrap();
+                    let c = m.lock();
                     assert_eq!(*c, ());
                 }
                 assert_eq!(*b, ());
@@ -179,15 +177,19 @@ mod tests {

     #[test]
     fn is_mutex() {
-        let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
+        let m = unsafe {
+            let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
+            m.init();
+            m
+        };
         let m2 = m.clone();
-        let lock = m.lock().unwrap();
+        let lock = m.lock();
         let child = thread::spawn(move || {
-            let lock = m2.lock().unwrap();
+            let lock = m2.lock();
             assert_eq!(*lock.borrow(), 4950);
         });
         for i in 0..100 {
-            let lock = m.lock().unwrap();
+            let lock = m.lock();
             *lock.borrow_mut() += i;
         }
         drop(lock);
@@ -196,17 +198,21 @@ mod tests {

     #[test]
     fn trylock_works() {
-        let m = Arc::new(ReentrantMutex::new(()));
+        let m = unsafe {
+            let m = Arc::new(ReentrantMutex::new(()));
+            m.init();
+            m
+        };
         let m2 = m.clone();
-        let _lock = m.try_lock().unwrap();
-        let _lock2 = m.try_lock().unwrap();
+        let _lock = m.try_lock();
+        let _lock2 = m.try_lock();
         thread::spawn(move || {
             let lock = m2.try_lock();
-            assert!(lock.is_err());
+            assert!(lock.is_none());
         })
         .join()
         .unwrap();
-        let _lock3 = m.try_lock().unwrap();
+        let _lock3 = m.try_lock();
     }

     pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
     impl Drop for Answer<'_> {
         fn drop(&mut self) {
             *self.0.borrow_mut() = 42;
         }
     }
-
-    #[test]
-    fn poison_works() {
-        let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
-        let mc = m.clone();
-        let result = thread::spawn(move || {
-            let lock = mc.lock().unwrap();
-            *lock.borrow_mut() = 1;
-            let lock2 = mc.lock().unwrap();
-            *lock.borrow_mut() = 2;
-            let _answer = Answer(lock2);
-            panic!("What the answer to my lifetimes dilemma is?");
-        })
-        .join();
-        assert!(result.is_err());
-        let r = m.lock().err().unwrap().into_inner();
-        assert_eq!(*r.borrow(), 42);
-    }
 }
diff --git a/src/test/ui/eprint-on-tls-drop.rs b/src/test/ui/eprint-on-tls-drop.rs
new file mode 100644
index 0000000000000..9c4800c1a3fa1
--- /dev/null
+++ b/src/test/ui/eprint-on-tls-drop.rs
@@ -0,0 +1,48 @@
+// run-pass
+// ignore-emscripten no processes
+
+use std::cell::RefCell;
+use std::env;
+use std::process::Command;
+
+fn main() {
+    let name = "YOU_ARE_THE_TEST";
+    if env::var(name).is_ok() {
+        std::thread::spawn(|| {
+            TLS.with(|f| f.borrow().ensure());
+        })
+        .join()
+        .unwrap();
+    } else {
+        let me = env::current_exe().unwrap();
+        let output = Command::new(&me).env(name, "1").output().unwrap();
+        println!("{:?}", output);
+        assert!(output.status.success());
+        let stderr = String::from_utf8(output.stderr).unwrap();
+        assert!(stderr.contains("hello new\n"));
+        assert!(stderr.contains("hello drop\n"));
+    }
+}
+
+struct Stuff {
+    _x: usize,
+}
+
+impl Stuff {
+    fn new() -> Self {
+        eprintln!("hello new");
+        Self { _x: 0 }
+    }
+
+    fn ensure(&self) {}
+}
+
+impl Drop for Stuff {
+    fn drop(&mut self) {
+        eprintln!("hello drop");
+    }
+}
+
+thread_local! {
+    static TLS: RefCell<Stuff> = RefCell::new(Stuff::new());
+}
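
The new test above captures the user-visible payoff: a value stored in `thread_local!` can print to stderr from its destructor, a path on which the old `Lazy`-based `stderr()` could fail with the now-removed "cannot access stderr during shutdown" panic. A trimmed-down variant of the same scenario as ordinary user code (`Logger`/`LOGGER` are arbitrary names; this is an illustration, not part of the patch):

use std::cell::RefCell;

struct Logger;

impl Drop for Logger {
    fn drop(&mut self) {
        // Runs during TLS teardown; with this patch the write goes straight to
        // the static, destructor-free stderr handle.
        eprintln!("flushed on thread exit");
    }
}

thread_local! {
    static LOGGER: RefCell<Logger> = RefCell::new(Logger);
}

fn main() {
    std::thread::spawn(|| {
        // Touch the TLS slot so its destructor runs when the thread exits.
        LOGGER.with(|_| {});
    })
    .join()
    .unwrap();
}
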