From 46679bc9eff4f7f7414bc162c0e6c00e7b91cc39 Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 26 Jan 2020 14:25:09 -0600 Subject: [PATCH 01/31] Add shims for RwLock::try_read/RwLock::try_write --- src/shims/foreign_items/posix.rs | 2 ++ tests/run-pass/sync.rs | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index c9fd59c693..4be63804a4 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -271,8 +271,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx | "pthread_mutex_unlock" | "pthread_mutex_destroy" | "pthread_rwlock_rdlock" + | "pthread_rwlock_tryrdlock" | "pthread_rwlock_unlock" | "pthread_rwlock_wrlock" + | "pthread_rwlock_trywrlock" | "pthread_rwlock_destroy" | "pthread_condattr_init" | "pthread_condattr_setclock" diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index 54d79566ea..14243349f9 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -12,7 +12,9 @@ fn main() { { let rw = sync::RwLock::new(0); drop(rw.read()); + drop(rw.try_read()); drop(rw.write()); + drop(rw.try_write()); drop(rw); } } From 88f319fb4c0597856d62ee67eba6354b496cbe8f Mon Sep 17 00:00:00 2001 From: David Cook Date: Mon, 27 Jan 2020 21:49:06 -0600 Subject: [PATCH 02/31] Add failing tests for mutex and rwlock --- src/shims/foreign_items/posix.rs | 1 + tests/run-pass/sync.rs | 73 +++++++++++++++++++++++++++++--- 2 files changed, 68 insertions(+), 6 deletions(-) diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 4be63804a4..061ae93d8f 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -268,6 +268,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx | "pthread_mutex_init" | "pthread_mutexattr_destroy" | "pthread_mutex_lock" + | "pthread_mutex_trylock" | "pthread_mutex_unlock" | "pthread_mutex_destroy" | 
"pthread_rwlock_rdlock" diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index 14243349f9..6a0b41d5f6 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -1,20 +1,81 @@ // Just instantiate some data structures to make sure we got all their foreign items covered. // Requires full MIR on Windows. +#![feature(rustc_private)] + use std::sync; +extern crate libc; + fn main() { let m = sync::Mutex::new(0); - drop(m.lock()); + { + let _guard = m.lock(); + let try_lock_error = m.try_lock().unwrap_err(); + if let sync::TryLockError::Poisoned(e) = try_lock_error { + panic!("{}", e); + } + } + drop(m.try_lock().unwrap()); drop(m); #[cfg(not(target_os = "windows"))] // TODO: implement RwLock on Windows { let rw = sync::RwLock::new(0); - drop(rw.read()); - drop(rw.try_read()); - drop(rw.write()); - drop(rw.try_write()); - drop(rw); + { + let _read_guard = rw.read().unwrap(); + drop(rw.read().unwrap()); + drop(rw.try_read().unwrap()); + let try_lock_error = rw.try_write().unwrap_err(); + if let sync::TryLockError::Poisoned(e) = try_lock_error { + panic!("{}", e); + } + } + + { + let _write_guard = rw.write().unwrap(); + let try_lock_error = rw.try_read().unwrap_err(); + if let sync::TryLockError::Poisoned(e) = try_lock_error { + panic!("{}", e); + } + let try_lock_error = rw.try_write().unwrap_err(); + if let sync::TryLockError::Poisoned(e) = try_lock_error { + panic!("{}", e); + } + } + + // need to go a layer deeper and test the behavior of libc functions, because + // std::sys::unix::rwlock::RWLock keeps track of write_locked and num_readers + + unsafe { + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, std::ptr::null_mut()), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + 
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); + } + + unsafe { + let mut rw: libc::pthread_rwlock_t = std::mem::zeroed(); + assert_eq!(libc::pthread_rwlock_init(&mut rw as *mut _, std::ptr::null_mut()), 0); + + assert_eq!(libc::pthread_rwlock_rdlock(&mut rw as *mut _), 0); + assert_eq!(libc::pthread_rwlock_rdlock(&mut rw as *mut _), 0); + assert_eq!(libc::pthread_rwlock_unlock(&mut rw as *mut _), 0); + assert_eq!(libc::pthread_rwlock_tryrdlock(&mut rw as *mut _), 0); + assert_eq!(libc::pthread_rwlock_unlock(&mut rw as *mut _), 0); + assert_eq!(libc::pthread_rwlock_trywrlock(&mut rw as *mut _), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_unlock(&mut rw as *mut _), 0); + + assert_eq!(libc::pthread_rwlock_wrlock(&mut rw as *mut _), 0); + assert_eq!(libc::pthread_rwlock_tryrdlock(&mut rw as *mut _), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_trywrlock(&mut rw as *mut _), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_unlock(&mut rw as *mut _), 0); + + assert_eq!(libc::pthread_rwlock_destroy(&mut rw as *mut _), 0); + } } } From c2683dad34f6f51761661840de8164dd001bb782 Mon Sep 17 00:00:00 2001 From: David Cook Date: Tue, 28 Jan 2020 21:07:09 -0600 Subject: [PATCH 03/31] Clean up test case --- tests/run-pass/sync.rs | 65 +++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 33 deletions(-) diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index 6a0b41d5f6..8c92b47fb2 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -3,45 +3,33 @@ #![feature(rustc_private)] -use std::sync; +use std::sync::{Mutex, RwLock, TryLockError}; extern crate libc; fn main() { - let m = sync::Mutex::new(0); + let m = Mutex::new(0); { let _guard = m.lock(); - let try_lock_error = m.try_lock().unwrap_err(); - if let sync::TryLockError::Poisoned(e) = try_lock_error { - panic!("{}", e); - } 
+ assert!(m.try_lock().unwrap_err().would_block()); } drop(m.try_lock().unwrap()); drop(m); #[cfg(not(target_os = "windows"))] // TODO: implement RwLock on Windows { - let rw = sync::RwLock::new(0); + let rw = RwLock::new(0); { let _read_guard = rw.read().unwrap(); drop(rw.read().unwrap()); drop(rw.try_read().unwrap()); - let try_lock_error = rw.try_write().unwrap_err(); - if let sync::TryLockError::Poisoned(e) = try_lock_error { - panic!("{}", e); - } + assert!(rw.try_write().unwrap_err().would_block()); } { let _write_guard = rw.write().unwrap(); - let try_lock_error = rw.try_read().unwrap_err(); - if let sync::TryLockError::Poisoned(e) = try_lock_error { - panic!("{}", e); - } - let try_lock_error = rw.try_write().unwrap_err(); - if let sync::TryLockError::Poisoned(e) = try_lock_error { - panic!("{}", e); - } + assert!(rw.try_read().unwrap_err().would_block()); + assert!(rw.try_write().unwrap_err().would_block()); } // need to go a layer deeper and test the behavior of libc functions, because @@ -58,24 +46,35 @@ fn main() { assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); } + let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); unsafe { - let mut rw: libc::pthread_rwlock_t = std::mem::zeroed(); - assert_eq!(libc::pthread_rwlock_init(&mut rw as *mut _, std::ptr::null_mut()), 0); + assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + + assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); - 
assert_eq!(libc::pthread_rwlock_rdlock(&mut rw as *mut _), 0); - assert_eq!(libc::pthread_rwlock_rdlock(&mut rw as *mut _), 0); - assert_eq!(libc::pthread_rwlock_unlock(&mut rw as *mut _), 0); - assert_eq!(libc::pthread_rwlock_tryrdlock(&mut rw as *mut _), 0); - assert_eq!(libc::pthread_rwlock_unlock(&mut rw as *mut _), 0); - assert_eq!(libc::pthread_rwlock_trywrlock(&mut rw as *mut _), libc::EBUSY); - assert_eq!(libc::pthread_rwlock_unlock(&mut rw as *mut _), 0); + assert_eq!(libc::pthread_rwlock_destroy(rw.get()), 0); + } + } +} - assert_eq!(libc::pthread_rwlock_wrlock(&mut rw as *mut _), 0); - assert_eq!(libc::pthread_rwlock_tryrdlock(&mut rw as *mut _), libc::EBUSY); - assert_eq!(libc::pthread_rwlock_trywrlock(&mut rw as *mut _), libc::EBUSY); - assert_eq!(libc::pthread_rwlock_unlock(&mut rw as *mut _), 0); +trait TryLockErrorExt { + fn would_block(&self) -> bool; +} - assert_eq!(libc::pthread_rwlock_destroy(&mut rw as *mut _), 0); +impl TryLockErrorExt for TryLockError { + fn would_block(&self) -> bool { + match self { + TryLockError::WouldBlock => true, + TryLockError::Poisoned(_) => false, } } } From dd9896b0f8752c71c82c5d538ec0b115ffb5cf4e Mon Sep 17 00:00:00 2001 From: David Cook Date: Mon, 17 Feb 2020 21:30:24 -0600 Subject: [PATCH 04/31] Implement mutex and rwlock functions --- src/lib.rs | 1 + src/shims/foreign_items/posix.rs | 91 +++++-- src/shims/mod.rs | 1 + src/shims/sync.rs | 436 +++++++++++++++++++++++++++++++ tests/run-pass/sync.rs | 97 ++++--- 5 files changed, 567 insertions(+), 59 deletions(-) create mode 100644 src/shims/sync.rs diff --git a/src/lib.rs b/src/lib.rs index c04fbfeab9..2f381b4a34 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -39,6 +39,7 @@ pub use crate::shims::fs::{DirHandler, EvalContextExt as FileEvalContextExt, Fil pub use crate::shims::intrinsics::EvalContextExt as IntrinsicsEvalContextExt; pub use crate::shims::os_str::EvalContextExt as OsStrEvalContextExt; pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as 
PanicEvalContextExt}; +pub use crate::shims::sync::{EvalContextExt as SyncEvalContextExt}; pub use crate::shims::time::EvalContextExt as TimeEvalContextExt; pub use crate::shims::tls::{EvalContextExt as TlsEvalContextExt, TlsData}; pub use crate::shims::EvalContextExt as ShimsEvalContextExt; diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 061ae93d8f..2b9e94ba11 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -255,28 +255,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_null(dest)?; } - // Incomplete shims that we "stub out" just to get pre-main initialziation code to work. + // Incomplete shims that we "stub out" just to get pre-main initialization code to work. // These shims are enabled only when the caller is in the standard library. | "pthread_attr_init" | "pthread_attr_destroy" | "pthread_self" - | "pthread_attr_setstacksize" if this.frame().instance.to_string().starts_with("std::sys::unix::") => { - this.write_null(dest)?; - } - | "pthread_mutexattr_init" - | "pthread_mutexattr_settype" - | "pthread_mutex_init" - | "pthread_mutexattr_destroy" - | "pthread_mutex_lock" - | "pthread_mutex_trylock" - | "pthread_mutex_unlock" - | "pthread_mutex_destroy" - | "pthread_rwlock_rdlock" - | "pthread_rwlock_tryrdlock" - | "pthread_rwlock_unlock" - | "pthread_rwlock_wrlock" - | "pthread_rwlock_trywrlock" - | "pthread_rwlock_destroy" + | "pthread_attr_setstacksize" | "pthread_condattr_init" | "pthread_condattr_setclock" | "pthread_cond_init" @@ -285,6 +269,77 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx => { this.write_null(dest)?; } + + "pthread_mutexattr_init" => { + let result = this.pthread_mutexattr_init(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_mutexattr_settype" => { + let result = this.pthread_mutexattr_settype(args[0], args[1])?; + 
this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_mutexattr_destroy" => { + let result = this.pthread_mutexattr_destroy(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_mutex_init" => { + let result = this.pthread_mutex_init(args[0], args[1])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_mutex_lock" => { + let result = this.pthread_mutex_lock(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_mutex_trylock" => { + let result = this.pthread_mutex_trylock(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_mutex_unlock" => { + let result = this.pthread_mutex_unlock(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_mutex_destroy" => { + let result = this.pthread_mutex_destroy(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_rwlock_rdlock" => { + let result = this.pthread_rwlock_rdlock(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_rwlock_tryrdlock" => { + let result = this.pthread_rwlock_tryrdlock(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_rwlock_wrlock" => { + let result = this.pthread_rwlock_wrlock(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_rwlock_trywrlock" => { + let result = this.pthread_rwlock_trywrlock(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_rwlock_unlock" => { + let result = this.pthread_rwlock_unlock(args[0])?; + this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + + "pthread_rwlock_destroy" => { + let result = this.pthread_rwlock_destroy(args[0])?; + 
this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + } + | "signal" | "sigaction" | "sigaltstack" diff --git a/src/shims/mod.rs b/src/shims/mod.rs index e5db537cff..764e404141 100644 --- a/src/shims/mod.rs +++ b/src/shims/mod.rs @@ -5,6 +5,7 @@ pub mod fs; pub mod intrinsics; pub mod os_str; pub mod panic; +pub mod sync; pub mod time; pub mod tls; diff --git a/src/shims/sync.rs b/src/shims/sync.rs new file mode 100644 index 0000000000..c932958662 --- /dev/null +++ b/src/shims/sync.rs @@ -0,0 +1,436 @@ +use rustc_middle::ty::{TyKind, TypeAndMut}; +use rustc_target::abi::{LayoutOf, Size}; + +use crate::stacked_borrows::Tag; +use crate::*; + +impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} +pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { + // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform + // memory layout: store an i32 in the first four bytes equal to the + // corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL) + + fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, attr_op, 4)?; + + let attr = this.read_scalar(attr_op)?.not_undef()?; + if this.is_null(attr)? { + return this.eval_libc_i32("EINVAL"); + } + + let attr_place = this.deref_operand(attr_op)?; + let i32_layout = this.layout_of(this.tcx.types.i32)?; + let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, this)?; + let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?; + this.write_scalar(default_kind, kind_place.into())?; + + Ok(0) + } + + fn pthread_mutexattr_settype( + &mut self, + attr_op: OpTy<'tcx, Tag>, + kind_op: OpTy<'tcx, Tag>, + ) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, attr_op, 4)?; + + let attr = this.read_scalar(attr_op)?.not_undef()?; + if this.is_null(attr)? 
{ + return this.eval_libc_i32("EINVAL"); + } + + let kind = this.read_scalar(kind_op)?.not_undef()?; + if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? || + kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? || + kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { + let attr_place = this.deref_operand(attr_op)?; + let i32_layout = this.layout_of(this.tcx.types.i32)?; + let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, this)?; + this.write_scalar(kind, kind_place.into())?; + } else { + let einval = this.eval_libc_i32("EINVAL")?; + return Ok(einval); + } + + Ok(0) + } + + fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, attr_op, 4)?; + + let attr = this.read_scalar(attr_op)?.not_undef()?; + if this.is_null(attr)? { + return this.eval_libc_i32("EINVAL"); + } + + let attr_place = this.deref_operand(attr_op)?; + let i32_layout = this.layout_of(this.tcx.types.i32)?; + let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, this)?; + this.write_scalar(ScalarMaybeUndef::Undef, kind_place.into())?; + + Ok(0) + } + + // pthread_mutex_t is between 24 and 48 bytes, depending on the platform + // memory layout: + // bytes 0-3: count of how many times this mutex has been locked, as a u32 + // bytes 12-15: mutex kind, as an i32 + // (the kind should be at this offset for compatibility with the static + // initializer macro) + + fn pthread_mutex_init( + &mut self, + mutex_op: OpTy<'tcx, Tag>, + attr_op: OpTy<'tcx, Tag>, + ) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, mutex_op, 16)?; + check_ptr_target_min_size(this, attr_op, 4)?; + + let mutex = this.read_scalar(mutex_op)?.not_undef()?; + if this.is_null(mutex)? 
{ + return this.eval_libc_i32("EINVAL"); + } + let mutex_place = this.deref_operand(mutex_op)?; + + let i32_layout = this.layout_of(this.tcx.types.i32)?; + + let attr = this.read_scalar(attr_op)?.not_undef()?; + let kind = if this.is_null(attr)? { + this.eval_libc("PTHREAD_MUTEX_DEFAULT")? + } else { + let attr_place = this.deref_operand(attr_op)?; + let attr_kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, this)?; + this.read_scalar(attr_kind_place.into())?.not_undef()? + }; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let locked_count_place = mutex_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + this.write_scalar(Scalar::from_u32(0), locked_count_place.into())?; + + let mutex_kind_place = mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, &*this.tcx)?; + this.write_scalar(kind, mutex_kind_place.into())?; + + Ok(0) + } + + fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, mutex_op, 16)?; + + let mutex = this.read_scalar(mutex_op)?.not_undef()?; + if this.is_null(mutex)? { + return this.eval_libc_i32("EINVAL"); + } + let mutex_place = this.deref_operand(mutex_op)?; + + let i32_layout = this.layout_of(this.tcx.types.i32)?; + let kind_place = mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, this)?; + let kind = this.read_scalar(kind_place.into())?.not_undef()?; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let locked_count_place = mutex_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let locked_count = this.read_scalar(locked_count_place.into())?.to_u32()?; + + if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? 
{ + if locked_count == 0 { + this.write_scalar(Scalar::from_u32(1), locked_count_place.into())?; + Ok(0) + } else { + throw_unsup_format!("Deadlock due to locking a PTHREAD_MUTEX_NORMAL mutex twice"); + } + } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { + if locked_count == 0 { + this.write_scalar(Scalar::from_u32(1), locked_count_place.into())?; + Ok(0) + } else { + this.eval_libc_i32("EDEADLK") + } + } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { + this.write_scalar(Scalar::from_u32(locked_count + 1), locked_count_place.into())?; + Ok(0) + } else { + this.eval_libc_i32("EINVAL") + } + } + + fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, mutex_op, 16)?; + + let mutex = this.read_scalar(mutex_op)?.not_undef()?; + if this.is_null(mutex)? { + return this.eval_libc_i32("EINVAL"); + } + let mutex_place = this.deref_operand(mutex_op)?; + + let i32_layout = this.layout_of(this.tcx.types.i32)?; + let kind_place = mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, this)?; + let kind = this.read_scalar(kind_place.into())?.not_undef()?; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let locked_count_place = mutex_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let locked_count = this.read_scalar(locked_count_place.into())?.to_u32()?; + + if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? || + kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { + if locked_count == 0 { + this.write_scalar(Scalar::from_u32(1), locked_count_place.into())?; + Ok(0) + } else { + this.eval_libc_i32("EBUSY") + } + } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? 
{ + this.write_scalar(Scalar::from_u32(locked_count + 1), locked_count_place.into())?; + Ok(0) + } else { + this.eval_libc_i32("EINVAL") + } + } + + fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, mutex_op, 16)?; + + let mutex = this.read_scalar(mutex_op)?.not_undef()?; + if this.is_null(mutex)? { + return this.eval_libc_i32("EINVAL"); + } + let mutex_place = this.deref_operand(mutex_op)?; + + let i32_layout = this.layout_of(this.tcx.types.i32)?; + let kind_place = mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, this)?; + let kind = this.read_scalar(kind_place.into())?.not_undef()?; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let locked_count_place = mutex_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let locked_count = this.read_scalar(locked_count_place.into())?.to_u32()?; + + if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { + if locked_count == 1 { + this.write_scalar(Scalar::from_u32(0), locked_count_place.into())?; + Ok(0) + } else { + throw_ub_format!("Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked"); + } + } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { + if locked_count == 1 { + this.write_scalar(Scalar::from_u32(0), locked_count_place.into())?; + Ok(0) + } else { + this.eval_libc_i32("EPERM") + } + } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { + if locked_count > 0 { + this.write_scalar(Scalar::from_u32(locked_count - 1), locked_count_place.into())?; + Ok(0) + } else { + this.eval_libc_i32("EPERM") + } + } else { + this.eval_libc_i32("EINVAL") + } + } + + fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, mutex_op, 16)?; + + let mutex = this.read_scalar(mutex_op)?.not_undef()?; + if this.is_null(mutex)? 
{ + return this.eval_libc_i32("EINVAL"); + } + let mutex_place = this.deref_operand(mutex_op)?; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let locked_count_place = mutex_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + if this.read_scalar(locked_count_place.into())?.to_u32()? != 0 { + return this.eval_libc_i32("EBUSY"); + } + + let i32_layout = this.layout_of(this.tcx.types.i32)?; + let kind_place = mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, this)?; + this.write_scalar(ScalarMaybeUndef::Undef, kind_place.into())?; + this.write_scalar(ScalarMaybeUndef::Undef, locked_count_place.into())?; + + Ok(0) + } + + // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform + // memory layout: + // bytes 0-3: reader count, as a u32 + // bytes 4-7: writer count, as a u32 + + fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, rwlock_op, 8)?; + + let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; + if this.is_null(rwlock)? 
{ + return this.eval_libc_i32("EINVAL"); + } + let rwlock_place = this.deref_operand(rwlock_op)?; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let readers = this.read_scalar(readers_place.into())?.to_u32()?; + let writers = this.read_scalar(writers_place.into())?.to_u32()?; + if writers != 0 { + throw_unsup_format!("Deadlock due to read-locking a pthreads read-write lock while it is already write-locked"); + } else { + this.write_scalar(Scalar::from_u32(readers + 1), readers_place.into())?; + Ok(0) + } + } + + fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, rwlock_op, 8)?; + + let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; + if this.is_null(rwlock)? { + return this.eval_libc_i32("EINVAL"); + } + let rwlock_place = this.deref_operand(rwlock_op)?; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let readers = this.read_scalar(readers_place.into())?.to_u32()?; + let writers = this.read_scalar(writers_place.into())?.to_u32()?; + if writers != 0 { + this.eval_libc_i32("EBUSY") + } else { + this.write_scalar(Scalar::from_u32(readers + 1), readers_place.into())?; + Ok(0) + } + } + + fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, rwlock_op, 8)?; + + let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; + if this.is_null(rwlock)? 
{ + return this.eval_libc_i32("EINVAL"); + } + let rwlock_place = this.deref_operand(rwlock_op)?; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let readers = this.read_scalar(readers_place.into())?.to_u32()?; + let writers = this.read_scalar(writers_place.into())?.to_u32()?; + if readers != 0 { + throw_unsup_format!("Deadlock due to write-locking a pthreads read-write lock while it is already read-locked"); + } else if writers != 0 { + throw_unsup_format!("Deadlock due to write-locking a pthreads read-write lock while it is already write-locked"); + } else { + this.write_scalar(Scalar::from_u32(1), writers_place.into())?; + Ok(0) + } + } + + fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, rwlock_op, 8)?; + + let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; + if this.is_null(rwlock)? 
{ + return this.eval_libc_i32("EINVAL"); + } + let rwlock_place = this.deref_operand(rwlock_op)?; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let readers = this.read_scalar(readers_place.into())?.to_u32()?; + let writers = this.read_scalar(writers_place.into())?.to_u32()?; + if readers != 0 || writers != 0 { + this.eval_libc_i32("EBUSY") + } else { + this.write_scalar(Scalar::from_u32(1), writers_place.into())?; + Ok(0) + } + } + + fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, rwlock_op, 8)?; + + let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; + if this.is_null(rwlock)? { + return this.eval_libc_i32("EINVAL"); + } + let rwlock_place = this.deref_operand(rwlock_op)?; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let readers = this.read_scalar(readers_place.into())?.to_u32()?; + let writers = this.read_scalar(writers_place.into())?.to_u32()?; + if readers != 0 { + this.write_scalar(Scalar::from_u32(readers - 1), readers_place.into())?; + Ok(0) + } else if writers != 0 { + this.write_scalar(Scalar::from_u32(0), writers_place.into())?; + Ok(0) + } else { + this.eval_libc_i32("EPERM") + } + } + + fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { + let this = self.eval_context_mut(); + + check_ptr_target_min_size(this, rwlock_op, 8)?; + + let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; + if this.is_null(rwlock)? 
{ + return this.eval_libc_i32("EINVAL"); + } + let rwlock_place = this.deref_operand(rwlock_op)?; + + let u32_layout = this.layout_of(this.tcx.types.u32)?; + let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + if this.read_scalar(readers_place.into())?.to_u32()? != 0 { + return this.eval_libc_i32("EBUSY"); + } + let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + if this.read_scalar(writers_place.into())?.to_u32()? != 0 { + return this.eval_libc_i32("EBUSY"); + } + + this.write_scalar(ScalarMaybeUndef::Undef, readers_place.into())?; + this.write_scalar(ScalarMaybeUndef::Undef, writers_place.into())?; + + Ok(0) + } +} + +fn check_ptr_target_min_size<'mir, 'tcx: 'mir>(ecx: &MiriEvalContext<'mir, 'tcx>, operand: OpTy<'tcx, Tag>, min_size: u64) -> InterpResult<'tcx, ()> { + let target_ty = match operand.layout.ty.kind { + TyKind::RawPtr(TypeAndMut{ ty, mutbl: _ }) => ty, + _ => panic!("Argument to pthread function was not a raw pointer"), + }; + let target_layout = ecx.layout_of(target_ty)?; + assert!(target_layout.size.bytes() >= min_size); + Ok(()) +} diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index 8c92b47fb2..d6ce939c6c 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -1,4 +1,3 @@ -// Just instantiate some data structures to make sure we got all their foreign items covered. // Requires full MIR on Windows. 
#![feature(rustc_private)] @@ -8,6 +7,16 @@ use std::sync::{Mutex, RwLock, TryLockError}; extern crate libc; fn main() { + test_mutex(); + #[cfg(not(target_os = "windows"))] // TODO: implement RwLock on Windows + { + test_rwlock_stdlib(); + test_rwlock_libc_init(); + test_rwlock_libc_static_initializer(); + } +} + +fn test_mutex() { let m = Mutex::new(0); { let _guard = m.lock(); @@ -15,54 +24,60 @@ fn main() { } drop(m.try_lock().unwrap()); drop(m); +} - #[cfg(not(target_os = "windows"))] // TODO: implement RwLock on Windows +#[cfg(not(target_os = "windows"))] +fn test_rwlock_stdlib() { + let rw = RwLock::new(0); { - let rw = RwLock::new(0); - { - let _read_guard = rw.read().unwrap(); - drop(rw.read().unwrap()); - drop(rw.try_read().unwrap()); - assert!(rw.try_write().unwrap_err().would_block()); - } + let _read_guard = rw.read().unwrap(); + drop(rw.read().unwrap()); + drop(rw.try_read().unwrap()); + assert!(rw.try_write().unwrap_err().would_block()); + } - { - let _write_guard = rw.write().unwrap(); - assert!(rw.try_read().unwrap_err().would_block()); - assert!(rw.try_write().unwrap_err().would_block()); - } + { + let _write_guard = rw.write().unwrap(); + assert!(rw.try_read().unwrap_err().would_block()); + assert!(rw.try_write().unwrap_err().would_block()); + } +} - // need to go a layer deeper and test the behavior of libc functions, because - // std::sys::unix::rwlock::RWLock keeps track of write_locked and num_readers +// need to go a layer deeper and test the behavior of libc functions, because +// std::sys::unix::rwlock::RWLock keeps track of write_locked and num_readers - unsafe { - let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); - assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, std::ptr::null_mut()), 0); - assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - 
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); - } +#[cfg(not(target_os = "windows"))] +fn test_rwlock_libc_init() { + unsafe { + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, std::ptr::null_mut()), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); + } +} - let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); - unsafe { - assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); - assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); +#[cfg(not(target_os = "windows"))] +fn test_rwlock_libc_static_initializer() { + let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); + unsafe { + assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); - 
assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), libc::EBUSY); - assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); - assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_destroy(rw.get()), 0); - } + assert_eq!(libc::pthread_rwlock_destroy(rw.get()), 0); } } From 765050f302f75a8f4c5de3ab527aec4d4f9f883e Mon Sep 17 00:00:00 2001 From: David Cook Date: Mon, 17 Feb 2020 22:52:44 -0600 Subject: [PATCH 05/31] Revise mutex/rwlock memory layout for macOS compat --- src/shims/sync.rs | 54 ++++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index c932958662..3208727730 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -79,7 +79,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // pthread_mutex_t is between 24 and 48 bytes, depending on the platform // memory layout: - // bytes 0-3: count of how many times this mutex has been locked, as a u32 + // bytes 0-3: reserved for signature on macOS + // bytes 4-7: count of how many times this mutex has been locked, as a u32 // bytes 12-15: mutex kind, as an i32 // (the kind should be at this offset for compatibility with the static // initializer macro) @@ -112,7 +113,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx }; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let locked_count_place = mutex_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let locked_count_place = mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; this.write_scalar(Scalar::from_u32(0), locked_count_place.into())?; let mutex_kind_place 
= mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, &*this.tcx)?; @@ -137,7 +138,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let kind = this.read_scalar(kind_place.into())?.not_undef()?; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let locked_count_place = mutex_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let locked_count_place = mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; let locked_count = this.read_scalar(locked_count_place.into())?.to_u32()?; if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { @@ -178,7 +179,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let kind = this.read_scalar(kind_place.into())?.not_undef()?; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let locked_count_place = mutex_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let locked_count_place = mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; let locked_count = this.read_scalar(locked_count_place.into())?.to_u32()?; if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? || @@ -213,7 +214,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let kind = this.read_scalar(kind_place.into())?.not_undef()?; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let locked_count_place = mutex_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let locked_count_place = mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; let locked_count = this.read_scalar(locked_count_place.into())?.to_u32()?; if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? 
{ @@ -254,7 +255,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let mutex_place = this.deref_operand(mutex_op)?; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let locked_count_place = mutex_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let locked_count_place = mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; if this.read_scalar(locked_count_place.into())?.to_u32()? != 0 { return this.eval_libc_i32("EBUSY"); } @@ -269,13 +270,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform // memory layout: - // bytes 0-3: reader count, as a u32 - // bytes 4-7: writer count, as a u32 + // bytes 0-3: reserved for signature on macOS + // bytes 4-7: reader count, as a u32 + // bytes 8-11: writer count, as a u32 fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 8)?; + check_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ @@ -284,8 +286,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let rwlock_place = this.deref_operand(rwlock_op)?; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; - let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; let readers = this.read_scalar(readers_place.into())?.to_u32()?; let writers = this.read_scalar(writers_place.into())?.to_u32()?; if writers != 0 { @@ -299,7 +301,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 8)?; + check_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ @@ -308,8 +310,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let rwlock_place = this.deref_operand(rwlock_op)?; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; - let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; let readers = this.read_scalar(readers_place.into())?.to_u32()?; let writers = this.read_scalar(writers_place.into())?.to_u32()?; if writers != 0 { @@ -323,7 +325,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 8)?; + check_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ @@ -332,8 +334,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let rwlock_place = this.deref_operand(rwlock_op)?; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; - let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; let readers = this.read_scalar(readers_place.into())?.to_u32()?; let writers = this.read_scalar(writers_place.into())?.to_u32()?; if readers != 0 { @@ -349,7 +351,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 8)?; + check_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ @@ -358,8 +360,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let rwlock_place = this.deref_operand(rwlock_op)?; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; - let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; let readers = this.read_scalar(readers_place.into())?.to_u32()?; let writers = this.read_scalar(writers_place.into())?.to_u32()?; if readers != 0 || writers != 0 { @@ -373,7 +375,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 8)?; + check_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ @@ -382,8 +384,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let rwlock_place = this.deref_operand(rwlock_op)?; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; - let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; let readers = this.read_scalar(readers_place.into())?.to_u32()?; let writers = this.read_scalar(writers_place.into())?.to_u32()?; if readers != 0 { @@ -400,7 +402,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 8)?; + check_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? { @@ -409,11 +411,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let rwlock_place = this.deref_operand(rwlock_op)?; let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::ZERO, MemPlaceMeta::None, u32_layout, this)?; + let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; if this.read_scalar(readers_place.into())?.to_u32()? != 0 { return this.eval_libc_i32("EBUSY"); } - let writers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; + let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; if this.read_scalar(writers_place.into())?.to_u32()? 
!= 0 { return this.eval_libc_i32("EBUSY"); } From dca83d73cbfe847738cbd310a4da786979768dd0 Mon Sep 17 00:00:00 2001 From: David Cook Date: Thu, 20 Feb 2020 22:19:51 -0600 Subject: [PATCH 06/31] Add test that exercises ReentrantMutex --- tests/run-pass/reentrant-println.rs | 17 +++++++++++++++++ tests/run-pass/reentrant-println.stdout | 2 ++ 2 files changed, 19 insertions(+) create mode 100644 tests/run-pass/reentrant-println.rs create mode 100644 tests/run-pass/reentrant-println.stdout diff --git a/tests/run-pass/reentrant-println.rs b/tests/run-pass/reentrant-println.rs new file mode 100644 index 0000000000..3703d21e04 --- /dev/null +++ b/tests/run-pass/reentrant-println.rs @@ -0,0 +1,17 @@ +use std::fmt::{Display, Error, Formatter}; + +// This test case exercises std::sys_common::remutex::ReentrantMutex +// by calling println!() from inside fmt + +struct InterruptingCow(); + +impl Display for InterruptingCow { + fn fmt(&self, _f: &mut Formatter<'_>) -> Result<(), Error> { + println!("Moo"); + Ok(()) + } +} + +fn main() { + println!("\"Knock knock\" \"Who's {} there?\"", InterruptingCow()); +} diff --git a/tests/run-pass/reentrant-println.stdout b/tests/run-pass/reentrant-println.stdout new file mode 100644 index 0000000000..8a57d32f84 --- /dev/null +++ b/tests/run-pass/reentrant-println.stdout @@ -0,0 +1,2 @@ +"Knock knock" "Who's Moo + there?" 
From c773ca8614772115788fb59a8cdab49efd09e477 Mon Sep 17 00:00:00 2001 From: David Cook Date: Fri, 21 Feb 2020 19:05:24 -0600 Subject: [PATCH 07/31] Style fixes --- tests/run-pass/reentrant-println.rs | 4 ++-- tests/run-pass/sync.rs | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/run-pass/reentrant-println.rs b/tests/run-pass/reentrant-println.rs index 3703d21e04..09c4fc3f74 100644 --- a/tests/run-pass/reentrant-println.rs +++ b/tests/run-pass/reentrant-println.rs @@ -3,7 +3,7 @@ use std::fmt::{Display, Error, Formatter}; // This test case exercises std::sys_common::remutex::ReentrantMutex // by calling println!() from inside fmt -struct InterruptingCow(); +struct InterruptingCow; impl Display for InterruptingCow { fn fmt(&self, _f: &mut Formatter<'_>) -> Result<(), Error> { @@ -13,5 +13,5 @@ impl Display for InterruptingCow { } fn main() { - println!("\"Knock knock\" \"Who's {} there?\"", InterruptingCow()); + println!("\"Knock knock\" \"Who's {} there?\"", InterruptingCow); } diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index d6ce939c6c..46cad3c162 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -1,5 +1,3 @@ -// Requires full MIR on Windows. 
- #![feature(rustc_private)] use std::sync::{Mutex, RwLock, TryLockError}; From 5cc091bc6e307c4e46f94aac6cf54d0e0e2ce70a Mon Sep 17 00:00:00 2001 From: David Cook Date: Fri, 21 Feb 2020 19:10:20 -0600 Subject: [PATCH 08/31] Add test of recursive mutex using libc FFI --- tests/run-pass/sync.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index 46cad3c162..b247061083 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -8,6 +8,7 @@ fn main() { test_mutex(); #[cfg(not(target_os = "windows"))] // TODO: implement RwLock on Windows { + test_mutex_libc_recursive(); test_rwlock_stdlib(); test_rwlock_libc_init(); test_rwlock_libc_static_initializer(); @@ -24,6 +25,28 @@ fn test_mutex() { drop(m); } +#[cfg(not(target_os = "windows"))] +fn test_mutex_libc_recursive() { + unsafe { + let mut attr: libc::pthread_mutexattr_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutexattr_init(&mut attr as *mut _), 0); + assert_eq!(libc::pthread_mutexattr_settype(&mut attr as *mut _, libc::PTHREAD_MUTEX_RECURSIVE), 0); + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mut attr as *mut _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), libc::EPERM); + assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutexattr_destroy(&mut attr as *mut _), 
0); + } +} + #[cfg(not(target_os = "windows"))] fn test_rwlock_stdlib() { let rw = RwLock::new(0); From d11315ebfb4c2d95a1ca6c52bec105237b10e933 Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 15 Mar 2020 15:10:08 -0500 Subject: [PATCH 09/31] Fix misleading function names --- src/shims/sync.rs | 32 ++++++++++++++++---------------- tests/run-pass/sync.rs | 4 ++-- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 3208727730..22e62beae2 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -13,7 +13,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, attr_op, 4)?; + assert_ptr_target_min_size(this, attr_op, 4)?; let attr = this.read_scalar(attr_op)?.not_undef()?; if this.is_null(attr)? { @@ -36,7 +36,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, attr_op, 4)?; + assert_ptr_target_min_size(this, attr_op, 4)?; let attr = this.read_scalar(attr_op)?.not_undef()?; if this.is_null(attr)? { @@ -62,7 +62,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, attr_op, 4)?; + assert_ptr_target_min_size(this, attr_op, 4)?; let attr = this.read_scalar(attr_op)?.not_undef()?; if this.is_null(attr)? 
{ @@ -92,8 +92,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, mutex_op, 16)?; - check_ptr_target_min_size(this, attr_op, 4)?; + assert_ptr_target_min_size(this, mutex_op, 16)?; + assert_ptr_target_min_size(this, attr_op, 4)?; let mutex = this.read_scalar(mutex_op)?.not_undef()?; if this.is_null(mutex)? { @@ -125,7 +125,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, mutex_op, 16)?; + assert_ptr_target_min_size(this, mutex_op, 16)?; let mutex = this.read_scalar(mutex_op)?.not_undef()?; if this.is_null(mutex)? { @@ -166,7 +166,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, mutex_op, 16)?; + assert_ptr_target_min_size(this, mutex_op, 16)?; let mutex = this.read_scalar(mutex_op)?.not_undef()?; if this.is_null(mutex)? { @@ -201,7 +201,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, mutex_op, 16)?; + assert_ptr_target_min_size(this, mutex_op, 16)?; let mutex = this.read_scalar(mutex_op)?.not_undef()?; if this.is_null(mutex)? 
{ @@ -246,7 +246,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, mutex_op, 16)?; + assert_ptr_target_min_size(this, mutex_op, 16)?; let mutex = this.read_scalar(mutex_op)?.not_undef()?; if this.is_null(mutex)? { @@ -277,7 +277,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 12)?; + assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? { @@ -301,7 +301,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 12)?; + assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? { @@ -325,7 +325,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 12)?; + assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ @@ -351,7 +351,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 12)?; + assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? { @@ -375,7 +375,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 12)?; + assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? { @@ -402,7 +402,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - check_ptr_target_min_size(this, rwlock_op, 12)?; + assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ @@ -427,7 +427,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } } -fn check_ptr_target_min_size<'mir, 'tcx: 'mir>(ecx: &MiriEvalContext<'mir, 'tcx>, operand: OpTy<'tcx, Tag>, min_size: u64) -> InterpResult<'tcx, ()> { +fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(ecx: &MiriEvalContext<'mir, 'tcx>, operand: OpTy<'tcx, Tag>, min_size: u64) -> InterpResult<'tcx, ()> { let target_ty = match operand.layout.ty.kind { TyKind::RawPtr(TypeAndMut{ ty, mutbl: _ }) => ty, _ => panic!("Argument to pthread function was not a raw pointer"), diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index b247061083..c1e44789aa 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -10,7 +10,7 @@ fn main() { { test_mutex_libc_recursive(); test_rwlock_stdlib(); - test_rwlock_libc_init(); + test_mutex_libc_init(); test_rwlock_libc_static_initializer(); } } @@ -68,7 +68,7 @@ fn test_rwlock_stdlib() { // std::sys::unix::rwlock::RWLock keeps track of write_locked and num_readers #[cfg(not(target_os = "windows"))] -fn test_rwlock_libc_init() { +fn test_mutex_libc_init() { unsafe { let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, std::ptr::null_mut()), 0); From fd94255b9d4ab69b110bb5d2acef5c288fe4a0e1 Mon Sep 17 00:00:00 2001 From: David Cook Date: Tue, 17 Mar 2020 08:19:57 -0500 Subject: [PATCH 10/31] Add comments explaining asserts --- src/shims/sync.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 22e62beae2..61346bfd85 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -13,6 +13,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following write at an offset to the attr pointer is within bounds 
assert_ptr_target_min_size(this, attr_op, 4)?; let attr = this.read_scalar(attr_op)?.not_undef()?; @@ -36,6 +37,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following write at an offset to the attr pointer is within bounds assert_ptr_target_min_size(this, attr_op, 4)?; let attr = this.read_scalar(attr_op)?.not_undef()?; @@ -62,6 +64,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following write at an offset to the attr pointer is within bounds assert_ptr_target_min_size(this, attr_op, 4)?; let attr = this.read_scalar(attr_op)?.not_undef()?; @@ -92,7 +95,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following writes at offsets to the mutex pointer are within bounds assert_ptr_target_min_size(this, mutex_op, 16)?; + // Ensure that the following read at an offset to the attr pointer is within bounds assert_ptr_target_min_size(this, attr_op, 4)?; let mutex = this.read_scalar(mutex_op)?.not_undef()?; @@ -125,6 +130,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following reads and writes at offsets to the mutex pointer are within bounds assert_ptr_target_min_size(this, mutex_op, 16)?; let mutex = this.read_scalar(mutex_op)?.not_undef()?; @@ -166,6 +172,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = 
self.eval_context_mut(); + // Ensure that the following reads and writes at offsets to the mutex pointer are within bounds assert_ptr_target_min_size(this, mutex_op, 16)?; let mutex = this.read_scalar(mutex_op)?.not_undef()?; @@ -201,6 +208,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following reads and writes at offsets to the mutex pointer are within bounds assert_ptr_target_min_size(this, mutex_op, 16)?; let mutex = this.read_scalar(mutex_op)?.not_undef()?; @@ -246,6 +254,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following read and writes at offsets to the mutex pointer are within bounds assert_ptr_target_min_size(this, mutex_op, 16)?; let mutex = this.read_scalar(mutex_op)?.not_undef()?; @@ -277,6 +286,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following reads and write at offsets to the rwlock pointer are within bounds assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; @@ -301,6 +311,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following reads and write at offsets to the rwlock pointer are within bounds assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; @@ -325,6 +336,7 @@ pub trait 
EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following reads and write at offsets to the rwlock pointer are within bounds assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; @@ -351,6 +363,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following reads and write at offsets to the rwlock pointer are within bounds assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; @@ -375,6 +388,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following reads and writes at offsets to the rwlock pointer are within bounds assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; @@ -402,6 +416,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); + // Ensure that the following reads and writes at offsets to the rwlock pointer are within bounds assert_ptr_target_min_size(this, rwlock_op, 12)?; let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; From 141319a412ddca4a2b16f45eed459c4a735e8f8f Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 22 Mar 2020 15:18:02 -0500 Subject: [PATCH 11/31] Refactor sync shims with setters and getters --- src/shims/sync.rs | 400 +++++++++++++++++++++++++--------------------- 1 file changed, 
219 insertions(+), 181 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 61346bfd85..b8d9a88865 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -6,26 +6,16 @@ use crate::*; impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { - // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform - // memory layout: store an i32 in the first four bytes equal to the - // corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL) - fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following write at an offset to the attr pointer is within bounds - assert_ptr_target_min_size(this, attr_op, 4)?; - let attr = this.read_scalar(attr_op)?.not_undef()?; if this.is_null(attr)? { return this.eval_libc_i32("EINVAL"); } - let attr_place = this.deref_operand(attr_op)?; - let i32_layout = this.layout_of(this.tcx.types.i32)?; - let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, this)?; let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?; - this.write_scalar(default_kind, kind_place.into())?; + mutexattr_set_kind(this, attr_op, default_kind)?; Ok(0) } @@ -37,22 +27,17 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following write at an offset to the attr pointer is within bounds - assert_ptr_target_min_size(this, attr_op, 4)?; - let attr = this.read_scalar(attr_op)?.not_undef()?; if this.is_null(attr)? { return this.eval_libc_i32("EINVAL"); } let kind = this.read_scalar(kind_op)?.not_undef()?; - if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? || - kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? || - kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? 
{ - let attr_place = this.deref_operand(attr_op)?; - let i32_layout = this.layout_of(this.tcx.types.i32)?; - let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, this)?; - this.write_scalar(kind, kind_place.into())?; + if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? + || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? + || kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? + { + mutexattr_set_kind(this, attr_op, kind)?; } else { let einval = this.eval_libc_i32("EINVAL")?; return Ok(einval); @@ -64,30 +49,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following write at an offset to the attr pointer is within bounds - assert_ptr_target_min_size(this, attr_op, 4)?; - let attr = this.read_scalar(attr_op)?.not_undef()?; if this.is_null(attr)? { return this.eval_libc_i32("EINVAL"); } - let attr_place = this.deref_operand(attr_op)?; - let i32_layout = this.layout_of(this.tcx.types.i32)?; - let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, this)?; - this.write_scalar(ScalarMaybeUndef::Undef, kind_place.into())?; + mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?; Ok(0) } - // pthread_mutex_t is between 24 and 48 bytes, depending on the platform - // memory layout: - // bytes 0-3: reserved for signature on macOS - // bytes 4-7: count of how many times this mutex has been locked, as a u32 - // bytes 12-15: mutex kind, as an i32 - // (the kind should be at this offset for compatibility with the static - // initializer macro) - fn pthread_mutex_init( &mut self, mutex_op: OpTy<'tcx, Tag>, @@ -95,34 +66,20 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following writes at offsets to the mutex 
pointer are within bounds - assert_ptr_target_min_size(this, mutex_op, 16)?; - // Ensure that the following read at an offset to the attr pointer is within bounds - assert_ptr_target_min_size(this, attr_op, 4)?; - let mutex = this.read_scalar(mutex_op)?.not_undef()?; if this.is_null(mutex)? { return this.eval_libc_i32("EINVAL"); } - let mutex_place = this.deref_operand(mutex_op)?; - - let i32_layout = this.layout_of(this.tcx.types.i32)?; let attr = this.read_scalar(attr_op)?.not_undef()?; let kind = if this.is_null(attr)? { this.eval_libc("PTHREAD_MUTEX_DEFAULT")? } else { - let attr_place = this.deref_operand(attr_op)?; - let attr_kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, this)?; - this.read_scalar(attr_kind_place.into())?.not_undef()? + mutexattr_get_kind(this, attr_op)?.not_undef()? }; - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let locked_count_place = mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - this.write_scalar(Scalar::from_u32(0), locked_count_place.into())?; - - let mutex_kind_place = mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, &*this.tcx)?; - this.write_scalar(kind, mutex_kind_place.into())?; + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; + mutex_set_kind(this, mutex_op, kind)?; Ok(0) } @@ -130,39 +87,30 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following reads and writes at offsets to the mutex pointer are within bounds - assert_ptr_target_min_size(this, mutex_op, 16)?; - let mutex = this.read_scalar(mutex_op)?.not_undef()?; if this.is_null(mutex)? 
{ return this.eval_libc_i32("EINVAL"); } - let mutex_place = this.deref_operand(mutex_op)?; - let i32_layout = this.layout_of(this.tcx.types.i32)?; - let kind_place = mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, this)?; - let kind = this.read_scalar(kind_place.into())?.not_undef()?; - - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let locked_count_place = mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - let locked_count = this.read_scalar(locked_count_place.into())?.to_u32()?; + let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; + let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { if locked_count == 0 { - this.write_scalar(Scalar::from_u32(1), locked_count_place.into())?; + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; Ok(0) } else { throw_unsup_format!("Deadlock due to locking a PTHREAD_MUTEX_NORMAL mutex twice"); } } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { if locked_count == 0 { - this.write_scalar(Scalar::from_u32(1), locked_count_place.into())?; + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; Ok(0) } else { this.eval_libc_i32("EDEADLK") } } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { - this.write_scalar(Scalar::from_u32(locked_count + 1), locked_count_place.into())?; + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(locked_count + 1))?; Ok(0) } else { this.eval_libc_i32("EINVAL") @@ -172,33 +120,25 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following reads and writes at offsets to the mutex pointer are within bounds - assert_ptr_target_min_size(this, mutex_op, 16)?; - let mutex = this.read_scalar(mutex_op)?.not_undef()?; if this.is_null(mutex)? 
{ return this.eval_libc_i32("EINVAL"); } - let mutex_place = this.deref_operand(mutex_op)?; - let i32_layout = this.layout_of(this.tcx.types.i32)?; - let kind_place = mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, this)?; - let kind = this.read_scalar(kind_place.into())?.not_undef()?; + let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; + let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let locked_count_place = mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - let locked_count = this.read_scalar(locked_count_place.into())?.to_u32()?; - - if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? || - kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { + if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? + || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? + { if locked_count == 0 { - this.write_scalar(Scalar::from_u32(1), locked_count_place.into())?; + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; Ok(0) } else { this.eval_libc_i32("EBUSY") } } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { - this.write_scalar(Scalar::from_u32(locked_count + 1), locked_count_place.into())?; + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(locked_count + 1))?; Ok(0) } else { this.eval_libc_i32("EINVAL") @@ -208,40 +148,33 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following reads and writes at offsets to the mutex pointer are within bounds - assert_ptr_target_min_size(this, mutex_op, 16)?; - let mutex = this.read_scalar(mutex_op)?.not_undef()?; if this.is_null(mutex)? 
{ return this.eval_libc_i32("EINVAL"); } - let mutex_place = this.deref_operand(mutex_op)?; - - let i32_layout = this.layout_of(this.tcx.types.i32)?; - let kind_place = mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, this)?; - let kind = this.read_scalar(kind_place.into())?.not_undef()?; - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let locked_count_place = mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - let locked_count = this.read_scalar(locked_count_place.into())?.to_u32()?; + let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; + let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { if locked_count == 1 { - this.write_scalar(Scalar::from_u32(0), locked_count_place.into())?; + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; Ok(0) } else { - throw_ub_format!("Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked"); + throw_ub_format!( + "Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked" + ); } } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { if locked_count == 1 { - this.write_scalar(Scalar::from_u32(0), locked_count_place.into())?; + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; Ok(0) } else { this.eval_libc_i32("EPERM") } } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? 
{ if locked_count > 0 { - this.write_scalar(Scalar::from_u32(locked_count - 1), locked_count_place.into())?; + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(locked_count - 1))?; Ok(0) } else { this.eval_libc_i32("EPERM") @@ -254,56 +187,37 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following read and writes at offsets to the mutex pointer are within bounds - assert_ptr_target_min_size(this, mutex_op, 16)?; - let mutex = this.read_scalar(mutex_op)?.not_undef()?; if this.is_null(mutex)? { return this.eval_libc_i32("EINVAL"); } - let mutex_place = this.deref_operand(mutex_op)?; - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let locked_count_place = mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - if this.read_scalar(locked_count_place.into())?.to_u32()? != 0 { + if mutex_get_locked_count(this, mutex_op)?.to_u32()? 
!= 0 { return this.eval_libc_i32("EBUSY"); } - let i32_layout = this.layout_of(this.tcx.types.i32)?; - let kind_place = mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, this)?; - this.write_scalar(ScalarMaybeUndef::Undef, kind_place.into())?; - this.write_scalar(ScalarMaybeUndef::Undef, locked_count_place.into())?; + mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?; + mutex_set_locked_count(this, mutex_op, ScalarMaybeUndef::Undef)?; Ok(0) } - // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform - // memory layout: - // bytes 0-3: reserved for signature on macOS - // bytes 4-7: reader count, as a u32 - // bytes 8-11: writer count, as a u32 - fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following reads and write at offsets to the rwlock pointer are within bounds - assert_ptr_target_min_size(this, rwlock_op, 12)?; - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ return this.eval_libc_i32("EINVAL"); } - let rwlock_place = this.deref_operand(rwlock_op)?; - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; - let readers = this.read_scalar(readers_place.into())?.to_u32()?; - let writers = this.read_scalar(writers_place.into())?.to_u32()?; + let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; + let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if writers != 0 { - throw_unsup_format!("Deadlock due to read-locking a pthreads read-write lock while it is already write-locked"); + throw_unsup_format!( + "Deadlock due to read-locking a pthreads read-write lock while it is already write-locked" + ); } else { - this.write_scalar(Scalar::from_u32(readers + 1), readers_place.into())?; + rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers + 1))?; Ok(0) } } @@ -311,24 +225,17 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following reads and write at offsets to the rwlock pointer are within bounds - assert_ptr_target_min_size(this, rwlock_op, 12)?; - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ return this.eval_libc_i32("EINVAL"); } - let rwlock_place = this.deref_operand(rwlock_op)?; - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; - let readers = this.read_scalar(readers_place.into())?.to_u32()?; - let writers = this.read_scalar(writers_place.into())?.to_u32()?; + let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; + let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if writers != 0 { this.eval_libc_i32("EBUSY") } else { - this.write_scalar(Scalar::from_u32(readers + 1), readers_place.into())?; + rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers + 1))?; Ok(0) } } @@ -336,26 +243,23 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following reads and write at offsets to the rwlock pointer are within bounds - assert_ptr_target_min_size(this, rwlock_op, 12)?; - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ return this.eval_libc_i32("EINVAL"); } - let rwlock_place = this.deref_operand(rwlock_op)?; - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; - let readers = this.read_scalar(readers_place.into())?.to_u32()?; - let writers = this.read_scalar(writers_place.into())?.to_u32()?; + let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; + let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if readers != 0 { - throw_unsup_format!("Deadlock due to write-locking a pthreads read-write lock while it is already read-locked"); + throw_unsup_format!( + "Deadlock due to write-locking a pthreads read-write lock while it is already read-locked" + ); } else if writers != 0 { - throw_unsup_format!("Deadlock due to write-locking a pthreads read-write lock while it is already write-locked"); + throw_unsup_format!( + "Deadlock due to write-locking a pthreads read-write lock while it is already write-locked" + ); } else { - this.write_scalar(Scalar::from_u32(1), writers_place.into())?; + rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; Ok(0) } } @@ -363,24 +267,17 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following reads and write at offsets to the rwlock pointer are within bounds - assert_ptr_target_min_size(this, rwlock_op, 12)?; - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ return this.eval_libc_i32("EINVAL"); } - let rwlock_place = this.deref_operand(rwlock_op)?; - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; - let readers = this.read_scalar(readers_place.into())?.to_u32()?; - let writers = this.read_scalar(writers_place.into())?.to_u32()?; + let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; + let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if readers != 0 || writers != 0 { this.eval_libc_i32("EBUSY") } else { - this.write_scalar(Scalar::from_u32(1), writers_place.into())?; + rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; Ok(0) } } @@ -388,25 +285,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following reads and writes at offsets to the rwlock pointer are within bounds - assert_ptr_target_min_size(this, rwlock_op, 12)?; - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? 
{ return this.eval_libc_i32("EINVAL"); } - let rwlock_place = this.deref_operand(rwlock_op)?; - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; - let readers = this.read_scalar(readers_place.into())?.to_u32()?; - let writers = this.read_scalar(writers_place.into())?.to_u32()?; + let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; + let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if readers != 0 { - this.write_scalar(Scalar::from_u32(readers - 1), readers_place.into())?; + rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers - 1))?; Ok(0) } else if writers != 0 { - this.write_scalar(Scalar::from_u32(0), writers_place.into())?; + rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?; Ok(0) } else { this.eval_libc_i32("EPERM") @@ -416,38 +306,186 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - // Ensure that the following reads and writes at offsets to the rwlock pointer are within bounds - assert_ptr_target_min_size(this, rwlock_op, 12)?; - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; if this.is_null(rwlock)? { return this.eval_libc_i32("EINVAL"); } - let rwlock_place = this.deref_operand(rwlock_op)?; - let u32_layout = this.layout_of(this.tcx.types.u32)?; - let readers_place = rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, this)?; - if this.read_scalar(readers_place.into())?.to_u32()? != 0 { + if rwlock_get_readers(this, rwlock_op)?.to_u32()? 
!= 0 { return this.eval_libc_i32("EBUSY"); } - let writers_place = rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, this)?; - if this.read_scalar(writers_place.into())?.to_u32()? != 0 { + if rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0 { return this.eval_libc_i32("EBUSY"); } - this.write_scalar(ScalarMaybeUndef::Undef, readers_place.into())?; - this.write_scalar(ScalarMaybeUndef::Undef, writers_place.into())?; + rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?; + rwlock_set_writers(this, rwlock_op, ScalarMaybeUndef::Undef)?; Ok(0) } } -fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>(ecx: &MiriEvalContext<'mir, 'tcx>, operand: OpTy<'tcx, Tag>, min_size: u64) -> InterpResult<'tcx, ()> { +fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + operand: OpTy<'tcx, Tag>, + min_size: u64, +) -> InterpResult<'tcx, ()> { let target_ty = match operand.layout.ty.kind { - TyKind::RawPtr(TypeAndMut{ ty, mutbl: _ }) => ty, + TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty, _ => panic!("Argument to pthread function was not a raw pointer"), }; let target_layout = ecx.layout_of(target_ty)?; assert!(target_layout.size.bytes() >= min_size); Ok(()) } + +// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform +// memory layout: store an i32 in the first four bytes equal to the +// corresponding libc mutex kind constant (i.e. 
PTHREAD_MUTEX_NORMAL) + +fn mutexattr_get_kind<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + attr_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the attr pointer is within bounds + assert_ptr_target_min_size(ecx, attr_op, 4)?; + let attr_place = ecx.deref_operand(attr_op)?; + let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; + let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?; + ecx.read_scalar(kind_place.into()) +} + +fn mutexattr_set_kind<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + attr_op: OpTy<'tcx, Tag>, + kind: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the attr pointer is within bounds + assert_ptr_target_min_size(ecx, attr_op, 4)?; + let attr_place = ecx.deref_operand(attr_op)?; + let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; + let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?; + ecx.write_scalar(kind.into(), kind_place.into()) +} + +// pthread_mutex_t is between 24 and 48 bytes, depending on the platform +// memory layout: +// bytes 0-3: reserved for signature on macOS +// bytes 4-7: count of how many times this mutex has been locked, as a u32 +// bytes 12-15: mutex kind, as an i32 +// (the kind should be at this offset for compatibility with the static +// initializer macro) + +fn mutex_get_locked_count<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + mutex_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the mutex pointer is within bounds + assert_ptr_target_min_size(ecx, mutex_op, 16)?; + let mutex_place = ecx.deref_operand(mutex_op)?; + let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; + let locked_count_place = + mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; + ecx.read_scalar(locked_count_place.into()) +} + +fn 
mutex_set_locked_count<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + mutex_op: OpTy<'tcx, Tag>, + locked_count: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the mutex pointer is within bounds + assert_ptr_target_min_size(ecx, mutex_op, 16)?; + let mutex_place = ecx.deref_operand(mutex_op)?; + let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; + let locked_count_place = + mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; + ecx.write_scalar(locked_count.into(), locked_count_place.into()) +} + +fn mutex_get_kind<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + mutex_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the mutex pointer is within bounds + assert_ptr_target_min_size(ecx, mutex_op, 16)?; + let mutex_place = ecx.deref_operand(mutex_op)?; + let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; + let kind_place = + mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, ecx)?; + ecx.read_scalar(kind_place.into()) +} + +fn mutex_set_kind<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + mutex_op: OpTy<'tcx, Tag>, + kind: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the mutex pointer is within bounds + assert_ptr_target_min_size(ecx, mutex_op, 16)?; + let mutex_place = ecx.deref_operand(mutex_op)?; + let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; + let kind_place = + mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, ecx)?; + ecx.write_scalar(kind.into(), kind_place.into()) +} + +// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform +// memory layout: +// bytes 0-3: reserved for signature on macOS +// bytes 4-7: reader count, as a u32 +// bytes 8-11: writer count, as a u32 + +fn rwlock_get_readers<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + rwlock_op: 
OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; + let readers_place = + rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; + ecx.read_scalar(readers_place.into()) +} + +fn rwlock_set_readers<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, + readers: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; + let readers_place = + rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; + ecx.write_scalar(readers.into(), readers_place.into()) +} + +fn rwlock_get_writers<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; + let writers_place = + rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?; + ecx.read_scalar(writers_place.into()) +} + +fn rwlock_set_writers<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, + writers: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; + 
let writers_place = + rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?; + ecx.write_scalar(writers.into(), writers_place.into()) +} From ba3884dae6d34370c0841d61acefe8d4a3f8d05c Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 22 Mar 2020 16:16:02 -0500 Subject: [PATCH 12/31] Use checked addition/subtraction on lock counts --- src/shims/sync.rs | 54 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 39 insertions(+), 15 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index b8d9a88865..987513d323 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -110,8 +110,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.eval_libc_i32("EDEADLK") } } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(locked_count + 1))?; - Ok(0) + match locked_count.checked_add(1) { + Some(new_count) => { + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; + Ok(0) + } + None => this.eval_libc_i32("EAGAIN"), + } } else { this.eval_libc_i32("EINVAL") } @@ -138,8 +143,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.eval_libc_i32("EBUSY") } } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(locked_count + 1))?; - Ok(0) + match locked_count.checked_add(1) { + Some(new_count) => { + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; + Ok(0) + } + None => this.eval_libc_i32("EAGAIN"), + } } else { this.eval_libc_i32("EINVAL") } @@ -173,11 +183,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.eval_libc_i32("EPERM") } } else if kind == this.eval_libc("PTHREAD_MUTEX_RECURSIVE")? 
{ - if locked_count > 0 { - mutex_set_locked_count(this, mutex_op, Scalar::from_u32(locked_count - 1))?; - Ok(0) - } else { - this.eval_libc_i32("EPERM") + match locked_count.checked_sub(1) { + Some(new_count) => { + mutex_set_locked_count(this, mutex_op, Scalar::from_u32(new_count))?; + Ok(0) + } + None => { + // locked_count was already zero + this.eval_libc_i32("EPERM") + } } } else { this.eval_libc_i32("EINVAL") @@ -217,8 +231,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx "Deadlock due to read-locking a pthreads read-write lock while it is already write-locked" ); } else { - rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers + 1))?; - Ok(0) + match readers.checked_add(1) { + Some(new_readers) => { + rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?; + Ok(0) + } + None => this.eval_libc_i32("EAGAIN"), + } } } @@ -235,8 +254,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx if writers != 0 { this.eval_libc_i32("EBUSY") } else { - rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers + 1))?; - Ok(0) + match readers.checked_add(1) { + Some(new_readers) => { + rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?; + Ok(0) + } + None => this.eval_libc_i32("EAGAIN"), + } } } @@ -292,8 +316,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; - if readers != 0 { - rwlock_set_readers(this, rwlock_op, Scalar::from_u32(readers - 1))?; + if let Some(new_readers) = readers.checked_sub(1) { + rwlock_set_readers(this, rwlock_op, Scalar::from_u32(new_readers))?; Ok(0) } else if writers != 0 { rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?; From e5e3256b5942a0c2e280700fff7f33bbdc803436 Mon Sep 17 00:00:00 2001 From: David Cook Date: Thu, 26 Mar 2020 21:23:10 -0500 Subject: [PATCH 13/31] 
Address review comments --- src/shims/sync.rs | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 987513d323..960eca5510 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -167,7 +167,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? { - if locked_count == 1 { + if locked_count != 0 { mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; Ok(0) } else { @@ -176,7 +176,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx ); } } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? { - if locked_count == 1 { + if locked_count != 0 { mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; Ok(0) } else { @@ -363,8 +363,9 @@ fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>( Ok(()) } -// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform -// memory layout: store an i32 in the first four bytes equal to the +// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform. + +// Our chosen memory layout: store an i32 in the first four bytes equal to the // corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL) fn mutexattr_get_kind<'mir, 'tcx: 'mir>( @@ -392,13 +393,14 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>( ecx.write_scalar(kind.into(), kind_place.into()) } -// pthread_mutex_t is between 24 and 48 bytes, depending on the platform -// memory layout: +// pthread_mutex_t is between 24 and 48 bytes, depending on the platform. 
+ +// Our chosen memory layout: // bytes 0-3: reserved for signature on macOS +// (need to avoid this because it is set by static initializer macros) // bytes 4-7: count of how many times this mutex has been locked, as a u32 // bytes 12-15: mutex kind, as an i32 -// (the kind should be at this offset for compatibility with the static -// initializer macro) +// (the kind has to be at this offset for compatibility with static initializer macros) fn mutex_get_locked_count<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, @@ -454,9 +456,11 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( ecx.write_scalar(kind.into(), kind_place.into()) } -// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform -// memory layout: +// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform. + +// Our chosen memory layout: // bytes 0-3: reserved for signature on macOS +// (need to avoid this because it is set by static initializer macros) // bytes 4-7: reader count, as a u32 // bytes 8-11: writer count, as a u32 From 735fc12e1ad35e8789a922eb506f64756f499a32 Mon Sep 17 00:00:00 2001 From: David Cook Date: Fri, 27 Mar 2020 20:06:53 -0500 Subject: [PATCH 14/31] Handle variation in layout of pthread_mutex_t --- src/shims/sync.rs | 86 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 74 insertions(+), 12 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 960eca5510..6ce45e3ad4 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -1,5 +1,7 @@ +use std::sync::atomic::{AtomicU64, Ordering}; + use rustc_middle::ty::{TyKind, TypeAndMut}; -use rustc_target::abi::{LayoutOf, Size}; +use rustc_target::abi::{FieldsShape, LayoutOf, Size}; use crate::stacked_borrows::Tag; use crate::*; @@ -399,15 +401,67 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>( // bytes 0-3: reserved for signature on macOS // (need to avoid this because it is set by static initializer macros) // bytes 4-7: count of how many times this mutex has been locked, as a u32 -// 
bytes 12-15: mutex kind, as an i32 -// (the kind has to be at this offset for compatibility with static initializer macros) +// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32 +// (the kind has to be at its offset for compatibility with static initializer macros) + +static LIBC_MUTEX_KIND_OFFSET_CACHE: AtomicU64 = AtomicU64::new(0); + +fn libc_mutex_kind_offset<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, +) -> InterpResult<'tcx, u64> { + // Check if this offset has already been found and memoized + let cached_value = LIBC_MUTEX_KIND_OFFSET_CACHE.load(Ordering::Relaxed); + if cached_value != 0 { + return Ok(cached_value); + } + + // This function infers the offset of the `kind` field of libc's pthread_mutex_t + // C struct by examining the array inside libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP. + // At time of writing, it is always all zero bytes except for a one byte at one of + // four positions, depending on the target OS's C struct layout and the endianness of the + // target architecture. This offset will then be used in getters and setters below, so that + // mutexes created from static initializers can be emulated with the correct behavior. + let initializer_path = ["libc", "PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP"]; + let initializer_instance = ecx.resolve_path(&initializer_path); + let initializer_cid = GlobalId { instance: initializer_instance, promoted: None }; + let initializer_const_val = ecx.const_eval_raw(initializer_cid)?; + let array_mplacety = ecx.mplace_field(initializer_const_val, 0)?; + let array_length = match array_mplacety.layout.fields { + FieldsShape::Array { count, .. } => count, + _ => bug!("Couldn't get array length from type {:?}", array_mplacety.layout.ty), + }; + + let kind_offset = if array_length < 20 { + bug!("libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP array was shorter than expected"); + } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 16)?.into())?.to_u8()? 
!= 0 { + // for little-endian architectures + 16 + } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 19)?.into())?.to_u8()? != 0 { + // for big-endian architectures + // (note that the i32 spans bytes 16 through 19, so the offset of the kind field is 16) + 16 + } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 12)?.into())?.to_u8()? != 0 { + // for little-endian architectures + 12 + } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 15)?.into())?.to_u8()? != 0 { + // for big-endian architectures + // (note that the i32 spans bytes 12 through 15, so the offset of the kind field is 12) + 12 + } else { + bug!("Couldn't determine offset of `kind` in pthread_mutex_t"); + }; + + // Save offset to memoization cache for future calls + LIBC_MUTEX_KIND_OFFSET_CACHE.store(kind_offset, Ordering::Relaxed); + Ok(kind_offset) +} fn mutex_get_locked_count<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { // Ensure that the following read at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 16)?; + assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; let locked_count_place = @@ -421,7 +475,7 @@ fn mutex_set_locked_count<'mir, 'tcx: 'mir>( locked_count: impl Into>, ) -> InterpResult<'tcx, ()> { // Ensure that the following write at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 16)?; + assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; let locked_count_place = @@ -430,15 +484,19 @@ fn mutex_set_locked_count<'mir, 'tcx: 'mir>( } fn mutex_get_kind<'mir, 'tcx: 'mir>( - ecx: &MiriEvalContext<'mir, 'tcx>, + ecx: &mut MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, ) -> InterpResult<'tcx, ScalarMaybeUndef> { 
// Ensure that the following read at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 16)?; + assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; - let kind_place = - mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, ecx)?; + let kind_place = mutex_place.offset( + Size::from_bytes(libc_mutex_kind_offset(ecx)?), + MemPlaceMeta::None, + i32_layout, + ecx, + )?; ecx.read_scalar(kind_place.into()) } @@ -448,11 +506,15 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( kind: impl Into>, ) -> InterpResult<'tcx, ()> { // Ensure that the following write at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 16)?; + assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; - let kind_place = - mutex_place.offset(Size::from_bytes(12), MemPlaceMeta::None, i32_layout, ecx)?; + let kind_place = mutex_place.offset( + Size::from_bytes(libc_mutex_kind_offset(ecx)?), + MemPlaceMeta::None, + i32_layout, + ecx, + )?; ecx.write_scalar(kind.into(), kind_place.into()) } From de29546414cc5a987fd3317a9c3e5415e15a133b Mon Sep 17 00:00:00 2001 From: David Cook Date: Fri, 27 Mar 2020 20:26:21 -0500 Subject: [PATCH 15/31] Add and rearrange mutex tests --- tests/run-pass/sync.rs | 58 ++++++++++++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index c1e44789aa..025ae81372 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -5,17 +5,18 @@ use std::sync::{Mutex, RwLock, TryLockError}; extern crate libc; fn main() { - test_mutex(); + test_mutex_stdlib(); #[cfg(not(target_os = "windows"))] // TODO: implement RwLock on Windows { - test_mutex_libc_recursive(); + test_mutex_libc_init_recursive(); + 
test_mutex_libc_init_normal(); + test_mutex_libc_static_initializer_recursive(); test_rwlock_stdlib(); - test_mutex_libc_init(); test_rwlock_libc_static_initializer(); } } -fn test_mutex() { +fn test_mutex_stdlib() { let m = Mutex::new(0); { let _guard = m.lock(); @@ -26,7 +27,7 @@ fn test_mutex() { } #[cfg(not(target_os = "windows"))] -fn test_mutex_libc_recursive() { +fn test_mutex_libc_init_recursive() { unsafe { let mut attr: libc::pthread_mutexattr_t = std::mem::zeroed(); assert_eq!(libc::pthread_mutexattr_init(&mut attr as *mut _), 0); @@ -47,6 +48,39 @@ fn test_mutex_libc_recursive() { } } +#[cfg(not(target_os = "windows"))] +fn test_mutex_libc_init_normal() { + unsafe { + let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL), 0); + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); + } +} + +#[cfg(not(target_os = "windows"))] +fn test_mutex_libc_static_initializer_recursive() { + let mutex = std::cell::UnsafeCell::new(libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP); + unsafe { + assert_eq!(libc::pthread_mutex_lock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_trylock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_trylock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_lock(mutex.get()), 0); + 
assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_unlock(mutex.get()), libc::EPERM); + assert_eq!(libc::pthread_mutex_destroy(mutex.get()), 0); + } +} + #[cfg(not(target_os = "windows"))] fn test_rwlock_stdlib() { let rw = RwLock::new(0); @@ -67,20 +101,6 @@ fn test_rwlock_stdlib() { // need to go a layer deeper and test the behavior of libc functions, because // std::sys::unix::rwlock::RWLock keeps track of write_locked and num_readers -#[cfg(not(target_os = "windows"))] -fn test_mutex_libc_init() { - unsafe { - let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); - assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, std::ptr::null_mut()), 0); - assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); - } -} - #[cfg(not(target_os = "windows"))] fn test_rwlock_libc_static_initializer() { let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); From c7466c9531c1a282380183c78001014c35dc9fac Mon Sep 17 00:00:00 2001 From: David Cook Date: Fri, 27 Mar 2020 20:40:54 -0500 Subject: [PATCH 16/31] Add TerminationInfo::Deadlock, use in mutex shim --- src/diagnostics.rs | 6 +++- src/shims/sync.rs | 76 +++++----------------------------------------- 2 files changed, 13 insertions(+), 69 deletions(-) diff --git a/src/diagnostics.rs b/src/diagnostics.rs index 9ff4340211..2b53efe864 100644 --- a/src/diagnostics.rs +++ b/src/diagnostics.rs @@ -12,7 +12,8 @@ pub enum TerminationInfo { Exit(i64), Abort(Option), UnsupportedInIsolation(String), - ExperimentalUb { msg: String, url: String } + ExperimentalUb { msg: 
String, url: String }, + Deadlock, } impl fmt::Debug for TerminationInfo { @@ -29,6 +30,8 @@ impl fmt::Debug for TerminationInfo { write!(f, "{}", msg), ExperimentalUb { msg, .. } => write!(f, "{}", msg), + Deadlock => + write!(f, "the evaluated program deadlocked"), } } } @@ -60,6 +63,7 @@ pub fn report_error<'tcx, 'mir>( "unsupported operation", ExperimentalUb { .. } => "Undefined Behavior", + Deadlock => "deadlock", }; let helps = match info { UnsupportedInIsolation(_) => diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 6ce45e3ad4..94e563353b 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -1,7 +1,5 @@ -use std::sync::atomic::{AtomicU64, Ordering}; - use rustc_middle::ty::{TyKind, TypeAndMut}; -use rustc_target::abi::{FieldsShape, LayoutOf, Size}; +use rustc_target::abi::{LayoutOf, Size}; use crate::stacked_borrows::Tag; use crate::*; @@ -102,7 +100,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx mutex_set_locked_count(this, mutex_op, Scalar::from_u32(1))?; Ok(0) } else { - throw_unsup_format!("Deadlock due to locking a PTHREAD_MUTEX_NORMAL mutex twice"); + throw_machine_stop!(TerminationInfo::Deadlock); } } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? 
{ if locked_count == 0 { @@ -404,58 +402,6 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>( // bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32 // (the kind has to be at its offset for compatibility with static initializer macros) -static LIBC_MUTEX_KIND_OFFSET_CACHE: AtomicU64 = AtomicU64::new(0); - -fn libc_mutex_kind_offset<'mir, 'tcx: 'mir>( - ecx: &mut MiriEvalContext<'mir, 'tcx>, -) -> InterpResult<'tcx, u64> { - // Check if this offset has already been found and memoized - let cached_value = LIBC_MUTEX_KIND_OFFSET_CACHE.load(Ordering::Relaxed); - if cached_value != 0 { - return Ok(cached_value); - } - - // This function infers the offset of the `kind` field of libc's pthread_mutex_t - // C struct by examining the array inside libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP. - // At time of writing, it is always all zero bytes except for a one byte at one of - // four positions, depending on the target OS's C struct layout and the endianness of the - // target architecture. This offset will then be used in getters and setters below, so that - // mutexes created from static initializers can be emulated with the correct behavior. - let initializer_path = ["libc", "PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP"]; - let initializer_instance = ecx.resolve_path(&initializer_path); - let initializer_cid = GlobalId { instance: initializer_instance, promoted: None }; - let initializer_const_val = ecx.const_eval_raw(initializer_cid)?; - let array_mplacety = ecx.mplace_field(initializer_const_val, 0)?; - let array_length = match array_mplacety.layout.fields { - FieldsShape::Array { count, .. } => count, - _ => bug!("Couldn't get array length from type {:?}", array_mplacety.layout.ty), - }; - - let kind_offset = if array_length < 20 { - bug!("libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP array was shorter than expected"); - } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 16)?.into())?.to_u8()? 
!= 0 { - // for little-endian architectures - 16 - } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 19)?.into())?.to_u8()? != 0 { - // for big-endian architectures - // (note that the i32 spans bytes 16 through 19, so the offset of the kind field is 16) - 16 - } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 12)?.into())?.to_u8()? != 0 { - // for little-endian architectures - 12 - } else if ecx.read_scalar(ecx.mplace_field(array_mplacety, 15)?.into())?.to_u8()? != 0 { - // for big-endian architectures - // (note that the i32 spans bytes 12 through 15, so the offset of the kind field is 12) - 12 - } else { - bug!("Couldn't determine offset of `kind` in pthread_mutex_t"); - }; - - // Save offset to memoization cache for future calls - LIBC_MUTEX_KIND_OFFSET_CACHE.store(kind_offset, Ordering::Relaxed); - Ok(kind_offset) -} - fn mutex_get_locked_count<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, mutex_op: OpTy<'tcx, Tag>, @@ -491,12 +437,9 @@ fn mutex_get_kind<'mir, 'tcx: 'mir>( assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; - let kind_place = mutex_place.offset( - Size::from_bytes(libc_mutex_kind_offset(ecx)?), - MemPlaceMeta::None, - i32_layout, - ecx, - )?; + let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; + let kind_place = + mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?; ecx.read_scalar(kind_place.into()) } @@ -509,12 +452,9 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; - let kind_place = mutex_place.offset( - Size::from_bytes(libc_mutex_kind_offset(ecx)?), - MemPlaceMeta::None, - i32_layout, - ecx, - )?; + let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; + let kind_place = + 
mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?; ecx.write_scalar(kind.into(), kind_place.into()) } From 7f6df15aa2215ade35992b396bb76c8ae8fcf4df Mon Sep 17 00:00:00 2001 From: David Cook Date: Sat, 28 Mar 2020 09:25:02 -0500 Subject: [PATCH 17/31] Rearrange functions --- src/shims/sync.rs | 342 +++++++++++++++++++++++----------------------- 1 file changed, 171 insertions(+), 171 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 94e563353b..4e4f8c112e 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -4,6 +4,177 @@ use rustc_target::abi::{LayoutOf, Size}; use crate::stacked_borrows::Tag; use crate::*; +fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + operand: OpTy<'tcx, Tag>, + min_size: u64, +) -> InterpResult<'tcx, ()> { + let target_ty = match operand.layout.ty.kind { + TyKind::RawPtr(TypeAndMut { ty, mutbl: _ }) => ty, + _ => panic!("Argument to pthread function was not a raw pointer"), + }; + let target_layout = ecx.layout_of(target_ty)?; + assert!(target_layout.size.bytes() >= min_size); + Ok(()) +} + +// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform. + +// Our chosen memory layout: store an i32 in the first four bytes equal to the +// corresponding libc mutex kind constant (i.e. 
PTHREAD_MUTEX_NORMAL)
+
+fn mutexattr_get_kind<'mir, 'tcx: 'mir>(
+    ecx: &MiriEvalContext<'mir, 'tcx>,
+    attr_op: OpTy<'tcx, Tag>,
+) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
+    // Ensure that the following read at an offset to the attr pointer is within bounds
+    assert_ptr_target_min_size(ecx, attr_op, 4)?;
+    let attr_place = ecx.deref_operand(attr_op)?;
+    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
+    let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
+    ecx.read_scalar(kind_place.into())
+}
+
+fn mutexattr_set_kind<'mir, 'tcx: 'mir>(
+    ecx: &mut MiriEvalContext<'mir, 'tcx>,
+    attr_op: OpTy<'tcx, Tag>,
+    kind: impl Into<ScalarMaybeUndef<Tag>>,
+) -> InterpResult<'tcx, ()> {
+    // Ensure that the following write at an offset to the attr pointer is within bounds
+    assert_ptr_target_min_size(ecx, attr_op, 4)?;
+    let attr_place = ecx.deref_operand(attr_op)?;
+    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
+    let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?;
+    ecx.write_scalar(kind.into(), kind_place.into())
+}
+
+// pthread_mutex_t is between 24 and 48 bytes, depending on the platform.
+
+// Our chosen memory layout:
+// bytes 0-3: reserved for signature on macOS
+// (need to avoid this because it is set by static initializer macros)
+// bytes 4-7: count of how many times this mutex has been locked, as a u32
+// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32
+// (the kind has to be at its offset for compatibility with static initializer macros)
+
+fn mutex_get_locked_count<'mir, 'tcx: 'mir>(
+    ecx: &MiriEvalContext<'mir, 'tcx>,
+    mutex_op: OpTy<'tcx, Tag>,
+) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
+    // Ensure that the following read at an offset to the mutex pointer is within bounds
+    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
+    let mutex_place = ecx.deref_operand(mutex_op)?;
+    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
+    let locked_count_place =
+        mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
+    ecx.read_scalar(locked_count_place.into())
+}
+
+fn mutex_set_locked_count<'mir, 'tcx: 'mir>(
+    ecx: &mut MiriEvalContext<'mir, 'tcx>,
+    mutex_op: OpTy<'tcx, Tag>,
+    locked_count: impl Into<ScalarMaybeUndef<Tag>>,
+) -> InterpResult<'tcx, ()> {
+    // Ensure that the following write at an offset to the mutex pointer is within bounds
+    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
+    let mutex_place = ecx.deref_operand(mutex_op)?;
+    let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?;
+    let locked_count_place =
+        mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?;
+    ecx.write_scalar(locked_count.into(), locked_count_place.into())
+}
+
+fn mutex_get_kind<'mir, 'tcx: 'mir>(
+    ecx: &mut MiriEvalContext<'mir, 'tcx>,
+    mutex_op: OpTy<'tcx, Tag>,
+) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
+    // Ensure that the following read at an offset to the mutex pointer is within bounds
+    assert_ptr_target_min_size(ecx, mutex_op, 20)?;
+    let mutex_place = ecx.deref_operand(mutex_op)?;
+    let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?;
+    let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } 
else { 12 }; + let kind_place = + mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?; + ecx.read_scalar(kind_place.into()) +} + +fn mutex_set_kind<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + mutex_op: OpTy<'tcx, Tag>, + kind: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the mutex pointer is within bounds + assert_ptr_target_min_size(ecx, mutex_op, 20)?; + let mutex_place = ecx.deref_operand(mutex_op)?; + let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; + let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; + let kind_place = + mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?; + ecx.write_scalar(kind.into(), kind_place.into()) +} + +// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform. + +// Our chosen memory layout: +// bytes 0-3: reserved for signature on macOS +// (need to avoid this because it is set by static initializer macros) +// bytes 4-7: reader count, as a u32 +// bytes 8-11: writer count, as a u32 + +fn rwlock_get_readers<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; + let readers_place = + rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; + ecx.read_scalar(readers_place.into()) +} + +fn rwlock_set_readers<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, + readers: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + let rwlock_place = 
ecx.deref_operand(rwlock_op)?; + let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; + let readers_place = + rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; + ecx.write_scalar(readers.into(), readers_place.into()) +} + +fn rwlock_get_writers<'mir, 'tcx: 'mir>( + ecx: &MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, +) -> InterpResult<'tcx, ScalarMaybeUndef> { + // Ensure that the following read at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; + let writers_place = + rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?; + ecx.read_scalar(writers_place.into()) +} + +fn rwlock_set_writers<'mir, 'tcx: 'mir>( + ecx: &mut MiriEvalContext<'mir, 'tcx>, + rwlock_op: OpTy<'tcx, Tag>, + writers: impl Into>, +) -> InterpResult<'tcx, ()> { + // Ensure that the following write at an offset to the rwlock pointer is within bounds + assert_ptr_target_min_size(ecx, rwlock_op, 12)?; + let rwlock_place = ecx.deref_operand(rwlock_op)?; + let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; + let writers_place = + rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?; + ecx.write_scalar(writers.into(), writers_place.into()) +} + impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {} pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> { fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { @@ -348,174 +519,3 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(0) } } - -fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>( - ecx: &MiriEvalContext<'mir, 'tcx>, - operand: OpTy<'tcx, Tag>, - min_size: u64, -) -> InterpResult<'tcx, ()> { - let target_ty = match operand.layout.ty.kind { - TyKind::RawPtr(TypeAndMut { 
ty, mutbl: _ }) => ty, - _ => panic!("Argument to pthread function was not a raw pointer"), - }; - let target_layout = ecx.layout_of(target_ty)?; - assert!(target_layout.size.bytes() >= min_size); - Ok(()) -} - -// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform. - -// Our chosen memory layout: store an i32 in the first four bytes equal to the -// corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL) - -fn mutexattr_get_kind<'mir, 'tcx: 'mir>( - ecx: &MiriEvalContext<'mir, 'tcx>, - attr_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the attr pointer is within bounds - assert_ptr_target_min_size(ecx, attr_op, 4)?; - let attr_place = ecx.deref_operand(attr_op)?; - let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; - let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?; - ecx.read_scalar(kind_place.into()) -} - -fn mutexattr_set_kind<'mir, 'tcx: 'mir>( - ecx: &mut MiriEvalContext<'mir, 'tcx>, - attr_op: OpTy<'tcx, Tag>, - kind: impl Into>, -) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the attr pointer is within bounds - assert_ptr_target_min_size(ecx, attr_op, 4)?; - let attr_place = ecx.deref_operand(attr_op)?; - let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; - let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?; - ecx.write_scalar(kind.into(), kind_place.into()) -} - -// pthread_mutex_t is between 24 and 48 bytes, depending on the platform. 
- -// Our chosen memory layout: -// bytes 0-3: reserved for signature on macOS -// (need to avoid this because it is set by static initializer macros) -// bytes 4-7: count of how many times this mutex has been locked, as a u32 -// bytes 12-15 or 16-19 (depending on platform): mutex kind, as an i32 -// (the kind has to be at its offset for compatibility with static initializer macros) - -fn mutex_get_locked_count<'mir, 'tcx: 'mir>( - ecx: &MiriEvalContext<'mir, 'tcx>, - mutex_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 20)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; - let locked_count_place = - mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; - ecx.read_scalar(locked_count_place.into()) -} - -fn mutex_set_locked_count<'mir, 'tcx: 'mir>( - ecx: &mut MiriEvalContext<'mir, 'tcx>, - mutex_op: OpTy<'tcx, Tag>, - locked_count: impl Into>, -) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 20)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; - let locked_count_place = - mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; - ecx.write_scalar(locked_count.into(), locked_count_place.into()) -} - -fn mutex_get_kind<'mir, 'tcx: 'mir>( - ecx: &mut MiriEvalContext<'mir, 'tcx>, - mutex_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 20)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; - let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } 
else { 12 }; - let kind_place = - mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?; - ecx.read_scalar(kind_place.into()) -} - -fn mutex_set_kind<'mir, 'tcx: 'mir>( - ecx: &mut MiriEvalContext<'mir, 'tcx>, - mutex_op: OpTy<'tcx, Tag>, - kind: impl Into>, -) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the mutex pointer is within bounds - assert_ptr_target_min_size(ecx, mutex_op, 20)?; - let mutex_place = ecx.deref_operand(mutex_op)?; - let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; - let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; - let kind_place = - mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?; - ecx.write_scalar(kind.into(), kind_place.into()) -} - -// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform. - -// Our chosen memory layout: -// bytes 0-3: reserved for signature on macOS -// (need to avoid this because it is set by static initializer macros) -// bytes 4-7: reader count, as a u32 -// bytes 8-11: writer count, as a u32 - -fn rwlock_get_readers<'mir, 'tcx: 'mir>( - ecx: &MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 12)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; - let readers_place = - rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; - ecx.read_scalar(readers_place.into()) -} - -fn rwlock_set_readers<'mir, 'tcx: 'mir>( - ecx: &mut MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, - readers: impl Into>, -) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 12)?; - let rwlock_place = 
ecx.deref_operand(rwlock_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; - let readers_place = - rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; - ecx.write_scalar(readers.into(), readers_place.into()) -} - -fn rwlock_get_writers<'mir, 'tcx: 'mir>( - ecx: &MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, -) -> InterpResult<'tcx, ScalarMaybeUndef> { - // Ensure that the following read at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 12)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; - let writers_place = - rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?; - ecx.read_scalar(writers_place.into()) -} - -fn rwlock_set_writers<'mir, 'tcx: 'mir>( - ecx: &mut MiriEvalContext<'mir, 'tcx>, - rwlock_op: OpTy<'tcx, Tag>, - writers: impl Into>, -) -> InterpResult<'tcx, ()> { - // Ensure that the following write at an offset to the rwlock pointer is within bounds - assert_ptr_target_min_size(ecx, rwlock_op, 12)?; - let rwlock_place = ecx.deref_operand(rwlock_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; - let writers_place = - rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?; - ecx.write_scalar(writers.into(), writers_place.into()) -} From bb06a0cf0e60ef09782a41b76d5204498355193a Mon Sep 17 00:00:00 2001 From: David Cook Date: Sat, 28 Mar 2020 09:35:51 -0500 Subject: [PATCH 18/31] Restrict mutex static initializer test to Linux On macOS, libc does not have a static initializer for recursive mutexes --- tests/run-pass/sync.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index 025ae81372..24d7b0be53 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -10,10 +10,13 @@ fn main() { { test_mutex_libc_init_recursive(); test_mutex_libc_init_normal(); - 
test_mutex_libc_static_initializer_recursive(); test_rwlock_stdlib(); test_rwlock_libc_static_initializer(); } + #[cfg(target_os = "linux")] + { + test_mutex_libc_static_initializer_recursive(); + } } fn test_mutex_stdlib() { @@ -64,7 +67,7 @@ fn test_mutex_libc_init_normal() { } } -#[cfg(not(target_os = "windows"))] +#[cfg(target_os = "linux")] fn test_mutex_libc_static_initializer_recursive() { let mutex = std::cell::UnsafeCell::new(libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP); unsafe { From 37ddde9f70237a05dfdfb0aba837b8704a5dc7d0 Mon Sep 17 00:00:00 2001 From: David Cook Date: Sat, 28 Mar 2020 10:16:08 -0500 Subject: [PATCH 19/31] Implement TryEnterCriticalSection --- src/shims/foreign_items/windows.rs | 5 +++++ tests/run-pass/sync.rs | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/shims/foreign_items/windows.rs b/src/shims/foreign_items/windows.rs index cfc94bfd9b..3c819fddc4 100644 --- a/src/shims/foreign_items/windows.rs +++ b/src/shims/foreign_items/windows.rs @@ -233,6 +233,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx // (Windows locks are reentrant, and we have only 1 thread, // so not doing any futher checks here is at least not incorrect.) 
} + "TryEnterCriticalSection" if this.frame().instance.to_string().starts_with("std::sys::windows::") + => { + // There is only one thread, so this always succeeds and returns TRUE + this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?; + } _ => throw_unsup_format!("can't call foreign function: {}", link_name), } diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index 24d7b0be53..0ddf429fad 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -1,6 +1,6 @@ #![feature(rustc_private)] -use std::sync::{Mutex, RwLock, TryLockError}; +use std::sync::{Mutex, TryLockError}; extern crate libc; @@ -86,6 +86,7 @@ fn test_mutex_libc_static_initializer_recursive() { #[cfg(not(target_os = "windows"))] fn test_rwlock_stdlib() { + use std::sync::RwLock; let rw = RwLock::new(0); { let _read_guard = rw.read().unwrap(); From e1a1592991e0432c1e591e92e274f9943e690e3f Mon Sep 17 00:00:00 2001 From: David Cook Date: Sat, 28 Mar 2020 10:58:36 -0500 Subject: [PATCH 20/31] Set some explicit return value sizes --- src/shims/foreign_items/posix.rs | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index 2b9e94ba11..fbf8a3b950 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -272,72 +272,72 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx "pthread_mutexattr_init" => { let result = this.pthread_mutexattr_init(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_mutexattr_settype" => { let result = this.pthread_mutexattr_settype(args[0], args[1])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_mutexattr_destroy" => { let result = this.pthread_mutexattr_destroy(args[0])?; - 
this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_mutex_init" => { let result = this.pthread_mutex_init(args[0], args[1])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_mutex_lock" => { let result = this.pthread_mutex_lock(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_mutex_trylock" => { let result = this.pthread_mutex_trylock(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_mutex_unlock" => { let result = this.pthread_mutex_unlock(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_mutex_destroy" => { let result = this.pthread_mutex_destroy(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_rwlock_rdlock" => { let result = this.pthread_rwlock_rdlock(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_rwlock_tryrdlock" => { let result = this.pthread_rwlock_tryrdlock(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_rwlock_wrlock" => { let result = this.pthread_rwlock_wrlock(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_rwlock_trywrlock" => { let result = this.pthread_rwlock_trywrlock(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), 
dest)?; } "pthread_rwlock_unlock" => { let result = this.pthread_rwlock_unlock(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } "pthread_rwlock_destroy" => { let result = this.pthread_rwlock_destroy(args[0])?; - this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(result), dest)?; } | "signal" From 8293d80b53a60121961027c3ba8e29823b153179 Mon Sep 17 00:00:00 2001 From: David Cook Date: Sat, 28 Mar 2020 11:14:50 -0500 Subject: [PATCH 21/31] Set explicit return value size for windows shim --- src/shims/foreign_items/windows.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shims/foreign_items/windows.rs b/src/shims/foreign_items/windows.rs index 3c819fddc4..443d44fae1 100644 --- a/src/shims/foreign_items/windows.rs +++ b/src/shims/foreign_items/windows.rs @@ -236,7 +236,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx "TryEnterCriticalSection" if this.frame().instance.to_string().starts_with("std::sys::windows::") => { // There is only one thread, so this always succeeds and returns TRUE - this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?; + this.write_scalar(Scalar::from_i32(1), dest)?; } _ => throw_unsup_format!("can't call foreign function: {}", link_name), From ac8c98da8e806a7e18dcc0cee8201085e5c7abb6 Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 29 Mar 2020 01:38:34 -0500 Subject: [PATCH 22/31] Store layouts of i32 and u32 inside Evaluator --- src/lib.rs | 5 +++-- src/machine.rs | 53 ++++++++++++++++++++++++++++++++++++++++++++++- src/shims/sync.rs | 42 ++++++++++++++++++------------------- 3 files changed, 75 insertions(+), 25 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 2f381b4a34..82ac2e8d21 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -51,8 +51,9 @@ pub use crate::diagnostics::{ pub use crate::eval::{create_ecx, eval_main, 
MiriConfig}; pub use crate::helpers::EvalContextExt as HelpersEvalContextExt; pub use crate::machine::{ - AllocExtra, Evaluator, FrameData, MemoryExtra, MiriEvalContext, MiriEvalContextExt, - MiriMemoryKind, NUM_CPUS, PAGE_SIZE, STACK_ADDR, STACK_SIZE, + AllocExtra, EvalContextExt as MachineEvalContextExt, Evaluator, FrameData, MemoryExtra, + MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, NUM_CPUS, PAGE_SIZE, STACK_ADDR, + STACK_SIZE, }; pub use crate::mono_hash_map::MonoHashMap; pub use crate::operator::EvalContextExt as OperatorEvalContextExt; diff --git a/src/machine.rs b/src/machine.rs index f794453228..a60ae8a4be 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -11,7 +11,7 @@ use log::trace; use rand::rngs::StdRng; use rustc_data_structures::fx::FxHashMap; -use rustc_middle::{mir, ty}; +use rustc_middle::{mir, ty::{self, layout::TyAndLayout}}; use rustc_target::abi::{LayoutOf, Size}; use rustc_ast::attr; use rustc_span::symbol::{sym, Symbol}; @@ -146,6 +146,39 @@ impl MemoryExtra { } } +/// Cached layouts of primitive types +#[derive(Default)] +struct PrimitiveLayouts<'tcx> { + i32: RefCell>>, + u32: RefCell>>, +} + +impl<'mir, 'tcx: 'mir> PrimitiveLayouts<'tcx> { + fn i32(&self, ecx: &MiriEvalContext<'mir, 'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> { + { + let layout_ref = self.i32.borrow(); + if layout_ref.is_some() { + return Ok(layout_ref.unwrap()); + } + } + let layout = ecx.layout_of(ecx.tcx.types.i32)?; + *self.i32.borrow_mut() = Some(layout); + Ok(layout) + } + + fn u32(&self, ecx: &MiriEvalContext<'mir, 'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> { + { + let layout_ref = self.u32.borrow(); + if layout_ref.is_some() { + return Ok(layout_ref.unwrap()); + } + } + let layout = ecx.layout_of(ecx.tcx.types.u32)?; + *self.u32.borrow_mut() = Some(layout); + Ok(layout) + } +} + /// The machine itself. pub struct Evaluator<'tcx> { /// Environment variables set by `setenv`. 
@@ -182,6 +215,9 @@ pub struct Evaluator<'tcx> { /// The "time anchor" for this machine's monotone clock (for `Instant` simulation). pub(crate) time_anchor: Instant, + + /// Cached `TyLayout`s for primitive data types that are commonly used inside Miri. + primitive_layouts: PrimitiveLayouts<'tcx>, } impl<'tcx> Evaluator<'tcx> { @@ -201,6 +237,7 @@ impl<'tcx> Evaluator<'tcx> { dir_handler: Default::default(), panic_payload: None, time_anchor: Instant::now(), + primitive_layouts: PrimitiveLayouts::default(), } } } @@ -224,6 +261,20 @@ impl<'mir, 'tcx> MiriEvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> } } +impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {} +/// Provides convenience methods for use elsewhere +pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { + fn i32_layout(&self) -> InterpResult<'tcx, TyAndLayout<'tcx>> { + let this = self.eval_context_ref(); + this.machine.primitive_layouts.i32(this) + } + + fn u32_layout(&self) -> InterpResult<'tcx, TyAndLayout<'tcx>> { + let this = self.eval_context_ref(); + this.machine.primitive_layouts.u32(this) + } +} + /// Machine hook implementations. 
impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> { type MemoryKind = MiriMemoryKind; diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 4e4f8c112e..eac2053493 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -30,8 +30,7 @@ fn mutexattr_get_kind<'mir, 'tcx: 'mir>( // Ensure that the following read at an offset to the attr pointer is within bounds assert_ptr_target_min_size(ecx, attr_op, 4)?; let attr_place = ecx.deref_operand(attr_op)?; - let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; - let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?; + let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?; ecx.read_scalar(kind_place.into()) } @@ -43,8 +42,7 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>( // Ensure that the following write at an offset to the attr pointer is within bounds assert_ptr_target_min_size(ecx, attr_op, 4)?; let attr_place = ecx.deref_operand(attr_op)?; - let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; - let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, i32_layout, ecx)?; + let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?; ecx.write_scalar(kind.into(), kind_place.into()) } @@ -64,9 +62,8 @@ fn mutex_get_locked_count<'mir, 'tcx: 'mir>( // Ensure that the following read at an offset to the mutex pointer is within bounds assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; let locked_count_place = - mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; + mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; ecx.read_scalar(locked_count_place.into()) } @@ -78,9 +75,8 @@ fn mutex_set_locked_count<'mir, 'tcx: 'mir>( // Ensure that the following write at an offset to the mutex pointer is within bounds assert_ptr_target_min_size(ecx, mutex_op, 20)?; let 
mutex_place = ecx.deref_operand(mutex_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; let locked_count_place = - mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; + mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; ecx.write_scalar(locked_count.into(), locked_count_place.into()) } @@ -91,10 +87,13 @@ fn mutex_get_kind<'mir, 'tcx: 'mir>( // Ensure that the following read at an offset to the mutex pointer is within bounds assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; - let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; - let kind_place = - mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?; + let kind_place = mutex_place.offset( + Size::from_bytes(kind_offset), + MemPlaceMeta::None, + ecx.i32_layout()?, + ecx, + )?; ecx.read_scalar(kind_place.into()) } @@ -106,10 +105,13 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( // Ensure that the following write at an offset to the mutex pointer is within bounds assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; - let i32_layout = ecx.layout_of(ecx.tcx.types.i32)?; let kind_offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 }; - let kind_place = - mutex_place.offset(Size::from_bytes(kind_offset), MemPlaceMeta::None, i32_layout, ecx)?; + let kind_place = mutex_place.offset( + Size::from_bytes(kind_offset), + MemPlaceMeta::None, + ecx.i32_layout()?, + ecx, + )?; ecx.write_scalar(kind.into(), kind_place.into()) } @@ -128,9 +130,8 @@ fn rwlock_get_readers<'mir, 'tcx: 'mir>( // Ensure that the following read at an offset to the rwlock pointer is within bounds assert_ptr_target_min_size(ecx, rwlock_op, 12)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; let readers_place = - 
rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; + rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; ecx.read_scalar(readers_place.into()) } @@ -142,9 +143,8 @@ fn rwlock_set_readers<'mir, 'tcx: 'mir>( // Ensure that the following write at an offset to the rwlock pointer is within bounds assert_ptr_target_min_size(ecx, rwlock_op, 12)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; let readers_place = - rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, u32_layout, ecx)?; + rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; ecx.write_scalar(readers.into(), readers_place.into()) } @@ -155,9 +155,8 @@ fn rwlock_get_writers<'mir, 'tcx: 'mir>( // Ensure that the following read at an offset to the rwlock pointer is within bounds assert_ptr_target_min_size(ecx, rwlock_op, 12)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; let writers_place = - rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?; + rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; ecx.read_scalar(writers_place.into()) } @@ -169,9 +168,8 @@ fn rwlock_set_writers<'mir, 'tcx: 'mir>( // Ensure that the following write at an offset to the rwlock pointer is within bounds assert_ptr_target_min_size(ecx, rwlock_op, 12)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; - let u32_layout = ecx.layout_of(ecx.tcx.types.u32)?; let writers_place = - rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, u32_layout, ecx)?; + rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; ecx.write_scalar(writers.into(), writers_place.into()) } From 79f3307f308ac1d9304437509db0f8a4a295d63b Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 5 Apr 2020 12:09:31 -0500 Subject: [PATCH 23/31] Update 
comments, rearrange code --- src/machine.rs | 2 + src/shims/foreign_items/posix.rs | 88 +++++++++++++---------------- src/shims/sync.rs | 9 +-- tests/run-pass/libc.rs | 85 ++++++++++++++++++++++++++++ tests/run-pass/reentrant-println.rs | 2 +- tests/run-pass/sync.rs | 88 ----------------------------- 6 files changed, 131 insertions(+), 143 deletions(-) diff --git a/src/machine.rs b/src/machine.rs index a60ae8a4be..bfb832085e 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -217,6 +217,8 @@ pub struct Evaluator<'tcx> { pub(crate) time_anchor: Instant, /// Cached `TyLayout`s for primitive data types that are commonly used inside Miri. + /// FIXME: Search through the rest of the codebase for more layout_of() calls that + /// could be cached here. primitive_layouts: PrimitiveLayouts<'tcx>, } diff --git a/src/shims/foreign_items/posix.rs b/src/shims/foreign_items/posix.rs index fbf8a3b950..3ececb9c20 100644 --- a/src/shims/foreign_items/posix.rs +++ b/src/shims/foreign_items/posix.rs @@ -233,113 +233,101 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_null(dest)?; } - // Better error for attempts to create a thread - "pthread_create" => { - throw_unsup_format!("Miri does not support threading"); - } - - // Miscellaneous - "isatty" => { - let _fd = this.read_scalar(args[0])?.to_i32()?; - // "returns 1 if fd is an open file descriptor referring to a terminal; otherwise 0 is returned, and errno is set to indicate the error" - // FIXME: we just say nothing is a terminal. - let enotty = this.eval_libc("ENOTTY")?; - this.set_last_error(enotty)?; - this.write_null(dest)?; - } - "pthread_atfork" => { - let _prepare = this.read_scalar(args[0])?.not_undef()?; - let _parent = this.read_scalar(args[1])?.not_undef()?; - let _child = this.read_scalar(args[1])?.not_undef()?; - // We do not support forking, so there is nothing to do here. 
- this.write_null(dest)?; - } - - // Incomplete shims that we "stub out" just to get pre-main initialization code to work. - // These shims are enabled only when the caller is in the standard library. - | "pthread_attr_init" - | "pthread_attr_destroy" - | "pthread_self" - | "pthread_attr_setstacksize" - | "pthread_condattr_init" - | "pthread_condattr_setclock" - | "pthread_cond_init" - | "pthread_condattr_destroy" - | "pthread_cond_destroy" if this.frame().instance.to_string().starts_with("std::sys::unix::") - => { - this.write_null(dest)?; - } - + // Synchronization primitives "pthread_mutexattr_init" => { let result = this.pthread_mutexattr_init(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_mutexattr_settype" => { let result = this.pthread_mutexattr_settype(args[0], args[1])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_mutexattr_destroy" => { let result = this.pthread_mutexattr_destroy(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_mutex_init" => { let result = this.pthread_mutex_init(args[0], args[1])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_mutex_lock" => { let result = this.pthread_mutex_lock(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_mutex_trylock" => { let result = this.pthread_mutex_trylock(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_mutex_unlock" => { let result = this.pthread_mutex_unlock(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_mutex_destroy" => { let result = this.pthread_mutex_destroy(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_rwlock_rdlock" => { let result = this.pthread_rwlock_rdlock(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_rwlock_tryrdlock" => { let result = this.pthread_rwlock_tryrdlock(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_rwlock_wrlock" => 
{ let result = this.pthread_rwlock_wrlock(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_rwlock_trywrlock" => { let result = this.pthread_rwlock_trywrlock(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_rwlock_unlock" => { let result = this.pthread_rwlock_unlock(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } - "pthread_rwlock_destroy" => { let result = this.pthread_rwlock_destroy(args[0])?; this.write_scalar(Scalar::from_i32(result), dest)?; } + // Better error for attempts to create a thread + "pthread_create" => { + throw_unsup_format!("Miri does not support threading"); + } + + // Miscellaneous + "isatty" => { + let _fd = this.read_scalar(args[0])?.to_i32()?; + // "returns 1 if fd is an open file descriptor referring to a terminal; otherwise 0 is returned, and errno is set to indicate the error" + // FIXME: we just say nothing is a terminal. + let enotty = this.eval_libc("ENOTTY")?; + this.set_last_error(enotty)?; + this.write_null(dest)?; + } + "pthread_atfork" => { + let _prepare = this.read_scalar(args[0])?.not_undef()?; + let _parent = this.read_scalar(args[1])?.not_undef()?; + let _child = this.read_scalar(args[1])?.not_undef()?; + // We do not support forking, so there is nothing to do here. + this.write_null(dest)?; + } + + // Incomplete shims that we "stub out" just to get pre-main initialization code to work. + // These shims are enabled only when the caller is in the standard library. 
+ | "pthread_attr_init" + | "pthread_attr_destroy" + | "pthread_self" + | "pthread_attr_setstacksize" + | "pthread_condattr_init" + | "pthread_condattr_setclock" + | "pthread_cond_init" + | "pthread_condattr_destroy" + | "pthread_cond_destroy" if this.frame().instance.to_string().starts_with("std::sys::unix::") + => { + this.write_null(dest)?; + } + | "signal" | "sigaction" | "sigaltstack" diff --git a/src/shims/sync.rs b/src/shims/sync.rs index eac2053493..c2ea02af5b 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -20,8 +20,9 @@ fn assert_ptr_target_min_size<'mir, 'tcx: 'mir>( // pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform. -// Our chosen memory layout: store an i32 in the first four bytes equal to the -// corresponding libc mutex kind constant (i.e. PTHREAD_MUTEX_NORMAL) +// Our chosen memory layout for emulation (does not have to match the platform layout!): +// store an i32 in the first four bytes equal to the corresponding libc mutex kind constant +// (e.g. PTHREAD_MUTEX_NORMAL). fn mutexattr_get_kind<'mir, 'tcx: 'mir>( ecx: &MiriEvalContext<'mir, 'tcx>, @@ -48,7 +49,7 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>( // pthread_mutex_t is between 24 and 48 bytes, depending on the platform. -// Our chosen memory layout: +// Our chosen memory layout for the emulated mutex (does not have to match the platform layout!): // bytes 0-3: reserved for signature on macOS // (need to avoid this because it is set by static initializer macros) // bytes 4-7: count of how many times this mutex has been locked, as a u32 @@ -117,7 +118,7 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( // pthread_rwlock_t is between 32 and 56 bytes, depending on the platform. 
-// Our chosen memory layout: +// Our chosen memory layout for the emulated rwlock (does not have to match the platform layout!): // bytes 0-3: reserved for signature on macOS // (need to avoid this because it is set by static initializer macros) // bytes 4-7: reader count, as a u32 diff --git a/tests/run-pass/libc.rs b/tests/run-pass/libc.rs index 064c00e81b..7ea793089d 100644 --- a/tests/run-pass/libc.rs +++ b/tests/run-pass/libc.rs @@ -42,7 +42,92 @@ fn test_posix_fadvise() { assert_eq!(result, 0); } +fn test_mutex_libc_init_recursive() { + unsafe { + let mut attr: libc::pthread_mutexattr_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutexattr_init(&mut attr as *mut _), 0); + assert_eq!(libc::pthread_mutexattr_settype(&mut attr as *mut _, libc::PTHREAD_MUTEX_RECURSIVE), 0); + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mut attr as *mut _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), libc::EPERM); + assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutexattr_destroy(&mut attr as *mut _), 0); + } +} + +fn test_mutex_libc_init_normal() { + unsafe { + let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL), 0); + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + 
assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); + } +} + +// Only linux provides PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, +// libc for macOS just has the default PTHREAD_MUTEX_INITIALIZER. +#[cfg(target_os = "linux")] +fn test_mutex_libc_static_initializer_recursive() { + let mutex = std::cell::UnsafeCell::new(libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP); + unsafe { + assert_eq!(libc::pthread_mutex_lock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_trylock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_trylock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_lock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); + assert_eq!(libc::pthread_mutex_unlock(mutex.get()), libc::EPERM); + assert_eq!(libc::pthread_mutex_destroy(mutex.get()), 0); + } +} + +// Testing the behavior of std::sync::RwLock does not fully exercise the pthread rwlock shims, we +// need to go a layer deeper and test the behavior of the libc functions, because +// std::sys::unix::rwlock::RWLock itself keeps track of write_locked and num_readers. 
+fn test_rwlock_libc_static_initializer() { + let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); + unsafe { + assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + + assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + + assert_eq!(libc::pthread_rwlock_destroy(rw.get()), 0); + } +} + fn main() { #[cfg(not(target_os = "macos"))] test_posix_fadvise(); + + test_mutex_libc_init_recursive(); + test_mutex_libc_init_normal(); + test_rwlock_libc_static_initializer(); + + #[cfg(target_os = "linux")] + test_mutex_libc_static_initializer_recursive(); } diff --git a/tests/run-pass/reentrant-println.rs b/tests/run-pass/reentrant-println.rs index 09c4fc3f74..e73e82b8ec 100644 --- a/tests/run-pass/reentrant-println.rs +++ b/tests/run-pass/reentrant-println.rs @@ -1,7 +1,7 @@ use std::fmt::{Display, Error, Formatter}; // This test case exercises std::sys_common::remutex::ReentrantMutex -// by calling println!() from inside fmt +// by calling println!() from inside fmt. 
struct InterruptingCow; diff --git a/tests/run-pass/sync.rs b/tests/run-pass/sync.rs index 0ddf429fad..1ede5d42bb 100644 --- a/tests/run-pass/sync.rs +++ b/tests/run-pass/sync.rs @@ -2,20 +2,11 @@ use std::sync::{Mutex, TryLockError}; -extern crate libc; - fn main() { test_mutex_stdlib(); #[cfg(not(target_os = "windows"))] // TODO: implement RwLock on Windows { - test_mutex_libc_init_recursive(); - test_mutex_libc_init_normal(); test_rwlock_stdlib(); - test_rwlock_libc_static_initializer(); - } - #[cfg(target_os = "linux")] - { - test_mutex_libc_static_initializer_recursive(); } } @@ -29,61 +20,6 @@ fn test_mutex_stdlib() { drop(m); } -#[cfg(not(target_os = "windows"))] -fn test_mutex_libc_init_recursive() { - unsafe { - let mut attr: libc::pthread_mutexattr_t = std::mem::zeroed(); - assert_eq!(libc::pthread_mutexattr_init(&mut attr as *mut _), 0); - assert_eq!(libc::pthread_mutexattr_settype(&mut attr as *mut _, libc::PTHREAD_MUTEX_RECURSIVE), 0); - let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); - assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mut attr as *mut _), 0); - assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), libc::EPERM); - assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutexattr_destroy(&mut attr as *mut _), 0); - } -} - -#[cfg(not(target_os = "windows"))] -fn test_mutex_libc_init_normal() { - unsafe { - let mut mutexattr: libc::pthread_mutexattr_t 
= std::mem::zeroed(); - assert_eq!(libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL), 0); - let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); - assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); - assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); - assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); - } -} - -#[cfg(target_os = "linux")] -fn test_mutex_libc_static_initializer_recursive() { - let mutex = std::cell::UnsafeCell::new(libc::PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP); - unsafe { - assert_eq!(libc::pthread_mutex_lock(mutex.get()), 0); - assert_eq!(libc::pthread_mutex_trylock(mutex.get()), 0); - assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); - assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); - assert_eq!(libc::pthread_mutex_trylock(mutex.get()), 0); - assert_eq!(libc::pthread_mutex_lock(mutex.get()), 0); - assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); - assert_eq!(libc::pthread_mutex_unlock(mutex.get()), 0); - assert_eq!(libc::pthread_mutex_unlock(mutex.get()), libc::EPERM); - assert_eq!(libc::pthread_mutex_destroy(mutex.get()), 0); - } -} - #[cfg(not(target_os = "windows"))] fn test_rwlock_stdlib() { use std::sync::RwLock; @@ -102,30 +38,6 @@ fn test_rwlock_stdlib() { } } -// need to go a layer deeper and test the behavior of libc functions, because -// std::sys::unix::rwlock::RWLock keeps track of write_locked and num_readers - -#[cfg(not(target_os = "windows"))] -fn test_rwlock_libc_static_initializer() { - let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); - unsafe { - assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); - 
assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); - assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); - - assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); - assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), libc::EBUSY); - assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); - assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); - - assert_eq!(libc::pthread_rwlock_destroy(rw.get()), 0); - } -} - trait TryLockErrorExt { fn would_block(&self) -> bool; } From 100141f57c27d0b282aec6156d60ab9d26583a47 Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 5 Apr 2020 12:32:09 -0500 Subject: [PATCH 24/31] Remove null checks, fall through to UB upon deref --- src/shims/sync.rs | 70 ----------------------------------------------- 1 file changed, 70 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index c2ea02af5b..c9d846288a 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -179,11 +179,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutexattr_init(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let attr = this.read_scalar(attr_op)?.not_undef()?; - if this.is_null(attr)? { - return this.eval_libc_i32("EINVAL"); - } - let default_kind = this.eval_libc("PTHREAD_MUTEX_DEFAULT")?; mutexattr_set_kind(this, attr_op, default_kind)?; @@ -197,11 +192,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let attr = this.read_scalar(attr_op)?.not_undef()?; - if this.is_null(attr)? 
{ - return this.eval_libc_i32("EINVAL"); - } - let kind = this.read_scalar(kind_op)?.not_undef()?; if kind == this.eval_libc("PTHREAD_MUTEX_NORMAL")? || kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? @@ -219,11 +209,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutexattr_destroy(&mut self, attr_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let attr = this.read_scalar(attr_op)?.not_undef()?; - if this.is_null(attr)? { - return this.eval_libc_i32("EINVAL"); - } - mutexattr_set_kind(this, attr_op, ScalarMaybeUndef::Undef)?; Ok(0) @@ -236,11 +221,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx ) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let mutex = this.read_scalar(mutex_op)?.not_undef()?; - if this.is_null(mutex)? { - return this.eval_libc_i32("EINVAL"); - } - let attr = this.read_scalar(attr_op)?.not_undef()?; let kind = if this.is_null(attr)? { this.eval_libc("PTHREAD_MUTEX_DEFAULT")? @@ -257,11 +237,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_lock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let mutex = this.read_scalar(mutex_op)?.not_undef()?; - if this.is_null(mutex)? { - return this.eval_libc_i32("EINVAL"); - } - let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; @@ -295,11 +270,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_trylock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let mutex = this.read_scalar(mutex_op)?.not_undef()?; - if this.is_null(mutex)? 
{ - return this.eval_libc_i32("EINVAL"); - } - let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; @@ -328,11 +298,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_unlock(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let mutex = this.read_scalar(mutex_op)?.not_undef()?; - if this.is_null(mutex)? { - return this.eval_libc_i32("EINVAL"); - } - let kind = mutex_get_kind(this, mutex_op)?.not_undef()?; let locked_count = mutex_get_locked_count(this, mutex_op)?.to_u32()?; @@ -371,11 +336,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_mutex_destroy(&mut self, mutex_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let mutex = this.read_scalar(mutex_op)?.not_undef()?; - if this.is_null(mutex)? { - return this.eval_libc_i32("EINVAL"); - } - if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 { return this.eval_libc_i32("EBUSY"); } @@ -389,11 +349,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_rdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; - if this.is_null(rwlock)? { - return this.eval_libc_i32("EINVAL"); - } - let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if writers != 0 { @@ -414,11 +369,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; - if this.is_null(rwlock)? 
{ - return this.eval_libc_i32("EINVAL"); - } - let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if writers != 0 { @@ -437,11 +387,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_wrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; - if this.is_null(rwlock)? { - return this.eval_libc_i32("EINVAL"); - } - let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if readers != 0 { @@ -461,11 +406,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_trywrlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; - if this.is_null(rwlock)? { - return this.eval_libc_i32("EINVAL"); - } - let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if readers != 0 || writers != 0 { @@ -479,11 +419,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_unlock(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; - if this.is_null(rwlock)? 
{ - return this.eval_libc_i32("EINVAL"); - } - let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if let Some(new_readers) = readers.checked_sub(1) { @@ -500,11 +435,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - let rwlock = this.read_scalar(rwlock_op)?.not_undef()?; - if this.is_null(rwlock)? { - return this.eval_libc_i32("EINVAL"); - } - if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 { return this.eval_libc_i32("EBUSY"); } From e7944419d4b7403c51028204ec5c4c53e776e94a Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 5 Apr 2020 12:44:23 -0500 Subject: [PATCH 25/31] Use Deadlock machine stop uniformly --- src/shims/sync.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index c9d846288a..90d7104b9e 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -352,9 +352,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if writers != 0 { - throw_unsup_format!( - "Deadlock due to read-locking a pthreads read-write lock while it is already write-locked" - ); + throw_machine_stop!(TerminationInfo::Deadlock); } else { match readers.checked_add(1) { Some(new_readers) => { @@ -390,13 +388,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let readers = rwlock_get_readers(this, rwlock_op)?.to_u32()?; let writers = rwlock_get_writers(this, rwlock_op)?.to_u32()?; if readers != 0 { - throw_unsup_format!( - "Deadlock due to write-locking a pthreads read-write lock while it is already read-locked" - ); + throw_machine_stop!(TerminationInfo::Deadlock); } else if writers 
!= 0 { - throw_unsup_format!( - "Deadlock due to write-locking a pthreads read-write lock while it is already write-locked" - ); + throw_machine_stop!(TerminationInfo::Deadlock); } else { rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?; Ok(0) From d5d5a569264d6ca18ff4d4648d62a81ce85114f7 Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 5 Apr 2020 13:25:49 -0500 Subject: [PATCH 26/31] Add tests --- .../libc_pthread_mutex_normal_deadlock.rs | 16 ++++++++++++++++ .../libc_pthread_rwlock_read_write_deadlock.rs | 13 +++++++++++++ .../libc_pthread_rwlock_write_read_deadlock.rs | 13 +++++++++++++ ...libc_pthread_rwlock_write_write_deadlock.rs | 13 +++++++++++++ tests/run-pass/libc.rs | 18 ++++++++++++++++++ 5 files changed, 73 insertions(+) create mode 100644 tests/compile-fail/libc_pthread_mutex_normal_deadlock.rs create mode 100644 tests/compile-fail/libc_pthread_rwlock_read_write_deadlock.rs create mode 100644 tests/compile-fail/libc_pthread_rwlock_write_read_deadlock.rs create mode 100644 tests/compile-fail/libc_pthread_rwlock_write_write_deadlock.rs diff --git a/tests/compile-fail/libc_pthread_mutex_normal_deadlock.rs b/tests/compile-fail/libc_pthread_mutex_normal_deadlock.rs new file mode 100644 index 0000000000..7034bf64ec --- /dev/null +++ b/tests/compile-fail/libc_pthread_mutex_normal_deadlock.rs @@ -0,0 +1,16 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + unsafe { + let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL), 0); + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + libc::pthread_mutex_lock(&mut mutex as *mut _); //~ ERROR deadlock + } +} diff --git 
a/tests/compile-fail/libc_pthread_rwlock_read_write_deadlock.rs b/tests/compile-fail/libc_pthread_rwlock_read_write_deadlock.rs new file mode 100644 index 0000000000..dd4707d60e --- /dev/null +++ b/tests/compile-fail/libc_pthread_rwlock_read_write_deadlock.rs @@ -0,0 +1,13 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); + unsafe { + assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); + libc::pthread_rwlock_wrlock(rw.get()); //~ ERROR: deadlock + } +} diff --git a/tests/compile-fail/libc_pthread_rwlock_write_read_deadlock.rs b/tests/compile-fail/libc_pthread_rwlock_write_read_deadlock.rs new file mode 100644 index 0000000000..1b460e7174 --- /dev/null +++ b/tests/compile-fail/libc_pthread_rwlock_write_read_deadlock.rs @@ -0,0 +1,13 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); + unsafe { + assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); + libc::pthread_rwlock_rdlock(rw.get()); //~ ERROR: deadlock + } +} diff --git a/tests/compile-fail/libc_pthread_rwlock_write_write_deadlock.rs b/tests/compile-fail/libc_pthread_rwlock_write_write_deadlock.rs new file mode 100644 index 0000000000..cc327ec46b --- /dev/null +++ b/tests/compile-fail/libc_pthread_rwlock_write_write_deadlock.rs @@ -0,0 +1,13 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); + unsafe { + assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); + libc::pthread_rwlock_wrlock(rw.get()); //~ ERROR: deadlock + } +} diff --git a/tests/run-pass/libc.rs b/tests/run-pass/libc.rs index 7ea793089d..c930a034b1 100644 --- a/tests/run-pass/libc.rs +++ b/tests/run-pass/libc.rs @@ -78,6 +78,23 @@ fn 
test_mutex_libc_init_normal() { } } +fn test_mutex_libc_init_errorcheck() { + unsafe { + let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_ERRORCHECK), 0); + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), libc::EDEADLK); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), libc::EPERM); + assert_eq!(libc::pthread_mutex_destroy(&mut mutex as *mut _), 0); + } +} + // Only linux provides PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, // libc for macOS just has the default PTHREAD_MUTEX_INITIALIZER. 
#[cfg(target_os = "linux")] @@ -126,6 +143,7 @@ fn main() { test_mutex_libc_init_recursive(); test_mutex_libc_init_normal(); + test_mutex_libc_init_errorcheck(); test_rwlock_libc_static_initializer(); #[cfg(target_os = "linux")] From f9dc942cfdafc4fe86bc0ec1f0963f88eaa580c1 Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 5 Apr 2020 13:53:03 -0500 Subject: [PATCH 27/31] Changes to error handling --- src/shims/sync.rs | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/src/shims/sync.rs b/src/shims/sync.rs index 90d7104b9e..d7ae32daaa 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -263,7 +263,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx None => this.eval_libc_i32("EAGAIN"), } } else { - this.eval_libc_i32("EINVAL") + throw_ub_format!("called pthread_mutex_lock on an unsupported type of mutex"); } } @@ -291,7 +291,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx None => this.eval_libc_i32("EAGAIN"), } } else { - this.eval_libc_i32("EINVAL") + throw_ub_format!("called pthread_mutex_trylock on an unsupported type of mutex"); } } @@ -306,9 +306,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx mutex_set_locked_count(this, mutex_op, Scalar::from_u32(0))?; Ok(0) } else { - throw_ub_format!( - "Attempted to unlock a PTHREAD_MUTEX_NORMAL mutex that was not locked" - ); + throw_ub_format!("unlocked a PTHREAD_MUTEX_NORMAL mutex that was not locked"); } } else if kind == this.eval_libc("PTHREAD_MUTEX_ERRORCHECK")? 
{ if locked_count != 0 { @@ -329,7 +327,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx } } } else { - this.eval_libc_i32("EINVAL") + throw_ub_format!("called pthread_mutex_unlock on an unsupported type of mutex"); } } @@ -337,7 +335,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let this = self.eval_context_mut(); if mutex_get_locked_count(this, mutex_op)?.to_u32()? != 0 { - return this.eval_libc_i32("EBUSY"); + throw_ub_format!("destroyed a locked mutex"); } mutex_set_kind(this, mutex_op, ScalarMaybeUndef::Undef)?; @@ -422,18 +420,17 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?; Ok(0) } else { - this.eval_libc_i32("EPERM") + throw_ub_format!("unlocked an rwlock that was not locked"); } } fn pthread_rwlock_destroy(&mut self, rwlock_op: OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); - if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 { - return this.eval_libc_i32("EBUSY"); - } - if rwlock_get_writers(this, rwlock_op)?.to_u32()? != 0 { - return this.eval_libc_i32("EBUSY"); + if rwlock_get_readers(this, rwlock_op)?.to_u32()? != 0 + || rwlock_get_writers(this, rwlock_op)?.to_u32()? 
!= 0 + { + throw_ub_format!("destroyed a locked rwlock"); } rwlock_set_readers(this, rwlock_op, ScalarMaybeUndef::Undef)?; From 134d6a2faab1801e9b3d23b6ee11ba1643eae0fe Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 5 Apr 2020 14:55:57 -0500 Subject: [PATCH 28/31] Add tests, improve test coverage --- .../libc_pthread_mutex_destroy_locked.rs | 16 ++++++++++++++++ ...libc_pthread_mutex_normal_unlock_unlocked.rs | 17 +++++++++++++++++ .../libc_pthread_rwlock_destroy_read_locked.rs | 13 +++++++++++++ .../libc_pthread_rwlock_destroy_write_locked.rs | 13 +++++++++++++ .../libc_pthread_rwlock_unlock_unlocked.rs | 12 ++++++++++++ tests/run-pass/libc.rs | 8 +++++++- 6 files changed, 78 insertions(+), 1 deletion(-) create mode 100644 tests/compile-fail/libc_pthread_mutex_destroy_locked.rs create mode 100644 tests/compile-fail/libc_pthread_mutex_normal_unlock_unlocked.rs create mode 100644 tests/compile-fail/libc_pthread_rwlock_destroy_read_locked.rs create mode 100644 tests/compile-fail/libc_pthread_rwlock_destroy_write_locked.rs create mode 100644 tests/compile-fail/libc_pthread_rwlock_unlock_unlocked.rs diff --git a/tests/compile-fail/libc_pthread_mutex_destroy_locked.rs b/tests/compile-fail/libc_pthread_mutex_destroy_locked.rs new file mode 100644 index 0000000000..e7ed8ad296 --- /dev/null +++ b/tests/compile-fail/libc_pthread_mutex_destroy_locked.rs @@ -0,0 +1,16 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + unsafe { + let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL), 0); + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + libc::pthread_mutex_destroy(&mut mutex as *mut _); //~ ERROR destroyed a locked mutex + } +} diff --git 
a/tests/compile-fail/libc_pthread_mutex_normal_unlock_unlocked.rs b/tests/compile-fail/libc_pthread_mutex_normal_unlock_unlocked.rs new file mode 100644 index 0000000000..65de62484d --- /dev/null +++ b/tests/compile-fail/libc_pthread_mutex_normal_unlock_unlocked.rs @@ -0,0 +1,17 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + unsafe { + let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL), 0); + let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); + assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0); + assert_eq!(libc::pthread_mutex_unlock(&mut mutex as *mut _), 0); + libc::pthread_mutex_unlock(&mut mutex as *mut _); //~ ERROR was not locked + } +} diff --git a/tests/compile-fail/libc_pthread_rwlock_destroy_read_locked.rs b/tests/compile-fail/libc_pthread_rwlock_destroy_read_locked.rs new file mode 100644 index 0000000000..8750a7388f --- /dev/null +++ b/tests/compile-fail/libc_pthread_rwlock_destroy_read_locked.rs @@ -0,0 +1,13 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); + unsafe { + assert_eq!(libc::pthread_rwlock_rdlock(rw.get()), 0); + libc::pthread_rwlock_destroy(rw.get()); //~ ERROR destroyed a locked rwlock + } +} diff --git a/tests/compile-fail/libc_pthread_rwlock_destroy_write_locked.rs b/tests/compile-fail/libc_pthread_rwlock_destroy_write_locked.rs new file mode 100644 index 0000000000..aecccfa503 --- /dev/null +++ b/tests/compile-fail/libc_pthread_rwlock_destroy_write_locked.rs @@ -0,0 +1,13 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + let rw = 
std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); + unsafe { + assert_eq!(libc::pthread_rwlock_wrlock(rw.get()), 0); + libc::pthread_rwlock_destroy(rw.get()); //~ ERROR destroyed a locked rwlock + } +} diff --git a/tests/compile-fail/libc_pthread_rwlock_unlock_unlocked.rs b/tests/compile-fail/libc_pthread_rwlock_unlock_unlocked.rs new file mode 100644 index 0000000000..8b3de53828 --- /dev/null +++ b/tests/compile-fail/libc_pthread_rwlock_unlock_unlocked.rs @@ -0,0 +1,12 @@ +// ignore-windows: No libc on Windows + +#![feature(rustc_private)] + +extern crate libc; + +fn main() { + let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER); + unsafe { + libc::pthread_rwlock_unlock(rw.get()); //~ ERROR was not locked + } +} diff --git a/tests/run-pass/libc.rs b/tests/run-pass/libc.rs index c930a034b1..a449d9340a 100644 --- a/tests/run-pass/libc.rs +++ b/tests/run-pass/libc.rs @@ -15,7 +15,7 @@ fn tmp() -> PathBuf { #[cfg(not(target_os = "macos"))] fn test_posix_fadvise() { use std::convert::TryInto; - use std::fs::{File, remove_file}; + use std::fs::{remove_file, File}; use std::io::Write; use std::os::unix::io::AsRawFd; @@ -66,6 +66,7 @@ fn test_mutex_libc_init_recursive() { fn test_mutex_libc_init_normal() { unsafe { let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed(); + assert_eq!(libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, 0x12345678), libc::EINVAL); assert_eq!(libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL), 0); let mut mutex: libc::pthread_mutex_t = std::mem::zeroed(); assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0); @@ -133,6 +134,11 @@ fn test_rwlock_libc_static_initializer() { assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_tryrdlock(rw.get()), libc::EBUSY); + 
assert_eq!(libc::pthread_rwlock_trywrlock(rw.get()), libc::EBUSY); + assert_eq!(libc::pthread_rwlock_unlock(rw.get()), 0); + assert_eq!(libc::pthread_rwlock_destroy(rw.get()), 0); } } From bc54c7628dbcccc8d727abea591e9ac14ea2fed2 Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 5 Apr 2020 16:03:44 -0500 Subject: [PATCH 29/31] Eagerly compute i32 and u32 layouts --- src/eval.rs | 15 +++++---- src/lib.rs | 5 ++- src/machine.rs | 79 ++++++++++++++++++----------------------------- src/shims/sync.rs | 58 ++++++++++++++++++++++++---------- 4 files changed, 83 insertions(+), 74 deletions(-) diff --git a/src/eval.rs b/src/eval.rs index 46e66bc0a8..c3510188e3 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -1,14 +1,14 @@ //! Main evaluator loop and setting up the initial stack frame. -use std::ffi::OsStr; use std::convert::TryFrom; +use std::ffi::OsStr; use rand::rngs::StdRng; use rand::SeedableRng; -use rustc_target::abi::LayoutOf; -use rustc_middle::ty::{self, TyCtxt}; use rustc_hir::def_id::DefId; +use rustc_middle::ty::{self, layout::LayoutCx, TyCtxt}; +use rustc_target::abi::LayoutOf; use crate::*; @@ -60,10 +60,13 @@ pub fn create_ecx<'mir, 'tcx: 'mir>( main_id: DefId, config: MiriConfig, ) -> InterpResult<'tcx, (InterpCx<'mir, 'tcx, Evaluator<'tcx>>, MPlaceTy<'tcx, Tag>)> { + let tcx_at = tcx.at(rustc_span::source_map::DUMMY_SP); + let param_env = ty::ParamEnv::reveal_all(); + let layout_cx = LayoutCx { tcx, param_env }; let mut ecx = InterpCx::new( - tcx.at(rustc_span::source_map::DUMMY_SP), - ty::ParamEnv::reveal_all(), - Evaluator::new(config.communicate, config.validate), + tcx_at, + param_env, + Evaluator::new(config.communicate, config.validate, layout_cx), MemoryExtra::new( StdRng::seed_from_u64(config.seed.unwrap_or(0)), config.stacked_borrows, diff --git a/src/lib.rs b/src/lib.rs index 82ac2e8d21..2f381b4a34 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -51,9 +51,8 @@ pub use crate::diagnostics::{ pub use crate::eval::{create_ecx, eval_main, MiriConfig}; pub 
use crate::helpers::EvalContextExt as HelpersEvalContextExt; pub use crate::machine::{ - AllocExtra, EvalContextExt as MachineEvalContextExt, Evaluator, FrameData, MemoryExtra, - MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, NUM_CPUS, PAGE_SIZE, STACK_ADDR, - STACK_SIZE, + AllocExtra, Evaluator, FrameData, MemoryExtra, MiriEvalContext, MiriEvalContextExt, + MiriMemoryKind, NUM_CPUS, PAGE_SIZE, STACK_ADDR, STACK_SIZE, }; pub use crate::mono_hash_map::MonoHashMap; pub use crate::operator::EvalContextExt as OperatorEvalContextExt; diff --git a/src/machine.rs b/src/machine.rs index bfb832085e..26ff23511f 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -10,11 +10,18 @@ use std::time::Instant; use log::trace; use rand::rngs::StdRng; -use rustc_data_structures::fx::FxHashMap; -use rustc_middle::{mir, ty::{self, layout::TyAndLayout}}; -use rustc_target::abi::{LayoutOf, Size}; use rustc_ast::attr; +use rustc_data_structures::fx::FxHashMap; +use rustc_middle::{ + mir, + ty::{ + self, + layout::{LayoutCx, LayoutError, TyAndLayout}, + TyCtxt, + }, +}; use rustc_span::symbol::{sym, Symbol}; +use rustc_target::abi::{LayoutOf, Size}; use crate::*; @@ -146,36 +153,18 @@ impl MemoryExtra { } } -/// Cached layouts of primitive types -#[derive(Default)] -struct PrimitiveLayouts<'tcx> { - i32: RefCell>>, - u32: RefCell>>, +/// Precomputed layouts of primitive types +pub(crate) struct PrimitiveLayouts<'tcx> { + pub(crate) i32: TyAndLayout<'tcx>, + pub(crate) u32: TyAndLayout<'tcx>, } impl<'mir, 'tcx: 'mir> PrimitiveLayouts<'tcx> { - fn i32(&self, ecx: &MiriEvalContext<'mir, 'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> { - { - let layout_ref = self.i32.borrow(); - if layout_ref.is_some() { - return Ok(layout_ref.unwrap()); - } - } - let layout = ecx.layout_of(ecx.tcx.types.i32)?; - *self.i32.borrow_mut() = Some(layout); - Ok(layout) - } - - fn u32(&self, ecx: &MiriEvalContext<'mir, 'tcx>) -> InterpResult<'tcx, TyAndLayout<'tcx>> { - { - let layout_ref = self.u32.borrow(); 
- if layout_ref.is_some() { - return Ok(layout_ref.unwrap()); - } - } - let layout = ecx.layout_of(ecx.tcx.types.u32)?; - *self.u32.borrow_mut() = Some(layout); - Ok(layout) + fn new(layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>) -> Result> { + Ok(Self { + i32: layout_cx.layout_of(layout_cx.tcx.types.i32)?, + u32: layout_cx.layout_of(layout_cx.tcx.types.u32)?, + }) } } @@ -216,14 +205,20 @@ pub struct Evaluator<'tcx> { /// The "time anchor" for this machine's monotone clock (for `Instant` simulation). pub(crate) time_anchor: Instant, - /// Cached `TyLayout`s for primitive data types that are commonly used inside Miri. + /// Precomputed `TyLayout`s for primitive data types that are commonly used inside Miri. /// FIXME: Search through the rest of the codebase for more layout_of() calls that - /// could be cached here. - primitive_layouts: PrimitiveLayouts<'tcx>, + /// could be stored here. + pub(crate) layouts: PrimitiveLayouts<'tcx>, } impl<'tcx> Evaluator<'tcx> { - pub(crate) fn new(communicate: bool, validate: bool) -> Self { + pub(crate) fn new( + communicate: bool, + validate: bool, + layout_cx: LayoutCx<'tcx, TyCtxt<'tcx>>, + ) -> Self { + let layouts = PrimitiveLayouts::new(layout_cx) + .expect("Couldn't get layouts of primitive types"); Evaluator { // `env_vars` could be initialized properly here if `Memory` were available before // calling this method. 
@@ -239,7 +234,7 @@ impl<'tcx> Evaluator<'tcx> { dir_handler: Default::default(), panic_payload: None, time_anchor: Instant::now(), - primitive_layouts: PrimitiveLayouts::default(), + layouts, } } } @@ -263,20 +258,6 @@ impl<'mir, 'tcx> MiriEvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> } } -impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {} -/// Provides convenience methods for use elsewhere -pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { - fn i32_layout(&self) -> InterpResult<'tcx, TyAndLayout<'tcx>> { - let this = self.eval_context_ref(); - this.machine.primitive_layouts.i32(this) - } - - fn u32_layout(&self) -> InterpResult<'tcx, TyAndLayout<'tcx>> { - let this = self.eval_context_ref(); - this.machine.primitive_layouts.u32(this) - } -} - /// Machine hook implementations. impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'tcx> { type MemoryKind = MiriMemoryKind; diff --git a/src/shims/sync.rs b/src/shims/sync.rs index d7ae32daaa..b03dcbfd89 100644 --- a/src/shims/sync.rs +++ b/src/shims/sync.rs @@ -31,7 +31,8 @@ fn mutexattr_get_kind<'mir, 'tcx: 'mir>( // Ensure that the following read at an offset to the attr pointer is within bounds assert_ptr_target_min_size(ecx, attr_op, 4)?; let attr_place = ecx.deref_operand(attr_op)?; - let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?; + let kind_place = + attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.machine.layouts.i32, ecx)?; ecx.read_scalar(kind_place.into()) } @@ -43,7 +44,8 @@ fn mutexattr_set_kind<'mir, 'tcx: 'mir>( // Ensure that the following write at an offset to the attr pointer is within bounds assert_ptr_target_min_size(ecx, attr_op, 4)?; let attr_place = ecx.deref_operand(attr_op)?; - let kind_place = attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.i32_layout()?, ecx)?; + let kind_place = + attr_place.offset(Size::ZERO, MemPlaceMeta::None, ecx.machine.layouts.i32, ecx)?; 
ecx.write_scalar(kind.into(), kind_place.into()) } @@ -63,8 +65,12 @@ fn mutex_get_locked_count<'mir, 'tcx: 'mir>( // Ensure that the following read at an offset to the mutex pointer is within bounds assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; - let locked_count_place = - mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; + let locked_count_place = mutex_place.offset( + Size::from_bytes(4), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; ecx.read_scalar(locked_count_place.into()) } @@ -76,8 +82,12 @@ fn mutex_set_locked_count<'mir, 'tcx: 'mir>( // Ensure that the following write at an offset to the mutex pointer is within bounds assert_ptr_target_min_size(ecx, mutex_op, 20)?; let mutex_place = ecx.deref_operand(mutex_op)?; - let locked_count_place = - mutex_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; + let locked_count_place = mutex_place.offset( + Size::from_bytes(4), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; ecx.write_scalar(locked_count.into(), locked_count_place.into()) } @@ -92,7 +102,7 @@ fn mutex_get_kind<'mir, 'tcx: 'mir>( let kind_place = mutex_place.offset( Size::from_bytes(kind_offset), MemPlaceMeta::None, - ecx.i32_layout()?, + ecx.machine.layouts.i32, ecx, )?; ecx.read_scalar(kind_place.into()) @@ -110,7 +120,7 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>( let kind_place = mutex_place.offset( Size::from_bytes(kind_offset), MemPlaceMeta::None, - ecx.i32_layout()?, + ecx.machine.layouts.i32, ecx, )?; ecx.write_scalar(kind.into(), kind_place.into()) @@ -131,8 +141,12 @@ fn rwlock_get_readers<'mir, 'tcx: 'mir>( // Ensure that the following read at an offset to the rwlock pointer is within bounds assert_ptr_target_min_size(ecx, rwlock_op, 12)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; - let readers_place = - rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; + let 
readers_place = rwlock_place.offset( + Size::from_bytes(4), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; ecx.read_scalar(readers_place.into()) } @@ -144,8 +158,12 @@ fn rwlock_set_readers<'mir, 'tcx: 'mir>( // Ensure that the following write at an offset to the rwlock pointer is within bounds assert_ptr_target_min_size(ecx, rwlock_op, 12)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; - let readers_place = - rwlock_place.offset(Size::from_bytes(4), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; + let readers_place = rwlock_place.offset( + Size::from_bytes(4), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; ecx.write_scalar(readers.into(), readers_place.into()) } @@ -156,8 +174,12 @@ fn rwlock_get_writers<'mir, 'tcx: 'mir>( // Ensure that the following read at an offset to the rwlock pointer is within bounds assert_ptr_target_min_size(ecx, rwlock_op, 12)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; - let writers_place = - rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; + let writers_place = rwlock_place.offset( + Size::from_bytes(8), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; ecx.read_scalar(writers_place.into()) } @@ -169,8 +191,12 @@ fn rwlock_set_writers<'mir, 'tcx: 'mir>( // Ensure that the following write at an offset to the rwlock pointer is within bounds assert_ptr_target_min_size(ecx, rwlock_op, 12)?; let rwlock_place = ecx.deref_operand(rwlock_op)?; - let writers_place = - rwlock_place.offset(Size::from_bytes(8), MemPlaceMeta::None, ecx.u32_layout()?, ecx)?; + let writers_place = rwlock_place.offset( + Size::from_bytes(8), + MemPlaceMeta::None, + ecx.machine.layouts.u32, + ecx, + )?; ecx.write_scalar(writers.into(), writers_place.into()) } From 0f5f0e1520a4f001674478ee5b8eb7a644b2c66a Mon Sep 17 00:00:00 2001 From: David Cook Date: Sun, 5 Apr 2020 20:55:39 -0500 Subject: [PATCH 30/31] Fix spelling typo --- src/shims/foreign_items/posix/linux.rs | 2 +- 
src/shims/foreign_items/posix/macos.rs | 2 +- src/shims/foreign_items/windows.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/shims/foreign_items/posix/linux.rs b/src/shims/foreign_items/posix/linux.rs index 286bd5798b..16c6c002b6 100644 --- a/src/shims/foreign_items/posix/linux.rs +++ b/src/shims/foreign_items/posix/linux.rs @@ -113,7 +113,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_scalar(Scalar::from_i32(-1), dest)?; } - // Incomplete shims that we "stub out" just to get pre-main initialziation code to work. + // Incomplete shims that we "stub out" just to get pre-main initialization code to work. // These shims are enabled only when the caller is in the standard library. "pthread_getattr_np" if this.frame().instance.to_string().starts_with("std::sys::unix::") => { this.write_null(dest)?; diff --git a/src/shims/foreign_items/posix/macos.rs b/src/shims/foreign_items/posix/macos.rs index 44c45d90c1..9810a77ffd 100644 --- a/src/shims/foreign_items/posix/macos.rs +++ b/src/shims/foreign_items/posix/macos.rs @@ -88,7 +88,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx this.write_scalar(stack_size, dest)?; } - // Incomplete shims that we "stub out" just to get pre-main initialziation code to work. + // Incomplete shims that we "stub out" just to get pre-main initialization code to work. // These shims are enabled only when the caller is in the standard library. "mmap" if this.frame().instance.to_string().starts_with("std::sys::unix::") => { // This is a horrible hack, but since the guard page mechanism calls mmap and expects a particular return value, we just give it that value. 
diff --git a/src/shims/foreign_items/windows.rs b/src/shims/foreign_items/windows.rs index 443d44fae1..1d17cbcefd 100644 --- a/src/shims/foreign_items/windows.rs +++ b/src/shims/foreign_items/windows.rs @@ -207,7 +207,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx throw_unsup_format!("Miri does not support threading"); } - // Incomplete shims that we "stub out" just to get pre-main initialziation code to work. + // Incomplete shims that we "stub out" just to get pre-main initialization code to work. // These shims are enabled only when the caller is in the standard library. "GetProcessHeap" if this.frame().instance.to_string().starts_with("std::sys::windows::") => { // Just fake a HANDLE From 80497e5d3c5fe08e95bcbe114fae39661a909e16 Mon Sep 17 00:00:00 2001 From: David Cook Date: Mon, 6 Apr 2020 07:23:58 -0500 Subject: [PATCH 31/31] Clean up conditional compilation --- tests/run-pass/libc.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/run-pass/libc.rs b/tests/run-pass/libc.rs index a449d9340a..fc154c05c8 100644 --- a/tests/run-pass/libc.rs +++ b/tests/run-pass/libc.rs @@ -12,7 +12,7 @@ fn tmp() -> PathBuf { std::env::var("MIRI_TEMP").map(PathBuf::from).unwrap_or_else(|_| std::env::temp_dir()) } -#[cfg(not(target_os = "macos"))] +#[cfg(target_os = "linux")] fn test_posix_fadvise() { use std::convert::TryInto; use std::fs::{remove_file, File}; @@ -144,7 +144,7 @@ fn test_rwlock_libc_static_initializer() { } fn main() { - #[cfg(not(target_os = "macos"))] + #[cfg(target_os = "linux")] test_posix_fadvise(); test_mutex_libc_init_recursive();