
Miri subtree update #127726

Merged (29 commits, Jul 14, 2024)

Changes from all commits:
2d33d7b  Run tests for all specified targets (Mandragorian, Jun 22, 2024)
c5e9513  Refactor float casting tests (tgross35, Jun 20, 2024)
93c1749  Add casting tests for `f16` and `f128` (tgross35, Jun 27, 2024)
12bfa7b  Auto merge of #3688 - tgross35:float-cast-test-refactor, r=RalfJung (bors, Jul 4, 2024)
8699540  Auto merge of #3702 - Mandragorian:multiple_targets, r=RalfJung (bors, Jul 4, 2024)
987f973  Preparing for merge from rustc (Jul 5, 2024)
9afcb21  Merge from rustc (Jul 5, 2024)
8f03c97  Auto merge of #3733 - rust-lang:rustup-2024-07-05, r=RalfJung (bors, Jul 5, 2024)
c77a2c6  implement `libc::sched_getaffinity` and `libc::sched_setaffinity` (folkertdev, Jun 21, 2024)
4b69da9  Preparing for merge from rustc (Jul 6, 2024)
7a9e1e3  Merge from rustc (Jul 6, 2024)
d358f5d  Auto merge of #3736 - rust-lang:rustup-2024-07-06, r=RalfJung (bors, Jul 6, 2024)
9a0e671  lookup c_ulong instead of hard-coding the chunk size (RalfJung, Jul 6, 2024)
4601952  `sched_setaffinity`: test `cpusetsize == 0` (folkertdev, Jul 6, 2024)
d65e368  `sched_setaffinity`: adjust test on BE systems (folkertdev, Jul 6, 2024)
838b8d5  Auto merge of #3698 - folkertdev:sched-setaffinity, r=RalfJung (bors, Jul 6, 2024)
6e754fe  Stacked Borrows: fix PartialEq for Stack (RalfJung, Jul 7, 2024)
964f6d9  Auto merge of #3738 - RalfJung:stack-partial-eq, r=RalfJung (bors, Jul 7, 2024)
13c6476  implement support for multiple TLS destructors on macOS (joboet, Jul 7, 2024)
f86f789  Preparing for merge from rustc (Jul 9, 2024)
1dcc342  Merge from rustc (Jul 9, 2024)
d5aba8d  Auto merge of #3741 - rust-lang:rustup-2024-07-09, r=RalfJung (bors, Jul 9, 2024)
f50b0b8  Auto merge of #3739 - joboet:macos_tls_dtors, r=RalfJung (bors, Jul 9, 2024)
ea7c136  Fix libc::read shim: make it write to a buffer correct amount of byte… (Jun 29, 2024)
6f65362  Auto merge of #3720 - safinaskar:read, r=RalfJung (bors, Jul 10, 2024)
9a23878  add test for intermediate reference in '&(*x).0 as *const i32' (RalfJung, Jul 14, 2024)
5f99349  Auto merge of #3750 - RalfJung:dangling-intermediate-ref, r=RalfJung (bors, Jul 14, 2024)
32221c3  implement the `os_unfair_lock` functions on macOS (joboet, Jul 13, 2024)
e90f047  Auto merge of #3745 - joboet:os_unfair_lock, r=RalfJung (bors, Jul 14, 2024)
26 changes: 18 additions & 8 deletions src/tools/miri/cargo-miri/src/phases.rs
@@ -104,9 +104,17 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
miri_for_host()
)
});
let host = &rustc_version.host;
let target = get_arg_flag_value("--target");
let target = target.as_ref().unwrap_or(host);
let mut targets = get_arg_flag_values("--target").collect::<Vec<_>>();
// If `targets` is empty, we need to add a `--target $HOST` flag ourselves, and also ensure
// that the host target is indeed setup.
let target_flag = if targets.is_empty() {
let host = &rustc_version.host;
targets.push(host.clone());
Some(host)
} else {
// We don't need to add a `--target` flag, we just forward the user's flags.
None
};

// If cleaning the target directory & sysroot cache,
// delete them then exit. There is no reason to setup a new
@@ -118,8 +126,11 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
return;
}

// We always setup.
let miri_sysroot = setup(&subcommand, target, &rustc_version, verbose, quiet);
for target in &targets {
// We always setup.
setup(&subcommand, target.as_str(), &rustc_version, verbose, quiet);
}
let miri_sysroot = get_sysroot_dir();

// Invoke actual cargo for the job, but with different flags.
// We re-use `cargo test` and `cargo run`, which makes target and binary handling very easy but
@@ -155,10 +166,9 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
// This is needed to make the `target.runner` settings do something,
// and it later helps us detect which crates are proc-macro/build-script
// (host crates) and which crates are needed for the program itself.
if get_arg_flag_value("--target").is_none() {
// No target given. Explicitly pick the host.
if let Some(target_flag) = target_flag {
cmd.arg("--target");
cmd.arg(host);
cmd.arg(target_flag);
}

// Set ourselves as runner for all binaries invoked by cargo.
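
The change above teaches `cargo miri` to collect every `--target` flag, set up a sysroot for each requested target, and only inject a `--target $HOST` flag when the user gave none. Below is a minimal standalone sketch of that control flow; the helper name and types are illustrative stand-ins, not cargo-miri's real API.

```rust
// Minimal sketch of the new multi-target flow; `plan_targets` is a stand-in
// for illustration only, not a cargo-miri function.
fn plan_targets(explicit_targets: Vec<String>, host: &str) -> (Vec<String>, Option<String>) {
    let mut targets = explicit_targets;
    // No `--target` given: fall back to the host and remember that we must add
    // a `--target $HOST` flag to the cargo invocation ourselves.
    let target_flag = if targets.is_empty() {
        targets.push(host.to_string());
        Some(host.to_string())
    } else {
        // The user's own `--target` flags are simply forwarded.
        None
    };
    // A sysroot would then be set up once per entry in `targets`.
    (targets, target_flag)
}

fn main() {
    let host = "x86_64-unknown-linux-gnu";

    // No explicit target: the host is picked and a flag must be injected.
    let (targets, flag) = plan_targets(vec![], host);
    assert_eq!(targets, [host.to_string()]);
    assert_eq!(flag.as_deref(), Some(host));

    // Two explicit targets: both get a sysroot, nothing is injected.
    let (targets, flag) =
        plan_targets(vec!["aarch64-apple-darwin".into(), "i686-unknown-linux-gnu".into()], host);
    assert_eq!(targets.len(), 2);
    assert!(flag.is_none());
}
```
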
4 changes: 3 additions & 1 deletion src/tools/miri/ci/ci.sh
@@ -41,9 +41,11 @@ function run_tests {
if [ -n "${TEST_TARGET-}" ]; then
begingroup "Testing foreign architecture $TEST_TARGET"
TARGET_FLAG="--target $TEST_TARGET"
MULTI_TARGET_FLAG=""
else
begingroup "Testing host architecture"
TARGET_FLAG=""
MULTI_TARGET_FLAG="--multi-target"
fi

## ui test suite
@@ -93,7 +95,7 @@ function run_tests {
echo 'build.rustc-wrapper = "thisdoesnotexist"' > .cargo/config.toml
fi
# Run the actual test
time ${PYTHON} test-cargo-miri/run-test.py $TARGET_FLAG
time ${PYTHON} test-cargo-miri/run-test.py $TARGET_FLAG $MULTI_TARGET_FLAG
# Clean up
unset RUSTC MIRI
rm -rf .cargo
2 changes: 1 addition & 1 deletion src/tools/miri/rust-version
@@ -1 +1 @@
66b4f0021bfb11a8c20d084c99a40f4a78ce1d38
99b7134389e9766462601a2fc4013840b9d31745
3 changes: 3 additions & 0 deletions src/tools/miri/src/bin/miri.rs
@@ -592,6 +592,9 @@ fn main() {
let num_cpus = param
.parse::<u32>()
.unwrap_or_else(|err| show_error!("-Zmiri-num-cpus requires a `u32`: {}", err));
if !(1..=miri::MAX_CPUS).contains(&usize::try_from(num_cpus).unwrap()) {
show_error!("-Zmiri-num-cpus must be in the range 1..={}", miri::MAX_CPUS);
}
miri_config.num_cpus = num_cpus;
} else if let Some(param) = arg.strip_prefix("-Zmiri-force-page-size=") {
let page_size = param.parse::<u64>().unwrap_or_else(|err| {
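
The new check rejects `-Zmiri-num-cpus` values outside `1..=MAX_CPUS` before they reach the CPU-affinity machinery added later in this PR. A small self-contained sketch of the validation, assuming the same `MAX_CPUS = 1024` limit:

```rust
const MAX_CPUS: usize = 1024; // mirrors miri::MAX_CPUS introduced in this PR

// Sketch of the bounds check: accept only values in 1..=MAX_CPUS.
fn validate_num_cpus(param: &str) -> Result<u32, String> {
    let num_cpus: u32 = param
        .parse()
        .map_err(|err| format!("-Zmiri-num-cpus requires a `u32`: {err}"))?;
    if !(1..=MAX_CPUS).contains(&usize::try_from(num_cpus).unwrap()) {
        return Err(format!("-Zmiri-num-cpus must be in the range 1..={MAX_CPUS}"));
    }
    Ok(num_cpus)
}

fn main() {
    assert!(validate_num_cpus("8").is_ok());
    assert!(validate_num_cpus("0").is_err());    // too small
    assert!(validate_num_cpus("4096").is_err()); // above MAX_CPUS
}
```
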
12 changes: 10 additions & 2 deletions src/tools/miri/src/borrow_tracker/stacked_borrows/stack.rs
@@ -136,8 +136,16 @@ impl StackCache {

impl PartialEq for Stack {
fn eq(&self, other: &Self) -> bool {
// All the semantics of Stack are in self.borrows, everything else is caching
self.borrows == other.borrows
let Stack {
borrows,
unknown_bottom,
// The cache is ignored for comparison.
#[cfg(feature = "stack-cache")]
cache: _,
#[cfg(feature = "stack-cache")]
unique_range: _,
} = self;
*borrows == other.borrows && *unknown_bottom == other.unknown_bottom
}
}

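
The fix replaces `self.borrows == other.borrows` with an exhaustive destructuring of `Stack`, so `unknown_bottom` now participates in the comparison and any future field must be explicitly compared or explicitly ignored. A simplified, self-contained sketch of the pattern (toy field types, not Miri's):

```rust
#[derive(Clone, Debug, PartialEq)]
struct Item(u64);

struct Stack {
    borrows: Vec<Item>,
    unknown_bottom: Option<u64>,
    cache: Vec<usize>, // caching only, intentionally ignored by `eq`
}

impl PartialEq for Stack {
    fn eq(&self, other: &Self) -> bool {
        // Exhaustive destructuring: adding a field to `Stack` without deciding
        // whether `eq` should look at it becomes a compile error.
        let Stack { borrows, unknown_bottom, cache: _ } = self;
        *borrows == other.borrows && *unknown_bottom == other.unknown_bottom
    }
}

fn main() {
    let a = Stack { borrows: vec![Item(1)], unknown_bottom: None, cache: vec![] };
    let b = Stack { borrows: vec![Item(1)], unknown_bottom: Some(7), cache: vec![] };
    // Comparing only `borrows` would call these equal; the fixed `eq` does not.
    assert!(a != b);
}
```
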
90 changes: 90 additions & 0 deletions src/tools/miri/src/concurrency/cpu_affinity.rs
@@ -0,0 +1,90 @@
use rustc_middle::ty::layout::LayoutOf;
use rustc_target::abi::Endian;

use crate::*;

/// The maximum number of CPUs supported by miri.
///
/// This value is compatible with the libc `CPU_SETSIZE` constant and corresponds to the number
/// of CPUs that a `cpu_set_t` can contain.
///
/// Real machines can have more CPUs than this number, and there exist APIs to set their affinity,
/// but this is not currently supported by miri.
pub const MAX_CPUS: usize = 1024;

/// A thread's CPU affinity mask determines the set of CPUs on which it is eligible to run.
// the actual representation depends on the target's endianness and pointer width.
// See CpuAffinityMask::set for details
#[derive(Clone)]
pub(crate) struct CpuAffinityMask([u8; Self::CPU_MASK_BYTES]);

impl CpuAffinityMask {
pub(crate) const CPU_MASK_BYTES: usize = MAX_CPUS / 8;

pub fn new<'tcx>(cx: &impl LayoutOf<'tcx>, cpu_count: u32) -> Self {
let mut this = Self([0; Self::CPU_MASK_BYTES]);

// the default affinity mask includes only the available CPUs
for i in 0..cpu_count as usize {
this.set(cx, i);
}

this
}

pub fn chunk_size<'tcx>(cx: &impl LayoutOf<'tcx>) -> u64 {
// The actual representation of the CpuAffinityMask is [c_ulong; _].
let ulong = helpers::path_ty_layout(cx, &["core", "ffi", "c_ulong"]);
ulong.size.bytes()
}

fn set<'tcx>(&mut self, cx: &impl LayoutOf<'tcx>, cpu: usize) {
// we silently ignore CPUs that are out of bounds. This matches the behavior of
// `sched_setaffinity` with a mask that specifies more than `CPU_SETSIZE` CPUs.
if cpu >= MAX_CPUS {
return;
}

// The actual representation of the CpuAffinityMask is [c_ulong; _].
// Within the array elements, we need to use the endianness of the target.
let target = &cx.tcx().sess.target;
match Self::chunk_size(cx) {
4 => {
let start = cpu / 32 * 4; // first byte of the correct u32
let chunk = self.0[start..].first_chunk_mut::<4>().unwrap();
let offset = cpu % 32;
*chunk = match target.options.endian {
Endian::Little => (u32::from_le_bytes(*chunk) | 1 << offset).to_le_bytes(),
Endian::Big => (u32::from_be_bytes(*chunk) | 1 << offset).to_be_bytes(),
};
}
8 => {
let start = cpu / 64 * 8; // first byte of the correct u64
let chunk = self.0[start..].first_chunk_mut::<8>().unwrap();
let offset = cpu % 64;
*chunk = match target.options.endian {
Endian::Little => (u64::from_le_bytes(*chunk) | 1 << offset).to_le_bytes(),
Endian::Big => (u64::from_be_bytes(*chunk) | 1 << offset).to_be_bytes(),
};
}
other => bug!("chunk size not supported: {other}"),
};
}

pub fn as_slice(&self) -> &[u8] {
self.0.as_slice()
}

pub fn from_array<'tcx>(
cx: &impl LayoutOf<'tcx>,
cpu_count: u32,
bytes: [u8; Self::CPU_MASK_BYTES],
) -> Option<Self> {
// mask by what CPUs are actually available
let default = Self::new(cx, cpu_count);
let masked = std::array::from_fn(|i| bytes[i] & default.0[i]);

// at least one thread must be set for the input to be valid
masked.iter().any(|b| *b != 0).then_some(Self(masked))
}
}
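
Logically the mask is an array of `c_ulong` chunks, so setting bit `cpu` has to pick the right chunk (by the target's word size) and respect the target's endianness when reassembling the bytes. A host-only sketch of the little-endian, 8-byte-chunk branch, with no rustc types involved:

```rust
const MAX_CPUS: usize = 1024;
const CPU_MASK_BYTES: usize = MAX_CPUS / 8;

// Sketch: set bit `cpu` in a mask laid out as little-endian u64 chunks,
// mirroring the 8-byte branch of `CpuAffinityMask::set`.
fn set_cpu_le64(mask: &mut [u8; CPU_MASK_BYTES], cpu: usize) {
    if cpu >= MAX_CPUS {
        return; // out-of-range CPUs are silently ignored, like the shim
    }
    let start = cpu / 64 * 8; // first byte of the u64 that holds this CPU's bit
    let chunk: &mut [u8; 8] = (&mut mask[start..start + 8]).try_into().unwrap();
    let offset = cpu % 64;
    *chunk = (u64::from_le_bytes(*chunk) | 1u64 << offset).to_le_bytes();
}

fn main() {
    let mut mask = [0u8; CPU_MASK_BYTES];
    set_cpu_le64(&mut mask, 0);
    set_cpu_le64(&mut mask, 65);
    assert_eq!(mask[0], 0b0000_0001); // CPU 0 lives in byte 0, bit 0
    assert_eq!(mask[8], 0b0000_0010); // CPU 65 lives in the second u64, bit 1
    set_cpu_le64(&mut mask, 5000);    // beyond MAX_CPUS: silently ignored
}
```
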
1 change: 1 addition & 0 deletions src/tools/miri/src/concurrency/mod.rs
@@ -1,3 +1,4 @@
pub mod cpu_affinity;
pub mod data_race;
pub mod init_once;
mod range_object_map;
21 changes: 15 additions & 6 deletions src/tools/miri/src/concurrency/sync.rs
@@ -269,7 +269,7 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
if this.mutex_is_locked(mutex) {
assert_ne!(this.mutex_get_owner(mutex), this.active_thread());
this.mutex_enqueue_and_block(mutex, retval, dest);
this.mutex_enqueue_and_block(mutex, Some((retval, dest)));
} else {
// We can have it right now!
this.mutex_lock(mutex);
@@ -390,9 +390,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}

/// Put the thread into the queue waiting for the mutex.
/// Once the Mutex becomes available, `retval` will be written to `dest`.
///
/// Once the Mutex becomes available, and if `retval_dest` is `Some((retval, dest))`,
/// then `retval` will be written to `dest`.
#[inline]
fn mutex_enqueue_and_block(&mut self, id: MutexId, retval: Scalar, dest: MPlaceTy<'tcx>) {
fn mutex_enqueue_and_block(
&mut self,
id: MutexId,
retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
) {
let this = self.eval_context_mut();
assert!(this.mutex_is_locked(id), "queuing on unlocked mutex");
let thread = this.active_thread();
@@ -403,13 +409,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
callback!(
@capture<'tcx> {
id: MutexId,
retval: Scalar,
dest: MPlaceTy<'tcx>,
retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
}
@unblock = |this| {
assert!(!this.mutex_is_locked(id));
this.mutex_lock(id);
this.write_scalar(retval, &dest)?;

if let Some((retval, dest)) = retval_dest {
this.write_scalar(retval, &dest)?;
}

Ok(())
}
),
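
`mutex_enqueue_and_block` now takes an `Option<(Scalar, MPlaceTy)>`, so callers with nothing to store on wake-up (likely the new `os_unfair_lock` shim, whose lock call returns nothing) can pass `None`, while pthread-style callers keep the old write-retval-to-dest behaviour. A toy sketch of the optional write-back, using stand-in types rather than Miri's interpreter machinery:

```rust
// Toy stand-ins for Miri's `Scalar` and `MPlaceTy`.
type Scalar = u64;
struct Place(Option<Scalar>);

// Sketch of the unblock callback: the write-back only happens if the caller
// asked for one.
fn on_mutex_acquired(retval_dest: Option<(Scalar, &mut Place)>) {
    if let Some((retval, dest)) = retval_dest {
        dest.0 = Some(retval);
    }
}

fn main() {
    // pthread-style caller: wants `0` written to its return place on wake-up.
    let mut dest = Place(None);
    on_mutex_acquired(Some((0, &mut dest)));
    assert_eq!(dest.0, Some(0));

    // Caller with nothing to write back.
    on_mutex_acquired(None);
}
```
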
5 changes: 5 additions & 0 deletions src/tools/miri/src/concurrency/thread.rs
@@ -936,6 +936,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// After this all accesses will be treated as occurring in the new thread.
let old_thread_id = this.machine.threads.set_active_thread_id(new_thread_id);

// The child inherits its parent's cpu affinity.
if let Some(cpuset) = this.machine.thread_cpu_affinity.get(&old_thread_id).cloned() {
this.machine.thread_cpu_affinity.insert(new_thread_id, cpuset);
}

// Perform the function pointer load in the new thread frame.
let instance = this.get_ptr_fn(start_routine)?.as_instance()?;

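
With CPU affinity now tracked per thread, a newly spawned thread clones its parent's mask if one is recorded. A small standalone sketch of that inheritance step, with a plain `HashMap` and toy types in place of Miri's machine state:

```rust
use std::collections::HashMap;

type ThreadId = u32;

#[derive(Clone, Debug, PartialEq)]
struct CpuAffinityMask(Vec<u8>);

// Sketch: on thread creation, copy the parent's mask to the child, if any.
fn inherit_affinity(
    affinity: &mut HashMap<ThreadId, CpuAffinityMask>,
    parent: ThreadId,
    child: ThreadId,
) {
    if let Some(mask) = affinity.get(&parent).cloned() {
        affinity.insert(child, mask);
    }
}

fn main() {
    let mut affinity = HashMap::new();
    affinity.insert(0, CpuAffinityMask(vec![0b1111])); // parent allowed on CPUs 0..4
    inherit_affinity(&mut affinity, 0, 1);
    assert_eq!(affinity.get(&1), affinity.get(&0));

    // A parent with no recorded mask leaves the child without one too.
    inherit_affinity(&mut affinity, 7, 8);
    assert!(affinity.get(&8).is_none());
}
```
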
3 changes: 2 additions & 1 deletion src/tools/miri/src/eval.rs
@@ -282,7 +282,8 @@ pub fn create_ecx<'tcx>(
})?;

// Make sure we have MIR. We check MIR for some stable monomorphic function in libcore.
let sentinel = ecx.try_resolve_path(&["core", "ascii", "escape_default"], Namespace::ValueNS);
let sentinel =
helpers::try_resolve_path(tcx, &["core", "ascii", "escape_default"], Namespace::ValueNS);
if !matches!(sentinel, Some(s) if tcx.is_mir_available(s.def.def_id())) {
tcx.dcx().fatal(
"the current sysroot was built without `-Zalways-encode-mir`, or libcore seems missing. \
55 changes: 33 additions & 22 deletions src/tools/miri/src/helpers.rs
@@ -18,6 +18,7 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::middle::dependency_format::Linkage;
use rustc_middle::middle::exported_symbols::ExportedSymbol;
use rustc_middle::mir;
use rustc_middle::ty::layout::MaybeResult;
use rustc_middle::ty::{
self,
layout::{LayoutOf, TyAndLayout},
@@ -159,6 +160,35 @@ fn try_resolve_did(tcx: TyCtxt<'_>, path: &[&str], namespace: Option<Namespace>)
None
}

/// Gets an instance for a path; fails gracefully if the path does not exist.
pub fn try_resolve_path<'tcx>(
tcx: TyCtxt<'tcx>,
path: &[&str],
namespace: Namespace,
) -> Option<ty::Instance<'tcx>> {
let did = try_resolve_did(tcx, path, Some(namespace))?;
Some(ty::Instance::mono(tcx, did))
}

/// Gets an instance for a path.
#[track_caller]
pub fn resolve_path<'tcx>(
tcx: TyCtxt<'tcx>,
path: &[&str],
namespace: Namespace,
) -> ty::Instance<'tcx> {
try_resolve_path(tcx, path, namespace)
.unwrap_or_else(|| panic!("failed to find required Rust item: {path:?}"))
}

/// Gets the layout of a type at a path.
#[track_caller]
pub fn path_ty_layout<'tcx>(cx: &impl LayoutOf<'tcx>, path: &[&str]) -> TyAndLayout<'tcx> {
let ty =
resolve_path(cx.tcx(), path, Namespace::TypeNS).ty(cx.tcx(), ty::ParamEnv::reveal_all());
cx.layout_of(ty).to_result().ok().unwrap()
}

/// Call `f` for each exported symbol.
pub fn iter_exported_symbols<'tcx>(
tcx: TyCtxt<'tcx>,
@@ -259,23 +289,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
try_resolve_did(*self.eval_context_ref().tcx, path, None).is_some()
}

/// Gets an instance for a path; fails gracefully if the path does not exist.
fn try_resolve_path(&self, path: &[&str], namespace: Namespace) -> Option<ty::Instance<'tcx>> {
let tcx = self.eval_context_ref().tcx.tcx;
let did = try_resolve_did(tcx, path, Some(namespace))?;
Some(ty::Instance::mono(tcx, did))
}

/// Gets an instance for a path.
fn resolve_path(&self, path: &[&str], namespace: Namespace) -> ty::Instance<'tcx> {
self.try_resolve_path(path, namespace)
.unwrap_or_else(|| panic!("failed to find required Rust item: {path:?}"))
}

/// Evaluates the scalar at the specified path.
fn eval_path(&self, path: &[&str]) -> OpTy<'tcx> {
let this = self.eval_context_ref();
let instance = this.resolve_path(path, Namespace::ValueNS);
let instance = resolve_path(*this.tcx, path, Namespace::ValueNS);
// We don't give a span -- this isn't actually used directly by the program anyway.
let const_val = this.eval_global(instance).unwrap_or_else(|err| {
panic!("failed to evaluate required Rust item: {path:?}\n{err:?}")
@@ -344,19 +361,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
"`libc` crate is not reliably available on Windows targets; Miri should not use it there"
);
}
let ty = this
.resolve_path(&["libc", name], Namespace::TypeNS)
.ty(*this.tcx, ty::ParamEnv::reveal_all());
this.layout_of(ty).unwrap()
path_ty_layout(this, &["libc", name])
}

/// Helper function to get the `TyAndLayout` of a `windows` type
fn windows_ty_layout(&self, name: &str) -> TyAndLayout<'tcx> {
let this = self.eval_context_ref();
let ty = this
.resolve_path(&["std", "sys", "pal", "windows", "c", name], Namespace::TypeNS)
.ty(*this.tcx, ty::ParamEnv::reveal_all());
this.layout_of(ty).unwrap()
path_ty_layout(this, &["std", "sys", "pal", "windows", "c", name])
}

/// Project to the given *named* field (which must be a struct or union type).
4 changes: 2 additions & 2 deletions src/tools/miri/src/intrinsics/mod.rs
@@ -392,10 +392,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
bug!("float_finite: non-float input type {}", x.layout.ty)
};
Ok(match fty {
FloatTy::F16 => unimplemented!("f16_f128"),
FloatTy::F16 => x.to_scalar().to_f16()?.is_finite(),
FloatTy::F32 => x.to_scalar().to_f32()?.is_finite(),
FloatTy::F64 => x.to_scalar().to_f64()?.is_finite(),
FloatTy::F128 => unimplemented!("f16_f128"),
FloatTy::F128 => x.to_scalar().to_f128()?.is_finite(),
})
};
match (float_finite(&a)?, float_finite(&b)?) {
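
`float_finite` now handles `f16` and `f128` through the same `is_finite` path as `f32`/`f64` instead of bailing with `unimplemented!`. Since the primitive types are still unstable, the standalone sketch below checks finiteness of a binary16 value directly from its raw bits: a half-precision float is finite iff its five exponent bits are not all ones.

```rust
// Sketch: finiteness of an IEEE-754 binary16 value from its raw bits.
// The exponent field is bits 10..15; all ones encodes infinities and NaNs.
fn f16_is_finite(bits: u16) -> bool {
    ((bits >> 10) & 0x1F) != 0x1F
}

fn main() {
    assert!(f16_is_finite(0x3C00));  // 1.0
    assert!(f16_is_finite(0xFBFF));  // most negative finite value (-65504.0)
    assert!(!f16_is_finite(0x7C00)); // +infinity
    assert!(!f16_is_finite(0x7E00)); // a quiet NaN
}
```
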
1 change: 1 addition & 0 deletions src/tools/miri/src/lib.rs
@@ -129,6 +129,7 @@ pub use crate::borrow_tracker::{
};
pub use crate::clock::{Clock, Instant};
pub use crate::concurrency::{
cpu_affinity::MAX_CPUS,
data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
init_once::{EvalContextExt as _, InitOnceId},
sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SynchronizationObjects},