From 0fa941dbe18c545243040c70d9a80a4ff31dff53 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 16 Apr 2024 15:23:00 +0200 Subject: [PATCH 1/3] when an address gets reused, establish a happens-before link in the data race model --- src/alloc_addresses/mod.rs | 42 ++++++++----- src/alloc_addresses/reuse_pool.rs | 39 ++++++++---- src/concurrency/mod.rs | 2 + src/concurrency/thread.rs | 6 ++ src/helpers.rs | 6 +- src/machine.rs | 7 +-- .../address_reuse_happens_before.rs | 60 +++++++++++++++++++ tests/pass/weak_memory/weak.rs | 3 + 8 files changed, 129 insertions(+), 36 deletions(-) create mode 100644 tests/pass/concurrency/address_reuse_happens_before.rs diff --git a/src/alloc_addresses/mod.rs b/src/alloc_addresses/mod.rs index 3e00e6037c..50142d6b5a 100644 --- a/src/alloc_addresses/mod.rs +++ b/src/alloc_addresses/mod.rs @@ -14,7 +14,8 @@ use rustc_span::Span; use rustc_target::abi::{Align, HasDataLayout, Size}; use crate::*; -use reuse_pool::ReusePool; + +use self::reuse_pool::ReusePool; #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum ProvenanceMode { @@ -159,9 +160,12 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { assert!(!matches!(kind, AllocKind::Dead)); // This allocation does not have a base address yet, pick or reuse one. - let base_addr = if let Some(reuse_addr) = + let base_addr = if let Some((reuse_addr, clock)) = global_state.reuse.take_addr(&mut *rng, size, align) { + if let Some(data_race) = &ecx.machine.data_race { + data_race.validate_lock_acquire(&clock, ecx.get_active_thread()); + } reuse_addr } else { // We have to pick a fresh address. @@ -329,14 +333,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } } -impl GlobalStateInner { - pub fn free_alloc_id( - &mut self, - rng: &mut impl Rng, - dead_id: AllocId, - size: Size, - align: Align, - ) { +impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { + pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align) { + let global_state = self.alloc_addresses.get_mut(); + let rng = self.rng.get_mut(); + // We can *not* remove this from `base_addr`, since the interpreter design requires that we // be able to retrieve an AllocId + offset for any memory access *before* we check if the // access is valid. Specifically, `ptr_get_alloc` is called on each attempt at a memory @@ -349,15 +350,26 @@ impl GlobalStateInner { // returns a dead allocation. // To avoid a linear scan we first look up the address in `base_addr`, and then find it in // `int_to_ptr_map`. - let addr = *self.base_addr.get(&dead_id).unwrap(); - let pos = self.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap(); - let removed = self.int_to_ptr_map.remove(pos); + let addr = *global_state.base_addr.get(&dead_id).unwrap(); + let pos = + global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap(); + let removed = global_state.int_to_ptr_map.remove(pos); assert_eq!(removed, (addr, dead_id)); // double-check that we removed the right thing // We can also remove it from `exposed`, since this allocation can anyway not be returned by // `alloc_id_from_addr` any more. - self.exposed.remove(&dead_id); + global_state.exposed.remove(&dead_id); // Also remember this address for future reuse. 
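// The data-race clock is computed lazily: `add_addr` takes a closure and only calls it
// if it actually decides to remember this address, so no release clock is built for
// addresses that never make it into the pool.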
- self.reuse.add_addr(rng, addr, size, align) + global_state.reuse.add_addr(rng, addr, size, align, || { + let mut clock = concurrency::VClock::default(); + if let Some(data_race) = &self.data_race { + data_race.validate_lock_release( + &mut clock, + self.threads.get_active_thread_id(), + self.threads.active_thread_ref().current_span(), + ); + } + clock + }) } } diff --git a/src/alloc_addresses/reuse_pool.rs b/src/alloc_addresses/reuse_pool.rs index 8374d0ec60..9af4bdac4c 100644 --- a/src/alloc_addresses/reuse_pool.rs +++ b/src/alloc_addresses/reuse_pool.rs @@ -4,6 +4,8 @@ use rand::Rng; use rustc_target::abi::{Align, Size}; +use crate::concurrency::VClock; + const MAX_POOL_SIZE: usize = 64; // Just use fair coins, until we have evidence that other numbers are better. @@ -21,7 +23,10 @@ pub struct ReusePool { /// /// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to /// less than 64 different possible value, that bounds the overall size of the pool. - pool: Vec>, + /// + /// We also store the clock from the thread that donated this pool element, + /// to ensure synchronization with the thread that picks up this address. + pool: Vec>, } impl ReusePool { @@ -29,7 +34,7 @@ impl ReusePool { ReusePool { pool: vec![] } } - fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size)> { + fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, VClock)> { let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap(); if self.pool.len() <= pool_idx { self.pool.resize(pool_idx + 1, Vec::new()); @@ -37,26 +42,38 @@ impl ReusePool { &mut self.pool[pool_idx] } - pub fn add_addr(&mut self, rng: &mut impl Rng, addr: u64, size: Size, align: Align) { + pub fn add_addr( + &mut self, + rng: &mut impl Rng, + addr: u64, + size: Size, + align: Align, + clock: impl FnOnce() -> VClock, + ) { // Let's see if we even want to remember this address. if !rng.gen_bool(ADDR_REMEMBER_CHANCE) { return; } // Determine the pool to add this to, and where in the pool to put it. let subpool = self.subpool(align); - let pos = subpool.partition_point(|(_addr, other_size)| *other_size < size); + let pos = subpool.partition_point(|(_addr, other_size, _)| *other_size < size); // Make sure the pool does not grow too big. if subpool.len() >= MAX_POOL_SIZE { // Pool full. Replace existing element, or last one if this would be even bigger. let clamped_pos = pos.min(subpool.len() - 1); - subpool[clamped_pos] = (addr, size); + subpool[clamped_pos] = (addr, size, clock()); return; } // Add address to pool, at the right position. - subpool.insert(pos, (addr, size)); + subpool.insert(pos, (addr, size, clock())); } - pub fn take_addr(&mut self, rng: &mut impl Rng, size: Size, align: Align) -> Option { + pub fn take_addr( + &mut self, + rng: &mut impl Rng, + size: Size, + align: Align, + ) -> Option<(u64, VClock)> { // Determine whether we'll even attempt a reuse. if !rng.gen_bool(ADDR_TAKE_CHANCE) { return None; @@ -65,9 +82,9 @@ impl ReusePool { let subpool = self.subpool(align); // Let's see if we can find something of the right size. We want to find the full range of // such items, beginning with the first, so we can't use `binary_search_by_key`. 
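// `partition_point` returns the index of the first entry for which the predicate is false,
// i.e. the first candidate whose size is at least `size`.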
- let begin = subpool.partition_point(|(_addr, other_size)| *other_size < size); + let begin = subpool.partition_point(|(_addr, other_size, _)| *other_size < size); let mut end = begin; - while let Some((_addr, other_size)) = subpool.get(end) { + while let Some((_addr, other_size, _)) = subpool.get(end) { if *other_size != size { break; } @@ -80,8 +97,8 @@ impl ReusePool { // Pick a random element with the desired size. let idx = rng.gen_range(begin..end); // Remove it from the pool and return. - let (chosen_addr, chosen_size) = subpool.remove(idx); + let (chosen_addr, chosen_size, clock) = subpool.remove(idx); debug_assert!(chosen_size >= size && chosen_addr % align.bytes() == 0); - Some(chosen_addr) + Some((chosen_addr, clock)) } } diff --git a/src/concurrency/mod.rs b/src/concurrency/mod.rs index 45903107f1..15e1a94d6d 100644 --- a/src/concurrency/mod.rs +++ b/src/concurrency/mod.rs @@ -6,3 +6,5 @@ pub mod init_once; pub mod thread; mod vector_clock; pub mod weak_memory; + +pub use vector_clock::VClock; diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs index e28e5f8369..2fabd39a74 100644 --- a/src/concurrency/thread.rs +++ b/src/concurrency/thread.rs @@ -223,6 +223,12 @@ impl<'mir, 'tcx> Thread<'mir, 'tcx> { // empty stacks. self.top_user_relevant_frame.or_else(|| self.stack.len().checked_sub(1)) } + + pub fn current_span(&self) -> Span { + self.top_user_relevant_frame() + .map(|frame_idx| self.stack[frame_idx].current_span()) + .unwrap_or(rustc_span::DUMMY_SP) + } } impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> { diff --git a/src/helpers.rs b/src/helpers.rs index e2c6769ccb..17ae2dd91a 100644 --- a/src/helpers.rs +++ b/src/helpers.rs @@ -1265,9 +1265,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { /// This function is backed by a cache, and can be assumed to be very fast. /// It will work even when the stack is empty. pub fn current_span(&self) -> Span { - self.top_user_relevant_frame() - .map(|frame_idx| self.stack()[frame_idx].current_span()) - .unwrap_or(rustc_span::DUMMY_SP) + self.threads.active_thread_ref().current_span() } /// Returns the span of the *caller* of the current operation, again @@ -1279,7 +1277,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { // We need to go down at least to the caller (len - 2), or however // far we have to go to find a frame in a local crate which is also not #[track_caller]. let frame_idx = self.top_user_relevant_frame().unwrap(); - let frame_idx = cmp::min(frame_idx, self.stack().len().checked_sub(2).unwrap()); + let frame_idx = cmp::min(frame_idx, self.stack().len().saturating_sub(2)); self.stack()[frame_idx].current_span() } diff --git a/src/machine.rs b/src/machine.rs index 0bfc59e67d..d7c762fe1a 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -1303,12 +1303,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { { *deallocated_at = Some(machine.current_span()); } - machine.alloc_addresses.get_mut().free_alloc_id( - machine.rng.get_mut(), - alloc_id, - size, - align, - ); + machine.free_alloc_id(alloc_id, size, align); Ok(()) } diff --git a/tests/pass/concurrency/address_reuse_happens_before.rs b/tests/pass/concurrency/address_reuse_happens_before.rs new file mode 100644 index 0000000000..c0de1b4826 --- /dev/null +++ b/tests/pass/concurrency/address_reuse_happens_before.rs @@ -0,0 +1,60 @@ +//! Regression test for : +//! When the address gets reused, there should be a happens-before relation. 
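//! Thread 1 writes to `VAL` non-atomically, lets a `Box` allocation die, and publishes its
//! address via a relaxed atomic. Thread 2 keeps allocating until it observes that same address;
//! the synchronization induced by the address reuse must then make thread 1's write to `VAL`
//! visible, so reading `VAL` non-atomically is not a data race.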
+#![feature(strict_provenance)] +#![feature(sync_unsafe_cell)] + +use std::cell::SyncUnsafeCell; +use std::sync::atomic::{AtomicUsize, Ordering::Relaxed}; +use std::thread; + +static ADDR: AtomicUsize = AtomicUsize::new(0); +static VAL: SyncUnsafeCell = SyncUnsafeCell::new(0); + +fn addr() -> usize { + let alloc = Box::new(42); + <*const i32>::addr(&*alloc) +} + +fn thread1() { + unsafe { + VAL.get().write(42); + } + let alloc = addr(); + ADDR.store(alloc, Relaxed); +} + +fn thread2() -> bool { + // We try to get an allocation at the same address as the global `ADDR`. If we fail too often, + // just bail. `main` will try again with a different allocation. + for _ in 0..16 { + let alloc = addr(); + let addr = ADDR.load(Relaxed); + if alloc == addr { + // We got a reuse! + // If the new allocation is at the same address as the old one, there must be a + // happens-before relationship between them. Therefore, we can read VAL without racing + // and must observe the write above. + let val = unsafe { VAL.get().read() }; + assert_eq!(val, 42); + return true; + } + } + + false +} + +fn main() { + let mut success = false; + while !success { + let t1 = thread::spawn(thread1); + let t2 = thread::spawn(thread2); + t1.join().unwrap(); + success = t2.join().unwrap(); + + // Reset everything. + ADDR.store(0, Relaxed); + unsafe { + VAL.get().write(0); + } + } +} diff --git a/tests/pass/weak_memory/weak.rs b/tests/pass/weak_memory/weak.rs index e10ccc277f..dac63eeeb0 100644 --- a/tests/pass/weak_memory/weak.rs +++ b/tests/pass/weak_memory/weak.rs @@ -37,6 +37,8 @@ fn relaxed() -> bool { let x = static_atomic(0); let j1 = spawn(move || { x.store(1, Relaxed); + // Preemption is disabled, so the store above will never be the + // latest store visible to another thread. x.store(2, Relaxed); }); @@ -138,6 +140,7 @@ fn faa_replaced_by_load() -> bool { } /// Asserts that the function returns true at least once in 100 runs +#[track_caller] fn assert_once(f: fn() -> bool) { assert!(std::iter::repeat_with(|| f()).take(100).any(|x| x)); } From c962d88c8c82edfc6ecbcc00fc19ea1a7283634a Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Thu, 18 Apr 2024 13:20:01 +0200 Subject: [PATCH 2/3] do not reuse stack addresses; make reuse rate configurable --- README.md | 5 +++++ src/alloc_addresses/mod.rs | 14 +++++++++----- src/alloc_addresses/reuse_pool.rs | 22 ++++++++++++---------- src/bin/miri.rs | 14 ++++++++++++++ src/eval.rs | 3 +++ src/machine.rs | 4 ++-- 6 files changed, 45 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 948f1ee6c6..f01ce52f05 100644 --- a/README.md +++ b/README.md @@ -295,6 +295,11 @@ up the sysroot. If you are using `miri` (the Miri driver) directly, see the Miri adds its own set of `-Z` flags, which are usually set via the `MIRIFLAGS` environment variable. We first document the most relevant and most commonly used flags: +* `-Zmiri-address-reuse-rate=` changes the probability that a freed *non-stack* allocation + will be added to the pool for address reuse, and the probability that a new *non-stack* allocation + will be taken from the pool. Stack allocations never get added to or taken from the pool. The + default is `0.5`. Note that a very high reuse rate can mask concurrency bugs as address + reuse induces synchronization between threads. * `-Zmiri-compare-exchange-weak-failure-rate=` changes the failure rate of `compare_exchange_weak` operations. The default is `0.8` (so 4 out of 5 weak ops will fail). 
You can change it to any value between `0.0` and `1.0`, where `1.0` means it diff --git a/src/alloc_addresses/mod.rs b/src/alloc_addresses/mod.rs index 50142d6b5a..247f829297 100644 --- a/src/alloc_addresses/mod.rs +++ b/src/alloc_addresses/mod.rs @@ -78,7 +78,7 @@ impl GlobalStateInner { GlobalStateInner { int_to_ptr_map: Vec::default(), base_addr: FxHashMap::default(), - reuse: ReusePool::new(), + reuse: ReusePool::new(config.address_reuse_rate), exposed: FxHashSet::default(), next_base_addr: stack_addr, provenance_mode: config.provenance_mode, @@ -142,7 +142,11 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } } - fn addr_from_alloc_id(&self, alloc_id: AllocId, _kind: MemoryKind) -> InterpResult<'tcx, u64> { + fn addr_from_alloc_id( + &self, + alloc_id: AllocId, + memory_kind: MemoryKind, + ) -> InterpResult<'tcx, u64> { let ecx = self.eval_context_ref(); let mut global_state = ecx.machine.alloc_addresses.borrow_mut(); let global_state = &mut *global_state; @@ -161,7 +165,7 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { // This allocation does not have a base address yet, pick or reuse one. let base_addr = if let Some((reuse_addr, clock)) = - global_state.reuse.take_addr(&mut *rng, size, align) + global_state.reuse.take_addr(&mut *rng, size, align, memory_kind) { if let Some(data_race) = &ecx.machine.data_race { data_race.validate_lock_acquire(&clock, ecx.get_active_thread()); @@ -334,7 +338,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { } impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { - pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align) { + pub fn free_alloc_id(&mut self, dead_id: AllocId, size: Size, align: Align, kind: MemoryKind) { let global_state = self.alloc_addresses.get_mut(); let rng = self.rng.get_mut(); @@ -359,7 +363,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { // `alloc_id_from_addr` any more. global_state.exposed.remove(&dead_id); // Also remember this address for future reuse. - global_state.reuse.add_addr(rng, addr, size, align, || { + global_state.reuse.add_addr(rng, addr, size, align, kind, || { let mut clock = concurrency::VClock::default(); if let Some(data_race) = &self.data_race { data_race.validate_lock_release( diff --git a/src/alloc_addresses/reuse_pool.rs b/src/alloc_addresses/reuse_pool.rs index 9af4bdac4c..3b2f0269eb 100644 --- a/src/alloc_addresses/reuse_pool.rs +++ b/src/alloc_addresses/reuse_pool.rs @@ -4,20 +4,17 @@ use rand::Rng; use rustc_target::abi::{Align, Size}; -use crate::concurrency::VClock; +use crate::{concurrency::VClock, MemoryKind}; const MAX_POOL_SIZE: usize = 64; -// Just use fair coins, until we have evidence that other numbers are better. -const ADDR_REMEMBER_CHANCE: f64 = 0.5; -const ADDR_TAKE_CHANCE: f64 = 0.5; - /// The pool strikes a balance between exploring more possible executions and making it more likely /// to find bugs. The hypothesis is that bugs are more likely to occur when reuse happens for /// allocations with the same layout, since that can trigger e.g. ABA issues in a concurrent data /// structure. Therefore we only reuse allocations when size and alignment match exactly. #[derive(Debug)] pub struct ReusePool { + address_reuse_rate: f64, /// The i-th element in `pool` stores allocations of alignment `2^i`. We store these reusable /// allocations as address-size pairs, the list must be sorted by the size. 
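/// Keeping each subpool sorted means all entries of a given size form a contiguous run,
/// which `add_addr` and `take_addr` locate via `partition_point`.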
/// @@ -30,8 +27,8 @@ pub struct ReusePool { } impl ReusePool { - pub fn new() -> Self { - ReusePool { pool: vec![] } + pub fn new(address_reuse_rate: f64) -> Self { + ReusePool { address_reuse_rate, pool: vec![] } } fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, VClock)> { @@ -48,10 +45,14 @@ impl ReusePool { addr: u64, size: Size, align: Align, + kind: MemoryKind, clock: impl FnOnce() -> VClock, ) { // Let's see if we even want to remember this address. - if !rng.gen_bool(ADDR_REMEMBER_CHANCE) { + // We don't remember stack addresses: there's a lot of them (so the perf impact is big), + // and we only want to reuse stack slots within the same thread or else we'll add a lot of + // undesired synchronization. + if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) { return; } // Determine the pool to add this to, and where in the pool to put it. @@ -73,9 +74,10 @@ impl ReusePool { rng: &mut impl Rng, size: Size, align: Align, + kind: MemoryKind, ) -> Option<(u64, VClock)> { - // Determine whether we'll even attempt a reuse. - if !rng.gen_bool(ADDR_TAKE_CHANCE) { + // Determine whether we'll even attempt a reuse. As above, we don't do reuse for stack addresses. + if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) { return None; } // Determine the pool to take this from. diff --git a/src/bin/miri.rs b/src/bin/miri.rs index 3f7a965e9d..263a78257f 100644 --- a/src/bin/miri.rs +++ b/src/bin/miri.rs @@ -542,6 +542,20 @@ fn main() { miri_config.tracked_alloc_ids.extend(ids); } else if arg == "-Zmiri-track-alloc-accesses" { miri_config.track_alloc_accesses = true; + } else if let Some(param) = arg.strip_prefix("-Zmiri-address-reuse-rate=") { + let rate = match param.parse::() { + Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate, + Ok(_) => + show_error!( + "-Zmiri-compare-exchange-weak-failure-rate must be between `0.0` and `1.0`" + ), + Err(err) => + show_error!( + "-Zmiri-compare-exchange-weak-failure-rate requires a `f64` between `0.0` and `1.0`: {}", + err + ), + }; + miri_config.address_reuse_rate = rate; } else if let Some(param) = arg.strip_prefix("-Zmiri-compare-exchange-weak-failure-rate=") { let rate = match param.parse::() { Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate, diff --git a/src/eval.rs b/src/eval.rs index df0ede1e1b..457fe740d6 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -150,6 +150,8 @@ pub struct MiriConfig { pub page_size: Option, /// Whether to collect a backtrace when each allocation is created, just in case it leaks. pub collect_leak_backtraces: bool, + /// Probability for address reuse. 
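+    /// This defaults to 0.5 and can be changed with `-Zmiri-address-reuse-rate`.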
+ pub address_reuse_rate: f64, } impl Default for MiriConfig { @@ -186,6 +188,7 @@ impl Default for MiriConfig { num_cpus: 1, page_size: None, collect_leak_backtraces: true, + address_reuse_rate: 0.5, } } } diff --git a/src/machine.rs b/src/machine.rs index d7c762fe1a..eec162a87e 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -1282,7 +1282,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { (alloc_id, prove_extra): (AllocId, Self::ProvenanceExtra), size: Size, align: Align, - _kind: MemoryKind, + kind: MemoryKind, ) -> InterpResult<'tcx> { if machine.tracked_alloc_ids.contains(&alloc_id) { machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id)); @@ -1303,7 +1303,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> { { *deallocated_at = Some(machine.current_span()); } - machine.free_alloc_id(alloc_id, size, align); + machine.free_alloc_id(alloc_id, size, align, kind); Ok(()) } From 2155a302a78ee0fc95aecf093ac35739d0cd562b Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Thu, 18 Apr 2024 22:59:28 +0200 Subject: [PATCH 3/3] when reusing an address, most of the time only reuse from the current thread --- README.md | 9 +- src/alloc_addresses/mod.rs | 17 ++- src/alloc_addresses/reuse_pool.rs | 49 +++++-- src/bin/miri.rs | 129 +++++++----------- src/eval.rs | 3 + .../both_borrows/retag_data_race_write.rs | 2 + tests/fail/data_race/alloc_read_race.rs | 2 + tests/fail/data_race/alloc_write_race.rs | 2 + .../data_race/atomic_read_na_write_race1.rs | 2 + .../data_race/atomic_read_na_write_race2.rs | 2 + .../data_race/atomic_write_na_read_race1.rs | 2 + .../data_race/atomic_write_na_read_race2.rs | 2 + .../data_race/atomic_write_na_write_race1.rs | 2 + .../data_race/atomic_write_na_write_race2.rs | 2 + .../data_race/dangling_thread_async_race.rs | 2 + tests/fail/data_race/dangling_thread_race.rs | 2 + tests/fail/data_race/dealloc_read_race1.rs | 2 + tests/fail/data_race/dealloc_read_race2.rs | 2 + .../fail/data_race/dealloc_read_race_stack.rs | 2 + tests/fail/data_race/dealloc_write_race1.rs | 2 + tests/fail/data_race/dealloc_write_race2.rs | 2 + .../data_race/dealloc_write_race_stack.rs | 2 + .../data_race/enable_after_join_to_main.rs | 2 + tests/fail/data_race/fence_after_load.rs | 3 + tests/fail/data_race/mixed_size_read.rs | 3 + tests/fail/data_race/mixed_size_write.rs | 3 + tests/fail/data_race/read_read_race1.rs | 3 + tests/fail/data_race/read_read_race2.rs | 3 + tests/fail/data_race/read_write_race.rs | 2 + tests/fail/data_race/read_write_race_stack.rs | 2 + tests/fail/data_race/relax_acquire_race.rs | 2 + tests/fail/data_race/release_seq_race.rs | 2 + .../data_race/release_seq_race_same_thread.rs | 2 + tests/fail/data_race/rmw_race.rs | 2 + tests/fail/data_race/stack_pop_race.rs | 3 + tests/fail/data_race/write_write_race.rs | 2 + .../fail/data_race/write_write_race_stack.rs | 2 + .../retag_data_race_protected_read.rs | 3 +- .../stacked_borrows/retag_data_race_read.rs | 3 +- tests/fail/weak_memory/racing_mixed_size.rs | 3 +- .../weak_memory/racing_mixed_size_read.rs | 3 +- .../address_reuse_happens_before.rs | 1 + .../concurrency/disable_data_race_detector.rs | 2 + 43 files changed, 183 insertions(+), 109 deletions(-) diff --git a/README.md b/README.md index f01ce52f05..4254b9bb67 100644 --- a/README.md +++ b/README.md @@ -298,8 +298,13 @@ environment variable. 
We first document the most relevant and most commonly used * `-Zmiri-address-reuse-rate=` changes the probability that a freed *non-stack* allocation will be added to the pool for address reuse, and the probability that a new *non-stack* allocation will be taken from the pool. Stack allocations never get added to or taken from the pool. The - default is `0.5`. Note that a very high reuse rate can mask concurrency bugs as address - reuse induces synchronization between threads. + default is `0.5`. +* `-Zmiri-address-reuse-cross-thread-rate=` changes the probability that an allocation which + attempts to reuse a previously freed block of memory will also consider blocks freed by *other + threads*. The default is `0.1`, which means by default, in 90% of the cases where an address reuse + attempt is made, only addresses from the same thread will be considered. Reusing an address from + another thread induces synchronization between those threads, which can mask data races and weak + memory bugs. * `-Zmiri-compare-exchange-weak-failure-rate=` changes the failure rate of `compare_exchange_weak` operations. The default is `0.8` (so 4 out of 5 weak ops will fail). You can change it to any value between `0.0` and `1.0`, where `1.0` means it diff --git a/src/alloc_addresses/mod.rs b/src/alloc_addresses/mod.rs index 247f829297..612649c90c 100644 --- a/src/alloc_addresses/mod.rs +++ b/src/alloc_addresses/mod.rs @@ -78,7 +78,7 @@ impl GlobalStateInner { GlobalStateInner { int_to_ptr_map: Vec::default(), base_addr: FxHashMap::default(), - reuse: ReusePool::new(config.address_reuse_rate), + reuse: ReusePool::new(config), exposed: FxHashSet::default(), next_base_addr: stack_addr, provenance_mode: config.provenance_mode, @@ -164,9 +164,13 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> { assert!(!matches!(kind, AllocKind::Dead)); // This allocation does not have a base address yet, pick or reuse one. - let base_addr = if let Some((reuse_addr, clock)) = - global_state.reuse.take_addr(&mut *rng, size, align, memory_kind) - { + let base_addr = if let Some((reuse_addr, clock)) = global_state.reuse.take_addr( + &mut *rng, + size, + align, + memory_kind, + ecx.get_active_thread(), + ) { if let Some(data_race) = &ecx.machine.data_race { data_race.validate_lock_acquire(&clock, ecx.get_active_thread()); } @@ -363,12 +367,13 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> { // `alloc_id_from_addr` any more. global_state.exposed.remove(&dead_id); // Also remember this address for future reuse. 
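// Record which thread is freeing this address; `take_addr` uses it to (usually) restrict
// reuse to addresses freed by the same thread, so cross-thread synchronization via reuse
// stays rare.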
- global_state.reuse.add_addr(rng, addr, size, align, kind, || { + let thread = self.threads.get_active_thread_id(); + global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || { let mut clock = concurrency::VClock::default(); if let Some(data_race) = &self.data_race { data_race.validate_lock_release( &mut clock, - self.threads.get_active_thread_id(), + thread, self.threads.active_thread_ref().current_span(), ); } diff --git a/src/alloc_addresses/reuse_pool.rs b/src/alloc_addresses/reuse_pool.rs index 3b2f0269eb..fd0b24cc29 100644 --- a/src/alloc_addresses/reuse_pool.rs +++ b/src/alloc_addresses/reuse_pool.rs @@ -4,7 +4,7 @@ use rand::Rng; use rustc_target::abi::{Align, Size}; -use crate::{concurrency::VClock, MemoryKind}; +use crate::{concurrency::VClock, MemoryKind, MiriConfig, ThreadId}; const MAX_POOL_SIZE: usize = 64; @@ -15,23 +15,28 @@ const MAX_POOL_SIZE: usize = 64; #[derive(Debug)] pub struct ReusePool { address_reuse_rate: f64, + address_reuse_cross_thread_rate: f64, /// The i-th element in `pool` stores allocations of alignment `2^i`. We store these reusable - /// allocations as address-size pairs, the list must be sorted by the size. + /// allocations as address-size pairs, the list must be sorted by the size and then the thread ID. /// /// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to /// less than 64 different possible value, that bounds the overall size of the pool. /// - /// We also store the clock from the thread that donated this pool element, + /// We also store the ID and the data-race clock of the thread that donated this pool element, /// to ensure synchronization with the thread that picks up this address. - pool: Vec>, + pool: Vec>, } impl ReusePool { - pub fn new(address_reuse_rate: f64) -> Self { - ReusePool { address_reuse_rate, pool: vec![] } + pub fn new(config: &MiriConfig) -> Self { + ReusePool { + address_reuse_rate: config.address_reuse_rate, + address_reuse_cross_thread_rate: config.address_reuse_cross_thread_rate, + pool: vec![], + } } - fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, VClock)> { + fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, ThreadId, VClock)> { let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap(); if self.pool.len() <= pool_idx { self.pool.resize(pool_idx + 1, Vec::new()); @@ -46,6 +51,7 @@ impl ReusePool { size: Size, align: Align, kind: MemoryKind, + thread: ThreadId, clock: impl FnOnce() -> VClock, ) { // Let's see if we even want to remember this address. @@ -55,18 +61,21 @@ impl ReusePool { if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) { return; } + let clock = clock(); // Determine the pool to add this to, and where in the pool to put it. let subpool = self.subpool(align); - let pos = subpool.partition_point(|(_addr, other_size, _)| *other_size < size); + let pos = subpool.partition_point(|(_addr, other_size, other_thread, _)| { + (*other_size, *other_thread) < (size, thread) + }); // Make sure the pool does not grow too big. if subpool.len() >= MAX_POOL_SIZE { // Pool full. Replace existing element, or last one if this would be even bigger. let clamped_pos = pos.min(subpool.len() - 1); - subpool[clamped_pos] = (addr, size, clock()); + subpool[clamped_pos] = (addr, size, thread, clock); return; } // Add address to pool, at the right position. 
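// The subpool is kept sorted by `(size, thread)`, so all same-thread candidates of one size
// form a contiguous run that `take_addr` can find with `partition_point`.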
- subpool.insert(pos, (addr, size, clock())); + subpool.insert(pos, (addr, size, thread, clock)); } pub fn take_addr( @@ -75,21 +84,32 @@ impl ReusePool { size: Size, align: Align, kind: MemoryKind, + thread: ThreadId, ) -> Option<(u64, VClock)> { // Determine whether we'll even attempt a reuse. As above, we don't do reuse for stack addresses. if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) { return None; } + let cross_thread_reuse = rng.gen_bool(self.address_reuse_cross_thread_rate); // Determine the pool to take this from. let subpool = self.subpool(align); // Let's see if we can find something of the right size. We want to find the full range of - // such items, beginning with the first, so we can't use `binary_search_by_key`. - let begin = subpool.partition_point(|(_addr, other_size, _)| *other_size < size); + // such items, beginning with the first, so we can't use `binary_search_by_key`. If we do + // *not* want to consider other thread's allocations, we effectively use the lexicographic + // order on `(size, thread)`. + let begin = subpool.partition_point(|(_addr, other_size, other_thread, _)| { + *other_size < size + || (*other_size == size && !cross_thread_reuse && *other_thread < thread) + }); let mut end = begin; - while let Some((_addr, other_size, _)) = subpool.get(end) { + while let Some((_addr, other_size, other_thread, _)) = subpool.get(end) { if *other_size != size { break; } + if !cross_thread_reuse && *other_thread != thread { + // We entered the allocations of another thread. + break; + } end += 1; } if end == begin { @@ -99,8 +119,9 @@ impl ReusePool { // Pick a random element with the desired size. let idx = rng.gen_range(begin..end); // Remove it from the pool and return. - let (chosen_addr, chosen_size, clock) = subpool.remove(idx); + let (chosen_addr, chosen_size, chosen_thread, clock) = subpool.remove(idx); debug_assert!(chosen_size >= size && chosen_addr % align.bytes() == 0); + debug_assert!(cross_thread_reuse || chosen_thread == thread); Some((chosen_addr, clock)) } } diff --git a/src/bin/miri.rs b/src/bin/miri.rs index 263a78257f..db2cd01ce0 100644 --- a/src/bin/miri.rs +++ b/src/bin/miri.rs @@ -307,6 +307,15 @@ fn parse_comma_list(input: &str) -> Result, T::Err> { input.split(',').map(str::parse::).collect() } +/// Parses the input as a float in the range from 0.0 to 1.0 (inclusive). 
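+/// On failure, returns an error-message fragment that callers prepend with the flag name
+/// when invoking `show_error!`.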
+fn parse_rate(input: &str) -> Result { + match input.parse::() { + Ok(rate) if rate >= 0.0 && rate <= 1.0 => Ok(rate), + Ok(_) => Err("must be between `0.0` and `1.0`"), + Err(_) => Err("requires a `f64` between `0.0` and `1.0`"), + } +} + #[cfg(any(target_os = "linux", target_os = "macos"))] fn jemalloc_magic() { // These magic runes are copied from @@ -499,14 +508,9 @@ fn main() { } else if let Some(param) = arg.strip_prefix("-Zmiri-env-forward=") { miri_config.forwarded_env_vars.push(param.to_owned()); } else if let Some(param) = arg.strip_prefix("-Zmiri-track-pointer-tag=") { - let ids: Vec = match parse_comma_list(param) { - Ok(ids) => ids, - Err(err) => - show_error!( - "-Zmiri-track-pointer-tag requires a comma separated list of valid `u64` arguments: {}", - err - ), - }; + let ids: Vec = parse_comma_list(param).unwrap_or_else(|err| { + show_error!("-Zmiri-track-pointer-tag requires a comma separated list of valid `u64` arguments: {err}") + }); for id in ids.into_iter().map(miri::BorTag::new) { if let Some(id) = id { miri_config.tracked_pointer_tags.insert(id); @@ -515,14 +519,9 @@ fn main() { } } } else if let Some(param) = arg.strip_prefix("-Zmiri-track-call-id=") { - let ids: Vec = match parse_comma_list(param) { - Ok(ids) => ids, - Err(err) => - show_error!( - "-Zmiri-track-call-id requires a comma separated list of valid `u64` arguments: {}", - err - ), - }; + let ids: Vec = parse_comma_list(param).unwrap_or_else(|err| { + show_error!("-Zmiri-track-call-id requires a comma separated list of valid `u64` arguments: {err}") + }); for id in ids.into_iter().map(miri::CallId::new) { if let Some(id) = id { miri_config.tracked_call_ids.insert(id); @@ -531,70 +530,37 @@ fn main() { } } } else if let Some(param) = arg.strip_prefix("-Zmiri-track-alloc-id=") { - let ids: Vec = match parse_comma_list::>(param) { - Ok(ids) => ids.into_iter().map(miri::AllocId).collect(), - Err(err) => - show_error!( - "-Zmiri-track-alloc-id requires a comma separated list of valid non-zero `u64` arguments: {}", - err - ), - }; - miri_config.tracked_alloc_ids.extend(ids); + let ids = parse_comma_list::>(param).unwrap_or_else(|err| { + show_error!("-Zmiri-track-alloc-id requires a comma separated list of valid non-zero `u64` arguments: {err}") + }); + miri_config.tracked_alloc_ids.extend(ids.into_iter().map(miri::AllocId)); } else if arg == "-Zmiri-track-alloc-accesses" { miri_config.track_alloc_accesses = true; } else if let Some(param) = arg.strip_prefix("-Zmiri-address-reuse-rate=") { - let rate = match param.parse::() { - Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate, - Ok(_) => - show_error!( - "-Zmiri-compare-exchange-weak-failure-rate must be between `0.0` and `1.0`" - ), - Err(err) => - show_error!( - "-Zmiri-compare-exchange-weak-failure-rate requires a `f64` between `0.0` and `1.0`: {}", - err - ), - }; - miri_config.address_reuse_rate = rate; + miri_config.address_reuse_rate = parse_rate(param) + .unwrap_or_else(|err| show_error!("-Zmiri-address-reuse-rate {err}")); + } else if let Some(param) = arg.strip_prefix("-Zmiri-address-reuse-cross-thread-rate=") { + miri_config.address_reuse_cross_thread_rate = parse_rate(param) + .unwrap_or_else(|err| show_error!("-Zmiri-address-reuse-cross-thread-rate {err}")); } else if let Some(param) = arg.strip_prefix("-Zmiri-compare-exchange-weak-failure-rate=") { - let rate = match param.parse::() { - Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate, - Ok(_) => - show_error!( - "-Zmiri-compare-exchange-weak-failure-rate must be between `0.0` and `1.0`" - ), - 
Err(err) => - show_error!( - "-Zmiri-compare-exchange-weak-failure-rate requires a `f64` between `0.0` and `1.0`: {}", - err - ), - }; - miri_config.cmpxchg_weak_failure_rate = rate; + miri_config.cmpxchg_weak_failure_rate = parse_rate(param).unwrap_or_else(|err| { + show_error!("-Zmiri-compare-exchange-weak-failure-rate {err}") + }); } else if let Some(param) = arg.strip_prefix("-Zmiri-preemption-rate=") { - let rate = match param.parse::() { - Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate, - Ok(_) => show_error!("-Zmiri-preemption-rate must be between `0.0` and `1.0`"), - Err(err) => - show_error!( - "-Zmiri-preemption-rate requires a `f64` between `0.0` and `1.0`: {}", - err - ), - }; - miri_config.preemption_rate = rate; + miri_config.preemption_rate = + parse_rate(param).unwrap_or_else(|err| show_error!("-Zmiri-preemption-rate {err}")); } else if arg == "-Zmiri-report-progress" { // This makes it take a few seconds between progress reports on my laptop. miri_config.report_progress = Some(1_000_000); } else if let Some(param) = arg.strip_prefix("-Zmiri-report-progress=") { - let interval = match param.parse::() { - Ok(i) => i, - Err(err) => show_error!("-Zmiri-report-progress requires a `u32`: {}", err), - }; + let interval = param.parse::().unwrap_or_else(|err| { + show_error!("-Zmiri-report-progress requires a `u32`: {}", err) + }); miri_config.report_progress = Some(interval); } else if let Some(param) = arg.strip_prefix("-Zmiri-provenance-gc=") { - let interval = match param.parse::() { - Ok(i) => i, - Err(err) => show_error!("-Zmiri-provenance-gc requires a `u32`: {}", err), - }; + let interval = param.parse::().unwrap_or_else(|err| { + show_error!("-Zmiri-provenance-gc requires a `u32`: {}", err) + }); miri_config.gc_interval = interval; } else if let Some(param) = arg.strip_prefix("-Zmiri-measureme=") { miri_config.measureme_out = Some(param.to_string()); @@ -619,23 +585,20 @@ fn main() { show_error!("-Zmiri-extern-so-file `{}` does not exist", filename); } } else if let Some(param) = arg.strip_prefix("-Zmiri-num-cpus=") { - let num_cpus = match param.parse::() { - Ok(i) => i, - Err(err) => show_error!("-Zmiri-num-cpus requires a `u32`: {}", err), - }; - + let num_cpus = param + .parse::() + .unwrap_or_else(|err| show_error!("-Zmiri-num-cpus requires a `u32`: {}", err)); miri_config.num_cpus = num_cpus; } else if let Some(param) = arg.strip_prefix("-Zmiri-force-page-size=") { - let page_size = match param.parse::() { - Ok(i) => - if i.is_power_of_two() { - i * 1024 - } else { - show_error!("-Zmiri-force-page-size requires a power of 2: {}", i) - }, - Err(err) => show_error!("-Zmiri-force-page-size requires a `u64`: {}", err), + let page_size = param.parse::().unwrap_or_else(|err| { + show_error!("-Zmiri-force-page-size requires a `u64`: {}", err) + }); + // Convert from kilobytes to bytes. + let page_size = if page_size.is_power_of_two() { + page_size * 1024 + } else { + show_error!("-Zmiri-force-page-size requires a power of 2: {page_size}"); }; - miri_config.page_size = Some(page_size); } else { // Forward to rustc. diff --git a/src/eval.rs b/src/eval.rs index 457fe740d6..45dadb50f4 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -152,6 +152,8 @@ pub struct MiriConfig { pub collect_leak_backtraces: bool, /// Probability for address reuse. pub address_reuse_rate: f64, + /// Probability for address reuse across threads. 
+ pub address_reuse_cross_thread_rate: f64, } impl Default for MiriConfig { @@ -189,6 +191,7 @@ impl Default for MiriConfig { page_size: None, collect_leak_backtraces: true, address_reuse_rate: 0.5, + address_reuse_cross_thread_rate: 0.1, } } } diff --git a/tests/fail/both_borrows/retag_data_race_write.rs b/tests/fail/both_borrows/retag_data_race_write.rs index 3edaf10f3d..0061679eaa 100644 --- a/tests/fail/both_borrows/retag_data_race_write.rs +++ b/tests/fail/both_borrows/retag_data_race_write.rs @@ -1,6 +1,8 @@ //! Make sure that a retag acts like a write for the data race model. //@revisions: stack tree //@compile-flags: -Zmiri-preemption-rate=0 +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 //@[tree]compile-flags: -Zmiri-tree-borrows #[derive(Copy, Clone)] struct SendPtr(*mut u8); diff --git a/tests/fail/data_race/alloc_read_race.rs b/tests/fail/data_race/alloc_read_race.rs index 2cf3660690..c85c0ebe24 100644 --- a/tests/fail/data_race/alloc_read_race.rs +++ b/tests/fail/data_race/alloc_read_race.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 #![feature(new_uninit)] use std::mem::MaybeUninit; diff --git a/tests/fail/data_race/alloc_write_race.rs b/tests/fail/data_race/alloc_write_race.rs index e95e0e1a84..9e2a430dd9 100644 --- a/tests/fail/data_race/alloc_write_race.rs +++ b/tests/fail/data_race/alloc_write_race.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 #![feature(new_uninit)] use std::ptr::null_mut; diff --git a/tests/fail/data_race/atomic_read_na_write_race1.rs b/tests/fail/data_race/atomic_read_na_write_race1.rs index a256267bcd..4003892f0a 100644 --- a/tests/fail/data_race/atomic_read_na_write_race1.rs +++ b/tests/fail/data_race/atomic_read_na_write_race1.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/tests/fail/data_race/atomic_read_na_write_race2.rs b/tests/fail/data_race/atomic_read_na_write_race2.rs index cc6a0742c2..8bceba9380 100644 --- a/tests/fail/data_race/atomic_read_na_write_race2.rs +++ b/tests/fail/data_race/atomic_read_na_write_race2.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. 
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; diff --git a/tests/fail/data_race/atomic_write_na_read_race1.rs b/tests/fail/data_race/atomic_write_na_read_race1.rs index 7392781e6c..1a2746a26f 100644 --- a/tests/fail/data_race/atomic_write_na_read_race1.rs +++ b/tests/fail/data_race/atomic_write_na_read_race1.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; diff --git a/tests/fail/data_race/atomic_write_na_read_race2.rs b/tests/fail/data_race/atomic_write_na_read_race2.rs index f681ce0c05..e0876a93fd 100644 --- a/tests/fail/data_race/atomic_write_na_read_race2.rs +++ b/tests/fail/data_race/atomic_write_na_read_race2.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/tests/fail/data_race/atomic_write_na_write_race1.rs b/tests/fail/data_race/atomic_write_na_write_race1.rs index 47a3ef5d16..1010216a49 100644 --- a/tests/fail/data_race/atomic_write_na_write_race1.rs +++ b/tests/fail/data_race/atomic_write_na_write_race1.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/tests/fail/data_race/atomic_write_na_write_race2.rs b/tests/fail/data_race/atomic_write_na_write_race2.rs index 8bba4a8892..b494bd3a00 100644 --- a/tests/fail/data_race/atomic_write_na_write_race2.rs +++ b/tests/fail/data_race/atomic_write_na_write_race2.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; diff --git a/tests/fail/data_race/dangling_thread_async_race.rs b/tests/fail/data_race/dangling_thread_async_race.rs index 5b9005606e..dffafe3cfa 100644 --- a/tests/fail/data_race/dangling_thread_async_race.rs +++ b/tests/fail/data_race/dangling_thread_async_race.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse. 
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::mem; use std::thread::{sleep, spawn}; diff --git a/tests/fail/data_race/dangling_thread_race.rs b/tests/fail/data_race/dangling_thread_race.rs index 91c1191e03..8dc35c7ea7 100644 --- a/tests/fail/data_race/dangling_thread_race.rs +++ b/tests/fail/data_race/dangling_thread_race.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::mem; use std::thread::{sleep, spawn}; diff --git a/tests/fail/data_race/dealloc_read_race1.rs b/tests/fail/data_race/dealloc_read_race1.rs index 5928e47176..f174909e9d 100644 --- a/tests/fail/data_race/dealloc_read_race1.rs +++ b/tests/fail/data_race/dealloc_read_race1.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::thread::spawn; diff --git a/tests/fail/data_race/dealloc_read_race2.rs b/tests/fail/data_race/dealloc_read_race2.rs index c5f82cc9a7..1edfbf5e61 100644 --- a/tests/fail/data_race/dealloc_read_race2.rs +++ b/tests/fail/data_race/dealloc_read_race2.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::thread::spawn; diff --git a/tests/fail/data_race/dealloc_read_race_stack.rs b/tests/fail/data_race/dealloc_read_race_stack.rs index 1095f1e4e8..c67e03d362 100644 --- a/tests/fail/data_race/dealloc_read_race_stack.rs +++ b/tests/fail/data_race/dealloc_read_race_stack.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::ptr::null_mut; use std::sync::atomic::{AtomicPtr, Ordering}; diff --git a/tests/fail/data_race/dealloc_write_race1.rs b/tests/fail/data_race/dealloc_write_race1.rs index b5911e5111..7605f1911d 100644 --- a/tests/fail/data_race/dealloc_write_race1.rs +++ b/tests/fail/data_race/dealloc_write_race1.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::thread::spawn; diff --git a/tests/fail/data_race/dealloc_write_race2.rs b/tests/fail/data_race/dealloc_write_race2.rs index 7a2c882f7e..4f3819bd63 100644 --- a/tests/fail/data_race/dealloc_write_race2.rs +++ b/tests/fail/data_race/dealloc_write_race2.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. 
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::thread::spawn; diff --git a/tests/fail/data_race/dealloc_write_race_stack.rs b/tests/fail/data_race/dealloc_write_race_stack.rs index 5ee4cc04a8..8e63bc1dc7 100644 --- a/tests/fail/data_race/dealloc_write_race_stack.rs +++ b/tests/fail/data_race/dealloc_write_race_stack.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::ptr::null_mut; use std::sync::atomic::{AtomicPtr, Ordering}; diff --git a/tests/fail/data_race/enable_after_join_to_main.rs b/tests/fail/data_race/enable_after_join_to_main.rs index f2da45d727..53050608d2 100644 --- a/tests/fail/data_race/enable_after_join_to_main.rs +++ b/tests/fail/data_race/enable_after_join_to_main.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::thread::spawn; diff --git a/tests/fail/data_race/fence_after_load.rs b/tests/fail/data_race/fence_after_load.rs index 683e3b9c7a..92cb4ccccf 100644 --- a/tests/fail/data_race/fence_after_load.rs +++ b/tests/fail/data_race/fence_after_load.rs @@ -1,5 +1,8 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 + use std::sync::atomic::{fence, AtomicUsize, Ordering}; use std::sync::Arc; use std::thread; diff --git a/tests/fail/data_race/mixed_size_read.rs b/tests/fail/data_race/mixed_size_read.rs index 091a47070b..61af972b3d 100644 --- a/tests/fail/data_race/mixed_size_read.rs +++ b/tests/fail/data_race/mixed_size_read.rs @@ -1,4 +1,7 @@ //@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 + use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; use std::thread; diff --git a/tests/fail/data_race/mixed_size_write.rs b/tests/fail/data_race/mixed_size_write.rs index 49fb6c1d5c..12e51bb942 100644 --- a/tests/fail/data_race/mixed_size_write.rs +++ b/tests/fail/data_race/mixed_size_write.rs @@ -1,4 +1,7 @@ //@compile-flags: -Zmiri-preemption-rate=0.0 -Zmiri-disable-weak-memory-emulation +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 + use std::sync::atomic::{AtomicU16, AtomicU8, Ordering}; use std::thread; diff --git a/tests/fail/data_race/read_read_race1.rs b/tests/fail/data_race/read_read_race1.rs index f66b5ca3d5..02aa4e4b71 100644 --- a/tests/fail/data_race/read_read_race1.rs +++ b/tests/fail/data_race/read_read_race1.rs @@ -1,4 +1,7 @@ //@compile-flags: -Zmiri-preemption-rate=0.0 +// Avoid accidental synchronization via address reuse inside `thread::spawn`. 
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 + use std::sync::atomic::{AtomicU16, Ordering}; use std::thread; diff --git a/tests/fail/data_race/read_read_race2.rs b/tests/fail/data_race/read_read_race2.rs index d87b667d91..3b94c9143f 100644 --- a/tests/fail/data_race/read_read_race2.rs +++ b/tests/fail/data_race/read_read_race2.rs @@ -1,4 +1,7 @@ //@compile-flags: -Zmiri-preemption-rate=0.0 +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 + use std::sync::atomic::{AtomicU16, Ordering}; use std::thread; diff --git a/tests/fail/data_race/read_write_race.rs b/tests/fail/data_race/read_write_race.rs index 70971b59ff..adf19dda9d 100644 --- a/tests/fail/data_race/read_write_race.rs +++ b/tests/fail/data_race/read_write_race.rs @@ -1,5 +1,7 @@ // We want to control preemption here. Stacked borrows interferes by having its own accesses. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::thread::spawn; diff --git a/tests/fail/data_race/read_write_race_stack.rs b/tests/fail/data_race/read_write_race_stack.rs index 9fec3ceee0..f411767f7b 100644 --- a/tests/fail/data_race/read_write_race_stack.rs +++ b/tests/fail/data_race/read_write_race_stack.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::ptr::null_mut; use std::sync::atomic::{AtomicPtr, Ordering}; diff --git a/tests/fail/data_race/relax_acquire_race.rs b/tests/fail/data_race/relax_acquire_race.rs index be4450794c..c4f9438082 100644 --- a/tests/fail/data_race/relax_acquire_race.rs +++ b/tests/fail/data_race/relax_acquire_race.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/tests/fail/data_race/release_seq_race.rs b/tests/fail/data_race/release_seq_race.rs index 9810832413..f03ab3efa0 100644 --- a/tests/fail/data_race/release_seq_race.rs +++ b/tests/fail/data_race/release_seq_race.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::{sleep, spawn}; diff --git a/tests/fail/data_race/release_seq_race_same_thread.rs b/tests/fail/data_race/release_seq_race_same_thread.rs index 93cbc2a57d..88ae01b3ca 100644 --- a/tests/fail/data_race/release_seq_race_same_thread.rs +++ b/tests/fail/data_race/release_seq_race_same_thread.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. 
+//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/tests/fail/data_race/rmw_race.rs b/tests/fail/data_race/rmw_race.rs index 982e9c1c41..d738caa105 100644 --- a/tests/fail/data_race/rmw_race.rs +++ b/tests/fail/data_race/rmw_race.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::{AtomicUsize, Ordering}; use std::thread::spawn; diff --git a/tests/fail/data_race/stack_pop_race.rs b/tests/fail/data_race/stack_pop_race.rs index 68d82bc30a..762a8e51f6 100644 --- a/tests/fail/data_race/stack_pop_race.rs +++ b/tests/fail/data_race/stack_pop_race.rs @@ -1,4 +1,7 @@ //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 + use std::thread; #[derive(Copy, Clone)] diff --git a/tests/fail/data_race/write_write_race.rs b/tests/fail/data_race/write_write_race.rs index e8924702af..993d8d25b4 100644 --- a/tests/fail/data_race/write_write_race.rs +++ b/tests/fail/data_race/write_write_race.rs @@ -1,5 +1,7 @@ // We want to control preemption here. //@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::thread::spawn; diff --git a/tests/fail/data_race/write_write_race_stack.rs b/tests/fail/data_race/write_write_race_stack.rs index 984ae2ee83..8070a7f4fc 100644 --- a/tests/fail/data_race/write_write_race_stack.rs +++ b/tests/fail/data_race/write_write_race_stack.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows +// Avoid accidental synchronization via address reuse inside `thread::spawn`. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=0 use std::ptr::null_mut; use std::sync::atomic::{AtomicPtr, Ordering}; diff --git a/tests/fail/stacked_borrows/retag_data_race_protected_read.rs b/tests/fail/stacked_borrows/retag_data_race_protected_read.rs index 3de517055e..a6ee7b40c3 100644 --- a/tests/fail/stacked_borrows/retag_data_race_protected_read.rs +++ b/tests/fail/stacked_borrows/retag_data_race_protected_read.rs @@ -1,4 +1,5 @@ -//@compile-flags: -Zmiri-preemption-rate=0 +// Avoid accidental synchronization via address reuse. +//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 use std::thread; #[derive(Copy, Clone)] diff --git a/tests/fail/stacked_borrows/retag_data_race_read.rs b/tests/fail/stacked_borrows/retag_data_race_read.rs index 25c92ddf6c..949f659e7e 100644 --- a/tests/fail/stacked_borrows/retag_data_race_read.rs +++ b/tests/fail/stacked_borrows/retag_data_race_read.rs @@ -1,5 +1,6 @@ //! Make sure that a retag acts like a read for the data race model. -//@compile-flags: -Zmiri-preemption-rate=0 +// Avoid accidental synchronization via address reuse. 
+//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 #[derive(Copy, Clone)] struct SendPtr(*mut u8); diff --git a/tests/fail/weak_memory/racing_mixed_size.rs b/tests/fail/weak_memory/racing_mixed_size.rs index dfe9397a4c..1193dddc57 100644 --- a/tests/fail/weak_memory/racing_mixed_size.rs +++ b/tests/fail/weak_memory/racing_mixed_size.rs @@ -1,5 +1,6 @@ // We want to control preemption here. -//@compile-flags: -Zmiri-preemption-rate=0 +// Avoid accidental synchronization via address reuse. +//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 #![feature(core_intrinsics)] diff --git a/tests/fail/weak_memory/racing_mixed_size_read.rs b/tests/fail/weak_memory/racing_mixed_size_read.rs index b946a75c3a..0a0e372f1f 100644 --- a/tests/fail/weak_memory/racing_mixed_size_read.rs +++ b/tests/fail/weak_memory/racing_mixed_size_read.rs @@ -1,5 +1,6 @@ // We want to control preemption here. -//@compile-flags: -Zmiri-preemption-rate=0 +// Avoid accidental synchronization via address reuse. +//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 use std::sync::atomic::Ordering::*; use std::sync::atomic::{AtomicU16, AtomicU32}; diff --git a/tests/pass/concurrency/address_reuse_happens_before.rs b/tests/pass/concurrency/address_reuse_happens_before.rs index c0de1b4826..cfc1ef7ae4 100644 --- a/tests/pass/concurrency/address_reuse_happens_before.rs +++ b/tests/pass/concurrency/address_reuse_happens_before.rs @@ -1,5 +1,6 @@ //! Regression test for : //! When the address gets reused, there should be a happens-before relation. +//@compile-flags: -Zmiri-address-reuse-cross-thread-rate=1.0 #![feature(strict_provenance)] #![feature(sync_unsafe_cell)] diff --git a/tests/pass/concurrency/disable_data_race_detector.rs b/tests/pass/concurrency/disable_data_race_detector.rs index 049b5e7f49..354a4bef93 100644 --- a/tests/pass/concurrency/disable_data_race_detector.rs +++ b/tests/pass/concurrency/disable_data_race_detector.rs @@ -1,4 +1,6 @@ //@compile-flags: -Zmiri-disable-data-race-detector +// Avoid non-determinism +//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-address-reuse-cross-thread-rate=0 use std::thread::spawn;