Auto merge of #130401 - matthiaskrgr:rollup-fri2j58, r=matthiaskrgr
Rollup of 5 pull requests

Successful merges:

 - #129439 (Implement feature `string_from_utf8_lossy_owned` for lossy conversion from `Vec<u8>` to `String` methods), illustrated in the sketch below
 - #129828 (miri: treat non-memory local variables properly for data race detection)
 - #130110 (make dist vendoring configurable)
 - #130293 (Fix lint levels not getting overridden by attrs on `Stmt` nodes)
 - #130342 (interpret, miri: fix dealing with overflow during slice indexing and allocation)

Failed merges:

 - #130394 (const: don't ICE when encountering a mutable ref to immutable memory)

r? `@ghost`
`@rustbot` modify labels: rollup
bors committed Sep 15, 2024
2 parents bc3609f + 6a37bae commit 8eeed71
Showing 15 changed files with 536 additions and 143 deletions.
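The only library-facing item above is #129439; the rest touch Miri, the interpreter, lint levels, and bootstrap. As a usage sketch of what lossy `Vec<u8>` to `String` conversion does, here is the stable borrowing API; the new `string_from_utf8_lossy_owned` feature adds a by-value variant, whose exact signature is assumed below rather than taken from this commit:

```rust
fn main() {
    // "💖" followed by one invalid UTF-8 byte.
    let bytes: Vec<u8> = vec![0xF0, 0x9F, 0x92, 0x96, 0xFF];

    // Stable today: borrows the bytes, returns Cow<str>, and replaces
    // invalid sequences with U+FFFD.
    let s = String::from_utf8_lossy(&bytes).into_owned();
    assert_eq!(s, "💖\u{FFFD}");

    // With #![feature(string_from_utf8_lossy_owned)] the conversion can take the
    // Vec by value and reuse its buffer when the bytes are already valid UTF-8.
    // Assumed call shape, not verified against this commit:
    // let s = String::from_utf8_lossy_owned(bytes);
}
```
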
304 changes: 199 additions & 105 deletions src/concurrency/data_race.rs
```diff
@@ -47,6 +47,7 @@ use std::{
 };
 
 use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_index::{Idx, IndexVec};
 use rustc_middle::{mir, ty::Ty};
@@ -1047,32 +1048,31 @@ impl VClockAlloc {
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
-            for (mem_clocks_range, mem_clocks) in
-                alloc_ranges.iter_mut(access_range.start, access_range.size)
-            {
-                if let Err(DataRace) =
-                    mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
-                {
-                    drop(thread_clocks);
-                    // Report data-race.
-                    return Self::report_data_race(
-                        global,
-                        &machine.threads,
-                        mem_clocks,
-                        AccessType::NaRead(read_type),
-                        access_range.size,
-                        interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
-                        ty,
-                    );
-                }
-            }
-            Ok(())
-        } else {
-            Ok(())
-        }
+        if !global.race_detecting() {
+            return Ok(());
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        let mut alloc_ranges = self.alloc_ranges.borrow_mut();
+        for (mem_clocks_range, mem_clocks) in
+            alloc_ranges.iter_mut(access_range.start, access_range.size)
+        {
+            if let Err(DataRace) =
+                mem_clocks.read_race_detect(&mut thread_clocks, index, read_type, current_span)
+            {
+                drop(thread_clocks);
+                // Report data-race.
+                return Self::report_data_race(
+                    global,
+                    &machine.threads,
+                    mem_clocks,
+                    AccessType::NaRead(read_type),
+                    access_range.size,
+                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
+                    ty,
+                );
+            }
+        }
+        Ok(())
     }
 
     /// Detect data-races for an unsynchronized write operation. It will not perform
```
```diff
@@ -1090,33 +1090,129 @@ impl VClockAlloc {
     ) -> InterpResult<'tcx> {
         let current_span = machine.current_span();
         let global = machine.data_race.as_mut().unwrap();
-        if global.race_detecting() {
-            let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
-            for (mem_clocks_range, mem_clocks) in
-                self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
-            {
-                if let Err(DataRace) = mem_clocks.write_race_detect(
-                    &mut thread_clocks,
-                    index,
-                    write_type,
-                    current_span,
-                ) {
-                    drop(thread_clocks);
-                    // Report data-race
-                    return Self::report_data_race(
-                        global,
-                        &machine.threads,
-                        mem_clocks,
-                        AccessType::NaWrite(write_type),
-                        access_range.size,
-                        interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
-                        ty,
-                    );
-                }
-            }
-            Ok(())
-        } else {
-            Ok(())
-        }
+        if !global.race_detecting() {
+            return Ok(());
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        for (mem_clocks_range, mem_clocks) in
+            self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
+        {
+            if let Err(DataRace) =
+                mem_clocks.write_race_detect(&mut thread_clocks, index, write_type, current_span)
+            {
+                drop(thread_clocks);
+                // Report data-race
+                return Self::report_data_race(
+                    global,
+                    &machine.threads,
+                    mem_clocks,
+                    AccessType::NaWrite(write_type),
+                    access_range.size,
+                    interpret::Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
+                    ty,
+                );
+            }
+        }
+        Ok(())
     }
 }
+
+/// Vector clock state for a stack frame (tracking the local variables
+/// that do not have an allocation yet).
+#[derive(Debug, Default)]
+pub struct FrameState {
+    local_clocks: RefCell<FxHashMap<mir::Local, LocalClocks>>,
+}
+
+/// Stripped-down version of [`MemoryCellClocks`] for the clocks we need to keep track
+/// of in a local that does not yet have addressable memory -- and hence can only
+/// be accessed from the thread its stack frame belongs to, and cannot be access atomically.
+#[derive(Debug)]
+struct LocalClocks {
+    write: VTimestamp,
+    write_type: NaWriteType,
+    read: VTimestamp,
+}
+
+impl Default for LocalClocks {
+    fn default() -> Self {
+        Self { write: VTimestamp::ZERO, write_type: NaWriteType::Allocate, read: VTimestamp::ZERO }
+    }
+}
+
+impl FrameState {
+    pub fn local_write(&self, local: mir::Local, storage_live: bool, machine: &MiriMachine<'_>) {
+        let current_span = machine.current_span();
+        let global = machine.data_race.as_ref().unwrap();
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // This should do the same things as `MemoryCellClocks::write_race_detect`.
+        if !current_span.is_dummy() {
+            thread_clocks.clock.index_mut(index).span = current_span;
+        }
+        let mut clocks = self.local_clocks.borrow_mut();
+        if storage_live {
+            let new_clocks = LocalClocks {
+                write: thread_clocks.clock[index],
+                write_type: NaWriteType::Allocate,
+                read: VTimestamp::ZERO,
+            };
+            // There might already be an entry in the map for this, if the local was previously
+            // live already.
+            clocks.insert(local, new_clocks);
+        } else {
+            // This can fail to exist if `race_detecting` was false when the allocation
+            // occurred, in which case we can backdate this to the beginning of time.
+            let clocks = clocks.entry(local).or_insert_with(Default::default);
+            clocks.write = thread_clocks.clock[index];
+            clocks.write_type = NaWriteType::Write;
+        }
+    }
+
+    pub fn local_read(&self, local: mir::Local, machine: &MiriMachine<'_>) {
+        let current_span = machine.current_span();
+        let global = machine.data_race.as_ref().unwrap();
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, mut thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // This should do the same things as `MemoryCellClocks::read_race_detect`.
+        if !current_span.is_dummy() {
+            thread_clocks.clock.index_mut(index).span = current_span;
+        }
+        thread_clocks.clock.index_mut(index).set_read_type(NaReadType::Read);
+        // This can fail to exist if `race_detecting` was false when the allocation
+        // occurred, in which case we can backdate this to the beginning of time.
+        let mut clocks = self.local_clocks.borrow_mut();
+        let clocks = clocks.entry(local).or_insert_with(Default::default);
+        clocks.read = thread_clocks.clock[index];
+    }
+
+    pub fn local_moved_to_memory(
+        &self,
+        local: mir::Local,
+        alloc: &mut VClockAlloc,
+        machine: &MiriMachine<'_>,
+    ) {
+        let global = machine.data_race.as_ref().unwrap();
+        if !global.race_detecting() {
+            return;
+        }
+        let (index, _thread_clocks) = global.active_thread_state_mut(&machine.threads);
+        // Get the time the last write actually happened. This can fail to exist if
+        // `race_detecting` was false when the write occurred, in that case we can backdate this
+        // to the beginning of time.
+        let local_clocks = self.local_clocks.borrow_mut().remove(&local).unwrap_or_default();
+        for (_mem_clocks_range, mem_clocks) in alloc.alloc_ranges.get_mut().iter_mut_all() {
+            // The initialization write for this already happened, just at the wrong timestamp.
+            // Check that the thread index matches what we expect.
+            assert_eq!(mem_clocks.write.0, index);
+            // Convert the local's clocks into memory clocks.
+            mem_clocks.write = (index, local_clocks.write);
+            mem_clocks.write_type = local_clocks.write_type;
+            mem_clocks.read = VClock::new_with_index(index, local_clocks.read);
+        }
+    }
+}
```
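The `FrameState`/`LocalClocks` additions above are the "non-memory local variables" tracking from #129828: a local that has no allocation yet can only be touched by the thread that owns the frame, so it needs only a single write timestamp, a write type, and a read timestamp rather than full vector clocks, and those scalars are converted into real memory clocks once the local is moved to memory. A standalone, simplified sketch of that idea (plain Rust, invented names, not Miri code):

```rust
use std::collections::HashMap;

/// Stand-in for Miri's `VTimestamp`: one thread-local clock value.
type Timestamp = u64;

/// Per-local clocks: only the owning thread's last write/read times.
#[derive(Default, Clone, Copy, Debug, PartialEq, Eq)]
struct LocalClocks {
    write: Timestamp,
    read: Timestamp,
}

/// Per-frame map from local index to its clocks, mirroring `FrameState`.
#[derive(Default)]
struct FrameState {
    local_clocks: HashMap<usize, LocalClocks>,
}

impl FrameState {
    fn local_write(&mut self, local: usize, now: Timestamp) {
        // Missing entries are backdated to time 0, like `or_insert_with(Default::default)`.
        self.local_clocks.entry(local).or_default().write = now;
    }

    fn local_read(&mut self, local: usize, now: Timestamp) {
        self.local_clocks.entry(local).or_default().read = now;
    }

    /// Once the local gets an address, its scalar timestamps seed the new
    /// allocation's clocks (returned here instead of written into a range map).
    fn local_moved_to_memory(&mut self, local: usize) -> LocalClocks {
        self.local_clocks.remove(&local).unwrap_or_default()
    }
}

fn main() {
    let mut frame = FrameState::default();
    frame.local_write(0, 3); // write to local _0 at time 3
    frame.local_read(0, 5); // read it at time 5
    assert_eq!(frame.local_moved_to_memory(0), LocalClocks { write: 3, read: 5 });
    // A local that was never tracked is backdated to the beginning of time.
    assert_eq!(frame.local_moved_to_memory(1), LocalClocks::default());
}
```
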
Expand Down Expand Up @@ -1305,69 +1401,67 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx> {
let this = self.eval_context_ref();
assert!(access.is_atomic());
if let Some(data_race) = &this.machine.data_race {
if data_race.race_detecting() {
let size = place.layout.size;
let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
// Load and log the atomic operation.
// Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
trace!(
"Atomic op({}) with ordering {:?} on {:?} (size={})",
access.description(None, None),
&atomic,
place.ptr(),
size.bytes()
);
let Some(data_race) = &this.machine.data_race else { return Ok(()) };
if !data_race.race_detecting() {
return Ok(());
}
let size = place.layout.size;
let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr(), 0)?;
// Load and log the atomic operation.
// Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
trace!(
"Atomic op({}) with ordering {:?} on {:?} (size={})",
access.description(None, None),
&atomic,
place.ptr(),
size.bytes()
);

let current_span = this.machine.current_span();
// Perform the atomic operation.
data_race.maybe_perform_sync_operation(
&this.machine.threads,
current_span,
|index, mut thread_clocks| {
for (mem_clocks_range, mem_clocks) in
alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
{
if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic)
{
mem::drop(thread_clocks);
return VClockAlloc::report_data_race(
data_race,
&this.machine.threads,
mem_clocks,
access,
place.layout.size,
interpret::Pointer::new(
alloc_id,
Size::from_bytes(mem_clocks_range.start),
),
None,
)
.map(|_| true);
}
}

// This conservatively assumes all operations have release semantics
Ok(true)
},
)?;

// Log changes to atomic memory.
if tracing::enabled!(tracing::Level::TRACE) {
for (_offset, mem_clocks) in
alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
{
trace!(
"Updated atomic memory({:?}, size={}) to {:#?}",
place.ptr(),
size.bytes(),
mem_clocks.atomic_ops
);
let current_span = this.machine.current_span();
// Perform the atomic operation.
data_race.maybe_perform_sync_operation(
&this.machine.threads,
current_span,
|index, mut thread_clocks| {
for (mem_clocks_range, mem_clocks) in
alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
{
if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic) {
mem::drop(thread_clocks);
return VClockAlloc::report_data_race(
data_race,
&this.machine.threads,
mem_clocks,
access,
place.layout.size,
interpret::Pointer::new(
alloc_id,
Size::from_bytes(mem_clocks_range.start),
),
None,
)
.map(|_| true);
}
}

// This conservatively assumes all operations have release semantics
Ok(true)
},
)?;

// Log changes to atomic memory.
if tracing::enabled!(tracing::Level::TRACE) {
for (_offset, mem_clocks) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size) {
trace!(
"Updated atomic memory({:?}, size={}) to {:#?}",
place.ptr(),
size.bytes(),
mem_clocks.atomic_ops
);
}
}

Ok(())
}
}
Expand Down
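Apart from the `FrameState` additions, the hunks in this file are control-flow refactors: the nested `if let Some(...)` / `if race_detecting()` guards become early returns via `let ... else` and `if !cond { return ... }`, which removes one or two indentation levels from the body. A minimal sketch of the pattern (generic names, not Miri's):

```rust
fn process(value: Option<i32>, enabled: bool) -> Result<(), String> {
    // Before: `if let Some(v) = value { if enabled { ... } }` nests the whole body.
    // After: each guard returns early and the main logic stays at one level.
    let Some(v) = value else { return Ok(()) };
    if !enabled {
        return Ok(());
    }
    if v < 0 {
        return Err(format!("negative value: {v}"));
    }
    Ok(())
}

fn main() {
    assert!(process(None, true).is_ok());
    assert!(process(Some(-1), true).is_err());
    assert!(process(Some(1), false).is_ok());
}
```
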
6 changes: 4 additions & 2 deletions src/concurrency/thread.rs
```diff
@@ -530,7 +530,9 @@ impl<'tcx> ThreadManager<'tcx> {
     }
 
     /// Mutably borrow the stack of the active thread.
-    fn active_thread_stack_mut(&mut self) -> &mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
+    pub fn active_thread_stack_mut(
+        &mut self,
+    ) -> &mut Vec<Frame<'tcx, Provenance, FrameExtra<'tcx>>> {
         &mut self.threads[self.active_thread].stack
     }
     pub fn all_stacks(
@@ -898,7 +900,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         // This allocation will be deallocated when the thread dies, so it is not in read-only memory.
         alloc.mutability = Mutability::Mut;
         // Create a fresh allocation with this content.
-        let ptr = this.allocate_raw_ptr(alloc, MiriMemoryKind::Tls.into())?;
+        let ptr = this.insert_allocation(alloc, MiriMemoryKind::Tls.into())?;
         this.machine.threads.set_thread_local_alloc(def_id, ptr);
         Ok(ptr)
     }
```
6 changes: 6 additions & 0 deletions src/concurrency/vector_clock.rs
```diff
@@ -130,13 +130,19 @@ impl Ord for VTimestamp {
 /// also this means that there is only one unique valid length
 /// for each set of vector clock values and hence the PartialEq
 /// and Eq derivations are correct.
+///
+/// This means we cannot represent a clock where the last entry is a timestamp-0 read that occurs
+/// because of a retag. That's fine, all it does is risk wrong diagnostics in a extreme corner case.
 #[derive(PartialEq, Eq, Default, Debug)]
 pub struct VClock(SmallVec<[VTimestamp; SMALL_VECTOR]>);
 
 impl VClock {
     /// Create a new vector-clock containing all zeros except
     /// for a value at the given index
     pub(super) fn new_with_index(index: VectorIdx, timestamp: VTimestamp) -> VClock {
+        if timestamp.time() == 0 {
+            return VClock::default();
+        }
         let len = index.index() + 1;
         let mut vec = smallvec::smallvec![VTimestamp::ZERO; len];
         vec[index.index()] = timestamp;
```
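The `vector_clock.rs` change enforces the invariant described in the new doc comment: a `VClock` never stores trailing zero entries, so a timestamp-0 entry must collapse to the empty (all-zero) clock, which is exactly what `local_moved_to_memory` above can produce for a never-read local. A minimal sketch of the invariant with plain integers (simplified, not Miri's types):

```rust
/// Simplified stand-in for `VClock::new_with_index`: all zeros except `index`,
/// but trailing zeros are never stored, so a zero timestamp yields the empty clock.
fn new_with_index(index: usize, timestamp: u64) -> Vec<u64> {
    if timestamp == 0 {
        return Vec::new();
    }
    let mut clock = vec![0; index + 1];
    clock[index] = timestamp;
    clock
}

fn main() {
    // Without the early return, these two "all-zero" clocks would have different
    // lengths and compare unequal even though they describe the same state.
    assert_eq!(new_with_index(3, 0), new_with_index(0, 0));
    assert_eq!(new_with_index(2, 7), vec![0, 0, 7]);
}
```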