
Commit 6ee68f5

More tests, fix issue 1643 and detect races with allocation.
1 parent: a2fa80d

14 files changed, +564 -20 lines

src/data_race.rs

Lines changed: 56 additions & 19 deletions
@@ -192,6 +192,25 @@ struct AtomicMemoryCellClocks {
     sync_vector: VClock,
 }
 
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum WriteType {
+    /// Allocate memory.
+    Allocate,
+    /// Standard unsynchronized write.
+    Write,
+    /// Deallocate memory
+    Deallocate,
+}
+impl WriteType {
+    fn get_descriptor(self) -> &'static str {
+        match self {
+            WriteType::Allocate => "ALLOCATE",
+            WriteType::Write => "WRITE",
+            WriteType::Deallocate => "DEALLOCATE",
+        }
+    }
+}
+
 /// Memory Cell vector clock metadata
 /// for data-race detection.
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -204,6 +223,11 @@ struct MemoryCellClocks {
     /// that performed the last write operation.
     write_index: VectorIdx,
 
+    /// The type of operation that the write index represents,
+    /// either newly allocated memory, a non-atomic write or
+    /// a deallocation of memory.
+    write_type: WriteType,
+
     /// The vector-clock of the timestamp of the last read operation
     /// performed by a thread since the last write operation occurred.
     /// It is reset to zero on each write operation.
@@ -215,20 +239,19 @@ struct MemoryCellClocks {
     atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
 }
 
-/// Create a default memory cell clocks instance
-/// for uninitialized memory.
-impl Default for MemoryCellClocks {
-    fn default() -> Self {
+impl MemoryCellClocks {
+
+    /// Create a new set of clocks representing memory allocated
+    /// at a given vector timestamp and index.
+    fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
         MemoryCellClocks {
             read: VClock::default(),
-            write: 0,
-            write_index: VectorIdx::MAX_INDEX,
+            write: alloc,
+            write_index: alloc_index,
+            write_type: WriteType::Allocate,
             atomic_ops: None,
         }
    }
-}
-
-impl MemoryCellClocks {
 
     /// Load the internal atomic memory cells if they exist.
     #[inline]
@@ -382,6 +405,7 @@ impl MemoryCellClocks {
         &mut self,
         clocks: &ThreadClockSet,
         index: VectorIdx,
+        write_type: WriteType,
     ) -> Result<(), DataRace> {
         log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
         if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
@@ -393,6 +417,7 @@ impl MemoryCellClocks {
         if race_free {
             self.write = clocks.clock[index];
             self.write_index = index;
+            self.write_type = write_type;
             self.read.set_zero_vector();
             Ok(())
         } else {
@@ -646,16 +671,28 @@ pub struct VClockAlloc {
     /// Assigning each byte a MemoryCellClocks.
     alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,
 
-    // Pointer to global state.
+    /// Pointer to global state.
     global: MemoryExtra,
 }
 
 impl VClockAlloc {
-    /// Create a new data-race allocation detector.
-    pub fn new_allocation(global: &MemoryExtra, len: Size) -> VClockAlloc {
+
+    /// Create a new data-race detector for newly allocated memory.
+    pub fn new_allocation(global: &MemoryExtra, len: Size, track_alloc: bool) -> VClockAlloc {
+        //FIXME: stack allocations are currently ignored due to the lazy nature of stack
+        // allocation, this results in data-races being missed.
+        let (alloc_timestamp, alloc_index) = if !track_alloc {
+            (0, VectorIdx::MAX_INDEX)
+        } else {
+            let (alloc_index, clocks) = global.current_thread_state();
+            let alloc_timestamp = clocks.clock[alloc_index];
+            (alloc_timestamp, alloc_index)
+        };
         VClockAlloc {
             global: Rc::clone(global),
-            alloc_ranges: RefCell::new(RangeMap::new(len, MemoryCellClocks::default())),
+            alloc_ranges: RefCell::new(RangeMap::new(
+                len, MemoryCellClocks::new(alloc_timestamp, alloc_index)
+            )),
         }
    }
@@ -712,7 +749,7 @@ impl VClockAlloc {
             // Convert the write action into the vector clock it
             // represents for diagnostic purposes.
             write_clock = VClock::new_with_index(range.write_index, range.write);
-            ("WRITE", range.write_index, &write_clock)
+            (range.write_type.get_descriptor(), range.write_index, &write_clock)
         } else if let Some(idx) = Self::find_gt_index(&range.read, &current_clocks.clock) {
             ("READ", idx, &range.read)
         } else if !is_atomic {
@@ -792,17 +829,17 @@ impl VClockAlloc {
         &mut self,
         pointer: Pointer<Tag>,
         len: Size,
-        action: &str,
+        write_type: WriteType,
    ) -> InterpResult<'tcx> {
         if self.global.multi_threaded.get() {
             let (index, clocks) = self.global.current_thread_state();
             for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
-                if let Err(DataRace) = range.write_race_detect(&*clocks, index) {
+                if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
                     // Report data-race
                     return Self::report_data_race(
                         &self.global,
                         range,
-                        action,
+                        write_type.get_descriptor(),
                         false,
                         pointer,
                         len,
@@ -820,15 +857,15 @@ impl VClockAlloc {
     /// being created or if it is temporarily disabled during a racy read or write
     /// operation
     pub fn write<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, "Write")
+        self.unique_access(pointer, len, WriteType::Write)
     }
 
     /// Detect data-races for an unsynchronized deallocate operation, will not perform
     /// data-race threads if `multi-threaded` is false, either due to no threads
     /// being created or if it is temporarily disabled during a racy read or write
     /// operation
     pub fn deallocate<'tcx>(&mut self, pointer: Pointer<Tag>, len: Size) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, "Deallocate")
+        self.unique_access(pointer, len, WriteType::Deallocate)
     }
 }
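
The core idea of the data_race.rs change: a fresh allocation's clocks now start at the allocating thread's current vector-clock timestamp instead of at zero, and that event is recorded as a write of type ALLOCATE (likewise DEALLOCATE on free). Any later access from a thread that has not synchronized with the allocating thread then fails the same happens-before check as an ordinary write/read race, and the diagnostic names the kind of write involved. Below is a minimal, self-contained sketch of that check; the types and names are hypothetical stand-ins, not Miri's real VClock/MemoryCellClocks API.

    // Sketch only: a cell remembers its last write-like event (allocation counts),
    // and a read races unless that event happened-before the reader's clock.
    #[allow(dead_code)]
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum WriteType { Allocate, Write, Deallocate }

    struct Cell {
        write: u64,            // timestamp of the last write-like event
        write_thread: usize,   // index of the thread that performed it
        write_type: WriteType, // ALLOCATE / WRITE / DEALLOCATE, for diagnostics
    }

    // `reader_clock[i]` is the latest event of thread `i` that the reader has observed.
    fn read_race(cell: &Cell, reader_clock: &[u64]) -> Option<WriteType> {
        if cell.write <= reader_clock[cell.write_thread] {
            None // the allocation/write happened-before this read: no race
        } else {
            Some(cell.write_type) // reported e.g. as a data race against an ALLOCATE
        }
    }

    fn main() {
        // Thread 1 allocated at its local time 5; thread 0 never synchronized with
        // thread 1, so its entry for thread 1 is still 0, and the race is detected.
        let cell = Cell { write: 5, write_thread: 1, write_type: WriteType::Allocate };
        assert_eq!(read_race(&cell, &[3, 0]), Some(WriteType::Allocate));
    }

With the old behaviour (allocation timestamp 0), the same check would have passed and the alloc/read race from issue 1643 would have gone unreported.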

src/machine.rs

Lines changed: 18 additions & 1 deletion
@@ -478,7 +478,24 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
            (None, Tag::Untagged)
        };
        let race_alloc = if let Some(data_race) = &memory_extra.data_race {
-            Some(data_race::AllocExtra::new_allocation(&data_race, alloc.size))
+            match kind {
+                // V-Table generation is lazy and so racy, so do not track races.
+                // Also V-Tables are read only so no data races can be detected.
+                MemoryKind::Vtable => None,
+                // User allocated and stack memory should track allocation.
+                MemoryKind::Machine(
+                    MiriMemoryKind::Rust | MiriMemoryKind::C | MiriMemoryKind::WinHeap
+                ) | MemoryKind::Stack => Some(
+                    data_race::AllocExtra::new_allocation(&data_race, alloc.size, true)
+                ),
+                // Other global memory should trace races but be allocated at the 0 timestamp.
+                MemoryKind::Machine(
+                    MiriMemoryKind::Global | MiriMemoryKind::Machine | MiriMemoryKind::Env |
+                    MiriMemoryKind::ExternStatic | MiriMemoryKind::Tls
+                ) | MemoryKind::CallerLocation => Some(
+                    data_race::AllocExtra::new_allocation(&data_race, alloc.size, false)
+                )
+            }
        } else {
            None
        };
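
The machine.rs change decides, per allocation kind, how its allocation event is treated: heap and stack allocations are tracked at the current thread's timestamp, v-tables get no detector at all (they are created lazily and are read-only), and other machine-managed memory (globals, TLS, environment data, caller locations) is treated as if allocated at timestamp zero, since its creation point is not an event of the interpreted program. A rough sketch of the same decision as a predicate follows; the Kind enum is an illustrative stand-in, not Miri's MemoryKind/MiriMemoryKind.

    // Sketch: which allocations get a "real" allocation timestamp.
    #[allow(dead_code)]
    #[derive(Clone, Copy)]
    enum Kind { Heap, Stack, Vtable, Global, Tls }

    /// None means no race tracking at all; Some(track_alloc) mirrors the new
    /// `new_allocation(.., track_alloc)` flag from the diff above.
    fn race_tracking(kind: Kind) -> Option<bool> {
        match kind {
            Kind::Vtable => None,                    // lazy and read-only: never tracked
            Kind::Heap | Kind::Stack => Some(true),  // allocation is a real program event
            Kind::Global | Kind::Tls => Some(false), // pretend allocated at time zero
        }
    }

    fn main() {
        assert_eq!(race_tracking(Kind::Heap), Some(true));
        assert_eq!(race_tracking(Kind::Vtable), None);
    }
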
Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<usize>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. alloc
+    //  2. write
+    unsafe {
+        let j1 = spawn(move || {
+            //Concurrent allocate the memory.
+            //Uses relaxed semantics to not generate
+            //a release sequence.
+            let pointer = &*ptr.0;
+            pointer.store(Box::into_raw(Box::new(0usize)), Ordering::Relaxed);
+        });
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+            *pointer.load(Ordering::Relaxed) //~ ERROR Data race
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+
+        // Clean up memory, will never be executed
+        drop(Box::from_raw(pointer.load(Ordering::Relaxed)));
+    }
+}
Lines changed: 45 additions & 0 deletions
@@ -0,0 +1,45 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<usize>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. alloc
+    //  2. write
+    unsafe {
+        let j1 = spawn(move || {
+            //Concurrent allocate the memory.
+            //Uses relaxed semantics to not generate
+            //a release sequence.
+            let pointer = &*ptr.0;
+            pointer.store(Box::into_raw(Box::new(0usize)), Ordering::Relaxed);
+        });
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+            *pointer.load(Ordering::Relaxed) = 2; //~ ERROR Data race
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+
+        // Clean up memory, will never be executed
+        drop(Box::from_raw(pointer.load(Ordering::Relaxed)));
+    }
+}
Lines changed: 32 additions & 0 deletions
@@ -0,0 +1,32 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+
+use std::thread::spawn;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+extern "Rust" {
+    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
+}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer: *mut usize = Box::into_raw(Box::new(0usize));
+    let ptr = EvilSend(pointer);
+
+    unsafe {
+        let j1 = spawn(move || {
+            *ptr.0
+        });
+
+        let j2 = spawn(move || {
+            __rust_dealloc(ptr.0 as *mut _, std::mem::size_of::<usize>(), std::mem::align_of::<usize>()); //~ ERROR Data race
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}
Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
+// ignore-windows: Concurrency on Windows is not supported yet.
+// compile-flags: -Zmiri-disable-isolation
+
+use std::thread::{spawn, sleep};
+use std::ptr::null_mut;
+use std::sync::atomic::{Ordering, AtomicPtr};
+use std::time::Duration;
+
+#[derive(Copy, Clone)]
+struct EvilSend<T>(pub T);
+
+unsafe impl<T> Send for EvilSend<T> {}
+unsafe impl<T> Sync for EvilSend<T> {}
+
+pub fn main() {
+    // Shared atomic pointer
+    let pointer = AtomicPtr::new(null_mut::<usize>());
+    let ptr = EvilSend(&pointer as *const AtomicPtr<usize>);
+
+    // Note: this is scheduler-dependent
+    // the operations need to occur in
+    // order, otherwise the allocation is
+    // not visible to the other-thread to
+    // detect the race:
+    //  1. stack-allocate
+    //  2. read
+    //  3. stack-deallocate
+    unsafe {
+        let j1 = spawn(move || {
+            //Concurrent allocate the memory.
+            //Uses relaxed semantics to not generate
+            //a release sequence.
+            let pointer = &*ptr.0;
+            {
+                let mut stack_var = 0usize;
+
+                pointer.store(&mut stack_var as *mut _, Ordering::Release);
+
+                sleep(Duration::from_millis(100));
+
+            } //~ ERROR Data race
+        });
+
+        let j2 = spawn(move || {
+            let pointer = &*ptr.0;
+            *pointer.load(Ordering::Acquire)
+        });
+
+        j1.join().unwrap();
+        j2.join().unwrap();
+    }
+}

0 commit comments
