Add simple data-race detector #1617
Changes from 1 commit
@@ -37,6 +37,24 @@
//! to an acquire load and a release store given the global sequentially consistent order
//! of the schedule.
//!
+//! The timestamps used in the data-race detector assign each sequence of non-atomic operations,
+//! followed by a single atomic or concurrent operation, a single timestamp.
+//! For example, Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread.
+//! This is because extra increments between the operations in the sequence are not
+//! required for accurate data-race detection.
+//!
+//! If the timestamp were not incremented after the atomic operation, some data races would not be detected.
+//! Example - this should report a data race but would not:
+//!     t1: (x,0), atomic[release A], t1=(x+1,0), write(var B)
+//!     t2: (0,y), atomic[acquire A], t2=(x+1,y+1), write(var B)
+//!
+//! The timestamp is not incremented before an atomic operation, since the result would be
+//! indistinguishable from not incrementing at all:
+//!        t: (x,0), atomic[release _], (x+1,0)  ||  (0,y), atomic[acquire _], (x,_)
+//!     vs t: (x,0), atomic[release _], (x+1,0)  ||  (0,y), atomic[acquire _], (x+1,_)
+//! Both orderings result in the sequence on the first thread, up to and including the atomic
+//! release, happening before the acquire.
+//!
//! FIXME:
//! currently we have our own local copy of the currently active thread index and names; this is due
//! in part to the inability to access the current value of threads.active_thread inside the AllocExtra
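To make the increment discipline concrete, here is a minimal, self-contained sketch with a toy vector clock. This is illustrative only; the type, method names, and clock values are invented here and are not Miri's actual `VClock`/`ThreadClockSet` implementation:

```rust
// A minimal sketch of the timestamp discipline described above (toy code,
// not Miri's actual vector-clock type; names and layout are invented here).
#[derive(Clone, Debug)]
struct VClock(Vec<u32>);

impl VClock {
    /// Component-wise maximum: used when one thread acquires
    /// another thread's published history.
    fn join(&mut self, other: &VClock) {
        for (a, b) in self.0.iter_mut().zip(&other.0) {
            *a = (*a).max(*b);
        }
    }

    /// Advance this thread's own component.
    fn increment(&mut self, idx: usize) {
        self.0[idx] += 1;
    }
}

fn main() {
    let mut t1 = VClock(vec![3, 0]); // thread 1 owns index 0
    let mut t2 = VClock(vec![0, 5]); // thread 2 owns index 1

    // t1: non-atomic writes, then an atomic release store.
    // The release publishes t1's *current* clock ...
    let released = t1.clone();
    // ... and the increment happens only *after* the atomic operation,
    // so t1's next non-atomic write gets a fresh timestamp that is not
    // covered by what t2 acquires below.
    t1.increment(0);

    // t2: an atomic acquire load that synchronizes-with the release.
    t2.join(&released);
    t2.increment(1);

    // t1's post-release write carries timestamp 4 in component 0, while
    // t2 only acquired 3: a concurrent write by t2 is reported as a race.
    assert!(t1.0[0] > t2.0[0]);
    println!("t1 = {:?}, t2 = {:?}", t1, t2);
}
```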
@@ -499,7 +517,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
    }

    /// Perform an atomic compare and exchange at a given memory location.
-   /// on success an atomic RMW operation is performed and on failure
+   /// On success an atomic RMW operation is performed and on failure
    /// only an atomic read occurs.
    fn atomic_compare_exchange_scalar(
        &mut self,
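As a reference point for the success/failure split, this is how the same distinction surfaces in user code with the standard library's atomics (plain `std` usage for illustration, not Miri's internal API): a failed exchange performs only a load, with the weaker failure ordering.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let x = AtomicUsize::new(0);

    // Success: a full read-modify-write with AcqRel ordering.
    // Failure: only an atomic load with Acquire ordering happens.
    match x.compare_exchange(0, 1, Ordering::AcqRel, Ordering::Acquire) {
        Ok(prev) => println!("swapped, previous value = {prev}"),
        Err(actual) => println!("exchange failed, value was {actual}"),
    }
}
```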
@@ -1136,9 +1154,6 @@ impl GlobalState {
        // Now load the two clocks and configure the initial state.
        let (current, created) = vector_clocks.pick2_mut(current_index, created_index);

-       // Advance the current thread before the synchronized operation.
-       current.increment_clock(current_index);
-
        // Join the created with current, since the current thread's
        // previous actions happen-before the created thread.
        created.join_with(current);
@@ -1167,14 +1182,12 @@ impl GlobalState {
            .as_ref()
            .expect("Joined with thread but thread has not terminated");

-       // Pre increment clocks before atomic operation.
-       current.increment_clock(current_index);
-
        // The joined thread happens-before the current thread,
        // so update the current vector clock.
        current.clock.join(join_clock);

-       // Post increment clocks after atomic operation.
+       // Increment clocks after atomic operation.
        current.increment_clock(current_index);

        // Check the number of active threads, if the value is 1
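The create/join edges can be pictured with the same kind of toy clocks (hypothetical helper, not Miri's `ThreadClockSet`): spawning makes the parent's history visible to the child, joining makes the child's history visible to the parent, and per the scheme above each synchronizing edge is followed by an increment. Which side increments is purely illustrative here:

```rust
// Toy illustration of the create/join happens-before edges; the helper
// and the increment placement are invented for illustration, not Miri's API.
fn join(dst: &mut [u32], src: &[u32]) {
    for (d, s) in dst.iter_mut().zip(src) {
        *d = (*d).max(*s);
    }
}

fn main() {
    let mut parent = vec![2, 0]; // parent thread owns index 0
    let mut child = vec![0, 0]; // child thread owns index 1

    // Thread creation: the parent's prior actions happen-before the child.
    join(&mut child, &parent);
    child[1] += 1; // post-increment on the child ...
    parent[0] += 1; // ... and on the parent (illustrative)

    // The child runs and performs some timestamped operations.
    child[1] += 3;

    // Thread join: the child's actions happen-before the parent continues.
    join(&mut parent, &child);
    parent[0] += 1;

    assert_eq!(parent, vec![4, 4]);
}
```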
@@ -1277,8 +1290,7 @@ impl GlobalState {
        op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx>,
    ) -> InterpResult<'tcx> {
        if self.multi_threaded.get() {
-           let (index, mut clocks) = self.current_thread_state_mut();
-           clocks.increment_clock(index);
+           let (index, clocks) = self.current_thread_state_mut();
            op(index, clocks)?;
            let (_, mut clocks) = self.current_thread_state_mut();
            clocks.increment_clock(index);
@@ -1303,16 +1315,18 @@
    /// `validate_lock_release` must happen before this.
    pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
        let (index, mut clocks) = self.load_thread_state_mut(thread);
-       clocks.increment_clock(index);
        clocks.clock.join(&lock);
        clocks.increment_clock(index);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire`.
+   /// For normal locks this should be equivalent to `validate_lock_release_shared`,
+   /// since an acquire operation should have occurred before; however,
+   /// for futex & cond-var operations this is not the case and this
+   /// operation must be used.
    pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId) {
        let (index, mut clocks) = self.load_thread_state_mut(thread);
-       clocks.increment_clock(index);
        lock.clone_from(&clocks.clock);
        clocks.increment_clock(index);
    }
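Together these two functions implement the usual vector-clock lock protocol: release publishes the thread's clock into the lock handle, and acquire joins it back in. A self-contained sketch with plain `Vec<u32>` clocks (hypothetical names, not Miri's API):

```rust
// Toy model of the lock-handle protocol: release publishes the thread's
// clock into the lock, acquire joins it back in. Invented names.
fn join(dst: &mut [u32], src: &[u32]) {
    for (d, s) in dst.iter_mut().zip(src) {
        *d = (*d).max(*s);
    }
}

fn validate_release(lock: &mut Vec<u32>, thread: &mut Vec<u32>, idx: usize) {
    lock.clone_from(thread); // capture everything the releaser has done
    thread[idx] += 1; // later actions are not covered by this release
}

fn validate_acquire(lock: &[u32], thread: &mut Vec<u32>, idx: usize) {
    join(thread, lock); // the releaser's history happens-before the acquirer
    thread[idx] += 1;
}

fn main() {
    let mut lock = vec![0, 0];
    let mut t1 = vec![3, 0]; // releasing thread, index 0
    let mut t2 = vec![0, 5]; // acquiring thread, index 1

    validate_release(&mut lock, &mut t1, 0);
    validate_acquire(&lock, &mut t2, 1);

    // t2 now dominates t1's pre-release history.
    assert_eq!(t2, vec![3, 6]);
}
```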
@@ -1321,9 +1335,11 @@
    /// any subsequent calls to `validate_lock_acquire` as well
    /// as any previous calls to this function after any
    /// `validate_lock_release` calls.
+   /// For normal locks this should be equivalent to `validate_lock_release`;
+   /// this function only exists for joining over the set of concurrent readers
+   /// in a read-write lock and should not be used for anything else.
    pub fn validate_lock_release_shared(&self, lock: &mut VClock, thread: ThreadId) {
        let (index, mut clocks) = self.load_thread_state_mut(thread);
-       clocks.increment_clock(index);
        lock.join(&clocks.clock);
        clocks.increment_clock(index);
    }
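The difference from the exclusive release is `join` instead of `clone_from`: each reader merges its clock into the lock handle, so a writer that later acquires the handle happens-after the union of all concurrent readers. Continuing the same toy model (invented names):

```rust
// Toy model of the shared (reader) release for rw-locks: the lock handle
// accumulates the join over all concurrent readers rather than being
// overwritten by a single releaser. Invented names, not Miri's API.
fn validate_release_shared(lock: &mut Vec<u32>, thread: &mut Vec<u32>, idx: usize) {
    for (l, t) in lock.iter_mut().zip(thread.iter()) {
        *l = (*l).max(*t); // join, not clone_from
    }
    thread[idx] += 1;
}

fn main() {
    let mut lock = vec![0, 0, 0];
    let mut r1 = vec![4, 0, 0]; // reader 1, index 0
    let mut r2 = vec![0, 7, 0]; // reader 2, index 1

    validate_release_shared(&mut lock, &mut r1, 0);
    validate_release_shared(&mut lock, &mut r2, 1);

    // The handle now covers both readers' histories; a writer that
    // acquires it happens-after everything both readers did.
    assert_eq!(lock, vec![4, 7, 0]);
}
```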
@@ -62,9 +62,11 @@ fn mutex_get_kind<'mir, 'tcx: 'mir>(
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
+   // FIXME: this has been made atomic to fix data-race reporting inside the internal
+   // mutex implementation; it may not need to be atomic.
    ecx.read_scalar_at_offset_atomic(
        mutex_op, offset, ecx.machine.layouts.i32,
-       AtomicReadOp::Acquire
+       AtomicReadOp::Relaxed
    )
}

No need to leave a FIXME here, I think (same in the rest of this file).
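As background for the `Acquire`/`Release` to `Relaxed` downgrade, here is a hedged analogy using plain `std` atomics (not Miri's shim API): when a field is only accessed under synchronization that the lock protocol itself provides, the atomic access merely has to avoid being a data race; it does not need to establish any ordering of its own.

```rust
// Hedged analogy, not Miri's shim code: a field that is only accessed
// under some other synchronization (here a Mutex) can use Relaxed atomics.
// The atomic access only has to avoid being a data race; the ordering
// guarantees come from the surrounding lock protocol.
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Mutex;

static KIND: AtomicU32 = AtomicU32::new(0);
static GUARD: Mutex<()> = Mutex::new(());

fn set_kind(kind: u32) {
    let _g = GUARD.lock().unwrap();
    // The mutex provides the happens-before edge to other critical sections.
    KIND.store(kind, Ordering::Relaxed);
}

fn get_kind() -> u32 {
    let _g = GUARD.lock().unwrap();
    KIND.load(Ordering::Relaxed)
}

fn main() {
    set_kind(2);
    assert_eq!(get_kind(), 2);
}
```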
@@ -74,18 +76,23 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>(
    kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
    let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
+   // FIXME: this has been made atomic to fix data-race reporting inside the internal
+   // mutex implementation; it may not need to be atomic.
    ecx.write_scalar_at_offset_atomic(
-       mutex_op, offset, kind, ecx.machine.layouts.i32,
-       AtomicWriteOp::Release
+       mutex_op, offset, kind, ecx.machine.layouts.i32,
+       AtomicWriteOp::Relaxed
    )
}

fn mutex_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+   // FIXME: this has been made atomic to fix data-race reporting inside the internal
+   // mutex implementation; it may not need to be atomic.
    ecx.read_scalar_at_offset_atomic(
-       mutex_op, 4, ecx.machine.layouts.u32, AtomicReadOp::Acquire
+       mutex_op, 4, ecx.machine.layouts.u32,
+       AtomicReadOp::Relaxed
    )
}
@@ -94,9 +101,11 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
    mutex_op: OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
+   // FIXME: this has been made atomic to fix data-race reporting inside the internal
+   // mutex implementation; it may not need to be atomic.
    ecx.write_scalar_at_offset_atomic(
        mutex_op, 4, id, ecx.machine.layouts.u32,
-       AtomicWriteOp::Release
+       AtomicWriteOp::Relaxed
    )
}
@@ -126,10 +135,12 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
fn rwlock_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+   // FIXME: this has been made atomic to fix data-race reporting inside the internal
+   // rw-lock implementation; it may not need to be atomic.
    ecx.read_scalar_at_offset_atomic(
        rwlock_op, 4, ecx.machine.layouts.u32,
-       AtomicReadOp::Acquire
+       AtomicReadOp::Relaxed
    )
}
@@ -138,9 +149,11 @@ fn rwlock_set_id<'mir, 'tcx: 'mir>(
    rwlock_op: OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
+   // FIXME: this has been made atomic to fix data-race reporting inside the internal
+   // rw-lock implementation; it may not need to be atomic.
    ecx.write_scalar_at_offset_atomic(
        rwlock_op, 4, id, ecx.machine.layouts.u32,
-       AtomicWriteOp::Release
+       AtomicWriteOp::Relaxed
    )
}
@@ -194,9 +207,11 @@ fn cond_get_id<'mir, 'tcx: 'mir>(
    ecx: &MiriEvalContext<'mir, 'tcx>,
    cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
+   // FIXME: this has been made atomic to fix data-race reporting inside the internal
+   // cond-var implementation; it may not need to be atomic.
    ecx.read_scalar_at_offset_atomic(
        cond_op, 4, ecx.machine.layouts.u32,
-       AtomicReadOp::Acquire
+       AtomicReadOp::Relaxed
    )
}
@@ -205,9 +220,11 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
    cond_op: OpTy<'tcx, Tag>,
    id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
+   // FIXME: this has been made atomic to fix data-race reporting inside the internal
+   // cond-var implementation; it may not need to be atomic.
    ecx.write_scalar_at_offset_atomic(
        cond_op, 4, id, ecx.machine.layouts.u32,
-       AtomicWriteOp::Release
+       AtomicWriteOp::Relaxed
    )
}
@@ -1,2 +1,2 @@
-warning: thread support is experimental, no weak memory effects are currently emulated.
+warning: thread support is experimental and incomplete: weak memory effects are not emulated.

The same one-line change is applied to the remaining concurrency test stderr files.
Is there a full stop missing at the end here?

Also, does this match what the paper does? If the paper increments before and after, then adding a comment that this is somewhat strange, but see the paper for details, seems better than trying to adjust the algorithm the paper authors came up with.