diff --git a/genmc-sys/build.rs b/genmc-sys/build.rs index f20e0e8d52..9621454179 100644 --- a/genmc-sys/build.rs +++ b/genmc-sys/build.rs @@ -26,9 +26,9 @@ mod downloading { use super::GENMC_DOWNLOAD_PATH; /// The GenMC repository the we get our commit from. - pub(crate) const GENMC_GITHUB_URL: &str = "https://github.com/MPI-SWS/genmc.git"; + pub(crate) const GENMC_GITHUB_URL: &str = "https://github.com/Patrick-6/genmc.git"; /// The GenMC commit we depend on. It must be available on the specified GenMC repository. - pub(crate) const GENMC_COMMIT: &str = "3438dd2c1202cd4a47ed7881d099abf23e4167ab"; + pub(crate) const GENMC_COMMIT: &str = "56997d85fc9c07f75431570d49ce4892b22b8e6e"; pub(crate) fn download_genmc() -> PathBuf { let Ok(genmc_download_path) = PathBuf::from_str(GENMC_DOWNLOAD_PATH); @@ -224,7 +224,6 @@ fn compile_cpp_dependencies(genmc_path: &Path) { .include(genmc_include_dir) .include(llvm_include_dirs) .include("./src_cpp") - .file("./src_cpp/MiriInterface.hpp") .file("./src_cpp/MiriInterface.cpp") .compile("genmc_interop"); diff --git a/genmc-sys/src/lib.rs b/genmc-sys/src/lib.rs index ab46d729ea..8522cd680f 100644 --- a/genmc-sys/src/lib.rs +++ b/genmc-sys/src/lib.rs @@ -1,12 +1,32 @@ +pub use cxx::UniquePtr; + pub use self::ffi::*; +/// Defined in "genmc/src/Support/SAddr.hpp" +/// FIXME(genmc): `getGlobalAllocStaticMask()` is used to ensure the constant is consistent between Miri and GenMC, +/// but if https://github.com/dtolnay/cxx/issues/1051 is fixed we could share the constant directly. +pub const GENMC_GLOBAL_ADDRESSES_MASK: u64 = 1 << 63; + +#[repr(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct GenmcThreadId(pub i32); + +pub const GENMC_MAIN_THREAD_ID: GenmcThreadId = GenmcThreadId(0); + +impl GenmcScalar { + pub const UNINIT: Self = Self { value: 0, is_init: false }; + /// GenMC expects a value for all stores, but we cannot always provide one (e.g., non-atomic writes). + /// FIXME(genmc): remove this if a permanent fix is ever found. + pub const DUMMY: Self = Self::from_u64(0xDEADBEEF); + + pub const fn from_u64(value: u64) -> Self { + Self { value, is_init: true } + } +} + impl Default for GenmcParams { fn default() -> Self { - Self { - print_random_schedule_seed: false, - do_symmetry_reduction: false, - // FIXME(GenMC): Add defaults for remaining parameters - } + Self { print_random_schedule_seed: false, do_symmetry_reduction: false } } } @@ -20,11 +40,118 @@ mod ffi { pub do_symmetry_reduction: bool, // FIXME(GenMC): Add remaining parameters. } + + #[derive(Debug)] + enum ActionKind { + /// Any Mir terminator that's atomic and has load semantics. + Load, + /// Anything that's not a `Load`. + NonLoad, + } + + #[derive(Debug)] + enum MemOrdering { + NotAtomic = 0, + Relaxed = 1, + // We skip 2 in case we support consume. + Acquire = 3, + Release = 4, + AcquireRelease = 5, + SequentiallyConsistent = 6, + } + + #[derive(Debug, Clone, Copy)] + struct GenmcScalar { + value: u64, + is_init: bool, + } + + /**** \/ Result & Error types \/ ****/ + + // FIXME(genmc): Rework error handling (likely requires changes on the GenMC side). 
+ + #[must_use] + #[derive(Debug)] + struct LoadResult { + is_read_opt: bool, + read_value: GenmcScalar, + error: UniquePtr, + } + + #[must_use] + #[derive(Debug)] + struct StoreResult { + error: UniquePtr, + isCoMaxWrite: bool, + } + + /**** /\ Result & Error types /\ ****/ + unsafe extern "C++" { include!("MiriInterface.hpp"); + type MemOrdering; + + // Types for Scheduling queries: + type ActionKind; + + // Result / Error types: + type LoadResult; + type StoreResult; + + type GenmcScalar; + type MiriGenMCShim; fn createGenmcHandle(config: &GenmcParams) -> UniquePtr; + fn getGlobalAllocStaticMask() -> u64; + + fn handleExecutionStart(self: Pin<&mut MiriGenMCShim>); + fn handleExecutionEnd(self: Pin<&mut MiriGenMCShim>) -> UniquePtr; + + fn handleLoad( + self: Pin<&mut MiriGenMCShim>, + thread_id: i32, + address: u64, + size: u64, + memory_ordering: MemOrdering, + old_value: GenmcScalar, + ) -> LoadResult; + fn handleStore( + self: Pin<&mut MiriGenMCShim>, + thread_id: i32, + address: u64, + size: u64, + value: GenmcScalar, + old_value: GenmcScalar, + memory_ordering: MemOrdering, + ) -> StoreResult; + + fn handleMalloc( + self: Pin<&mut MiriGenMCShim>, + thread_id: i32, + size: u64, + alignment: u64, + ) -> u64; + fn handleFree(self: Pin<&mut MiriGenMCShim>, thread_id: i32, address: u64, size: u64); + + fn handleThreadCreate(self: Pin<&mut MiriGenMCShim>, thread_id: i32, parent_id: i32); + fn handleThreadJoin(self: Pin<&mut MiriGenMCShim>, thread_id: i32, child_id: i32); + fn handleThreadFinish(self: Pin<&mut MiriGenMCShim>, thread_id: i32, ret_val: u64); + fn handleThreadKill(self: Pin<&mut MiriGenMCShim>, thread_id: i32); + + /**** Scheduling ****/ + fn scheduleNext( + self: Pin<&mut MiriGenMCShim>, + curr_thread_id: i32, + curr_thread_next_instr_kind: ActionKind, + ) -> i64; + + fn getBlockedExecutionCount(self: &MiriGenMCShim) -> u64; + fn getExploredExecutionCount(self: &MiriGenMCShim) -> u64; + + /// Check whether there are more executions to explore. + /// If there are more executions, this method prepares for the next execution and returns `true`. + fn isExplorationDone(self: Pin<&mut MiriGenMCShim>) -> bool; } } diff --git a/genmc-sys/src_cpp/MiriInterface.cpp b/genmc-sys/src_cpp/MiriInterface.cpp index 0827bb3d40..7cf89b271b 100644 --- a/genmc-sys/src_cpp/MiriInterface.cpp +++ b/genmc-sys/src_cpp/MiriInterface.cpp @@ -2,49 +2,260 @@ #include "genmc-sys/src/lib.rs.h" +#include "ADT/value_ptr.hpp" +#include "Config/MemoryModel.hpp" +#include "Config/Verbosity.hpp" +#include "ExecutionGraph/EventLabel.hpp" +#include "ExecutionGraph/LoadAnnotation.hpp" +#include "Runtime/InterpreterEnumAPI.hpp" +#include "Static/ModuleID.hpp" +#include "Support/ASize.hpp" +#include "Support/Error.hpp" +#include "Support/Logger.hpp" +#include "Support/MemAccess.hpp" +#include "Support/RMWOps.hpp" +#include "Support/SAddr.hpp" +#include "Support/SVal.hpp" +#include "Support/ThreadInfo.hpp" +#include "Verification/GenMCDriver.hpp" + +#include +#include +#include +#include + +// Return -1 when no thread can/should be scheduled, or the thread id of the next thread +// NOTE: this is safe because ThreadId is 32 bit, and we return a 64 bit integer +// FIXME(genmc,cxx): could directly return std::optional if CXX ever supports sharing it (see https://github.com/dtolnay/cxx/issues/87). 
+auto MiriGenMCShim::scheduleNext(const int curr_thread_id,
+				 const ActionKind curr_thread_next_instr_kind) -> int64_t
+{
+	// The current thread is the only one where the `kind` could have changed since we last made
+	// a scheduling decision.
+	globalInstructions[curr_thread_id].kind = curr_thread_next_instr_kind;
+
+	if (const auto result = GenMCDriver::scheduleNext(globalInstructions))
+		return static_cast<int64_t>(result.value());
+	return -1;
+}
+
+/**** Functions available to Miri ****/
+
+// NOLINTNEXTLINE(readability-convert-member-functions-to-static)
 auto MiriGenMCShim::createHandle(const GenmcParams &config) -> std::unique_ptr<MiriGenMCShim>
 {
 	auto conf = std::make_shared<Config>();
+	// NOTE: Miri already does initialization checks, so we can disable them in GenMC.
+	conf->skipNonAtomicInitializedCheck = true;
+	// Miri needs all threads to be replayed, even fully completed ones.
 	conf->replayCompletedThreads = true;
+	// FIXME(genmc): make sure this doesn't affect any tests, and maybe make it changeable from Miri:
+	constexpr unsigned int DEFAULT_WARN_ON_GRAPH_SIZE = 16 * 1024;
+	conf->warnOnGraphSize = DEFAULT_WARN_ON_GRAPH_SIZE;
+	// We only support the RC11 memory model for Rust.
 	conf->model = ModelType::RC11;
+	// FIXME(genmc): expose this setting to Miri
+	conf->randomScheduleSeed = "42";
 	conf->printRandomScheduleSeed = config.print_random_schedule_seed;
-	// FIXME(genmc): disable any options we don't support currently:
+	// FIXME(genmc): Add support for setting this from the Miri side.
+	// FIXME(genmc): Decide on what to do about warnings from GenMC (keep them disabled until then).
+	logLevel = VerbosityLevel::Error;
+
+	// FIXME(genmc): check if we can enable IPR:
 	conf->ipr = false;
+	// FIXME(genmc): check if we can enable BAM:
 	conf->disableBAM = true;
+	// FIXME(genmc): check if we can do instruction caching (probably not)
 	conf->instructionCaching = false;
-	ERROR_ON(config.do_symmetry_reduction, "Symmetry reduction is currently unsupported in GenMC mode.");
+	// FIXME(genmc): implement symmetry reduction.
+	ERROR_ON(config.do_symmetry_reduction,
+		 "Symmetry reduction is currently unsupported in GenMC mode.");
 	conf->symmetryReduction = config.do_symmetry_reduction;
-	// FIXME(genmc): Should there be a way to change this option from Miri?
+	// FIXME(genmc): expose this setting to Miri (useful for testing Miri-GenMC).
 	conf->schedulePolicy = SchedulePolicy::WF;
-	// FIXME(genmc): implement estimation mode:
+	// FIXME(genmc): implement estimation mode.
 	conf->estimate = false;
-	conf->estimationMax = 1000;
-	const auto mode = conf->estimate ? GenMCDriver::Mode(GenMCDriver::EstimationMode{})
-					 : GenMCDriver::Mode(GenMCDriver::VerificationMode{});
+	const auto mode = GenMCDriver::Mode(GenMCDriver::VerificationMode{});
 	// Running Miri-GenMC without race detection is not supported.
-	// Disabling this option also changes the behavior of the replay scheduler to only schedule at atomic operations, which is required with Miri.
-	// This happens because Miri can generate multiple GenMC events for a single MIR terminator. Without this option,
-	// the scheduler might incorrectly schedule an atomic MIR terminator because the first event it creates is a non-atomic (e.g., `StorageLive`).
+	// Disabling this option also changes the behavior of the replay scheduler to only schedule
+	// at atomic operations, which is required with Miri. This happens because Miri can generate
+	// multiple GenMC events for a single MIR terminator.
Without this option, the scheduler + // might incorrectly schedule an atomic MIR terminator because the first event it creates is + // a non-atomic (e.g., `StorageLive`). conf->disableRaceDetection = false; // Miri can already check for unfreed memory. Also, GenMC cannot distinguish between memory // that is allowed to leak and memory that is not. conf->warnUnfreedMemory = false; - // FIXME(genmc): check config: - // checkConfigOptions(*conf); + checkConfigOptions(*conf, true); auto driver = std::make_unique(std::move(conf), mode); + + auto *driverPtr = driver.get(); + auto initValGetter = [driverPtr](const AAccess &access) + { + // FIXME(genmc): Add proper support for initial values. + return SVal(0xff); + }; + driver->getExec().getGraph().setInitValGetter(initValGetter); + return driver; } + +// This needs to be available to Miri, but clang-tidy wants it static +// NOLINTNEXTLINE(misc-use-internal-linkage) +auto createGenmcHandle(const GenmcParams &config) + -> std::unique_ptr +{ + return MiriGenMCShim::createHandle(config); +} + +/**** Execution start/end handling ****/ + +void MiriGenMCShim::handleExecutionStart() +{ + globalInstructions.clear(); + globalInstructions.push_back(Action(ActionKind::Load, Event::getInit())); + + GenMCDriver::handleExecutionStart(); +} + +auto MiriGenMCShim::handleExecutionEnd() -> std::unique_ptr +{ + return GenMCDriver::handleExecutionEnd(globalInstructions); +} + +/**** Thread management ****/ + +void MiriGenMCShim::handleThreadCreate(ThreadId thread_id, ThreadId parent_id) +{ + // NOTE: The threadCreate event happens in the parent: + auto pos = incPos(parent_id); + + // FIXME(genmc): for supporting symmetry reduction, these will need to be properly set: + const unsigned funId = 0; + const SVal arg = SVal(0); + const ThreadInfo childInfo = ThreadInfo{thread_id, parent_id, funId, arg}; + + // NOTE: Default memory ordering (`Release`) used here. + auto tcLab = std::make_unique(pos, childInfo); + auto createLab = GenMCDriver::handleThreadCreate(std::move(tcLab)); + auto genmcTid = createLab->getChildId(); + + BUG_ON(genmcTid != thread_id || genmcTid == -1 || genmcTid != globalInstructions.size()); + globalInstructions.push_back(Action(ActionKind::Load, Event(genmcTid, 0))); +} + +void MiriGenMCShim::handleThreadJoin(ThreadId thread_id, ThreadId child_id) +{ + // The thread join event happens in the parent. + auto pos = incPos(thread_id); + + // NOTE: Default memory ordering (`Acquire`) used here. + auto lab = std::make_unique(pos, child_id); + auto res = GenMCDriver::handleThreadJoin(std::move(lab)); + // If the join failed, decrease the event index again: + if (!res.has_value()) + decPos(thread_id); + + // NOTE: Thread return value is ignored, since Miri doesn't need it. +} + +void MiriGenMCShim::handleThreadFinish(ThreadId thread_id, uint64_t ret_val) +{ + auto pos = incPos(thread_id); + auto retVal = SVal(ret_val); + + // NOTE: Default memory ordering (`Release`) used here. 
+ auto eLab = std::make_unique(pos, retVal); + + GenMCDriver::handleThreadFinish(std::move(eLab)); +} + +void MiriGenMCShim::handleThreadKill(ThreadId thread_id) { + auto pos = incPos(thread_id); + auto kLab = std::make_unique(pos); + + GenMCDriver::handleThreadKill(std::move(kLab)); +} + +/**** Memory access handling ****/ + +[[nodiscard]] auto MiriGenMCShim::handleLoad(ThreadId thread_id, uint64_t address, uint64_t size, + MemOrdering ord, GenmcScalar old_val) -> LoadResult +{ + auto pos = incPos(thread_id); + + auto loc = SAddr(address); + auto aSize = ASize(size); + // `type` is only used for printing. + auto type = AType::Unsigned; + + auto newLab = std::make_unique(pos, ord, loc, aSize, type); + + auto result = GenMCDriver::handleLoad(std::move(newLab)); + return result; +} + +[[nodiscard]] auto MiriGenMCShim::handleStore(ThreadId thread_id, uint64_t address, uint64_t size, + GenmcScalar value, GenmcScalar old_val, + MemOrdering ord) + -> StoreResult +{ + auto pos = incPos(thread_id); + + auto loc = SAddr(address); + auto aSize = ASize(size); + // `type` is only used for printing. + auto type = AType::Unsigned; + + auto val = value.toSVal(); + + std::unique_ptr wLab = std::make_unique(pos, ord, loc, aSize, type, val); + + return GenMCDriver::handleStore(std::move(wLab)); +} + +/**** Memory (de)allocation ****/ + +auto MiriGenMCShim::handleMalloc(ThreadId thread_id, uint64_t size, uint64_t alignment) -> uintptr_t +{ + auto pos = incPos(thread_id); + + // These are only used for printing and features Miri-GenMC doesn't support (yet). + auto sd = StorageDuration::SD_Heap; + auto stype = StorageType::ST_Volatile; + auto spc = AddressSpace::AS_User; + + auto aLab = std::make_unique(pos, size, alignment, sd, stype, spc, EventDeps()); + + SAddr retVal = GenMCDriver::handleMalloc(std::move(aLab)); + + BUG_ON(retVal.get() == 0); + + auto address = retVal.get(); + return address; +} + +void MiriGenMCShim::handleFree(ThreadId thread_id, uint64_t address, uint64_t size) +{ + auto addr = SAddr(address); + auto alloc_size = SAddr(size); + + auto pos = incPos(thread_id); + + auto dLab = std::make_unique(pos, addr, size); + GenMCDriver::handleFree(std::move(dLab)); +} diff --git a/genmc-sys/src_cpp/MiriInterface.hpp b/genmc-sys/src_cpp/MiriInterface.hpp index e55522ef41..074ca767bb 100644 --- a/genmc-sys/src_cpp/MiriInterface.hpp +++ b/genmc-sys/src_cpp/MiriInterface.hpp @@ -6,15 +6,24 @@ #include "config.h" #include "Config/Config.hpp" +#include "ExecutionGraph/EventLabel.hpp" +#include "Static/ModuleID.hpp" +#include "Support/MemOrdering.hpp" +#include "Support/RMWOps.hpp" +#include "Support/ResultHandling.hpp" #include "Verification/GenMCDriver.hpp" -#include +#include +#include +#include +#include /**** Types available to Miri ****/ -// Config struct defined on the Rust side and translated to C++ by cxx.rs: struct GenmcParams; +using ThreadId = int; + struct MiriGenMCShim : private GenMCDriver { @@ -22,23 +31,80 @@ struct MiriGenMCShim : private GenMCDriver MiriGenMCShim(std::shared_ptr conf, Mode mode /* = VerificationMode{} */) : GenMCDriver(std::move(conf), nullptr, mode) { - std::cerr << "C++: GenMC handle created!" 
<< std::endl; + globalInstructions.reserve(8); + globalInstructions.push_back(Action(ActionKind::Load, Event::getInit())); + } + + virtual ~MiriGenMCShim() {} + + /**** Execution start/end handling ****/ + + void handleExecutionStart(); + std::unique_ptr handleExecutionEnd(); + + /**** Memory access handling ****/ + + [[nodiscard]] LoadResult handleLoad(ThreadId thread_id, uint64_t address, uint64_t size, + MemOrdering ord, GenmcScalar old_val); + [[nodiscard]] StoreResult handleStore(ThreadId thread_id, uint64_t address, uint64_t size, + GenmcScalar value, GenmcScalar old_val, + MemOrdering ord); + + /**** Memory (de)allocation ****/ + + uintptr_t handleMalloc(ThreadId thread_id, uint64_t size, uint64_t alignment); + void handleFree(ThreadId thread_id, uint64_t address, uint64_t size); + + /**** Thread management ****/ + + void handleThreadCreate(ThreadId thread_id, ThreadId parent_id); + void handleThreadJoin(ThreadId thread_id, ThreadId child_id); + void handleThreadFinish(ThreadId thread_id, uint64_t ret_val); + void handleThreadKill(ThreadId thread_id); + + /**** Scheduling queries ****/ + + auto scheduleNext(const int curr_thread_id, const ActionKind curr_thread_next_instr_kind) + -> int64_t; + + /**** TODO GENMC: Other stuff: ****/ + + auto getBlockedExecutionCount() const -> uint64_t + { + return static_cast(getResult().exploredBlocked); } - virtual ~MiriGenMCShim() + auto getExploredExecutionCount() const -> uint64_t { - std::cerr << "C++: GenMC handle destroyed!" << std::endl; + return static_cast(getResult().explored); + } + + bool isExplorationDone() { return GenMCDriver::done(); } + + /**** OTHER ****/ + + auto incPos(ThreadId tid) -> Event + { + ERROR_ON(tid >= globalInstructions.size(), "ThreadId out of bounds"); + return ++globalInstructions[tid].event; + } + auto decPos(ThreadId tid) -> Event + { + ERROR_ON(tid >= globalInstructions.size(), "ThreadId out of bounds"); + return --globalInstructions[tid].event; } static std::unique_ptr createHandle(const GenmcParams &config); + +private: + std::vector globalInstructions; }; /**** Functions available to Miri ****/ // NOTE: CXX doesn't support exposing static methods to Rust currently, so we expose this function instead. -static inline auto createGenmcHandle(const GenmcParams &config) -> std::unique_ptr -{ - return MiriGenMCShim::createHandle(config); -} +std::unique_ptr createGenmcHandle(const GenmcParams &config); + +constexpr auto getGlobalAllocStaticMask() -> uint64_t { return SAddr::staticMask; } #endif /* GENMC_MIRI_INTERFACE_HPP */ diff --git a/src/alloc_addresses/mod.rs b/src/alloc_addresses/mod.rs index 334503d299..215ec8bdd1 100644 --- a/src/alloc_addresses/mod.rs +++ b/src/alloc_addresses/mod.rs @@ -33,6 +33,7 @@ pub struct GlobalStateInner { /// sorted by address. We cannot use a `HashMap` since we can be given an address that is offset /// from the base address, and we need to find the `AllocId` it belongs to. This is not the /// *full* inverse of `base_addr`; dead allocations have been removed. + /// FIXME(genmc,dead allocs): keep dead allocations in GenMC mode? int_to_ptr_map: Vec<(u64, AllocId)>, /// The base address for each allocation. We cannot put that into /// `AllocExtra` because function pointers also have a base address, and @@ -98,7 +99,8 @@ impl GlobalStateInner { /// Shifts `addr` to make it aligned with `align` by rounding `addr` to the smallest multiple /// of `align` that is larger or equal to `addr` -fn align_addr(addr: u64, align: u64) -> u64 { +/// FIXME(GenMC): is it ok to make this public? 
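+/// For example, `align_addr(13, 8)` returns `16`, and `align_addr(16, 8)` returns `16`.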
+pub(crate) fn align_addr(addr: u64, align: u64) -> u64 { match addr % align { 0 => addr, rem => addr.strict_add(align) - rem, @@ -132,7 +134,8 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> { // Miri's address assignment leaks state across thread boundaries, which is incompatible // with GenMC execution. So we instead let GenMC assign addresses to allocations. if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { - let addr = genmc_ctx.handle_alloc(&this.machine, info.size, info.align, memory_kind)?; + let addr = + genmc_ctx.handle_alloc(this, alloc_id, info.size, info.align, memory_kind)?; return interp_ok(addr); } @@ -268,7 +271,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { // We only use this provenance if it has been exposed, or if the caller requested also non-exposed allocations if !only_exposed_allocations || global_state.exposed.contains(&alloc_id) { // This must still be live, since we remove allocations from `int_to_ptr_map` when they get freed. - debug_assert!(this.is_alloc_live(alloc_id)); + // In GenMC mode, we keep all allocations, so this check doesn't apply there. + debug_assert!( + this.machine.data_race.as_genmc_ref().is_some() || this.is_alloc_live(alloc_id) + ); Some(alloc_id) } else { None @@ -503,11 +509,15 @@ impl<'tcx> MiriMachine<'tcx> { let addr = *global_state.base_addr.get(&dead_id).unwrap(); let pos = global_state.int_to_ptr_map.binary_search_by_key(&addr, |(addr, _)| *addr).unwrap(); - let removed = global_state.int_to_ptr_map.remove(pos); - assert_eq!(removed, (addr, dead_id)); // double-check that we removed the right thing - // We can also remove it from `exposed`, since this allocation can anyway not be returned by - // `alloc_id_from_addr` any more. - global_state.exposed.remove(&dead_id); + + // FIXME(genmc,dead allocs): keep dead allocations in GenMC mode? + if self.data_race.as_genmc_ref().is_none() { + let removed = global_state.int_to_ptr_map.remove(pos); + assert_eq!(removed, (addr, dead_id)); // double-check that we removed the right thing + // We can also remove it from `exposed`, since this allocation can anyway not be returned by + // `alloc_id_from_addr` any more. + global_state.exposed.remove(&dead_id); + } // Also remember this address for future reuse. let thread = self.threads.active_thread(); global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || { diff --git a/src/bin/miri.rs b/src/bin/miri.rs index ae1b25f885..fecd7a88d5 100644 --- a/src/bin/miri.rs +++ b/src/bin/miri.rs @@ -39,7 +39,7 @@ use std::sync::atomic::{AtomicI32, AtomicU32, Ordering}; use miri::{ BacktraceStyle, BorrowTrackerMethod, GenmcConfig, GenmcCtx, MiriConfig, MiriEntryFnType, - ProvenanceMode, RetagFields, TreeBorrowsParams, ValidationMode, + ProvenanceMode, RetagFields, TreeBorrowsParams, ValidationMode, miri_genmc, }; use rustc_abi::ExternAbi; use rustc_data_structures::sync; @@ -187,9 +187,19 @@ impl rustc_driver::Callbacks for MiriCompilerCalls { } if let Some(_genmc_config) = &config.genmc_config { - let _genmc_ctx = Rc::new(GenmcCtx::new(&config)); + let eval_entry_once = |genmc_ctx: Rc| { + miri::eval_entry(tcx, entry_def_id, entry_type, &config, Some(genmc_ctx)) + }; + + // FIXME(genmc): add estimation mode here. 
- todo!("GenMC mode not yet implemented"); + let return_code = + miri_genmc::run_genmc_mode(&config, eval_entry_once).unwrap_or_else(|| { + tcx.dcx().abort_if_errors(); + rustc_driver::EXIT_FAILURE + }); + + exit(return_code); }; if let Some(many_seeds) = self.many_seeds.take() { @@ -735,9 +745,11 @@ fn main() { // Validate settings for data race detection and GenMC mode. if miri_config.genmc_config.is_some() { if !miri_config.data_race_detector { - fatal_error!("Cannot disable data race detection in GenMC mode (currently)"); + fatal_error!("Cannot disable data race detection in GenMC mode"); } else if !miri_config.weak_memory_emulation { fatal_error!("Cannot disable weak memory emulation in GenMC mode"); + } else if !miri_config.native_lib.is_empty() { + fatal_error!("native-lib not supported in GenMC mode."); } if miri_config.borrow_tracker.is_some() { eprintln!( diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs index 38d76f5cf7..5ddbe9e376 100644 --- a/src/concurrency/data_race.rs +++ b/src/concurrency/data_race.rs @@ -719,8 +719,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { // Only metadata on the location itself is used. if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { - // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics - let old_val = None; + let old_val = this.run_for_validation_ref(|this| this.read_scalar(place)).discard_err(); return genmc_ctx.atomic_load( this, place.ptr().addr(), @@ -752,10 +751,21 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { // This is also a very special exception where we just ignore an error -- if this read // was UB e.g. because the memory is uninitialized, we don't want to know! let old_val = this.run_for_validation_mut(|this| this.read_scalar(dest)).discard_err(); + // Inform GenMC about the atomic store. if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { - // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics - genmc_ctx.atomic_store(this, dest.ptr().addr(), dest.layout.size, val, atomic)?; + if genmc_ctx.atomic_store( + this, + dest.ptr().addr(), + dest.layout.size, + val, + old_val, + atomic, + )? { + // The store might be the latest store in coherence order (determined by GenMC). + // If it is, we need to update the value in Miri's memory: + this.allow_data_races_mut(|this| this.write_scalar(val, dest))?; + } return interp_ok(()); } this.allow_data_races_mut(move |this| this.write_scalar(val, dest))?; @@ -779,7 +789,6 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { // Inform GenMC about the atomic rmw operation. if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { - // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics let (old_val, new_val) = genmc_ctx.atomic_rmw_op( this, place.ptr().addr(), @@ -787,8 +796,11 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { atomic, (op, not), rhs.to_scalar(), + old.to_scalar(), )?; - this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?; + if let Some(new_val) = new_val { + this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?; + } return interp_ok(ImmTy::from_scalar(old_val, old.layout)); } @@ -818,14 +830,19 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { // Inform GenMC about the atomic atomic exchange. 
if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { - // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics - let (old_val, _is_success) = genmc_ctx.atomic_exchange( + let (old_val, new_val) = genmc_ctx.atomic_exchange( this, place.ptr().addr(), place.layout.size, new, atomic, + old, )?; + // The store might be the latest store in coherence order (determined by GenMC). + // If it is, we need to update the value in Miri's memory: + if let Some(new_val) = new_val { + this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?; + } return interp_ok(old_val); } @@ -851,7 +868,6 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { // Inform GenMC about the atomic min/max operation. if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { - // FIXME(GenMC): Inform GenMC what a non-atomic read here would return, to support mixed atomics/non-atomics let (old_val, new_val) = genmc_ctx.atomic_min_max_op( this, place.ptr().addr(), @@ -860,8 +876,13 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { min, old.layout.backend_repr.is_signed(), rhs.to_scalar(), + old.to_scalar(), )?; - this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?; + // The store might be the latest store in coherence order (determined by GenMC). + // If it is, we need to update the value in Miri's memory: + if let Some(new_val) = new_val { + this.allow_data_races_mut(|this| this.write_scalar(new_val, place))?; + } return interp_ok(ImmTy::from_scalar(old_val, old.layout)); } @@ -903,6 +924,7 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { let this = self.eval_context_mut(); this.atomic_access_check(place, AtomicAccessType::Rmw)?; + // // FIXME(GenMC): this comment is wrong: // Failure ordering cannot be stronger than success ordering, therefore first attempt // to read with the failure ordering and if successful then try again with the success // read ordering and write in the success case. @@ -911,20 +933,24 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> { // Inform GenMC about the atomic atomic compare exchange. if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { - let (old, cmpxchg_success) = genmc_ctx.atomic_compare_exchange( - this, - place.ptr().addr(), - place.layout.size, - this.read_scalar(expect_old)?, - new, - success, - fail, - can_fail_spuriously, - )?; - if cmpxchg_success { + let (old_value, is_co_maximal_write, cmpxchg_success) = genmc_ctx + .atomic_compare_exchange( + this, + place.ptr().addr(), + place.layout.size, + this.read_scalar(expect_old)?, + new, + success, + fail, + can_fail_spuriously, + old.to_scalar(), + )?; + // The store might be the latest store in coherence order (determined by GenMC). + // If it is, we need to update the value in Miri's memory: + if is_co_maximal_write { this.allow_data_races_mut(|this| this.write_scalar(new, place))?; } - return interp_ok(Immediate::ScalarPair(old, Scalar::from_bool(cmpxchg_success))); + return interp_ok(Immediate::ScalarPair(old_value, Scalar::from_bool(cmpxchg_success))); } // `binary_op` will bail if either of them is not a scalar. diff --git a/src/concurrency/genmc/config.rs b/src/concurrency/genmc/config.rs index c56adab90f..3d94d2c990 100644 --- a/src/concurrency/genmc/config.rs +++ b/src/concurrency/genmc/config.rs @@ -6,8 +6,6 @@ use super::GenmcParams; #[derive(Debug, Default, Clone)] pub struct GenmcConfig { pub(super) params: GenmcParams, - do_estimation: bool, - // FIXME(GenMC): add remaining options. 
} impl GenmcConfig { @@ -29,7 +27,11 @@ impl GenmcConfig { if trimmed_arg.is_empty() { return Ok(()); // this corresponds to "-Zmiri-genmc" } - // FIXME(GenMC): implement remaining parameters. - todo!(); + let _genmc_config = genmc_config.as_mut().unwrap(); + let Some(trimmed_arg) = trimmed_arg.strip_prefix("-") else { + return Err(format!("Invalid GenMC argument \"-Zmiri-genmc{trimmed_arg}\"")); + }; + // FIXME(genmc): Add parsing for remaining parameters. + Err(format!("Invalid GenMC argument: \"-Zmiri-genmc-{trimmed_arg}\"")) } } diff --git a/src/concurrency/genmc/dummy.rs b/src/concurrency/genmc/dummy.rs index 79d27c4be1..2068a75158 100644 --- a/src/concurrency/genmc/dummy.rs +++ b/src/concurrency/genmc/dummy.rs @@ -1,7 +1,5 @@ -#![allow(unused)] - use rustc_abi::{Align, Size}; -use rustc_const_eval::interpret::{InterpCx, InterpResult}; +use rustc_const_eval::interpret::{AllocId, InterpCx, InterpResult}; use rustc_middle::mir; use crate::{ @@ -15,16 +13,29 @@ pub struct GenmcCtx {} #[derive(Debug, Default, Clone)] pub struct GenmcConfig {} +pub mod miri_genmc { + use std::rc::Rc; + + use crate::{GenmcCtx, MiriConfig}; + + pub fn run_genmc_mode( + _config: &MiriConfig, + _eval_entry: impl Fn(Rc) -> Option, + ) -> Option { + unreachable!(); + } +} + impl GenmcCtx { pub fn new(_miri_config: &MiriConfig) -> Self { unreachable!() } - pub fn get_stuck_execution_count(&self) -> usize { + pub fn get_blocked_execution_count(&self) -> usize { unreachable!() } - pub fn print_genmc_graph(&self) { + pub fn get_explored_execution_count(&self) -> usize { unreachable!() } @@ -32,6 +43,10 @@ impl GenmcCtx { unreachable!() } + pub fn get_exit_status(&self) -> Option<(i32, bool)> { + unreachable!() + } + /**** Memory access handling ****/ pub(crate) fn handle_execution_start(&self) { @@ -67,8 +82,9 @@ impl GenmcCtx { _address: Size, _size: Size, _value: Scalar, + _old_value: Option, _ordering: AtomicWriteOrd, - ) -> InterpResult<'tcx, ()> { + ) -> InterpResult<'tcx, bool> { unreachable!() } @@ -76,7 +92,7 @@ impl GenmcCtx { &self, _machine: &MiriMachine<'tcx>, _ordering: AtomicFenceOrd, - ) -> InterpResult<'tcx, ()> { + ) -> InterpResult<'tcx> { unreachable!() } @@ -86,22 +102,24 @@ impl GenmcCtx { _address: Size, _size: Size, _ordering: AtomicRwOrd, - (rmw_op, not): (mir::BinOp, bool), + (_rmw_op, _not): (mir::BinOp, bool), _rhs_scalar: Scalar, - ) -> InterpResult<'tcx, (Scalar, Scalar)> { + _old_value: Scalar, + ) -> InterpResult<'tcx, (Scalar, Option)> { unreachable!() } pub(crate) fn atomic_min_max_op<'tcx>( &self, - ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, - address: Size, - size: Size, - ordering: AtomicRwOrd, - min: bool, - is_signed: bool, - rhs_scalar: Scalar, - ) -> InterpResult<'tcx, (Scalar, Scalar)> { + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _address: Size, + _size: Size, + _ordering: AtomicRwOrd, + _min: bool, + _is_signed: bool, + _rhs_scalar: Scalar, + _old_value: Scalar, + ) -> InterpResult<'tcx, (Scalar, Option)> { unreachable!() } @@ -112,7 +130,8 @@ impl GenmcCtx { _size: Size, _rhs_scalar: Scalar, _ordering: AtomicRwOrd, - ) -> InterpResult<'tcx, (Scalar, bool)> { + _old_value: Scalar, + ) -> InterpResult<'tcx, (Scalar, Option)> { unreachable!() } @@ -126,7 +145,8 @@ impl GenmcCtx { _success: AtomicRwOrd, _fail: AtomicReadOrd, _can_fail_spuriously: bool, - ) -> InterpResult<'tcx, (Scalar, bool)> { + _old_value: Scalar, + ) -> InterpResult<'tcx, (Scalar, bool, bool)> { unreachable!() } @@ -135,7 +155,7 @@ impl GenmcCtx { _machine: &MiriMachine<'tcx>, _address: Size, _size: Size, 
- ) -> InterpResult<'tcx, ()> { + ) -> InterpResult<'tcx> { unreachable!() } @@ -144,7 +164,7 @@ impl GenmcCtx { _machine: &MiriMachine<'tcx>, _address: Size, _size: Size, - ) -> InterpResult<'tcx, ()> { + ) -> InterpResult<'tcx> { unreachable!() } @@ -152,7 +172,8 @@ impl GenmcCtx { pub(crate) fn handle_alloc<'tcx>( &self, - _machine: &MiriMachine<'tcx>, + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _alloc_id: AllocId, _size: Size, _alignment: Align, _memory_kind: MemoryKind, @@ -163,11 +184,11 @@ impl GenmcCtx { pub(crate) fn handle_dealloc<'tcx>( &self, _machine: &MiriMachine<'tcx>, + _alloc_id: AllocId, _address: Size, _size: Size, - _align: Align, _kind: MemoryKind, - ) -> InterpResult<'tcx, ()> { + ) -> InterpResult<'tcx> { unreachable!() } @@ -176,8 +197,10 @@ impl GenmcCtx { pub(crate) fn handle_thread_create<'tcx>( &self, _threads: &ThreadManager<'tcx>, + _start_routine: crate::Pointer, + _func_arg: &crate::ImmTy<'tcx>, _new_thread_id: ThreadId, - ) -> InterpResult<'tcx, ()> { + ) -> InterpResult<'tcx> { unreachable!() } @@ -185,24 +208,26 @@ impl GenmcCtx { &self, _active_thread_id: ThreadId, _child_thread_id: ThreadId, - ) -> InterpResult<'tcx, ()> { + ) -> InterpResult<'tcx> { unreachable!() } - pub(crate) fn handle_thread_stack_empty(&self, _thread_id: ThreadId) { + pub(crate) fn handle_thread_finish<'tcx>(&self, _threads: &ThreadManager<'tcx>) { unreachable!() } - pub(crate) fn handle_thread_finish<'tcx>( + pub(crate) fn handle_exit<'tcx>( &self, - _threads: &ThreadManager<'tcx>, - ) -> InterpResult<'tcx, ()> { + _thread: ThreadId, + _exit_code: i32, + _is_exit_call: bool, + ) -> InterpResult<'tcx> { unreachable!() } /**** Scheduling functionality ****/ - pub(crate) fn schedule_thread<'tcx>( + pub fn schedule_thread<'tcx>( &self, _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, ) -> InterpResult<'tcx, ThreadId> { @@ -211,6 +236,7 @@ impl GenmcCtx { /**** Blocking instructions ****/ + #[allow(unused)] pub(crate) fn handle_verifier_assume<'tcx>( &self, _machine: &MiriMachine<'tcx>, @@ -229,7 +255,7 @@ impl VisitProvenance for GenmcCtx { impl GenmcConfig { pub fn parse_arg( _genmc_config: &mut Option, - trimmed_arg: &str, + _trimmed_arg: &str, ) -> Result<(), String> { if cfg!(feature = "genmc") { Err(format!("GenMC is disabled in this build of Miri")) @@ -237,8 +263,4 @@ impl GenmcConfig { Err(format!("GenMC is not supported on this target")) } } - - pub fn should_print_graph(&self, _rep: usize) -> bool { - unreachable!() - } } diff --git a/src/concurrency/genmc/global_allocations.rs b/src/concurrency/genmc/global_allocations.rs new file mode 100644 index 0000000000..3e913be5ff --- /dev/null +++ b/src/concurrency/genmc/global_allocations.rs @@ -0,0 +1,145 @@ +use std::collections::hash_map::Entry; +use std::sync::RwLock; + +use genmc_sys::{GENMC_GLOBAL_ADDRESSES_MASK, getGlobalAllocStaticMask}; +use rand::rngs::StdRng; +use rand::{Rng, SeedableRng}; +use rustc_const_eval::interpret::{ + AllocId, AllocInfo, AllocKind, InterpResult, PointerArithmetic, interp_ok, +}; +use rustc_data_structures::fx::FxHashMap; +use rustc_middle::{err_exhaust, throw_exhaust}; +use tracing::info; + +use crate::alloc_addresses::align_addr; + +#[derive(Debug, Default)] +pub struct GlobalAllocationHandler { + inner: RwLock, +} + +/// This contains more or less a subset of the functionality of `struct GlobalStateInner` in `alloc_addresses`. 
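+/// Unlike that one, every address handed out here has `GENMC_GLOBAL_ADDRESSES_MASK` set, so
+/// GenMC can tell global allocations apart from other memory.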
+#[derive(Clone, Debug)]
+struct GlobalStateInner {
+    // FIXME(genmc): Decide on an API for GenMC to access this (to check if access to global memory is valid), or decide we don't need that.
+    #[allow(unused)]
+    /// This is used as a map between the address of each allocation and its `AllocId`. It is always
+    /// sorted by address. We cannot use a `HashMap` since we can be given an address that is offset
+    /// from the base address, and we need to find the `AllocId` it belongs to. This is not the
+    /// *full* inverse of `base_addr`; dead allocations have been removed.
+    int_to_ptr_map: Vec<(u64, AllocId)>,
+    /// The base address for each allocation.
+    /// This is the inverse of `int_to_ptr_map`.
+    base_addr: FxHashMap<AllocId, u64>,
+    /// This is used as a memory address when a new pointer is cast to an integer. It
+    /// is always larger than any address that was previously made part of a block.
+    next_base_addr: u64,
+    /// To add some randomness to the allocations.
+    /// FIXME(genmc): maybe seed this from the rng in MiriMachine?
+    rng: StdRng,
+}
+
+impl Default for GlobalStateInner {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl GlobalStateInner {
+    pub fn new() -> Self {
+        assert_eq!(GENMC_GLOBAL_ADDRESSES_MASK, getGlobalAllocStaticMask());
+        assert_ne!(GENMC_GLOBAL_ADDRESSES_MASK, 0);
+        Self {
+            int_to_ptr_map: Vec::default(),
+            base_addr: FxHashMap::default(),
+            next_base_addr: GENMC_GLOBAL_ADDRESSES_MASK,
+            rng: StdRng::seed_from_u64(0),
+        }
+    }
+
+    fn global_allocate_addr<'tcx>(
+        &mut self,
+        alloc_id: AllocId,
+        info: AllocInfo,
+    ) -> InterpResult<'tcx, u64> {
+        let entry = match self.base_addr.entry(alloc_id) {
+            Entry::Occupied(occupied_entry) => {
+                // Looks like some other thread allocated this for us
+                // between when we released the read lock and acquired the write lock,
+                // so we just return that value.
+                return interp_ok(*occupied_entry.get());
+            }
+            Entry::Vacant(vacant_entry) => vacant_entry,
+        };
+
+        // This is either called immediately after allocation (and then cached), or when
+        // adjusting `tcx` pointers (which never get freed). So assert that we are looking
+        // at a live allocation. This also ensures that we never re-assign an address to an
+        // allocation that previously had an address, but then was freed and the address
+        // information was removed.
+        assert!(!matches!(info.kind, AllocKind::Dead));
+
+        // This allocation does not have a base address yet, pick or reuse one.
+
+        // We are not in native lib mode, so we control the addresses ourselves.
+
+        // We have to pick a fresh address.
+        // Leave some space to the previous allocation, to give it some chance to be less aligned.
+        // We ensure that `(global_state.next_base_addr + slack) % 16` is uniformly distributed.
+        let slack = self.rng.random_range(0..16);
+        // From next_base_addr + slack, round up to adjust for alignment.
+        let base_addr =
+            self.next_base_addr.checked_add(slack).ok_or_else(|| err_exhaust!(AddressSpaceFull))?;
+        let base_addr = align_addr(base_addr, info.align.bytes());
+
+        // Remember next base address. If this allocation is zero-sized, leave a gap of at
+        // least 1 to avoid two allocations having the same base address. (The logic in
+        // `alloc_id_from_addr` assumes unique addresses, and different function/vtable pointers
+        // need to be distinguishable!)
+ self.next_base_addr = base_addr + .checked_add(info.size.bytes().max(1)) + .ok_or_else(|| err_exhaust!(AddressSpaceFull))?; + + assert_ne!(0, base_addr & GENMC_GLOBAL_ADDRESSES_MASK); + assert_ne!(0, self.next_base_addr & GENMC_GLOBAL_ADDRESSES_MASK); + + // Cache the address for future use. + entry.insert(base_addr); + + interp_ok(base_addr) + } +} + +impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {} +pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> { + fn get_global_allocation_address( + &self, + global_allocation_handler: &GlobalAllocationHandler, + alloc_id: AllocId, + ) -> InterpResult<'tcx, u64> { + let this = self.eval_context_ref(); + let info = this.get_alloc_info(alloc_id); + + let global_state = global_allocation_handler.inner.read().unwrap(); + if let Some(base_addr) = global_state.base_addr.get(&alloc_id) { + info!( + "GenMC: address for global with alloc id {alloc_id:?} was cached: {base_addr} == {base_addr:#x}" + ); + return interp_ok(*base_addr); + } + + drop(global_state); + // We need to upgrade to a write lock. std::sync::RwLock doesn't support this, so we drop the guard and lock again + // Note that another thread might run in between and allocate the address, but we handle this case in the allocation function. + let mut global_state = global_allocation_handler.inner.write().unwrap(); + let base_addr = global_state.global_allocate_addr(alloc_id, info)?; + // Even if `Size` didn't overflow, we might still have filled up the address space. + if global_state.next_base_addr > this.target_usize_max() { + throw_exhaust!(AddressSpaceFull); + } + info!( + "GenMC: global with alloc id {alloc_id:?} got address: {base_addr} == {base_addr:#x}" + ); + interp_ok(base_addr) + } +} diff --git a/src/concurrency/genmc/helper.rs b/src/concurrency/genmc/helper.rs new file mode 100644 index 0000000000..b0dcd13114 --- /dev/null +++ b/src/concurrency/genmc/helper.rs @@ -0,0 +1,88 @@ +use rustc_abi::Size; +use rustc_const_eval::interpret::{InterpResult, interp_ok}; +use rustc_middle::ty::ScalarInt; +use tracing::info; + +use super::GenmcScalar; +use crate::{MiriInterpCx, Scalar, throw_unsup_format}; + +pub fn split_access(address: Size, size: Size) -> impl Iterator { + /// Maximum size memory access in bytes that GenMC supports. + const MAX_SIZE: u64 = 8; + + let size_bytes = size.bytes(); + + let start_address = address.bytes(); + let end_address = start_address + size_bytes; + let start_missing = (MAX_SIZE - (start_address % MAX_SIZE)) % MAX_SIZE; + let end_missing = end_address % MAX_SIZE; + + let start_address_aligned = start_address + start_missing; + let end_address_aligned = end_address - end_missing; + + info!( + "GenMC: splitting NA memory access into {MAX_SIZE} byte chunks: {start_missing}B + {} * {MAX_SIZE}B + {end_missing}B = {size:?}", + (end_address_aligned - start_address_aligned) / MAX_SIZE + ); + debug_assert_eq!( + 0, + start_address_aligned % MAX_SIZE, + "Incorrectly aligned start address: {start_address_aligned} % {MAX_SIZE} != 0, {start_address} + {start_missing}" + ); + debug_assert_eq!( + 0, + end_address_aligned % MAX_SIZE, + "Incorrectly aligned end address: {end_address_aligned} % {MAX_SIZE} != 0, {end_address} - {end_missing}" + ); + debug_assert!(start_missing < MAX_SIZE && end_missing < MAX_SIZE); + + // FIXME(genmc): could make remaining accesses powers-of-2, instead of 1 byte. 
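+    // For example, an access at address 3 with size 20 is split into five 1-byte chunks
+    // (bytes 3..8), one 8-byte chunk (8..16), and seven 1-byte chunks (16..23).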
+ let start_chunks = (start_address..start_address_aligned).map(|address| (address, 1)); + let aligned_chunks = (start_address_aligned..end_address_aligned) + .step_by(MAX_SIZE.try_into().unwrap()) + .map(|address| (address, MAX_SIZE)); + let end_chunks = (end_address_aligned..end_address).map(|address| (address, 1)); + + start_chunks.chain(aligned_chunks).chain(end_chunks) +} + +pub fn option_scalar_to_genmc_scalar<'tcx>( + ecx: &MiriInterpCx<'tcx>, + maybe_scalar: Option, +) -> InterpResult<'tcx, GenmcScalar> { + if let Some(scalar) = maybe_scalar { + scalar_to_genmc_scalar(ecx, scalar) + } else { + interp_ok(GenmcScalar::UNINIT) + } +} + +pub fn scalar_to_genmc_scalar<'tcx>( + _ecx: &MiriInterpCx<'tcx>, + scalar: Scalar, +) -> InterpResult<'tcx, GenmcScalar> { + interp_ok(match scalar { + rustc_const_eval::interpret::Scalar::Int(scalar_int) => { + // FIXME(genmc): 128bit atomics support + let value: u64 = scalar_int.to_uint(scalar_int.size()).try_into().unwrap(); + GenmcScalar { value, is_init: true } + } + rustc_const_eval::interpret::Scalar::Ptr(_pointer, _size) => + throw_unsup_format!( + "FIXME(genmc): Implement sending pointers (with provenance) to GenMC." + ), + }) +} + +pub fn genmc_scalar_to_scalar<'tcx>( + _ecx: &MiriInterpCx<'tcx>, + scalar: GenmcScalar, + size: Size, +) -> InterpResult<'tcx, Scalar> { + // FIXME(genmc): Add GencmScalar to Miri Pointer conversion. + + // NOTE: GenMC always returns 64 bit values, and the upper bits are not yet truncated. + // FIXME(genmc): GenMC should be doing the truncation, not Miri. + let (value_scalar_int, _got_truncated) = ScalarInt::truncate_from_uint(scalar.value, size); + interp_ok(Scalar::Int(value_scalar_int)) +} diff --git a/src/concurrency/genmc/mapping.rs b/src/concurrency/genmc/mapping.rs new file mode 100644 index 0000000000..1a55aaf49b --- /dev/null +++ b/src/concurrency/genmc/mapping.rs @@ -0,0 +1,23 @@ +use genmc_sys::MemOrdering; + +use crate::{AtomicReadOrd, AtomicWriteOrd}; + +impl AtomicReadOrd { + pub(super) fn convert(self) -> MemOrdering { + match self { + AtomicReadOrd::Relaxed => MemOrdering::Relaxed, + AtomicReadOrd::Acquire => MemOrdering::Acquire, + AtomicReadOrd::SeqCst => MemOrdering::SequentiallyConsistent, + } + } +} + +impl AtomicWriteOrd { + pub(super) fn convert(self) -> MemOrdering { + match self { + AtomicWriteOrd::Relaxed => MemOrdering::Relaxed, + AtomicWriteOrd::Release => MemOrdering::Release, + AtomicWriteOrd::SeqCst => MemOrdering::SequentiallyConsistent, + } + } +} diff --git a/src/concurrency/genmc/miri_genmc.rs b/src/concurrency/genmc/miri_genmc.rs new file mode 100644 index 0000000000..e6b76c7d36 --- /dev/null +++ b/src/concurrency/genmc/miri_genmc.rs @@ -0,0 +1,42 @@ +use std::rc::Rc; + +use crate::{GenmcCtx, MiriConfig}; + +pub fn run_genmc_mode( + config: &MiriConfig, + eval_entry: impl Fn(Rc) -> Option, +) -> Option { + let genmc_ctx = Rc::new(GenmcCtx::new(config)); + + for rep in 0u64.. { + tracing::info!("Miri-GenMC loop {}", rep + 1); + let result = eval_entry(genmc_ctx.clone()); + + // FIXME(genmc): tell GenMC to print the execution graph here (if requested by the user). + + let return_code = result?; + + let is_exploration_done = genmc_ctx.is_exploration_done(); + + tracing::info!( + "(GenMC Mode) Execution done (return code: {return_code}), is_exploration_done: {is_exploration_done}", + ); + + if is_exploration_done { + eprintln!("(GenMC) Verification complete. 
No errors were detected.",); + + let explored_execution_count = genmc_ctx.get_explored_execution_count(); + let blocked_execution_count = genmc_ctx.get_blocked_execution_count(); + + eprintln!("Number of complete executions explored: {explored_execution_count}"); + if blocked_execution_count > 0 { + eprintln!("Number of blocked executions seen: {blocked_execution_count}"); + } + + // Return the return code of the last execution. + return Some(return_code); + } + } + tracing::error!("GenMC mode did not finish in 2^64 iterations!"); + None +} diff --git a/src/concurrency/genmc/mod.rs b/src/concurrency/genmc/mod.rs index 3617775e27..78dcdb8e54 100644 --- a/src/concurrency/genmc/mod.rs +++ b/src/concurrency/genmc/mod.rs @@ -1,74 +1,140 @@ -#![allow(unused)] // FIXME(GenMC): remove this +use std::cell::{Cell, RefCell}; +use std::sync::Arc; -use std::cell::Cell; - -use genmc_sys::{GenmcParams, createGenmcHandle}; +use genmc_sys::{ + ActionKind, GENMC_GLOBAL_ADDRESSES_MASK, GenmcScalar, GenmcThreadId, MemOrdering, + MiriGenMCShim, UniquePtr, createGenmcHandle, +}; use rustc_abi::{Align, Size}; -use rustc_const_eval::interpret::{InterpCx, InterpResult, interp_ok}; -use rustc_middle::mir; - +use rustc_const_eval::interpret::{AllocId, InterpCx, InterpResult, interp_ok}; +use rustc_middle::{mir, throw_machine_stop, throw_ub_format, throw_unsup_format}; +use tracing::info; + +use self::global_allocations::{EvalContextExtPriv as _, GlobalAllocationHandler}; +use self::helper::{genmc_scalar_to_scalar, option_scalar_to_genmc_scalar, scalar_to_genmc_scalar}; +use self::thread_info_manager::ThreadInfoManager; +use crate::concurrency::genmc::helper::split_access; use crate::{ AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, MemoryKind, MiriConfig, - MiriMachine, Scalar, ThreadId, ThreadManager, VisitProvenance, VisitWith, + MiriMachine, MiriMemoryKind, Scalar, TerminationInfo, ThreadId, ThreadManager, VisitProvenance, + VisitWith, }; mod config; +mod global_allocations; +mod helper; +mod mapping; +pub mod miri_genmc; +mod thread_info_manager; + +pub use genmc_sys::GenmcParams; pub use self::config::GenmcConfig; -// FIXME(GenMC): add fields +const UNSUPPORTED_ATOMICS_SIZE_MSG: &str = + "GenMC mode currently does not support atomics larger than 8 bytes."; + +#[derive(Clone, Copy)] +struct ExitStatus { + exit_code: i32, + leak_check: bool, +} + pub struct GenmcCtx { + handle: RefCell>, + + // FIXME(genmc,performance): we could use one RefCell for all internals instead of multiple + thread_infos: RefCell, + /// Some actions Miri does are allowed to cause data races. /// GenMC will not be informed about certain actions (e.g. non-atomic loads) when this flag is set. allow_data_races: Cell, + + /// Keep track of global allocations, to ensure they keep the same address across different executions, even if the order of allocations changes. + /// The `AllocId` for globals is stable across executions, so we can use it as an identifier. + global_allocations: Arc, + + /// The exit status of the program. + /// `None` if no thread has called `exit` and the main thread isn't finished yet. + exit_status: Cell>, } +/// GenMC Context creation and administrative / query actions impl GenmcCtx { /// Create a new `GenmcCtx` from a given config. 
pub fn new(miri_config: &MiriConfig) -> Self { let genmc_config = miri_config.genmc_config.as_ref().unwrap(); - - let handle = createGenmcHandle(&genmc_config.params); - assert!(!handle.is_null()); - - eprintln!("Miri: GenMC handle creation successful!"); - - drop(handle); - eprintln!("Miri: Dropping GenMC handle successful!"); - - // FIXME(GenMC): implement - std::process::exit(0); + let handle = RefCell::new(createGenmcHandle(&genmc_config.params)); + Self { + handle, + thread_infos: Default::default(), + allow_data_races: Cell::new(false), + global_allocations: Default::default(), + exit_status: Cell::new(None), + } } - pub fn get_stuck_execution_count(&self) -> usize { - todo!() + pub fn get_blocked_execution_count(&self) -> u64 { + let mc = self.handle.borrow(); + mc.as_ref().unwrap().getBlockedExecutionCount() } - pub fn print_genmc_graph(&self) { - todo!() + pub fn get_explored_execution_count(&self) -> u64 { + let mc = self.handle.borrow(); + mc.as_ref().unwrap().getExploredExecutionCount() } /// This function determines if we should continue exploring executions or if we are done. /// /// In GenMC mode, the input program should be repeatedly executed until this function returns `true` or an error is found. pub fn is_exploration_done(&self) -> bool { - todo!() + let mut mc = self.handle.borrow_mut(); + mc.as_mut().unwrap().isExplorationDone() + } + + pub fn get_exit_status(&self) -> Option<(i32, bool)> { + let ExitStatus { exit_code, leak_check } = self.exit_status.get()?; + Some((exit_code, leak_check)) + } + + fn set_exit_status(&self, exit_code: i32, leak_check: bool) { + self.exit_status.set(Some(ExitStatus { exit_code, leak_check })); } +} + +/// GenMC event handling. These methods are used to inform GenMC about events happening in the program, and to handle scheduling decisions. +impl GenmcCtx { + /**** Memory access handling ****/ /// Inform GenMC that a new program execution has started. /// This function should be called at the start of every execution. pub(crate) fn handle_execution_start(&self) { - todo!() + self.allow_data_races.replace(false); + self.thread_infos.borrow_mut().reset(); + self.exit_status.set(None); + + let mut mc = self.handle.borrow_mut(); + mc.as_mut().unwrap().handleExecutionStart(); } /// Inform GenMC that the program's execution has ended. /// - /// This function must be called even when the execution got stuck (i.e., it returned a `InterpErrorKind::MachineStop` with error kind `TerminationInfo::GenmcStuckExecution`). + /// This function must be called even when the execution is blocked + /// (i.e., it returned a `InterpErrorKind::MachineStop` with error kind `TerminationInfo::GenmcBlockedExecution`). pub(crate) fn handle_execution_end<'tcx>( &self, - ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, ) -> Result<(), String> { - todo!() + let mut mc = self.handle.borrow_mut(); + let result = mc.as_mut().unwrap().handleExecutionEnd(); + if let Some(msg) = result.as_ref() { + // FIXME(genmc): error handling (may need changes on GenMC side first). + let msg = msg.to_string_lossy().to_string(); + info!("GenMC: execution ended with error \"{msg}\""); + Err(msg) + } else { + Ok(()) + } } /**** Memory access handling ****/ @@ -83,20 +149,29 @@ impl GenmcCtx { /// # Panics /// If data race free is attempted to be set more than once (i.e., no nesting allowed). 
     pub(super) fn set_ongoing_action_data_race_free(&self, enable: bool) {
+        info!("GenMC: set_ongoing_action_data_race_free ({enable})");
         let old = self.allow_data_races.replace(enable);
         assert_ne!(old, enable, "cannot nest allow_data_races");
     }
 
+    // This might fail if there's a race; the load might also not read anything (returns None).
     pub(crate) fn atomic_load<'tcx>(
         &self,
         ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
         address: Size,
         size: Size,
         ordering: AtomicReadOrd,
+        // The value that we would get, if we were to do a non-atomic load here.
         old_val: Option<Scalar>,
     ) -> InterpResult<'tcx, Scalar> {
-        assert!(!self.allow_data_races.get());
-        todo!()
+        info!("GenMC: atomic_load: old_val: {old_val:?}");
+        assert!(!self.allow_data_races.get()); // FIXME(genmc): ensure correct behavior wherever `allow_data_races` is used.
+        let ordering = ordering.convert();
+        let genmc_old_value = option_scalar_to_genmc_scalar(ecx, old_val)?;
+        let read_value =
+            self.atomic_load_impl(&ecx.machine, address, size, ordering, genmc_old_value)?;
+        info!("GenMC: atomic_load: received value from GenMC: {read_value:?}");
+        genmc_scalar_to_scalar(ecx, read_value, size)
     }
 
     pub(crate) fn atomic_store<'tcx>(
@@ -105,79 +180,92 @@ impl GenmcCtx {
         address: Size,
         size: Size,
         value: Scalar,
+        // The value that we would get, if we were to do a non-atomic load here.
+        old_value: Option<Scalar>,
         ordering: AtomicWriteOrd,
-    ) -> InterpResult<'tcx, ()> {
+    ) -> InterpResult<'tcx, bool> {
         assert!(!self.allow_data_races.get());
-        todo!()
+        let ordering = ordering.convert();
+        let genmc_value = scalar_to_genmc_scalar(ecx, value)?;
+        let genmc_old_value = option_scalar_to_genmc_scalar(ecx, old_value)?;
+        self.atomic_store_impl(&ecx.machine, address, size, genmc_value, genmc_old_value, ordering)
     }
 
     pub(crate) fn atomic_fence<'tcx>(
         &self,
-        machine: &MiriMachine<'tcx>,
-        ordering: AtomicFenceOrd,
-    ) -> InterpResult<'tcx, ()> {
-        assert!(!self.allow_data_races.get());
-        todo!()
+        _machine: &MiriMachine<'tcx>,
+        _ordering: AtomicFenceOrd,
+    ) -> InterpResult<'tcx> {
+        throw_unsup_format!("FIXME(genmc): Add support for atomic fences.")
     }
 
     /// Inform GenMC about an atomic read-modify-write operation.
     ///
-    /// Returns `(old_val, new_val)`.
+    /// Returns `(old_val, Option<new_val>)`; `new_val` is `None` when the store is not the latest write in coherence order (as determined by GenMC).
     pub(crate) fn atomic_rmw_op<'tcx>(
         &self,
-        ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
-        address: Size,
-        size: Size,
-        ordering: AtomicRwOrd,
-        (rmw_op, not): (mir::BinOp, bool),
-        rhs_scalar: Scalar,
-    ) -> InterpResult<'tcx, (Scalar, Scalar)> {
+        _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>,
+        _address: Size,
+        _size: Size,
+        _ordering: AtomicRwOrd,
+        (_rmw_op, _not): (mir::BinOp, bool),
+        _rhs_scalar: Scalar,
+        // The value that we would get, if we were to do a non-atomic load here.
+        _old_value: Scalar,
+    ) -> InterpResult<'tcx, (Scalar, Option<Scalar>)> {
         assert!(!self.allow_data_races.get());
-        todo!()
+        throw_unsup_format!("FIXME(genmc): Add support for atomic RMW.")
     }
 
     /// Inform GenMC about an atomic `min` or `max` operation.
     ///
-    /// Returns `(old_val, new_val)`.
+    /// Returns `(old_val, Option<new_val>)`; `new_val` is `None` when the store is not the latest write in coherence order (as determined by GenMC).
pub(crate) fn atomic_min_max_op<'tcx>( &self, - ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, - address: Size, - size: Size, - ordering: AtomicRwOrd, - min: bool, - is_signed: bool, - rhs_scalar: Scalar, - ) -> InterpResult<'tcx, (Scalar, Scalar)> { + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _address: Size, + _size: Size, + _ordering: AtomicRwOrd, + _min: bool, + _is_signed: bool, + _rhs_scalar: Scalar, + // The value that we would get, if we were to do a non-atomic load here. + _old_value: Scalar, + ) -> InterpResult<'tcx, (Scalar, Option)> { assert!(!self.allow_data_races.get()); - todo!() + throw_unsup_format!("FIXME(genmc): Add support for atomic min/max.") } + /// Returns `(old_val, Option)`. `new_val` might not be the latest write in coherence order, which is indicated by `None`. pub(crate) fn atomic_exchange<'tcx>( &self, - ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, - address: Size, - size: Size, - rhs_scalar: Scalar, - ordering: AtomicRwOrd, - ) -> InterpResult<'tcx, (Scalar, bool)> { + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _address: Size, + _size: Size, + _rhs_scalar: Scalar, + _ordering: AtomicRwOrd, + // The value that we would get, if we were to do a non-atomic load here. + _old_value: Scalar, + ) -> InterpResult<'tcx, (Scalar, Option)> { assert!(!self.allow_data_races.get()); - todo!() + throw_unsup_format!("FIXME(genmc): Add support for atomic swap.") } pub(crate) fn atomic_compare_exchange<'tcx>( &self, - ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, - address: Size, - size: Size, - expected_old_value: Scalar, - new_value: Scalar, - success: AtomicRwOrd, - fail: AtomicReadOrd, - can_fail_spuriously: bool, - ) -> InterpResult<'tcx, (Scalar, bool)> { + _ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + _address: Size, + _size: Size, + _expected_old_value: Scalar, + _new_value: Scalar, + _success: AtomicRwOrd, + _fail: AtomicReadOrd, + _can_fail_spuriously: bool, + // The value that we would get, if we were to do a non-atomic load here. 
+ _old_value: Scalar, + ) -> InterpResult<'tcx, (Scalar, bool, bool)> { assert!(!self.allow_data_races.get()); - todo!() + throw_unsup_format!("FIXME(genmc): Add support for atomic compare_exchange.") } /// Inform GenMC about a non-atomic memory load @@ -188,8 +276,45 @@ impl GenmcCtx { machine: &MiriMachine<'tcx>, address: Size, size: Size, - ) -> InterpResult<'tcx, ()> { - todo!() + ) -> InterpResult<'tcx> { + info!( + "GenMC: received memory_load (non-atomic): address: {:#x}, size: {}", + address.bytes(), + size.bytes() + ); + if self.allow_data_races.get() { + info!("GenMC: skipping `memory_load` on address"); + return interp_ok(()); + } + // GenMC doesn't like ZSTs, and they can't have any data races, so we skip them + if size.bytes() == 0 { + return interp_ok(()); + } + + if size.bytes() <= 8 { + // NOTE: Values loaded non-atomically are still handled by Miri, so we discard whatever we get from GenMC + let _read_value = self.atomic_load_impl( + machine, + address, + size, + MemOrdering::NotAtomic, + GenmcScalar::UNINIT, // Don't use DUMMY here, since that might have it stored as the initial value + )?; + return interp_ok(()); + } + + for (address, size) in split_access(address, size) { + let chunk_addr = Size::from_bytes(address); + let chunk_size = Size::from_bytes(size); + let _read_value = self.atomic_load_impl( + machine, + chunk_addr, + chunk_size, + MemOrdering::NotAtomic, + GenmcScalar::UNINIT, // Don't use DUMMY here, since that might have it stored as the initial value + )?; + } + interp_ok(()) } pub(crate) fn memory_store<'tcx>( @@ -197,31 +322,122 @@ impl GenmcCtx { machine: &MiriMachine<'tcx>, address: Size, size: Size, - ) -> InterpResult<'tcx, ()> { - todo!() + ) -> InterpResult<'tcx> { + info!( + "GenMC: received memory_store (non-atomic): address: {:#x}, size: {}", + address.bytes(), + size.bytes() + ); + if self.allow_data_races.get() { + info!("GenMC: skipping `memory_store`"); + return interp_ok(()); + } + // GenMC doesn't like ZSTs, and they can't have any data races, so we skip them + if size.bytes() == 0 { + return interp_ok(()); + } + + if size.bytes() <= 8 { + let _is_co_max_write = self.atomic_store_impl( + machine, + address, + size, + GenmcScalar::DUMMY, + GenmcScalar::UNINIT, // Don't use DUMMY here, since that might have it stored as the initial value + MemOrdering::NotAtomic, + )?; + return interp_ok(()); + } + + for (address, size) in split_access(address, size) { + let chunk_addr = Size::from_bytes(address); + let chunk_size = Size::from_bytes(size); + let _is_co_max_write = self.atomic_store_impl( + machine, + chunk_addr, + chunk_size, + GenmcScalar::DUMMY, + GenmcScalar::UNINIT, // Don't use DUMMY here, since that might have it stored as the initial value + MemOrdering::NotAtomic, + )?; + } + interp_ok(()) } /**** Memory (de)allocation ****/ pub(crate) fn handle_alloc<'tcx>( &self, - machine: &MiriMachine<'tcx>, + ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, + alloc_id: AllocId, size: Size, alignment: Align, memory_kind: MemoryKind, ) -> InterpResult<'tcx, u64> { - todo!() + let machine = &ecx.machine; + let chosen_address = if memory_kind == MiriMemoryKind::Global.into() { + ecx.get_global_allocation_address(&self.global_allocations, alloc_id)? + } else { + if self.allow_data_races.get() { + unreachable!(); // FIXME(genmc): can this happen and if yes, how should this be handled? 
+ } + let thread_infos = self.thread_infos.borrow(); + let curr_thread = machine.threads.active_thread(); + let genmc_tid = thread_infos.get_info(curr_thread).genmc_tid; + // GenMC doesn't support ZSTs, so we set the minimum size to 1 byte + let genmc_size = size.bytes().max(1); + + let alignment = alignment.bytes(); + + let mut mc = self.handle.borrow_mut(); + let pinned_mc = mc.as_mut().unwrap(); + let chosen_address = pinned_mc.handleMalloc(genmc_tid.0, genmc_size, alignment); + + assert_ne!(chosen_address, 0, "GenMC malloc returned nullptr."); + // Non-global addresses should not be in the global address space. + assert_eq!(0, chosen_address & GENMC_GLOBAL_ADDRESSES_MASK); + chosen_address + }; + + // Sanity check the address alignment: + debug_assert_eq!( + 0, + chosen_address % alignment.bytes(), + "Allocated address {chosen_address:#x} does not have requested alignment ({alignment:?})!" + ); + + interp_ok(chosen_address) } pub(crate) fn handle_dealloc<'tcx>( &self, machine: &MiriMachine<'tcx>, + alloc_id: AllocId, address: Size, size: Size, - align: Align, kind: MemoryKind, - ) -> InterpResult<'tcx, ()> { - todo!() + ) -> InterpResult<'tcx> { + assert_ne!( + kind, + MiriMemoryKind::Global.into(), + "we probably shouldn't try to deallocate global allocations (alloc_id: {alloc_id:?})" + ); + if self.allow_data_races.get() { + unreachable!(); // FIXME(genmc): can this happen and if yes, how should this be handled? + } + let thread_infos = self.thread_infos.borrow(); + let curr_thread = machine.threads.active_thread(); + let genmc_tid = thread_infos.get_info(curr_thread).genmc_tid; + + let genmc_address = address.bytes(); + // GenMC doesn't support ZSTs, so we set the minimum size to 1 byte + let genmc_size = size.bytes().max(1); + + let mut mc = self.handle.borrow_mut(); + let pinned_mc = mc.as_mut().unwrap(); + pinned_mc.handleFree(genmc_tid.0, genmc_address, genmc_size); + + interp_ok(()) } /**** Thread management ****/ @@ -229,56 +445,264 @@ impl GenmcCtx { pub(crate) fn handle_thread_create<'tcx>( &self, threads: &ThreadManager<'tcx>, + _start_routine: crate::Pointer, // FIXME(genmc): symmetry reduction will need this info + _func_arg: &crate::ImmTy<'tcx>, new_thread_id: ThreadId, - ) -> InterpResult<'tcx, ()> { + ) -> InterpResult<'tcx> { assert!(!self.allow_data_races.get()); - todo!() + let mut thread_infos = self.thread_infos.borrow_mut(); + + let curr_thread_id = threads.active_thread(); + let genmc_parent_tid = thread_infos.get_info(curr_thread_id).genmc_tid; + let genmc_new_tid = thread_infos.add_thread(new_thread_id); + + let mut mc = self.handle.borrow_mut(); + mc.as_mut().unwrap().handleThreadCreate(genmc_new_tid.0, genmc_parent_tid.0); + + interp_ok(()) } pub(crate) fn handle_thread_join<'tcx>( &self, active_thread_id: ThreadId, child_thread_id: ThreadId, - ) -> InterpResult<'tcx, ()> { + ) -> InterpResult<'tcx> { assert!(!self.allow_data_races.get()); - todo!() + let thread_infos = self.thread_infos.borrow(); + + let genmc_curr_tid = thread_infos.get_info(active_thread_id).genmc_tid; + let genmc_child_tid = thread_infos.get_info(child_thread_id).genmc_tid; + + let mut mc = self.handle.borrow_mut(); + mc.as_mut().unwrap().handleThreadJoin(genmc_curr_tid.0, genmc_child_tid.0); + + interp_ok(()) } - pub(crate) fn handle_thread_stack_empty(&self, thread_id: ThreadId) { - todo!() + pub(crate) fn handle_thread_finish<'tcx>(&self, threads: &ThreadManager<'tcx>) { + assert!(!self.allow_data_races.get()); + let curr_thread_id = threads.active_thread(); + + let thread_infos = 
self.thread_infos.borrow(); + let genmc_tid = thread_infos.get_info(curr_thread_id).genmc_tid; + + // NOTE: Miri doesn't support return values for threads, but GenMC expects one, so we return 0 + let ret_val = 0; + + info!( + "GenMC: handling thread finish (thread {curr_thread_id:?} ({genmc_tid:?}) returns with dummy value 0)" + ); + + let mut mc = self.handle.borrow_mut(); + mc.as_mut().unwrap().handleThreadFinish(genmc_tid.0, ret_val); } - pub(crate) fn handle_thread_finish<'tcx>( + /// Handle a call to `libc::exit` or the exit of the main thread. + /// Unless an error is returned, the program should continue executing (in a different thread, chosen by the next scheduling call). + pub(crate) fn handle_exit<'tcx>( &self, - threads: &ThreadManager<'tcx>, + thread: ThreadId, + exit_code: i32, + is_exit_call: bool, + ) -> InterpResult<'tcx> { + // Calling `libc::exit` doesn't do cleanup, so we skip the leak check in that case. + let leak_check = !is_exit_call; + self.set_exit_status(exit_code, leak_check); + + // FIXME(genmc): Add a flag to continue exploration even when the program exits with a non-zero exit code. + if exit_code != 0 { + info!("GenMC: 'exit' called with non-zero argument, aborting execution."); + throw_machine_stop!(TerminationInfo::GenmcFinishedExecution); + } + + if is_exit_call { + let thread_infos = self.thread_infos.borrow(); + let genmc_tid = thread_infos.get_info(thread).genmc_tid; + + let mut mc = self.handle.borrow_mut(); + mc.as_mut().unwrap().handleThreadKill(genmc_tid.0); + } + interp_ok(()) + } + + /**** Blocking instructions ****/ + + #[allow(unused)] + pub(crate) fn handle_verifier_assume<'tcx>( + &self, + machine: &MiriMachine<'tcx>, + condition: bool, ) -> InterpResult<'tcx, ()> { + if condition { interp_ok(()) } else { self.handle_user_block(machine) } + } +} + +impl GenmcCtx { + //* might fails if there's a race, load might also not read anything (returns None) */ + fn atomic_load_impl<'tcx>( + &self, + machine: &MiriMachine<'tcx>, + address: Size, + size: Size, + memory_ordering: MemOrdering, + genmc_old_value: GenmcScalar, + ) -> InterpResult<'tcx, GenmcScalar> { + assert!( + size.bytes() != 0 + && (memory_ordering == MemOrdering::NotAtomic || size.bytes().is_power_of_two()) + ); + if size.bytes() > 8 { + throw_unsup_format!("{UNSUPPORTED_ATOMICS_SIZE_MSG}"); + } assert!(!self.allow_data_races.get()); - todo!() + let thread_infos = self.thread_infos.borrow(); + let curr_thread_id = machine.threads.active_thread(); + let genmc_tid = thread_infos.get_info(curr_thread_id).genmc_tid; + + info!( + "GenMC: load, thread: {curr_thread_id:?} ({genmc_tid:?}), address: {addr} == {addr:#x}, size: {size:?}, ordering: {memory_ordering:?}, old_value: {genmc_old_value:x?}", + addr = address.bytes() + ); + let genmc_address = address.bytes(); + let genmc_size = size.bytes(); + + let mut mc = self.handle.borrow_mut(); + let pinned_mc = mc.as_mut().unwrap(); + let load_result = pinned_mc.handleLoad( + genmc_tid.0, + genmc_address, + genmc_size, + memory_ordering, + genmc_old_value, + ); + + if load_result.is_read_opt { + todo!(); + } + + if let Some(error) = load_result.error.as_ref() { + // FIXME(genmc): error handling + let msg = error.to_string_lossy().to_string(); + info!("GenMC: load operation returned an error: \"{msg}\""); + throw_ub_format!("{}", msg); + } + + info!("GenMC: load returned value: {:?}", load_result.read_value); + + interp_ok(load_result.read_value) + } + + fn atomic_store_impl<'tcx>( + &self, + machine: &MiriMachine<'tcx>, + address: Size, + size: 
Size, + genmc_value: GenmcScalar, + genmc_old_value: GenmcScalar, + memory_ordering: MemOrdering, + ) -> InterpResult<'tcx, bool> { + assert!( + size.bytes() != 0 + && (memory_ordering == MemOrdering::NotAtomic || size.bytes().is_power_of_two()) + ); + if size.bytes() > 8 { + throw_unsup_format!("{UNSUPPORTED_ATOMICS_SIZE_MSG}"); + } + let thread_infos = self.thread_infos.borrow(); + let curr_thread_id = machine.threads.active_thread(); + let genmc_tid = thread_infos.get_info(curr_thread_id).genmc_tid; + + let genmc_address = address.bytes(); + let genmc_size = size.bytes(); + + info!( + "GenMC: store, thread: {curr_thread_id:?} ({genmc_tid:?}), address: {addr} = {addr:#x}, size: {size:?}, ordering {memory_ordering:?}, value: {genmc_value:?}", + addr = address.bytes() + ); + + let mut mc = self.handle.borrow_mut(); + let pinned_mc = mc.as_mut().unwrap(); + let store_result = pinned_mc.handleStore( + genmc_tid.0, + genmc_address, + genmc_size, + genmc_value, + genmc_old_value, + memory_ordering, + ); + + if let Some(error) = store_result.error.as_ref() { + // FIXME(genmc): error handling + let msg = error.to_string_lossy().to_string(); + info!("GenMC: store operation returned an error: \"{msg}\""); + throw_ub_format!("{}", msg); + } + + interp_ok(store_result.isCoMaxWrite) } /**** Scheduling functionality ****/ - /// Ask for a scheduling decision. This should be called before every MIR instruction. - /// - /// GenMC may realize that the execution got stuck, then this function will return a `InterpErrorKind::MachineStop` with error kind `TerminationInfo::GenmcStuckExecution`). - /// - /// This is **not** an error by iself! Treat this as if the program ended normally: `handle_execution_end` should be called next, which will determine if were are any actual errors. - pub(crate) fn schedule_thread<'tcx>( + pub fn schedule_thread<'tcx>( &self, ecx: &InterpCx<'tcx, MiriMachine<'tcx>>, ) -> InterpResult<'tcx, ThreadId> { assert!(!self.allow_data_races.get()); - todo!() + let thread_manager = &ecx.machine.threads; + let active_thread_id = thread_manager.active_thread(); + + let curr_thread_next_instr_kind = + if !thread_manager.active_thread_ref().get_state().is_enabled() { + // The current thread can get blocked (e.g., due to a thread join, assume statement, ...), then we need to ask GenMC for another thread to schedule. + // `Load` is a safe default for the next instruction type, since we may not know what the next instruction is. + ActionKind::Load + } else { + let Some(frame) = thread_manager.get_thread_stack(active_thread_id).last() else { + return interp_ok(active_thread_id); + }; + let either::Either::Left(loc) = frame.current_loc() else { + // We are unwinding. + return interp_ok(active_thread_id); + }; + let basic_block = &frame.body().basic_blocks[loc.block]; + if let Some(_statement) = basic_block.statements.get(loc.statement_index) { + return interp_ok(active_thread_id); + } + + // FIXME(genmc): determine terminator kind. 
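+                // Conservatively report `Load` until the terminator kind is determined; as noted in the branch above, `Load` is the safe default when the next instruction kind is unknown.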
+ ActionKind::Load + }; + + info!( + "GenMC: schedule_thread, active thread: {active_thread_id:?}, next instr.: '{curr_thread_next_instr_kind:?}'" + ); + + // let curr_thread_user_block = self.curr_thread_user_block.replace(false); + let thread_infos = self.thread_infos.borrow(); + let curr_thread_info = thread_infos.get_info(active_thread_id); + + let mut mc = self.handle.borrow_mut(); + let pinned_mc = mc.as_mut().unwrap(); + let result = + pinned_mc.scheduleNext(curr_thread_info.genmc_tid.0, curr_thread_next_instr_kind); + if result >= 0 { + let genmc_next_thread_id = result.try_into().unwrap(); + let genmc_next_thread_id = GenmcThreadId(genmc_next_thread_id); + let thread_infos = self.thread_infos.borrow(); + let next_thread_id = thread_infos.get_info_genmc(genmc_next_thread_id).miri_tid; + + return interp_ok(next_thread_id); + } + + // Negative result means that GenMC has no next thread to schedule. + info!("GenMC: scheduleNext returned no thread to schedule, execution is finished."); + throw_machine_stop!(TerminationInfo::GenmcFinishedExecution); } - /**** Blocking instructions ****/ + /**** Blocking functionality ****/ - pub(crate) fn handle_verifier_assume<'tcx>( - &self, - machine: &MiriMachine<'tcx>, - condition: bool, - ) -> InterpResult<'tcx, ()> { - if condition { interp_ok(()) } else { self.handle_user_block(machine) } + fn handle_user_block<'tcx>(&self, _machine: &MiriMachine<'tcx>) -> InterpResult<'tcx> { + todo!() } } @@ -288,8 +712,11 @@ impl VisitProvenance for GenmcCtx { } } -impl GenmcCtx { - fn handle_user_block<'tcx>(&self, machine: &MiriMachine<'tcx>) -> InterpResult<'tcx, ()> { - todo!() +impl std::fmt::Debug for GenmcCtx { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("GenmcCtx") + // .field("mc", &self.mc) + .field("thread_infos", &self.thread_infos) + .finish_non_exhaustive() } } diff --git a/src/concurrency/genmc/thread_info_manager.rs b/src/concurrency/genmc/thread_info_manager.rs new file mode 100644 index 0000000000..096b88e457 --- /dev/null +++ b/src/concurrency/genmc/thread_info_manager.rs @@ -0,0 +1,89 @@ +use genmc_sys::{GENMC_MAIN_THREAD_ID, GenmcThreadId}; +use rustc_data_structures::fx::FxHashMap; + +use crate::ThreadId; + +#[derive(Debug)] +pub struct ThreadInfo { + pub miri_tid: ThreadId, + pub genmc_tid: GenmcThreadId, +} + +impl ThreadInfo { + const MAIN_THREAD_INFO: Self = Self::new(ThreadId::MAIN_THREAD, GENMC_MAIN_THREAD_ID); + + #[must_use] + pub const fn new(miri_tid: ThreadId, genmc_tid: GenmcThreadId) -> Self { + Self { miri_tid, genmc_tid } + } +} + +#[derive(Debug)] +pub struct ThreadInfoManager { + tid_map: FxHashMap, + thread_infos: Vec, +} + +impl Default for ThreadInfoManager { + fn default() -> Self { + Self::new() + } +} + +impl ThreadInfoManager { + #[must_use] + pub fn new() -> Self { + let mut tid_map = FxHashMap::default(); + tid_map.insert(ThreadId::MAIN_THREAD, GENMC_MAIN_THREAD_ID); + let thread_infos = vec![ThreadInfo::MAIN_THREAD_INFO]; + Self { tid_map, thread_infos } + } + + pub fn reset(&mut self) { + self.tid_map.clear(); + self.tid_map.insert(ThreadId::MAIN_THREAD, GENMC_MAIN_THREAD_ID); + self.thread_infos.clear(); + self.thread_infos.push(ThreadInfo::MAIN_THREAD_INFO); + } + + #[must_use] + #[allow(unused)] + pub fn thread_count(&self) -> usize { + self.thread_infos.len() + } + + pub fn add_thread(&mut self, thread_id: ThreadId) -> GenmcThreadId { + // NOTE: GenMC thread ids are integers incremented by one every time + let index = self.thread_infos.len(); + let genmc_tid = 
GenmcThreadId(index.try_into().unwrap()); + let thread_info = ThreadInfo::new(thread_id, genmc_tid); + // FIXME(genmc): Document this in place where ThreadIds are created + assert!( + self.tid_map.insert(thread_id, genmc_tid).is_none(), + "Cannot reuse thread ids: thread id {thread_id:?} already inserted" + ); + self.thread_infos.push(thread_info); + + genmc_tid + } + + #[must_use] + pub fn get_info(&self, thread_id: ThreadId) -> &ThreadInfo { + let genmc_tid = *self.tid_map.get(&thread_id).unwrap(); + self.get_info_genmc(genmc_tid) + } + + #[must_use] + pub fn get_info_genmc(&self, genmc_tid: GenmcThreadId) -> &ThreadInfo { + let index: usize = genmc_tid.0.try_into().unwrap(); + let Some(infos) = self.thread_infos.get(index) else { + panic!( + "GenMC returned invalid thread id: {}, existing thread ids: {}..{}", + genmc_tid.0, + GENMC_MAIN_THREAD_ID.0, + self.thread_infos.len() + ); + }; + infos + } +} diff --git a/src/concurrency/mod.rs b/src/concurrency/mod.rs index 435615efd9..4c8e0776d9 100644 --- a/src/concurrency/mod.rs +++ b/src/concurrency/mod.rs @@ -22,5 +22,5 @@ pub mod weak_memory; mod genmc; pub use self::data_race_handler::{AllocDataRaceHandler, GlobalDataRaceHandler}; -pub use self::genmc::{GenmcConfig, GenmcCtx}; +pub use self::genmc::{GenmcConfig, GenmcCtx, miri_genmc}; pub use self::vector_clock::VClock; diff --git a/src/concurrency/thread.rs b/src/concurrency/thread.rs index fe1ef86ccd..6caba147e5 100644 --- a/src/concurrency/thread.rs +++ b/src/concurrency/thread.rs @@ -113,7 +113,8 @@ pub enum BlockReason { } /// The state of a thread. -enum ThreadState<'tcx> { +// FIXME(genmc,question): is this ok to be public? +pub(crate) enum ThreadState<'tcx> { /// The thread is enabled and can be executed. Enabled, /// The thread is blocked on something. @@ -135,15 +136,16 @@ impl<'tcx> std::fmt::Debug for ThreadState<'tcx> { } impl<'tcx> ThreadState<'tcx> { - fn is_enabled(&self) -> bool { + // FIXME(genmc,question): is it ok for these to be public? + pub(crate) fn is_enabled(&self) -> bool { matches!(self, ThreadState::Enabled) } - fn is_terminated(&self) -> bool { + pub(crate) fn is_terminated(&self) -> bool { matches!(self, ThreadState::Terminated) } - fn is_blocked_on(&self, reason: BlockReason) -> bool { + pub(crate) fn is_blocked_on(&self, reason: BlockReason) -> bool { matches!(*self, ThreadState::Blocked { reason: actual_reason, .. } if actual_reason == reason) } } @@ -209,6 +211,12 @@ impl<'tcx> Thread<'tcx> { self.thread_name.as_deref() } + // FIXME(genmc,question): is this ok to be public? (it exposes implementation details) + #[allow(unused)] // Note: only used if `genmc` feature is enabled. + pub(crate) fn get_state(&self) -> &ThreadState<'tcx> { + &self.state + } + /// Get the name of the current thread for display purposes; will include thread ID if not set. fn thread_display_name(&self, id: ThreadId) -> String { if let Some(ref thread_name) = self.thread_name { @@ -342,9 +350,10 @@ impl VisitProvenance for Frame<'_, Provenance, FrameExtra<'_>> { } } +// FIXME(genmc,question): is this ok to be public? /// The moment in time when a blocked thread should be woken up. #[derive(Debug)] -enum Timeout { +pub(crate) enum Timeout { Monotonic(Instant), RealTime(SystemTime), } @@ -491,6 +500,15 @@ impl<'tcx> ThreadManager<'tcx> { &mut self.threads[self.active_thread].stack } + // FIXME(genmc,question): is this ok to exist? + #[allow(unused)] // Note: only used if `genmc` feature is enabled. 
+ pub(crate) fn get_thread_stack( + &self, + id: ThreadId, + ) -> &[Frame<'tcx, Provenance, FrameExtra<'tcx>>] { + &self.threads[id].stack + } + pub fn all_stacks( &self, ) -> impl Iterator>])> { @@ -676,13 +694,6 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { #[inline] fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> { let this = self.eval_context_mut(); - // Inform GenMC that a thread has finished all user code. GenMC needs to know this for scheduling. - // FIXME(GenMC): Thread-local destructors *are* user code, so this is odd. Also now that we - // support pre-main constructors, it can get called there as well. - if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { - let thread_id = this.active_thread(); - genmc_ctx.handle_thread_stack_empty(thread_id); - } let mut callback = this .active_thread_mut() .on_stack_empty @@ -703,7 +714,8 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { /// If GenMC mode is active, the scheduling is instead handled by GenMC. fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> { let this = self.eval_context_mut(); - // In GenMC mode, we let GenMC do the scheduling + + // In GenMC mode, we let GenMC do the scheduling. if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { let next_thread_id = genmc_ctx.schedule_thread(this)?; @@ -715,7 +727,7 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> { return interp_ok(SchedulingAction::ExecuteStep); } - // We are not in GenMC mode, so we control the schedule + // We are not in GenMC mode, so we control the scheduling. let thread_manager = &mut this.machine.threads; let clock = &this.machine.monotonic_clock; let rng = this.machine.rng.get_mut(); @@ -863,7 +875,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { GlobalDataRaceHandler::Vclocks(data_race) => data_race.thread_created(&this.machine.threads, new_thread_id, current_span), GlobalDataRaceHandler::Genmc(genmc_ctx) => - genmc_ctx.handle_thread_create(&this.machine.threads, new_thread_id)?, + genmc_ctx.handle_thread_create( + &this.machine.threads, + start_routine, + &func_arg, + new_thread_id, + )?, } // Write the current thread-id, switch to the next thread later // to treat this write operation as occurring on the current thread. @@ -916,13 +933,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let thread = this.active_thread_mut(); assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated"); thread.state = ThreadState::Terminated; - match &mut this.machine.data_race { - GlobalDataRaceHandler::None => {} - GlobalDataRaceHandler::Vclocks(data_race) => - data_race.thread_terminated(&this.machine.threads), - GlobalDataRaceHandler::Genmc(genmc_ctx) => - genmc_ctx.handle_thread_finish(&this.machine.threads)?, - } + // Deallocate TLS. let gone_thread = this.active_thread(); { @@ -953,6 +964,19 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { } } } + + // FIXME(genmc,question): Is it correct to move this code here? + match &mut this.machine.data_race { + GlobalDataRaceHandler::None => {} + GlobalDataRaceHandler::Vclocks(data_race) => + data_race.thread_terminated(&this.machine.threads), + GlobalDataRaceHandler::Genmc(genmc_ctx) => { + // Inform GenMC that the thread finished. + // This needs to happen once all accesses to the thread are done, including freeing any TLS statics. + genmc_ctx.handle_thread_finish(&this.machine.threads) + } + } + // Unblock joining threads. 
let unblock_reason = BlockReason::Join(gone_thread); let threads = &this.machine.threads.threads; @@ -1076,6 +1100,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { "{:?} blocked on {:?} when trying to join", thread_mgr.active_thread, joined_thread_id ); + if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { + genmc_ctx.handle_thread_join(thread_mgr.active_thread, joined_thread_id)?; + } + // The joined thread is still running, we need to wait for it. // Once we get unblocked, perform the appropriate synchronization and write the return value. let dest = return_dest.clone(); @@ -1236,6 +1264,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { SchedulingAction::ExecuteTimeoutCallback => { this.run_timeout_callback()?; } + // FIXME(genmc): properly handle sleep in GenMC mode. SchedulingAction::Sleep(duration) => { this.machine.monotonic_clock.sleep(duration); } diff --git a/src/diagnostics.rs b/src/diagnostics.rs index 9ecbd31c5b..56bcedc74b 100644 --- a/src/diagnostics.rs +++ b/src/diagnostics.rs @@ -31,8 +31,10 @@ pub enum TerminationInfo { }, Int2PtrWithStrictProvenance, Deadlock, - /// In GenMC mode, an execution can get stuck in certain cases. This is not an error. - GenmcStuckExecution, + /// Program exit is handled differently in GenMC mode. + /// Executions can get blocked or continue running after the main thread is finished or `exit` is called. + /// The `Exit` variant of this enum should not be used in GenMC mode. + GenmcFinishedExecution, MultipleSymbolDefinitions { link_name: Symbol, first: SpanData, @@ -77,7 +79,11 @@ impl fmt::Display for TerminationInfo { StackedBorrowsUb { msg, .. } => write!(f, "{msg}"), TreeBorrowsUb { title, .. } => write!(f, "{title}"), Deadlock => write!(f, "the evaluated program deadlocked"), - GenmcStuckExecution => write!(f, "GenMC determined that the execution got stuck"), + GenmcFinishedExecution => + write!( + f, + "GenMC determined that the execution is finished (either the program exited or got blocked)" + ), MultipleSymbolDefinitions { link_name, .. } => write!(f, "multiple definitions of symbol `{link_name}`"), SymbolShimClashing { link_name, .. } => @@ -231,6 +237,10 @@ pub fn report_error<'tcx>( let (title, helps) = if let MachineStop(info) = e.kind() { let info = info.downcast_ref::().expect("invalid MachineStop payload"); use TerminationInfo::*; + assert!( + ecx.machine.data_race.as_genmc_ref().is_none() || !matches!(info, Exit { .. }), + "Program exit in GenMC mode should use `GenmcFinishedExecution`." + ); let title = match info { &Exit { code, leak_check } => return Some((code, leak_check)), Abort(_) => Some("abnormal termination"), @@ -243,11 +253,17 @@ pub fn report_error<'tcx>( labels.push(format!("this thread got stuck here")); None } - GenmcStuckExecution => { - // This case should only happen in GenMC mode. We treat it like a normal program exit. - assert!(ecx.machine.data_race.as_genmc_ref().is_some()); - tracing::info!("GenMC: found stuck execution"); - return Some((0, true)); + GenmcFinishedExecution => { + // This case should only happen in GenMC mode. + let genmc_ctx = ecx.machine.data_race.as_genmc_ref().unwrap(); + // Check whether the execution got blocked or the program exited. + if let Some((exit_code, leak_check)) = genmc_ctx.get_exit_status() { + // We have an exit status (from `exit(x)` or main thread return). + // Skip leak checks if the execution was blocked. 
+ return Some((exit_code, leak_check)); + } + // The program got blocked by GenMC without ever exiting, so we don't do any leak checks. + return Some((0, false)); } MultipleSymbolDefinitions { .. } | SymbolShimClashing { .. } => None, }; diff --git a/src/eval.rs b/src/eval.rs index 4c531a8d1f..a19eecb83c 100644 --- a/src/eval.rs +++ b/src/eval.rs @@ -303,8 +303,21 @@ impl<'tcx> MainThreadState<'tcx> { // to be like a global `static`, so that all memory reached by it is considered to "not leak". this.terminate_active_thread(TlsAllocAction::Leak)?; - // Stop interpreter loop. - throw_machine_stop!(TerminationInfo::Exit { code: exit_code, leak_check: true }); + // In GenMC mode, we let GenMC decide what happens on main thread exit. + if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { + // If there's no error, execution will continue (on another thread). + genmc_ctx.handle_exit( + ThreadId::MAIN_THREAD, + exit_code, + /* is_exit_call */ false, + )?; + } else { + // Stop interpreter loop. + throw_machine_stop!(TerminationInfo::Exit { + code: exit_code, + leak_check: true + }); + } } } interp_ok(Poll::Pending) diff --git a/src/lib.rs b/src/lib.rs index 507d4f7b42..04c50b5be4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -133,7 +133,7 @@ pub use crate::concurrency::thread::{ BlockReason, DynUnblockCallback, EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager, TimeoutAnchor, TimeoutClock, UnblockKind, }; -pub use crate::concurrency::{GenmcConfig, GenmcCtx}; +pub use crate::concurrency::{GenmcConfig, GenmcCtx, miri_genmc}; pub use crate::data_structures::dedup_range_map::DedupRangeMap; pub use crate::data_structures::mono_hash_map::MonoHashMap; pub use crate::diagnostics::{ diff --git a/src/machine.rs b/src/machine.rs index 8f0814a070..6e5142f92e 100644 --- a/src/machine.rs +++ b/src/machine.rs @@ -1425,9 +1425,8 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { } match &machine.data_race { GlobalDataRaceHandler::None => {} - GlobalDataRaceHandler::Genmc(genmc_ctx) => { - genmc_ctx.memory_store(machine, ptr.addr(), range.size)?; - } + GlobalDataRaceHandler::Genmc(genmc_ctx) => + genmc_ctx.memory_store(machine, ptr.addr(), range.size)?, GlobalDataRaceHandler::Vclocks(_global_state) => { let AllocDataRaceHandler::Vclocks(data_race, weak_memory) = &mut alloc_extra.data_race @@ -1463,7 +1462,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { match &machine.data_race { GlobalDataRaceHandler::None => {} GlobalDataRaceHandler::Genmc(genmc_ctx) => - genmc_ctx.handle_dealloc(machine, ptr.addr(), size, align, kind)?, + genmc_ctx.handle_dealloc(machine, alloc_id, ptr.addr(), size, kind)?, GlobalDataRaceHandler::Vclocks(_global_state) => { let data_race = alloc_extra.data_race.as_vclocks_mut().unwrap(); data_race.write( diff --git a/src/shims/foreign_items.rs b/src/shims/foreign_items.rs index 21545b6802..6658163f11 100644 --- a/src/shims/foreign_items.rs +++ b/src/shims/foreign_items.rs @@ -443,13 +443,22 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> { "exit" => { let [code] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?; let code = this.read_scalar(code)?.to_i32()?; + if let Some(genmc_ctx) = this.machine.data_race.as_genmc_ref() { + // If there is no error, execution should continue (on a different thread). 
+ genmc_ctx.handle_exit( + this.machine.threads.active_thread(), + code, + /* is_exit_call */ true, + )?; + todo!(); // FIXME(genmc): Add a way to return here that is allowed to not do progress (can't use existing EmulateItemResult variants). + } throw_machine_stop!(TerminationInfo::Exit { code, leak_check: false }); } "abort" => { let [] = this.check_shim_sig_lenient(abi, CanonAbi::C, link_name, args)?; throw_machine_stop!(TerminationInfo::Abort( "the program aborted execution".to_owned() - )) + )); } // Standard C allocation diff --git a/tests/genmc/fail/simple/2w2w_weak.acq_rel.stderr b/tests/genmc/fail/simple/2w2w_weak.acq_rel.stderr new file mode 100644 index 0000000000..5567f4fc7a --- /dev/null +++ b/tests/genmc/fail/simple/2w2w_weak.acq_rel.stderr @@ -0,0 +1,15 @@ +error: Undefined Behavior: entering unreachable code + --> tests/genmc/fail/simple/2w2w_weak.rs:LL:CC + | +LL | unsafe { std::hint::unreachable_unchecked() }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here + | + = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior + = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information + = note: BACKTRACE: + = note: inside `miri_start` at tests/genmc/fail/simple/2w2w_weak.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + diff --git a/tests/genmc/fail/simple/2w2w_weak.relaxed.stderr b/tests/genmc/fail/simple/2w2w_weak.relaxed.stderr new file mode 100644 index 0000000000..5567f4fc7a --- /dev/null +++ b/tests/genmc/fail/simple/2w2w_weak.relaxed.stderr @@ -0,0 +1,15 @@ +error: Undefined Behavior: entering unreachable code + --> tests/genmc/fail/simple/2w2w_weak.rs:LL:CC + | +LL | unsafe { std::hint::unreachable_unchecked() }; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Undefined Behavior occurred here + | + = help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior + = help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information + = note: BACKTRACE: + = note: inside `miri_start` at tests/genmc/fail/simple/2w2w_weak.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + diff --git a/tests/genmc/fail/simple/2w2w_weak.rs b/tests/genmc/fail/simple/2w2w_weak.rs new file mode 100644 index 0000000000..f98ae48ccf --- /dev/null +++ b/tests/genmc/fail/simple/2w2w_weak.rs @@ -0,0 +1,52 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows +//@revisions: acq_rel relaxed + +// This test is the equivalent to the `2w2w_seqcst.rs` "pass" test. +// Here we use weaker atomic memory orderings to test if we can encounter +// an execution where (X == 1 && Y == 1). 
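+// With these weaker orderings the (X == 1 && Y == 1) outcome is allowed, so GenMC is expected to
+// find an execution that reaches the `unreachable_unchecked` below (see the `.stderr` files above).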
+ +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +use crate::genmc::{join_pthread, spawn_pthread}; + +static X: AtomicU64 = AtomicU64::new(0); +static Y: AtomicU64 = AtomicU64::new(0); + +#[cfg(acq_rel)] +const LOAD_ORD: Ordering = Ordering::Acquire; +#[cfg(acq_rel)] +const STORE_ORD: Ordering = Ordering::Release; + +#[cfg(not(acq_rel))] +const LOAD_ORD: Ordering = Ordering::Relaxed; +#[cfg(not(acq_rel))] +const STORE_ORD: Ordering = Ordering::Relaxed; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let thread_id = unsafe { spawn_pthread(thread_func, std::ptr::null_mut()) }; + + X.store(1, STORE_ORD); + Y.store(2, STORE_ORD); + + unsafe { join_pthread(thread_id) }; + + let x = X.load(LOAD_ORD); + let y = Y.load(LOAD_ORD); + if x == 1 && y == 1 { + unsafe { std::hint::unreachable_unchecked() }; //~ ERROR: entering unreachable code + } + 0 +} + +extern "C" fn thread_func(_value: *mut c_void) -> *mut c_void { + Y.store(1, STORE_ORD); + X.store(2, STORE_ORD); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/2_2w_3sc_rel1.rs b/tests/genmc/pass/litmus/2_2w_3sc_rel1.rs new file mode 100644 index 0000000000..df219021da --- /dev/null +++ b/tests/genmc/pass/litmus/2_2w_3sc_rel1.rs @@ -0,0 +1,33 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); +static Y: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2]) }; + + 0 +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + X.store(1, Ordering::SeqCst); + Y.store(2, Ordering::SeqCst); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + Y.store(1, Ordering::Release); + X.store(2, Ordering::SeqCst); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/2_2w_3sc_rel1.stderr b/tests/genmc/pass/litmus/2_2w_3sc_rel1.stderr new file mode 100644 index 0000000000..0667962f99 --- /dev/null +++ b/tests/genmc/pass/litmus/2_2w_3sc_rel1.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. 
+Number of complete executions explored: 4 diff --git a/tests/genmc/pass/litmus/2_2w_4sc.rs b/tests/genmc/pass/litmus/2_2w_4sc.rs new file mode 100644 index 0000000000..9c500e5012 --- /dev/null +++ b/tests/genmc/pass/litmus/2_2w_4sc.rs @@ -0,0 +1,33 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); +static Y: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2]) }; + + 0 +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + X.store(1, Ordering::SeqCst); + Y.store(2, Ordering::SeqCst); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + Y.store(1, Ordering::SeqCst); + X.store(2, Ordering::SeqCst); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/2_2w_4sc.stderr b/tests/genmc/pass/litmus/2_2w_4sc.stderr new file mode 100644 index 0000000000..115b1986ce --- /dev/null +++ b/tests/genmc/pass/litmus/2_2w_4sc.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. +Number of complete executions explored: 3 diff --git a/tests/genmc/pass/litmus/2cowr.rs b/tests/genmc/pass/litmus/2cowr.rs new file mode 100644 index 0000000000..9078539e1d --- /dev/null +++ b/tests/genmc/pass/litmus/2cowr.rs @@ -0,0 +1,42 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::ptr::null_mut; +use std::sync::atomic::{AtomicU64, Ordering}; + +use crate::genmc::*; + +static X: AtomicU64 = AtomicU64::new(0); +static Y: AtomicU64 = AtomicU64::new(0); + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2, thread_3, thread_4]) }; + + 0 +} + +pub extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + Y.load(Ordering::Acquire); + null_mut() +} + +pub extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + null_mut() +} + +pub extern "C" fn thread_3(_value: *mut c_void) -> *mut c_void { + X.store(1, Ordering::Release); + null_mut() +} + +pub extern "C" fn thread_4(_value: *mut c_void) -> *mut c_void { + Y.store(1, Ordering::Release); + null_mut() +} diff --git a/tests/genmc/pass/litmus/2cowr.stderr b/tests/genmc/pass/litmus/2cowr.stderr new file mode 100644 index 0000000000..0667962f99 --- /dev/null +++ b/tests/genmc/pass/litmus/2cowr.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. 
+Number of complete executions explored: 4 diff --git a/tests/genmc/pass/litmus/2w2w.rs b/tests/genmc/pass/litmus/2w2w.rs new file mode 100644 index 0000000000..629634af09 --- /dev/null +++ b/tests/genmc/pass/litmus/2w2w.rs @@ -0,0 +1,33 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); +static Y: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2]) }; + + 0 +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + Y.store(1, Ordering::Release); + X.store(2, Ordering::Release); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + Y.store(1, Ordering::Release); + X.store(2, Ordering::Release); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/2w2w.stderr b/tests/genmc/pass/litmus/2w2w.stderr new file mode 100644 index 0000000000..0667962f99 --- /dev/null +++ b/tests/genmc/pass/litmus/2w2w.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. +Number of complete executions explored: 4 diff --git a/tests/genmc/pass/litmus/IRIW-acq-sc.rs b/tests/genmc/pass/litmus/IRIW-acq-sc.rs new file mode 100644 index 0000000000..af63b6f2ba --- /dev/null +++ b/tests/genmc/pass/litmus/IRIW-acq-sc.rs @@ -0,0 +1,44 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +use crate::genmc::*; + +static X: AtomicU64 = AtomicU64::new(0); +static Y: AtomicU64 = AtomicU64::new(0); + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let thread_order = [thread_1, thread_2, thread_3, thread_4]; + let _ids = unsafe { create_pthreads_no_params(thread_order) }; + + 0 +} + +pub extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + X.store(1, Ordering::SeqCst); + std::ptr::null_mut() +} + +pub extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + Y.load(Ordering::SeqCst); + std::ptr::null_mut() +} + +pub extern "C" fn thread_3(_value: *mut c_void) -> *mut c_void { + Y.load(Ordering::Acquire); + X.load(Ordering::SeqCst); + std::ptr::null_mut() +} + +pub extern "C" fn thread_4(_value: *mut c_void) -> *mut c_void { + Y.store(1, Ordering::SeqCst); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/IRIW-acq-sc.stderr b/tests/genmc/pass/litmus/IRIW-acq-sc.stderr new file mode 100644 index 0000000000..c760b44605 --- /dev/null +++ b/tests/genmc/pass/litmus/IRIW-acq-sc.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. 
+Number of complete executions explored: 16 diff --git a/tests/genmc/pass/litmus/LB.rs b/tests/genmc/pass/litmus/LB.rs new file mode 100644 index 0000000000..686ea37748 --- /dev/null +++ b/tests/genmc/pass/litmus/LB.rs @@ -0,0 +1,33 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); +static Y: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2]) }; + + 0 +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + Y.load(Ordering::Acquire); + X.store(2, Ordering::Release); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + Y.store(1, Ordering::Release); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/LB.stderr b/tests/genmc/pass/litmus/LB.stderr new file mode 100644 index 0000000000..115b1986ce --- /dev/null +++ b/tests/genmc/pass/litmus/LB.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. +Number of complete executions explored: 3 diff --git a/tests/genmc/pass/litmus/MP.rs b/tests/genmc/pass/litmus/MP.rs new file mode 100644 index 0000000000..b360f97b8b --- /dev/null +++ b/tests/genmc/pass/litmus/MP.rs @@ -0,0 +1,33 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); +static Y: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2]) }; + + 0 +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + X.store(1, Ordering::Release); + Y.store(1, Ordering::Release); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + Y.load(Ordering::Acquire); + X.load(Ordering::Acquire); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/MP.stderr b/tests/genmc/pass/litmus/MP.stderr new file mode 100644 index 0000000000..115b1986ce --- /dev/null +++ b/tests/genmc/pass/litmus/MP.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. 
+Number of complete executions explored: 3 diff --git a/tests/genmc/pass/litmus/SB.rs b/tests/genmc/pass/litmus/SB.rs new file mode 100644 index 0000000000..b090a7b098 --- /dev/null +++ b/tests/genmc/pass/litmus/SB.rs @@ -0,0 +1,33 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); +static Y: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2]) }; + + 0 +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + X.store(1, Ordering::Release); + Y.load(Ordering::Acquire); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + Y.store(1, Ordering::Release); + X.load(Ordering::Acquire); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/SB.stderr b/tests/genmc/pass/litmus/SB.stderr new file mode 100644 index 0000000000..0667962f99 --- /dev/null +++ b/tests/genmc/pass/litmus/SB.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. +Number of complete executions explored: 4 diff --git a/tests/genmc/pass/litmus/atomicpo.stderr b/tests/genmc/pass/litmus/atomicpo.stderr new file mode 100644 index 0000000000..0667962f99 --- /dev/null +++ b/tests/genmc/pass/litmus/atomicpo.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. +Number of complete executions explored: 4 diff --git a/tests/genmc/pass/litmus/corr.rs b/tests/genmc/pass/litmus/corr.rs new file mode 100644 index 0000000000..1de6e708da --- /dev/null +++ b/tests/genmc/pass/litmus/corr.rs @@ -0,0 +1,32 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2]) }; + + 0 +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + X.store(1, Ordering::Release); + X.store(2, Ordering::Release); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + X.load(Ordering::Acquire); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/corr.stderr b/tests/genmc/pass/litmus/corr.stderr new file mode 100644 index 0000000000..be75e68fde --- /dev/null +++ b/tests/genmc/pass/litmus/corr.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. 
+Number of complete executions explored: 6 diff --git a/tests/genmc/pass/litmus/corr0.rs b/tests/genmc/pass/litmus/corr0.rs new file mode 100644 index 0000000000..73d0a7318b --- /dev/null +++ b/tests/genmc/pass/litmus/corr0.rs @@ -0,0 +1,35 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2, thread_3]) }; + + 0 +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + X.store(1, Ordering::Release); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + std::ptr::null_mut() +} + +extern "C" fn thread_3(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/corr0.stderr b/tests/genmc/pass/litmus/corr0.stderr new file mode 100644 index 0000000000..0667962f99 --- /dev/null +++ b/tests/genmc/pass/litmus/corr0.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. +Number of complete executions explored: 4 diff --git a/tests/genmc/pass/litmus/corr1.rs b/tests/genmc/pass/litmus/corr1.rs new file mode 100644 index 0000000000..802005154d --- /dev/null +++ b/tests/genmc/pass/litmus/corr1.rs @@ -0,0 +1,43 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + unsafe { + let ids = create_pthreads_no_params([thread_1, thread_2, thread_3, thread_4]); + join_pthreads(ids); + 0 + } +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + X.store(1, Ordering::Release); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + X.store(2, Ordering::Release); + std::ptr::null_mut() +} + +extern "C" fn thread_3(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + X.load(Ordering::Acquire); + std::ptr::null_mut() +} + +extern "C" fn thread_4(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/corr1.stderr b/tests/genmc/pass/litmus/corr1.stderr new file mode 100644 index 0000000000..f6d07e9c77 --- /dev/null +++ b/tests/genmc/pass/litmus/corr1.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. 
+Number of complete executions explored: 36 diff --git a/tests/genmc/pass/litmus/corr2.rs b/tests/genmc/pass/litmus/corr2.rs new file mode 100644 index 0000000000..8b61508fc7 --- /dev/null +++ b/tests/genmc/pass/litmus/corr2.rs @@ -0,0 +1,42 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2, thread_3, thread_4]) }; + + 0 +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + X.store(1, Ordering::Release); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + X.store(2, Ordering::Release); + std::ptr::null_mut() +} + +extern "C" fn thread_3(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + X.load(Ordering::Acquire); + std::ptr::null_mut() +} + +extern "C" fn thread_4(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + X.load(Ordering::Acquire); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/corr2.stderr b/tests/genmc/pass/litmus/corr2.stderr new file mode 100644 index 0000000000..78a90b63fe --- /dev/null +++ b/tests/genmc/pass/litmus/corr2.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. +Number of complete executions explored: 72 diff --git a/tests/genmc/pass/litmus/corw.rs b/tests/genmc/pass/litmus/corw.rs new file mode 100644 index 0000000000..d19fd5c131 --- /dev/null +++ b/tests/genmc/pass/litmus/corw.rs @@ -0,0 +1,31 @@ +//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows + +#![no_main] + +#[path = "../../../utils/genmc.rs"] +mod genmc; + +use std::ffi::c_void; +use std::sync::atomic::{AtomicU64, Ordering}; + +static X: AtomicU64 = AtomicU64::new(0); + +use crate::genmc::*; + +#[unsafe(no_mangle)] +fn miri_start(_argc: isize, _argv: *const *const u8) -> isize { + let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2]) }; + + 0 +} + +extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void { + X.load(Ordering::Acquire); + X.store(1, Ordering::Release); + std::ptr::null_mut() +} + +extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void { + X.store(2, Ordering::Release); + std::ptr::null_mut() +} diff --git a/tests/genmc/pass/litmus/corw.stderr b/tests/genmc/pass/litmus/corw.stderr new file mode 100644 index 0000000000..115b1986ce --- /dev/null +++ b/tests/genmc/pass/litmus/corw.stderr @@ -0,0 +1,2 @@ +(GenMC) Verification complete. No errors were detected. 
+Number of complete executions explored: 3
diff --git a/tests/genmc/pass/litmus/cowr.rs b/tests/genmc/pass/litmus/cowr.rs
new file mode 100644
index 0000000000..b1333c69c3
--- /dev/null
+++ b/tests/genmc/pass/litmus/cowr.rs
@@ -0,0 +1,31 @@
+//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
+
+#![no_main]
+
+#[path = "../../../utils/genmc.rs"]
+mod genmc;
+
+use std::ffi::c_void;
+use std::sync::atomic::{AtomicU64, Ordering};
+
+static X: AtomicU64 = AtomicU64::new(0);
+
+use crate::genmc::*;
+
+#[unsafe(no_mangle)]
+fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
+    let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2]) };
+
+    0
+}
+
+extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void {
+    X.store(1, Ordering::Release);
+    X.load(Ordering::Acquire);
+    std::ptr::null_mut()
+}
+
+extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void {
+    X.store(2, Ordering::Release);
+    std::ptr::null_mut()
+}
diff --git a/tests/genmc/pass/litmus/cowr.stderr b/tests/genmc/pass/litmus/cowr.stderr
new file mode 100644
index 0000000000..115b1986ce
--- /dev/null
+++ b/tests/genmc/pass/litmus/cowr.stderr
@@ -0,0 +1,2 @@
+(GenMC) Verification complete. No errors were detected.
+Number of complete executions explored: 3
diff --git a/tests/genmc/pass/litmus/default.rs b/tests/genmc/pass/litmus/default.rs
new file mode 100644
index 0000000000..10538ca3af
--- /dev/null
+++ b/tests/genmc/pass/litmus/default.rs
@@ -0,0 +1,36 @@
+//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
+
+#![no_main]
+
+#[path = "../../../utils/genmc.rs"]
+mod genmc;
+
+use std::ffi::c_void;
+use std::sync::atomic::{AtomicU64, Ordering};
+
+use crate::genmc::*;
+
+static X: AtomicU64 = AtomicU64::new(0);
+
+#[unsafe(no_mangle)]
+fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
+    let _ids = unsafe { create_pthreads_no_params([thread_1, thread_2, thread_3]) };
+
+    0
+}
+
+extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void {
+    X.load(Ordering::Acquire);
+    X.load(Ordering::Acquire);
+    std::ptr::null_mut()
+}
+
+extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void {
+    X.store(1, Ordering::Release);
+    std::ptr::null_mut()
+}
+
+extern "C" fn thread_3(_value: *mut c_void) -> *mut c_void {
+    X.store(2, Ordering::Release);
+    std::ptr::null_mut()
+}
diff --git a/tests/genmc/pass/litmus/default.stderr b/tests/genmc/pass/litmus/default.stderr
new file mode 100644
index 0000000000..e031393028
--- /dev/null
+++ b/tests/genmc/pass/litmus/default.stderr
@@ -0,0 +1,2 @@
+(GenMC) Verification complete. No errors were detected.
+Number of complete executions explored: 12
diff --git a/tests/genmc/pass/litmus/detour.rs b/tests/genmc/pass/litmus/detour.rs
new file mode 100644
index 0000000000..55a081415e
--- /dev/null
+++ b/tests/genmc/pass/litmus/detour.rs
@@ -0,0 +1,42 @@
+//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
+
+#![no_main]
+
+#[path = "../../../utils/genmc.rs"]
+mod genmc;
+
+use std::ffi::c_void;
+use std::sync::atomic::{AtomicI64, Ordering};
+
+use crate::genmc::*;
+
+static X: AtomicI64 = AtomicI64::new(0);
+static Y: AtomicI64 = AtomicI64::new(0);
+static Z: AtomicI64 = AtomicI64::new(0);
+
+#[unsafe(no_mangle)]
+fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
+    let thread_order = [thread_1, thread_2, thread_3];
+    let _ids = unsafe { create_pthreads_no_params(thread_order) };
+
+    0
+}
+
+extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void {
+    X.store(1, Ordering::Relaxed);
+    std::ptr::null_mut()
+}
+
+extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void {
+    let a = Z.load(Ordering::Relaxed);
+    X.store(a.wrapping_sub(1), Ordering::Relaxed);
+    let b = X.load(Ordering::Relaxed);
+    Y.store(b, Ordering::Relaxed);
+    std::ptr::null_mut()
+}
+
+extern "C" fn thread_3(_value: *mut c_void) -> *mut c_void {
+    let c = Y.load(Ordering::Relaxed);
+    Z.store(c, Ordering::Relaxed);
+    std::ptr::null_mut()
+}
diff --git a/tests/genmc/pass/litmus/detour.stderr b/tests/genmc/pass/litmus/detour.stderr
new file mode 100644
index 0000000000..15017249dc
--- /dev/null
+++ b/tests/genmc/pass/litmus/detour.stderr
@@ -0,0 +1,2 @@
+(GenMC) Verification complete. No errors were detected.
+Number of complete executions explored: 9
diff --git a/tests/genmc/pass/litmus/fr_w_w_w_reads.rs b/tests/genmc/pass/litmus/fr_w_w_w_reads.rs
new file mode 100644
index 0000000000..595cfd16c0
--- /dev/null
+++ b/tests/genmc/pass/litmus/fr_w_w_w_reads.rs
@@ -0,0 +1,44 @@
+//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
+
+#![no_main]
+
+#[path = "../../../utils/genmc.rs"]
+mod genmc;
+
+use std::ffi::c_void;
+use std::sync::atomic::{AtomicU64, Ordering};
+
+use crate::genmc::*;
+
+static X: AtomicU64 = AtomicU64::new(0);
+
+#[unsafe(no_mangle)]
+fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
+    let thread_order = [thread_1, thread_2, thread_3, thread_4];
+    let _ids = unsafe { create_pthreads_no_params(thread_order) };
+
+    0
+}
+
+pub extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void {
+    X.store(1, Ordering::Relaxed);
+    std::ptr::null_mut()
+}
+
+pub extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void {
+    X.store(2, Ordering::Relaxed);
+    std::ptr::null_mut()
+}
+
+pub extern "C" fn thread_3(_value: *mut c_void) -> *mut c_void {
+    X.store(3, Ordering::Relaxed);
+    std::ptr::null_mut()
+}
+
+pub extern "C" fn thread_4(_value: *mut c_void) -> *mut c_void {
+    let _r1 = X.load(Ordering::Relaxed);
+    let _r2 = X.load(Ordering::Relaxed);
+    let _r3 = X.load(Ordering::Relaxed);
+    let _r4 = X.load(Ordering::Relaxed);
+    std::ptr::null_mut()
+}
diff --git a/tests/genmc/pass/litmus/fr_w_w_w_reads.stderr b/tests/genmc/pass/litmus/fr_w_w_w_reads.stderr
new file mode 100644
index 0000000000..3b6ba238f5
--- /dev/null
+++ b/tests/genmc/pass/litmus/fr_w_w_w_reads.stderr
@@ -0,0 +1,2 @@
+(GenMC) Verification complete. No errors were detected.
+Number of complete executions explored: 210
diff --git a/tests/genmc/pass/test_cxx_build.rs b/tests/genmc/pass/test_cxx_build.rs
deleted file mode 100644
index f621bd9114..0000000000
--- a/tests/genmc/pass/test_cxx_build.rs
+++ /dev/null
@@ -1,8 +0,0 @@
-//@compile-flags: -Zmiri-genmc
-
-#![no_main]
-
-#[unsafe(no_mangle)]
-fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
-    0
-}
diff --git a/tests/genmc/pass/test_cxx_build.stderr b/tests/genmc/pass/test_cxx_build.stderr
deleted file mode 100644
index 4b7aa824bd..0000000000
--- a/tests/genmc/pass/test_cxx_build.stderr
+++ /dev/null
@@ -1,5 +0,0 @@
-warning: borrow tracking has been disabled, it is not (yet) supported in GenMC mode.
-C++: GenMC handle created!
-Miri: GenMC handle creation successful!
-C++: GenMC handle destroyed!
-Miri: Dropping GenMC handle successful!
diff --git a/tests/genmc/pass/thread/libc_exit.rs.disabled b/tests/genmc/pass/thread/libc_exit.rs.disabled
new file mode 100644
index 0000000000..96a72790d0
--- /dev/null
+++ b/tests/genmc/pass/thread/libc_exit.rs.disabled
@@ -0,0 +1,59 @@
+//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
+//@revisions: order0123 order3012 order2301 order1230 order3210 order1032
+
+#![no_main]
+
+// Copied from `tests/genmc/pass/litmus/inc2w.rs`
+
+#[path = "../../../../utils/genmc.rs"]
+mod genmc;
+
+use std::ffi::c_void;
+use std::ptr::null_mut;
+use std::sync::atomic::{AtomicU64, Ordering};
+
+use crate::genmc::*;
+
+static X: AtomicU64 = AtomicU64::new(0);
+
+#[unsafe(no_mangle)]
+fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
+    let thread_order = if cfg!(order0123) {
+        [thread_0_exit, thread_1, thread_2, thread_3]
+    } else if cfg!(order3012) {
+        [thread_3, thread_0_exit, thread_1, thread_2]
+    } else if cfg!(order2301) {
+        [thread_2, thread_3, thread_0_exit, thread_1]
+    } else if cfg!(order1230) {
+        [thread_1, thread_2, thread_3, thread_0_exit]
+    } else if cfg!(order3210) {
+        [thread_3, thread_2, thread_1, thread_0_exit]
+    } else if cfg!(order1032) {
+        [thread_1, thread_0_exit, thread_3, thread_2]
+    } else {
+        unimplemented!();
+    };
+
+    let _ids = unsafe { create_pthreads_no_params(thread_order) };
+
+    0
+}
+
+pub extern "C" fn thread_0_exit(_value: *mut c_void) -> *mut c_void {
+    unsafe { libc::exit(0) }
+}
+
+pub extern "C" fn thread_1(_value: *mut c_void) -> *mut c_void {
+    X.fetch_add(1, Ordering::Relaxed);
+    null_mut()
+}
+
+pub extern "C" fn thread_2(_value: *mut c_void) -> *mut c_void {
+    X.store(4, Ordering::Release);
+    null_mut()
+}
+
+pub extern "C" fn thread_3(_value: *mut c_void) -> *mut c_void {
+    X.fetch_add(2, Ordering::Relaxed);
+    null_mut()
+}
diff --git a/tests/genmc/pass/thread/miri_main_spawn_pthreads.rs b/tests/genmc/pass/thread/miri_main_spawn_pthreads.rs
new file mode 100644
index 0000000000..59df0e766f
--- /dev/null
+++ b/tests/genmc/pass/thread/miri_main_spawn_pthreads.rs
@@ -0,0 +1,35 @@
+//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
+
+#![no_main]
+
+use std::ffi::c_void;
+
+use libc::{self, pthread_attr_t, pthread_t};
+
+const N: usize = 2;
+
+#[unsafe(no_mangle)]
+fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
+    let mut handles: Vec<pthread_t> = vec![0; N];
+
+    let attr: *const pthread_attr_t = std::ptr::null();
+    let value: *mut c_void = std::ptr::null_mut();
+
+    handles.iter_mut().for_each(|thread_id| {
+        if 0 != unsafe { libc::pthread_create(thread_id, attr, thread_func, value) } {
+            std::process::abort();
+        }
+    });
+
+    handles.into_iter().for_each(|thread_id| {
+        if 0 != unsafe { libc::pthread_join(thread_id, std::ptr::null_mut()) } {
+            std::process::abort();
+        }
+    });
+
+    0
+}
+
+extern "C" fn thread_func(_value: *mut c_void) -> *mut c_void {
+    std::ptr::null_mut()
+}
diff --git a/tests/genmc/pass/thread/miri_main_spawn_pthreads.stderr b/tests/genmc/pass/thread/miri_main_spawn_pthreads.stderr
new file mode 100644
index 0000000000..3b22247ee4
--- /dev/null
+++ b/tests/genmc/pass/thread/miri_main_spawn_pthreads.stderr
@@ -0,0 +1,2 @@
+(GenMC) Verification complete. No errors were detected.
+Number of complete executions explored: 1
diff --git a/tests/genmc/pass/thread/thread_simple.rs b/tests/genmc/pass/thread/thread_simple.rs
new file mode 100644
index 0000000000..abb3adbb03
--- /dev/null
+++ b/tests/genmc/pass/thread/thread_simple.rs
@@ -0,0 +1,34 @@
+//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
+
+#![no_main]
+
+use std::ffi::c_void;
+use std::sync::atomic::AtomicU64;
+use std::sync::atomic::Ordering::SeqCst;
+
+use libc::{self, pthread_attr_t, pthread_t};
+
+static FLAG: AtomicU64 = AtomicU64::new(0);
+
+#[unsafe(no_mangle)]
+fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
+    let mut thread_id: pthread_t = 0;
+
+    let attr: *const pthread_attr_t = std::ptr::null();
+    let value: *mut c_void = std::ptr::null_mut();
+
+    assert!(0 == unsafe { libc::pthread_create(&raw mut thread_id, attr, thread_func, value) });
+
+    FLAG.store(1, SeqCst);
+
+    assert!(0 == unsafe { libc::pthread_join(thread_id, std::ptr::null_mut()) });
+
+    let flag = FLAG.load(SeqCst);
+    assert!(flag == 1 || flag == 2);
+    return 0;
+}
+
+extern "C" fn thread_func(_value: *mut c_void) -> *mut c_void {
+    FLAG.store(2, SeqCst);
+    std::ptr::null_mut()
+}
diff --git a/tests/genmc/pass/thread/thread_simple.stderr b/tests/genmc/pass/thread/thread_simple.stderr
new file mode 100644
index 0000000000..01701dfe69
--- /dev/null
+++ b/tests/genmc/pass/thread/thread_simple.stderr
@@ -0,0 +1,2 @@
+(GenMC) Verification complete. No errors were detected.
+Number of complete executions explored: 2
diff --git a/tests/utils/genmc.rs b/tests/utils/genmc.rs
new file mode 100644
index 0000000000..376f296b3b
--- /dev/null
+++ b/tests/utils/genmc.rs
@@ -0,0 +1,40 @@
+#![allow(unused)]
+
+use std::ffi::c_void;
+
+use libc::{self, pthread_attr_t, pthread_t};
+
+/// Spawn one thread using `pthread_create`; abort the process on any error.
+pub unsafe fn spawn_pthread(
+    f: extern "C" fn(*mut c_void) -> *mut c_void,
+    value: *mut c_void,
+) -> pthread_t {
+    let mut thread_id: pthread_t = 0;
+
+    let attr: *const pthread_attr_t = std::ptr::null();
+
+    if 0 != unsafe { libc::pthread_create(&raw mut thread_id, attr, f, value) } {
+        std::process::abort();
+    }
+    thread_id
+}
+
+/// Join the given pthread; abort the process on any error.
+pub unsafe fn join_pthread(thread_id: pthread_t) {
+    if 0 != unsafe { libc::pthread_join(thread_id, std::ptr::null_mut()) } {
+        std::process::abort();
+    }
+}
+
+/// Spawn `N` threads using `pthread_create` without any arguments; abort the process on any error.
+pub unsafe fn create_pthreads_no_params<const N: usize>(
+    functions: [extern "C" fn(*mut c_void) -> *mut c_void; N],
+) -> [pthread_t; N] {
+    let value = std::ptr::null_mut();
+    functions.map(|func| spawn_pthread(func, value))
+}
+
+/// Join the `N` given pthreads; abort the process on any error.
+pub unsafe fn join_pthreads<const N: usize>(thread_ids: [pthread_t; N]) {
+    let _ = thread_ids.map(|id| join_pthread(id));
+}
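
The tests in this patch only exercise the `tests/utils/genmc.rs` helpers through `create_pthreads_no_params`, which passes no argument to the spawned threads. As a minimal sketch (hypothetical test, not part of this patch; the file would sit next to the litmus tests so it could reuse the `../../../utils/genmc.rs` path), a test could also use `spawn_pthread`/`join_pthread` directly to hand a pointer argument to a single thread:

//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows

#![no_main]

#[path = "../../../utils/genmc.rs"]
mod genmc;

use std::ffi::c_void;
use std::sync::atomic::{AtomicU64, Ordering};

use crate::genmc::*;

static X: AtomicU64 = AtomicU64::new(0);

#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
    // Pass a pointer to a local as the thread argument; `pthread_create` and
    // `pthread_join` provide the happens-before edges that make the non-atomic
    // read in the thread and the final assertion race-free in every execution.
    let mut arg: u64 = 42;
    let id = unsafe { spawn_pthread(thread_func, (&raw mut arg).cast::<c_void>()) };
    unsafe { join_pthread(id) };
    assert!(X.load(Ordering::Acquire) == 42);
    0
}

extern "C" fn thread_func(value: *mut c_void) -> *mut c_void {
    // Read the argument written by the main thread and publish it via `X`.
    let v = unsafe { *value.cast::<u64>() };
    X.store(v, Ordering::Release);
    std::ptr::null_mut()
}

Since the join synchronizes the two threads, such a test would be expected to explore a single execution, mirroring miri_main_spawn_pthreads.rs above.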