Rollup merge of #155553 - RalfJung:miri, r=RalfJung

miri subtree update

Subtree update of `miri` to https://github.com/rust-lang/miri/commit/16dd940bb92f3ed98f588a5dbe58efe005d10ffc.

Created using https://github.com/rust-lang/josh-sync.

r? @ghost
This commit is contained in:
Jonathan Brouwer
2026-04-20 18:57:02 +02:00
committed by GitHub
102 changed files with 2237 additions and 1047 deletions
+2 -2
View File
@@ -1162,9 +1162,9 @@ checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "rand"
version = "0.9.2"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
checksum = "7ec095654a25171c2124e9e3393a930bddbffdc939556c914957a4c3e0a87166"
dependencies = [
"rand_chacha",
"rand_core",
+3 -1
View File
@@ -87,7 +87,9 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
println!("`cargo miri {verb}` supports the same flags as `cargo {verb}`:\n");
let mut cmd = cargo();
cmd.arg(verb);
cmd.arg("--help");
// Forward all arguments (some of them can influence the help output, e.g.
// the nextest verb).
cmd.args(args);
exec(cmd);
}
_ => {
+3 -6
View File
@@ -28,7 +28,7 @@ mod downloading {
/// The GenMC repository that we get our commit from.
pub(crate) const GENMC_GITHUB_URL: &str = "https://github.com/MPI-SWS/genmc.git";
/// The GenMC commit we depend on. It must be available on the specified GenMC repository.
pub(crate) const GENMC_COMMIT: &str = "22d3d0b44dedb4e8e1aae3330e546465e4664529";
pub(crate) const GENMC_COMMIT: &str = "29b03a66402c4453fc77901ef3be90bb55707cd4";
/// Ensure that a local GenMC repo is present and set to the correct commit.
/// Return the path of the GenMC repo clone.
@@ -159,6 +159,7 @@ fn compile_cpp_dependencies(genmc_path: &Path) {
.out_dir(genmc_build_dir)
.profile(GENMC_CMAKE_PROFILE)
.define("BUILD_LLI", "OFF")
.define("EMIT_NA_LABELS", "OFF")
.define("GENMC_DEBUG", if enable_genmc_debug { "ON" } else { "OFF" });
// The actual compilation happens here:
@@ -172,7 +173,7 @@ fn compile_cpp_dependencies(genmc_path: &Path) {
// Part 2:
// Compile the cxx_bridge (the link between the Rust and C++ code).
let genmc_include_dir = genmc_install_dir.join("include").join("genmc");
let genmc_include_dir = genmc_install_dir.join("include");
// These are all the C++ files we need to compile, which needs to be updated if more C++ files are added to Miri.
// We use absolute paths since relative paths can confuse IDEs when attempting to go-to-source on a path in a compiler error.
@@ -181,10 +182,6 @@ fn compile_cpp_dependencies(genmc_path: &Path) {
.map(|file| std::path::absolute(cpp_files_base_path.join(file)).unwrap());
let mut bridge = cxx_build::bridge("src/lib.rs");
// FIXME(genmc,cmake): Remove once the GenMC debug setting is available in the config.h file.
if enable_genmc_debug {
bridge.define("ENABLE_GENMC_DEBUG", None);
}
bridge
.opt_level(2)
.debug(true) // Same settings that GenMC uses (default for cmake `RelWithDebInfo`)
@@ -5,17 +5,17 @@
#include "rust/cxx.h"
// GenMC generated headers:
#include "config.h"
#include "genmc/config.h"
// Miri `genmc-sys/src_cpp` headers:
#include "ResultHandling.hpp"
// GenMC headers:
#include "ExecutionGraph/EventLabel.hpp"
#include "Support/MemOrdering.hpp"
#include "Support/RMWOps.hpp"
#include "Verification/Config.hpp"
#include "Verification/GenMCDriver.hpp"
#include "genmc/Execution/EventLabel.hpp"
#include "genmc/Support/MemOrdering.hpp"
#include "genmc/Support/RMWOps.hpp"
#include "genmc/Verification/Config.hpp"
#include "genmc/Verification/GenMCDriver.hpp"
// C++ headers:
#include <cstdint>
@@ -36,6 +36,7 @@ struct StoreResult;
struct ReadModifyWriteResult;
struct CompareExchangeResult;
struct MutexLockResult;
struct MallocResult;
// GenMC uses `int` for its thread IDs.
using ThreadId = int;
@@ -86,13 +87,15 @@ struct MiriGenmcShim : private GenMCDriver {
/**** Memory access handling ****/
[[nodiscard]] LoadResult handle_load(
[[nodiscard]] LoadResult handle_atomic_load(
ThreadId thread_id,
uint64_t address,
uint64_t size,
MemOrdering ord,
GenmcScalar old_val
);
[[nodiscard]] LoadResult
handle_non_atomic_load(ThreadId thread_id, uint64_t address, uint64_t size);
[[nodiscard]] ReadModifyWriteResult handle_read_modify_write(
ThreadId thread_id,
uint64_t address,
@@ -113,7 +116,7 @@ struct MiriGenmcShim : private GenMCDriver {
MemOrdering fail_load_ordering,
bool can_fail_spuriously
);
[[nodiscard]] StoreResult handle_store(
[[nodiscard]] StoreResult handle_atomic_store(
ThreadId thread_id,
uint64_t address,
uint64_t size,
@@ -121,12 +124,14 @@ struct MiriGenmcShim : private GenMCDriver {
GenmcScalar old_val,
MemOrdering ord
);
[[nodiscard]] StoreResult
handle_non_atomic_store(ThreadId thread_id, uint64_t address, uint64_t size);
void handle_fence(ThreadId thread_id, MemOrdering ord);
/**** Memory (de)allocation ****/
auto handle_malloc(ThreadId thread_id, uint64_t size, uint64_t alignment) -> uint64_t;
auto handle_malloc(ThreadId thread_id, uint64_t size, uint64_t alignment) -> MallocResult;
/** Returns null on success, or an error string if an error occurs. */
auto handle_free(ThreadId thread_id, uint64_t address) -> std::unique_ptr<std::string>;
@@ -203,33 +208,15 @@ struct MiriGenmcShim : private GenMCDriver {
auto get_estimation_results() const -> EstimationResult;
private:
/** Increment the event index in the given thread by 1 and return the new event. */
[[nodiscard]] inline auto inc_pos(ThreadId tid) -> Event {
/** Returns the current event for a given thread. */
inline auto curr_pos(ThreadId tid) -> Event {
ERROR_ON(tid >= threads_action_.size(), "ThreadId out of bounds");
return ++threads_action_[tid].event;
return threads_action_[tid].event;
}
/** Decrement the event index in the given thread by 1 and return the new event. */
inline auto dec_pos(ThreadId tid) -> Event {
/** Increment the event index in the given thread by `count`. */
inline void inc_pos(ThreadId tid, unsigned int count) {
ERROR_ON(tid >= threads_action_.size(), "ThreadId out of bounds");
return --threads_action_[tid].event;
}
/**
* Helper function for loads that need to reset the event counter when no value is returned.
* Same syntax as `GenMCDriver::handleLoad`, but this takes a thread id instead of an Event.
* Automatically calls `inc_pos` and `dec_pos` where needed for the given thread.
*/
template <EventLabel::EventLabelKind k, typename... Ts>
auto handle_load_reset_if_none(ThreadId tid, std::optional<SVal> old_val, Ts&&... params)
-> HandleResult<SVal> {
const auto pos = inc_pos(tid);
const auto ret =
GenMCDriver::handleLoad<k>(nullptr, pos, old_val, std::forward<Ts>(params)...);
// If we didn't get a value, we have to reset the index of the current thread.
if (!std::holds_alternative<SVal>(ret)) {
dec_pos(tid);
}
return ret;
threads_action_[tid].event.index += count;
}
/**
@@ -293,40 +280,55 @@ inline std::optional<SVal> try_to_sval(GenmcScalar scalar) {
namespace LoadResultExt {
inline LoadResult no_value() {
return LoadResult {
.invalid = false,
.error = std::unique_ptr<std::string>(nullptr),
.has_value = false,
.read_value = GenmcScalarExt::uninit(),
};
}
inline LoadResult from_value(SVal read_value) {
return LoadResult { .error = std::unique_ptr<std::string>(nullptr),
.has_value = true,
return LoadResult { .invalid = false,
.error = std::unique_ptr<std::string>(nullptr),
.read_value = GenmcScalarExt::from_sval(read_value) };
}
inline LoadResult from_error(std::unique_ptr<std::string> error) {
return LoadResult { .error = std::move(error),
.has_value = false,
return LoadResult { .invalid = false,
.error = std::move(error),
.read_value = GenmcScalarExt::uninit() };
}
inline LoadResult from_invalid() {
return LoadResult { .invalid = true, .error = nullptr, .read_value = GenmcScalarExt::uninit() };
}
} // namespace LoadResultExt
namespace StoreResultExt {
inline StoreResult ok(bool is_coherence_order_maximal_write) {
return StoreResult { /* error: */ std::unique_ptr<std::string>(nullptr),
is_coherence_order_maximal_write };
return StoreResult { .invalid = false,
.error = std::unique_ptr<std::string>(nullptr),
.is_coherence_order_maximal_write = is_coherence_order_maximal_write };
}
inline StoreResult from_error(std::unique_ptr<std::string> error) {
return StoreResult { .error = std::move(error), .is_coherence_order_maximal_write = false };
return StoreResult { .invalid = false,
.error = std::move(error),
.is_coherence_order_maximal_write = false };
}
inline StoreResult from_invalid() {
return StoreResult { .invalid = true,
.error = nullptr,
.is_coherence_order_maximal_write = false };
}
} // namespace StoreResultExt
namespace ReadModifyWriteResultExt {
inline ReadModifyWriteResult
ok(SVal old_value, SVal new_value, bool is_coherence_order_maximal_write) {
return ReadModifyWriteResult { .error = std::unique_ptr<std::string>(nullptr),
return ReadModifyWriteResult { .invalid = false,
.error = std::unique_ptr<std::string>(nullptr),
.old_value = GenmcScalarExt::from_sval(old_value),
.new_value = GenmcScalarExt::from_sval(new_value),
.is_coherence_order_maximal_write =
@@ -334,7 +336,16 @@ ok(SVal old_value, SVal new_value, bool is_coherence_order_maximal_write) {
}
inline ReadModifyWriteResult from_error(std::unique_ptr<std::string> error) {
return ReadModifyWriteResult { .error = std::move(error),
return ReadModifyWriteResult { .invalid = false,
.error = std::move(error),
.old_value = GenmcScalarExt::uninit(),
.new_value = GenmcScalarExt::uninit(),
.is_coherence_order_maximal_write = false };
}
inline ReadModifyWriteResult from_invalid() {
return ReadModifyWriteResult { .invalid = true,
.error = nullptr,
.old_value = GenmcScalarExt::uninit(),
.new_value = GenmcScalarExt::uninit(),
.is_coherence_order_maximal_write = false };
@@ -343,7 +354,8 @@ inline ReadModifyWriteResult from_error(std::unique_ptr<std::string> error) {
namespace CompareExchangeResultExt {
inline CompareExchangeResult success(SVal old_value, bool is_coherence_order_maximal_write) {
return CompareExchangeResult { .error = nullptr,
return CompareExchangeResult { .invalid = false,
.error = nullptr,
.old_value = GenmcScalarExt::from_sval(old_value),
.is_success = true,
.is_coherence_order_maximal_write =
@@ -351,14 +363,24 @@ inline CompareExchangeResult success(SVal old_value, bool is_coherence_order_max
}
inline CompareExchangeResult failure(SVal old_value) {
return CompareExchangeResult { .error = nullptr,
return CompareExchangeResult { .invalid = false,
.error = nullptr,
.old_value = GenmcScalarExt::from_sval(old_value),
.is_success = false,
.is_coherence_order_maximal_write = false };
}
inline CompareExchangeResult from_error(std::unique_ptr<std::string> error) {
return CompareExchangeResult { .error = std::move(error),
return CompareExchangeResult { .invalid = false,
.error = std::move(error),
.old_value = GenmcScalarExt::uninit(),
.is_success = false,
.is_coherence_order_maximal_write = false };
}
inline CompareExchangeResult from_invalid() {
return CompareExchangeResult { .invalid = true,
.error = nullptr,
.old_value = GenmcScalarExt::uninit(),
.is_success = false,
.is_coherence_order_maximal_write = false };
@@ -367,20 +389,42 @@ inline CompareExchangeResult from_error(std::unique_ptr<std::string> error) {
namespace MutexLockResultExt {
inline MutexLockResult ok(bool is_lock_acquired) {
return MutexLockResult { /* error: */ nullptr, /* is_reset: */ false, is_lock_acquired };
return MutexLockResult { .invalid = false,
.error = nullptr,
.is_reset = false,
.is_lock_acquired = is_lock_acquired };
}
inline MutexLockResult reset() {
return MutexLockResult { /* error: */ nullptr,
/* is_reset: */ true,
/* is_lock_acquired: */ false };
return MutexLockResult { .invalid = false,
.error = nullptr,
.is_reset = true,
.is_lock_acquired = false };
}
inline MutexLockResult from_error(std::unique_ptr<std::string> error) {
return MutexLockResult { /* error: */ std::move(error),
/* is_reset: */ false,
/* is_lock_acquired: */ false };
return MutexLockResult { .invalid = false,
.error = std::move(error),
.is_reset = false,
.is_lock_acquired = false };
}
inline MutexLockResult from_invalid() {
return MutexLockResult { .invalid = true,
.error = nullptr,
.is_reset = false,
.is_lock_acquired = false };
}
} // namespace MutexLockResultExt
namespace MallocResultExt {
inline MallocResult ok(SVal addr) {
return MallocResult { .error = nullptr, .address = addr.get() };
}
inline MallocResult from_error(std::unique_ptr<std::string> error) {
return MallocResult { .error = std::move(error), .address = 0UL };
}
} // namespace MallocResultExt
#endif /* GENMC_MIRI_INTERFACE_HPP */
@@ -5,7 +5,7 @@
#include "rust/cxx.h"
// GenMC headers:
#include "Verification/VerificationError.hpp"
#include "genmc/Verification/VerificationError.hpp"
#include <format>
#include <memory>
@@ -7,22 +7,22 @@
#include "genmc-sys/src/lib.rs.h"
// GenMC headers:
#include "ADT/value_ptr.hpp"
#include "ExecutionGraph/EventLabel.hpp"
#include "ExecutionGraph/LoadAnnotation.hpp"
#include "Runtime/InterpreterEnumAPI.hpp"
#include "Static/ModuleID.hpp"
#include "Support/ASize.hpp"
#include "Support/Error.hpp"
#include "Support/Logger.hpp"
#include "Support/MemAccess.hpp"
#include "Support/RMWOps.hpp"
#include "Support/SAddr.hpp"
#include "Support/SVal.hpp"
#include "Support/ThreadInfo.hpp"
#include "Support/Verbosity.hpp"
#include "Verification/GenMCDriver.hpp"
#include "Verification/MemoryModel.hpp"
#include "genmc/ADT/value_ptr.hpp"
#include "genmc/Execution/EventLabel.hpp"
#include "genmc/Execution/LoadAnnotation.hpp"
#include "genmc/Support/ASize.hpp"
#include "genmc/Support/ActionEnums.hpp"
#include "genmc/Support/Error.hpp"
#include "genmc/Support/Logger.hpp"
#include "genmc/Support/MemAccess.hpp"
#include "genmc/Support/ModuleVarID.hpp"
#include "genmc/Support/RMWOps.hpp"
#include "genmc/Support/SAddr.hpp"
#include "genmc/Support/SVal.hpp"
#include "genmc/Support/ThreadInfo.hpp"
#include "genmc/Support/Verbosity.hpp"
#include "genmc/Verification/GenMCDriver.hpp"
#include "genmc/Verification/MemoryModel.hpp"
// C++ headers:
#include <cmath>
@@ -47,13 +47,13 @@ auto MiriGenmcShim::schedule_next(
[](auto&& arg) {
using T = std::decay_t<decltype(arg)>;
if constexpr (std::is_same_v<T, int>)
return SchedulingResult { ExecutionState::Ok, static_cast<int32_t>(arg) };
return SchedulingResult { ExecutionStatus::Ok, static_cast<int32_t>(arg) };
else if constexpr (std::is_same_v<T, Blocked>)
return SchedulingResult { ExecutionState::Blocked, 0 };
return SchedulingResult { ExecutionStatus::Blocked, 0 };
else if constexpr (std::is_same_v<T, Error>)
return SchedulingResult { ExecutionState::Error, 0 };
return SchedulingResult { ExecutionStatus::Error, 0 };
else if constexpr (std::is_same_v<T, Finished>)
return SchedulingResult { ExecutionState::Finished, 0 };
return SchedulingResult { ExecutionStatus::Finished, 0 };
else
static_assert(false, "non-exhaustive visitor!");
},
@@ -75,39 +75,66 @@ auto MiriGenmcShim::handle_execution_end() -> std::unique_ptr<std::string> {
/**** Blocking instructions ****/
void MiriGenmcShim::handle_assume_block(ThreadId thread_id, AssumeType assume_type) {
BUG_ON(getExec().getGraph().isThreadBlocked(thread_id));
GenMCDriver::handleAssume(nullptr, inc_pos(thread_id), assume_type);
auto ret = GenMCDriver::handleAssume(nullptr, curr_pos(thread_id), assume_type);
inc_pos(thread_id, ret.count);
}
/**** Memory access handling ****/
[[nodiscard]] auto MiriGenmcShim::handle_load(
[[nodiscard]] auto MiriGenmcShim::handle_atomic_load(
ThreadId thread_id,
uint64_t address,
uint64_t size,
MemOrdering ord,
GenmcScalar old_val
) -> LoadResult {
// `type` is only used for printing.
const auto type = AType::Unsigned;
const auto ret = handle_load_reset_if_none<EventLabel::EventLabelKind::Read>(
thread_id,
const auto ret = GenMCDriver::handleRead(
nullptr,
curr_pos(thread_id),
GenmcScalarExt::try_to_sval(old_val),
ord,
SAddr(address),
ASize(size),
type
nullptr,
std::nullopt,
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&ret))
inc_pos(thread_id, ret.count);
if (const auto* err = std::get_if<VerificationError>(&ret.result))
return LoadResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(!ret_val, "Unimplemented: load returned unexpected result.");
if (std::holds_alternative<Invalid>(ret.result))
return LoadResultExt::from_invalid();
const auto* ret_val = std::get_if<SVal>(&ret.result);
// FIXME(genmc): handle `HandleResult::Reset` return value.
ERROR_ON(!ret_val, "Unimplemented: atomic load returned unexpected result.");
return LoadResultExt::from_value(*ret_val);
}
[[nodiscard]] auto MiriGenmcShim::handle_store(
[[nodiscard]] auto
MiriGenmcShim::handle_non_atomic_load(ThreadId thread_id, uint64_t address, uint64_t size)
-> LoadResult {
const auto ret = GenMCDriver::handleNALoad(
nullptr,
curr_pos(thread_id),
SAddr(address),
ASize(size),
EventDeps()
);
inc_pos(thread_id, ret.count);
if (const auto* err = std::get_if<VerificationError>(&ret.result))
return LoadResultExt::from_error(format_error(*err));
if (std::holds_alternative<Invalid>(ret.result))
return LoadResultExt::from_invalid();
// FIXME(genmc): handle `HandleResult::Reset` return value.
ERROR_ON(
!std::holds_alternative<std::monostate>(ret.result),
"Unimplemented: non-atomic load returned unexpected result."
);
return LoadResultExt::no_value();
}
[[nodiscard]] auto MiriGenmcShim::handle_atomic_store(
ThreadId thread_id,
uint64_t address,
uint64_t size,
@@ -115,31 +142,57 @@ void MiriGenmcShim::handle_assume_block(ThreadId thread_id, AssumeType assume_ty
GenmcScalar old_val,
MemOrdering ord
) -> StoreResult {
const auto pos = inc_pos(thread_id);
const auto ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::Write>(
const auto ret = GenMCDriver::handleWrite(
nullptr,
pos,
curr_pos(thread_id),
GenmcScalarExt::try_to_sval(old_val),
ord,
SAddr(address),
ASize(size),
/* type */ AType::Unsigned, // `type` is only used for printing.
GenmcScalarExt::to_sval(value),
WriteAttr(),
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&ret))
inc_pos(thread_id, ret.count);
if (const auto* err = std::get_if<VerificationError>(&ret.result))
return StoreResultExt::from_error(format_error(*err));
if (std::holds_alternative<Invalid>(ret.result))
return StoreResultExt::from_invalid();
const auto* is_co_max = std::get_if<bool>(&ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
ERROR_ON(!is_co_max, "Unimplemented: Store returned unexpected result.");
const auto* is_co_max = std::get_if<bool>(&ret.result);
// FIXME(genmc): handle `HandleResult::Reset` return value.
ERROR_ON(!is_co_max, "Unimplemented: atomic store returned unexpected result.");
return StoreResultExt::ok(*is_co_max);
}
[[nodiscard]] auto
MiriGenmcShim::handle_non_atomic_store(ThreadId thread_id, uint64_t address, uint64_t size)
-> StoreResult {
const auto ret = GenMCDriver::handleNAStore(
nullptr,
curr_pos(thread_id),
SAddr(address),
ASize(size),
EventDeps()
);
inc_pos(thread_id, ret.count);
if (const auto* err = std::get_if<VerificationError>(&ret.result))
return StoreResultExt::from_error(format_error(*err));
if (std::holds_alternative<Invalid>(ret.result))
return StoreResultExt::from_invalid();
// FIXME(genmc): handle `HandleResult::Reset` return value.
ERROR_ON(
!std::holds_alternative<std::monostate>(ret.result),
"Unimplemented: non-atomic store returned unexpected result."
);
return StoreResultExt::ok(true);
}
void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
const auto pos = inc_pos(thread_id);
GenMCDriver::handleFence(nullptr, pos, ord, EventDeps());
auto ret = GenMCDriver::handleFence(nullptr, curr_pos(thread_id), ord, EventDeps());
inc_pos(thread_id, ret.count);
}
[[nodiscard]] auto MiriGenmcShim::handle_read_modify_write(
@@ -155,45 +208,52 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
// into a load and a store component. This means we can have for example `AcqRel` loads and
// stores, but this is intended for RMW operations.
// Somewhat confusingly, the GenMC term for RMW read/write labels is
// `FaiRead` and `FaiWrite`.
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::FaiRead>(
thread_id,
const auto load_ret = GenMCDriver::handleFaiRead(
nullptr,
curr_pos(thread_id),
GenmcScalarExt::try_to_sval(old_val),
ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
rmw_op,
GenmcScalarExt::to_sval(rhs_value),
WriteAttr(),
nullptr,
std::nullopt,
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&load_ret))
inc_pos(thread_id, load_ret.count);
if (const auto* err = std::get_if<VerificationError>(&load_ret.result))
return ReadModifyWriteResultExt::from_error(format_error(*err));
if (std::holds_alternative<GenMCDriver::Invalid>(load_ret.result))
return ReadModifyWriteResultExt::from_invalid();
const auto* ret_val = std::get_if<SVal>(&load_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
const auto* ret_val = std::get_if<SVal>(&load_ret.result);
// FIXME(genmc): handle `HandleResult::Reset` return values.
ERROR_ON(!ret_val, "Unimplemented: read-modify-write returned unexpected result.");
const auto read_old_val = *ret_val;
const auto new_value =
executeRMWBinOp(read_old_val, GenmcScalarExt::to_sval(rhs_value), size, rmw_op);
const auto storePos = inc_pos(thread_id);
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::FaiWrite>(
const auto store_ret = GenMCDriver::handleFaiWrite(
nullptr,
storePos,
curr_pos(thread_id),
GenmcScalarExt::try_to_sval(old_val),
ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
new_value
new_value,
WriteAttr(),
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
inc_pos(thread_id, store_ret.count);
if (const auto* err = std::get_if<VerificationError>(&store_ret.result))
return ReadModifyWriteResultExt::from_error(format_error(*err));
if (std::holds_alternative<GenMCDriver::Invalid>(store_ret.result))
return ReadModifyWriteResultExt::from_invalid();
const auto* is_co_max = std::get_if<bool>(&store_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
const auto* is_co_max = std::get_if<bool>(&store_ret.result);
// FIXME(genmc): handle `HandleResult::Reset` return values.
ERROR_ON(!is_co_max, "Unimplemented: RMW store returned unexpected result.");
return ReadModifyWriteResultExt::ok(
/* old_value: */ read_old_val,
@@ -222,20 +282,28 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
auto expectedVal = GenmcScalarExt::to_sval(expected_value);
auto new_val = GenmcScalarExt::to_sval(new_value);
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::CasRead>(
thread_id,
const auto load_ret = GenMCDriver::handleCasRead(
nullptr,
curr_pos(thread_id),
GenmcScalarExt::try_to_sval(old_val),
success_ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
expectedVal,
new_val
new_val,
WriteAttr(),
nullptr,
std::nullopt,
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&load_ret))
inc_pos(thread_id, load_ret.count);
if (const auto* err = std::get_if<VerificationError>(&load_ret.result))
return CompareExchangeResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&load_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
if (std::holds_alternative<GenMCDriver::Invalid>(load_ret.result))
return CompareExchangeResultExt::from_invalid();
const auto* ret_val = std::get_if<SVal>(&load_ret.result);
// FIXME(genmc): handle `HandleResult::Reset` return values.
ERROR_ON(nullptr == ret_val, "Unimplemented: load returned unexpected result.");
const auto read_old_val = *ret_val;
if (read_old_val != expectedVal)
@@ -243,21 +311,25 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
// FIXME(GenMC): Add support for modelling spurious failures.
const auto storePos = inc_pos(thread_id);
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::CasWrite>(
const auto store_ret = GenMCDriver::handleCasWrite(
nullptr,
storePos,
curr_pos(thread_id),
GenmcScalarExt::try_to_sval(old_val),
success_ordering,
SAddr(address),
ASize(size),
AType::Unsigned, // The type is only used for printing.
new_val
new_val,
WriteAttr(),
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
inc_pos(thread_id, store_ret.count);
if (const auto* err = std::get_if<VerificationError>(&store_ret.result))
return CompareExchangeResultExt::from_error(format_error(*err));
const auto* is_co_max = std::get_if<bool>(&store_ret);
// FIXME(genmc): handle `HandleResult::{Invalid, Reset}` return values.
if (std::holds_alternative<GenMCDriver::Invalid>(store_ret.result))
return CompareExchangeResultExt::from_invalid();
const auto* is_co_max = std::get_if<bool>(&store_ret.result);
// FIXME(genmc): handle `HandleResult::Reset` return values.
ERROR_ON(!is_co_max, "Unimplemented: compare-exchange store returned unexpected result.");
return CompareExchangeResultExt::success(read_old_val, *is_co_max);
}
@@ -265,33 +337,45 @@ void MiriGenmcShim::handle_fence(ThreadId thread_id, MemOrdering ord) {
/**** Memory (de)allocation ****/
auto MiriGenmcShim::handle_malloc(ThreadId thread_id, uint64_t size, uint64_t alignment)
-> uint64_t {
const auto pos = inc_pos(thread_id);
-> MallocResult {
// These are only used for printing and features Miri-GenMC doesn't support (yet).
const auto storage_duration = StorageDuration::SD_Heap;
// Volatile, as opposed to "persistent" (i.e., non-volatile memory that persists over reboots)
const auto storage_type = StorageType::ST_Volatile;
const auto address_space = AddressSpace::AS_User;
const SVal ret_val = GenMCDriver::handleMalloc(
const auto ret = GenMCDriver::handleMalloc(
nullptr,
pos,
curr_pos(thread_id),
size,
alignment,
storage_duration,
storage_type,
address_space,
nullptr,
"",
EventDeps()
);
return ret_val.get();
inc_pos(thread_id, ret.count);
if (const auto* err = std::get_if<VerificationError>(&ret.result))
return MallocResultExt::from_error(format_error(*err));
const auto* addr = std::get_if<SVal>(&ret.result);
ERROR_ON(!addr, "Unimplemented: malloc returned unexpected result.");
return MallocResultExt::ok(*addr);
}
auto MiriGenmcShim::handle_free(ThreadId thread_id, uint64_t address)
-> std::unique_ptr<std::string> {
auto pos = inc_pos(thread_id);
auto ret = GenMCDriver::handleFree(nullptr, pos, SAddr(address), EventDeps());
return ret.has_value() ? format_error(*ret) : nullptr;
auto ret = GenMCDriver::handleFree(nullptr, curr_pos(thread_id), SAddr(address), EventDeps());
inc_pos(thread_id, ret.count);
if (const auto* err = std::get_if<VerificationError>(&ret.result))
return format_error(*err);
ERROR_ON(
!std::holds_alternative<std::monostate>(ret.result),
"Unimplemented: free returned unexpected result."
);
return nullptr;
}
/**** Estimation mode result ****/
@@ -325,12 +409,12 @@ auto MiriGenmcShim::handle_mutex_lock(ThreadId thread_id, uint64_t address, uint
const auto annot = std::move(Annotation(
AssumeType::Spinloop,
Annotation::ExprVP(
NeExpr<ModuleID::ID>::create(
NeExpr<ModuleVarID>::create(
// `RegisterExpr` marks the value of the current expression, i.e., the loaded value.
// The `id` is ignored by GenMC; it is only used by the LLI frontend to substitute
// other variables from previous expressions that may be used here.
RegisterExpr<ModuleID::ID>::create(size_bits, /* id */ 0),
ConcreteExpr<ModuleID::ID>::create(size_bits, MutexState::LOCKED)
RegisterExpr<ModuleVarID>::create(size_bits, /* id */ 0),
ConcreteExpr<ModuleVarID>::create(size_bits, MutexState::LOCKED)
)
.release()
)
@@ -340,26 +424,34 @@ auto MiriGenmcShim::handle_mutex_lock(ThreadId thread_id, uint64_t address, uint
// access, if there previously was a non-atomic initializing access. We set the initial state of
// a mutex to be "unlocked".
const auto old_val = MutexState::UNLOCKED;
const auto load_ret = handle_load_reset_if_none<EventLabel::EventLabelKind::LockCasRead>(
thread_id,
const auto load_ret = GenMCDriver::handleLockCasRead(
nullptr,
curr_pos(thread_id),
old_val,
address,
size,
annot,
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&load_ret))
inc_pos(thread_id, load_ret.count);
if (const auto* err = std::get_if<VerificationError>(&load_ret.result))
return MutexLockResultExt::from_error(format_error(*err));
if (std::holds_alternative<GenMCDriver::Invalid>(load_ret.result))
return MutexLockResultExt::from_invalid();
// If we get a `Reset`, GenMC decided that this lock operation should not yet run, since it
// would not acquire the mutex. Like the handling of the case further down where we read a `1`
// ("Mutex already locked"), Miri should call the handle function again once the current thread
// is scheduled by GenMC the next time.
if (std::holds_alternative<Reset>(load_ret))
if (std::holds_alternative<Reset>(load_ret.result))
return MutexLockResultExt::reset();
const auto* ret_val = std::get_if<SVal>(&load_ret);
const auto* ret_val = std::get_if<SVal>(&load_ret.result);
ERROR_ON(!ret_val, "Unimplemented: mutex lock returned unexpected result.");
ERROR_ON(!MutexState::isValid(*ret_val), "Mutex read value was neither 0 nor 1");
ERROR_ON(
!MutexState::isValid(*ret_val),
"Mutex read value was neither 0 nor 1 ({})",
std::to_string(ret_val->get())
);
if (*ret_val == MutexState::LOCKED) {
// We did not acquire the mutex, so we tell GenMC to block the thread until we can acquire
// it. GenMC determines this based on the annotation we pass with the load further up in
@@ -368,69 +460,72 @@ auto MiriGenmcShim::handle_mutex_lock(ThreadId thread_id, uint64_t address, uint
return MutexLockResultExt::ok(false);
}
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::LockCasWrite>(
nullptr,
inc_pos(thread_id),
old_val,
address,
size,
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
const auto store_ret =
GenMCDriver::handleLockCasWrite(nullptr, curr_pos(thread_id), address, size, EventDeps());
inc_pos(thread_id, store_ret.count);
if (const auto* err = std::get_if<VerificationError>(&store_ret.result))
return MutexLockResultExt::from_error(format_error(*err));
if (std::holds_alternative<GenMCDriver::Invalid>(store_ret.result))
return MutexLockResultExt::from_invalid();
// We don't update Miri's memory for this operation so we don't need to know if the store
// was the co-maximal store, but we still check that we at least get a boolean as the result
// of the store.
const auto* is_co_max = std::get_if<bool>(&store_ret);
const auto* is_co_max = std::get_if<bool>(&store_ret.result);
ERROR_ON(!is_co_max, "Unimplemented: mutex_try_lock store returned unexpected result.");
return MutexLockResultExt::ok(true);
}
auto MiriGenmcShim::handle_mutex_try_lock(ThreadId thread_id, uint64_t address, uint64_t size)
-> MutexLockResult {
auto& currPos = threads_action_[thread_id].event;
// As usual, we need to tell GenMC which value was stored at this location before this atomic
// access, if there previously was a non-atomic initializing access. We set the initial state of
// a mutex to be "unlocked".
const auto old_val = MutexState::UNLOCKED;
const auto load_ret = GenMCDriver::handleLoad<EventLabel::EventLabelKind::TrylockCasRead>(
const auto load_ret = GenMCDriver::handleTrylockCasRead(
nullptr,
++currPos,
curr_pos(thread_id),
old_val,
SAddr(address),
ASize(size)
ASize(size),
std::nullopt,
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&load_ret))
inc_pos(thread_id, load_ret.count);
if (const auto* err = std::get_if<VerificationError>(&load_ret.result))
return MutexLockResultExt::from_error(format_error(*err));
const auto* ret_val = std::get_if<SVal>(&load_ret);
if (std::holds_alternative<GenMCDriver::Invalid>(load_ret.result))
return MutexLockResultExt::from_invalid();
const auto* ret_val = std::get_if<SVal>(&load_ret.result);
ERROR_ON(!ret_val, "Unimplemented: mutex trylock load returned unexpected result.");
ERROR_ON(!MutexState::isValid(*ret_val), "Mutex read value was neither 0 nor 1");
if (*ret_val == MutexState::LOCKED)
return MutexLockResultExt::ok(false); /* Lock already held. */
const auto store_ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::TrylockCasWrite>(
const auto store_ret = GenMCDriver::handleTrylockCasWrite(
nullptr,
++currPos,
old_val,
curr_pos(thread_id),
SAddr(address),
ASize(size)
ASize(size),
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&store_ret))
inc_pos(thread_id, store_ret.count);
if (const auto* err = std::get_if<VerificationError>(&store_ret.result))
return MutexLockResultExt::from_error(format_error(*err));
if (std::holds_alternative<GenMCDriver::Invalid>(store_ret.result))
return MutexLockResultExt::from_invalid();
// We don't update Miri's memory for this operation so we don't need to know if the store was
// co-maximal, but we still check that we get a boolean result.
const auto* is_co_max = std::get_if<bool>(&store_ret);
const auto* is_co_max = std::get_if<bool>(&store_ret.result);
ERROR_ON(!is_co_max, "Unimplemented: store part of mutex try_lock returned unexpected result.");
return MutexLockResultExt::ok(true);
}
auto MiriGenmcShim::handle_mutex_unlock(ThreadId thread_id, uint64_t address, uint64_t size)
-> StoreResult {
const auto pos = inc_pos(thread_id);
const auto ret = GenMCDriver::handleStore<EventLabel::EventLabelKind::UnlockWrite>(
const auto ret = GenMCDriver::handleUnlockWrite(
nullptr,
pos,
curr_pos(thread_id),
// As usual, we need to tell GenMC which value was stored at this location before this
// atomic access, if there previously was a non-atomic initializing access. We set the
// initial state of a mutex to be "unlocked".
@@ -438,13 +533,16 @@ auto MiriGenmcShim::handle_mutex_unlock(ThreadId thread_id, uint64_t address, ui
MemOrdering::Release,
SAddr(address),
ASize(size),
AType::Signed,
/* store_value */ MutexState::UNLOCKED,
WriteAttr(),
EventDeps()
);
if (const auto* err = std::get_if<VerificationError>(&ret))
inc_pos(thread_id, ret.count);
if (const auto* err = std::get_if<VerificationError>(&ret.result))
return StoreResultExt::from_error(format_error(*err));
const auto* is_co_max = std::get_if<bool>(&ret);
if (std::holds_alternative<GenMCDriver::Invalid>(ret.result))
return StoreResultExt::from_invalid();
const auto* is_co_max = std::get_if<bool>(&ret.result);
ERROR_ON(!is_co_max, "Unimplemented: store part of mutex unlock returned unexpected result.");
return StoreResultExt::ok(*is_co_max);
}
@@ -452,40 +550,62 @@ auto MiriGenmcShim::handle_mutex_unlock(ThreadId thread_id, uint64_t address, ui
/** Thread creation/joining */
void MiriGenmcShim::handle_thread_create(ThreadId thread_id, ThreadId parent_id) {
// NOTE: The threadCreate event happens in the parent:
const auto pos = inc_pos(parent_id);
// FIXME(genmc): for supporting symmetry reduction, these will need to be properly set:
const unsigned fun_id = 0;
const SVal arg = SVal(0);
const ThreadInfo child_info =
ThreadInfo { thread_id, parent_id, fun_id, arg, "unknown thread" };
const auto child_tid = GenMCDriver::handleThreadCreate(nullptr, pos, child_info, EventDeps());
// NOTE: The threadCreate event happens in the parent:
const auto ret =
GenMCDriver::handleThreadCreate(nullptr, curr_pos(parent_id), child_info, EventDeps());
inc_pos(parent_id, ret.count);
ERROR_ON(
!std::holds_alternative<int>(ret.result),
"Unimplemented: unexpected return value for thread create"
);
auto child_tid = std::get<int>(ret.result);
// Sanity check the thread id, which is the index in the `threads_action_` array.
BUG_ON(child_tid != thread_id || child_tid <= 0 || child_tid != threads_action_.size());
VERIFY(child_tid == thread_id && child_tid > 0 && child_tid == threads_action_.size());
threads_action_.push_back(Action(ActionKind::Load, Event(child_tid, 0)));
}
void MiriGenmcShim::handle_thread_join(ThreadId thread_id, ThreadId child_id) {
// The thread join event happens in the parent.
const auto pos = inc_pos(thread_id);
const auto ret = GenMCDriver::handleThreadJoin(nullptr, pos, child_id, EventDeps());
// If the join failed, decrease the event index again:
if (!std::holds_alternative<SVal>(ret)) {
dec_pos(thread_id);
}
// FIXME(genmc): handle `HandleResult::{Invalid, Reset, VerificationError}` return values.
const auto ret =
GenMCDriver::handleThreadJoin(nullptr, curr_pos(thread_id), child_id, EventDeps());
inc_pos(thread_id, ret.count);
// FIXME(genmc): handle `HandleResult::{Invalid, VerificationError}` return values.
ERROR_ON(
!std::holds_alternative<SVal>(ret.result) && !std::holds_alternative<Reset>(ret.result),
"Unimplemented: unexpected return value for thread join"
);
// FIXME(genmc): Here Reset{} is silently accepted. Double-check why that is.
// The reason is likely that, although GenMC wants to re-run the join instruction,
// when GenMC deems that the join has executed, it will also deem it successful,
// i.e., the return value is guaranteed to be 0 (or at least we assume that).
// In this case, it doesn't matter that we don't re-run the instruction, since
// Miri sets the correct return value, and GenMC will only schedule this thread
// when it knows the child has terminated.
// NOTE: Thread return value is ignored, since Miri doesn't need it.
}
void MiriGenmcShim::handle_thread_finish(ThreadId thread_id, uint64_t ret_val) {
const auto pos = inc_pos(thread_id);
GenMCDriver::handleThreadFinish(nullptr, pos, SVal(ret_val));
auto ret = GenMCDriver::handleThreadFinish(nullptr, curr_pos(thread_id), SVal(ret_val));
inc_pos(thread_id, ret.count);
ERROR_ON(
!std::holds_alternative<std::monostate>(ret.result),
"Unimplemented: unexpected return value for thread finish"
);
}
void MiriGenmcShim::handle_thread_kill(ThreadId thread_id) {
const auto pos = inc_pos(thread_id);
GenMCDriver::handleThreadKill(nullptr, pos);
auto ret = GenMCDriver::handleThreadKill(nullptr, curr_pos(thread_id));
inc_pos(thread_id, ret.count);
ERROR_ON(
!std::holds_alternative<std::monostate>(ret.result),
"Unimplemented: unexpected return value for thread kill"
);
}
@@ -7,9 +7,8 @@
#include "genmc-sys/src/lib.rs.h"
// GenMC headers:
#include "Support/Error.hpp"
#include "Support/Verbosity.hpp"
#include "Verification/InterpreterCallbacks.hpp"
#include "genmc/Support/Error.hpp"
#include "genmc/Support/Verbosity.hpp"
// C++ headers:
#include <cstdint>
@@ -113,6 +112,13 @@ static auto to_genmc_verbosity_level(const LogLevel log_level) -> VerbosityLevel
// value written by the skipped thread.
conf->replayCompletedThreads = true;
// Initialization checking is done by Miri; GenMC's checks are incorrect for Rust.
conf->disableInitializationChecks = true;
// Don't check static-address validity as it's incompatible with Miri's
// dynamic discovery of static variables.
conf->disableStaticValidityChecks = true;
// FIXME(genmc): implement symmetry reduction.
ERROR_ON(
params.do_symmetry_reduction,
@@ -160,45 +166,5 @@ static auto to_genmc_verbosity_level(const LogLevel log_level) -> VerbosityLevel
// Create the actual driver and Miri-GenMC communication shim.
auto driver = std::make_unique<MiriGenmcShim>(std::move(conf), mode);
// FIXME(genmc,HACK): Until a proper solution is implemented in GenMC, these callbacks will
// allow Miri to return information about global allocations and override uninitialized memory
// checks for non-atomic loads (Miri handles those without GenMC, so the error would be wrong).
auto interpreter_callbacks = InterpreterCallbacks {
// Miri already ensures that memory accesses are valid, so this check doesn't matter.
// We check that the address is static, but skip checking if it is part of an actual
// allocation.
.isStaticallyAllocated = [](SAddr addr) { return addr.isStatic(); },
// FIXME(genmc,error reporting): Once a proper a proper API for passing such information is
// implemented in GenMC, Miri should use it to improve the produced error messages.
.getStaticName = [](SAddr addr) { return "[UNKNOWN STATIC]"; },
// This function is called to get the initial value stored at the given address.
//
// From a Miri perspective, this API doesn't work very well: most memory starts out
// "uninitialized";
// only statics have an initial value. And their initial value is just a sequence of bytes,
// but GenMC expect this to be already split into separate atomic variables. So we return a
// dummy value.
// This value should never be visible to the interpreted program.
// GenMC does not understand uninitialized memory the same way Miri does, which may cause
// this function to be called. The returned value can be visible to Miri or the user:
// - Printing the execution graph may contain this value in place of uninitialized values.
// FIXME(genmc): NOTE: printing the execution graph is not yet implemented.
// - Non-atomic loads may return this value, but Miri ignores values of non-atomic loads.
// - Atomic loads will *not* see this value once mixed atomic-non-atomic support is added.
// Currently, atomic loads can see this value, unless initialized by an *atomic* store.
// FIXME(genmc): update this comment once mixed atomic-non-atomic support is added.
//
// FIXME(genmc): implement proper support for uninitialized memory in GenMC.
// Ideally, the initial value getter would return an `optional<SVal>`, since the memory
// location may be uninitialized.
.initValGetter = [](const AAccess& a) { return SVal(0xDEAD); },
// Miri serves non-atomic loads from its own memory and these GenMC checks are wrong in that
// case. This should no longer be required with proper mixed-size access support.
.skipUninitLoadChecks = [](const MemAccessLabel* access_label
) { return access_label->getOrdering() == MemOrdering::NotAtomic; },
};
driver->setInterpCallbacks(std::move(interpreter_callbacks));
return driver;
}
+39 -10
View File
@@ -140,15 +140,15 @@ enum LogLevel {
/// Log errors, warnings and tips.
Tip,
/// Debug print considered revisits.
/// Downgraded to `Tip` if `GENMC_DEBUG` is not enabled.
/// Downgraded to `Tip` if `ENABLE_GENMC_DEBUG` is not enabled.
Debug1Revisits,
/// Print the execution graph after every memory access.
/// Also includes the previous debug log level.
/// Downgraded to `Tip` if `GENMC_DEBUG` is not enabled.
/// Downgraded to `Tip` if `ENABLE_GENMC_DEBUG` is not enabled.
Debug2MemoryAccesses,
/// Print reads-from values considered by GenMC.
/// Also includes the previous debug log level.
/// Downgraded to `Tip` if `GENMC_DEBUG` is not enabled.
/// Downgraded to `Tip` if `ENABLE_GENMC_DEBUG` is not enabled.
Debug3ReadsFrom,
}
@@ -182,7 +182,7 @@ struct GenmcScalar {
#[must_use]
#[derive(Debug, Clone, Copy)]
enum ExecutionState {
enum ExecutionStatus {
Ok,
Error,
Blocked,
@@ -192,7 +192,7 @@ enum ExecutionState {
#[must_use]
#[derive(Debug)]
struct SchedulingResult {
exec_state: ExecutionState,
exec_status: ExecutionStatus,
next_thread: i32,
}
@@ -212,10 +212,10 @@ struct EstimationResult {
#[must_use]
#[derive(Debug)]
struct LoadResult {
/// If `true`, exploration should be dropped, **and all other fields are invalid**.
invalid: bool,
/// If not null, contains the error encountered during the handling of the load.
error: UniquePtr<CxxString>,
/// Indicates whether a value was read or not.
has_value: bool,
/// The value that was read. Should not be used if `has_value` is `false`.
read_value: GenmcScalar,
}
@@ -223,6 +223,8 @@ struct LoadResult {
#[must_use]
#[derive(Debug)]
struct StoreResult {
/// If `true`, exploration should be dropped, **and all other fields are invalid**.
invalid: bool,
/// If not null, contains the error encountered during the handling of the store.
error: UniquePtr<CxxString>,
/// `true` if the write should also be reflected in Miri's memory representation.
@@ -232,6 +234,8 @@ struct StoreResult {
#[must_use]
#[derive(Debug)]
struct ReadModifyWriteResult {
/// If `true`, exploration should be dropped, **and all other fields are invalid**.
invalid: bool,
/// If there was an error, it will be stored in `error`, otherwise it is `None`.
error: UniquePtr<CxxString>,
/// The value that was read by the RMW operation as the left operand.
@@ -245,6 +249,8 @@ struct ReadModifyWriteResult {
#[must_use]
#[derive(Debug)]
struct CompareExchangeResult {
/// If `true`, exploration should be dropped, **and all other fields are invalid**.
invalid: bool,
/// If there was an error, it will be stored in `error`, otherwise it is `None`.
error: UniquePtr<CxxString>,
/// The value that was read by the compare-exchange.
@@ -258,6 +264,8 @@ struct CompareExchangeResult {
#[must_use]
#[derive(Debug)]
struct MutexLockResult {
/// If `true`, exploration should be dropped, **and all other fields are invalid**.
invalid: bool,
/// If there was an error, it will be stored in `error`, otherwise it is `None`.
error: UniquePtr<CxxString>,
/// If true, GenMC determined that we should retry the mutex lock operation once the thread attempting to lock is scheduled again.
@@ -266,6 +274,15 @@ struct MutexLockResult {
is_lock_acquired: bool,
}
#[must_use]
#[derive(Debug)]
struct MallocResult {
/// If not null, contains the error encountered during the handling of malloc.
error: UniquePtr<CxxString>,
/// The allocated address.
address: u64,
}
/**** These are GenMC types that we have to copy-paste here since cxx does not support
"importing" externally defined C++ types. ****/
@@ -385,7 +402,7 @@ unsafe fn create_handle(
/***** Functions for handling events encountered during program execution. *****/
/**** Memory access handling ****/
fn handle_load(
fn handle_atomic_load(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
address: u64,
@@ -393,6 +410,12 @@ fn handle_load(
memory_ordering: MemOrdering,
old_value: GenmcScalar,
) -> LoadResult;
fn handle_non_atomic_load(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
address: u64,
size: u64,
) -> LoadResult;
fn handle_read_modify_write(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
@@ -415,7 +438,7 @@ fn handle_compare_exchange(
fail_load_ordering: MemOrdering,
can_fail_spuriously: bool,
) -> CompareExchangeResult;
fn handle_store(
fn handle_atomic_store(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
address: u64,
@@ -424,6 +447,12 @@ fn handle_store(
old_value: GenmcScalar,
memory_ordering: MemOrdering,
) -> StoreResult;
fn handle_non_atomic_store(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
address: u64,
size: u64,
) -> StoreResult;
fn handle_fence(
self: Pin<&mut MiriGenmcShim>,
thread_id: i32,
@@ -436,7 +465,7 @@ fn handle_malloc(
thread_id: i32,
size: u64,
alignment: u64,
) -> u64;
) -> MallocResult;
/// Returns true if an error was found.
fn handle_free(
self: Pin<&mut MiriGenmcShim>,
+1 -1
View File
@@ -1 +1 @@
4c4205163abcbd08948b3efab796c543ba1ea687
e22c616e4e87914135c1db261a03e0437255335e
+3 -1
View File
@@ -170,7 +170,9 @@ fn addr_from_alloc_id_uncached(
{
let fn_sig = this.tcx.instantiate_bound_regions_with_erased(
this.tcx
.fn_sig(instance.def_id()).instantiate(*this.tcx, instance.args).skip_norm_wip(),
.fn_sig(instance.def_id())
.instantiate(*this.tcx, instance.args)
.skip_norm_wip(),
);
let fn_ptr = crate::shims::native_lib::build_libffi_closure(this, fn_sig)?;
@@ -4,7 +4,6 @@
use rustc_middle::mir;
use rustc_middle::mir::interpret;
use rustc_middle::ty::ScalarInt;
use tracing::debug;
use super::GenmcScalar;
use crate::alloc_addresses::EvalContextExt as _;
@@ -13,33 +12,6 @@
/// Maximum size memory access in bytes that GenMC supports.
pub(super) const MAX_ACCESS_SIZE: u64 = 8;
/// This function is used to split up a large memory access into aligned, non-overlapping chunks of a limited size.
/// Returns an iterator over the chunks, yielding `(base address, size)` of each chunk, ordered by address.
pub fn split_access(address: Size, size: Size) -> impl Iterator<Item = (u64, u64)> {
let start_address = address.bytes();
let end_address = start_address + size.bytes();
let start_address_aligned = start_address.next_multiple_of(MAX_ACCESS_SIZE);
let end_address_aligned = (end_address / MAX_ACCESS_SIZE) * MAX_ACCESS_SIZE; // prev_multiple_of
debug!(
"GenMC: splitting NA memory access into {MAX_ACCESS_SIZE} byte chunks: {}B + {} * {MAX_ACCESS_SIZE}B + {}B = {size:?}",
start_address_aligned - start_address,
(end_address_aligned - start_address_aligned) / MAX_ACCESS_SIZE,
end_address - end_address_aligned,
);
// FIXME(genmc): could make remaining accesses powers-of-2, instead of 1 byte.
let start_chunks = (start_address..start_address_aligned).map(|address| (address, 1));
let aligned_chunks = (start_address_aligned..end_address_aligned)
.step_by(MAX_ACCESS_SIZE.try_into().unwrap())
.map(|address| (address, MAX_ACCESS_SIZE));
let end_chunks = (end_address_aligned..end_address).map(|address| (address, 1));
start_chunks.chain(aligned_chunks).chain(end_chunks)
}
/// Inverse function to `scalar_to_genmc_scalar`.
///
/// Convert a Miri `Scalar` to a `GenmcScalar`.
+91 -77
View File
@@ -19,7 +19,6 @@
};
use self::run::GenmcMode;
use self::thread_id_map::ThreadIdMap;
use crate::concurrency::genmc::helper::split_access;
use crate::diagnostics::SpanDedupDiagnostic;
use crate::intrinsics::AtomicRmwOp;
use crate::*;
@@ -267,8 +266,13 @@ pub(crate) fn atomic_load<'tcx>(
} else {
GenmcScalar::UNINIT
};
let read_value =
self.handle_load(&ecx.machine, address, size, ordering.to_genmc(), genmc_old_value)?;
let read_value = self.handle_atomic_load(
&ecx.machine,
address,
size,
ordering.to_genmc(),
genmc_old_value,
)?;
genmc_scalar_to_scalar(ecx, self, read_value, size)
}
@@ -292,7 +296,7 @@ pub(crate) fn atomic_store<'tcx>(
} else {
GenmcScalar::UNINIT
};
self.handle_store(
self.handle_atomic_store(
&ecx.machine,
address,
size,
@@ -447,6 +451,9 @@ pub(crate) fn atomic_compare_exchange<'tcx>(
can_fail_spuriously,
);
if cas_result.invalid {
throw_machine_stop!(TerminationInfo::GenmcSkip);
}
if let Some(error) = cas_result.error.as_ref() {
// FIXME(genmc): error handling
throw_ub_format!("{}", error.to_string_lossy());
@@ -488,32 +495,7 @@ pub(crate) fn memory_load<'tcx>(
return interp_ok(());
}
let handle_load = |address, size| {
// NOTE: Values loaded non-atomically are still handled by Miri, so we discard whatever we get from GenMC
let _read_value = self.handle_load(
machine,
address,
size,
MemOrdering::NotAtomic,
// This value is used to update the co-maximal store event to the same location.
// We don't need to update that store, since if it is ever read by any atomic loads, the value will be updated then.
// We use uninit for lack of a better value, since we don't know whether the location we currently load from is initialized or not.
GenmcScalar::UNINIT,
)?;
interp_ok(())
};
// This load is small enough so GenMC can handle it.
if size.bytes() <= MAX_ACCESS_SIZE {
return handle_load(address, size);
}
// This load is too big to be a single GenMC access, we have to split it.
// FIXME(genmc): This will misbehave if there are non-64bit-atomics in there.
// Needs proper support on the GenMC side for large and mixed atomic accesses.
for (address, size) in split_access(address, size) {
handle_load(Size::from_bytes(address), Size::from_bytes(size))?;
}
self.handle_non_atomic_load(machine, address, size)?;
interp_ok(())
}
@@ -540,40 +522,7 @@ pub(crate) fn memory_store<'tcx>(
return interp_ok(());
}
let handle_store = |address, size| {
// We always write the the stored values to Miri's memory, whether GenMC says the write is co-maximal or not.
// The GenMC scheduler ensures that replaying an execution happens in porf-respecting order (po := program order, rf: reads-from order).
// This means that for any non-atomic read Miri performs, the corresponding write has already been replayed.
let _is_co_max_write = self.handle_store(
machine,
address,
size,
// We don't know the value that this store will write, but GenMC expects that we give it an actual value.
// Unfortunately, there are situations where this value can actually become visible
// to the program: when there is an atomic load reading from a non-atomic store.
// FIXME(genmc): update once mixed atomic-non-atomic support is added. Afterwards, this value should never be readable.
GenmcScalar::from_u64(0xDEADBEEF),
// This value is used to update the co-maximal store event to the same location.
// This old value cannot be read anymore by any future loads, since we are doing another non-atomic store to the same location.
// Any future load will either see the store we are adding now, or we have a data race (there can only be one possible non-atomic value to read from at any time).
// We use uninit for lack of a better value, since we don't know whether the location we currently write to is initialized or not.
GenmcScalar::UNINIT,
MemOrdering::NotAtomic,
)?;
interp_ok(())
};
// This store is small enough so GenMC can handle it.
if size.bytes() <= MAX_ACCESS_SIZE {
return handle_store(address, size);
}
// This store is too big to be a single GenMC access, we have to split it.
// FIXME(genmc): This will misbehave if there are non-64bit-atomics in there.
// Needs proper support on the GenMC side for large and mixed atomic accesses.
for (address, size) in split_access(address, size) {
handle_store(Size::from_bytes(address), Size::from_bytes(size))?;
}
self.handle_non_atomic_store(machine, address, size)?;
interp_ok(())
}
@@ -599,14 +548,15 @@ pub(crate) fn handle_alloc<'tcx>(
}
// GenMC doesn't support ZSTs, so we set the minimum size to 1 byte
let genmc_size = size.bytes().max(1);
let chosen_address = self.handle.borrow_mut().pin_mut().handle_malloc(
let malloc_result = self.handle.borrow_mut().pin_mut().handle_malloc(
self.active_thread_genmc_tid(machine),
genmc_size,
alignment.bytes(),
);
if chosen_address == 0 {
if let Some(_error) = malloc_result.error.as_ref() {
throw_exhaust!(AddressSpaceFull);
}
let chosen_address = malloc_result.address;
// Non-global addresses should not be in the global address space.
assert_eq!(0, chosen_address & GENMC_GLOBAL_ADDRESSES_MASK);
@@ -735,9 +685,9 @@ pub(crate) fn handle_exit<'tcx>(
}
impl GenmcCtx {
/// Inform GenMC about a load (atomic or non-atomic).
/// Inform GenMC about an atomic load.
/// Returns the value that GenMC wants this load to read.
fn handle_load<'tcx>(
fn handle_atomic_load<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
address: Size,
@@ -758,7 +708,7 @@ fn handle_load<'tcx>(
"GenMC: load, address: {addr} == {addr:#x}, size: {size:?}, ordering: {memory_ordering:?}, old_value: {genmc_old_value:x?}",
addr = address.bytes()
);
let load_result = self.handle.borrow_mut().pin_mut().handle_load(
let load_result = self.handle.borrow_mut().pin_mut().handle_atomic_load(
self.active_thread_genmc_tid(machine),
address.bytes(),
size.bytes(),
@@ -766,23 +716,51 @@ fn handle_load<'tcx>(
genmc_old_value,
);
if load_result.invalid {
throw_machine_stop!(TerminationInfo::GenmcSkip);
}
if let Some(error) = load_result.error.as_ref() {
// FIXME(genmc): error handling
throw_ub_format!("{}", error.to_string_lossy());
}
if !load_result.has_value {
// FIXME(GenMC): Implementing certain GenMC optimizations will lead to this.
unimplemented!("GenMC: load returned no value.");
}
debug!("GenMC: load returned value: {:?}", load_result.read_value);
interp_ok(load_result.read_value)
}
/// Inform GenMC about a store (atomic or non-atomic).
/// Inform GenMC about a non-atomic load.
fn handle_non_atomic_load<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
address: Size,
size: Size,
) -> InterpResult<'tcx> {
assert!(size.bytes() != 0);
debug!(
"GenMC: NA load, address: {addr} == {addr:#x}, size: {size:?}",
addr = address.bytes()
);
let load_result = self.handle.borrow_mut().pin_mut().handle_non_atomic_load(
self.active_thread_genmc_tid(machine),
address.bytes(),
size.bytes(),
);
if load_result.invalid {
throw_machine_stop!(TerminationInfo::GenmcSkip);
}
if let Some(error) = load_result.error.as_ref() {
// FIXME(genmc): error handling
throw_ub_format!("{}", error.to_string_lossy());
}
// `load_result.read_value` is just a dummy for non-atomic loads. And anyway Miri doesn't
// give us a chance to change the value here, it'll always use the one from its memory.
interp_ok(())
}
/// Inform GenMC about an atomic store.
/// Returns true if the store is co-maximal, i.e., it should be written to Miri's memory too.
fn handle_store<'tcx>(
fn handle_atomic_store<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
address: Size,
@@ -804,7 +782,7 @@ fn handle_store<'tcx>(
"GenMC: store, address: {addr} = {addr:#x}, size: {size:?}, ordering {memory_ordering:?}, value: {genmc_value:?}",
addr = address.bytes()
);
let store_result = self.handle.borrow_mut().pin_mut().handle_store(
let store_result = self.handle.borrow_mut().pin_mut().handle_atomic_store(
self.active_thread_genmc_tid(machine),
address.bytes(),
size.bytes(),
@@ -813,6 +791,9 @@ fn handle_store<'tcx>(
memory_ordering,
);
if store_result.invalid {
throw_machine_stop!(TerminationInfo::GenmcSkip);
}
if let Some(error) = store_result.error.as_ref() {
// FIXME(genmc): error handling
throw_ub_format!("{}", error.to_string_lossy());
@@ -821,6 +802,36 @@ fn handle_store<'tcx>(
interp_ok(store_result.is_coherence_order_maximal_write)
}
/// Inform GenMC about a non-atomic store.
fn handle_non_atomic_store<'tcx>(
&self,
machine: &MiriMachine<'tcx>,
address: Size,
size: Size,
) -> InterpResult<'tcx> {
assert!(size.bytes() != 0);
debug!(
"GenMC: NA store, address: {addr} = {addr:#x}, size: {size:?}",
addr = address.bytes()
);
let store_result = self.handle.borrow_mut().pin_mut().handle_non_atomic_store(
self.active_thread_genmc_tid(machine),
address.bytes(),
size.bytes(),
);
if store_result.invalid {
throw_machine_stop!(TerminationInfo::GenmcSkip);
}
if let Some(error) = store_result.error.as_ref() {
// FIXME(genmc): error handling
throw_ub_format!("{}", error.to_string_lossy());
}
// Miri will always write non-atomic stores to memory. Make sure GenMC agrees with that.
assert!(store_result.is_coherence_order_maximal_write);
interp_ok(())
}
/// Inform GenMC about an atomic read-modify-write operation.
/// This includes atomic swap (also often called "exchange"), but does *not*
/// include compare-exchange (see `RMWBinOp` for full list of operations).
@@ -859,6 +870,9 @@ fn handle_atomic_rmw_op<'tcx>(
genmc_old_value,
);
if rmw_result.invalid {
throw_machine_stop!(TerminationInfo::GenmcSkip);
}
if let Some(error) = rmw_result.error.as_ref() {
// FIXME(genmc): error handling
throw_ub_format!("{}", error.to_string_lossy());
@@ -1,4 +1,4 @@
use genmc_sys::{ActionKind, ExecutionState};
use genmc_sys::{ActionKind, ExecutionStatus};
use rustc_data_structures::either::Either;
use rustc_middle::mir::TerminatorKind;
use rustc_middle::ty::{self, Ty};
@@ -117,9 +117,9 @@ pub(crate) fn schedule_thread<'tcx>(
let result = self.handle.borrow_mut().pin_mut().schedule_next(genmc_tid, atomic_kind);
// Depending on the exec_state, we either schedule the given thread, or we are finished with this execution.
match result.exec_state {
ExecutionState::Ok => interp_ok(Some(thread_infos.get_miri_tid(result.next_thread))),
ExecutionState::Blocked => {
match result.exec_status {
ExecutionStatus::Ok => interp_ok(Some(thread_infos.get_miri_tid(result.next_thread))),
ExecutionStatus::Blocked => {
// This execution doesn't need further exploration. We treat this as "success, no
// leak check needed", which makes it a NOP in the big outer loop.
throw_machine_stop!(TerminationInfo::Exit {
@@ -127,7 +127,7 @@ pub(crate) fn schedule_thread<'tcx>(
leak_check: false,
});
}
ExecutionState::Finished => {
ExecutionStatus::Finished => {
let exit_status = self.exec_state.exit_status.get().expect(
"If the execution is finished, we should have a return value from the program.",
);
@@ -136,7 +136,7 @@ pub(crate) fn schedule_thread<'tcx>(
leak_check: matches!(exit_status.exit_type, super::ExitType::MainThreadFinish),
});
}
ExecutionState::Error => {
ExecutionStatus::Error => {
// GenMC found an error in one of the `handle_*` functions, but didn't return the detected error from the function immediately.
// This is still an bug in the user program, so we print the error string.
panic!(
@@ -96,10 +96,7 @@ fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E
/// Read-only lookup (avoid read-acquiring the RefCell).
fn get(&self, k: K) -> Option<&V> {
let val: *const V = match self.0.borrow().get(&k) {
Some(v) => &**v,
None => return None,
};
let val: *const V = &**self.0.borrow().get(&k)?;
// This is safe because `val` points into a `Box`, that we know will not move and
// will also not be dropped as long as the shared reference `self` is live.
unsafe { Some(&*val) }
+8 -1
View File
@@ -18,7 +18,7 @@ pub enum TerminationInfo {
leak_check: bool,
},
Abort(String),
/// Miri was interrupted by a Ctrl+C from the user
/// Miri was interrupted by a Ctrl+C from the user.
Interrupted,
UnsupportedInIsolation(String),
StackedBorrowsUb {
@@ -32,6 +32,8 @@ pub enum TerminationInfo {
history: tree_diagnostics::HistoryData,
},
Int2PtrWithStrictProvenance,
/// GenMC determined that the execution should stop.
GenmcSkip,
/// All threads are blocked.
GlobalDeadlock,
/// Some thread discovered a deadlock condition (e.g. in a mutex with reentrancy checking).
@@ -81,6 +83,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
TreeBorrowsUb { title, .. } => write!(f, "{title}"),
GlobalDeadlock => write!(f, "the evaluated program deadlocked"),
LocalDeadlock => write!(f, "a thread deadlocked"),
GenmcSkip => write!(f, "GenMC wants to skip this execution"),
MultipleSymbolDefinitions { link_name, .. } =>
write!(f, "multiple definitions of symbol `{link_name}`"),
SymbolShimClashing { link_name, .. } =>
@@ -240,6 +243,10 @@ pub fn report_result<'tcx>(
Some("unsupported operation"),
StackedBorrowsUb { .. } | TreeBorrowsUb { .. } | DataRace { .. } =>
Some("Undefined Behavior"),
GenmcSkip => {
assert!(ecx.machine.data_race.as_genmc_ref().is_some());
return Some((0, false));
}
LocalDeadlock => {
labels.push(format!("thread got stuck here"));
None
+4
View File
@@ -21,6 +21,10 @@ fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}
no_provenance!(i8 i16 i32 i64 isize u8 u16 u32 u64 usize bool ThreadId);
impl VisitProvenance for &'static str {
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
}
impl<T: VisitProvenance> VisitProvenance for Option<T> {
fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
if let Some(x) = self {
+47
View File
@@ -62,6 +62,20 @@ fn flock<'tcx>(
throw_unsup_format!("cannot flock {}", self.name());
}
/// Modifies device parameters.
/// `op` is the device-dependent operation code. It's either a `c_long` or `c_int`, depending on
/// the target and whether it uses glibc or musl.
/// `arg` is the optional third argument which exists depending on the operation code. It's either
/// an integer or a pointer.
fn ioctl<'tcx>(
&self,
_op: Scalar,
_arg: Option<&OpTy<'tcx>>,
_ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, i32> {
throw_unsup_format!("cannot use ioctl on {}", self.name());
}
/// Return which epoll events are currently active.
fn epoll_active_events<'tcx>(&self) -> InterpResult<'tcx, EpollEvents> {
throw_unsup_format!("{}: epoll does not support this file description", self.name());
@@ -129,6 +143,39 @@ fn flock(&mut self, fd_num: i32, op: i32) -> InterpResult<'tcx, Scalar> {
interp_ok(Scalar::from_i32(this.try_unwrap_io_result(result)?))
}
fn ioctl(
&mut self,
fd: &OpTy<'tcx>,
op: &OpTy<'tcx>,
varargs: &[OpTy<'tcx>],
) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let fd = this.read_scalar(fd)?.to_i32()?;
let op = this.read_scalar(op)?;
// There is at most one relevant variadic argument.
// It exists depending on the device and the opcode and thus we can't
// use `check_min_vararg_count` here.
let arg = varargs.first();
let Some(fd) = this.machine.fds.get(fd) else {
return this.set_last_error_and_return_i32(LibcError("EBADF"));
};
// Handle common opcodes.
let fioclex = this.eval_libc("FIOCLEX");
let fionclex = this.eval_libc("FIONCLEX");
if op == fioclex || op == fionclex {
// Since we don't support `exec`, those are NOPs.
return interp_ok(Scalar::from_i32(0));
}
// Since some ioctl operations use the return value as an output parameter, we cannot strictly use the convention of
// zero indicating success and -1 indicating an error.
let return_value = fd.as_unix(this).ioctl(op, arg, this)?;
interp_ok(Scalar::from_i32(return_value))
}
fn fcntl(
&mut self,
fd_num: &OpTy<'tcx>,
@@ -307,6 +307,12 @@ fn emulate_foreign_item_inner(
let result = this.flock(fd, op)?;
this.write_scalar(result, dest)?;
}
"ioctl" => {
let ([fd, op], varargs) =
this.check_shim_sig_variadic_lenient(abi, CanonAbi::C, link_name, args)?;
let result = this.ioctl(fd, op, varargs)?;
this.write_scalar(result, dest)?;
}
// File and file system access
"open" => {
@@ -658,8 +664,7 @@ fn emulate_foreign_item_inner(
abi,
args,
)?;
let result = this.getpeername(socket, address, address_len)?;
this.write_scalar(result, dest)?;
this.getpeername(socket, address, address_len, dest)?;
}
// Time
@@ -80,12 +80,6 @@ fn emulate_foreign_item_inner(
let result = this.realpath(path, resolved_path)?;
this.write_scalar(result, dest)?;
}
"ioctl" => {
let ([fd_num, cmd], varargs) =
this.check_shim_sig_variadic_lenient(abi, CanonAbi::C, link_name, args)?;
let result = this.ioctl(fd_num, cmd, varargs)?;
this.write_scalar(result, dest)?;
}
// Environment related shims
"_NSGetEnviron" => {
@@ -341,30 +335,4 @@ fn emulate_foreign_item_inner(
interp_ok(EmulateItemResult::NeedsReturn)
}
fn ioctl(
&mut self,
fd_num: &OpTy<'tcx>,
cmd: &OpTy<'tcx>,
_varargs: &[OpTy<'tcx>],
) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let fioclex = this.eval_libc_u64("FIOCLEX");
let fd_num = this.read_scalar(fd_num)?.to_i32()?;
let cmd = this.read_scalar(cmd)?.to_u64()?;
if cmd == fioclex {
// Since we don't support `exec`, this is a NOP. However, we want to
// return EBADF if the FD is invalid.
if this.machine.fds.is_fd_num(fd_num) {
interp_ok(Scalar::from_i32(0))
} else {
this.set_last_error_and_return_i32(LibcError("EBADF"))
}
} else {
throw_unsup_format!("ioctl: unsupported command {cmd:#x}");
}
}
}
+618 -274
View File
@@ -1,6 +1,7 @@
use std::cell::{Cell, RefCell};
use std::io::Read;
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use std::time::Duration;
use std::{io, iter};
use mio::Interest;
@@ -12,8 +13,10 @@
use rustc_middle::throw_unsup_format;
use rustc_target::spec::Os;
use crate::concurrency::blocking_io::InterestReceiver;
use crate::shims::files::{EvalContextExt as _, FdId, FileDescription, FileDescriptionRef};
use crate::{OpTy, Scalar, *};
use crate::shims::unix::UnixFileDescription;
use crate::*;
#[derive(Debug, PartialEq)]
enum SocketFamily {
@@ -23,22 +26,6 @@ enum SocketFamily {
IPv6,
}
enum SocketIoError {
/// The socket is not yet ready. Either EINPROGRESS or ENOTCONNECTED occurred.
NotReady,
/// Any other kind of I/O error.
Other(io::Error),
}
impl From<io::Error> for SocketIoError {
fn from(value: io::Error) -> Self {
match value.kind() {
io::ErrorKind::InProgress | io::ErrorKind::NotConnected => Self::NotReady,
_ => Self::Other(value),
}
}
}
#[derive(Debug)]
enum SocketState {
/// No syscall after `socket` has been made.
@@ -61,59 +48,6 @@ enum SocketState {
Connected(TcpStream),
}
impl SocketState {
/// If the socket is currently in [`SocketState::Connecting`], try to ensure
/// that the connection is established by first checking that [`TcpStream::take_error`]
/// doesn't return an error and then by checking that [`TcpStream::peer_addr`]
/// returns the address of the connected peer.
///
/// If the connection is established or the socket is in any other state,
/// [`Ok`] is returned.
///
/// **Important**: On Windows hosts this function can only be used to ensure a socket is connected
/// _after_ a [`Interest::WRITABLE`] event was received.
pub fn try_set_connected(&mut self) -> Result<(), SocketIoError> {
// Further explanation of the limitation on Windows hosts:
// Windows treats sockets which are connecting as connected until either the connection timeout hits
// or an error occurs. Thus, the [`TcpStream::peer_addr`] method returns [`Ok`] with the provided peer
// address even when the connection might not yet be established.
let SocketState::Connecting(stream) = self else { return Ok(()) };
if let Ok(Some(e)) = stream.take_error() {
// There was an error whilst connecting.
let e = SocketIoError::from(e);
// We won't get EINPROGRESS or ENOTCONNECTED here
// so we need to reset the state.
assert!(matches!(e, SocketIoError::Other(_)));
// Go back to initial state as the only way of getting into the
// `Connecting` state is from the `Initial` state.
*self = SocketState::Initial;
return Err(e);
}
if let Err(e) = stream.peer_addr() {
let e = SocketIoError::from(e);
if let SocketIoError::Other(_) = &e {
// All other errors are fatal for a socket and thus the state needs to be reset.
*self = SocketState::Initial;
}
return Err(e);
};
// We just read the peer address without an error so we can be
// sure that the connection is established.
// Temporarily use dummy state to take ownership of the stream.
let SocketState::Connecting(stream) = std::mem::replace(self, SocketState::Initial) else {
// At the start of the function we ensured that we're currently connecting.
unreachable!()
};
*self = SocketState::Connected(stream);
Ok(())
}
}
#[derive(Debug)]
struct Socket {
/// Family of the socket, used to ensure socket only binds/connects to address of
@@ -151,17 +85,40 @@ fn read<'tcx>(
) -> InterpResult<'tcx> {
assert!(communicate_allowed, "cannot have `Socket` with isolation enabled!");
if !matches!(&*self.state.borrow(), SocketState::Connected(_)) {
// We can only receive from connected sockets. For all other
// states we return a not connected error.
return finish.call(ecx, Err(LibcError("ENOTCONN")));
}
let socket = self;
// Since `read` is the same as `recv` with no flags, we just treat
// the `read` as a `recv` here.
ecx.block_for_recv(self, ptr, len, /* should_peek */ false, finish);
ecx.ensure_connected(
socket.clone(),
!socket.is_non_block.get(),
"read",
callback!(
@capture<'tcx> {
socket: FileDescriptionRef<Socket>,
ptr: Pointer,
len: usize,
finish: DynMachineCallback<'tcx, Result<usize, IoError>>,
} |this, result: Result<(), ()>| {
if result.is_err() {
return finish.call(this, Err(LibcError("ENOTCONN")))
}
interp_ok(())
// Since `read` is the same as `recv` with no flags, we just treat
// the `read` as a `recv` here.
if socket.is_non_block.get() {
// We have a non-blocking socket and thus don't want to block until
// we can read.
let result = this.try_non_block_recv(&socket, ptr, len, /* should_peek */ false)?;
finish.call(this, result)
} else {
// The socket is in blocking mode and thus the read call should block
// until we can read some bytes from the socket.
this.block_for_recv(socket, ptr, len, /* should_peek */ false, finish);
interp_ok(())
}
}
),
)
}
fn write<'tcx>(
@@ -174,17 +131,40 @@ fn write<'tcx>(
) -> InterpResult<'tcx> {
assert!(communicate_allowed, "cannot have `Socket` with isolation enabled!");
if !matches!(&*self.state.borrow(), SocketState::Connected(_)) {
// We can only send with connected sockets. For all other
// states we return a not connected error.
return finish.call(ecx, Err(LibcError("ENOTCONN")));
}
let socket = self;
// Since `write` is the same as `send` with no flags, we just treat
// the `write` as a `send` here.
ecx.block_for_send(self, ptr, len, finish);
ecx.ensure_connected(
socket.clone(),
!socket.is_non_block.get(),
"write",
callback!(
@capture<'tcx> {
socket: FileDescriptionRef<Socket>,
ptr: Pointer,
len: usize,
finish: DynMachineCallback<'tcx, Result<usize, IoError>>
} |this, result: Result<(), ()>| {
if result.is_err() {
return finish.call(this, Err(LibcError("ENOTCONN")))
}
interp_ok(())
// Since `write` is the same as `send` with no flags, we just treat
// the `write` as a `send` here.
if socket.is_non_block.get() {
// We have a non-blocking socket and thus don't want to block until
// we can write.
let result = this.try_non_block_send(&socket, ptr, len)?;
return finish.call(this, result)
} else {
// The socket is in blocking mode and thus the write call should block
// until we can write some bytes into the socket.
this.block_for_send(socket, ptr, len, finish);
interp_ok(())
}
}
),
)
}
fn short_fd_operations(&self) -> bool {
@@ -192,6 +172,10 @@ fn short_fd_operations(&self) -> bool {
true
}
fn as_unix<'tcx>(&self, _ecx: &MiriInterpCx<'tcx>) -> &dyn UnixFileDescription {
self
}
fn get_flags<'tcx>(&self, ecx: &mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, Scalar> {
let mut flags = ecx.eval_libc_i32("O_RDWR");
@@ -204,10 +188,64 @@ fn get_flags<'tcx>(&self, ecx: &mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, Sc
fn set_flags<'tcx>(
&self,
mut _flag: i32,
_ecx: &mut MiriInterpCx<'tcx>,
mut flag: i32,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, Scalar> {
throw_unsup_format!("fcntl: socket flags aren't supported")
let o_nonblock = ecx.eval_libc_i32("O_NONBLOCK");
// O_NONBLOCK flag can be set / unset by user.
if flag & o_nonblock == o_nonblock {
self.is_non_block.set(true);
flag &= !o_nonblock;
} else {
self.is_non_block.set(false);
}
// Throw error if there is any unsupported flag.
if flag != 0 {
throw_unsup_format!("fcntl: only O_NONBLOCK is supported for sockets")
}
interp_ok(Scalar::from_i32(0))
}
}
impl UnixFileDescription for Socket {
fn ioctl<'tcx>(
&self,
op: Scalar,
arg: Option<&OpTy<'tcx>>,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, i32> {
assert!(ecx.machine.communicate(), "cannot have `Socket` with isolation enabled!");
let fionbio = ecx.eval_libc("FIONBIO");
if op == fionbio {
// On these OSes, Rust uses the ioctl, so we trust that it is reasonable and controls
// the same internal flag as fcntl.
if !matches!(ecx.tcx.sess.target.os, Os::Linux | Os::Android | Os::MacOs | Os::FreeBsd)
{
// FIONBIO cannot be used to change the blocking mode of a socket on solarish targets:
// <https://github.com/rust-lang/rust/commit/dda5c97675b4f5b1f6fdab64606c8a1f21021b0a>
// Since there might be more targets which do weird things with this option, we use
// an allowlist instead of just denying solarish targets.
throw_unsup_format!(
"ioctl: setting FIONBIO on sockets is unsupported on target {}",
ecx.tcx.sess.target.os
);
}
let Some(value_ptr) = arg else {
throw_ub_format!("ioctl: setting FIONBIO on sockets requires a third argument");
};
let value = ecx.deref_pointer_as(value_ptr, ecx.machine.layouts.i32)?;
let non_block = ecx.read_scalar(&value)?.to_i32()? != 0;
self.is_non_block.set(non_block);
return interp_ok(0);
}
throw_unsup_format!("ioctl: unsupported operation {op:#x} on socket");
}
}
@@ -469,19 +507,35 @@ fn accept4(
}
if socket.is_non_block.get() {
throw_unsup_format!("accept4: non-blocking accept is unsupported")
// We have a non-blocking socket and thus don't want to block until
// we can accept an incoming connection.
match this.try_non_block_accept(
&socket,
address_ptr,
address_len_ptr,
is_client_sock_nonblock,
)? {
Ok(sockfd) => {
// We need to create the scalar using the destination size since
// `syscall(SYS_accept4, ...)` returns a long which doesn't match
// the int returned from the `accept`/`accept4` syscalls.
// See <https://man7.org/linux/man-pages/man2/syscall.2.html>.
this.write_scalar(Scalar::from_int(sockfd, dest.layout.size), dest)
}
Err(e) => this.set_last_error_and_return(e, dest),
}
} else {
// The socket is in blocking mode and thus the accept call should block
// until an incoming connection is ready.
this.block_for_accept(
socket,
address_ptr,
address_len_ptr,
is_client_sock_nonblock,
dest.clone(),
);
interp_ok(())
}
// The socket is in blocking mode and thus the accept call should block
// until an incoming connection is ready.
this.block_for_accept(
address_ptr,
address_len_ptr,
is_client_sock_nonblock,
socket,
dest.clone(),
);
interp_ok(())
}
fn connect(
@@ -530,22 +584,44 @@ fn connect(
// Mio returns a potentially unconnected stream.
// We can be ensured that the connection is established when
// [`TcpStream::take_err`] and [`TcpStream::peer_addr`] both
// don't return errors.
// For non-blocking sockets we need to check that for every
// [`Interest::WRITEABLE`] event on the stream.
// don't return an error after receiving an [`Interest::WRITEABLE`]
// event on the stream.
match TcpStream::connect(address) {
Ok(stream) => *socket.state.borrow_mut() = SocketState::Connecting(stream),
Err(e) => return this.set_last_error_and_return(e, dest),
};
if socket.is_non_block.get() {
throw_unsup_format!("connect: non-blocking connect is unsupported");
}
// We have a non-blocking socket and thus don't want to block until
// the connection is established.
// The socket is in blocking mode and thus the connect call should block
// until the connection with the server is established.
this.block_for_connect(socket, dest.clone());
interp_ok(())
// Since the [`TcpStream::connect`] function of mio hides the EINPROGRESS
// we just always return EINPROGRESS and check whether the connection succeeded
// once we want to use the connected socket.
this.set_last_error_and_return(LibcError("EINPROGRESS"), dest)
} else {
// The socket is in blocking mode and thus the connect call should block
// until the connection with the server is established.
let dest = dest.clone();
this.ensure_connected(
socket,
/* should_wait */ true,
"connect",
callback!(
@capture<'tcx> {
dest: MPlaceTy<'tcx>
} |this, result: Result<(), ()>| {
if result.is_err() {
this.set_last_error_and_return(LibcError("ENOTCONN"), &dest)
} else {
this.write_scalar(Scalar::from_i32(0), &dest)
}
}
),
)
}
}
fn send(
@@ -576,12 +652,6 @@ fn send(
return this.set_last_error_and_return(LibcError("ENOTSOCK"), dest);
};
if !matches!(&*socket.state.borrow(), SocketState::Connected(_)) {
// We can only send with connected sockets. For all other
// states we return a not connected error.
return this.set_last_error_and_return(LibcError("ENOTCONN"), dest);
}
// Non-deterministically decide to further reduce the length, simulating a partial send.
// We avoid reducing the write size to 0: the docs seem to be entirely fine with that,
// but the standard library is not (https://github.com/rust-lang/rust/issues/145959).
@@ -594,50 +664,86 @@ fn send(
length
};
let mut is_op_non_block = false;
// Interpret the flag. Every flag we recognize is "subtracted" from `flags`, so
// if there is anything left at the end, that's an unsupported flag.
if matches!(
this.tcx.sess.target.os,
Os::Linux | Os::Android | Os::FreeBsd | Os::Solaris | Os::Illumos
) {
// MSG_NOSIGNAL only exists on Linux, Android, FreeBSD,
// MSG_NOSIGNAL and MSG_DONTWAIT only exist on Linux, Android, FreeBSD,
// Solaris, and Illumos targets.
let msg_nosignal = this.eval_libc_i32("MSG_NOSIGNAL");
let msg_dontwait = this.eval_libc_i32("MSG_DONTWAIT");
if flags & msg_nosignal == msg_nosignal {
// This is only needed to ensure that no EPIPE signal is sent when
// trying to send into a stream which is no longer connected.
// Since we don't support signals, we can ignore this.
flags &= !msg_nosignal;
}
if flags & msg_dontwait == msg_dontwait {
flags &= !msg_dontwait;
is_op_non_block = true;
}
}
if flags != 0 {
throw_unsup_format!(
"send: flag {flags:#x} is unsupported, only MSG_NOSIGNAL is allowed",
"send: flag {flags:#x} is unsupported, only MSG_NOSIGNAL and MSG_DONTWAIT are allowed",
);
}
// If either the operation or the socket is non-blocking, we don't want
// to wait until the connection is established.
let should_wait = !is_op_non_block && !socket.is_non_block.get();
let dest = dest.clone();
this.block_for_send(
socket,
buffer_ptr,
length,
callback!(@capture<'tcx> {
dest: MPlaceTy<'tcx>
} |this, result: Result<usize, IoError>| {
match result {
Ok(read_size) => {
let read_size: u64 = read_size.try_into().unwrap();
let ssize_layout = this.libc_ty_layout("ssize_t");
this.write_scalar(Scalar::from_int(read_size, ssize_layout.size), &dest)
this.ensure_connected(
socket.clone(),
should_wait,
"send",
callback!(
@capture<'tcx> {
socket: FileDescriptionRef<Socket>,
flags: i32,
buffer_ptr: Pointer,
length: usize,
is_op_non_block: bool,
dest: MPlaceTy<'tcx>,
} |this, result: Result<(), ()>| {
if result.is_err() {
return this.set_last_error_and_return(LibcError("ENOTCONN"), &dest)
}
Err(e) => this.set_last_error_and_return(e, &dest)
}
}),
);
interp_ok(())
if is_op_non_block || socket.is_non_block.get() {
// We have a non-blocking operation or a non-blocking socket and
// thus don't want to block until we can send.
match this.try_non_block_send(&socket, buffer_ptr, length)? {
Ok(size) => this.write_scalar(Scalar::from_target_isize(size.try_into().unwrap(), this), &dest),
Err(e) => this.set_last_error_and_return(e, &dest),
}
} else {
// The socket is in blocking mode and thus the send call should block
// until we can send some bytes into the socket.
this.block_for_send(
socket,
buffer_ptr,
length,
callback!(@capture<'tcx> {
dest: MPlaceTy<'tcx>
} |this, result: Result<usize, IoError>| {
match result {
Ok(size) => this.write_scalar(Scalar::from_target_isize(size.try_into().unwrap(), this), &dest),
Err(e) => this.set_last_error_and_return(e, &dest)
}
}),
);
interp_ok(())
}
}
),
)
}
fn recv(
@@ -668,12 +774,6 @@ fn recv(
return this.set_last_error_and_return(LibcError("ENOTSOCK"), dest);
};
if !matches!(&*socket.state.borrow(), SocketState::Connected(_)) {
// We can only receive from connected sockets. For all other
// states we return a not connected error.
return this.set_last_error_and_return(LibcError("ENOTCONN"), dest);
}
// Non-deterministically decide to further reduce the length, simulating a partial receive.
// We don't simulate partial receives for lengths < 2 because the man page states that a
// return value of zero can only be returned in some special cases:
@@ -690,6 +790,7 @@ fn recv(
};
let mut should_peek = false;
let mut is_op_non_block = false;
// Interpret the flag. Every flag we recognize is "subtracted" from `flags`, so
// if there is anything left at the end, that's an unsupported flag.
@@ -710,35 +811,77 @@ fn recv(
}
}
if matches!(
this.tcx.sess.target.os,
Os::Linux | Os::Android | Os::FreeBsd | Os::Solaris | Os::Illumos
) {
// MSG_DONTWAIT only exists on Linux, Android, FreeBSD,
// Solaris, and Illumos targets.
let msg_dontwait = this.eval_libc_i32("MSG_DONTWAIT");
if flags & msg_dontwait == msg_dontwait {
flags &= !msg_dontwait;
is_op_non_block = true;
}
}
if flags != 0 {
throw_unsup_format!(
"recv: flag {flags:#x} is unsupported, only MSG_PEEK \
"recv: flag {flags:#x} is unsupported, only MSG_PEEK, MSG_DONTWAIT \
and MSG_CMSG_CLOEXEC are allowed",
);
}
// If either the operation or the socket is non-blocking, we don't want
// to wait until the connection is established.
let should_wait = !is_op_non_block && !socket.is_non_block.get();
let dest = dest.clone();
this.block_for_recv(
socket,
buffer_ptr,
length,
should_peek,
callback!(@capture<'tcx> {
dest: MPlaceTy<'tcx>
} |this, result: Result<usize, IoError>| {
match result {
Ok(read_size) => {
let read_size: u64 = read_size.try_into().unwrap();
let ssize_layout = this.libc_ty_layout("ssize_t");
this.write_scalar(Scalar::from_int(read_size, ssize_layout.size), &dest)
this.ensure_connected(
socket.clone(),
should_wait,
"recv",
callback!(
@capture<'tcx> {
socket: FileDescriptionRef<Socket>,
buffer_ptr: Pointer,
length: usize,
should_peek: bool,
is_op_non_block: bool,
dest: MPlaceTy<'tcx>,
} |this, result: Result<(), ()>| {
if result.is_err() {
return this.set_last_error_and_return(LibcError("ENOTCONN"), &dest)
}
Err(e) => this.set_last_error_and_return(e, &dest)
}
}),
);
interp_ok(())
if is_op_non_block || socket.is_non_block.get() {
// We have a non-blocking operation or a non-blocking socket and
// thus don't want to block until we can receive.
match this.try_non_block_recv(&socket, buffer_ptr, length, should_peek)? {
Ok(size) => this.write_scalar(Scalar::from_target_isize(size.try_into().unwrap(), this), &dest),
Err(e) => this.set_last_error_and_return(e, &dest),
}
} else {
// The socket is in blocking mode and thus the receive call should block
// until we can receive some bytes from the socket.
this.block_for_recv(
socket,
buffer_ptr,
length,
should_peek,
callback!(@capture<'tcx> {
dest: MPlaceTy<'tcx>
} |this, result: Result<usize, IoError>| {
match result {
Ok(size) => this.write_scalar(Scalar::from_target_isize(size.try_into().unwrap(), this), &dest),
Err(e) => this.set_last_error_and_return(e, &dest)
}
}),
);
interp_ok(())
}
}
),
)
}
fn setsockopt(
@@ -871,7 +1014,9 @@ fn getpeername(
socket: &OpTy<'tcx>,
address: &OpTy<'tcx>,
address_len: &OpTy<'tcx>,
) -> InterpResult<'tcx, Scalar> {
// Location where the output scalar is written to.
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let socket = this.read_scalar(socket)?.to_i32()?;
@@ -880,32 +1025,56 @@ fn getpeername(
// Get the file handle
let Some(fd) = this.machine.fds.get(socket) else {
return this.set_last_error_and_return_i32(LibcError("EBADF"));
return this.set_last_error_and_return(LibcError("EBADF"), dest);
};
let Some(socket) = fd.downcast::<Socket>() else {
// Man page specifies to return ENOTSOCK if `fd` is not a socket.
return this.set_last_error_and_return_i32(LibcError("ENOTSOCK"));
return this.set_last_error_and_return(LibcError("ENOTSOCK"), dest);
};
assert!(this.machine.communicate(), "cannot have `Socket` with isolation enabled!");
let state = socket.state.borrow();
let dest = dest.clone();
let SocketState::Connected(stream) = &*state else {
// We can only read the peer address of connected sockets.
return this.set_last_error_and_return_i32(LibcError("ENOTCONN"));
};
// It's only safe to call [`TcpStream::peer_addr`] after the socket is connected since
// UNIX targets should return ENOTCONN when the connection is not yet established.
this.ensure_connected(
socket.clone(),
/* should_wait */ false,
"getpeername",
callback!(
@capture<'tcx> {
socket: FileDescriptionRef<Socket>,
address_ptr: Pointer,
address_len_ptr: Pointer,
dest: MPlaceTy<'tcx>,
} |this, result: Result<(), ()>| {
if result.is_err() {
return this.set_last_error_and_return(LibcError("ENOTCONN"), &dest)
};
let address = match stream.peer_addr() {
Ok(address) => address,
Err(e) => return this.set_last_error_and_return_i32(e),
};
let SocketState::Connected(stream) = &*socket.state.borrow() else {
unreachable!()
};
match this.write_socket_address(&address, address_ptr, address_len_ptr, "getpeername")? {
Ok(_) => interp_ok(Scalar::from_i32(0)),
Err(e) => this.set_last_error_and_return_i32(e),
}
let address = match stream.peer_addr() {
Ok(address) => address,
Err(e) => return this.set_last_error_and_return(e, &dest),
};
match this.write_socket_address(
&address,
address_ptr,
address_len_ptr,
"getpeername",
)? {
Ok(_) => this.write_scalar(Scalar::from_i32(0), &dest),
Err(e) => this.set_last_error_and_return(e, &dest),
}
}
),
)
}
}
@@ -1182,12 +1351,15 @@ fn write_socket_address(
/// Block the thread until there's an incoming connection or an error occurred.
///
/// This recursively calls itself should the operation still block for some reason.
///
/// **Note**: This function is only safe to call when having previously ensured
/// that the socket is in [`SocketState::Listening`].
fn block_for_accept(
&mut self,
socket: FileDescriptionRef<Socket>,
address_ptr: Pointer,
address_len_ptr: Pointer,
is_client_sock_nonblock: bool,
socket: FileDescriptionRef<Socket>,
dest: MPlaceTy<'tcx>,
) {
let this = self.eval_context_mut();
@@ -1204,89 +1376,83 @@ fn block_for_accept(
} |this, kind: UnblockKind| {
assert_eq!(kind, UnblockKind::Ready);
let state = socket.state.borrow();
let SocketState::Listening(listener) = &*state else {
// We checked that the socket is in listening state before blocking
// and since there is no outgoing transition from that state this
// should be unreachable.
unreachable!()
};
let (stream, addr) = match listener.accept() {
Ok(peer) => peer,
Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
// We need to block the thread again as it would still block.
drop(state);
this.block_for_accept(address_ptr, address_len_ptr, is_client_sock_nonblock, socket, dest);
return interp_ok(())
match this.try_non_block_accept(&socket, address_ptr, address_len_ptr, is_client_sock_nonblock)? {
Ok(sockfd) => {
// We need to create the scalar using the destination size since
// `syscall(SYS_accept4, ...)` returns a long which doesn't match
// the int returned from the `accept`/`accept4` syscalls.
// See <https://man7.org/linux/man-pages/man2/syscall.2.html>.
this.write_scalar(Scalar::from_int(sockfd, dest.layout.size), &dest)
},
Err(e) => return this.set_last_error_and_return(e, &dest),
};
let family = match addr {
SocketAddr::V4(_) => SocketFamily::IPv4,
SocketAddr::V6(_) => SocketFamily::IPv6,
};
if address_ptr != Pointer::null() {
// We only attempt a write if the address pointer is not a null pointer.
// If the address pointer is a null pointer the user isn't interested in the
// address and we don't need to write anything.
if let Err(e) = this.write_socket_address(&addr, address_ptr, address_len_ptr, "accept4")? {
return this.set_last_error_and_return(e, &dest);
};
Err(IoError::HostError(e)) if e.kind() == io::ErrorKind::WouldBlock => {
// We need to block the thread again as it would still block.
this.block_for_accept(socket, address_ptr, address_len_ptr, is_client_sock_nonblock, dest);
interp_ok(())
}
Err(e) => this.set_last_error_and_return(e, &dest),
}
let fd = this.machine.fds.new_ref(Socket {
family,
state: RefCell::new(SocketState::Connected(stream)),
is_non_block: Cell::new(is_client_sock_nonblock),
});
let sockfd = this.machine.fds.insert(fd);
// We need to create the scalar using the destination size since
// `syscall(SYS_accept4, ...)` returns a long which doesn't match
// the int returned from the `accept`/`accept4` syscalls.
// See <https://man7.org/linux/man-pages/man2/syscall.2.html>.
this.write_scalar(Scalar::from_int(sockfd, dest.layout.size), &dest)
}),
);
}
/// Block the thread until the stream is connected or an error occurred.
fn block_for_connect(&mut self, socket: FileDescriptionRef<Socket>, dest: MPlaceTy<'tcx>) {
/// Attempt to accept an incoming connection on the listening socket in a
/// non-blocking manner.
///
/// **Note**: This function is only safe to call when having previously ensured
/// that the socket is in [`SocketState::Listening`].
fn try_non_block_accept(
&mut self,
socket: &FileDescriptionRef<Socket>,
address_ptr: Pointer,
address_len_ptr: Pointer,
is_client_sock_nonblock: bool,
) -> InterpResult<'tcx, Result<i32, IoError>> {
let this = self.eval_context_mut();
this.block_thread_for_io(
socket.clone(),
Interest::WRITABLE,
None,
callback!(@capture<'tcx> {
socket: FileDescriptionRef<Socket>,
dest: MPlaceTy<'tcx>,
} |this, kind: UnblockKind| {
assert_eq!(kind, UnblockKind::Ready);
let mut state = socket.state.borrow_mut();
let state = socket.state.borrow();
let SocketState::Listening(listener) = &*state else {
panic!(
"try_non_block_accept must only be called when socket is in `SocketState::Listening`"
)
};
// We received a "writable" event so `try_set_connected` is safe to call.
match state.try_set_connected() {
Ok(_) => this.write_scalar(Scalar::from_i32(0), &dest),
Err(SocketIoError::NotReady) => {
// We need to block the thread again as the connection is still not yet ready.
drop(state);
this.block_for_connect(socket, dest);
return interp_ok(())
},
Err(SocketIoError::Other(e)) => return this.set_last_error_and_return(e, &dest)
}
}),
);
let (stream, addr) = match listener.accept() {
Ok(peer) => peer,
Err(e) => return interp_ok(Err(IoError::HostError(e))),
};
let family = match addr {
SocketAddr::V4(_) => SocketFamily::IPv4,
SocketAddr::V6(_) => SocketFamily::IPv6,
};
if address_ptr != Pointer::null() {
// We only attempt a write if the address pointer is not a null pointer.
// If the address pointer is a null pointer the user isn't interested in the
// address and we don't need to write anything.
if let Err(e) =
this.write_socket_address(&addr, address_ptr, address_len_ptr, "accept4")?
{
return interp_ok(Err(e));
};
}
let fd = this.machine.fds.new_ref(Socket {
family,
state: RefCell::new(SocketState::Connected(stream)),
is_non_block: Cell::new(is_client_sock_nonblock),
});
let sockfd = this.machine.fds.insert(fd);
interp_ok(Ok(sockfd))
}
/// Block the thread until we can send bytes into the connected socket
/// or an error occurred.
///
/// This recursively calls itself should the operation still block for some reason.
///
/// **Note**: This function is only safe to call when having previously ensured
/// that the socket is in [`SocketState::Connected`].
fn block_for_send(
&mut self,
socket: FileDescriptionRef<Socket>,
@@ -1307,18 +1473,8 @@ fn block_for_send(
} |this, kind: UnblockKind| {
assert_eq!(kind, UnblockKind::Ready);
let mut state = socket.state.borrow_mut();
let SocketState::Connected(stream) = &mut*state else {
// We ensured that the socket is connected before blocking.
unreachable!()
};
// This is a *non-blocking* write.
let result = this.write_to_host(stream, length, buffer_ptr)?;
match result {
match this.try_non_block_send(&socket, buffer_ptr, length)? {
Err(IoError::HostError(e)) if e.kind() == io::ErrorKind::WouldBlock => {
// We need to block the thread again as it would still block.
drop(state);
this.block_for_send(socket, buffer_ptr, length, finish);
interp_ok(())
},
@@ -1328,10 +1484,41 @@ fn block_for_send(
);
}
/// Attempt to send bytes into the connected socket in a non-blocking manner.
///
/// **Note**: This function is only safe to call when having previously ensured
/// that the socket is in [`SocketState::Connected`].
fn try_non_block_send(
&mut self,
socket: &FileDescriptionRef<Socket>,
buffer_ptr: Pointer,
length: usize,
) -> InterpResult<'tcx, Result<usize, IoError>> {
let this = self.eval_context_mut();
let SocketState::Connected(stream) = &mut *socket.state.borrow_mut() else {
panic!("try_non_block_send must only be called when the socket is connected")
};
// This is a *non-blocking* write.
let result = this.write_to_host(stream, length, buffer_ptr)?;
match result {
Err(IoError::HostError(e)) if e.kind() == io::ErrorKind::NotConnected => {
// On Windows hosts, `send` can return WSAENOTCONN where EAGAIN or EWOULDBLOCK
// would be returned on UNIX-like systems. We thus remap this error to an EWOULDBLOCK.
interp_ok(Err(IoError::HostError(io::ErrorKind::WouldBlock.into())))
}
result => interp_ok(result),
}
}
/// Block the thread until we can receive bytes from the connected socket
/// or an error occurred.
///
/// This recursively calls itself should the operation still block for some reason.
///
/// **Note**: This function is only safe to call when having previously ensured
/// that the socket is in [`SocketState::Connected`].
fn block_for_recv(
&mut self,
socket: FileDescriptionRef<Socket>,
@@ -1354,24 +1541,9 @@ fn block_for_recv(
} |this, kind: UnblockKind| {
assert_eq!(kind, UnblockKind::Ready);
let mut state = socket.state.borrow_mut();
let SocketState::Connected(stream) = &mut*state else {
// We ensured that the socket is connected before blocking.
unreachable!()
};
// This is a *non-blocking* read/peek.
let result = this.read_from_host(|buf| {
if should_peek {
stream.peek(buf)
} else {
stream.read(buf)
}
}, length, buffer_ptr)?;
match result {
match this.try_non_block_recv(&socket, buffer_ptr, length, should_peek)? {
Err(IoError::HostError(e)) if e.kind() == io::ErrorKind::WouldBlock => {
// We need to block the thread again as it would still block.
drop(state);
this.block_for_recv(socket, buffer_ptr, length, should_peek, finish);
interp_ok(())
},
@@ -1380,6 +1552,178 @@ fn block_for_recv(
}),
);
}
/// Attempt to receive bytes from the connected socket in a non-blocking manner.
///
/// **Note**: This function is only safe to call when having previously ensured
/// that the socket is in [`SocketState::Connected`].
fn try_non_block_recv(
    &mut self,
    socket: &FileDescriptionRef<Socket>,
    buffer_ptr: Pointer,
    length: usize,
    should_peek: bool,
) -> InterpResult<'tcx, Result<usize, IoError>> {
    let this = self.eval_context_mut();
    // Extract the connected stream; calling this in any other state is a caller bug.
    let mut state = socket.state.borrow_mut();
    let stream = match &mut *state {
        SocketState::Connected(stream) => stream,
        _ => panic!("try_non_block_recv must only be called when the socket is connected"),
    };
    // Perform the *non-blocking* read (or peek, if requested) from the host socket.
    let host_result = this.read_from_host(
        |buf| {
            if should_peek { stream.peek(buf) } else { stream.read(buf) }
        },
        length,
        buffer_ptr,
    )?;
    match host_result {
        Err(IoError::HostError(host_err)) if host_err.kind() == io::ErrorKind::NotConnected => {
            // On Windows hosts, `recv` can return WSAENOTCONN where EAGAIN or EWOULDBLOCK
            // would be returned on UNIX-like systems. We thus remap this error to an EWOULDBLOCK.
            interp_ok(Err(IoError::HostError(io::ErrorKind::WouldBlock.into())))
        }
        other => interp_ok(other),
    }
}
/// Execute the provided callback function when the socket is either in
/// [`SocketState::Connected`] or an error occurred.
/// If the socket is currently neither in the [`SocketState::Connecting`] nor
/// the [`SocketState::Connected`] state, an ENOTCONN error is returned.
/// When the callback function is called with `Ok(_)`, then we're guaranteed
/// that the socket is in the [`SocketState::Connected`] state.
///
/// This function can optionally also block until either an error occurred or
/// the socket reached the [`SocketState::Connected`] state.
fn ensure_connected(
    &mut self,
    socket: FileDescriptionRef<Socket>,
    should_wait: bool,
    foreign_name: &'static str,
    action: DynMachineCallback<'tcx, Result<(), ()>>,
) -> InterpResult<'tcx> {
    let this = self.eval_context_mut();
    // Fast path: only the `Connecting` state needs the polling machinery below.
    // `Connected` resolves to `Ok` immediately; every other state is an error.
    // The borrow must be dropped before invoking the callback, which may
    // re-borrow the socket state.
    let state = socket.state.borrow();
    match &*state {
        SocketState::Connecting(_) => { /* fall-through to below */ }
        SocketState::Connected(_) => {
            drop(state);
            return action.call(this, Ok(()));
        }
        _ => {
            drop(state);
            return action.call(this, Err(()));
        }
    };
    drop(state);
    // We're currently connecting. Since the underlying mio socket is non-blocking,
    // the only way to determine whether we are done connecting is by polling.
    // If we should wait until the connection is established, the timeout is `None`.
    // Otherwise, we use a zero duration timeout, i.e. we return immediately
    // (but we still go through the scheduler once -- which is fine).
    let timeout = if should_wait {
        None
    } else {
        Some((TimeoutClock::Monotonic, TimeoutAnchor::Absolute, Duration::ZERO))
    };
    // Block until the socket becomes writable (the readiness signal mio uses
    // for connection completion) or the zero-duration timeout fires.
    this.block_thread_for_io(
        socket.clone(),
        Interest::WRITABLE,
        timeout,
        callback!(
            @capture<'tcx> {
                socket: FileDescriptionRef<Socket>,
                should_wait: bool,
                foreign_name: &'static str,
                action: DynMachineCallback<'tcx, Result<(), ()>>,
            } |this, kind: UnblockKind| {
                if UnblockKind::TimedOut == kind {
                    // We can only time out when `should_wait` is false.
                    // This then means that the socket is not yet connected.
                    assert!(!should_wait);
                    // Stop listening for readiness events for this thread, since we
                    // are reporting the (non-)result to the caller now.
                    this.machine.blocking_io.deregister(socket.id(), InterestReceiver::UnblockThread(this.active_thread()));
                    return action.call(this, Err(()))
                }
                // The thread woke up because it's ready, indicating a writeable or error event.
                let mut state = socket.state.borrow_mut();
                let stream = match &*state {
                    SocketState::Connecting(stream) => stream,
                    SocketState::Connected(_) => {
                        drop(state);
                        // This can happen because we blocked the thread:
                        // maybe another thread "upgraded" the connection in the meantime.
                        return action.call(this, Ok(()))
                    },
                    _ => {
                        drop(state);
                        // We ensured that we only block when we're currently connecting.
                        // Since this thread just got rescheduled, it could be that another
                        // thread realized that the connection failed and we're thus in
                        // an "invalid state".
                        return action.call(this, Err(()))
                    }
                };
                // Manually check whether there were any errors since calling `connect`.
                if let Ok(Some(_)) = stream.take_error() {
                    // There was an error during connecting and thus we
                    // return ENOTCONN. It's the program's responsibility
                    // to read SO_ERROR itself.
                    //
                    // Go back to initial state since the only way of getting into the
                    // `Connecting` state is from the `Initial` state and at this point
                    // we know that the connection won't be established anymore.
                    //
                    // FIXME: We're currently just dropping the error information. Eventually
                    // we'll have to store it so that it can be recovered by the user.
                    *state = SocketState::Initial;
                    drop(state);
                    return action.call(this, Err(()))
                }
                // There was no error during connecting. We still need to ensure that
                // the wakeup wasn't spurious. We do this by attempting to read the
                // peer address of the socket (following the advice given by mio):
                // <https://docs.rs/mio/latest/mio/net/struct.TcpStream.html#notes>
                match stream.peer_addr() {
                    Ok(_) => { /* fall-through to below */},
                    Err(e) if matches!(e.kind(), io::ErrorKind::NotConnected | io::ErrorKind::InProgress) => {
                        // We received a spurious wakeup from the OS. This should be considered an OS bug:
                        // <https://github.com/tokio-rs/mio/issues/1942#issuecomment-4169378308>
                        panic!("{foreign_name}: received writable event from OS but socket is not yet connected")
                    },
                    Err(_) => {
                        // For all other errors the socket is connected. Since we're not interested in the
                        // peer address and only want to know whether the socket is connected, we can ignore
                        // the error and continue.
                    }
                }
                // The connection is established.
                // Temporarily use dummy state to take ownership of the stream.
                let SocketState::Connecting(stream) = std::mem::replace(&mut*state, SocketState::Initial) else {
                    // At the start of the function we ensured that we're currently connecting.
                    unreachable!()
                };
                *state = SocketState::Connected(stream);
                drop(state);
                action.call(this, Ok(()))
            }
        ),
    );
    // The result is delivered via `action`; the blocking machinery itself succeeded.
    interp_ok(())
}
}
impl VisitProvenance for FileDescriptionRef<Socket> {
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Test that we can detect a double-free bug across two threads, which only shows up if the second thread reads an atomic pointer at a very specific moment.
// GenMC can detect this error consistently, without having to run the buggy code with multiple RNG seeds or in a loop.
@@ -1,5 +1,4 @@
//@revisions: send make
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Test that we can distinguish two pointers with the same address, but different provenance, after they are sent to GenMC and back.
// We have two variants, one where we send such a pointer to GenMC, and one where we make it on the GenMC side.
@@ -1,5 +1,5 @@
Running GenMC Verification...
error: Undefined Behavior: Attempt to access freed memory
error: Undefined Behavior: Attempt to access non-allocated memory
--> tests/genmc/fail/data_race/atomic_ptr_alloc_race.rs:LL:CC
|
LL | dealloc(b as *mut u8, Layout::new::<u64>());
@@ -1,5 +1,5 @@
//@revisions: write dealloc
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows -Zmiri-ignore-leaks
//@compile-flags: -Zmiri-ignore-leaks
// Test that we can detect data races between an allocation and an unsynchronized action in another thread.
// We have two variants, an alloc-dealloc race and an alloc-write race.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Test that use-after-free bugs involving atomic pointers are detected in GenMC mode.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Test that use-after-free bugs involving atomic pointers are detected in GenMC mode.
// Compared to `atomic_ptr_dealloc_write_race.rs`, this variant checks that the data race is still detected, even if the write happens before the free.
//
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test `wrong/racy/MPU2+rels+rlx`.
// Test if Miri with GenMC can detect the data race on `X`.
// The data race only occurs if thread 1 finishes, then threads 3 and 4 run, then thread 2.
@@ -1,4 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@revisions: rlx_rlx rlx_acq rel_rlx
// Translated from GenMC's test `wrong/racy/MP+rel+rlx`, `MP+rlx+acq` and `MP+rlx+rlx`.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2019 Carl Lerche
@@ -1,12 +0,0 @@
error: abnormal termination: the program aborted execution
--> tests/genmc/fail/loom/store_buffering.rs:LL:CC
|
LL | std::process::abort();
| ^^^^^^^^^^^^^^^^^^^^^ abnormal termination occurred here
|
= note: this is on thread `main`
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error
@@ -1,15 +1,9 @@
//@ revisions: non_genmc genmc
//@[genmc] compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: Copyright (c) 2019 Carl Lerche
// This is the test `store_buffering` from `loom/test/litmus.rs`, adapted for Miri-GenMC.
// https://github.com/tokio-rs/loom/blob/dbf32b04bae821c64be44405a0bb72ca08741558/tests/litmus.rs
// This test shows the comparison between running Miri with or without GenMC.
// Without GenMC, Miri requires multiple iterations of the loop to detect the error.
#![no_main]
#[path = "../../../utils/genmc.rs"]
@@ -23,30 +17,27 @@
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
// For normal Miri, we need multiple repetitions, but GenMC should find the bug with only 1.
const REPS: usize = if cfg!(non_genmc) { 128 } else { 1 };
for _ in 0..REPS {
// New atomics every iterations, so they don't influence each other.
let x = AtomicUsize::new(0);
let y = AtomicUsize::new(0);
let mut a: usize = 1234;
let mut b: usize = 1234;
unsafe {
let ids = [
spawn_pthread_closure(|| {
x.store(1, Relaxed);
a = y.load(Relaxed)
}),
spawn_pthread_closure(|| {
y.store(1, Relaxed);
b = x.load(Relaxed)
}),
];
join_pthreads(ids);
}
if (a, b) == (0, 0) {
std::process::abort(); //~ ERROR: abnormal termination
}
let x = AtomicUsize::new(0);
let y = AtomicUsize::new(0);
let mut a: usize = 1234;
let mut b: usize = 1234;
unsafe {
let ids = [
spawn_pthread_closure(|| {
x.store(1, Relaxed);
a = y.load(Relaxed)
}),
spawn_pthread_closure(|| {
y.store(1, Relaxed);
b = x.load(Relaxed)
}),
];
join_pthreads(ids);
}
if (a, b) == (0, 0) {
std::process::abort(); //~ ERROR: abnormal termination
}
0
@@ -2,8 +2,8 @@ Running GenMC Verification...
error: abnormal termination: the program aborted execution
--> tests/genmc/fail/loom/store_buffering.rs:LL:CC
|
LL | std::process::abort();
| ^^^^^^^^^^^^^^^^^^^^^ abnormal termination occurred here
LL | std::process::abort();
| ^^^^^^^^^^^^^^^^^^^^^ abnormal termination occurred here
|
= note: this is on thread `main`
@@ -1,5 +1,3 @@
//@ compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
fn main() {
std::thread::spawn(|| {
unsafe { std::hint::unreachable_unchecked() }; //~ERROR: entering unreachable code
@@ -1,4 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@error-in-other-file: Undefined Behavior
// Test that GenMC throws an error if a `std::sync::Mutex` is unlocked from a different thread than the one that locked it.
@@ -1,4 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@error-in-other-file: Undefined Behavior
// Test that GenMC can detect a double unlock of a mutex.
@@ -1,4 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@revisions: sc3_rel1 release4 relaxed4
// The pass tests "2w2w_3sc_1rel.rs", "2w2w_4rel" and "2w2w_4sc" and the fail test "2w2w_weak.rs" are related.
@@ -1,5 +1,4 @@
//@revisions: single multiple
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@error-in-other-file: resource exhaustion
// Ensure that we emit a proper error if GenMC fails to fulfill an allocation.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Test several operations on atomic pointers.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Test that we can send pointers with any alignment to GenMC and back, even across threads.
// After a round-trip, the pointers should still work properly (no missing provenance).
@@ -1,4 +1,4 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows -Zmiri-ignore-leaks
//@compile-flags: -Zmiri-ignore-leaks
// Adapted from: `impl LazyKey`, `fn lazy_init`: rust/library/std/src/sys/thread_local/key/racy.rs
// Two threads race to initialize a key, which is just an index into an array in this test.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Test the basic functionality of compare_exchange.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Test that we can read the value of a non-atomic store atomically and that of an atomic value non-atomically.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Test that we can read the initial value of global, heap and stack allocations in GenMC mode.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// This test checks for correct handling of atomic read-modify-write operations for all integer sizes.
// Atomic max and min should return the previous value, and store the result in the atomic.
// Atomic addition and subtraction should have wrapping semantics.
@@ -0,0 +1,22 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Tests mixed-size non-atomic accesses.
#![no_main]
use std::sync::atomic::*;
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
let mut data = 0u64;
// Treat this like an array of two AtomicI32.
let atomics = unsafe { &*(&raw mut data as *mut u64 as *mut [AtomicI32; 2]) };
atomics[0].load(Ordering::SeqCst);
atomics[1].store(-1, Ordering::SeqCst);
atomics[0].store(-1, Ordering::Relaxed);
assert_eq!(data, u64::MAX);
0
}
@@ -0,0 +1,2 @@
Running GenMC Verification...
Verification complete with 1 executions. No errors found.
@@ -1,5 +1,5 @@
//@ revisions: default_R1W1 default_R1W2 spinloop_assume_R1W1 spinloop_assume_R1W2
//@compile-flags: -Zmiri-ignore-leaks -Zmiri-genmc -Zmiri-disable-stacked-borrows -Zmiri-genmc-verbose
//@compile-flags: -Zmiri-ignore-leaks -Zmiri-genmc-verbose
//@normalize-stderr-test: "Verification took .*s" -> "Verification took [TIME]s"
// This test is a translation of the GenMC test `ms-queue-dynamic`, but with all code related to GenMC's hazard pointer API removed.
@@ -1,5 +1,5 @@
//@ revisions: default_R1W1 default_R1W2 default_R1W3 spinloop_assume_R1W1 spinloop_assume_R1W2 spinloop_assume_R1W3
//@compile-flags: -Zmiri-ignore-leaks -Zmiri-genmc -Zmiri-disable-stacked-borrows -Zmiri-genmc-verbose
//@compile-flags: -Zmiri-ignore-leaks -Zmiri-genmc-verbose
//@normalize-stderr-test: "Verification took .*s" -> "Verification took [TIME]s"
// This test is a translation of the GenMC test `treiber-stack-dynamic`, but with all code related to GenMC's hazard pointer API removed.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "2CoWR".
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "2+2W+2sc+scf".
// It tests correct handling of SeqCst fences combined with relaxed accesses.
@@ -1,4 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@revisions: release1 release2
// Translated from GenMC's test "2+2W+3sc+rel1" and "2+2W+3sc+rel2" (two variants that swap which store is `Release`).
@@ -1,5 +1,4 @@
//@revisions: weak sc
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@[sc]compile-flags: -Zmiri-disable-weak-memory-emulation
// Translated from GenMC's test "2+2W".
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "2+2W+4c".
//
// The pass tests "2w2w_3sc_1rel.rs", "2w2w_4rel" and "2w2w_4sc" and the fail test "2w2w_weak.rs" are related.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/IRIW-acq-sc" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/IRIWish" test.
// This test prints the values read by the different threads to check that we get all the values we expect.
@@ -1,30 +1,30 @@
Running GenMC Verification...
[1, 1, 1, 1, 1]
[1, 1, 1, 0, 1]
[1, 1, 1, 0, 0]
[1, 1, 0, 1, 1]
[1, 1, 0, 0, 1]
[1, 1, 0, 0, 0]
[1, 0, 1, 1, 1]
[1, 0, 1, 0, 1]
[1, 0, 1, 0, 0]
[1, 0, 0, 1, 1]
[1, 0, 0, 0, 1]
[0, 0, 0, 0, 0]
[0, 0, 0, 0, 1]
[0, 0, 0, 0, 0]
[0, 0, 0, 0, 1]
[0, 0, 0, 0, 0]
[0, 0, 0, 0, 1]
[0, 0, 0, 0, 0]
[0, 0, 0, 0, 1]
[0, 1, 0, 0, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 0, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 0, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 0, 0]
[0, 1, 0, 0, 1]
[1, 0, 0, 0, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 0, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 0, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 0, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 0, 0]
[0, 0, 0, 0, 1]
[0, 0, 0, 0, 0]
[0, 0, 0, 0, 1]
[0, 0, 0, 0, 0]
[0, 0, 0, 0, 1]
[0, 0, 0, 0, 0]
[0, 0, 0, 0, 1]
[0, 0, 0, 0, 0]
[1, 0, 0, 0, 1]
[1, 0, 0, 1, 1]
[1, 0, 1, 0, 0]
[1, 0, 1, 0, 1]
[1, 0, 1, 1, 1]
[1, 1, 0, 0, 0]
[1, 1, 0, 0, 1]
[1, 1, 0, 1, 1]
[1, 1, 1, 0, 0]
[1, 1, 1, 0, 1]
[1, 1, 1, 1, 1]
Verification complete with 28 executions. No errors found.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/LB" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/LB+incMPs" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/MP" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/MPU2+rels+acqf" test.
#![no_main]
@@ -1,38 +1,38 @@
Running GenMC Verification...
X=1, Y=2, a=Err(1), b=Ok(1), c=2
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=1
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=1
X=1, Y=2, a=Err(0), b=Ok(1), c=0
X=1, Y=2, a=Err(0), b=Ok(1), c=0
X=1, Y=2, a=Err(0), b=Ok(1), c=1
X=1, Y=2, a=Err(0), b=Ok(1), c=2
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=1
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=1
X=1, Y=2, a=Err(0), b=Ok(1), c=0
X=1, Y=2, a=Err(0), b=Ok(1), c=0
X=1, Y=2, a=Err(0), b=Ok(1), c=1
X=1, Y=2, a=Err(0), b=Ok(1), c=2
X=1, Y=1, a=Err(1), b=Err(0), c=0
X=1, Y=1, a=Err(1), b=Err(0), c=0
X=1, Y=1, a=Err(1), b=Err(0), c=1
X=1, Y=1, a=Err(1), b=Err(0), c=0
X=1, Y=1, a=Err(1), b=Err(0), c=0
X=1, Y=1, a=Err(1), b=Err(0), c=1
X=1, Y=2, a=Err(1), b=Ok(1), c=0
X=1, Y=2, a=Err(1), b=Ok(1), c=0
X=1, Y=2, a=Err(1), b=Ok(1), c=1
X=1, Y=2, a=Err(1), b=Ok(1), c=0
X=1, Y=2, a=Err(1), b=Ok(1), c=0
X=1, Y=2, a=Err(1), b=Ok(1), c=2
X=1, Y=3, a=Ok(2), b=Ok(1), c=0
X=1, Y=3, a=Ok(2), b=Ok(1), c=0
X=1, Y=3, a=Ok(2), b=Ok(1), c=1
X=1, Y=3, a=Ok(2), b=Ok(1), c=2
X=2, Y=3, a=Ok(2), b=Ok(1), c=3
X=1, Y=3, a=Ok(2), b=Ok(1), c=3
X=1, Y=3, a=Ok(2), b=Ok(1), c=2
X=1, Y=3, a=Ok(2), b=Ok(1), c=1
X=1, Y=3, a=Ok(2), b=Ok(1), c=0
X=1, Y=3, a=Ok(2), b=Ok(1), c=0
X=1, Y=1, a=Err(1), b=Err(0), c=1
X=1, Y=1, a=Err(1), b=Err(0), c=0
X=1, Y=1, a=Err(1), b=Err(0), c=0
X=1, Y=1, a=Err(1), b=Err(0), c=1
X=1, Y=1, a=Err(1), b=Err(0), c=0
X=1, Y=1, a=Err(1), b=Err(0), c=0
X=1, Y=2, a=Err(0), b=Ok(1), c=2
X=1, Y=2, a=Err(0), b=Ok(1), c=1
X=1, Y=2, a=Err(0), b=Ok(1), c=0
X=1, Y=2, a=Err(0), b=Ok(1), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=1
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=1
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=2, a=Err(0), b=Ok(1), c=2
X=1, Y=2, a=Err(0), b=Ok(1), c=1
X=1, Y=2, a=Err(0), b=Ok(1), c=0
X=1, Y=2, a=Err(0), b=Ok(1), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=1
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=1
X=1, Y=1, a=Err(0), b=Err(0), c=0
X=1, Y=1, a=Err(0), b=Err(0), c=0
Verification complete with 36 executions. No errors found.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/MPU+rels+acq" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/MP+incMP" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/MP+rels+acqf" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/SB" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/SB+2sc+scf" test.
#![no_main]
@@ -1,5 +1,4 @@
//@revisions: weak sc
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@[sc]compile-flags: -Zmiri-disable-weak-memory-emulation
// Translated from GenMC's "litmus/Z6.U" test.
@@ -1,20 +1,20 @@
Running GenMC Verification...
a=1, b=1, X=1, Y=3
a=1, b=0, X=1, Y=1
a=1, b=1, X=1, Y=1
a=1, b=1, X=1, Y=3
a=3, b=1, X=1, Y=3
a=1, b=0, X=1, Y=1
a=1, b=1, X=1, Y=1
a=3, b=0, X=1, Y=1
a=3, b=1, X=1, Y=1
a=2, b=1, X=1, Y=3
a=4, b=1, X=1, Y=4
a=3, b=1, X=1, Y=3
a=2, b=1, X=1, Y=2
a=2, b=0, X=1, Y=2
a=1, b=1, X=1, Y=1
a=1, b=0, X=1, Y=1
a=4, b=1, X=1, Y=1
a=2, b=1, X=1, Y=2
a=4, b=0, X=1, Y=1
a=1, b=1, X=1, Y=3
a=3, b=1, X=1, Y=3
a=1, b=1, X=1, Y=1
a=4, b=1, X=1, Y=1
a=1, b=0, X=1, Y=1
a=3, b=1, X=1, Y=1
a=3, b=0, X=1, Y=1
a=1, b=1, X=1, Y=3
a=1, b=1, X=1, Y=1
a=1, b=0, X=1, Y=1
Verification complete with 18 executions. No errors found.
@@ -1,24 +1,24 @@
Running GenMC Verification...
a=2, b=1, X=1, Y=3
a=4, b=1, X=1, Y=4
a=4, b=0, X=1, Y=4
a=3, b=1, X=1, Y=3
a=2, b=1, X=1, Y=2
a=2, b=0, X=1, Y=2
a=1, b=1, X=1, Y=1
a=1, b=0, X=1, Y=1
a=4, b=1, X=1, Y=1
a=4, b=0, X=1, Y=1
a=1, b=1, X=1, Y=3
a=1, b=0, X=1, Y=3
a=3, b=1, X=1, Y=3
a=1, b=1, X=1, Y=3
a=1, b=0, X=1, Y=1
a=1, b=1, X=1, Y=1
a=1, b=0, X=1, Y=3
a=1, b=1, X=1, Y=3
a=3, b=0, X=1, Y=3
a=1, b=1, X=1, Y=1
a=3, b=1, X=1, Y=3
a=1, b=0, X=1, Y=1
a=3, b=1, X=1, Y=1
a=1, b=1, X=1, Y=1
a=3, b=0, X=1, Y=1
a=1, b=1, X=1, Y=3
a=1, b=0, X=1, Y=3
a=1, b=1, X=1, Y=1
a=3, b=1, X=1, Y=1
a=2, b=1, X=1, Y=3
a=4, b=0, X=1, Y=4
a=4, b=1, X=1, Y=4
a=3, b=1, X=1, Y=3
a=2, b=0, X=1, Y=2
a=2, b=1, X=1, Y=2
a=4, b=0, X=1, Y=1
a=4, b=1, X=1, Y=1
a=1, b=0, X=1, Y=1
a=1, b=1, X=1, Y=1
Verification complete with 22 executions. No errors found.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/Z6+acq" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "litmus/atomicpo".
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "litmus/casdep".
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "litmus/ccr".
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "litmus/cii".
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoRR" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoRR0" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoRR1" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoRR2" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoRW" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "CoWR" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "litmus/cumul-release".
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/default" test.
#![no_main]
@@ -1,4 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@revisions: join no_join
// Translated from GenMC's "litmus/detour" test.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "fr+w+w+w+reads" test.
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's test "litmus/inc2w".
#![no_main]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
#![no_main]
#[path = "../../../utils/genmc.rs"]
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// Translated from GenMC's "litmus/riwi" test.
#![no_main]
@@ -1,4 +1,4 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows -Zmiri-genmc-estimate
//@compile-flags: -Zmiri-genmc-estimate
// Translated from GenMC's "litmus/viktor-relseq" test.
//
@@ -1,4 +1,4 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows -Zmiri-genmc-verbose
//@compile-flags: -Zmiri-genmc-verbose
//@normalize-stderr-test: "Verification took .*s" -> "Verification took [TIME]s"
// Test that we can detect a deadlock involving `std::sync::Mutex` in GenMC mode.
@@ -1,11 +1,10 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows -Zmiri-genmc-verbose
//@compile-flags: -Zmiri-genmc-verbose
//@normalize-stderr-test: "Verification took .*s" -> "Verification took [TIME]s"
// Test various features of the `std::sync::Mutex` API with GenMC.
// Miri running with GenMC intercepts the Mutex functions `lock`, `try_lock` and `unlock`, instead of running their actual implementation.
// This interception should not break any functionality.
//
// FIXME(genmc): Once GenMC supports mixed size accesses, add stack/heap allocated Mutexes to the test.
// FIXME(genmc): Once the actual implementation of mutexes can be used in GenMC mode and there is a setting to disable Mutex interception: Add test revision without interception.
//
// Miri provides annotations to GenMC for the condition required to unblock a thread blocked on a Mutex lock call.
@@ -25,7 +24,6 @@
const REPS: u64 = 3;
static LOCK: Mutex<u64> = Mutex::new(0);
static OTHER_LOCK: Mutex<u64> = Mutex::new(1234);
#[unsafe(no_mangle)]
fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
@@ -35,7 +33,8 @@ fn miri_start(_argc: isize, _argv: *const *const u8) -> isize {
fn main_() {
// Two mutexes should not interfere, holding this guard does not affect the other mutex.
let other_guard = OTHER_LOCK.lock().unwrap();
let other_lock = Mutex::new(1234);
let other_guard = other_lock.lock().unwrap();
let guard = LOCK.lock().unwrap();
// Trying to lock should fail if the mutex is already held.
@@ -1,5 +1,5 @@
//@ revisions: bounded123 bounded321 replaced123 replaced321
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows -Zmiri-genmc-verbose
//@compile-flags: -Zmiri-genmc-verbose
//@normalize-stderr-test: "Verification took .*s" -> "Verification took [TIME]s"
// This test uses GenMC assume statements to bound or replace spinloops.
@@ -1,4 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@revisions: check_count try_upgrade
// Check that various operations on `std::sync::Arc` are handled properly in GenMC mode.
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// A lot of code runs before main, which we should be able to handle in GenMC mode.
fn main() {}
@@ -1,5 +1,3 @@
//@compile-flags: -Zmiri-genmc -Zmiri-disable-stacked-borrows
// We should be able to spawn and join standard library threads in GenMC mode.
// Since these threads do nothing, we should only explore 1 program execution.
@@ -1,4 +1,4 @@
//@compile-flags: -Zmiri-ignore-leaks -Zmiri-genmc -Zmiri-disable-stacked-borrows
//@compile-flags: -Zmiri-ignore-leaks
use std::alloc::{Layout, alloc};
use std::cell::Cell;
@@ -11,7 +11,7 @@
// same fd at the same time.
fn main() {
let (server_sockfd, addr) = net::make_listener_ipv4(0).unwrap();
let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
let client_sockfd =
unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
@@ -25,30 +25,43 @@ fn main() {
let mut buffer = [22u8; 128];
let bytes_written = unsafe {
errno_result(net::send_all(peerfd, buffer.as_mut_ptr().cast(), buffer.len(), 0))
.unwrap()
errno_result(libc_utils::write_all_generic(
buffer.as_mut_ptr().cast(),
buffer.len(),
libc_utils::NoRetry,
|buf, len| libc::send(peerfd, buf, len, 0),
))
.unwrap()
};
assert_eq!(bytes_written as usize, 128);
});
net::connect_ipv4(client_sockfd, addr);
net::connect_ipv4(client_sockfd, addr).unwrap();
let reader_thread = thread::spawn(move || {
let mut buffer = [0u8; 8];
let bytes_read = unsafe {
errno_result(net::recv_all(client_sockfd, buffer.as_mut_ptr().cast(), buffer.len(), 0))
.unwrap()
unsafe {
errno_result(libc_utils::read_all_generic(
buffer.as_mut_ptr().cast(),
buffer.len(),
libc_utils::NoRetry,
|buf, count| libc::recv(client_sockfd, buf, count, 0),
))
.unwrap()
};
assert_eq!(bytes_read, 8);
assert_eq!(&buffer, &[22u8; 8]);
});
let mut buffer = [0u8; 8];
let bytes_read = unsafe {
errno_result(net::recv_all(client_sockfd, buffer.as_mut_ptr().cast(), buffer.len(), 0))
.unwrap()
unsafe {
errno_result(libc_utils::read_all_generic(
buffer.as_mut_ptr().cast(),
buffer.len(),
libc_utils::NoRetry,
|buf, count| libc::recv(client_sockfd, buf, count, 0),
))
.unwrap()
};
assert_eq!(bytes_read, 8);
assert_eq!(&buffer, &[22u8; 8]);
reader_thread.join().unwrap();
@@ -0,0 +1,662 @@
//@ignore-target: windows
//@compile-flags: -Zmiri-disable-isolation
//@revisions: windows_host unix_host
//@[unix_host] ignore-host: windows
//@[windows_host] only-host: windows
#![feature(io_error_inprogress)]
#[path = "../../utils/libc.rs"]
mod libc_utils;
use std::io::ErrorKind;
use std::thread;
use std::time::Duration;
use libc_utils::*;
const TEST_BYTES: &[u8] = b"these are some test bytes!";
fn main() {
test_fcntl_nonblock_opt();
#[cfg(any(
target_os = "linux",
target_os = "android",
target_os = "freebsd",
target_os = "solaris",
target_os = "illumos"
))]
test_sock_nonblock_opt();
#[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
test_ioctl_fionbio_op();
test_accept_nonblock();
#[cfg(any(
target_os = "linux",
target_os = "android",
target_os = "freebsd",
target_os = "solaris",
target_os = "illumos"
))]
test_accept4_sock_nonblock_opt();
test_connect_nonblock();
test_send_recv_nonblock();
#[cfg(any(
target_os = "linux",
target_os = "android",
target_os = "freebsd",
target_os = "solaris",
target_os = "illumos"
))]
test_send_recv_dontwait();
test_write_read_nonblock();
test_getpeername_ipv4_nonblock();
test_getpeername_ipv4_nonblock_no_peer();
}
/// Test that setting the O_NONBLOCK flag changes the blocking state of a socket.
fn test_fcntl_nonblock_opt() {
    let fd = unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    // Flip the socket into non-blocking mode via F_SETFL.
    unsafe { errno_check(libc::fcntl(fd, libc::F_SETFL, libc::O_NONBLOCK)) };
    // Read the status flags back and verify O_NONBLOCK is now set.
    let status = unsafe { errno_result(libc::fcntl(fd, libc::F_GETFL, 0)).unwrap() };
    assert_eq!(status & libc::O_NONBLOCK, libc::O_NONBLOCK);
    // Clear the status flags again, restoring blocking mode.
    unsafe { errno_check(libc::fcntl(fd, libc::F_SETFL, 0)) };
    // Verify O_NONBLOCK is gone, i.e. the socket is blocking again.
    let status = unsafe { errno_result(libc::fcntl(fd, libc::F_GETFL, 0)).unwrap() };
    assert_eq!(status & libc::O_NONBLOCK, 0);
}
#[cfg(any(
    target_os = "linux",
    target_os = "android",
    target_os = "freebsd",
    target_os = "solaris",
    target_os = "illumos"
))]
/// Test creating a non-blocking socket by using the SOCK_NONBLOCK option
/// for the `socket` syscall.
fn test_sock_nonblock_opt() {
    // Request non-blocking mode directly at socket creation time.
    let fd = unsafe {
        errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM | libc::SOCK_NONBLOCK, 0))
            .unwrap()
    };
    // The O_NONBLOCK status flag must then be visible through fcntl.
    let status = unsafe { errno_result(libc::fcntl(fd, libc::F_GETFL, 0)).unwrap() };
    assert_eq!(status & libc::O_NONBLOCK, libc::O_NONBLOCK);
}
#[cfg(not(any(target_os = "solaris", target_os = "illumos")))]
/// Test changing the blocking state of a socket using the `ioctl(fd, FIONBIO, ...)`
/// syscall.
fn test_ioctl_fionbio_op() {
    let fd = unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    // A non-zero FIONBIO argument enables non-blocking mode.
    unsafe {
        let mut enable: libc::c_int = 1;
        errno_check(libc::ioctl(fd, libc::FIONBIO, &mut enable));
    }
    // Verify via fcntl that the socket is now non-blocking.
    let status = unsafe { errno_result(libc::fcntl(fd, libc::F_GETFL, 0)).unwrap() };
    assert_eq!(status & libc::O_NONBLOCK, libc::O_NONBLOCK);
    // A zero FIONBIO argument switches the socket back to blocking mode.
    unsafe {
        let mut enable: libc::c_int = 0;
        errno_check(libc::ioctl(fd, libc::FIONBIO, &mut enable));
    }
    // Verify via fcntl that the socket is blocking again.
    let status = unsafe { errno_result(libc::fcntl(fd, libc::F_GETFL, 0)).unwrap() };
    assert_eq!(status & libc::O_NONBLOCK, 0);
}
/// Test that nonblocking TCP server sockets return [`ErrorKind::WouldBlock`] when trying
/// to accept when no incoming connection exists. This also tests that nonblocking server sockets
/// are still able to accept incoming connections should they already exist before the `accept` or
/// `accept4` syscall is called.
fn test_accept_nonblock() {
    // Create a listening server socket via the shared test helper.
    let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
    let client_sockfd =
        unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    unsafe {
        // Change server socket to be non-blocking.
        errno_check(libc::fcntl(server_sockfd, libc::F_SETFL, libc::O_NONBLOCK));
    }
    // This should fail as we don't have an incoming connection for this address.
    let err = net::accept_ipv4(server_sockfd).unwrap_err();
    // Assert that either EAGAIN or EWOULDBLOCK was returned
    // (both map to `ErrorKind::WouldBlock`).
    assert_eq!(err.kind(), ErrorKind::WouldBlock);
    // Spawn the server thread.
    let server_thread = thread::spawn(move || {
        // Sleep to yield back to the main thread, ensuring that the `connect`
        // syscall was called before we call the `accept` on the server.
        thread::sleep(Duration::from_millis(10));
        // By now a connection is pending, so the non-blocking accept succeeds.
        net::accept_ipv4(server_sockfd).unwrap();
    });
    net::connect_ipv4(client_sockfd, addr).unwrap();
    server_thread.join().unwrap();
}
#[cfg(any(
    target_os = "linux",
    target_os = "android",
    target_os = "freebsd",
    target_os = "solaris",
    target_os = "illumos"
))]
/// Test that calling `accept4` with the SOCK_NONBLOCK flag produces
/// a non-blocking peer socket.
fn test_accept4_sock_nonblock_opt() {
    let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
    let client_sockfd =
        unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    // Spawn the server thread.
    let server_thread = thread::spawn(move || {
        // Accept the incoming connection, requesting a non-blocking peer socket
        // via the `accept4` flags argument.
        let (peerfd, _) = net::sockname_ipv4(|storage, len| unsafe {
            libc::accept4(server_sockfd, storage, len, libc::SOCK_NONBLOCK)
        })
        .unwrap();
        let flags = unsafe { errno_result(libc::fcntl(peerfd, libc::F_GETFL, 0)).unwrap() };
        // Ensure that peer socket is non-blocking.
        assert_eq!(flags & libc::O_NONBLOCK, libc::O_NONBLOCK);
        let mut buffer = [0u8; 8];
        // Reading from a socket should return EWOULDBLOCK when there is no
        // data written into it.
        let err = unsafe {
            errno_result(libc::read(peerfd, buffer.as_mut_ptr().cast(), buffer.len())).unwrap_err()
        };
        assert_eq!(err.kind(), ErrorKind::WouldBlock);
    });
    // The (blocking) connect provides the incoming connection the server accepts.
    net::connect_ipv4(client_sockfd, addr).unwrap();
    server_thread.join().unwrap();
}
/// Test that connecting to a server socket works when the client
/// socket is non-blocking before the `connect` call.
fn test_connect_nonblock() {
    let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
    let client_sockfd =
        unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    unsafe {
        // Change client socket to be non-blocking.
        errno_check(libc::fcntl(client_sockfd, libc::F_SETFL, libc::O_NONBLOCK));
    }
    // Spawn the server thread.
    let server_thread = thread::spawn(move || {
        net::accept_ipv4(server_sockfd).unwrap();
    });
    // Yield to server thread to ensure that it's currently accepting.
    thread::sleep(Duration::from_millis(10));
    // Non-blocking connects always "fail" with EINPROGRESS.
    let err = net::connect_ipv4(client_sockfd, addr).unwrap_err();
    assert_eq!(err.kind(), ErrorKind::InProgress);
    // Poll `getpeername` until the connection has been established:
    // it only succeeds once the socket is actually connected.
    loop {
        let result = net::sockname_ipv4(|storage, len| unsafe {
            libc::getpeername(client_sockfd, storage, len)
        });
        match result {
            Ok(_) => {
                // The client is now connected.
                break;
            }
            Err(err) if err.kind() == ErrorKind::NotConnected => {
                // The client is still connecting; back off briefly and retry.
                thread::sleep(Duration::from_millis(10));
            }
            Err(err) => panic!("unexpected error whilst ensuring connection: {err}"),
        }
    }
    server_thread.join().unwrap();
}
/// Test sending bytes into and receiving bytes from a connected stream without blocking.
fn test_send_recv_nonblock() {
    let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
    let client_sockfd =
        unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    // Spawn the server thread.
    let server_thread = thread::spawn(move || {
        let (peerfd, _) = net::accept_ipv4(server_sockfd).unwrap();
        // `peerfd` is a blocking socket now. But that's okay, the client still does non-blocking
        // reads/writes.
        // Yield back to client so that it starts receiving before we start sending.
        thread::sleep(Duration::from_millis(10));
        // Send the whole test payload (possibly in several partial sends).
        unsafe {
            errno_result(libc_utils::write_all_generic(
                TEST_BYTES.as_ptr().cast(),
                TEST_BYTES.len(),
                libc_utils::NoRetry,
                |buf, count| libc::send(peerfd, buf, count, 0),
            ))
            .unwrap()
        };
        // The buffer should contain `TEST_BYTES` at the beginning.
        // This will block until the client sent us this data.
        let mut buffer = [0; TEST_BYTES.len()];
        unsafe {
            errno_result(libc_utils::read_all_generic(
                buffer.as_mut_ptr().cast(),
                buffer.len(),
                libc_utils::NoRetry,
                |buf, count| libc::recv(peerfd, buf, count, 0),
            ))
            .unwrap()
        };
        assert_eq!(&buffer, TEST_BYTES);
    });
    // Establish the connection (the client socket is still blocking here).
    net::connect_ipv4(client_sockfd, addr).unwrap();
    unsafe {
        // Change client socket to be non-blocking.
        errno_check(libc::fcntl(client_sockfd, libc::F_SETFL, libc::O_NONBLOCK));
    }
    // We are connected and the server socket is not writing.
    let mut buffer = [0; TEST_BYTES.len()];
    // Receiving from a socket when the peer is not writing is
    // not possible without blocking.
    let err = unsafe {
        errno_result(libc::recv(client_sockfd, buffer.as_mut_ptr().cast(), buffer.len(), 0))
            .unwrap_err()
    };
    assert_eq!(err.kind(), ErrorKind::WouldBlock);
    // Try to receive bytes from the peer socket without blocking.
    // Since the peer socket might do partial writes, we might need to
    // sleep multiple times until we received everything.
    unsafe {
        errno_result(libc_utils::read_all_generic(
            buffer.as_mut_ptr().cast(),
            buffer.len(),
            libc_utils::RetryAfter(Duration::from_millis(10)),
            |buf, count| libc::recv(client_sockfd, buf, count, 0),
        ))
        .unwrap()
    };
    assert_eq!(&buffer, TEST_BYTES);
    // Test non-blocking writing.
    // Sending into the empty buffer should succeed without blocking.
    unsafe {
        errno_result(libc_utils::write_all_generic(
            TEST_BYTES.as_ptr().cast(),
            TEST_BYTES.len(),
            libc_utils::NoRetry,
            |buf, count| libc::send(client_sockfd, buf, count, 0),
        ))
        .unwrap()
    };
    if !cfg!(windows_host) {
        // Keep sending data until the buffer is full and we block.
        // We cannot test this on Windows since there apparently the send buffer
        // never fills up, at least for localhost connections.
        let fill_buf = [1u8; 5_000_000];
        // This fills the in-flight socket buffers and thus should start blocking,
        // which for a non-blocking socket means failing with EWOULDBLOCK.
        let err = unsafe {
            errno_result(libc_utils::write_all_generic(
                fill_buf.as_ptr().cast(),
                fill_buf.len(),
                libc_utils::NoRetry,
                |buf, count| libc::send(client_sockfd, buf, count, 0),
            ))
            .unwrap_err()
        };
        assert_eq!(err.kind(), ErrorKind::WouldBlock)
    }
    server_thread.join().unwrap();
}
#[cfg(any(
    target_os = "linux",
    target_os = "android",
    target_os = "freebsd",
    target_os = "solaris",
    target_os = "illumos"
))]
/// Test sending bytes into and receiving bytes from a connected stream without blocking.
/// Instead of using non-blocking sockets, we test whether it works with blocking sockets
/// when passing the `libc::MSG_DONTWAIT` flag to the send and receive calls.
fn test_send_recv_dontwait() {
    let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
    let client_sockfd =
        unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    // Spawn the server thread.
    let server_thread = thread::spawn(move || {
        let (peerfd, _) = net::accept_ipv4(server_sockfd).unwrap();
        // Similar to above we use blocking operations on the server side.
        // Yield back to client so that it starts receiving before we start sending.
        thread::sleep(Duration::from_millis(10));
        // Send the whole test payload (possibly in several partial sends).
        unsafe {
            errno_result(libc_utils::write_all_generic(
                TEST_BYTES.as_ptr().cast(),
                TEST_BYTES.len(),
                libc_utils::NoRetry,
                |buf, count| libc::send(peerfd, buf, count, 0),
            ))
            .unwrap()
        };
        // The buffer should contain `TEST_BYTES` at the beginning.
        // This will block until the client sent us this data.
        let mut buffer = [0; TEST_BYTES.len()];
        unsafe {
            errno_result(libc_utils::read_all_generic(
                buffer.as_mut_ptr().cast(),
                buffer.len(),
                libc_utils::NoRetry,
                |buf, count| libc::recv(peerfd, buf, count, 0),
            ))
            .unwrap()
        };
        assert_eq!(&buffer, TEST_BYTES);
    });
    // Note: the client socket stays in blocking mode; only MSG_DONTWAIT
    // makes the individual send/recv calls non-blocking.
    net::connect_ipv4(client_sockfd, addr).unwrap();
    // We are connected and the server socket is not writing.
    let mut buffer = [0; TEST_BYTES.len()];
    // Receiving from a socket when the peer is not writing is
    // not possible without blocking.
    let err = unsafe {
        errno_result(libc::recv(
            client_sockfd,
            buffer.as_mut_ptr().cast(),
            buffer.len(),
            libc::MSG_DONTWAIT,
        ))
        .unwrap_err()
    };
    assert_eq!(err.kind(), ErrorKind::WouldBlock);
    // Try to receive bytes from the peer socket without blocking.
    // Since the peer socket might do partial writes, we might need to
    // sleep multiple times until we received everything.
    unsafe {
        errno_result(libc_utils::read_all_generic(
            buffer.as_mut_ptr().cast(),
            buffer.len(),
            libc_utils::RetryAfter(Duration::from_millis(10)),
            |buf, count| libc::recv(client_sockfd, buf, count, libc::MSG_DONTWAIT),
        ))
        .unwrap()
    };
    assert_eq!(&buffer, TEST_BYTES);
    // Test non-blocking writing.
    // Sending into the empty buffer should succeed without blocking.
    unsafe {
        errno_result(libc_utils::write_all_generic(
            TEST_BYTES.as_ptr().cast(),
            TEST_BYTES.len(),
            libc_utils::NoRetry,
            |buf, count| libc::send(client_sockfd, buf, count, libc::MSG_DONTWAIT),
        ))
        .unwrap()
    };
    if !cfg!(windows_host) {
        // Keep sending data until the buffer is full and we block.
        // We cannot test this on Windows since there apparently the send buffer
        // never fills up, at least for localhost connections.
        let fill_buf = [1u8; 5_000_000];
        // This fills the in-flight socket buffers; with MSG_DONTWAIT the send
        // then fails with EWOULDBLOCK instead of blocking.
        let err = unsafe {
            errno_result(libc_utils::write_all_generic(
                fill_buf.as_ptr().cast(),
                fill_buf.len(),
                libc_utils::NoRetry,
                |buf, count| libc::send(client_sockfd, buf, count, libc::MSG_DONTWAIT),
            ))
            .unwrap_err()
        };
        assert_eq!(err.kind(), ErrorKind::WouldBlock)
    }
    server_thread.join().unwrap();
}
/// Test writing bytes into and reading bytes from a connected stream without blocking.
fn test_write_read_nonblock() {
    let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
    let client_sockfd =
        unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    // Spawn the server thread.
    let server_thread = thread::spawn(move || {
        let (peerfd, _) = net::accept_ipv4(server_sockfd).unwrap();
        // Similar to above we use blocking operations on the server side.
        // Yield back to client so that it starts receiving before we start sending.
        thread::sleep(Duration::from_millis(10));
        // Write the whole test payload using plain `write`.
        let bytes_written = unsafe {
            errno_result(libc_utils::write_all(
                peerfd,
                TEST_BYTES.as_ptr().cast(),
                TEST_BYTES.len(),
            ))
            .unwrap()
        };
        assert_eq!(bytes_written as usize, TEST_BYTES.len());
        // The buffer should contain `TEST_BYTES` at the beginning.
        // This will block until the client sent us this data.
        let mut buffer = [0; TEST_BYTES.len()];
        unsafe {
            errno_result(libc_utils::read_all(peerfd, buffer.as_mut_ptr().cast(), buffer.len()))
                .unwrap()
        };
        assert_eq!(&buffer, TEST_BYTES);
    });
    // Establish the connection (the client socket is still blocking here).
    net::connect_ipv4(client_sockfd, addr).unwrap();
    unsafe {
        // Change client socket to be non-blocking.
        errno_check(libc::fcntl(client_sockfd, libc::F_SETFL, libc::O_NONBLOCK));
    }
    // We are connected and the server socket is not writing.
    let mut buffer = [0; TEST_BYTES.len()];
    // Reading from a socket when the peer is not writing is
    // not possible without blocking.
    let err = unsafe {
        errno_result(libc::read(
            client_sockfd,
            buffer.as_mut_ptr() as *mut libc::c_void,
            buffer.len(),
        ))
        .unwrap_err()
    };
    assert_eq!(err.kind(), ErrorKind::WouldBlock);
    // Try to read bytes from the peer socket without blocking.
    // Since the peer socket might do partial writes, we might need to
    // sleep multiple times until we read everything.
    unsafe {
        errno_result(libc_utils::read_all_generic(
            buffer.as_mut_ptr().cast(),
            buffer.len(),
            libc_utils::RetryAfter(Duration::from_millis(10)),
            |buf, count| libc::read(client_sockfd, buf, count),
        ))
        .unwrap()
    };
    assert_eq!(&buffer, TEST_BYTES);
    // Now we test non-blocking writing.
    // Writing into the empty buffer should succeed without blocking.
    let bytes_written = unsafe {
        errno_result(libc_utils::write_all(
            client_sockfd,
            TEST_BYTES.as_ptr().cast(),
            TEST_BYTES.len(),
        ))
        .unwrap()
    };
    assert_eq!(bytes_written as usize, TEST_BYTES.len());
    if !cfg!(windows_host) {
        // Keep sending data until the buffer is full and we block.
        // We cannot test this on Windows since there apparently the send buffer
        // never fills up, at least for localhost connections.
        let fill_buf = [1u8; 5_000_000];
        // This fills the in-flight socket buffers; the non-blocking write
        // then fails with EWOULDBLOCK instead of blocking.
        let err = unsafe {
            errno_result(libc_utils::write_all_generic(
                fill_buf.as_ptr().cast(),
                fill_buf.len(),
                libc_utils::NoRetry,
                |buf, count| libc::write(client_sockfd, buf, count),
            ))
            .unwrap_err()
        };
        assert_eq!(err.kind(), ErrorKind::WouldBlock)
    }
    server_thread.join().unwrap();
}
/// Test that the `getpeername` syscall successfully returns the peer address
/// for a non-blocking IPv4 socket whose connection has been successfully
/// established before calling the syscall.
fn test_getpeername_ipv4_nonblock() {
    let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
    let client_sockfd =
        unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    unsafe {
        // Change client socket to be non-blocking.
        errno_check(libc::fcntl(client_sockfd, libc::F_SETFL, libc::O_NONBLOCK));
    }
    // Spawn the server thread.
    let server_thread = thread::spawn(move || {
        net::accept_ipv4(server_sockfd).unwrap();
    });
    // Yield to server thread to ensure that it's currently accepting.
    thread::sleep(Duration::from_millis(10));
    // Non-blocking connects always "fail" with EINPROGRESS.
    let err = net::connect_ipv4(client_sockfd, addr).unwrap_err();
    assert_eq!(err.kind(), ErrorKind::InProgress);
    // Poll `getpeername` until the connection is established, then verify
    // that the reported peer address matches the address we connected to.
    loop {
        let peername_result = net::sockname_ipv4(|storage, len| unsafe {
            libc::getpeername(client_sockfd, storage, len)
        });
        match peername_result {
            Ok((_, peer_addr)) => {
                // Field-wise comparison: sockaddr_in has no PartialEq impl here.
                assert_eq!(addr.sin_family, peer_addr.sin_family);
                assert_eq!(addr.sin_port, peer_addr.sin_port);
                assert_eq!(addr.sin_addr.s_addr, peer_addr.sin_addr.s_addr);
                break;
            }
            Err(err) if err.kind() == ErrorKind::NotConnected => {
                // Connection is not yet established; wait and retry later.
                thread::sleep(Duration::from_millis(10))
            }
            Err(err) => {
                panic!("error whilst getting peername: {err}")
            }
        }
    }
    server_thread.join().unwrap();
}
/// Test that `getpeername` reports ENOTCONN for a non-blocking IPv4 socket
/// whose connection attempt is still pending and will never complete.
fn test_getpeername_ipv4_nonblock_no_peer() {
    let client_fd =
        unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    // Switch the client socket into non-blocking mode.
    unsafe { errno_check(libc::fcntl(client_fd, libc::F_SETFL, libc::O_NONBLOCK)) };
    // We cannot attempt to connect to a localhost address because
    // it could be the case that a socket from another test is
    // currently listening on `localhost:12321` because we bind to
    // random ports everywhere. For `192.0.2.1` we know that nothing is
    // listening because it's a blackhole address:
    // <https://www.rfc-editor.org/rfc/rfc5737>
    // The port `12321` is just a random non-zero port because Windows
    // and Apple hosts return EADDRNOTAVAIL when attempting to connect to
    // a zero port.
    let blackhole = net::sock_addr_ipv4([192, 0, 2, 1], 12321);
    // A non-blocking connect reports EINPROGRESS instead of blocking.
    let err = net::connect_ipv4(client_fd, blackhole).unwrap_err();
    assert_eq!(err.kind(), ErrorKind::InProgress);
    // The connection can never be accepted, so the peername must remain
    // unavailable: `getpeername` has to keep failing with ENOTCONN.
    match net::sockname_ipv4(|storage, len| unsafe {
        libc::getpeername(client_fd, storage, len)
    }) {
        Ok(_) => unreachable!(),
        Err(err) => assert_eq!(err.kind(), ErrorKind::NotConnected),
    }
}
@@ -1,8 +1,6 @@
//@ignore-target: windows # No libc socket on Windows
//@compile-flags: -Zmiri-disable-isolation
#![feature(io_error_inprogress)]
#[path = "../../utils/libc.rs"]
mod libc_utils;
#[path = "../../utils/mod.rs"]
@@ -18,7 +16,7 @@
const TEST_BYTES: &[u8] = b"these are some test bytes!";
fn main() {
test_socket_close();
test_create_close();
test_bind_ipv4();
test_bind_ipv4_reuseaddr();
test_set_reuseaddr_invalid_len();
@@ -50,11 +48,17 @@ fn main() {
test_getpeername_ipv6();
}
fn test_socket_close() {
unsafe {
let sockfd = errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap();
errno_check(libc::close(sockfd));
}
/// Test creating a socket and then closing it afterwards.
fn test_create_close() {
    let fd = unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
    // Freshly created sockets must start out in blocking mode,
    // i.e. without the O_NONBLOCK status flag.
    let status = unsafe { errno_result(libc::fcntl(fd, libc::F_GETFL, 0)).unwrap() };
    assert_eq!(status & libc::O_NONBLOCK, 0);
    // Closing the descriptor must succeed.
    unsafe { errno_check(libc::close(fd)) };
}
fn test_bind_ipv4() {
@@ -193,13 +197,18 @@ fn test_listen() {
/// - Connecting when the server is already accepting
/// - Accepting when there is already an incoming connection
fn test_accept_connect() {
let (server_sockfd, addr) = net::make_listener_ipv4(0).unwrap();
let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
let client_sockfd =
unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
// Spawn the server thread.
let server_thread = thread::spawn(move || {
net::accept_ipv4(server_sockfd).unwrap();
let (peerfd, _) = net::accept_ipv4(server_sockfd).unwrap();
let flags = unsafe { errno_result(libc::fcntl(peerfd, libc::F_GETFL, 0)).unwrap() };
// Ensure that peer socket is blocking.
assert_eq!(flags & libc::O_NONBLOCK, 0);
// Yield back to the client thread to test whether calling `connect` first also
// works.
@@ -213,7 +222,7 @@ fn test_accept_connect() {
thread::sleep(Duration::from_millis(10));
// Test connecting to an already accepting server.
net::connect_ipv4(client_sockfd, addr);
net::connect_ipv4(client_sockfd, addr).unwrap();
// Server thread should now be in its `sleep`.
// Test connecting when there is no actively ongoing `accept`.
@@ -221,7 +230,7 @@ fn test_accept_connect() {
let client_sockfd =
unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
net::connect_ipv4(client_sockfd, addr);
net::connect_ipv4(client_sockfd, addr).unwrap();
server_thread.join().unwrap();
}
@@ -231,7 +240,7 @@ fn test_accept_connect() {
/// We especially want to test that the peeking doesn't remove the bytes from
/// the queue.
fn test_send_peek_recv() {
let (server_sockfd, addr) = net::make_listener_ipv4(0).unwrap();
let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
let client_sockfd =
unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
@@ -240,19 +249,18 @@ fn test_send_peek_recv() {
let (peerfd, _) = net::accept_ipv4(server_sockfd).unwrap();
// Write the bytes into the stream.
let bytes_written = unsafe {
errno_result(libc_utils::net::send_all(
peerfd,
unsafe {
errno_result(libc_utils::write_all_generic(
TEST_BYTES.as_ptr().cast(),
TEST_BYTES.len(),
0,
libc_utils::NoRetry,
|buf, count| libc::send(peerfd, buf, count, 0),
))
.unwrap()
};
assert_eq!(bytes_written as usize, TEST_BYTES.len());
});
net::connect_ipv4(client_sockfd, addr);
net::connect_ipv4(client_sockfd, addr).unwrap();
let mut buffer = [0; TEST_BYTES.len()];
let bytes_read = unsafe {
@@ -273,17 +281,15 @@ fn test_send_peek_recv() {
// able to read the same bytes again into a new buffer.
let mut buffer = [0; TEST_BYTES.len()];
let bytes_read = unsafe {
errno_result(libc_utils::net::recv_all(
client_sockfd,
unsafe {
errno_result(libc_utils::read_all_generic(
buffer.as_mut_ptr().cast(),
buffer.len(),
0,
libc_utils::NoRetry,
|buf, count| libc::recv(client_sockfd, buf, count, 0),
))
.unwrap()
};
assert_eq!(bytes_read as usize, TEST_BYTES.len());
assert_eq!(&buffer, TEST_BYTES);
server_thread.join().unwrap();
@@ -291,7 +297,7 @@ fn test_send_peek_recv() {
/// Test that we actually do partial sends and partial receives for sockets.
fn test_partial_send_recv() {
let (server_sockfd, addr) = net::make_listener_ipv4(0).unwrap();
let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
let client_sockfd =
unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
@@ -313,7 +319,7 @@ fn test_partial_send_recv() {
});
});
net::connect_ipv4(client_sockfd, addr);
net::connect_ipv4(client_sockfd, addr).unwrap();
// Ensure we sometimes do incomplete writes.
check_nondet(|| {
@@ -325,11 +331,10 @@ fn test_partial_send_recv() {
let buffer = [0u8; 100_000];
// Write a lot of bytes into the socket such that we can test
// incomplete reads.
let bytes_written = unsafe {
unsafe {
errno_result(libc_utils::write_all(client_sockfd, buffer.as_ptr().cast(), buffer.len()))
.unwrap()
};
assert_eq!(bytes_written as usize, buffer.len());
server_thread.join().unwrap();
}
@@ -339,7 +344,7 @@ fn test_partial_send_recv() {
/// We want to test this because `write` and `read` should be the same as
/// `send` and `recv` with zero flags.
fn test_write_read() {
let (server_sockfd, addr) = net::make_listener_ipv4(0).unwrap();
let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
let client_sockfd =
unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
@@ -359,15 +364,13 @@ fn test_write_read() {
assert_eq!(bytes_written as usize, TEST_BYTES.len());
});
net::connect_ipv4(client_sockfd, addr);
net::connect_ipv4(client_sockfd, addr).unwrap();
let mut buffer = [0; TEST_BYTES.len()];
let bytes_read = unsafe {
unsafe {
errno_result(libc_utils::read_all(client_sockfd, buffer.as_mut_ptr().cast(), buffer.len()))
.unwrap()
};
assert_eq!(bytes_read as usize, TEST_BYTES.len());
assert_eq!(&buffer, TEST_BYTES);
server_thread.join().unwrap();
@@ -484,14 +487,14 @@ fn test_getsockname_ipv6() {
/// For a connected socket, the `getpeername` syscall should
/// return the same address as the socket was connected to.
fn test_getpeername_ipv4() {
let (server_sockfd, addr) = net::make_listener_ipv4(0).unwrap();
let (server_sockfd, addr) = net::make_listener_ipv4().unwrap();
let client_sockfd =
unsafe { errno_result(libc::socket(libc::AF_INET, libc::SOCK_STREAM, 0)).unwrap() };
// Spawn the server thread.
let server_thread = thread::spawn(move || net::accept_ipv4(server_sockfd).unwrap());
net::connect_ipv4(client_sockfd, addr);
net::connect_ipv4(client_sockfd, addr).unwrap();
let (_, peer_addr) = net::sockname_ipv4(|storage, len| unsafe {
libc::getpeername(client_sockfd, storage, len)
@@ -509,14 +512,14 @@ fn test_getpeername_ipv4() {
/// For a connected socket, the `getpeername` syscall should
/// return the same address as the socket was connected to.
fn test_getpeername_ipv6() {
let (server_sockfd, addr) = net::make_listener_ipv6(0).unwrap();
let (server_sockfd, addr) = net::make_listener_ipv6().unwrap();
let client_sockfd =
unsafe { errno_result(libc::socket(libc::AF_INET6, libc::SOCK_STREAM, 0)).unwrap() };
// Spawn the server thread.
let server_thread = thread::spawn(move || net::accept_ipv6(server_sockfd).unwrap());
net::connect_ipv6(client_sockfd, addr);
net::connect_ipv6(client_sockfd, addr).unwrap();
let (_, peer_addr) = net::sockname_ipv6(|storage, len| unsafe {
libc::getpeername(client_sockfd, storage, len)
@@ -0,0 +1,92 @@
//@ignore-target: windows # No libc socket on Windows
//@compile-flags: -Zmiri-disable-isolation -Zmiri-fixed-schedule
use std::io::{ErrorKind, Read, Write};
use std::net::{TcpListener, TcpStream};
use std::thread;
const TEST_BYTES: &[u8] = b"these are some test bytes!";
// Entry point: exercise non-blocking accept and non-blocking read through
// the std TCP API.
fn main() {
    test_accept_nonblock();
    test_send_recv_nonblock();
}
/// Test that a non-blocking [`TcpListener`] reports [`ErrorKind::WouldBlock`] when
/// `accept` is called while no incoming connection exists, and that it still
/// accepts a connection that is already queued when [`TcpListener::accept`] runs.
fn test_accept_nonblock() {
    let server = TcpListener::bind("127.0.0.1:0").unwrap();
    // Put the listener into non-blocking mode.
    server.set_nonblocking(true).unwrap();
    // Remember the OS-assigned local address so the client knows where to connect.
    let server_addr = server.local_addr().unwrap();
    // With no incoming connection queued, a non-blocking accept must not block;
    // it fails with WouldBlock instead.
    let accept_err = server.accept().unwrap_err();
    assert_eq!(accept_err.kind(), ErrorKind::WouldBlock);
    // Spawn the server thread; it accepts the connection the client queues below.
    let server_thread = thread::spawn(move || {
        // An already-queued incoming connection is accepted without blocking.
        let (_stream, _peer_addr) = server.accept().unwrap();
    });
    // `connect` blocks, which yields execution to the server thread.
    let _stream = TcpStream::connect(server_addr).unwrap();
    server_thread.join().unwrap();
}
/// Test sending bytes into and receiving bytes from a connected stream without blocking.
fn test_send_recv_nonblock() {
    let server = TcpListener::bind("127.0.0.1:0").unwrap();
    // The OS picks a free port; the client connects to this address.
    let server_addr = server.local_addr().unwrap();
    // Server thread: accept the client, then write the test payload.
    let server_thread = thread::spawn(move || {
        let (mut peer, _addr) = server.accept().unwrap();
        // Yield so the client performs its first (failing) read before
        // anything is written into the socket.
        thread::yield_now();
        peer.write_all(TEST_BYTES).unwrap();
    });
    // `connect` blocks, which yields execution to the server thread.
    let mut client = TcpStream::connect(server_addr).unwrap();
    // Switch the client side into non-blocking mode.
    client.set_nonblocking(true).unwrap();
    let mut received = [0; TEST_BYTES.len()];
    // With nothing written yet, a non-blocking read must fail with WouldBlock.
    let read_err = client.read_exact(&mut received).unwrap_err();
    assert_eq!(read_err.kind(), ErrorKind::WouldBlock);
    // Drain the stream without blocking. The server may write in several
    // chunks, so keep reading (yielding on WouldBlock) until the whole
    // payload has arrived.
    let mut total = 0;
    while total != TEST_BYTES.len() {
        match client.read(&mut received[total..]) {
            Ok(count) => total += count,
            Err(err) if err.kind() == ErrorKind::WouldBlock => {
                // Give the server thread a chance to write more data.
                thread::yield_now();
            }
            Err(err) => panic!("unexpected error whilst reading: {err}"),
        }
    }
    assert_eq!(&received, TEST_BYTES);
    server_thread.join().unwrap();
}
@@ -1,6 +0,0 @@
──────────────────────────────────────────────────
Warning: this tree is indicative only. Some tags may have been hidden.
0.. 2
| Act | └─┬──<TAG=root of the allocation>
| Res | └────<TAG=base.as_ptr(), base.as_ptr(), raw_parts.0, reconstructed.as_ptr(), reconstructed.as_ptr()>
──────────────────────────────────────────────────

Some files were not shown because too many files have changed in this diff Show More