std.Thread: delete Mutex.Recursive

Replaced by the lockStderr functions of std.Io. Trying to make
`std.process.stderr_thread_mutex` a bridge across different Io
implementations didn't work in practice.
This commit is contained in:
Andrew Kelley
2026-02-03 20:06:50 -08:00
parent d1e01e9431
commit d45f9aca14
7 changed files with 39 additions and 100 deletions
+1 -2
View File
@@ -2160,8 +2160,7 @@ pub const LockedStderr = struct {
/// For doing application-level writes to the standard error stream.
/// Coordinates also with debug-level writes that are ignorant of Io interface
/// and implementations. When this returns, `std.process.stderr_thread_mutex`
/// will be locked.
/// and implementations.
///
/// See also:
/// * `tryLockStderr`
+1 -1
View File
@@ -10,7 +10,7 @@ const IoUring = std.os.linux.IoUring;
/// Must be a thread-safe allocator.
gpa: Allocator,
mutex: std.Thread.Mutex,
mutex: Io.Mutex,
main_fiber_buffer: [@sizeOf(Fiber) + Fiber.max_result_size]u8 align(@alignOf(Fiber)),
threads: Thread.List,
+1 -1
View File
@@ -15,7 +15,7 @@ const posix = std.posix;
/// Must be a thread-safe allocator.
gpa: Allocator,
mutex: std.Thread.Mutex,
mutex: Io.Mutex,
main_fiber_buffer: [@sizeOf(Fiber) + Fiber.max_result_size]u8 align(@alignOf(Fiber)),
threads: Thread.List,
+34 -5
View File
@@ -67,6 +67,9 @@ stderr_writer: File.Writer = .{
},
stderr_mode: Io.Terminal.Mode = .no_color,
stderr_writer_initialized: bool = false,
stderr_mutex: Io.Mutex = .init,
stderr_mutex_locker: std.Thread.Id = Thread.invalid_id,
stderr_mutex_lock_count: usize = 0,
argv0: Argv0,
environ: Environ,
@@ -689,6 +692,13 @@ const Thread = struct {
threadlocal var current: ?*Thread = null;
/// A value that does not alias any other thread id.
const invalid_id: std.Thread.Id = std.math.maxInt(std.Thread.Id);
fn currentId() std.Thread.Id {
return if (current) |t| t.id else std.Thread.getCurrentId();
}
/// The thread is neither in a syscall nor entering one, but we want to check for cancelation
/// anyway. If there is a pending cancel request, acknowledge it and return `error.Canceled`.
fn checkCancel() Io.Cancelable!void {
@@ -13502,15 +13512,29 @@ fn netLookupFallible(
fn lockStderr(userdata: ?*anyopaque, terminal_mode: ?Io.Terminal.Mode) Io.Cancelable!Io.LockedStderr {
const t: *Threaded = @ptrCast(@alignCast(userdata));
// Only global mutex since this is Threaded.
process.stderr_thread_mutex.lock();
const current_thread_id = Thread.currentId();
if (@atomicLoad(std.Thread.Id, &t.stderr_mutex_locker, .unordered) != current_thread_id) {
mutexLock(&t.stderr_mutex);
assert(t.stderr_mutex_lock_count == 0);
@atomicStore(std.Thread.Id, &t.stderr_mutex_locker, current_thread_id, .unordered);
}
t.stderr_mutex_lock_count += 1;
return initLockedStderr(t, terminal_mode);
}
fn tryLockStderr(userdata: ?*anyopaque, terminal_mode: ?Io.Terminal.Mode) Io.Cancelable!?Io.LockedStderr {
const t: *Threaded = @ptrCast(@alignCast(userdata));
// Only global mutex since this is Threaded.
if (!process.stderr_thread_mutex.tryLock()) return null;
const current_thread_id = Thread.currentId();
if (@atomicLoad(std.Thread.Id, &t.stderr_mutex_locker, .unordered) != current_thread_id) {
if (!t.stderr_mutex.tryLock()) return null;
assert(t.stderr_mutex_lock_count == 0);
@atomicStore(std.Thread.Id, &t.stderr_mutex_locker, current_thread_id, .unordered);
}
t.stderr_mutex_lock_count += 1;
return try initLockedStderr(t, terminal_mode);
}
@@ -13541,7 +13565,12 @@ fn unlockStderr(userdata: ?*anyopaque) void {
};
t.stderr_writer.interface.end = 0;
t.stderr_writer.interface.buffer = &.{};
process.stderr_thread_mutex.unlock();
t.stderr_mutex_lock_count -= 1;
if (t.stderr_mutex_lock_count == 0) {
@atomicStore(std.Thread.Id, &t.stderr_mutex_locker, Thread.invalid_id, .unordered);
mutexUnlock(&t.stderr_mutex);
}
}
fn processCurrentPath(userdata: ?*anyopaque, buffer: []u8) process.CurrentPathError!usize {
+2 -12
View File
@@ -1,6 +1,5 @@
//! This struct represents a kernel thread, and acts as a namespace for
//! concurrency primitives that operate on kernel threads. For concurrency
//! primitives that interact with the I/O interface, see `std.Io`.
//! This struct represents a kernel thread.
const Thread = @This();
const builtin = @import("builtin");
const target = builtin.target;
@@ -14,13 +13,8 @@ const posix = std.posix;
const windows = std.os.windows;
const testing = std.testing;
pub const Mutex = struct {
pub const Recursive = @import("Thread/Mutex/Recursive.zig");
};
pub const use_pthreads = native_os != .windows and native_os != .wasi and builtin.link_libc;
const Thread = @This();
const Impl = if (native_os == .windows)
WindowsThreadImpl
else if (use_pthreads)
@@ -1604,10 +1598,6 @@ test "setName, getName" {
thread.join();
}
test {
_ = Mutex;
}
fn testIncrementNotify(io: Io, value: *usize, event: *Io.Event) void {
value.* += 1;
event.set(io);
-72
View File
@@ -1,72 +0,0 @@
//! A synchronization primitive enforcing atomic access to a shared region of
//! code known as the "critical section".
//!
//! Equivalent to `std.Mutex` except it allows the same thread to obtain the
//! lock multiple times.
//!
//! A recursive mutex is an abstraction layer on top of a regular mutex;
//! therefore it is recommended to use instead `std.Mutex` unless there is a
//! specific reason a recursive mutex is warranted.
const Recursive = @This();
const std = @import("../../std.zig");
const Io = std.Io;
const assert = std.debug.assert;
mutex: Io.Mutex,
thread_id: std.Thread.Id,
lock_count: usize,
pub const init: Recursive = .{
.mutex = .init,
.thread_id = invalid_thread_id,
.lock_count = 0,
};
/// Acquires the `Mutex` without blocking the caller's thread.
///
/// Returns `false` if the calling thread would have to block to acquire it.
///
/// Otherwise, returns `true` and the caller should `unlock()` the Mutex to release it.
pub fn tryLock(r: *Recursive) bool {
const current_thread_id = std.Thread.getCurrentId();
if (@atomicLoad(std.Thread.Id, &r.thread_id, .unordered) != current_thread_id) {
if (!r.mutex.tryLock()) return false;
assert(r.lock_count == 0);
@atomicStore(std.Thread.Id, &r.thread_id, current_thread_id, .unordered);
}
r.lock_count += 1;
return true;
}
/// Acquires the `Mutex`, blocking the current thread while the mutex is
/// already held by another thread.
///
/// The `Mutex` can be held multiple times by the same thread.
///
/// Once acquired, call `unlock` on the `Mutex` to release it, regardless
/// of whether the lock was already held by the same thread.
pub fn lock(r: *Recursive) void {
const current_thread_id = std.Thread.getCurrentId();
if (@atomicLoad(std.Thread.Id, &r.thread_id, .unordered) != current_thread_id) {
Io.Threaded.mutexLock(&r.mutex);
assert(r.lock_count == 0);
@atomicStore(std.Thread.Id, &r.thread_id, current_thread_id, .unordered);
}
r.lock_count += 1;
}
/// Releases the `Mutex` which was previously acquired with `lock` or `tryLock`.
///
/// It is undefined behavior to unlock from a different thread that it was
/// locked from.
pub fn unlock(r: *Recursive) void {
r.lock_count -= 1;
if (r.lock_count == 0) {
@atomicStore(std.Thread.Id, &r.thread_id, invalid_thread_id, .unordered);
Io.Threaded.mutexUnlock(&r.mutex);
}
}
/// A value that does not alias any other thread id.
const invalid_thread_id: std.Thread.Id = std.math.maxInt(std.Thread.Id);
-7
View File
@@ -20,13 +20,6 @@ pub const Args = @import("process/Args.zig");
pub const Environ = @import("process/Environ.zig");
pub const Preopens = @import("process/Preopens.zig");
/// This is the global, process-wide protection to coordinate stderr writes.
///
/// The primary motivation for recursive mutex here is so that a panic while
/// stderr mutex is held still dumps the stack trace and other debug
/// information.
pub var stderr_thread_mutex: std.Thread.Mutex.Recursive = .init;
/// A standard set of pre-initialized useful APIs for programs to take
/// advantage of. This is the type of the first parameter of the main function.
/// Applications wanting more flexibility can accept `Init.Minimal` instead.