From cb7be96644819e2e903c191e712d33b22f30a52a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jan 2026 17:05:14 -0800 Subject: [PATCH 01/65] std.Io: give File a nonblocking bit on Windows This tracks whether it is a file opened in synchronous mode, or something that supports APC. This will be needed in order to know whether concurrent batch operations on the file should return error.ConcurrencyUnavailable, or use APC to complete the batch. This patch also switches to using NtCreateFile directly in std.Io.Threaded for dirCreateFile, as well as NtReadFile for fileReadStreaming, making it handle files opened in synchronous mode as well as files opened in asynchronous mode. --- lib/std/Io/File.zig | 15 +++++ lib/std/Io/Threaded.zig | 123 +++++++++++++++++++++++------------ lib/std/Progress.zig | 1 + lib/std/os/windows/ntdll.zig | 2 +- 4 files changed, 100 insertions(+), 41 deletions(-) diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index e537755a33..d0f487b911 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -10,8 +10,20 @@ const assert = std.debug.assert; const Dir = std.Io.Dir; handle: Handle, +flags: Flags = .{}, pub const Handle = std.posix.fd_t; +pub const Flags = switch (native_os) { + .windows => packed struct(u1) { + /// * true: opened with MODE.IO.ASYNCHRONOUS + /// * false: opened with SYNCHRONOUS_ALERT or SYNCHRONOUS_NONALERT, or + /// not a file. + /// This is default-initialized to false as a workaround for + /// https://codeberg.org/ziglang/zig/issues/30842 + nonblocking: bool = false, + }, + else => packed struct(u0) {}, +}; pub const Reader = @import("File/Reader.zig"); pub const Writer = @import("File/Writer.zig"); @@ -77,6 +89,7 @@ pub fn stdout() File { return switch (native_os) { .windows => .{ .handle = std.os.windows.peb().ProcessParameters.hStdOutput, + .flags = .{ .nonblocking = false }, }, else => .{ .handle = std.posix.STDOUT_FILENO, @@ -88,6 +101,7 @@ pub fn stderr() File { return switch (native_os) { .windows => .{ .handle = std.os.windows.peb().ProcessParameters.hStdError, + .flags = .{ .nonblocking = false }, }, else => .{ .handle = std.posix.STDERR_FILENO, @@ -99,6 +113,7 @@ pub fn stdin() File { return switch (native_os) { .windows => .{ .handle = std.os.windows.peb().ProcessParameters.hStdInput, + .flags = .{ .nonblocking = false }, }, else => .{ .handle = std.posix.STDIN_FILENO, diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index f0562504fd..cdbb0fd182 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -1173,6 +1173,11 @@ const Syscall = struct { .blocked_canceling => return error.Canceled, // new status is `.canceled` } } + fn toApc(s: Syscall) Io.Cancelable!void { + // TODO set state to indicate instead of NtCancelSynchronousIoFile we + // need to use NtCancelIoFileEx + return s.checkCancel(); + } /// Marks this syscall as finished. 
fn finish(s: Syscall) void { const thread = s.thread orelse return; @@ -2759,7 +2764,12 @@ fn dirCreateDirPathOpenWasi( fn dirStat(userdata: ?*anyopaque, dir: Dir) Dir.StatError!Dir.Stat { const t: *Threaded = @ptrCast(@alignCast(userdata)); - const file: File = .{ .handle = dir.handle }; + const file: File = if (is_windows) .{ + .handle = dir.handle, + .flags = .{ .nonblocking = false }, + } else .{ + .handle = dir.handle, + }; return fileStat(t, file); } @@ -3682,7 +3692,10 @@ fn dirCreateFileWindows( errdefer windows.CloseHandle(handle); const exclusive = switch (flags.lock) { - .none => return .{ .handle = handle }, + .none => return .{ + .handle = handle, + .flags = .{ .nonblocking = false }, + }, .shared => false, .exclusive => true, }; @@ -3702,7 +3715,10 @@ fn dirCreateFileWindows( )) { .SUCCESS => { syscall.finish(); - return .{ .handle = handle }; + return .{ + .handle = handle, + .flags = .{ .nonblocking = false }, + }; }, .INSUFFICIENT_RESOURCES => return syscall.fail(error.SystemResources), .LOCK_NOT_GRANTED => return syscall.fail(error.WouldBlock), @@ -4273,7 +4289,10 @@ pub fn dirOpenFileWtf16( errdefer w.CloseHandle(handle); const exclusive = switch (flags.lock) { - .none => return .{ .handle = handle }, + .none => return .{ + .handle = handle, + .flags = .{ .nonblocking = false }, + }, .shared => false, .exclusive => true, }; @@ -4296,7 +4315,10 @@ pub fn dirOpenFileWtf16( .ACCESS_VIOLATION => |err| return syscall.ntstatusBug(err), // bad io_status_block pointer else => |status| return syscall.unexpectedNtstatus(status), }; - return .{ .handle = handle }; + return .{ + .handle = handle, + .flags = .{ .nonblocking = false }, + }; } fn dirOpenFileWasi( @@ -8365,46 +8387,66 @@ fn fileReadStreamingPosix(file: File, data: []const []u8) File.Reader.Error!usiz } fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!usize { - const DWORD = windows.DWORD; var index: usize = 0; while (index < data.len and data[index].len == 0) index += 1; if (index == data.len) return 0; const buffer = data[index]; - const want_read_count: DWORD = @min(std.math.maxInt(DWORD), buffer.len); - const syscall: Syscall = try .start(); - while (true) { - var n: DWORD = undefined; - if (windows.kernel32.ReadFile(file.handle, buffer.ptr, want_read_count, &n, null) != 0) { - syscall.finish(); - return n; + var io_status_block: windows.IO_STATUS_BLOCK = undefined; + + read: { + const syscall: Syscall = try .start(); + while (true) { + switch (windows.ntdll.NtReadFile( + file.handle, + null, // event + noopApc, // apc callback + null, // apc context + &io_status_block, + buffer.ptr, + @min(std.math.maxInt(u32), buffer.len), + null, // byte offset + null, // key + )) { + .SUCCESS => break :read syscall.finish(), + .PENDING => break, + .CANCELLED => { + try syscall.checkCancel(); + continue; + }, + .INVALID_PARAMETER => |err| return syscall.ntstatusBug(err), // wrong value for flags.nonblocking + else => |status| return syscall.unexpectedNtstatus(status), + } } - switch (windows.GetLastError()) { - .IO_PENDING => |err| { - syscall.finish(); - return windows.errorBug(err); - }, - .OPERATION_ABORTED => { - try syscall.checkCancel(); - continue; - }, - .BROKEN_PIPE, .HANDLE_EOF => { - syscall.finish(); - return 0; - }, - .NETNAME_DELETED => if (is_debug) unreachable else return error.Unexpected, - .LOCK_VIOLATION => return syscall.fail(error.LockViolation), - .ACCESS_DENIED => return syscall.fail(error.AccessDenied), - .INVALID_HANDLE => if (is_debug) unreachable else return error.Unexpected, 
- // TODO: Determine if INVALID_FUNCTION is possible in more scenarios than just passing - // a handle to a directory. - .INVALID_FUNCTION => return syscall.fail(error.IsDir), - else => |err| { - syscall.finish(); - return windows.unexpectedError(err); - }, + try syscall.toApc(); + while (true) { + switch (windows.ntdll.NtDelayExecution(1, null)) { + .USER_APC => break syscall.finish(), + .SUCCESS, .CANCELLED => { + try syscall.checkCancel(); + continue; + }, + else => |status| return syscall.unexpectedNtstatus(status), + } } } + + switch (io_status_block.u.Status) { + .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => {}, + .ACCESS_DENIED => return error.AccessDenied, + else => |status| return windows.unexpectedStatus(status), + } + return io_status_block.Information; +} + +fn noopApc( + apc_context: ?*anyopaque, + io_status_block: *windows.IO_STATUS_BLOCK, + unused: windows.ULONG, +) callconv(.winapi) void { + _ = apc_context; + _ = io_status_block; + _ = unused; } fn fileReadPositionalPosix(file: File, data: []const []u8, offset: u64) File.ReadPositionalError!usize { @@ -14560,9 +14602,9 @@ fn processSpawnWindows(userdata: ?*anyopaque, options: process.SpawnOptions) pro return .{ .id = piProcInfo.hProcess, .thread_handle = piProcInfo.hThread, - .stdin = if (g_hChildStd_IN_Wr) |h| .{ .handle = h } else null, - .stdout = if (g_hChildStd_OUT_Rd) |h| .{ .handle = h } else null, - .stderr = if (g_hChildStd_ERR_Rd) |h| .{ .handle = h } else null, + .stdin = if (g_hChildStd_IN_Wr) |h| .{ .handle = h, .flags = .{ .nonblocking = true } } else null, + .stdout = if (g_hChildStd_OUT_Rd) |h| .{ .handle = h, .flags = .{ .nonblocking = true } } else null, + .stderr = if (g_hChildStd_ERR_Rd) |h| .{ .handle = h, .flags = .{ .nonblocking = true } } else null, .request_resource_usage_statistics = options.request_resource_usage_statistics, }; } @@ -15696,6 +15738,7 @@ fn progressParentFile(userdata: ?*anyopaque) std.Progress.ParentFileError!File { .pointer => @ptrFromInt(int), else => return error.UnsupportedOperation, }, + .flags = if (is_windows) .{ .nonblocking = true } else .{}, }; } diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 5ccc46778b..5da6111079 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -979,6 +979,7 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff if (main_parent == .unused) continue; const file: Io.File = .{ .handle = main_storage.getIpcFd() orelse continue, + .flags = if (is_windows) .{ .nonblocking = true } else .{}, }; const opt_saved_metadata = findOld(file.handle, old_ipc_metadata_fds, old_ipc_metadata); var bytes_read: usize = 0; diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig index f61cbbf5b8..774ca28f19 100644 --- a/lib/std/os/windows/ntdll.zig +++ b/lib/std/os/windows/ntdll.zig @@ -596,7 +596,7 @@ pub extern "ntdll" fn NtCancelSynchronousIoFile( pub extern "ntdll" fn NtDelayExecution( Alertable: BOOLEAN, - DelayInterval: *const LARGE_INTEGER, + DelayInterval: ?*const LARGE_INTEGER, ) callconv(.winapi) NTSTATUS; pub extern "ntdll" fn NtCancelIoFileEx( From 8827488fcd556e6e95d97e838f781e6141ebde8c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 21 Jan 2026 19:08:52 -0800 Subject: [PATCH 02/65] std: back out the flags field of Io.File For now, let us refrain from putting the sync mode into the Io.File struct, and document that to do concurrent batch operations, any Windows file handles must be in asynchronous mode. 
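As an illustrative aside, a minimal sketch of where that mode comes from
(assuming the usual NT CreateOptions flags; this snippet is not part of
the change itself):

    const windows = @import("std").os.windows;

    // Hedged sketch: the sync/async mode of a handle is fixed at open time
    // by NtCreateFile's CreateOptions. Including FILE_SYNCHRONOUS_IO_NONALERT
    // (or the _ALERT variant) yields a synchronous handle, on which NtReadFile
    // blocks; including neither yields an asynchronous handle, on which
    // NtReadFile may return .PENDING and later complete via an APC.
    const sync_create_options: windows.ULONG = windows.FILE_SYNCHRONOUS_IO_NONALERT;
    const async_create_options: windows.ULONG = 0; // no synchronous flag: APC-capable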
The consequence of violating this requirement is neither illegal
behavior, nor an error, but that concurrency is lost. In other words,
deadlock might occur. This avoids the need for a flags field.

partial revert of 2faf14200f58ee72ec3a13e894d765f59e6483a9
---
 lib/std/Io/File.zig     | 15 ---------------
 lib/std/Io/Threaded.zig | 36 +++++++++---------------------------
 lib/std/Progress.zig    |  1 -
 3 files changed, 9 insertions(+), 43 deletions(-)

diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig
index d0f487b911..e537755a33 100644
--- a/lib/std/Io/File.zig
+++ b/lib/std/Io/File.zig
@@ -10,20 +10,8 @@ const assert = std.debug.assert;
 const Dir = std.Io.Dir;
 
 handle: Handle,
-flags: Flags = .{},
 
 pub const Handle = std.posix.fd_t;
-pub const Flags = switch (native_os) {
-    .windows => packed struct(u1) {
-        /// * true: opened with MODE.IO.ASYNCHRONOUS
-        /// * false: opened with SYNCHRONOUS_ALERT or SYNCHRONOUS_NONALERT, or
-        ///   not a file.
-        /// This is default-initialized to false as a workaround for
-        /// https://codeberg.org/ziglang/zig/issues/30842
-        nonblocking: bool = false,
-    },
-    else => packed struct(u0) {},
-};
 
 pub const Reader = @import("File/Reader.zig");
 pub const Writer = @import("File/Writer.zig");
@@ -89,7 +77,6 @@ pub fn stdout() File {
     return switch (native_os) {
         .windows => .{
             .handle = std.os.windows.peb().ProcessParameters.hStdOutput,
-            .flags = .{ .nonblocking = false },
         },
         else => .{
             .handle = std.posix.STDOUT_FILENO,
@@ -101,7 +88,6 @@ pub fn stderr() File {
     return switch (native_os) {
         .windows => .{
             .handle = std.os.windows.peb().ProcessParameters.hStdError,
-            .flags = .{ .nonblocking = false },
         },
         else => .{
             .handle = std.posix.STDERR_FILENO,
@@ -113,7 +99,6 @@ pub fn stdin() File {
     return switch (native_os) {
         .windows => .{
             .handle = std.os.windows.peb().ProcessParameters.hStdInput,
-            .flags = .{ .nonblocking = false },
         },
         else => .{
             .handle = std.posix.STDIN_FILENO,
diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index cdbb0fd182..1a2ca61c00 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -2764,12 +2764,7 @@ fn dirCreateDirPathOpenWasi(
 
 fn dirStat(userdata: ?*anyopaque, dir: Dir) Dir.StatError!Dir.Stat {
     const t: *Threaded = @ptrCast(@alignCast(userdata));
-    const file: File = if (is_windows) .{
-        .handle = dir.handle,
-        .flags = .{ .nonblocking = false },
-    } else .{
-        .handle = dir.handle,
-    };
+    const file: File = .{ .handle = dir.handle };
     return fileStat(t, file);
 }
 
@@ -3692,10 +3687,7 @@ fn dirCreateFileWindows(
     errdefer windows.CloseHandle(handle);
 
     const exclusive = switch (flags.lock) {
-        .none => return .{
-            .handle = handle,
-            .flags = .{ .nonblocking = false },
-        },
+        .none => return .{ .handle = handle },
         .shared => false,
         .exclusive => true,
     };
@@ -3715,10 +3707,7 @@ fn dirCreateFileWindows(
     )) {
         .SUCCESS => {
             syscall.finish();
-            return .{
-                .handle = handle,
-                .flags = .{ .nonblocking = false },
-            };
+            return .{ .handle = handle };
         },
         .INSUFFICIENT_RESOURCES => return syscall.fail(error.SystemResources),
         .LOCK_NOT_GRANTED => return syscall.fail(error.WouldBlock),
@@ -4289,10 +4278,7 @@ pub fn dirOpenFileWtf16(
     errdefer w.CloseHandle(handle);
 
     const exclusive = switch (flags.lock) {
-        .none => return .{
-            .handle = handle,
-            .flags = .{ .nonblocking = false },
-        },
+        .none => return .{ .handle = handle },
        .shared => false,
        .exclusive => true,
    };
@@ -4315,10 +4301,7 @@ pub fn dirOpenFileWtf16(
        .ACCESS_VIOLATION => |err| return syscall.ntstatusBug(err), // bad io_status_block pointer
        else => |status| return
syscall.unexpectedNtstatus(status), }; - return .{ - .handle = handle, - .flags = .{ .nonblocking = false }, - }; + return .{ .handle = handle }; } fn dirOpenFileWasi( @@ -8414,7 +8397,7 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us try syscall.checkCancel(); continue; }, - .INVALID_PARAMETER => |err| return syscall.ntstatusBug(err), // wrong value for flags.nonblocking + .INVALID_PARAMETER => |err| return syscall.ntstatusBug(err), // streaming read of async mode file else => |status| return syscall.unexpectedNtstatus(status), } } @@ -14602,9 +14585,9 @@ fn processSpawnWindows(userdata: ?*anyopaque, options: process.SpawnOptions) pro return .{ .id = piProcInfo.hProcess, .thread_handle = piProcInfo.hThread, - .stdin = if (g_hChildStd_IN_Wr) |h| .{ .handle = h, .flags = .{ .nonblocking = true } } else null, - .stdout = if (g_hChildStd_OUT_Rd) |h| .{ .handle = h, .flags = .{ .nonblocking = true } } else null, - .stderr = if (g_hChildStd_ERR_Rd) |h| .{ .handle = h, .flags = .{ .nonblocking = true } } else null, + .stdin = if (g_hChildStd_IN_Wr) |h| .{ .handle = h } else null, + .stdout = if (g_hChildStd_OUT_Rd) |h| .{ .handle = h } else null, + .stderr = if (g_hChildStd_ERR_Rd) |h| .{ .handle = h } else null, .request_resource_usage_statistics = options.request_resource_usage_statistics, }; } @@ -15738,7 +15721,6 @@ fn progressParentFile(userdata: ?*anyopaque) std.Progress.ParentFileError!File { .pointer => @ptrFromInt(int), else => return error.UnsupportedOperation, }, - .flags = if (is_windows) .{ .nonblocking = true } else .{}, }; } diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 5da6111079..5ccc46778b 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -979,7 +979,6 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff if (main_parent == .unused) continue; const file: Io.File = .{ .handle = main_storage.getIpcFd() orelse continue, - .flags = if (is_windows) .{ .nonblocking = true } else .{}, }; const opt_saved_metadata = findOld(file.handle, old_ipc_metadata_fds, old_ipc_metadata); var bytes_read: usize = 0; From 558025759632806c9d5b22b5a88d2cc4165c7fac Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 21 Jan 2026 19:16:35 -0800 Subject: [PATCH 03/65] std.Io.Threaded: add some temporary, choice panics --- lib/std/Io/Threaded.zig | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 1a2ca61c00..7a04d23e54 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -8398,7 +8398,8 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us continue; }, .INVALID_PARAMETER => |err| return syscall.ntstatusBug(err), // streaming read of async mode file - else => |status| return syscall.unexpectedNtstatus(status), + else => |status| std.debug.panic("fileReadStreamingWindows NtReadFile returned {t}", .{status}), + //else => |status| return syscall.unexpectedNtstatus(status), } } try syscall.toApc(); @@ -8409,7 +8410,8 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us try syscall.checkCancel(); continue; }, - else => |status| return syscall.unexpectedNtstatus(status), + else => |status| std.debug.panic("fileReadStreamingWindows NtDelayExecution returned {t}", .{status}), + //else => |status| return syscall.unexpectedNtstatus(status), } } } @@ -8417,7 +8419,8 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us switch 
(io_status_block.u.Status) { .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => {}, .ACCESS_DENIED => return error.AccessDenied, - else => |status| return windows.unexpectedStatus(status), + else => |status| std.debug.panic("fileReadStreamingWindows IO_STATUS_BLOCK returned {t}", .{status}), + //else => |status| return windows.unexpectedStatus(status), } return io_status_block.Information; } From 1e3072ec4664200934c13adf8452de490130a00f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 21 Jan 2026 21:05:15 -0800 Subject: [PATCH 04/65] std.Io.Threaded: introduce Thread.InterruptMethod implements APC cancelation except for the actual call to NtCancelIoFileEx --- lib/std/Io/Threaded.zig | 241 +++++++++++++++++++++++++--------------- 1 file changed, 149 insertions(+), 92 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 7a04d23e54..a02ccb8268 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -339,8 +339,8 @@ const Group = struct { .canceled => true, .parked => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, }; if (result) { @@ -379,7 +379,10 @@ const Group = struct { while (it) |thread| : (it = thread.next) { // This non-mutating RMW exists for ordering reasons: see comment in `Group.Task.start` for reasons. _ = thread.status.fetchOr(.{ .cancelation = @enumFromInt(0), .awaitable = .null }, .release); - if (thread.cancelAwaitable(.fromGroup(g.ptr))) any_blocked = true; + if (thread.cancelAwaitable(.fromGroup(g.ptr))) |method| { + thread.interrupt_method = method; + any_blocked = true; + } } return any_blocked; } @@ -391,7 +394,7 @@ const Group = struct { var any_signaled = false; var it = t.worker_threads.load(.acquire); // acquire `Thread` values while (it) |thread| : (it = thread.next) { - if (thread.signalCanceledSyscall(t, .fromGroup(g.ptr))) any_signaled = true; + if (thread.signalCanceledSyscall(t, .fromGroup(g.ptr), thread.interrupt_method)) any_signaled = true; } return any_signaled; } @@ -543,8 +546,8 @@ const Future = struct { .canceled => true, .parked => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, }; thread.status.store(.{ .cancelation = .none, .awaitable = .null }, .monotonic); @@ -573,11 +576,15 @@ const Future = struct { num_completed: *std.atomic.Value(u32), thread: ?*Thread, ) void { - var need_signal: bool = if (thread) |th| th.cancelAwaitable(.fromFuture(future)) else false; + var interrupt_method: ?Thread.InterruptMethod = + if (thread) |th| th.cancelAwaitable(.fromFuture(future)) else null; var timeout_ns: u64 = 1 << 10; while (true) { - need_signal = need_signal and thread.?.signalCanceledSyscall(t, .fromFuture(future)); - Thread.futexWaitUncancelable(&num_completed.raw, 0, if (need_signal) timeout_ns else null); + if (interrupt_method) |method| { + if (!thread.?.signalCanceledSyscall(t, .fromFuture(future), method)) + interrupt_method = null; + } + Thread.futexWaitUncancelable(&num_completed.raw, 0, if (interrupt_method != null) timeout_ns else null); switch (num_completed.load(.acquire)) { // acquire task results 0 => {}, 1 => break, @@ -625,6 +632,9 @@ const Thread = struct { cancel_protection: Io.CancelProtection, /// Always released when `Status.cancelation` is set 
to `.parked`. futex_waiter: if (use_parking_futex) ?*parking_futex.Waiter else ?noreturn, + apc_context: if (is_windows) ?*anyopaque else void, + /// Used only by group cancelation code for temporary storage. + interrupt_method: InterruptMethod, csprng: Csprng, @@ -652,11 +662,13 @@ const Thread = struct { /// To request cancelation, set the status to `.blocked_canceling` and repeatedly interrupt the system call until the status changes. blocked = 0b011, - /// Windows-only: the thread is blocked in an alertable wait via - /// `NtDelayExecution`. To request cancelation, set the status to - /// `blocked_alertable_canceling` and repeatedly alert the thread - /// until the status changes. - blocked_alertable = 0b010, + /// Windows-only: the thread is blocked in a call to `NtDelayExecution`. + /// To request cancelation, set the status to `.canceling` and call `NtCancelIoFileEx`. + blocked_apc = 0b100, + + /// Windows-only: the thread is blocked in a call to `GetAddrInfoExW`. + /// To request cancelation, set the status to `.canceling` and call `GetAddrInfoExCancel`. + blocked_windows_dns = 0b010, /// The thread has an outstanding cancelation request but is not in a cancelable operation. /// When it acknowledges the cancelation, it will set the status to `.canceled`. @@ -705,8 +717,8 @@ const Thread = struct { switch (status.cancelation) { .parked => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, .none, .canceled => {}, .canceling => { @@ -986,17 +998,17 @@ const Thread = struct { /// It is possible that `thread` gets canceled by this function, but is blocked in a syscall. In /// that case, the thread may need to be sent a signal to interrupt the call. This function will /// return `true` to indicate this, in which case the caller must call `signalCanceledSyscall`. 
- fn cancelAwaitable(thread: *Thread, awaitable: AwaitableId) bool { + fn cancelAwaitable(thread: *Thread, awaitable: AwaitableId) ?InterruptMethod { var status = thread.status.load(.monotonic); while (true) { - if (status.awaitable != awaitable) return false; // thread is working on something else + if (status.awaitable != awaitable) return null; // thread is working on something else status = switch (status.cancelation) { .none => thread.status.cmpxchgWeak( .{ .cancelation = .none, .awaitable = awaitable }, .{ .cancelation = .canceling, .awaitable = awaitable }, .monotonic, .monotonic, - ) orelse return false, + ) orelse return null, .parked => thread.status.cmpxchgWeak( .{ .cancelation = .parked, .awaitable = awaitable }, @@ -1009,7 +1021,7 @@ const Thread = struct { parking_futex.removeCanceledWaiter(futex_waiter); } unpark(&.{thread.id}, null); - return false; + return null; }, .blocked => thread.status.cmpxchgWeak( @@ -1017,7 +1029,17 @@ const Thread = struct { .{ .cancelation = .blocked_canceling, .awaitable = awaitable }, .monotonic, .monotonic, - ) orelse return true, + ) orelse return .sync, + + .blocked_apc => thread.status.cmpxchgWeak( + .{ .cancelation = .blocked_apc, .awaitable = awaitable }, + .{ .cancelation = .canceling, .awaitable = awaitable }, + .monotonic, + .monotonic, + ) orelse { + if (!is_windows) unreachable; + return .apc; + }, .blocked_alertable => thread.status.cmpxchgWeak( .{ .cancelation = .blocked_alertable, .awaitable = awaitable }, @@ -1026,14 +1048,14 @@ const Thread = struct { .monotonic, ) orelse { if (!is_windows) unreachable; - return true; + return .dns; }, .canceling, .canceled => { // This can happen when the task start raced with the cancelation, so the thread // saw the cancelation on the future/group *and* we are trying to signal the // thread here. - return false; + return null; }, .blocked_canceling => unreachable, // `awaitable` has not been canceled before now @@ -1042,6 +1064,11 @@ const Thread = struct { } } + const InterruptMethod = switch (native_os) { + .windows => enum { sync, dns, apc }, + else => enum { sync }, + }; + /// Sends a signal to `thread` if it is still blocked in a syscall (i.e. has not yet observed /// the cancelation request from `cancelAwaitable`). /// @@ -1051,24 +1078,21 @@ const Thread = struct { /// the thread is still blocked. For the implementation, `Future.waitForCancelWithSignaling` and /// `Group.waitForCancelWithSignaling`: they use exponential backoff starting at a 1us delay and /// doubling each call. In practice, it is rare to send more than one signal. - fn signalCanceledSyscall(thread: *Thread, t: *Threaded, awaitable: AwaitableId) bool { - const status = thread.status.load(.monotonic); - if (status.awaitable != awaitable) { - // The thread has moved on and is working on something totally different. - return false; - } + fn signalCanceledSyscall(thread: *Thread, t: *Threaded, awaitable: AwaitableId, method: InterruptMethod) bool { + const bad_status: Status = .{ .cancelation = .blocked_canceling, .awaitable = awaitable }; + if (thread.status.load(.monotonic) != bad_status) return false; // The thread ID and/or handle can be read non-atomically because they never change and were // released by the store that made `thread` available to us. 
- switch (status.cancelation) { - .blocked_canceling => if (std.Thread.use_pthreads) { - return switch (std.c.pthread_kill(thread.handle, .IO)) { - 0 => true, - else => false, - }; - } else switch (native_os) { - .linux => { + if (std.Thread.use_pthreads) switch (method) { + .sync => return switch (std.c.pthread_kill(thread.handle, .IO)) { + 0 => true, + else => false, + }, + } else switch (native_os) { + .linux => switch (method) { + .sync => { const pid: posix.pid_t = pid: { const cached_pid = @atomicLoad(Pid, &t.pid, .monotonic); if (cached_pid != .unknown) break :pid @intFromEnum(cached_pid); @@ -1081,7 +1105,9 @@ const Thread = struct { else => false, }; }, - .windows => { + }, + .windows => switch (method) { + .sync => { var iosb: windows.IO_STATUS_BLOCK = undefined; return switch (windows.ntdll.NtCancelSynchronousIoFile(thread.handle, null, &iosb)) { .NOT_FOUND => true, // this might mean the operation hasn't started yet @@ -1089,15 +1115,8 @@ const Thread = struct { else => false, }; }, - else => return false, - }, - - .blocked_alertable_canceling => { - if (!is_windows) unreachable; - return switch (windows.ntdll.NtAlertThread(thread.handle)) { - .SUCCESS => true, - else => false, - }; + .dns => @panic("TODO call GetAddrInfoExCancel"), + .apc => @panic("TODO call NtCancelIoFileEx"), }, else => { @@ -1145,8 +1164,8 @@ const Syscall = struct { }, .monotonic).cancelation) { .parked => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, .none => return .{ .thread = thread }, // new status is `.blocked` .canceling => return error.Canceled, // new status is `.canceled` @@ -1165,19 +1184,14 @@ const Syscall = struct { }, .monotonic).cancelation) { .none => unreachable, .parked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .canceling => unreachable, .canceled => unreachable, .blocked => {}, // new status is `.blocked` (unchanged) .blocked_canceling => return error.Canceled, // new status is `.canceled` } } - fn toApc(s: Syscall) Io.Cancelable!void { - // TODO set state to indicate instead of NtCancelSynchronousIoFile we - // need to use NtCancelIoFileEx - return s.checkCancel(); - } /// Marks this syscall as finished. fn finish(s: Syscall) void { const thread = s.thread orelse return; @@ -1187,8 +1201,8 @@ const Syscall = struct { }, .monotonic).cancelation) { .none => unreachable, .parked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .canceling => unreachable, .canceled => unreachable, .blocked => {}, // new status is `.none` @@ -1196,25 +1210,25 @@ const Syscall = struct { } } /// Indicates instead of `NtCancelSynchronousIoFile` we need to use - /// `NtAlertThread` to interrupt the wait. + /// `NtCancelIoFileEx` to interrupt the wait. /// /// Windows only, called from blocked state only. 
- fn toAlertable(s: Syscall) Io.Cancelable!AlertableSyscall { - comptime assert(is_windows); - const thread = s.thread orelse return .{ .thread = null }; + fn toApc(s: Syscall, apc_context: ?*anyopaque) Io.Cancelable!void { + const thread = s.thread orelse return; + thread.apc_context = apc_context; var prev = thread.status.load(.monotonic); while (true) prev = switch (prev.cancelation) { .none => unreachable, .parked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .canceling => unreachable, .canceled => unreachable, .blocked => thread.status.cmpxchgWeak(prev, .{ - .cancelation = .blocked_alertable, + .cancelation = .blocked_apc, .awaitable = prev.awaitable, - }, .monotonic, .monotonic) orelse return .{ .thread = thread }, + }, .monotonic, .monotonic) orelse return, .blocked_canceling => thread.status.cmpxchgWeak(prev, .{ .cancelation = .canceled, @@ -1222,6 +1236,45 @@ const Syscall = struct { }, .monotonic, .monotonic) orelse return error.Canceled, }; } + /// Windows only, called from blocked_apc state only. + fn checkCancelApc(s: Syscall) Io.Cancelable!void { + const thread = s.thread orelse return; + var prev = thread.status.load(.monotonic); + while (true) prev = switch (prev.cancelation) { + .none => unreachable, + .parked => unreachable, + .blocked_windows_dns => unreachable, + .blocked => unreachable, + .canceling => unreachable, + .canceled => unreachable, + .blocked_apc => return, + .blocked_canceling => thread.status.cmpxchgWeak(prev, .{ + .cancelation = .canceled, + .awaitable = prev.awaitable, + }, .monotonic, .monotonic) orelse return error.Canceled, + }; + } + /// Windows only, called from blocked_apc state only. + fn finishApc(s: Syscall) void { + const thread = s.thread orelse return; + var prev = thread.status.load(.monotonic); + while (true) prev = switch (prev.cancelation) { + .none => unreachable, + .parked => unreachable, + .blocked_windows_dns => unreachable, + .blocked => unreachable, + .canceling => unreachable, + .canceled => unreachable, + .blocked_apc => thread.status.cmpxchgWeak(prev, .{ + .cancelation = .none, + .awaitable = prev.awaitable, + }, .monotonic, .monotonic) orelse return, + .blocked_canceling => thread.status.cmpxchgWeak(prev, .{ + .cancelation = .canceling, + .awaitable = prev.awaitable, + }, .monotonic, .monotonic) orelse return, + }; + } /// Convenience wrapper which calls `finish`, then returns `err`. 
fn fail(s: Syscall, err: anytype) @TypeOf(err) { s.finish(); @@ -1501,6 +1554,8 @@ fn worker(t: *Threaded) void { .cancel_protection = .unblocked, .futex_waiter = undefined, .csprng = .{}, + .apc_context = undefined, + .interrupt_method = undefined, }; Thread.current = &thread; @@ -2109,8 +2164,8 @@ fn groupAsyncEager( .canceled => true, .parked => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, }; } else false; @@ -2121,8 +2176,8 @@ fn groupAsyncEager( .canceled => true, .parked => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, }; } else false; @@ -2301,8 +2356,8 @@ fn recancelInner() void { .canceling => unreachable, // called `recancel` but cancelation was already pending .parked => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, } } @@ -8376,6 +8431,7 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us const buffer = data[index]; var io_status_block: windows.IO_STATUS_BLOCK = undefined; + var done: bool = false; read: { const syscall: Syscall = try .start(); @@ -8383,8 +8439,8 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us switch (windows.ntdll.NtReadFile( file.handle, null, // event - noopApc, // apc callback - null, // apc context + flagApc, // apc callback + &done, // apc context &io_status_block, buffer.ptr, @min(std.math.maxInt(u32), buffer.len), @@ -8402,12 +8458,12 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us //else => |status| return syscall.unexpectedNtstatus(status), } } - try syscall.toApc(); + try syscall.toApc(&done); while (true) { switch (windows.ntdll.NtDelayExecution(1, null)) { - .USER_APC => break syscall.finish(), + .USER_APC => break syscall.finishApc(), .SUCCESS, .CANCELLED => { - try syscall.checkCancel(); + try syscall.checkCancelApc(); continue; }, else => |status| std.debug.panic("fileReadStreamingWindows NtDelayExecution returned {t}", .{status}), @@ -8425,12 +8481,13 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us return io_status_block.Information; } -fn noopApc( +fn flagApc( apc_context: ?*anyopaque, io_status_block: *windows.IO_STATUS_BLOCK, unused: windows.ULONG, ) callconv(.winapi) void { - _ = apc_context; + const flag: *bool = @ptrCast(apc_context); + flag.* = true; _ = io_status_block; _ = unused; } @@ -12442,7 +12499,7 @@ fn netLookupFallible( var res: *ws2_32.ADDRINFOEXW = undefined; const timeout: ?*ws2_32.timeval = null; while (true) { - // TODO: hook this up to cancelation with `NtDelayExecution` and APC callbacks. + // TODO: hook this up to cancelation with `Thread.Status.cancelation.blocked_windows_dns`. 
try Thread.checkCancel(); // TODO make this append to the queue eagerly rather than blocking until the whole thing finishes const rc: ws2_32.WinsockError = @enumFromInt(ws2_32.GetAddrInfoExW(name_w, port_w, .DNS, null, &hints, &res, timeout, null, null, null)); @@ -16176,8 +16233,8 @@ const parking_futex = struct { .canceled => break :cancelable, // status is still `.canceled` .parked => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, } // We could now be unparked for a cancelation at any time! @@ -16228,8 +16285,8 @@ const parking_futex = struct { }, .canceled => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, }, } @@ -16270,8 +16327,8 @@ const parking_futex = struct { .canceling => continue, // race with a canceler who hasn't called `removeCanceledWaiter` yet .canceled => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, } // We're waking this waiter. Remove them from the bucket and add them to our local list. @@ -16337,8 +16394,8 @@ const parking_sleep = struct { .canceled => break :cancelable, // status is still `.canceled` .parked => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, } while (park(deadline, null)) { @@ -16356,8 +16413,8 @@ const parking_sleep = struct { .none => unreachable, .canceled => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, } } else |err| switch (err) { @@ -16376,8 +16433,8 @@ const parking_sleep = struct { .none => unreachable, .canceled => unreachable, .blocked => unreachable, - .blocked_alertable => unreachable, - .blocked_alertable_canceling => unreachable, + .blocked_apc => unreachable, + .blocked_windows_dns => unreachable, .blocked_canceling => unreachable, }, } From e705ad83028b339314fa668ec04d57987ca73e87 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 21 Jan 2026 21:28:15 -0800 Subject: [PATCH 05/65] std.Io.Threaded: implement APC cancelation specifically the call to NtCancelIoFileEx --- lib/std/Io/Threaded.zig | 33 ++++++++++++++++++++++++++------- lib/std/os/windows/ntdll.zig | 6 +++--- 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index a02ccb8268..08d67a6997 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -632,12 +632,17 @@ const Thread = struct { cancel_protection: Io.CancelProtection, /// Always released when `Status.cancelation` is set to `.parked`. futex_waiter: if (use_parking_futex) ?*parking_futex.Waiter else ?noreturn, - apc_context: if (is_windows) ?*anyopaque else void, + apc: Apc, /// Used only by group cancelation code for temporary storage. 
interrupt_method: InterruptMethod, csprng: Csprng, + const Apc = if (is_windows) struct { + handle: windows.HANDLE, + iosb: ?*windows.IO_STATUS_BLOCK, + } else void; + const Handle = Handle: { if (std.Thread.use_pthreads) break :Handle std.c.pthread_t; if (is_windows) break :Handle windows.HANDLE; @@ -1116,7 +1121,14 @@ const Thread = struct { }; }, .dns => @panic("TODO call GetAddrInfoExCancel"), - .apc => @panic("TODO call NtCancelIoFileEx"), + .apc => { + var iosb: windows.IO_STATUS_BLOCK = undefined; + return switch (windows.ntdll.NtCancelIoFileEx(thread.apc.handle, thread.apc.iosb, &iosb)) { + .NOT_FOUND => true, // this might mean the operation hasn't started yet + .SUCCESS => false, // the OS confirmed that our cancelation worked + else => false, + }; + }, }, else => { @@ -1213,9 +1225,9 @@ const Syscall = struct { /// `NtCancelIoFileEx` to interrupt the wait. /// /// Windows only, called from blocked state only. - fn toApc(s: Syscall, apc_context: ?*anyopaque) Io.Cancelable!void { + fn toApc(s: Syscall, apc: Thread.Apc) Io.Cancelable!void { const thread = s.thread orelse return; - thread.apc_context = apc_context; + thread.apc = apc; var prev = thread.status.load(.monotonic); while (true) prev = switch (prev.cancelation) { .none => unreachable, @@ -1554,7 +1566,7 @@ fn worker(t: *Threaded) void { .cancel_protection = .unblocked, .futex_waiter = undefined, .csprng = .{}, - .apc_context = undefined, + .apc = undefined, .interrupt_method = undefined, }; Thread.current = &thread; @@ -8458,10 +8470,17 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us //else => |status| return syscall.unexpectedNtstatus(status), } } - try syscall.toApc(&done); + try syscall.toApc(.{ .handle = file.handle, .iosb = &io_status_block }); while (true) { switch (windows.ntdll.NtDelayExecution(1, null)) { - .USER_APC => break syscall.finishApc(), + .USER_APC => { + if (!done) { + // Other APC work was queued before calling into this function. + try syscall.checkCancelApc(); + continue; + } + break syscall.finishApc(); + }, .SUCCESS, .CANCELLED => { try syscall.checkCancelApc(); continue; diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig index 774ca28f19..a6d5b21322 100644 --- a/lib/std/os/windows/ntdll.zig +++ b/lib/std/os/windows/ntdll.zig @@ -601,11 +601,11 @@ pub extern "ntdll" fn NtDelayExecution( pub extern "ntdll" fn NtCancelIoFileEx( FileHandle: HANDLE, - IoRequestToCancel: *const IO_STATUS_BLOCK, + IoRequestToCancel: ?*IO_STATUS_BLOCK, IoStatusBlock: *IO_STATUS_BLOCK, ) callconv(.winapi) NTSTATUS; pub extern "ntdll" fn NtCancelIoFile( - handle: HANDLE, - iosbToCancel: *const IO_STATUS_BLOCK, + FileHandle: HANDLE, + IoRequestToCancel: ?*IO_STATUS_BLOCK, ) callconv(.winapi) NTSTATUS; From a933d7a6f88332a1ab7e1e6079e72a91320d0d4c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 21 Jan 2026 21:42:23 -0800 Subject: [PATCH 06/65] std.Io.Threaded: don't pass null to NtDelayExecution Windows returns ACCESS_VIOLATION if you do that. 
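For reference, the DelayInterval encoding follows the usual NT kernel
timeout convention (background information, not something this patch
asserts): a negative value is a relative delay in units of 100 ns, and a
positive value is an absolute time. Either way the pointer must be
non-null. A hedged sketch:

    const std = @import("std");
    const windows = std.os.windows;

    // Sleep for roughly 1 ms using a relative delay, expressed as a
    // negative count of 100 ns intervals; Alertable = 1 (TRUE) so that
    // queued APCs can run during the wait.
    fn sleepOneMs() void {
        const one_ms: windows.LARGE_INTEGER = -(1 * std.time.ns_per_ms / 100);
        _ = windows.ntdll.NtDelayExecution(1, &one_ms);
    }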
---
 lib/std/Io/Threaded.zig      | 5 +++--
 lib/std/os/windows/ntdll.zig | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index 08d67a6997..a9a8586da6 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -8444,6 +8444,7 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us
 
     var io_status_block: windows.IO_STATUS_BLOCK = undefined;
     var done: bool = false;
+    const infinite: windows.LARGE_INTEGER = windows.INFINITE;
 
     read: {
         const syscall: Syscall = try .start();
@@ -8472,7 +8473,7 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us
         }
         try syscall.toApc(.{ .handle = file.handle, .iosb = &io_status_block });
         while (true) {
-            switch (windows.ntdll.NtDelayExecution(1, null)) {
+            switch (windows.ntdll.NtDelayExecution(1, &infinite)) {
                 .USER_APC => {
                     if (!done) {
                         // Other APC work was queued before calling into this function.
@@ -8481,7 +8482,7 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us
                     }
                     break syscall.finishApc();
                 },
-                .SUCCESS, .CANCELLED => {
+                .SUCCESS, .CANCELLED, .TIMEOUT, .ALERTED => {
                     try syscall.checkCancelApc();
                     continue;
                 },
diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig
index a6d5b21322..04b0a288de 100644
--- a/lib/std/os/windows/ntdll.zig
+++ b/lib/std/os/windows/ntdll.zig
@@ -596,7 +596,7 @@ pub extern "ntdll" fn NtCancelSynchronousIoFile(
 
 pub extern "ntdll" fn NtDelayExecution(
     Alertable: BOOLEAN,
-    DelayInterval: ?*const LARGE_INTEGER,
+    DelayInterval: *const LARGE_INTEGER,
 ) callconv(.winapi) NTSTATUS;
 
 pub extern "ntdll" fn NtCancelIoFileEx(

From 6d9e6e2c385650521289088968e16e0b5a1eed60 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 22 Jan 2026 14:32:39 -0800
Subject: [PATCH 07/65] std.Io.Threaded: avoid extra fields of Thread

As mlugg pointed out, those fields race when a thread finishes an
operation just after it is canceled and then picks up another task,
resulting in these fields being potentially overwritten.

This updates fileReadStreaming on Windows to handle being alerted, and
then manage its own cancelation of the file I/O.
---
 lib/std/Io/Threaded.zig      | 275 +++++++++++++----------------------
 lib/std/os/windows/ntdll.zig |   2 +-
 2 files changed, 103 insertions(+), 174 deletions(-)

diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index a9a8586da6..3d7520dbe3 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -339,8 +339,8 @@ const Group = struct {
             .canceled => true,
             .parked => unreachable,
             .blocked => unreachable,
-            .blocked_apc => unreachable,
-            .blocked_windows_dns => unreachable,
+            .blocked_alertable => unreachable,
+            .blocked_alertable_canceling => unreachable,
             .blocked_canceling => unreachable,
         };
         if (result) {
@@ -379,10 +379,7 @@ const Group = struct {
         while (it) |thread| : (it = thread.next) {
             // This non-mutating RMW exists for ordering reasons: see comment in `Group.Task.start` for reasons.
_ = thread.status.fetchOr(.{ .cancelation = @enumFromInt(0), .awaitable = .null }, .release); - if (thread.cancelAwaitable(.fromGroup(g.ptr))) |method| { - thread.interrupt_method = method; - any_blocked = true; - } + if (thread.cancelAwaitable(.fromGroup(g.ptr))) any_blocked = true; } return any_blocked; } @@ -394,7 +391,7 @@ const Group = struct { var any_signaled = false; var it = t.worker_threads.load(.acquire); // acquire `Thread` values while (it) |thread| : (it = thread.next) { - if (thread.signalCanceledSyscall(t, .fromGroup(g.ptr), thread.interrupt_method)) any_signaled = true; + if (thread.signalCanceledSyscall(t, .fromGroup(g.ptr))) any_signaled = true; } return any_signaled; } @@ -546,8 +543,8 @@ const Future = struct { .canceled => true, .parked => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, }; thread.status.store(.{ .cancelation = .none, .awaitable = .null }, .monotonic); @@ -576,15 +573,11 @@ const Future = struct { num_completed: *std.atomic.Value(u32), thread: ?*Thread, ) void { - var interrupt_method: ?Thread.InterruptMethod = - if (thread) |th| th.cancelAwaitable(.fromFuture(future)) else null; + var need_signal: bool = if (thread) |th| th.cancelAwaitable(.fromFuture(future)) else false; var timeout_ns: u64 = 1 << 10; while (true) { - if (interrupt_method) |method| { - if (!thread.?.signalCanceledSyscall(t, .fromFuture(future), method)) - interrupt_method = null; - } - Thread.futexWaitUncancelable(&num_completed.raw, 0, if (interrupt_method != null) timeout_ns else null); + need_signal = need_signal and thread.?.signalCanceledSyscall(t, .fromFuture(future)); + Thread.futexWaitUncancelable(&num_completed.raw, 0, if (need_signal) timeout_ns else null); switch (num_completed.load(.acquire)) { // acquire task results 0 => {}, 1 => break, @@ -632,17 +625,9 @@ const Thread = struct { cancel_protection: Io.CancelProtection, /// Always released when `Status.cancelation` is set to `.parked`. futex_waiter: if (use_parking_futex) ?*parking_futex.Waiter else ?noreturn, - apc: Apc, - /// Used only by group cancelation code for temporary storage. - interrupt_method: InterruptMethod, csprng: Csprng, - const Apc = if (is_windows) struct { - handle: windows.HANDLE, - iosb: ?*windows.IO_STATUS_BLOCK, - } else void; - const Handle = Handle: { if (std.Thread.use_pthreads) break :Handle std.c.pthread_t; if (is_windows) break :Handle windows.HANDLE; @@ -667,13 +652,11 @@ const Thread = struct { /// To request cancelation, set the status to `.blocked_canceling` and repeatedly interrupt the system call until the status changes. blocked = 0b011, - /// Windows-only: the thread is blocked in a call to `NtDelayExecution`. - /// To request cancelation, set the status to `.canceling` and call `NtCancelIoFileEx`. - blocked_apc = 0b100, - - /// Windows-only: the thread is blocked in a call to `GetAddrInfoExW`. - /// To request cancelation, set the status to `.canceling` and call `GetAddrInfoExCancel`. - blocked_windows_dns = 0b010, + /// Windows-only: the thread is blocked in an alertable wait via + /// `NtDelayExecution`. To request cancelation, set the status to + /// `blocked_alertable_canceling` and repeatedly alert the thread + /// until the status changes. + blocked_alertable = 0b010, /// The thread has an outstanding cancelation request but is not in a cancelable operation. 
/// When it acknowledges the cancelation, it will set the status to `.canceled`. @@ -722,8 +705,8 @@ const Thread = struct { switch (status.cancelation) { .parked => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, .none, .canceled => {}, .canceling => { @@ -1003,17 +986,17 @@ const Thread = struct { /// It is possible that `thread` gets canceled by this function, but is blocked in a syscall. In /// that case, the thread may need to be sent a signal to interrupt the call. This function will /// return `true` to indicate this, in which case the caller must call `signalCanceledSyscall`. - fn cancelAwaitable(thread: *Thread, awaitable: AwaitableId) ?InterruptMethod { + fn cancelAwaitable(thread: *Thread, awaitable: AwaitableId) bool { var status = thread.status.load(.monotonic); while (true) { - if (status.awaitable != awaitable) return null; // thread is working on something else + if (status.awaitable != awaitable) return false; // thread is working on something else status = switch (status.cancelation) { .none => thread.status.cmpxchgWeak( .{ .cancelation = .none, .awaitable = awaitable }, .{ .cancelation = .canceling, .awaitable = awaitable }, .monotonic, .monotonic, - ) orelse return null, + ) orelse return false, .parked => thread.status.cmpxchgWeak( .{ .cancelation = .parked, .awaitable = awaitable }, @@ -1026,7 +1009,7 @@ const Thread = struct { parking_futex.removeCanceledWaiter(futex_waiter); } unpark(&.{thread.id}, null); - return null; + return false; }, .blocked => thread.status.cmpxchgWeak( @@ -1034,17 +1017,7 @@ const Thread = struct { .{ .cancelation = .blocked_canceling, .awaitable = awaitable }, .monotonic, .monotonic, - ) orelse return .sync, - - .blocked_apc => thread.status.cmpxchgWeak( - .{ .cancelation = .blocked_apc, .awaitable = awaitable }, - .{ .cancelation = .canceling, .awaitable = awaitable }, - .monotonic, - .monotonic, - ) orelse { - if (!is_windows) unreachable; - return .apc; - }, + ) orelse return true, .blocked_alertable => thread.status.cmpxchgWeak( .{ .cancelation = .blocked_alertable, .awaitable = awaitable }, @@ -1053,14 +1026,14 @@ const Thread = struct { .monotonic, ) orelse { if (!is_windows) unreachable; - return .dns; + return true; }, .canceling, .canceled => { // This can happen when the task start raced with the cancelation, so the thread // saw the cancelation on the future/group *and* we are trying to signal the // thread here. - return null; + return false; }, .blocked_canceling => unreachable, // `awaitable` has not been canceled before now @@ -1069,11 +1042,6 @@ const Thread = struct { } } - const InterruptMethod = switch (native_os) { - .windows => enum { sync, dns, apc }, - else => enum { sync }, - }; - /// Sends a signal to `thread` if it is still blocked in a syscall (i.e. has not yet observed /// the cancelation request from `cancelAwaitable`). /// @@ -1083,21 +1051,24 @@ const Thread = struct { /// the thread is still blocked. For the implementation, `Future.waitForCancelWithSignaling` and /// `Group.waitForCancelWithSignaling`: they use exponential backoff starting at a 1us delay and /// doubling each call. In practice, it is rare to send more than one signal. 
- fn signalCanceledSyscall(thread: *Thread, t: *Threaded, awaitable: AwaitableId, method: InterruptMethod) bool { - const bad_status: Status = .{ .cancelation = .blocked_canceling, .awaitable = awaitable }; - if (thread.status.load(.monotonic) != bad_status) return false; + fn signalCanceledSyscall(thread: *Thread, t: *Threaded, awaitable: AwaitableId) bool { + const status = thread.status.load(.monotonic); + if (status.awaitable != awaitable) { + // The thread has moved on and is working on something totally different. + return false; + } // The thread ID and/or handle can be read non-atomically because they never change and were // released by the store that made `thread` available to us. - if (std.Thread.use_pthreads) switch (method) { - .sync => return switch (std.c.pthread_kill(thread.handle, .IO)) { - 0 => true, - else => false, - }, - } else switch (native_os) { - .linux => switch (method) { - .sync => { + switch (status.cancelation) { + .blocked_canceling => if (std.Thread.use_pthreads) { + return switch (std.c.pthread_kill(thread.handle, .IO)) { + 0 => true, + else => false, + }; + } else switch (native_os) { + .linux => { const pid: posix.pid_t = pid: { const cached_pid = @atomicLoad(Pid, &t.pid, .monotonic); if (cached_pid != .unknown) break :pid @intFromEnum(cached_pid); @@ -1110,9 +1081,7 @@ const Thread = struct { else => false, }; }, - }, - .windows => switch (method) { - .sync => { + .windows => { var iosb: windows.IO_STATUS_BLOCK = undefined; return switch (windows.ntdll.NtCancelSynchronousIoFile(thread.handle, null, &iosb)) { .NOT_FOUND => true, // this might mean the operation hasn't started yet @@ -1120,15 +1089,15 @@ const Thread = struct { else => false, }; }, - .dns => @panic("TODO call GetAddrInfoExCancel"), - .apc => { - var iosb: windows.IO_STATUS_BLOCK = undefined; - return switch (windows.ntdll.NtCancelIoFileEx(thread.apc.handle, thread.apc.iosb, &iosb)) { - .NOT_FOUND => true, // this might mean the operation hasn't started yet - .SUCCESS => false, // the OS confirmed that our cancelation worked - else => false, - }; - }, + else => return false, + }, + + .blocked_alertable_canceling => { + if (!is_windows) unreachable; + return switch (windows.ntdll.NtAlertThread(thread.handle)) { + .SUCCESS => true, + else => false, + }; }, else => { @@ -1176,8 +1145,8 @@ const Syscall = struct { }, .monotonic).cancelation) { .parked => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, .none => return .{ .thread = thread }, // new status is `.blocked` .canceling => return error.Canceled, // new status is `.canceled` @@ -1196,8 +1165,8 @@ const Syscall = struct { }, .monotonic).cancelation) { .none => unreachable, .parked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .canceling => unreachable, .canceled => unreachable, .blocked => {}, // new status is `.blocked` (unchanged) @@ -1213,8 +1182,8 @@ const Syscall = struct { }, .monotonic).cancelation) { .none => unreachable, .parked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .canceling => unreachable, .canceled => unreachable, .blocked => {}, // new status is `.none` @@ -1222,25 +1191,25 @@ const Syscall = 
struct { } } /// Indicates instead of `NtCancelSynchronousIoFile` we need to use - /// `NtCancelIoFileEx` to interrupt the wait. + /// `NtAlertThread` to interrupt the wait. /// /// Windows only, called from blocked state only. - fn toApc(s: Syscall, apc: Thread.Apc) Io.Cancelable!void { - const thread = s.thread orelse return; - thread.apc = apc; + fn toAlertable(s: Syscall) Io.Cancelable!AlertableSyscall { + comptime assert(is_windows); + const thread = s.thread orelse return .{ .thread = null }; var prev = thread.status.load(.monotonic); while (true) prev = switch (prev.cancelation) { .none => unreachable, .parked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .canceling => unreachable, .canceled => unreachable, .blocked => thread.status.cmpxchgWeak(prev, .{ - .cancelation = .blocked_apc, + .cancelation = .blocked_alertable, .awaitable = prev.awaitable, - }, .monotonic, .monotonic) orelse return, + }, .monotonic, .monotonic) orelse return .{ .thread = thread }, .blocked_canceling => thread.status.cmpxchgWeak(prev, .{ .cancelation = .canceled, @@ -1248,45 +1217,6 @@ const Syscall = struct { }, .monotonic, .monotonic) orelse return error.Canceled, }; } - /// Windows only, called from blocked_apc state only. - fn checkCancelApc(s: Syscall) Io.Cancelable!void { - const thread = s.thread orelse return; - var prev = thread.status.load(.monotonic); - while (true) prev = switch (prev.cancelation) { - .none => unreachable, - .parked => unreachable, - .blocked_windows_dns => unreachable, - .blocked => unreachable, - .canceling => unreachable, - .canceled => unreachable, - .blocked_apc => return, - .blocked_canceling => thread.status.cmpxchgWeak(prev, .{ - .cancelation = .canceled, - .awaitable = prev.awaitable, - }, .monotonic, .monotonic) orelse return error.Canceled, - }; - } - /// Windows only, called from blocked_apc state only. - fn finishApc(s: Syscall) void { - const thread = s.thread orelse return; - var prev = thread.status.load(.monotonic); - while (true) prev = switch (prev.cancelation) { - .none => unreachable, - .parked => unreachable, - .blocked_windows_dns => unreachable, - .blocked => unreachable, - .canceling => unreachable, - .canceled => unreachable, - .blocked_apc => thread.status.cmpxchgWeak(prev, .{ - .cancelation = .none, - .awaitable = prev.awaitable, - }, .monotonic, .monotonic) orelse return, - .blocked_canceling => thread.status.cmpxchgWeak(prev, .{ - .cancelation = .canceling, - .awaitable = prev.awaitable, - }, .monotonic, .monotonic) orelse return, - }; - } /// Convenience wrapper which calls `finish`, then returns `err`. 
fn fail(s: Syscall, err: anytype) @TypeOf(err) { s.finish(); @@ -1566,8 +1496,6 @@ fn worker(t: *Threaded) void { .cancel_protection = .unblocked, .futex_waiter = undefined, .csprng = .{}, - .apc = undefined, - .interrupt_method = undefined, }; Thread.current = &thread; @@ -2176,8 +2104,8 @@ fn groupAsyncEager( .canceled => true, .parked => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, }; } else false; @@ -2188,8 +2116,8 @@ fn groupAsyncEager( .canceled => true, .parked => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, }; } else false; @@ -2368,8 +2296,8 @@ fn recancelInner() void { .canceling => unreachable, // called `recancel` but cancelation was already pending .parked => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, } } @@ -8467,36 +8395,37 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us continue; }, .INVALID_PARAMETER => |err| return syscall.ntstatusBug(err), // streaming read of async mode file - else => |status| std.debug.panic("fileReadStreamingWindows NtReadFile returned {t}", .{status}), - //else => |status| return syscall.unexpectedNtstatus(status), + else => |status| return syscall.unexpectedNtstatus(status), } } - try syscall.toApc(.{ .handle = file.handle, .iosb = &io_status_block }); - while (true) { - switch (windows.ntdll.NtDelayExecution(1, &infinite)) { - .USER_APC => { - if (!done) { - // Other APC work was queued before calling into this function. - try syscall.checkCancelApc(); - continue; - } - break syscall.finishApc(); + // Once we get here we received PENDING so we must not return from the + // function until the operation completes. 
+ defer while (!done) { + _ = windows.ntdll.NtDelayExecution(1, &infinite); + }; + + const alertable_syscall = syscall.toAlertable() catch |err| switch (err) { + error.Canceled => |e| { + _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block); + return e; + }, + }; + defer alertable_syscall.finish(); + while (!done) { + _ = windows.ntdll.NtDelayExecution(1, &infinite); + alertable_syscall.checkCancel() catch |err| switch (err) { + error.Canceled => |e| { + _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block); + return e; }, - .SUCCESS, .CANCELLED, .TIMEOUT, .ALERTED => { - try syscall.checkCancelApc(); - continue; - }, - else => |status| std.debug.panic("fileReadStreamingWindows NtDelayExecution returned {t}", .{status}), - //else => |status| return syscall.unexpectedNtstatus(status), - } + }; } } switch (io_status_block.u.Status) { .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => {}, .ACCESS_DENIED => return error.AccessDenied, - else => |status| std.debug.panic("fileReadStreamingWindows IO_STATUS_BLOCK returned {t}", .{status}), - //else => |status| return windows.unexpectedStatus(status), + else => |status| return windows.unexpectedStatus(status), } return io_status_block.Information; } @@ -12519,7 +12448,7 @@ fn netLookupFallible( var res: *ws2_32.ADDRINFOEXW = undefined; const timeout: ?*ws2_32.timeval = null; while (true) { - // TODO: hook this up to cancelation with `Thread.Status.cancelation.blocked_windows_dns`. + // TODO: hook this up to cancelation with `NtDelayExecution` and APC callbacks. try Thread.checkCancel(); // TODO make this append to the queue eagerly rather than blocking until the whole thing finishes const rc: ws2_32.WinsockError = @enumFromInt(ws2_32.GetAddrInfoExW(name_w, port_w, .DNS, null, &hints, &res, timeout, null, null, null)); @@ -16253,8 +16182,8 @@ const parking_futex = struct { .canceled => break :cancelable, // status is still `.canceled` .parked => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, } // We could now be unparked for a cancelation at any time! @@ -16305,8 +16234,8 @@ const parking_futex = struct { }, .canceled => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, }, } @@ -16347,8 +16276,8 @@ const parking_futex = struct { .canceling => continue, // race with a canceler who hasn't called `removeCanceledWaiter` yet .canceled => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, } // We're waking this waiter. Remove them from the bucket and add them to our local list. 
@@ -16414,8 +16343,8 @@ const parking_sleep = struct { .canceled => break :cancelable, // status is still `.canceled` .parked => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, } while (park(deadline, null)) { @@ -16433,8 +16362,8 @@ const parking_sleep = struct { .none => unreachable, .canceled => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, } } else |err| switch (err) { @@ -16453,8 +16382,8 @@ const parking_sleep = struct { .none => unreachable, .canceled => unreachable, .blocked => unreachable, - .blocked_apc => unreachable, - .blocked_windows_dns => unreachable, + .blocked_alertable => unreachable, + .blocked_alertable_canceling => unreachable, .blocked_canceling => unreachable, }, } diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig index 04b0a288de..d68cd1494b 100644 --- a/lib/std/os/windows/ntdll.zig +++ b/lib/std/os/windows/ntdll.zig @@ -601,7 +601,7 @@ pub extern "ntdll" fn NtDelayExecution( pub extern "ntdll" fn NtCancelIoFileEx( FileHandle: HANDLE, - IoRequestToCancel: ?*IO_STATUS_BLOCK, + IoRequestToCancel: *const IO_STATUS_BLOCK, IoStatusBlock: *IO_STATUS_BLOCK, ) callconv(.winapi) NTSTATUS; From 11b0a504df219b09db1d7364cc8b6a12b631ac64 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 22 Jan 2026 16:54:32 -0800 Subject: [PATCH 08/65] std.Io.Threaded: handle some more error codes from NtReadFile --- lib/std/Io/Threaded.zig | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 3d7520dbe3..36fb28db04 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -8388,12 +8388,15 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us null, // byte offset null, // key )) { - .SUCCESS => break :read syscall.finish(), + .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => break :read syscall.finish(), .PENDING => break, .CANCELLED => { try syscall.checkCancel(); continue; }, + .INVALID_DEVICE_REQUEST => return syscall.fail(error.IsDir), + .LOCK_NOT_GRANTED => return syscall.fail(error.LockViolation), + .ACCESS_DENIED => return syscall.fail(error.AccessDenied), .INVALID_PARAMETER => |err| return syscall.ntstatusBug(err), // streaming read of async mode file else => |status| return syscall.unexpectedNtstatus(status), } @@ -8424,6 +8427,8 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us switch (io_status_block.u.Status) { .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => {}, + .INVALID_DEVICE_REQUEST => return error.IsDir, + .LOCK_NOT_GRANTED => return error.LockViolation, .ACCESS_DENIED => return error.AccessDenied, else => |status| return windows.unexpectedStatus(status), } From 9862518797f79c946ccfad3339260597ce37ca18 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 23 Jan 2026 00:03:37 -0800 Subject: [PATCH 09/65] std.Io.Threaded: fix NtDelayExecution delay interval --- lib/std/Io/Threaded.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 36fb28db04..9db284b8fe 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -8372,7 +8372,7 @@ fn fileReadStreamingWindows(file: File, 
data: []const []u8) File.Reader.Error!us
     var io_status_block: windows.IO_STATUS_BLOCK = undefined;
     var done: bool = false;
-    const infinite: windows.LARGE_INTEGER = windows.INFINITE;
+    const max_delay_interval: windows.LARGE_INTEGER = std.math.minInt(i64);
 
     read: {
         const syscall: Syscall = try .start();
@@ -8404,7 +8404,7 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us
         // Once we get here we received PENDING so we must not return from the
         // function until the operation completes.
         defer while (!done) {
-            _ = windows.ntdll.NtDelayExecution(1, &infinite);
+            _ = windows.ntdll.NtDelayExecution(1, &max_delay_interval);
         };
 
         const alertable_syscall = syscall.toAlertable() catch |err| switch (err) {
@@ -8415,7 +8415,7 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us
         };
         defer alertable_syscall.finish();
         while (!done) {
-            _ = windows.ntdll.NtDelayExecution(1, &infinite);
+            _ = windows.ntdll.NtDelayExecution(1, &max_delay_interval);
             alertable_syscall.checkCancel() catch |err| switch (err) {
                 error.Canceled => |e| {
                     _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block);

From 90890fcb5cf39e53dc470db8260964f95b607937 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sat, 24 Jan 2026 03:37:43 -0500
Subject: [PATCH 10/65] Io.Threaded: fix UAF-induced crashes during
 asynchronous operations

When `NtReadFile` returns `SUCCESS`, the APC routine still runs the
next time the thread is alertable, which previously clobbered an
out-of-scope `done`.

Instead of adding an extra syscall to the success path, avoid all APC
side effects, allowing instant completions to return immediately.
---
 lib/std/Io/Threaded.zig | 93 ++++++++++++++++++++---------------------
 src/codegen/c/Type.zig  |  4 +-
 src/link.zig            | 11 ++++-
 3 files changed, 56 insertions(+), 52 deletions(-)

diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index 9db284b8fe..4fd2ab1703 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -1314,6 +1314,13 @@ const AlertableSyscall = struct {
     }
 };
 
+fn noopApc(_: ?*anyopaque, _: *windows.IO_STATUS_BLOCK, _: windows.ULONG) callconv(.winapi) void {}
+
+fn waitForApcOrAlert() void {
+    const infinite_timeout: windows.LARGE_INTEGER = std.math.minInt(windows.LARGE_INTEGER);
+    _ = windows.ntdll.NtDelayExecution(windows.TRUE, &infinite_timeout);
+}
+
 const max_iovecs_len = 8;
 const splat_buffer_size = 64;
 const default_PATH = "/usr/local/bin:/bin/:/usr/bin";
@@ -8371,40 +8378,41 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us
     const buffer = data[index];
 
     var io_status_block: windows.IO_STATUS_BLOCK = undefined;
-    var done: bool = false;
-    const max_delay_interval: windows.LARGE_INTEGER = std.math.minInt(i64);
-
-    read: {
-        const syscall: Syscall = try .start();
-        while (true) {
-            switch (windows.ntdll.NtReadFile(
-                file.handle,
-                null, // event
-                flagApc, // apc callback
-                &done, // apc context
-                &io_status_block,
-                buffer.ptr,
-                @min(std.math.maxInt(u32), buffer.len),
-                null, // byte offset
-                null, // key
-            )) {
-                .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => break :read syscall.finish(),
-                .PENDING => break,
-                .CANCELLED => {
-                    try syscall.checkCancel();
-                    continue;
-                },
-                .INVALID_DEVICE_REQUEST => return syscall.fail(error.IsDir),
-                .LOCK_NOT_GRANTED => return syscall.fail(error.LockViolation),
-                .ACCESS_DENIED => return syscall.fail(error.AccessDenied),
-                .INVALID_PARAMETER => |err| return syscall.ntstatusBug(err), // streaming read of async mode file
-                else => |status| return
syscall.unexpectedNtstatus(status), - } + const syscall: Syscall = try .start(); + while (true) { + io_status_block.u.Status = .PENDING; + switch (windows.ntdll.NtReadFile( + file.handle, + null, // event + noopApc, // apc callback + null, // apc context + &io_status_block, + buffer.ptr, + @min(std.math.maxInt(u32), buffer.len), + null, // byte offset + null, // key + )) { + .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => { + syscall.finish(); + return io_status_block.Information; + }, + .PENDING => break, + .CANCELLED => { + try syscall.checkCancel(); + continue; + }, + .INVALID_DEVICE_REQUEST => return syscall.fail(error.IsDir), + .LOCK_NOT_GRANTED => return syscall.fail(error.LockViolation), + .ACCESS_DENIED => return syscall.fail(error.AccessDenied), + .INVALID_PARAMETER => |err| return syscall.ntstatusBug(err), // streaming read of async mode file + else => |status| return syscall.unexpectedNtstatus(status), } + } + { // Once we get here we received PENDING so we must not return from the // function until the operation completes. - defer while (!done) { - _ = windows.ntdll.NtDelayExecution(1, &max_delay_interval); + defer while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { + waitForApcOrAlert(); }; const alertable_syscall = syscall.toAlertable() catch |err| switch (err) { @@ -8414,36 +8422,25 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us }, }; defer alertable_syscall.finish(); - while (!done) { - _ = windows.ntdll.NtDelayExecution(1, &max_delay_interval); + waitForApcOrAlert(); + while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { alertable_syscall.checkCancel() catch |err| switch (err) { error.Canceled => |e| { _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block); return e; }, }; + waitForApcOrAlert(); } } - switch (io_status_block.u.Status) { - .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => {}, + .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => return io_status_block.Information, + .PENDING => unreachable, // cannot return until the operation completes .INVALID_DEVICE_REQUEST => return error.IsDir, .LOCK_NOT_GRANTED => return error.LockViolation, .ACCESS_DENIED => return error.AccessDenied, else => |status| return windows.unexpectedStatus(status), } - return io_status_block.Information; -} - -fn flagApc( - apc_context: ?*anyopaque, - io_status_block: *windows.IO_STATUS_BLOCK, - unused: windows.ULONG, -) callconv(.winapi) void { - const flag: *bool = @ptrCast(apc_context); - flag.* = true; - _ = io_status_block; - _ = unused; } fn fileReadPositionalPosix(file: File, data: []const []u8, offset: u64) File.ReadPositionalError!usize { @@ -14646,7 +14643,7 @@ fn getCngHandle(t: *Threaded) Io.RandomSecureError!windows.HANDLE { t.mutex.lock(); // Another thread might have won the race. 
defer t.mutex.unlock(); if (t.random_file.handle) |prev_handle| { - _ = windows.ntdll.NtClose(fresh_handle); + windows.CloseHandle(fresh_handle); return prev_handle; } else { t.random_file.handle = fresh_handle; diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig index fb37b60580..0bcdb207fc 100644 --- a/src/codegen/c/Type.zig +++ b/src/codegen/c/Type.zig @@ -2389,7 +2389,7 @@ pub const Pool = struct { .nonstring = elem_ctype.isAnyChar() and switch (ptr_info.sentinel) { .none => true, .zero_u8 => false, - else => |sentinel| Value.fromInterned(sentinel).orderAgainstZero(zcu).compare(.neq), + else => |sentinel| !Value.fromInterned(sentinel).compareAllWithZero(.eq, zcu), }, }); }, @@ -2438,7 +2438,7 @@ pub const Pool = struct { .nonstring = elem_ctype.isAnyChar() and switch (array_info.sentinel) { .none => true, .zero_u8 => false, - else => |sentinel| Value.fromInterned(sentinel).orderAgainstZero(zcu).compare(.neq), + else => |sentinel| !Value.fromInterned(sentinel).compareAllWithZero(.eq, zcu), }, }); if (!kind.isParameter()) return array_ctype; diff --git a/src/link.zig b/src/link.zig index 6f19ec0e58..3af768a363 100644 --- a/src/link.zig +++ b/src/link.zig @@ -605,8 +605,8 @@ pub const File = struct { switch (base.tag) { .lld => assert(base.file == null), .elf, .macho, .wasm => { - if (base.file != null) return; dev.checkAny(&.{ .coff_linker, .elf_linker, .macho_linker, .plan9_linker, .wasm_linker }); + if (base.file != null) return; const emit = base.emit; if (base.child_pid) |pid| { if (builtin.os.tag == .windows) { @@ -645,6 +645,7 @@ pub const File = struct { base.file = try emit.root_dir.handle.openFile(io, emit.sub_path, .{ .mode = .read_write }); }, .elf2, .coff2 => if (base.file == null) { + dev.checkAny(&.{ .elf2_linker, .coff2_linker }); const mf = if (base.cast(.elf2)) |elf| &elf.mf else if (base.cast(.coff2)) |coff| @@ -657,7 +658,13 @@ pub const File = struct { base.file = mf.memory_map.file; try mf.ensureTotalCapacity(@intCast(mf.nodes.items[0].location().resolve(mf)[1])); }, - .c, .spirv => dev.checkAny(&.{ .c_linker, .spirv_linker }), + .c => if (base.file == null) { + dev.check(.c_linker); + base.file = try base.emit.root_dir.handle.openFile(io, base.emit.sub_path, .{ + .mode = .write_only, + }); + }, + .spirv => dev.check(.spirv_linker), .plan9 => unreachable, } } From bd4b6d8b14e24e0bc6c072e5fb1bba3c666564d2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 5 Jan 2026 22:19:08 -0800 Subject: [PATCH 11/65] std.Io: delete the poll API --- lib/std/Io.zig | 466 +------------------------------------------------ 1 file changed, 8 insertions(+), 458 deletions(-) diff --git a/lib/std/Io.zig b/lib/std/Io.zig index 6dc0e24731..506203418f 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -15,463 +15,13 @@ const Io = @This(); const builtin = @import("builtin"); -const is_windows = builtin.os.tag == .windows; const std = @import("std.zig"); -const windows = std.os.windows; -const posix = std.posix; const math = std.math; const assert = std.debug.assert; const Allocator = std.mem.Allocator; const Alignment = std.mem.Alignment; -pub fn poll( - gpa: Allocator, - comptime StreamEnum: type, - files: PollFiles(StreamEnum), -) Poller(StreamEnum) { - const enum_fields = @typeInfo(StreamEnum).@"enum".fields; - var result: Poller(StreamEnum) = .{ - .gpa = gpa, - .readers = @splat(.failing), - .poll_fds = undefined, - .windows = if (is_windows) .{ - .first_read_done = false, - .overlapped = [1]windows.OVERLAPPED{ - std.mem.zeroes(windows.OVERLAPPED), - } ** enum_fields.len, - 
.small_bufs = undefined, - .active = .{ - .count = 0, - .handles_buf = undefined, - .stream_map = undefined, - }, - } else {}, - }; - - inline for (enum_fields, 0..) |field, i| { - if (is_windows) { - result.windows.active.handles_buf[i] = @field(files, field.name).handle; - } else { - result.poll_fds[i] = .{ - .fd = @field(files, field.name).handle, - .events = posix.POLL.IN, - .revents = undefined, - }; - } - } - - return result; -} - -pub fn Poller(comptime StreamEnum: type) type { - return struct { - const enum_fields = @typeInfo(StreamEnum).@"enum".fields; - const PollFd = if (is_windows) void else posix.pollfd; - - gpa: Allocator, - readers: [enum_fields.len]Reader, - poll_fds: [enum_fields.len]PollFd, - windows: if (is_windows) struct { - first_read_done: bool, - overlapped: [enum_fields.len]windows.OVERLAPPED, - small_bufs: [enum_fields.len][128]u8, - active: struct { - count: math.IntFittingRange(0, enum_fields.len), - handles_buf: [enum_fields.len]windows.HANDLE, - stream_map: [enum_fields.len]StreamEnum, - - pub fn removeAt(self: *@This(), index: u32) void { - assert(index < self.count); - for (index + 1..self.count) |i| { - self.handles_buf[i - 1] = self.handles_buf[i]; - self.stream_map[i - 1] = self.stream_map[i]; - } - self.count -= 1; - } - }, - } else void, - - const Self = @This(); - - pub fn deinit(self: *Self) void { - const gpa = self.gpa; - if (is_windows) { - // cancel any pending IO to prevent clobbering OVERLAPPED value - for (self.windows.active.handles_buf[0..self.windows.active.count]) |h| { - _ = windows.kernel32.CancelIo(h); - } - } - inline for (&self.readers) |*r| gpa.free(r.buffer); - self.* = undefined; - } - - pub fn poll(self: *Self) !bool { - if (is_windows) { - return pollWindows(self, null); - } else { - return pollPosix(self, null); - } - } - - pub fn pollTimeout(self: *Self, nanoseconds: u64) !bool { - if (is_windows) { - return pollWindows(self, nanoseconds); - } else { - return pollPosix(self, nanoseconds); - } - } - - pub fn reader(self: *Self, which: StreamEnum) *Reader { - return &self.readers[@intFromEnum(which)]; - } - - pub fn toOwnedSlice(self: *Self, which: StreamEnum) error{OutOfMemory}![]u8 { - const gpa = self.gpa; - const r = reader(self, which); - if (r.seek == 0) { - const new = try gpa.realloc(r.buffer, r.end); - r.buffer = &.{}; - r.end = 0; - return new; - } - const new = try gpa.dupe(u8, r.buffered()); - gpa.free(r.buffer); - r.buffer = &.{}; - r.seek = 0; - r.end = 0; - return new; - } - - fn pollWindows(self: *Self, nanoseconds: ?u64) !bool { - const bump_amt = 512; - const gpa = self.gpa; - - if (!self.windows.first_read_done) { - var already_read_data = false; - for (0..enum_fields.len) |i| { - const handle = self.windows.active.handles_buf[i]; - switch (try windowsAsyncReadToFifoAndQueueSmallRead( - gpa, - handle, - &self.windows.overlapped[i], - &self.readers[i], - &self.windows.small_bufs[i], - bump_amt, - )) { - .populated, .empty => |state| { - if (state == .populated) already_read_data = true; - self.windows.active.handles_buf[self.windows.active.count] = handle; - self.windows.active.stream_map[self.windows.active.count] = @as(StreamEnum, @enumFromInt(i)); - self.windows.active.count += 1; - }, - .closed => {}, // don't add to the wait_objects list - .closed_populated => { - // don't add to the wait_objects list, but we did already get data - already_read_data = true; - }, - } - } - self.windows.first_read_done = true; - if (already_read_data) return true; - } - - while (true) { - if (self.windows.active.count == 0) 
return false; - - const status = windows.kernel32.WaitForMultipleObjects( - self.windows.active.count, - &self.windows.active.handles_buf, - 0, - if (nanoseconds) |ns| - @min(std.math.cast(u32, ns / std.time.ns_per_ms) orelse (windows.INFINITE - 1), windows.INFINITE - 1) - else - windows.INFINITE, - ); - if (status == windows.WAIT_FAILED) - return windows.unexpectedError(windows.GetLastError()); - if (status == windows.WAIT_TIMEOUT) - return true; - - if (status < windows.WAIT_OBJECT_0 or status > windows.WAIT_OBJECT_0 + enum_fields.len - 1) - unreachable; - - const active_idx = status - windows.WAIT_OBJECT_0; - - const stream_idx = @intFromEnum(self.windows.active.stream_map[active_idx]); - const handle = self.windows.active.handles_buf[active_idx]; - - const overlapped = &self.windows.overlapped[stream_idx]; - const stream_reader = &self.readers[stream_idx]; - const small_buf = &self.windows.small_bufs[stream_idx]; - - const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) { - .success => |n| n, - .closed => { - self.windows.active.removeAt(active_idx); - continue; - }, - .aborted => unreachable, - }; - const buf = small_buf[0..num_bytes_read]; - const dest = try writableSliceGreedyAlloc(stream_reader, gpa, buf.len); - @memcpy(dest[0..buf.len], buf); - advanceBufferEnd(stream_reader, buf.len); - - switch (try windowsAsyncReadToFifoAndQueueSmallRead( - gpa, - handle, - overlapped, - stream_reader, - small_buf, - bump_amt, - )) { - .empty => {}, // irrelevant, we already got data from the small buffer - .populated => {}, - .closed, - .closed_populated, // identical, since we already got data from the small buffer - => self.windows.active.removeAt(active_idx), - } - return true; - } - } - - fn pollPosix(self: *Self, nanoseconds: ?u64) !bool { - const gpa = self.gpa; - // We ask for ensureUnusedCapacity with this much extra space. This - // has more of an effect on small reads because once the reads - // start to get larger the amount of space an ArrayList will - // allocate grows exponentially. - const bump_amt = 512; - - const err_mask = posix.POLL.ERR | posix.POLL.NVAL | posix.POLL.HUP; - - const events_len = try posix.poll(&self.poll_fds, if (nanoseconds) |ns| - std.math.cast(i32, ns / std.time.ns_per_ms) orelse std.math.maxInt(i32) - else - -1); - if (events_len == 0) { - for (self.poll_fds) |poll_fd| { - if (poll_fd.fd != -1) return true; - } else return false; - } - - var keep_polling = false; - for (&self.poll_fds, &self.readers) |*poll_fd, *r| { - // Try reading whatever is available before checking the error - // conditions. - // It's still possible to read after a POLL.HUP is received, - // always check if there's some data waiting to be read first. - if (poll_fd.revents & posix.POLL.IN != 0) { - const buf = try writableSliceGreedyAlloc(r, gpa, bump_amt); - const amt = posix.read(poll_fd.fd, buf) catch |err| switch (err) { - error.BrokenPipe => 0, // Handle the same as EOF. - else => |e| return e, - }; - advanceBufferEnd(r, amt); - if (amt == 0) { - // Remove the fd when the EOF condition is met. - poll_fd.fd = -1; - } else { - keep_polling = true; - } - } else if (poll_fd.revents & err_mask != 0) { - // Exclude the fds that signaled an error. - poll_fd.fd = -1; - } else if (poll_fd.fd != -1) { - keep_polling = true; - } - } - return keep_polling; - } - - /// Returns a slice into the unused capacity of `buffer` with at least - /// `min_len` bytes, extending `buffer` by resizing it with `gpa` as necessary. 
- /// - /// After calling this function, typically the caller will follow up with a - /// call to `advanceBufferEnd` to report the actual number of bytes buffered. - fn writableSliceGreedyAlloc(r: *Reader, allocator: Allocator, min_len: usize) Allocator.Error![]u8 { - { - const unused = r.buffer[r.end..]; - if (unused.len >= min_len) return unused; - } - if (r.seek > 0) { - const data = r.buffer[r.seek..r.end]; - @memmove(r.buffer[0..data.len], data); - r.seek = 0; - r.end = data.len; - } - { - var list: std.ArrayList(u8) = .{ - .items = r.buffer[0..r.end], - .capacity = r.buffer.len, - }; - defer r.buffer = list.allocatedSlice(); - try list.ensureUnusedCapacity(allocator, min_len); - } - const unused = r.buffer[r.end..]; - assert(unused.len >= min_len); - return unused; - } - - /// After writing directly into the unused capacity of `buffer`, this function - /// updates `end` so that users of `Reader` can receive the data. - fn advanceBufferEnd(r: *Reader, n: usize) void { - assert(n <= r.buffer.len - r.end); - r.end += n; - } - - /// The `ReadFile` docuementation states that `lpNumberOfBytesRead` does not have a meaningful - /// result when using overlapped I/O, but also that it cannot be `null` on Windows 7. For - /// compatibility, we point it to this dummy variables, which we never otherwise access. - /// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile - var win_dummy_bytes_read: u32 = undefined; - - /// Read as much data as possible from `handle` with `overlapped`, and write it to the FIFO. Before - /// returning, queue a read into `small_buf` so that `WaitForMultipleObjects` returns when more data - /// is available. `handle` must have no pending asynchronous operation. - fn windowsAsyncReadToFifoAndQueueSmallRead( - gpa: Allocator, - handle: windows.HANDLE, - overlapped: *windows.OVERLAPPED, - r: *Reader, - small_buf: *[128]u8, - bump_amt: usize, - ) !enum { empty, populated, closed_populated, closed } { - var read_any_data = false; - while (true) { - const fifo_read_pending = while (true) { - const buf = try writableSliceGreedyAlloc(r, gpa, bump_amt); - const buf_len = math.cast(u32, buf.len) orelse math.maxInt(u32); - - if (0 == windows.kernel32.ReadFile( - handle, - buf.ptr, - buf_len, - &win_dummy_bytes_read, - overlapped, - )) switch (windows.GetLastError()) { - .IO_PENDING => break true, - .BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed, - else => |err| return windows.unexpectedError(err), - }; - - const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) { - .success => |n| n, - .closed => return if (read_any_data) .closed_populated else .closed, - .aborted => unreachable, - }; - - read_any_data = true; - advanceBufferEnd(r, num_bytes_read); - - if (num_bytes_read == buf_len) { - // We filled the buffer, so there's probably more data available. - continue; - } else { - // We didn't fill the buffer, so assume we're out of data. - // There is no pending read. - break false; - } - }; - - if (fifo_read_pending) cancel_read: { - // Cancel the pending read into the FIFO. - _ = windows.kernel32.CancelIo(handle); - - // We have to wait for the handle to be signalled, i.e. for the cancelation to complete. - switch (windows.kernel32.WaitForSingleObject(handle, windows.INFINITE)) { - windows.WAIT_OBJECT_0 => {}, - windows.WAIT_FAILED => return windows.unexpectedError(windows.GetLastError()), - else => unreachable, - } - - // If it completed before we canceled, make sure to tell the FIFO! 
- const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, true)) { - .success => |n| n, - .closed => return if (read_any_data) .closed_populated else .closed, - .aborted => break :cancel_read, - }; - read_any_data = true; - advanceBufferEnd(r, num_bytes_read); - } - - // Try to queue the 1-byte read. - if (0 == windows.kernel32.ReadFile( - handle, - small_buf, - small_buf.len, - &win_dummy_bytes_read, - overlapped, - )) switch (windows.GetLastError()) { - .IO_PENDING => { - // 1-byte read pending as intended - return if (read_any_data) .populated else .empty; - }, - .BROKEN_PIPE => return if (read_any_data) .closed_populated else .closed, - else => |err| return windows.unexpectedError(err), - }; - - // We got data back this time. Write it to the FIFO and run the main loop again. - const num_bytes_read = switch (try windowsGetReadResult(handle, overlapped, false)) { - .success => |n| n, - .closed => return if (read_any_data) .closed_populated else .closed, - .aborted => unreachable, - }; - const buf = small_buf[0..num_bytes_read]; - const dest = try writableSliceGreedyAlloc(r, gpa, buf.len); - @memcpy(dest[0..buf.len], buf); - advanceBufferEnd(r, buf.len); - read_any_data = true; - } - } - - /// Simple wrapper around `GetOverlappedResult` to determine the result of a `ReadFile` operation. - /// If `!allow_aborted`, then `aborted` is never returned (`OPERATION_ABORTED` is considered unexpected). - /// - /// The `ReadFile` documentation states that the number of bytes read by an overlapped `ReadFile` must be determined using `GetOverlappedResult`, even if the - /// operation immediately returns data: - /// "Use NULL for [lpNumberOfBytesRead] if this is an asynchronous operation to avoid potentially - /// erroneous results." - /// "If `hFile` was opened with `FILE_FLAG_OVERLAPPED`, the following conditions are in effect: [...] - /// The lpNumberOfBytesRead parameter should be set to NULL. Use the GetOverlappedResult function to - /// get the actual number of bytes read." - /// See: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-readfile - fn windowsGetReadResult( - handle: windows.HANDLE, - overlapped: *windows.OVERLAPPED, - allow_aborted: bool, - ) !union(enum) { - success: u32, - closed, - aborted, - } { - var num_bytes_read: u32 = undefined; - if (0 == windows.kernel32.GetOverlappedResult( - handle, - overlapped, - &num_bytes_read, - 0, - )) switch (windows.GetLastError()) { - .BROKEN_PIPE => return .closed, - .OPERATION_ABORTED => |err| if (allow_aborted) { - return .aborted; - } else { - return windows.unexpectedError(err); - }, - else => |err| return windows.unexpectedError(err), - }; - return .{ .success = num_bytes_read }; - } - }; -} - -/// Given an enum, returns a struct with fields of that enum, each field -/// representing an I/O stream for polling. -pub fn PollFiles(comptime StreamEnum: type) type { - return @Struct(.auto, null, std.meta.fieldNames(StreamEnum), &@splat(Io.File), &@splat(.{})); -} - userdata: ?*anyopaque, vtable: *const VTable, @@ -704,18 +254,18 @@ pub const VTable = struct { pub const Limit = enum(usize) { nothing = 0, - unlimited = std.math.maxInt(usize), + unlimited = math.maxInt(usize), _, - /// `std.math.maxInt(usize)` is interpreted to mean `.unlimited`. + /// `math.maxInt(usize)` is interpreted to mean `.unlimited`. 
    pub fn limited(n: usize) Limit {
         return @enumFromInt(n);
     }
 
-    /// Any value grater than `std.math.maxInt(usize)` is interpreted to mean
+    /// Any value greater than `math.maxInt(usize)` is interpreted to mean
     /// `.unlimited`.
     pub fn limited64(n: u64) Limit {
-        return @enumFromInt(@min(n, std.math.maxInt(usize)));
+        return @enumFromInt(@min(n, math.maxInt(usize)));
     }
 
     pub fn countVec(data: []const []const u8) Limit {
@@ -929,9 +479,9 @@ pub const Clock = enum {
         };
     }
 
-    pub fn compare(lhs: Clock.Timestamp, op: std.math.CompareOperator, rhs: Clock.Timestamp) bool {
+    pub fn compare(lhs: Clock.Timestamp, op: math.CompareOperator, rhs: Clock.Timestamp) bool {
         assert(lhs.clock == rhs.clock);
-        return std.math.compare(lhs.raw.nanoseconds, op, rhs.raw.nanoseconds);
+        return math.compare(lhs.raw.nanoseconds, op, rhs.raw.nanoseconds);
     }
 };
 
@@ -996,7 +546,7 @@ pub const Duration = struct {
     nanoseconds: i96,
 
     pub const zero: Duration = .{ .nanoseconds = 0 };
-    pub const max: Duration = .{ .nanoseconds = std.math.maxInt(i96) };
+    pub const max: Duration = .{ .nanoseconds = math.maxInt(i96) };
 
     pub fn fromNanoseconds(x: i96) Duration {
         return .{ .nanoseconds = x };
@@ -1652,7 +1202,7 @@ pub const Event = enum(u32) {
     pub fn set(e: *Event, io: Io) void {
         switch (@atomicRmw(Event, e, .Xchg, .is_set, .release)) {
             .unset, .is_set => {},
-            .waiting => io.futexWake(Event, e, std.math.maxInt(u32)),
+            .waiting => io.futexWake(Event, e, math.maxInt(u32)),
         }
     }
 

From 0a0ecc4fb132d086c0f816a304a9fd8fde4a803e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 7 Jan 2026 18:38:19 -0800
Subject: [PATCH 12/65] std.Io: proof-of-concept "operations" API

This commit shows a proof-of-concept direction for std.Io.VTable to
take, which is to have general support for batching, timeouts, and
non-blocking operation.

I'm not sure if this is a good idea or not, so I'm putting it up for
scrutiny.

This commit introduces `std.Io.operate` and `std.Io.Operation`, and
implements them experimentally for `FileReadStreaming`.

In `std.Io.Threaded`, the implementation is based on poll().

This commit shows how it can be used in `std.process.run` to collect
both stdout and stderr in a single-threaded program using
`std.Io.Threaded`.

It also demonstrates how to upgrade code that previously used
`std.Io.poll` (*not* integrated with the interface!) to use concurrency
instead. This may not be ideal, since it makes the build runner no
longer support single-threaded mode.

An abstraction is still needed for conveniently reading multiple File
streams concurrently without io.concurrent, but this commit
demonstrates that such an API can be built on top of the new
`std.Io.operate` functionality.
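For illustration, here is a minimal sketch of driving the new API
directly. This is not code from this patch: `io` and `child` are
assumed to be in scope, `Io` is `std.Io`, the buffer sizes are
arbitrary, and the error handling is abbreviated. It mirrors what the
new `std.process.Child.collectOutput` below does with the API.

    // Block until at least one of the two pipes completes a read. With
    // `nonblocking = true`, an operation that would block reports
    // error.WouldBlock in `result` instead of stalling the batch.
    var stdout_buf: [4096]u8 = undefined;
    var stderr_buf: [4096]u8 = undefined;
    var stdout_vec: [1][]u8 = .{&stdout_buf};
    var stderr_vec: [1][]u8 = .{&stderr_buf};
    var operations: [2]Io.Operation = .{
        .{ .file_read_streaming = .{
            .file = child.stdout.?,
            .data = &stdout_vec,
            .nonblocking = true,
            .result = undefined,
        } },
        .{ .file_read_streaming = .{
            .file = child.stderr.?,
            .data = &stderr_vec,
            .nonblocking = true,
            .result = undefined,
        } },
    };
    // n_wait = 1: return as soon as one operation completes; no timeout.
    try io.operate(&operations, 1, .none);
    for (&operations) |*operation| {
        const n = operation.file_read_streaming.result catch |err| switch (err) {
            error.WouldBlock => continue, // this pipe was not ready yet
            else => |e| return e,
        };
        if (n == 0) {
            // end of stream on this pipe
        }
    }

Note that `result` doubles as the per-operation output channel, so a
single `operate` call can report a different success or error for each
operation without any allocation.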
--- lib/std/Build/Step.zig | 47 ++++++++++----- lib/std/Io.zig | 36 ++++++++++- lib/std/Io/File.zig | 8 ++- lib/std/Io/File/Reader.zig | 4 +- lib/std/Io/Reader.zig | 21 +++++++ lib/std/Io/Threaded.zig | 87 ++++++++++++++++++++++++++- lib/std/process.zig | 23 ++++++-- lib/std/process/Child.zig | 118 ++++++++++++++++++++++--------------- 8 files changed, 272 insertions(+), 72 deletions(-) diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 24e00bea5e..bacc81cbfa 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -381,10 +381,15 @@ pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutO pub const ZigProcess = struct { child: std.process.Child, - poller: Io.Poller(StreamEnum), progress_ipc_fd: if (std.Progress.have_ipc) ?std.posix.fd_t else void, pub const StreamEnum = enum { stdout, stderr }; + + pub fn deinit(zp: *ZigProcess, gpa: Allocator, io: Io) void { + _ = gpa; + zp.child.kill(io); + zp.* = undefined; + } }; /// Assumes that argv contains `--listen=-` and that the process being spawned @@ -459,14 +464,10 @@ pub fn evalZigProcess( zp.* = .{ .child = zp.child, - .poller = Io.poll(gpa, ZigProcess.StreamEnum, .{ - .stdout = zp.child.stdout.?, - .stderr = zp.child.stderr.?, - }), .progress_ipc_fd = if (std.Progress.have_ipc) prog_node.getIpcFd() else {}, }; if (watch) s.setZigProcess(zp); - defer if (!watch) zp.poller.deinit(); + defer if (!watch) zp.deinit(gpa, io); const result = try zigProcessUpdate(s, zp, watch, web_server, gpa); @@ -526,6 +527,9 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. const arena = b.allocator; const io = b.graph.io; + var stderr_task = try io.concurrent(readStreamAlloc, .{ gpa, io, zp.child.stderr.?, .unlimited }); + defer if (stderr_task.cancel(io)) |slice| gpa.free(slice) else |_| {}; + var timer = try std.time.Timer.start(); try sendMessage(io, zp.child.stdin.?, .update); @@ -533,14 +537,18 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. var result: ?Path = null; - const stdout = zp.poller.reader(.stdout); + var stdout_buffer: [512]u8 = undefined; + var stdout_reader: Io.File.Reader = .initStreaming(zp.child.stdout.?, io, &stdout_buffer); + const stdout = &stdout_reader.interface; - poll: while (true) { + var body_buffer: std.ArrayList(u8) = .empty; + + while (true) { const Header = std.zig.Server.Message.Header; - while (stdout.buffered().len < @sizeOf(Header)) if (!try zp.poller.poll()) break :poll; - const header = stdout.takeStruct(Header, .little) catch unreachable; - while (stdout.buffered().len < header.bytes_len) if (!try zp.poller.poll()) break :poll; - const body = stdout.take(header.bytes_len) catch unreachable; + const header = try stdout.takeStruct(Header, .little); + body_buffer.clearRetainingCapacity(); + try stdout.appendExact(gpa, &body_buffer, header.bytes_len); + const body = body_buffer.items; switch (header.tag) { .zig_version => { if (!std.mem.eql(u8, builtin.zig_version_string, body)) { @@ -553,11 +561,11 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. .error_bundle => { s.result_error_bundle = try std.zig.Server.allocErrorBundle(gpa, body); // This message indicates the end of the update. 
- if (watch) break :poll; + if (watch) break; }, .emit_digest => { const EmitDigest = std.zig.Server.Message.EmitDigest; - const emit_digest = @as(*align(1) const EmitDigest, @ptrCast(body)); + const emit_digest: *align(1) const EmitDigest = @ptrCast(body); s.result_cached = emit_digest.flags.cache_hit; const digest = body[@sizeOf(EmitDigest)..][0..Cache.bin_digest_len]; result = .{ @@ -631,7 +639,8 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. s.result_duration_ns = timer.read(); - const stderr_contents = try zp.poller.toOwnedSlice(.stderr); + const stderr_contents = try stderr_task.await(io); + defer gpa.free(stderr_contents); if (stderr_contents.len > 0) { try s.result_error_msgs.append(arena, try arena.dupe(u8, stderr_contents)); } @@ -639,6 +648,14 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. return result; } +fn readStreamAlloc(gpa: Allocator, io: Io, file: Io.File, limit: Io.Limit) ![]u8 { + var file_reader: Io.File.Reader = .initStreaming(file, io, &.{}); + return file_reader.interface.allocRemaining(gpa, limit) catch |err| switch (err) { + error.ReadFailed => return file_reader.err.?, + else => |e| return e, + }; +} + pub fn getZigProcess(s: *Step) ?*ZigProcess { return switch (s.id) { .compile => s.cast(Compile).?.zig_process, diff --git a/lib/std/Io.zig b/lib/std/Io.zig index 506203418f..3663e9b8d7 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -149,6 +149,8 @@ pub const VTable = struct { futexWaitUncancelable: *const fn (?*anyopaque, ptr: *const u32, expected: u32) void, futexWake: *const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void, + operate: *const fn (?*anyopaque, []Operation, n_wait: usize, Timeout) OperateError!void, + dirCreateDir: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirError!void, dirCreateDirPath: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirPathError!Dir.CreatePathStatus, dirCreateDirPathOpen: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions, Dir.OpenOptions) Dir.CreateDirPathOpenError!Dir, @@ -184,8 +186,6 @@ pub const VTable = struct { fileWriteFileStreaming: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit) File.Writer.WriteFileError!usize, fileWriteFilePositional: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit, offset: u64) File.WriteFilePositionalError!usize, /// Returns 0 on end of stream. - fileReadStreaming: *const fn (?*anyopaque, File, data: []const []u8) File.Reader.Error!usize, - /// Returns 0 on end of stream. fileReadPositional: *const fn (?*anyopaque, File, data: []const []u8, offset: u64) File.ReadPositionalError!usize, fileSeekBy: *const fn (?*anyopaque, File, relative_offset: i64) File.SeekError!void, fileSeekTo: *const fn (?*anyopaque, File, absolute_offset: u64) File.SeekError!void, @@ -252,6 +252,38 @@ pub const VTable = struct { netLookup: *const fn (?*anyopaque, net.HostName, *Queue(net.HostName.LookupResult), net.HostName.LookupOptions) net.HostName.LookupError!void, }; +pub const Operation = union(enum) { + noop, + file_read_streaming: FileReadStreaming, + + pub const FileReadStreaming = struct { + file: File, + data: []const []u8, + /// Causes `result` to return `error.WouldBlock` instead of blocking. + nonblocking: bool = false, + /// Returns 0 on end of stream. + result: File.Reader.Error!usize, + }; +}; + +pub const OperateError = error{ Canceled, Timeout }; + +/// Performs all `operations` in a non-deterministic order. 
Returns after all +/// `operations` have been attempted. The degree to which the operations are +/// performed concurrently is determined by the `Io` implementation. +/// +/// `n_wait` is an amount of operations between `0` and `operations.len` that +/// determines how many attempted operations must complete before `operate` +/// returns. Operation completion is defined by returning a value other than +/// `error.WouldBlock`. If the operation cannot return `error.WouldBlock`, it +/// always counts as completing. +/// +/// In the event `error.Canceled` is returned, any number of `operations` may +/// still have been completed successfully. +pub fn operate(io: Io, operations: []Operation, n_wait: usize, timeout: Timeout) OperateError!void { + return io.vtable.operate(io.userdata, operations, n_wait, timeout); +} + pub const Limit = enum(usize) { nothing = 0, unlimited = math.maxInt(usize), diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index e537755a33..303cb43908 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -554,7 +554,13 @@ pub fn setTimestampsNow(file: File, io: Io) SetTimestampsError!void { /// See also: /// * `reader` pub fn readStreaming(file: File, io: Io, buffer: []const []u8) Reader.Error!usize { - return io.vtable.fileReadStreaming(io.userdata, file, buffer); + var operation: Io.Operation = .{ .file_read_streaming = .{ + .file = file, + .data = buffer, + .result = undefined, + } }; + io.vtable.operate(io.userdata, (&operation)[0..1], 1, .none) catch unreachable; + return operation.file_read_streaming.result; } pub const ReadPositionalError = error{ diff --git a/lib/std/Io/File/Reader.zig b/lib/std/Io/File/Reader.zig index 2e0e192cb2..d3d1c05e3f 100644 --- a/lib/std/Io/File/Reader.zig +++ b/lib/std/Io/File/Reader.zig @@ -300,7 +300,7 @@ fn readVecStreaming(r: *Reader, data: [][]u8) Io.Reader.Error!usize { const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, data); const dest = iovecs_buffer[0..dest_n]; assert(dest[0].len > 0); - const n = io.vtable.fileReadStreaming(io.userdata, r.file, dest) catch |err| { + const n = r.file.readStreaming(io, dest) catch |err| { r.err = err; return error.ReadFailed; }; @@ -355,7 +355,7 @@ fn discard(io_reader: *Io.Reader, limit: Io.Limit) Io.Reader.Error!usize { const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, &data); const dest = iovecs_buffer[0..dest_n]; assert(dest[0].len > 0); - const n = io.vtable.fileReadStreaming(io.userdata, file, dest) catch |err| { + const n = file.readStreaming(io, dest) catch |err| { r.err = err; return error.ReadFailed; }; diff --git a/lib/std/Io/Reader.zig b/lib/std/Io/Reader.zig index a2b70afc67..9c5c762844 100644 --- a/lib/std/Io/Reader.zig +++ b/lib/std/Io/Reader.zig @@ -315,6 +315,27 @@ pub fn allocRemainingAlignedSentinel( } } +pub const AppendExactError = Allocator.Error || Error; + +/// Transfers exactly `n` bytes from the reader to the `ArrayList`. +/// +/// See also: +/// * `appendRemaining` +pub fn appendExact( + r: *Reader, + gpa: Allocator, + list: *ArrayList(u8), + n: usize, +) AppendExactError!void { + try list.ensureUnusedCapacity(gpa, n); + var a = std.Io.Writer.Allocating.fromArrayList(gpa, list); + defer list.* = a.toArrayList(); + streamExact(r, &a.writer, n) catch |err| switch (err) { + error.ReadFailed, error.EndOfStream => |e| return e, + error.WriteFailed => unreachable, + }; +} + /// Transfers all bytes from the current position to the end of the stream, up /// to `limit`, appending them to `list`. 
/// diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 4fd2ab1703..3710527f47 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -1586,6 +1586,8 @@ pub fn io(t: *Threaded) Io { .futexWaitUncancelable = futexWaitUncancelable, .futexWake = futexWake, + .operate = operate, + .dirCreateDir = dirCreateDir, .dirCreateDirPath = dirCreateDirPath, .dirCreateDirPathOpen = dirCreateDirPathOpen, @@ -1620,7 +1622,6 @@ pub fn io(t: *Threaded) Io { .fileWritePositional = fileWritePositional, .fileWriteFileStreaming = fileWriteFileStreaming, .fileWriteFilePositional = fileWriteFilePositional, - .fileReadStreaming = fileReadStreaming, .fileReadPositional = fileReadPositional, .fileSeekBy = fileSeekBy, .fileSeekTo = fileSeekTo, @@ -1746,6 +1747,8 @@ pub fn ioBasic(t: *Threaded) Io { .futexWaitUncancelable = futexWaitUncancelable, .futexWake = futexWake, + .operate = operate, + .dirCreateDir = dirCreateDir, .dirCreateDirPath = dirCreateDirPath, .dirCreateDirPathOpen = dirCreateDirPathOpen, @@ -1780,7 +1783,6 @@ pub fn ioBasic(t: *Threaded) Io { .fileWritePositional = fileWritePositional, .fileWriteFileStreaming = fileWriteFileStreaming, .fileWriteFilePositional = fileWriteFilePositional, - .fileReadStreaming = fileReadStreaming, .fileReadPositional = fileReadPositional, .fileSeekBy = fileSeekBy, .fileSeekTo = fileSeekTo, @@ -2447,6 +2449,87 @@ fn futexWake(userdata: ?*anyopaque, ptr: *const u32, max_waiters: u32) void { Thread.futexWake(ptr, max_waiters); } +fn operate(userdata: ?*anyopaque, operations: []Io.Operation, n_wait: usize, timeout: Io.Timeout) Io.OperateError!void { + const t: *Threaded = @ptrCast(@alignCast(userdata)); + const t_io = ioBasic(t); + + if (is_windows) @panic("TODO"); + + const deadline = timeout.toDeadline(t_io) catch |err| switch (err) { + error.UnsupportedClock, error.Unexpected => null, + }; + + var poll_buffer: [100]posix.pollfd = undefined; + var map_buffer: [poll_buffer.len]u8 = undefined; // poll_buffer index to operations index + var poll_i: usize = 0; + var completed: usize = 0; + + // Put all the file reads with nonblocking enabled into the poll set. + if (operations.len > poll_buffer.len) @panic("TODO"); + + // TODO if any operation is canceled, cancel the rest + + for (operations, 0..) |*operation, operation_index| switch (operation.*) { + .noop => continue, + .file_read_streaming => |*o| { + if (o.nonblocking) { + o.result = error.WouldBlock; + poll_buffer[poll_i] = .{ + .fd = o.file.handle, + .events = posix.POLL.IN, + .revents = undefined, + }; + map_buffer[poll_i] = @intCast(operation_index); + poll_i += 1; + } else { + o.result = fileReadStreaming(o.file, o.data); + completed += 1; + } + }, + }; + + if (poll_i == 0) { + @branchHint(.likely); + return; + } + + const max_poll_ms = std.math.maxInt(i32); + + while (completed < n_wait) { + const timeout_ms: i32 = if (deadline) |d| t: { + const duration = d.durationFromNow(t_io) catch @panic("TODO make this unreachable"); + if (duration.raw.nanoseconds <= 0) return error.Timeout; + break :t @intCast(@min(max_poll_ms, duration.raw.toMilliseconds())); + } else -1; + const syscall = try Syscall.start(); + const poll_rc = posix.system.poll(&poll_buffer, poll_i, timeout_ms); + syscall.finish(); + switch (posix.errno(poll_rc)) { + .SUCCESS => { + if (poll_rc == 0) { + // Although spurious timeouts are OK, when no deadline + // is passed we must not return `error.Timeout`. 
+ if (deadline == null) continue; + return error.Timeout; + } + for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, operation_index| { + if (poll_fd.revents == 0) continue; + poll_fd.fd = -1; // Disarm this operation. + switch (operations[operation_index]) { + .noop => unreachable, + .file_read_streaming => |*o| { + o.result = fileReadStreaming(o.file, o.data); + completed += 1; + }, + } + } + }, + .INTR => continue, + else => @panic("TODO handle unexpected error from poll()"), + } + } +} + const dirCreateDir = switch (native_os) { .windows => dirCreateDirWindows, .wasi => dirCreateDirWasi, diff --git a/lib/std/process.zig b/lib/std/process.zig index 8395882c16..10bcc76497 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -454,13 +454,17 @@ pub fn spawnPath(io: Io, dir: Io.Dir, options: SpawnOptions) SpawnError!Child { } pub const RunError = CurrentPathError || posix.ReadError || SpawnError || posix.PollError || error{ - StdoutStreamTooLong, - StderrStreamTooLong, + StreamTooLong, }; pub const RunOptions = struct { argv: []const []const u8, - max_output_bytes: usize = 50 * 1024, + stderr_limit: Io.Limit = .unlimited, + stdout_limit: Io.Limit = .unlimited, + /// How many bytes to initially allocate for stderr. + stderr_reserve_amount: usize = 1, + /// How many bytes to initially allocate for stdout. + stdout_reserve_amount: usize = 1, /// Set to change the current working directory when spawning the child process. cwd: ?[]const u8 = null, @@ -486,6 +490,7 @@ pub const RunOptions = struct { create_no_window: bool = true, /// Darwin-only. Disable ASLR for the child process. disable_aslr: bool = false, + timeout: Io.Timeout = .none, }; pub const RunResult = struct { @@ -518,7 +523,17 @@ pub fn run(gpa: Allocator, io: Io, options: RunOptions) RunError!RunResult { var stderr: std.ArrayList(u8) = .empty; defer stderr.deinit(gpa); - try child.collectOutput(gpa, &stdout, &stderr, options.max_output_bytes); + try stdout.ensureUnusedCapacity(gpa, options.stdout_reserve_amount); + try stderr.ensureUnusedCapacity(gpa, options.stderr_reserve_amount); + + try child.collectOutput(io, .{ + .allocator = gpa, + .stdout = &stdout, + .stderr = &stderr, + .stdout_limit = options.stdout_limit, + .stderr_limit = options.stderr_limit, + .timeout = options.timeout, + }); const term = try child.wait(io); diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 17e15f208d..6675c7bbe7 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -9,7 +9,6 @@ const process = std.process; const File = std.Io.File; const assert = std.debug.assert; const Allocator = std.mem.Allocator; -const ArrayList = std.ArrayList; pub const Id = switch (native_os) { .windows => std.os.windows.HANDLE, @@ -126,53 +125,80 @@ pub fn wait(child: *Child, io: Io) WaitError!Term { return io.vtable.childWait(io.userdata, child); } -/// Collect the output from the process's stdout and stderr. Will return once all output -/// has been collected. This does not mean that the process has ended. `wait` should still -/// be called to wait for and clean up the process. +pub const CollectOutputError = error{ + Timeout, + StreamTooLong, +} || Allocator.Error || Io.File.Reader.Error; + +pub const CollectOutputOptions = struct { + stdout: *std.ArrayList(u8), + stderr: *std.ArrayList(u8), + /// Used for `stdout` and `stderr`. If not provided, only the existing + /// capacity will be used. 
+ allocator: ?Allocator = null, + stdout_limit: Io.Limit = .unlimited, + stderr_limit: Io.Limit = .unlimited, + timeout: Io.Timeout = .none, +}; + +/// Collect the output from the process's stdout and stderr. Will return once +/// all output has been collected. This does not mean that the process has +/// ended. `wait` should still be called to wait for and clean up the process. /// /// The process must have been started with stdout and stderr set to /// `process.SpawnOptions.StdIo.pipe`. -pub fn collectOutput( - child: *const Child, - /// Used for `stdout` and `stderr`. - allocator: Allocator, - stdout: *ArrayList(u8), - stderr: *ArrayList(u8), - max_output_bytes: usize, -) !void { - var poller = std.Io.poll(allocator, enum { stdout, stderr }, .{ - .stdout = child.stdout.?, - .stderr = child.stderr.?, - }); - defer poller.deinit(); - - const stdout_r = poller.reader(.stdout); - stdout_r.buffer = stdout.allocatedSlice(); - stdout_r.seek = 0; - stdout_r.end = stdout.items.len; - - const stderr_r = poller.reader(.stderr); - stderr_r.buffer = stderr.allocatedSlice(); - stderr_r.seek = 0; - stderr_r.end = stderr.items.len; - - defer { - stdout.* = .{ - .items = stdout_r.buffer[0..stdout_r.end], - .capacity = stdout_r.buffer.len, - }; - stderr.* = .{ - .items = stderr_r.buffer[0..stderr_r.end], - .capacity = stderr_r.buffer.len, - }; - stdout_r.buffer = &.{}; - stderr_r.buffer = &.{}; - } - - while (try poller.poll()) { - if (stdout_r.bufferedLen() > max_output_bytes) - return error.StdoutStreamTooLong; - if (stderr_r.bufferedLen() > max_output_bytes) - return error.StderrStreamTooLong; +pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) CollectOutputError!void { + const files: [2]Io.File = .{ child.stdout.?, child.stderr.? 
}; + const lists: [2]*std.ArrayList(u8) = .{ options.stdout, options.stderr }; + const limits: [2]Io.Limit = .{ options.stdout_limit, options.stderr_limit }; + var dones: [2]bool = .{ false, false }; + var reads: [2]Io.Operation = undefined; + var vecs: [2][1][]u8 = undefined; + while (true) { + for (&reads, &lists, &files, dones, &vecs) |*read, list, file, done, *vec| { + if (done) { + read.* = .noop; + continue; + } + if (options.allocator) |gpa| try list.ensureUnusedCapacity(gpa, 1); + const cap = list.unusedCapacitySlice(); + if (cap.len == 0) return error.StreamTooLong; + vec[0] = cap; + read.* = .{ .file_read_streaming = .{ + .file = file, + .data = vec, + .nonblocking = true, + .result = undefined, + } }; + } + var all_done = true; + var any_canceled = false; + var other_err: (error{StreamTooLong} || Io.File.Reader.Error)!void = {}; + const op_result = io.vtable.operate(io.userdata, &reads, 1, options.timeout); + for (&reads, &lists, &limits, &dones) |*read, list, limit, *done| { + if (done.*) continue; + const n = read.file_read_streaming.result catch |err| switch (err) { + error.Canceled => { + any_canceled = true; + continue; + }, + error.WouldBlock => continue, + else => |e| { + other_err = e; + continue; + }, + }; + if (n == 0) { + done.* = true; + } else { + all_done = false; + } + list.items.len += n; + if (list.items.len > @intFromEnum(limit)) other_err = error.StreamTooLong; + } + if (any_canceled) return error.Canceled; + try op_result; // could be error.Canceled + try other_err; + if (all_done) return; } } From 05064e128137e6c7b1afe6d648b33ebfa60877f2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Jan 2026 12:55:38 -0800 Subject: [PATCH 13/65] std.Io: simplify operate function - no timeout - no n_wait - infallible --- lib/std/Io.zig | 19 +++--------- lib/std/Io/File.zig | 2 +- lib/std/Io/Threaded.zig | 61 ++++++++++++++++++--------------------- lib/std/process.zig | 2 -- lib/std/process/Child.zig | 4 +-- 5 files changed, 34 insertions(+), 54 deletions(-) diff --git a/lib/std/Io.zig b/lib/std/Io.zig index 3663e9b8d7..c52b51e00a 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -149,7 +149,7 @@ pub const VTable = struct { futexWaitUncancelable: *const fn (?*anyopaque, ptr: *const u32, expected: u32) void, futexWake: *const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void, - operate: *const fn (?*anyopaque, []Operation, n_wait: usize, Timeout) OperateError!void, + operate: *const fn (?*anyopaque, []Operation) void, dirCreateDir: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirError!void, dirCreateDirPath: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirPathError!Dir.CreatePathStatus, @@ -266,22 +266,11 @@ pub const Operation = union(enum) { }; }; -pub const OperateError = error{ Canceled, Timeout }; - /// Performs all `operations` in a non-deterministic order. Returns after all -/// `operations` have been attempted. The degree to which the operations are +/// `operations` have been completed. The degree to which the operations are /// performed concurrently is determined by the `Io` implementation. -/// -/// `n_wait` is an amount of operations between `0` and `operations.len` that -/// determines how many attempted operations must complete before `operate` -/// returns. Operation completion is defined by returning a value other than -/// `error.WouldBlock`. If the operation cannot return `error.WouldBlock`, it -/// always counts as completing. 
-/// -/// In the event `error.Canceled` is returned, any number of `operations` may -/// still have been completed successfully. -pub fn operate(io: Io, operations: []Operation, n_wait: usize, timeout: Timeout) OperateError!void { - return io.vtable.operate(io.userdata, operations, n_wait, timeout); +pub fn operate(io: Io, operations: []Operation) void { + return io.vtable.operate(io.userdata, operations); } pub const Limit = enum(usize) { diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index 303cb43908..16663eb484 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -559,7 +559,7 @@ pub fn readStreaming(file: File, io: Io, buffer: []const []u8) Reader.Error!usiz .data = buffer, .result = undefined, } }; - io.vtable.operate(io.userdata, (&operation)[0..1], 1, .none) catch unreachable; + io.vtable.operate(io.userdata, (&operation)[0..1]); return operation.file_read_streaming.result; } diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 3710527f47..8ee79a7ae3 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2449,20 +2449,15 @@ fn futexWake(userdata: ?*anyopaque, ptr: *const u32, max_waiters: u32) void { Thread.futexWake(ptr, max_waiters); } -fn operate(userdata: ?*anyopaque, operations: []Io.Operation, n_wait: usize, timeout: Io.Timeout) Io.OperateError!void { +fn operate(userdata: ?*anyopaque, operations: []Io.Operation) void { const t: *Threaded = @ptrCast(@alignCast(userdata)); - const t_io = ioBasic(t); + _ = t; if (is_windows) @panic("TODO"); - const deadline = timeout.toDeadline(t_io) catch |err| switch (err) { - error.UnsupportedClock, error.Unexpected => null, - }; - var poll_buffer: [100]posix.pollfd = undefined; var map_buffer: [poll_buffer.len]u8 = undefined; // poll_buffer index to operations index var poll_i: usize = 0; - var completed: usize = 0; // Put all the file reads with nonblocking enabled into the poll set. if (operations.len > poll_buffer.len) @panic("TODO"); @@ -2483,7 +2478,6 @@ fn operate(userdata: ?*anyopaque, operations: []Io.Operation, n_wait: usize, tim poll_i += 1; } else { o.result = fileReadStreaming(o.file, o.data); - completed += 1; } }, }; @@ -2493,41 +2487,42 @@ fn operate(userdata: ?*anyopaque, operations: []Io.Operation, n_wait: usize, tim return; } - const max_poll_ms = std.math.maxInt(i32); - - while (completed < n_wait) { - const timeout_ms: i32 = if (deadline) |d| t: { - const duration = d.durationFromNow(t_io) catch @panic("TODO make this unreachable"); - if (duration.raw.nanoseconds <= 0) return error.Timeout; - break :t @intCast(@min(max_poll_ms, duration.raw.toMilliseconds())); - } else -1; - const syscall = try Syscall.start(); - const poll_rc = posix.system.poll(&poll_buffer, poll_i, timeout_ms); + while (true) { + const syscall = Syscall.start() catch |err| switch (err) { + error.Canceled => { + for (map_buffer[0..poll_i]) |operation_index| { + switch (operations[operation_index]) { + .noop => unreachable, + inline else => |*o| o.result = error.Canceled, + } + } + return; + }, + }; + const poll_rc = posix.system.poll(&poll_buffer, poll_i, -1); syscall.finish(); switch (posix.errno(poll_rc)) { .SUCCESS => { if (poll_rc == 0) { - // Although spurious timeouts are OK, when no deadline - // is passed we must not return `error.Timeout`. - if (deadline == null) continue; - return error.Timeout; - } - for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, operation_index| { - if (poll_fd.revents == 0) continue; - poll_fd.fd = -1; // Disarm this operation. 
- switch (operations[operation_index]) { - .noop => unreachable, - .file_read_streaming => |*o| { - o.result = fileReadStreaming(o.file, o.data); - completed += 1; - }, - } + // Spurious timeout; handle same as INTR. + continue; } + break; }, .INTR => continue, else => @panic("TODO handle unexpected error from poll()"), } } + + for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, operation_index| { + if (poll_fd.revents == 0) continue; + switch (operations[operation_index]) { + .noop => unreachable, + .file_read_streaming => |*o| { + o.result = fileReadStreaming(o.file, o.data); + }, + } + } } const dirCreateDir = switch (native_os) { diff --git a/lib/std/process.zig b/lib/std/process.zig index 10bcc76497..4a021879a5 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -490,7 +490,6 @@ pub const RunOptions = struct { create_no_window: bool = true, /// Darwin-only. Disable ASLR for the child process. disable_aslr: bool = false, - timeout: Io.Timeout = .none, }; pub const RunResult = struct { @@ -532,7 +531,6 @@ pub fn run(gpa: Allocator, io: Io, options: RunOptions) RunError!RunResult { .stderr = &stderr, .stdout_limit = options.stdout_limit, .stderr_limit = options.stderr_limit, - .timeout = options.timeout, }); const term = try child.wait(io); diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 6675c7bbe7..e541ca4e65 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -138,7 +138,6 @@ pub const CollectOutputOptions = struct { allocator: ?Allocator = null, stdout_limit: Io.Limit = .unlimited, stderr_limit: Io.Limit = .unlimited, - timeout: Io.Timeout = .none, }; /// Collect the output from the process's stdout and stderr. Will return once @@ -174,7 +173,7 @@ pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) var all_done = true; var any_canceled = false; var other_err: (error{StreamTooLong} || Io.File.Reader.Error)!void = {}; - const op_result = io.vtable.operate(io.userdata, &reads, 1, options.timeout); + io.vtable.operate(io.userdata, &reads); for (&reads, &lists, &limits, &dones) |*read, list, limit, *done| { if (done.*) continue; const n = read.file_read_streaming.result catch |err| switch (err) { @@ -197,7 +196,6 @@ pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) if (list.items.len > @intFromEnum(limit)) other_err = error.StreamTooLong; } if (any_canceled) return error.Canceled; - try op_result; // could be error.Canceled try other_err; if (all_done) return; } From 93f5c99149948b104ab504eff3a171b6c1bff065 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Jan 2026 14:10:31 -0800 Subject: [PATCH 14/65] std.Io.Threaded.operate: handle cancelation and poll errors --- lib/std/Io/Threaded.zig | 49 +++++++++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 8ee79a7ae3..1f4bcf3478 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -1323,6 +1323,7 @@ fn waitForApcOrAlert() void { const max_iovecs_len = 8; const splat_buffer_size = 64; +const poll_buffer_len = 100; const default_PATH = "/usr/local/bin:/bin/:/usr/bin"; comptime { @@ -2455,15 +2456,13 @@ fn operate(userdata: ?*anyopaque, operations: []Io.Operation) void { if (is_windows) @panic("TODO"); - var poll_buffer: [100]posix.pollfd = undefined; - var map_buffer: [poll_buffer.len]u8 = undefined; // poll_buffer index to operations index + var poll_buffer: [poll_buffer_len]posix.pollfd = 
undefined; + var map_buffer: [poll_buffer_len]u8 = undefined; // poll_buffer index to operations index var poll_i: usize = 0; // Put all the file reads with nonblocking enabled into the poll set. if (operations.len > poll_buffer.len) @panic("TODO"); - // TODO if any operation is canceled, cancel the rest - for (operations, 0..) |*operation, operation_index| switch (operation.*) { .noop => continue, .file_read_streaming => |*o| { @@ -2477,7 +2476,13 @@ fn operate(userdata: ?*anyopaque, operations: []Io.Operation) void { map_buffer[poll_i] = @intCast(operation_index); poll_i += 1; } else { - o.result = fileReadStreaming(o.file, o.data); + o.result = fileReadStreaming(o.file, o.data) catch |err| switch (err) { + error.Canceled => { + setOperationsCanceled(operations[operation_index..]); + return; + }, + else => err, + }; } }, }; @@ -2490,12 +2495,7 @@ fn operate(userdata: ?*anyopaque, operations: []Io.Operation) void { while (true) { const syscall = Syscall.start() catch |err| switch (err) { error.Canceled => { - for (map_buffer[0..poll_i]) |operation_index| { - switch (operations[operation_index]) { - .noop => unreachable, - inline else => |*o| o.result = error.Canceled, - } - } + setAllOperationsError(operations, map_buffer[0..poll_i], error.Canceled); return; }, }; @@ -2510,7 +2510,14 @@ fn operate(userdata: ?*anyopaque, operations: []Io.Operation) void { break; }, .INTR => continue, - else => @panic("TODO handle unexpected error from poll()"), + .NOMEM => { + setAllOperationsError(operations, map_buffer[0..poll_i], error.SystemResources); + return; + }, + else => { + setAllOperationsError(operations, map_buffer[0..poll_i], error.Unexpected); + return; + }, } } @@ -2525,6 +2532,24 @@ fn operate(userdata: ?*anyopaque, operations: []Io.Operation) void { } } +fn setAllOperationsError( + operations: []Io.Operation, + map: []const u8, + err: error{ Canceled, SystemResources, Unexpected }, +) void { + for (map) |operation_index| switch (operations[operation_index]) { + .noop => unreachable, + inline else => |*o| o.result = err, + }; +} + +fn setOperationsCanceled(operations: []Io.Operation) void { + for (operations) |*op| switch (op.*) { + .noop => unreachable, + inline else => |*o| o.result = error.Canceled, + }; +} + const dirCreateDir = switch (native_os) { .windows => dirCreateDirWindows, .wasi => dirCreateDirWasi, From 6a7fe61d74f80456b32cb46ef21715bbffaf49a7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Jan 2026 15:07:03 -0800 Subject: [PATCH 15/65] std.Io.Threaded.operate: handle poll buffer exceeded --- lib/std/Io/Threaded.zig | 141 ++++++++++++++++++++-------------------- 1 file changed, 72 insertions(+), 69 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 1f4bcf3478..e58d923793 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2458,81 +2458,84 @@ fn operate(userdata: ?*anyopaque, operations: []Io.Operation) void { var poll_buffer: [poll_buffer_len]posix.pollfd = undefined; var map_buffer: [poll_buffer_len]u8 = undefined; // poll_buffer index to operations index - var poll_i: usize = 0; + var operation_index: usize = 0; - // Put all the file reads with nonblocking enabled into the poll set. - if (operations.len > poll_buffer.len) @panic("TODO"); - - for (operations, 0..) 
|*operation, operation_index| switch (operation.*) { - .noop => continue, - .file_read_streaming => |*o| { - if (o.nonblocking) { - o.result = error.WouldBlock; - poll_buffer[poll_i] = .{ - .fd = o.file.handle, - .events = posix.POLL.IN, - .revents = undefined, - }; - map_buffer[poll_i] = @intCast(operation_index); - poll_i += 1; - } else { - o.result = fileReadStreaming(o.file, o.data) catch |err| switch (err) { - error.Canceled => { - setOperationsCanceled(operations[operation_index..]); - return; - }, - else => err, - }; + while (operation_index < operations.len) { + var poll_i: usize = 0; + while (operation_index < operations.len) : (operation_index += 1) { + switch (operations[operation_index]) { + .noop => continue, + .file_read_streaming => |*o| { + if (o.nonblocking) { + o.result = error.WouldBlock; + poll_buffer[poll_i] = .{ + .fd = o.file.handle, + .events = posix.POLL.IN, + .revents = 0, + }; + if (map_buffer.len - poll_i == 0) break; + map_buffer[poll_i] = @intCast(operation_index); + poll_i += 1; + } else { + o.result = fileReadStreaming(o.file, o.data) catch |err| switch (err) { + error.Canceled => { + setOperationsError(operations[operation_index..], error.Canceled); + return; + }, + else => err, + }; + } + }, } - }, - }; - - if (poll_i == 0) { - @branchHint(.likely); - return; - } - - while (true) { - const syscall = Syscall.start() catch |err| switch (err) { - error.Canceled => { - setAllOperationsError(operations, map_buffer[0..poll_i], error.Canceled); - return; - }, - }; - const poll_rc = posix.system.poll(&poll_buffer, poll_i, -1); - syscall.finish(); - switch (posix.errno(poll_rc)) { - .SUCCESS => { - if (poll_rc == 0) { - // Spurious timeout; handle same as INTR. - continue; - } - break; - }, - .INTR => continue, - .NOMEM => { - setAllOperationsError(operations, map_buffer[0..poll_i], error.SystemResources); - return; - }, - else => { - setAllOperationsError(operations, map_buffer[0..poll_i], error.Unexpected); - return; - }, } - } - for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, operation_index| { - if (poll_fd.revents == 0) continue; - switch (operations[operation_index]) { - .noop => unreachable, - .file_read_streaming => |*o| { - o.result = fileReadStreaming(o.file, o.data); - }, + if (poll_i == 0) { + @branchHint(.likely); + return; + } + + while (true) { + const syscall = Syscall.start() catch |err| switch (err) { + error.Canceled => { + setPollOperationsError(operations, map_buffer[0..poll_i], error.Canceled); + setOperationsError(operations[operation_index..], error.Canceled); + return; + }, + }; + const poll_rc = posix.system.poll(&poll_buffer, poll_i, -1); + syscall.finish(); + switch (posix.errno(poll_rc)) { + .SUCCESS => { + if (poll_rc == 0) { + // Spurious timeout; handle same as INTR. 
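+                        // (Defensive: poll() is not expected to time out when the
+                        // timeout argument is -1, but retrying a zero return is harmless.)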
+ continue; + } + for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, i| { + if (poll_fd.revents == 0) continue; + switch (operations[i]) { + .noop => unreachable, + .file_read_streaming => |*o| { + o.result = fileReadStreaming(o.file, o.data); + }, + } + } + break; + }, + .INTR => continue, + .NOMEM => { + setPollOperationsError(operations, map_buffer[0..poll_i], error.SystemResources); + break; + }, + else => { + setPollOperationsError(operations, map_buffer[0..poll_i], error.Unexpected); + break; + }, + } } } } -fn setAllOperationsError( +fn setPollOperationsError( operations: []Io.Operation, map: []const u8, err: error{ Canceled, SystemResources, Unexpected }, @@ -2543,10 +2546,10 @@ fn setAllOperationsError( }; } -fn setOperationsCanceled(operations: []Io.Operation) void { +fn setOperationsError(operations: []Io.Operation, err: error{ Canceled, SystemResources, Unexpected }) void { for (operations) |*op| switch (op.*) { .noop => unreachable, - inline else => |*o| o.result = error.Canceled, + inline else => |*o| o.result = err, }; } From e0d06b40e3681cc9aad09764987d079fb0f9e0fa Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Jan 2026 15:20:21 -0800 Subject: [PATCH 16/65] std.Io.Threaded: set poll_buffer_len to 32 reasoning is that polling with large amount of operations will be rarely done with std.Io.Threaded. However this still provides the opportunity to provide concurrency for any real world use cases that need it. --- lib/std/Io/Threaded.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index e58d923793..55b3596ee7 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -1323,7 +1323,7 @@ fn waitForApcOrAlert() void { const max_iovecs_len = 8; const splat_buffer_size = 64; -const poll_buffer_len = 100; +const poll_buffer_len = 32; const default_PATH = "/usr/local/bin:/bin/:/usr/bin"; comptime { From b996675dcf5533e506eee0ed44c64aebab69af6e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Jan 2026 16:36:18 -0800 Subject: [PATCH 17/65] fix error set --- lib/std/process/Child.zig | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index e541ca4e65..fc31014520 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -125,10 +125,7 @@ pub fn wait(child: *Child, io: Io) WaitError!Term { return io.vtable.childWait(io.userdata, child); } -pub const CollectOutputError = error{ - Timeout, - StreamTooLong, -} || Allocator.Error || Io.File.Reader.Error; +pub const CollectOutputError = error{StreamTooLong} || Allocator.Error || Io.File.Reader.Error; pub const CollectOutputOptions = struct { stdout: *std.ArrayList(u8), From 87408f8addac76b8b4811fbfede30a1c0f637a8f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Jan 2026 16:55:26 -0800 Subject: [PATCH 18/65] std.process.Child: rewrite using concurrent I plan to immediately revert this, but here's a commit for posterity --- lib/std/process.zig | 1 + lib/std/process/Child.zig | 81 ++++++++++++--------------------------- 2 files changed, 26 insertions(+), 56 deletions(-) diff --git a/lib/std/process.zig b/lib/std/process.zig index 4a021879a5..b203838e3f 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -455,6 +455,7 @@ pub fn spawnPath(io: Io, dir: Io.Dir, options: SpawnOptions) SpawnError!Child { pub const RunError = CurrentPathError || posix.ReadError || SpawnError || posix.PollError || error{ StreamTooLong, + 
ConcurrencyUnavailable, }; pub const RunOptions = struct { diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index fc31014520..e64f6106fa 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -125,14 +125,15 @@ pub fn wait(child: *Child, io: Io) WaitError!Term { return io.vtable.childWait(io.userdata, child); } -pub const CollectOutputError = error{StreamTooLong} || Allocator.Error || Io.File.Reader.Error; +pub const CollectOutputError = error{ + StreamTooLong, + ConcurrencyUnavailable, +} || Allocator.Error || Io.File.Reader.Error; pub const CollectOutputOptions = struct { stdout: *std.ArrayList(u8), stderr: *std.ArrayList(u8), - /// Used for `stdout` and `stderr`. If not provided, only the existing - /// capacity will be used. - allocator: ?Allocator = null, + allocator: Allocator, stdout_limit: Io.Limit = .unlimited, stderr_limit: Io.Limit = .unlimited, }; @@ -144,56 +145,24 @@ pub const CollectOutputOptions = struct { /// The process must have been started with stdout and stderr set to /// `process.SpawnOptions.StdIo.pipe`. pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) CollectOutputError!void { - const files: [2]Io.File = .{ child.stdout.?, child.stderr.? }; - const lists: [2]*std.ArrayList(u8) = .{ options.stdout, options.stderr }; - const limits: [2]Io.Limit = .{ options.stdout_limit, options.stderr_limit }; - var dones: [2]bool = .{ false, false }; - var reads: [2]Io.Operation = undefined; - var vecs: [2][1][]u8 = undefined; - while (true) { - for (&reads, &lists, &files, dones, &vecs) |*read, list, file, done, *vec| { - if (done) { - read.* = .noop; - continue; - } - if (options.allocator) |gpa| try list.ensureUnusedCapacity(gpa, 1); - const cap = list.unusedCapacitySlice(); - if (cap.len == 0) return error.StreamTooLong; - vec[0] = cap; - read.* = .{ .file_read_streaming = .{ - .file = file, - .data = vec, - .nonblocking = true, - .result = undefined, - } }; - } - var all_done = true; - var any_canceled = false; - var other_err: (error{StreamTooLong} || Io.File.Reader.Error)!void = {}; - io.vtable.operate(io.userdata, &reads); - for (&reads, &lists, &limits, &dones) |*read, list, limit, *done| { - if (done.*) continue; - const n = read.file_read_streaming.result catch |err| switch (err) { - error.Canceled => { - any_canceled = true; - continue; - }, - error.WouldBlock => continue, - else => |e| { - other_err = e; - continue; - }, - }; - if (n == 0) { - done.* = true; - } else { - all_done = false; - } - list.items.len += n; - if (list.items.len > @intFromEnum(limit)) other_err = error.StreamTooLong; - } - if (any_canceled) return error.Canceled; - try other_err; - if (all_done) return; - } + var stdout = try io.concurrent(collectStream, .{ + io, options.allocator, child.stdout.?, options.stdout, options.stdout_limit, + }); + defer stdout.cancel(io) catch {}; + + var stderr = try io.concurrent(collectStream, .{ + io, options.allocator, child.stderr.?, options.stderr, options.stderr_limit, + }); + defer stderr.cancel(io) catch {}; + + try stdout.await(io); + try stderr.await(io); +} + +fn collectStream(io: Io, gpa: Allocator, file: File, list: *std.ArrayList(u8), limit: Io.Limit) CollectOutputError!void { + var fr = file.readerStreaming(io, &.{}); + fr.interface.appendRemaining(gpa, list, limit) catch |err| switch (err) { + error.ReadFailed => return fr.err.?, + else => |e| return e, + }; } From e2a266e744adaa7ee84a512fdf4cea44686ca0b6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Jan 2026 16:55:51 
-0800 Subject: [PATCH 19/65] Revert "std.process.Child: rewrite using concurrent" This reverts commit 76e1ba8f490812c6e2ebf6f6becd89a71275d21e. --- lib/std/process.zig | 1 - lib/std/process/Child.zig | 81 +++++++++++++++++++++++++++------------ 2 files changed, 56 insertions(+), 26 deletions(-) diff --git a/lib/std/process.zig b/lib/std/process.zig index b203838e3f..4a021879a5 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -455,7 +455,6 @@ pub fn spawnPath(io: Io, dir: Io.Dir, options: SpawnOptions) SpawnError!Child { pub const RunError = CurrentPathError || posix.ReadError || SpawnError || posix.PollError || error{ StreamTooLong, - ConcurrencyUnavailable, }; pub const RunOptions = struct { diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index e64f6106fa..fc31014520 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -125,15 +125,14 @@ pub fn wait(child: *Child, io: Io) WaitError!Term { return io.vtable.childWait(io.userdata, child); } -pub const CollectOutputError = error{ - StreamTooLong, - ConcurrencyUnavailable, -} || Allocator.Error || Io.File.Reader.Error; +pub const CollectOutputError = error{StreamTooLong} || Allocator.Error || Io.File.Reader.Error; pub const CollectOutputOptions = struct { stdout: *std.ArrayList(u8), stderr: *std.ArrayList(u8), - allocator: Allocator, + /// Used for `stdout` and `stderr`. If not provided, only the existing + /// capacity will be used. + allocator: ?Allocator = null, stdout_limit: Io.Limit = .unlimited, stderr_limit: Io.Limit = .unlimited, }; @@ -145,24 +144,56 @@ pub const CollectOutputOptions = struct { /// The process must have been started with stdout and stderr set to /// `process.SpawnOptions.StdIo.pipe`. pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) CollectOutputError!void { - var stdout = try io.concurrent(collectStream, .{ - io, options.allocator, child.stdout.?, options.stdout, options.stdout_limit, - }); - defer stdout.cancel(io) catch {}; - - var stderr = try io.concurrent(collectStream, .{ - io, options.allocator, child.stderr.?, options.stderr, options.stderr_limit, - }); - defer stderr.cancel(io) catch {}; - - try stdout.await(io); - try stderr.await(io); -} - -fn collectStream(io: Io, gpa: Allocator, file: File, list: *std.ArrayList(u8), limit: Io.Limit) CollectOutputError!void { - var fr = file.readerStreaming(io, &.{}); - fr.interface.appendRemaining(gpa, list, limit) catch |err| switch (err) { - error.ReadFailed => return fr.err.?, - else => |e| return e, - }; + const files: [2]Io.File = .{ child.stdout.?, child.stderr.? 
}; + const lists: [2]*std.ArrayList(u8) = .{ options.stdout, options.stderr }; + const limits: [2]Io.Limit = .{ options.stdout_limit, options.stderr_limit }; + var dones: [2]bool = .{ false, false }; + var reads: [2]Io.Operation = undefined; + var vecs: [2][1][]u8 = undefined; + while (true) { + for (&reads, &lists, &files, dones, &vecs) |*read, list, file, done, *vec| { + if (done) { + read.* = .noop; + continue; + } + if (options.allocator) |gpa| try list.ensureUnusedCapacity(gpa, 1); + const cap = list.unusedCapacitySlice(); + if (cap.len == 0) return error.StreamTooLong; + vec[0] = cap; + read.* = .{ .file_read_streaming = .{ + .file = file, + .data = vec, + .nonblocking = true, + .result = undefined, + } }; + } + var all_done = true; + var any_canceled = false; + var other_err: (error{StreamTooLong} || Io.File.Reader.Error)!void = {}; + io.vtable.operate(io.userdata, &reads); + for (&reads, &lists, &limits, &dones) |*read, list, limit, *done| { + if (done.*) continue; + const n = read.file_read_streaming.result catch |err| switch (err) { + error.Canceled => { + any_canceled = true; + continue; + }, + error.WouldBlock => continue, + else => |e| { + other_err = e; + continue; + }, + }; + if (n == 0) { + done.* = true; + } else { + all_done = false; + } + list.items.len += n; + if (list.items.len > @intFromEnum(limit)) other_err = error.StreamTooLong; + } + if (any_canceled) return error.Canceled; + try other_err; + if (all_done) return; + } } From 642f329ac91d69d02588ba15714edafb09e709da Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 9 Jan 2026 15:06:50 -0800 Subject: [PATCH 20/65] std.Io: exploring a different batch API proposal --- lib/std/Io.zig | 95 ++++++++++++-- lib/std/Io/File.zig | 5 +- lib/std/Io/Threaded.zig | 260 +++++++++++++++++++++++++------------- lib/std/process.zig | 16 +-- lib/std/process/Child.zig | 92 +++++++------- 5 files changed, 314 insertions(+), 154 deletions(-) diff --git a/lib/std/Io.zig b/lib/std/Io.zig index c52b51e00a..d916bb6995 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -149,7 +149,10 @@ pub const VTable = struct { futexWaitUncancelable: *const fn (?*anyopaque, ptr: *const u32, expected: u32) void, futexWake: *const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void, - operate: *const fn (?*anyopaque, []Operation) void, + batch: *const fn (?*anyopaque, []Operation) ConcurrentError!void, + batchSubmit: *const fn (?*anyopaque, *Batch) void, + batchWait: *const fn (?*anyopaque, *Batch, resubmissions: []const usize, Timeout) Batch.WaitError!usize, + batchCancel: *const fn (?*anyopaque, *Batch) void, dirCreateDir: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirError!void, dirCreateDirPath: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirPathError!Dir.CreatePathStatus, @@ -253,26 +256,96 @@ pub const VTable = struct { }; pub const Operation = union(enum) { - noop, + noop: Noop, file_read_streaming: FileReadStreaming, + pub const Noop = struct { + reserved: [2]usize, + status: Status(void) = .{ .result = {} }, + }; + + /// Returns 0 on end of stream. pub const FileReadStreaming = struct { file: File, data: []const []u8, - /// Causes `result` to return `error.WouldBlock` instead of blocking. - nonblocking: bool = false, - /// Returns 0 on end of stream. 
-        result: File.Reader.Error!usize,
+        status: Status(File.Reader.Error!usize) = .{ .unstarted = {} },
     };
+
+    pub fn Status(Result: type) type {
+        return union {
+            unstarted: void,
+            pending: usize,
+            result: Result,
+        };
+    }
 };
 
-/// Performs all `operations` in a non-deterministic order. Returns after all
-/// `operations` have been completed. The degree to which the operations are
-/// performed concurrently is determined by the `Io` implementation.
-pub fn operate(io: Io, operations: []Operation) void {
-    return io.vtable.operate(io.userdata, operations);
+/// Performs all `operations` in an unspecified order, concurrently.
+///
+/// Returns after all `operations` have been completed. If the operations could
+/// not be completed concurrently, returns `error.ConcurrencyUnavailable`.
+///
+/// With this API, it is rare for concurrency to not be available. Even a
+/// single-threaded `Io` implementation can, for example, take advantage of
+/// poll() to implement this. Note that poll() is fallible however.
+///
+/// If `operations.len` is one, `error.ConcurrencyUnavailable` is unreachable.
+///
+/// On entry, all operations must already have `.status = .unstarted` except
+/// noops must have `.status = .{ .result = {} }`, to safety check the state
+/// transitions.
+///
+/// On return, all operations have `.status = .{ .result = ... }`.
+pub fn batch(io: Io, operations: []Operation) ConcurrentError!void {
+    return io.vtable.batch(io.userdata, operations);
 }
 
+/// Performs one `Operation`.
+pub fn operate(io: Io, operation: *Operation) void {
+    return io.vtable.batch(io.userdata, (operation)[0..1]) catch unreachable;
+}
+
+/// Submits many operations together without waiting for all of them to
+/// complete.
+///
+/// This is a low-level abstraction based on `Operation`. For a higher
+/// level API that operates on `Future`, see `Select`.
+pub const Batch = struct {
+    operations: []Operation,
+    index: usize,
+    reserved: ?*anyopaque,
+
+    pub fn init(operations: []Operation) Batch {
+        return .{ .operations = operations, .index = 0, .reserved = null };
+    }
+
+    /// Submits all non-noop `operations`.
+    pub fn submit(b: *Batch, io: Io) void {
+        return io.vtable.batchSubmit(io.userdata, b);
+    }
+
+    pub const WaitError = ConcurrentError || Cancelable || Timeout.Error;
+
+    /// Resubmits the previously completed or noop-initialized `operations` at
+    /// indexes given by `resubmissions`. This set of indexes typically will be empty
+    /// on the first call to `wait` since all operations have already been
+    /// submitted via `submit`.
+    ///
+    /// Returns the index of a completed `Operation`, or `operations.len` if
+    /// all operations are completed.
+    ///
+    /// When `error.Canceled` is returned, all operations have already completed.
+    pub fn wait(b: *Batch, io: Io, resubmissions: []const usize, timeout: Timeout) WaitError!usize {
+        return io.vtable.batchWait(io.userdata, b, resubmissions, timeout);
+    }
+
+    /// Returns after all `operations` have completed. Each operation
+    /// independently may or may not have been canceled.
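
To make the intended control flow concrete, here is a minimal, hypothetical driver for a single streaming read against this Batch API; `io` and `file` are placeholders, and real callers such as the `collectOutput` rewrite later in this patch interleave several operations:

    var buf: [4096]u8 = undefined;
    var vecs: [1][]u8 = .{&buf};
    var ops: [1]Io.Operation = .{
        .{ .file_read_streaming = .{ .file = file, .data = &vecs } },
    };
    var b: Io.Batch = .init(&ops);
    b.submit(io);
    defer b.cancel(io);

    var resubmission_buf: [1]usize = undefined;
    var resubmissions: []const usize = &.{}; // empty on the first wait
    while (true) {
        const index = try b.wait(io, resubmissions, .none);
        if (index == ops.len) break; // every operation has completed
        const n = try ops[index].file_read_streaming.status.result;
        if (n == 0) break; // end of stream
        // Consume buf[0..n], then queue the same read again on the next wait.
        resubmission_buf[0] = index;
        resubmissions = &resubmission_buf;
    }
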
+ pub fn cancel(b: *Batch, io: Io) void { + return io.vtable.batchCancel(io.userdata, b); + } +}; + pub const Limit = enum(usize) { nothing = 0, unlimited = math.maxInt(usize), diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index 16663eb484..f27f249975 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -557,10 +557,9 @@ pub fn readStreaming(file: File, io: Io, buffer: []const []u8) Reader.Error!usiz var operation: Io.Operation = .{ .file_read_streaming = .{ .file = file, .data = buffer, - .result = undefined, } }; - io.vtable.operate(io.userdata, (&operation)[0..1]); - return operation.file_read_streaming.result; + io.operate(&operation); + return operation.file_read_streaming.status.result; } pub const ReadPositionalError = error{ diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 55b3596ee7..0eda4e8fdc 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -1587,7 +1587,10 @@ pub fn io(t: *Threaded) Io { .futexWaitUncancelable = futexWaitUncancelable, .futexWake = futexWake, - .operate = operate, + .batch = batch, + .batchSubmit = batchSubmit, + .batchWait = batchWait, + .batchCancel = batchCancel, .dirCreateDir = dirCreateDir, .dirCreateDirPath = dirCreateDirPath, @@ -1748,7 +1751,10 @@ pub fn ioBasic(t: *Threaded) Io { .futexWaitUncancelable = futexWaitUncancelable, .futexWake = futexWake, - .operate = operate, + .batch = batch, + .batchSubmit = batchSubmit, + .batchWait = batchWait, + .batchCancel = batchCancel, .dirCreateDir = dirCreateDir, .dirCreateDirPath = dirCreateDirPath, @@ -2450,107 +2456,187 @@ fn futexWake(userdata: ?*anyopaque, ptr: *const u32, max_waiters: u32) void { Thread.futexWake(ptr, max_waiters); } -fn operate(userdata: ?*anyopaque, operations: []Io.Operation) void { +fn batchSubmit(userdata: ?*anyopaque, b: *Io.Batch) void { const t: *Threaded = @ptrCast(@alignCast(userdata)); _ = t; + _ = b; + return; +} + +fn operate(op: *Io.Operation) void { + switch (op.*) { + .noop => {}, + .file_read_streaming => |*o| o.status = .{ .result = fileReadStreaming(o.file, o.data) }, + } +} + +fn batchWait( + userdata: ?*anyopaque, + b: *Io.Batch, + resubmissions: []const usize, + timeout: Io.Timeout, +) Io.Batch.WaitError!usize { + _ = resubmissions; + const t: *Threaded = @ptrCast(@alignCast(userdata)); + const operations = b.operations; + if (operations.len == 1) { + operate(&operations[0]); + return b.operations.len; + } + if (is_windows) @panic("TODO"); + + var poll_buffer: [poll_buffer_len]posix.pollfd = undefined; + var map_buffer: [poll_buffer_len]u8 = undefined; // poll_buffer index to operations index + var poll_i: usize = 0; + + for (operations, 0..) 
|*op, operation_index| switch (op.*) { + .noop => continue, + .file_read_streaming => |*o| { + if (poll_buffer.len - poll_i == 0) return error.ConcurrencyUnavailable; + poll_buffer[poll_i] = .{ + .fd = o.file.handle, + .events = posix.POLL.IN, + .revents = 0, + }; + map_buffer[poll_i] = @intCast(operation_index); + poll_i += 1; + }, + }; + + if (poll_i == 0) return operations.len; + + const t_io = ioBasic(t); + const deadline = timeout.toDeadline(t_io) catch return error.UnsupportedClock; + const max_poll_ms = std.math.maxInt(i32); + + while (true) { + const timeout_ms: i32 = if (deadline) |d| t: { + const duration = d.durationFromNow(t_io) catch return error.UnsupportedClock; + if (duration.raw.nanoseconds <= 0) return error.Timeout; + break :t @intCast(@min(max_poll_ms, duration.raw.toMilliseconds())); + } else -1; + const syscall = try Syscall.start(); + const rc = posix.system.poll(&poll_buffer, poll_i, timeout_ms); + syscall.finish(); + switch (posix.errno(rc)) { + .SUCCESS => { + if (rc == 0) { + // Although spurious timeouts are OK, when no deadline is + // passed we must not return `error.Timeout`. + if (deadline == null) continue; + return error.Timeout; + } + for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, i| { + if (poll_fd.revents == 0) continue; + operate(&operations[i]); + return i; + } + }, + .INTR => continue, + else => return error.ConcurrencyUnavailable, + } + } +} + +fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { + const t: *Threaded = @ptrCast(@alignCast(userdata)); + _ = t; + _ = b; + return; +} + +fn batch(userdata: ?*anyopaque, operations: []Io.Operation) Io.ConcurrentError!void { + const t: *Threaded = @ptrCast(@alignCast(userdata)); + _ = t; + + if (operations.len == 1) { + @branchHint(.likely); + return operate(&operations[0]); + } if (is_windows) @panic("TODO"); var poll_buffer: [poll_buffer_len]posix.pollfd = undefined; var map_buffer: [poll_buffer_len]u8 = undefined; // poll_buffer index to operations index - var operation_index: usize = 0; + var poll_i: usize = 0; - while (operation_index < operations.len) { - var poll_i: usize = 0; - while (operation_index < operations.len) : (operation_index += 1) { - switch (operations[operation_index]) { - .noop => continue, - .file_read_streaming => |*o| { - if (o.nonblocking) { - o.result = error.WouldBlock; - poll_buffer[poll_i] = .{ - .fd = o.file.handle, - .events = posix.POLL.IN, - .revents = 0, - }; - if (map_buffer.len - poll_i == 0) break; - map_buffer[poll_i] = @intCast(operation_index); - poll_i += 1; - } else { - o.result = fileReadStreaming(o.file, o.data) catch |err| switch (err) { - error.Canceled => { - setOperationsError(operations[operation_index..], error.Canceled); - return; - }, - else => err, - }; - } - }, - } - } - - if (poll_i == 0) { - @branchHint(.likely); - return; - } - - while (true) { - const syscall = Syscall.start() catch |err| switch (err) { - error.Canceled => { - setPollOperationsError(operations, map_buffer[0..poll_i], error.Canceled); - setOperationsError(operations[operation_index..], error.Canceled); - return; - }, + for (operations, 0..) 
|*op, operation_index| switch (op.*) { + .noop => continue, + .file_read_streaming => |*o| { + if (poll_buffer.len - poll_i == 0) return error.ConcurrencyUnavailable; + poll_buffer[poll_i] = .{ + .fd = o.file.handle, + .events = posix.POLL.IN, + .revents = 0, }; - const poll_rc = posix.system.poll(&poll_buffer, poll_i, -1); - syscall.finish(); - switch (posix.errno(poll_rc)) { - .SUCCESS => { - if (poll_rc == 0) { - // Spurious timeout; handle same as INTR. - continue; - } - for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, i| { - if (poll_fd.revents == 0) continue; - switch (operations[i]) { - .noop => unreachable, - .file_read_streaming => |*o| { - o.result = fileReadStreaming(o.file, o.data); - }, - } - } - break; - }, - .INTR => continue, - .NOMEM => { - setPollOperationsError(operations, map_buffer[0..poll_i], error.SystemResources); - break; - }, - else => { - setPollOperationsError(operations, map_buffer[0..poll_i], error.Unexpected); - break; - }, - } + map_buffer[poll_i] = @intCast(operation_index); + poll_i += 1; + }, + }; + + const polls = poll_buffer[0..poll_i]; + const map = map_buffer[0..poll_i]; + + var pending = poll_i; + while (pending > 1) { + const syscall = Syscall.start() catch |err| switch (err) { + error.Canceled => { + if (!setOperationsError(operations, polls, map, error.Canceled)) + recancelInner(); + return; + }, + }; + const rc = posix.system.poll(polls.ptr, polls.len, -1); + syscall.finish(); + switch (posix.errno(rc)) { + .SUCCESS => { + if (rc == 0) { + // Spurious timeout; handle the same as INTR. + continue; + } + for (polls, map) |*poll_fd, i| { + if (poll_fd.revents == 0) continue; + poll_fd.fd = -1; + pending -= 1; + operate(&operations[i]); + } + }, + .INTR => continue, + .NOMEM => { + assert(setOperationsError(operations, polls, map, error.SystemResources)); + return; + }, + else => { + assert(setOperationsError(operations, polls, map, error.Unexpected)); + return; + }, } } + + if (pending == 1) for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, i| { + if (poll_fd.fd == -1) continue; + operate(&operations[i]); + }; } -fn setPollOperationsError( +fn setOperationsError( operations: []Io.Operation, + polls: []const posix.pollfd, map: []const u8, err: error{ Canceled, SystemResources, Unexpected }, -) void { - for (map) |operation_index| switch (operations[operation_index]) { - .noop => unreachable, - inline else => |*o| o.result = err, - }; -} - -fn setOperationsError(operations: []Io.Operation, err: error{ Canceled, SystemResources, Unexpected }) void { - for (operations) |*op| switch (op.*) { - .noop => unreachable, - inline else => |*o| o.result = err, - }; +) bool { + var marked = false; + for (polls, map) |*poll_fd, i| { + if (poll_fd.fd == -1) continue; + switch (operations[i]) { + .noop => unreachable, + inline else => |*o| { + o.status = .{ .result = err }; + marked = true; + }, + } + } + return marked; } const dirCreateDir = switch (native_os) { diff --git a/lib/std/process.zig b/lib/std/process.zig index 4a021879a5..b5de41f5d8 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -453,9 +453,7 @@ pub fn spawnPath(io: Io, dir: Io.Dir, options: SpawnOptions) SpawnError!Child { return io.vtable.processSpawnPath(io.userdata, dir, options); } -pub const RunError = CurrentPathError || posix.ReadError || SpawnError || posix.PollError || error{ - StreamTooLong, -}; +pub const RunError = SpawnError || Child.CollectOutputError; pub const RunOptions = struct { argv: []const []const u8, @@ -535,13 +533,15 @@ pub fn run(gpa: 
Allocator, io: Io, options: RunOptions) RunError!RunResult { const term = try child.wait(io); - const owned_stdout = try stdout.toOwnedSlice(gpa); - errdefer gpa.free(owned_stdout); - const owned_stderr = try stderr.toOwnedSlice(gpa); + const stdout_slice = try stdout.toOwnedSlice(gpa); + errdefer gpa.free(stdout_slice); + + const stderr_slice = try stderr.toOwnedSlice(gpa); + errdefer gpa.free(stderr_slice); return .{ - .stdout = owned_stdout, - .stderr = owned_stderr, + .stdout = stdout_slice, + .stderr = stderr_slice, .term = term, }; } diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index fc31014520..9d31b77080 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -125,7 +125,9 @@ pub fn wait(child: *Child, io: Io) WaitError!Term { return io.vtable.childWait(io.userdata, child); } -pub const CollectOutputError = error{StreamTooLong} || Allocator.Error || Io.File.Reader.Error; +pub const CollectOutputError = error{ + StreamTooLong, +} || Io.ConcurrentError || Allocator.Error || Io.File.Reader.Error || Io.Timeout.Error; pub const CollectOutputOptions = struct { stdout: *std.ArrayList(u8), @@ -135,6 +137,7 @@ pub const CollectOutputOptions = struct { allocator: ?Allocator = null, stdout_limit: Io.Limit = .unlimited, stderr_limit: Io.Limit = .unlimited, + timeout: Io.Timeout = .none, }; /// Collect the output from the process's stdout and stderr. Will return once @@ -144,56 +147,55 @@ pub const CollectOutputOptions = struct { /// The process must have been started with stdout and stderr set to /// `process.SpawnOptions.StdIo.pipe`. pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) CollectOutputError!void { - const files: [2]Io.File = .{ child.stdout.?, child.stderr.? }; const lists: [2]*std.ArrayList(u8) = .{ options.stdout, options.stderr }; const limits: [2]Io.Limit = .{ options.stdout_limit, options.stderr_limit }; - var dones: [2]bool = .{ false, false }; - var reads: [2]Io.Operation = undefined; + + if (options.allocator) |gpa| { + for (lists) |list| try list.ensureUnusedCapacity(gpa, 1); + } else { + for (lists) |list| { + if (list.unusedCapacitySlice().len == 0) + return error.StreamTooLong; + } + } + var vecs: [2][1][]u8 = undefined; - while (true) { - for (&reads, &lists, &files, dones, &vecs) |*read, list, file, done, *vec| { - if (done) { - read.* = .noop; - continue; - } + for (lists, &vecs) |list, *vec| + vec[0] = list.unusedCapacitySlice(); + + var operations: [2]Io.Operation = .{ + .{ .file_read_streaming = .{ + .file = child.stdout.?, + .data = &vecs[0], + } }, + .{ .file_read_streaming = .{ + .file = child.stderr.?, + .data = &vecs[1], + } }, + }; + + var batch: Io.Batch = .init(&operations); + batch.submit(io); + defer batch.cancel(io); + + var pending = operations.len; + var retry_index: ?usize = null; + while (pending > 0) { + const resubmissions: []const usize = if (retry_index) |i| &.{i} else &.{}; + const index = try batch.wait(io, resubmissions, options.timeout); + const n = try operations[index].file_read_streaming.status.result; + if (n == 0) { + pending -= 1; + } else { + retry_index = index; + const list = lists[index]; + const limit = limits[index]; + list.items.len += n; + if (list.items.len >= @intFromEnum(limit)) return error.StreamTooLong; if (options.allocator) |gpa| try list.ensureUnusedCapacity(gpa, 1); const cap = list.unusedCapacitySlice(); if (cap.len == 0) return error.StreamTooLong; - vec[0] = cap; - read.* = .{ .file_read_streaming = .{ - .file = file, - .data = vec, - 
.nonblocking = true, - .result = undefined, - } }; + vecs[index][0] = cap; } - var all_done = true; - var any_canceled = false; - var other_err: (error{StreamTooLong} || Io.File.Reader.Error)!void = {}; - io.vtable.operate(io.userdata, &reads); - for (&reads, &lists, &limits, &dones) |*read, list, limit, *done| { - if (done.*) continue; - const n = read.file_read_streaming.result catch |err| switch (err) { - error.Canceled => { - any_canceled = true; - continue; - }, - error.WouldBlock => continue, - else => |e| { - other_err = e; - continue; - }, - }; - if (n == 0) { - done.* = true; - } else { - all_done = false; - } - list.items.len += n; - if (list.items.len > @intFromEnum(limit)) other_err = error.StreamTooLong; - } - if (any_canceled) return error.Canceled; - try other_err; - if (all_done) return; } } From 23d25dbb9e4e469eb2e59d50bd888b7a61ffb876 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 9 Jan 2026 19:21:59 -0800 Subject: [PATCH 21/65] std.process.Child.collectOutput: change back to other impl this one avoids calling poll() more than necessary --- lib/std/Io.zig | 2 +- lib/std/process/Child.zig | 86 +++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 45 deletions(-) diff --git a/lib/std/Io.zig b/lib/std/Io.zig index d916bb6995..2f34dc07e4 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -260,7 +260,7 @@ pub const Operation = union(enum) { file_read_streaming: FileReadStreaming, pub const Noop = struct { - reserved: [2]usize, + reserved: [2]usize = .{ 0, 0 }, status: Status(void) = .{ .result = {} }, }; diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 9d31b77080..364c52446f 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -137,7 +137,6 @@ pub const CollectOutputOptions = struct { allocator: ?Allocator = null, stdout_limit: Io.Limit = .unlimited, stderr_limit: Io.Limit = .unlimited, - timeout: Io.Timeout = .none, }; /// Collect the output from the process's stdout and stderr. Will return once @@ -147,55 +146,54 @@ pub const CollectOutputOptions = struct { /// The process must have been started with stdout and stderr set to /// `process.SpawnOptions.StdIo.pipe`. pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) CollectOutputError!void { + const files: [2]Io.File = .{ child.stdout.?, child.stderr.? 
}; const lists: [2]*std.ArrayList(u8) = .{ options.stdout, options.stderr }; const limits: [2]Io.Limit = .{ options.stdout_limit, options.stderr_limit }; - - if (options.allocator) |gpa| { - for (lists) |list| try list.ensureUnusedCapacity(gpa, 1); - } else { - for (lists) |list| { - if (list.unusedCapacitySlice().len == 0) - return error.StreamTooLong; - } - } - + var dones: [2]bool = .{ false, false }; + var reads: [2]Io.Operation = undefined; var vecs: [2][1][]u8 = undefined; - for (lists, &vecs) |list, *vec| - vec[0] = list.unusedCapacitySlice(); - - var operations: [2]Io.Operation = .{ - .{ .file_read_streaming = .{ - .file = child.stdout.?, - .data = &vecs[0], - } }, - .{ .file_read_streaming = .{ - .file = child.stderr.?, - .data = &vecs[1], - } }, - }; - - var batch: Io.Batch = .init(&operations); - batch.submit(io); - defer batch.cancel(io); - - var pending = operations.len; - var retry_index: ?usize = null; - while (pending > 0) { - const resubmissions: []const usize = if (retry_index) |i| &.{i} else &.{}; - const index = try batch.wait(io, resubmissions, options.timeout); - const n = try operations[index].file_read_streaming.status.result; - if (n == 0) { - pending -= 1; - } else { - retry_index = index; - const list = lists[index]; - const limit = limits[index]; - list.items.len += n; - if (list.items.len >= @intFromEnum(limit)) return error.StreamTooLong; + while (true) { + for (&reads, &lists, &files, dones, &vecs) |*read, list, file, done, *vec| { + if (done) { + read.* = .{ .noop = .{} }; + continue; + } if (options.allocator) |gpa| try list.ensureUnusedCapacity(gpa, 1); const cap = list.unusedCapacitySlice(); if (cap.len == 0) return error.StreamTooLong; - vecs[index][0] = cap; + vec[0] = cap; + read.* = .{ .file_read_streaming = .{ + .file = file, + .data = vec, + } }; } + var all_done = true; + var any_canceled = false; + var other_err: (error{StreamTooLong} || Io.File.Reader.Error)!void = {}; + try io.vtable.batch(io.userdata, &reads); + for (&reads, &lists, &limits, &dones) |*read, list, limit, *done| { + if (done.*) continue; + const n = read.file_read_streaming.status.result catch |err| switch (err) { + error.Canceled => { + any_canceled = true; + continue; + }, + error.WouldBlock => continue, + else => |e| { + other_err = e; + continue; + }, + }; + if (n == 0) { + done.* = true; + } else { + all_done = false; + } + list.items.len += n; + if (list.items.len > @intFromEnum(limit)) other_err = error.StreamTooLong; + } + if (any_canceled) return error.Canceled; + try other_err; + if (all_done) return; } } From 0a379513afdf807c357993ac4508449ec933c55b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 9 Jan 2026 20:46:51 -0800 Subject: [PATCH 22/65] std.Io.Threaded: super broken Windows impl of batch this is a cry for help --- lib/std/Io/Threaded.zig | 83 ++++++++++++++++++++++++++++++++--------- 1 file changed, 66 insertions(+), 17 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 0eda4e8fdc..f7b4b73b33 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2547,14 +2547,13 @@ fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { fn batch(userdata: ?*anyopaque, operations: []Io.Operation) Io.ConcurrentError!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); - _ = t; if (operations.len == 1) { @branchHint(.likely); return operate(&operations[0]); } - if (is_windows) @panic("TODO"); + if (is_windows) return batchWindows(t, operations); var poll_buffer: [poll_buffer_len]posix.pollfd = undefined; var 
map_buffer: [poll_buffer_len]u8 = undefined; // poll_buffer index to operations index @@ -2578,7 +2577,7 @@ fn batch(userdata: ?*anyopaque, operations: []Io.Operation) Io.ConcurrentError!v const map = map_buffer[0..poll_i]; var pending = poll_i; - while (pending > 1) { + while (pending > 0) { const syscall = Syscall.start() catch |err| switch (err) { error.Canceled => { if (!setOperationsError(operations, polls, map, error.Canceled)) @@ -2589,17 +2588,11 @@ fn batch(userdata: ?*anyopaque, operations: []Io.Operation) Io.ConcurrentError!v const rc = posix.system.poll(polls.ptr, polls.len, -1); syscall.finish(); switch (posix.errno(rc)) { - .SUCCESS => { - if (rc == 0) { - // Spurious timeout; handle the same as INTR. - continue; - } - for (polls, map) |*poll_fd, i| { - if (poll_fd.revents == 0) continue; - poll_fd.fd = -1; - pending -= 1; - operate(&operations[i]); - } + .SUCCESS => for (polls, map) |*poll_fd, i| { + if (poll_fd.revents == 0) continue; + poll_fd.fd = -1; + pending -= 1; + operate(&operations[i]); }, .INTR => continue, .NOMEM => { @@ -2612,11 +2605,67 @@ fn batch(userdata: ?*anyopaque, operations: []Io.Operation) Io.ConcurrentError!v }, } } +} - if (pending == 1) for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, i| { - if (poll_fd.fd == -1) continue; - operate(&operations[i]); +fn batchWindows(t: *Threaded, operations: []Io.Operation) Io.ConcurrentError!void { + _ = t; + var overlapped_buffer: [poll_buffer_len]windows.OVERLAPPED = undefined; + var handles_buffer: [poll_buffer_len]windows.HANDLE = undefined; + var map_buffer: [poll_buffer_len]u8 = undefined; // handles_buffer index to operations index + var buffer_i: usize = 0; + + for (operations, 0..) |*op, operation_index| switch (op.*) { + .noop => continue, + .file_read_streaming => |*o| { + if (handles_buffer.len - buffer_i == 0) return error.ConcurrencyUnavailable; + + const overlapped = &overlapped_buffer[buffer_i]; + overlapped.* = .{ + .Internal = 0, + .InternalHigh = 0, + .DUMMYUNIONNAME = .{ + .DUMMYSTRUCTNAME = .{ + .Offset = 0, + .OffsetHigh = 0, + }, + .Pointer = null, + }, + .hEvent = null, + }; + var n: windows.DWORD = undefined; + const buf = o.data[0]; + if (windows.kernel32.ReadFile(o.file.handle, buf.ptr, buf.len, &n, overlapped) == 0) { + @panic("TODO"); + } + handles_buffer[buffer_i] = o.file.handle; + map_buffer[buffer_i] = @intCast(operation_index); + buffer_i += 1; + }, }; + + const handles = handles_buffer[0..buffer_i]; + const map = map_buffer[0..buffer_i]; + var pending = buffer_i; + + while (pending > 0) { + const syscall: Syscall = try .start(); + const index = windows.WaitForMultipleObjectsEx(handles, false, windows.INFINITE, true); + syscall.finish(); + var n: windows.DWORD = undefined; + if (0 == windows.kernel32.GetOverlappedResult(handles[index], overlapped_buffer[index], &n, 0)) { + switch (windows.GetLastError()) { + .BROKEN_PIPE => @panic("TODO"), + .OPERATION_ABORTED => @panic("TODO"), + else => @panic("TODO"), + } + } else switch (operations[map[index]]) { + .noop => unreachable, + .file_read_streaming => |*o| { + o.status = .{ .result = n }; + pending -= 1; + }, + } + } } fn setOperationsError( From 8146ccfeccd17e6ee5adaa59112899a89ad409bd Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 10 Jan 2026 15:34:36 -0500 Subject: [PATCH 23/65] Io: add ring to `Batch` API --- lib/std/Io.zig | 173 ++++++++++++++++++++++++++----------- lib/std/Io/File.zig | 2 +- lib/std/Io/File/Reader.zig | 22 +---- lib/std/Io/Threaded.zig | 151 +++++++++++++++++++++----------- 
lib/std/process/Child.zig | 72 +++++++-------- 5 files changed, 260 insertions(+), 160 deletions(-) diff --git a/lib/std/Io.zig b/lib/std/Io.zig index 2f34dc07e4..b503979fda 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -149,9 +149,8 @@ pub const VTable = struct { futexWaitUncancelable: *const fn (?*anyopaque, ptr: *const u32, expected: u32) void, futexWake: *const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void, - batch: *const fn (?*anyopaque, []Operation) ConcurrentError!void, - batchSubmit: *const fn (?*anyopaque, *Batch) void, - batchWait: *const fn (?*anyopaque, *Batch, resubmissions: []const usize, Timeout) Batch.WaitError!usize, + operate: *const fn (?*anyopaque, *Operation) Cancelable!void, + batchWait: *const fn (?*anyopaque, *Batch, Timeout) Batch.WaitError!void, batchCancel: *const fn (?*anyopaque, *Batch) void, dirCreateDir: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirError!void, @@ -261,48 +260,50 @@ pub const Operation = union(enum) { pub const Noop = struct { reserved: [2]usize = .{ 0, 0 }, - status: Status(void) = .{ .result = {} }, + status: Status(void) = .{ .unstarted = {} }, }; /// Returns 0 on end of stream. pub const FileReadStreaming = struct { file: File, data: []const []u8, - status: Status(File.Reader.Error!usize) = .{ .unstarted = {} }, + status: Status(Error!usize) = .{ .unstarted = {} }, + + pub const Error = error{ + InputOutput, + SystemResources, + /// Trying to read a directory file descriptor as if it were a file. + IsDir, + BrokenPipe, + ConnectionResetByPeer, + /// File was not opened with read capability. + NotOpenForReading, + SocketUnconnected, + /// Non-blocking has been enabled, and reading from the file descriptor + /// would block. + WouldBlock, + /// In WASI, this error occurs when the file descriptor does + /// not hold the required rights to read from it. + AccessDenied, + /// Unable to read file due to lock. Depending on the `Io` implementation, + /// reading from a locked file may return this error, or may ignore the + /// lock. + LockViolation, + } || Io.UnexpectedError; }; pub fn Status(Result: type) type { return union { unstarted: void, - pending: usize, + pending: *Batch, result: Result, }; } }; -/// Performs all `operations` in an unspecified order, concurrently. -/// -/// Returns after all `operations` have been completed. If the operations could -/// not be completed concurrently, returns `error.ConcurrencyUnavailable`. -/// -/// With this API, it is rare for concurrency to not be available. Even a -/// single-threaded `Io` implementation can, for example, take advantage of -/// poll() to implement this. Note that poll() is fallible however. -/// -/// If `operations.len` is one, `error.ConcurrencyUnavailable` is unreachable. -/// -/// On entry, all operations must already have `.status = .unstarted` except -/// noops must have `.status = .{ .result = {} }`, to safety check the state -/// transitions. -/// -/// On return, all operations have `.status = .{ .result = ... }`. -pub fn batch(io: Io, operations: []Operation) ConcurrentError!void { - return io.vtable.batch(io.userdata, operations); -} - /// Performs one `Operation`. 
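
As a sketch of the reworked single-operation path below (not part of the patch; `file` and `buffers` are hypothetical, and this mirrors the `File.readStreaming` change later in this commit):

    var op: Io.Operation = .{ .file_read_streaming = .{
        .file = file,
        .data = buffers,
        // .status defaults to .{ .unstarted = {} }
    } };
    try io.operate(&op); // operate itself can only fail with error.Canceled
    const n = try op.file_read_streaming.status.result; // 0 means end of stream
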
-pub fn operate(io: Io, operation: *Operation) void {
-    return io.vtable.batch(io.userdata, (operation)[0..1]) catch unreachable;
+pub fn operate(io: Io, operation: *Operation) Cancelable!void {
+    return io.vtable.operate(io.userdata, operation) catch unreachable;
 }
 
 /// Submits many operations together without waiting for all of them to
@@ -312,35 +313,107 @@ pub fn operate(io: Io, operation: *Operation) void {
 /// level API that operates on `Future`, see `Select`.
 pub const Batch = struct {
     operations: []Operation,
-    index: usize,
-    reserved: ?*anyopaque,
+    ring: [*]u32,
+    user: struct {
+        submit_tail: RingIndex,
+        complete_head: RingIndex,
+        complete_tail: RingIndex,
+    },
+    impl: struct {
+        submit_head: RingIndex,
+        submit_tail: RingIndex,
+        complete_tail: RingIndex,
+        reserved: ?*anyopaque,
+    },
 
-    pub fn init(operations: []Operation) Batch {
-        return .{ .operations = operations, .index = 0, .reserved = null };
-    }
+    pub const RingIndex = enum(u32) {
+        _,
 
-    /// Submits all non-noop `operations`.
-    pub fn submit(b: *Batch, io: Io) void {
-        return io.vtable.batchSubmit(io.userdata, b);
-    }
+        pub fn index(ri: RingIndex, len: u31) u31 {
+            const i = @intFromEnum(ri);
+            assert(i < @as(u32, len) * 2);
+            return @intCast(if (i < len) i else i - len);
+        }
+
+        pub fn prev(ri: RingIndex, len: u31) RingIndex {
+            const i = @intFromEnum(ri);
+            const double_len = @as(u32, len) * 2;
+            assert(i <= double_len);
+            return @enumFromInt((if (i > 0) i else double_len) - 1);
+        }
+
+        pub fn next(ri: RingIndex, len: u31) RingIndex {
+            const i = @intFromEnum(ri) + 1;
+            const double_len = @as(u32, len) * 2;
+            assert(i <= double_len);
+            return @enumFromInt(if (i < double_len) i else 0);
+        }
+    };
 
     pub const WaitError = ConcurrentError || Cancelable || Timeout.Error;
 
-    /// Resubmits the previously completed or noop-initialized `operations` at
-    /// indexes given by `resubmissions`. This set of indexes typically will be empty
-    /// on the first call to `wait` since all operations have already been
-    /// submitted via `submit`.
-    ///
-    /// Returns the index of a completed `Operation`, or `operations.len` if
-    /// all operations are completed.
-    ///
-    /// When `error.Canceled` is returned, all operations have already completed.
-    pub fn wait(b: *Batch, io: Io, resubmissions: []const usize, timeout: Timeout) WaitError!usize {
-        return io.vtable.batchWait(io.userdata, b, resubmissions, timeout);
+    pub fn init(operations: []Operation, ring: []u32) Batch {
+        const len: u31 = @intCast(operations.len);
+        assert(ring.len == len);
+        return .{
+            .operations = operations,
+            .ring = ring.ptr,
+            .user = .{
+                .submit_tail = @enumFromInt(0),
+                .complete_head = @enumFromInt(0),
+                .complete_tail = @enumFromInt(0),
+            },
+            .impl = .{
+                .submit_head = @enumFromInt(0),
+                .submit_tail = @enumFromInt(0),
+                .complete_tail = @enumFromInt(0),
+                .reserved = null,
+            },
+        };
     }
 
-    /// Returns after all `operations` have completed. Each operation
-    /// independently may or may not have been canceled.
+    /// Adds `b.operations[operation]` to the list of submitted operations
+    /// that will be performed when `wait` is called.
+    pub fn add(b: *Batch, operation: usize) void {
+        const tail = b.user.submit_tail;
+        const len: u31 = @intCast(b.operations.len);
+        b.user.submit_tail = tail.next(len);
+        b.ring[0..len][tail.index(len)] = @intCast(operation);
+    }
+
+    fn flush(b: *Batch) void {
+        @atomicStore(RingIndex, &b.impl.submit_tail, b.user.submit_tail, .release);
+    }
+
+    /// Returns `operation` such that `b.operations[operation]` has completed.
+ /// Returns `null` when `wait` should be called. + pub fn next(b: *Batch) ?u32 { + const head = b.user.complete_head; + if (head == b.user.complete_tail) { + @branchHint(.unlikely); + b.flush(); + const tail = @atomicLoad(RingIndex, &b.impl.complete_tail, .acquire); + if (head == tail) { + @branchHint(.unlikely); + return null; + } + assert(head != tail); + b.user.complete_tail = tail; + } + const len: u31 = @intCast(b.operations.len); + b.user.complete_head = head.next(len); + return b.ring[0..len][head.index(len)]; + } + + /// Starts work on any submitted operations and returns when at least one has completed. + /// + /// Returns `error.Timeout` if `timeout` expires first. + pub fn wait(b: *Batch, io: Io, timeout: Timeout) WaitError!void { + return io.vtable.batchWait(io.userdata, b, timeout); + } + + /// Returns after all `operations` have either completed or been dropped. Operations which + /// did not complete were successfully dropped and had no side effects. pub fn cancel(b: *Batch, io: Io) void { return io.vtable.batchCancel(io.userdata, b); } diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index f27f249975..cc7042d443 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -558,7 +558,7 @@ pub fn readStreaming(file: File, io: Io, buffer: []const []u8) Reader.Error!usiz .file = file, .data = buffer, } }; - io.operate(&operation); + try io.operate(&operation); return operation.file_read_streaming.status.result; } diff --git a/lib/std/Io/File/Reader.zig b/lib/std/Io/File/Reader.zig index d3d1c05e3f..7703521d7e 100644 --- a/lib/std/Io/File/Reader.zig +++ b/lib/std/Io/File/Reader.zig @@ -26,27 +26,7 @@ size_err: ?SizeError = null, seek_err: ?SeekError = null, interface: Io.Reader, -pub const Error = error{ - InputOutput, - SystemResources, - /// Trying to read a directory file descriptor as if it were a file. - IsDir, - BrokenPipe, - ConnectionResetByPeer, - /// File was not opened with read capability. - NotOpenForReading, - SocketUnconnected, - /// Non-blocking has been enabled, and reading from the file descriptor - /// would block. - WouldBlock, - /// In WASI, this error occurs when the file descriptor does - /// not hold the required rights to read from it. - AccessDenied, - /// Unable to read file due to lock. Depending on the `Io` implementation, - /// reading from a locked file may return this error, or may ignore the - /// lock. - LockViolation, -} || Io.Cancelable || Io.UnexpectedError; +pub const Error = Io.Operation.FileReadStreaming.Error || Io.Cancelable; pub const SizeError = File.StatError || error{ /// Occurs if, for example, the file handle is a network socket and therefore does not have a size.
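For orientation, the new ring-based `Batch` API above is driven in four steps: `init` with parallel `operations` and `ring` slices, `add` to enqueue an operation index, `wait` to start work and block until something finishes, and `next` to drain completion indexes. A minimal sketch follows (illustrative only, not part of this patch; `readBothOnce`, `file_a`, and `file_b` are hypothetical names, and the caller is assumed to provide an `io` instance and two files opened for reading):

const std = @import("std");
const Io = std.Io;

fn readBothOnce(io: Io, file_a: Io.File, file_b: Io.File) !void {
    var buf_a: [256]u8 = undefined;
    var buf_b: [256]u8 = undefined;
    var vec_a: [1][]u8 = .{&buf_a};
    var vec_b: [1][]u8 = .{&buf_b};
    var operations: [2]Io.Operation = .{
        .{ .file_read_streaming = .{ .file = file_a, .data = &vec_a } },
        .{ .file_read_streaming = .{ .file = file_b, .data = &vec_b } },
    };
    // The ring must be exactly as long as `operations`.
    var ring: [2]u32 = undefined;
    var batch: Io.Batch = .init(&operations, &ring);
    batch.add(0);
    batch.add(1);
    var remaining: usize = operations.len;
    while (remaining > 0) {
        try batch.wait(io, .none);
        while (batch.next()) |op| {
            // Once an operation completes, `status` holds the byte count
            // (0 meaning end of stream) or the error.
            const n = try operations[op].file_read_streaming.status.result;
            std.debug.print("operation {d} read {d} bytes\n", .{ op, n });
            remaining -= 1;
        }
    }
}

A completed operation can be resubmitted by resetting its `status` to `.unstarted` and calling `add` with its index again, as the `collectOutput` rewrite in `process/Child.zig` below does.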
diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index f7b4b73b33..fefa8fe84d 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -1587,8 +1587,7 @@ pub fn io(t: *Threaded) Io { .futexWaitUncancelable = futexWaitUncancelable, .futexWake = futexWake, - .batch = batch, - .batchSubmit = batchSubmit, + .operate = operate, .batchWait = batchWait, .batchCancel = batchCancel, @@ -1751,8 +1750,7 @@ pub fn ioBasic(t: *Threaded) Io { .futexWaitUncancelable = futexWaitUncancelable, .futexWake = futexWake, - .batch = batch, - .batchSubmit = batchSubmit, + .operate = operate, .batchWait = batchWait, .batchCancel = batchCancel, @@ -2456,59 +2454,82 @@ fn futexWake(userdata: ?*anyopaque, ptr: *const u32, max_waiters: u32) void { Thread.futexWake(ptr, max_waiters); } -fn batchSubmit(userdata: ?*anyopaque, b: *Io.Batch) void { +fn operate(userdata: ?*anyopaque, op: *Io.Operation) Io.Cancelable!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); _ = t; - _ = b; - return; -} - -fn operate(op: *Io.Operation) void { switch (op.*) { - .noop => {}, - .file_read_streaming => |*o| o.status = .{ .result = fileReadStreaming(o.file, o.data) }, + .noop => |*o| { + _ = o.status.unstarted; + o.status = .{ .result = {} }; + }, + .file_read_streaming => |*o| { + _ = o.status.unstarted; + o.status = .{ .result = fileReadStreaming(o.file, o.data) catch |err| switch (err) { + error.Canceled => return error.Canceled, + else => |e| e, + } }; + }, } } -fn batchWait( - userdata: ?*anyopaque, - b: *Io.Batch, - resubmissions: []const usize, - timeout: Io.Timeout, -) Io.Batch.WaitError!usize { - _ = resubmissions; +fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.WaitError!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); const operations = b.operations; - if (operations.len == 1) { - operate(&operations[0]); - return b.operations.len; + const len: u31 = @intCast(operations.len); + const ring = b.ring[0..len]; + var submit_head = b.impl.submit_head; + const submit_tail = b.user.submit_tail; + b.impl.submit_tail = submit_tail; + var complete_tail = b.impl.complete_tail; + var map_buffer: [poll_buffer_len]u32 = undefined; // poll_buffer index to operations index + var poll_i: usize = 0; + defer { + for (map_buffer[0..poll_i]) |op| { + submit_head = submit_head.prev(len); + ring[submit_head.index(len)] = op; + } + b.impl.submit_head = submit_head; + b.impl.complete_tail = complete_tail; + b.user.complete_tail = complete_tail; } if (is_windows) @panic("TODO"); - var poll_buffer: [poll_buffer_len]posix.pollfd = undefined; - var map_buffer: [poll_buffer_len]u8 = undefined; // poll_buffer index to operations index - var poll_i: usize = 0; - - for (operations, 0..) 
|*op, operation_index| switch (op.*) { - .noop => continue, - .file_read_streaming => |*o| { - if (poll_buffer.len - poll_i == 0) return error.ConcurrencyUnavailable; - poll_buffer[poll_i] = .{ - .fd = o.file.handle, - .events = posix.POLL.IN, - .revents = 0, - }; - map_buffer[poll_i] = @intCast(operation_index); - poll_i += 1; + while (submit_head != submit_tail) : (submit_head = submit_head.next(len)) { + const op = ring[submit_head.index(len)]; + const operation = &operations[op]; + switch (operation.*) { + else => { + try operate(t, operation); + ring[complete_tail.index(len)] = op; + complete_tail = complete_tail.next(len); + }, + .file_read_streaming => |*o| { + _ = o.status.unstarted; + if (poll_buffer.len - poll_i == 0) return error.ConcurrencyUnavailable; + poll_buffer[poll_i] = .{ + .fd = o.file.handle, + .events = posix.POLL.IN, + .revents = 0, + }; + map_buffer[poll_i] = op; + poll_i += 1; + }, + } + } + switch (poll_i) { + 0 => return, + 1 => if (timeout == .none) { + const op = map_buffer[0]; + try operate(t, &operations[op]); + ring[complete_tail.index(len)] = op; + complete_tail = complete_tail.next(len); + return; }, - }; - - if (poll_i == 0) return operations.len; - + else => {}, + } const t_io = ioBasic(t); const deadline = timeout.toDeadline(t_io) catch return error.UnsupportedClock; const max_poll_ms = std.math.maxInt(i32); - while (true) { const timeout_ms: i32 = if (deadline) |d| t: { const duration = d.durationFromNow(t_io) catch return error.UnsupportedClock; @@ -2526,11 +2547,24 @@ fn batchWait( if (deadline == null) continue; return error.Timeout; } - for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, i| { - if (poll_fd.revents == 0) continue; - operate(&operations[i]); - return i; + var canceled = false; + for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, op| { + if (poll_fd.revents == 0) { + submit_head = submit_head.prev(len); + ring[submit_head.index(len)] = op; + } else { + operate(t, &operations[op]) catch |err| switch (err) { + error.Canceled => { + canceled = true; + continue; + }, + }; + ring[complete_tail.index(len)] = op; + complete_tail = complete_tail.next(len); + } } + poll_i = 0; + return if (canceled) error.Canceled; }, .INTR => continue, else => return error.ConcurrencyUnavailable, @@ -2540,9 +2574,27 @@ fn batchWait( fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { const t: *Threaded = @ptrCast(@alignCast(userdata)); - _ = t; - _ = b; - return; + const operations = b.operations; + const len: u31 = @intCast(operations.len); + const ring = b.ring[0..len]; + var submit_head = b.impl.submit_head; + const submit_tail = b.user.submit_tail; + b.impl.submit_tail = submit_tail; + var complete_tail = b.impl.complete_tail; + while (submit_head != submit_tail) : (submit_head = submit_head.next(len)) { + const op = ring[submit_head.index(len)]; + switch (operations[op]) { + .noop => { + operate(t, &operations[op]) catch unreachable; + ring[complete_tail.index(len)] = op; + complete_tail = complete_tail.next(len); + }, + .file_read_streaming => |*o| _ = o.status.unstarted, + } + } + b.impl.submit_head = submit_tail; + b.impl.complete_tail = complete_tail; + b.user.complete_tail = complete_tail; } fn batch(userdata: ?*anyopaque, operations: []Io.Operation) Io.ConcurrentError!void { @@ -10352,6 +10404,7 @@ fn nowWasi(clock: Io.Clock) Io.Clock.Error!Io.Timestamp { fn sleep(userdata: ?*anyopaque, timeout: Io.Timeout) Io.SleepError!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); + if (timeout == .none) return; if 
(use_parking_sleep) return parking_sleep.sleep(try timeout.toDeadline(ioBasic(t))); if (native_os == .wasi) return sleepWasi(t, timeout); if (@TypeOf(posix.system.clock_nanosleep) != void) return sleepPosix(timeout); diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 364c52446f..19c974ff9f 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -149,51 +149,45 @@ pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) const files: [2]Io.File = .{ child.stdout.?, child.stderr.? }; const lists: [2]*std.ArrayList(u8) = .{ options.stdout, options.stderr }; const limits: [2]Io.Limit = .{ options.stdout_limit, options.stderr_limit }; - var dones: [2]bool = .{ false, false }; var reads: [2]Io.Operation = undefined; var vecs: [2][1][]u8 = undefined; - while (true) { - for (&reads, &lists, &files, dones, &vecs) |*read, list, file, done, *vec| { - if (done) { - read.* = .{ .noop = .{} }; - continue; - } - if (options.allocator) |gpa| try list.ensureUnusedCapacity(gpa, 1); - const cap = list.unusedCapacitySlice(); - if (cap.len == 0) return error.StreamTooLong; - vec[0] = cap; - read.* = .{ .file_read_streaming = .{ - .file = file, - .data = vec, - } }; + var ring: [2]u32 = undefined; + var batch: Io.Batch = .init(&reads, &ring); + defer { + batch.cancel(io); + while (batch.next()) |op| { + lists[op].items.len += reads[op].file_read_streaming.status.result catch continue; } - var all_done = true; - var any_canceled = false; - var other_err: (error{StreamTooLong} || Io.File.Reader.Error)!void = {}; - try io.vtable.batch(io.userdata, &reads); - for (&reads, &lists, &limits, &dones) |*read, list, limit, *done| { - if (done.*) continue; - const n = read.file_read_streaming.status.result catch |err| switch (err) { - error.Canceled => { - any_canceled = true; - continue; - }, - error.WouldBlock => continue, - else => |e| { - other_err = e; - continue; - }, - }; + } + var remaining: usize = 0; + for (0.., &reads, &lists, &files, &vecs) |op, *read, list, file, *vec| { + if (options.allocator) |gpa| try list.ensureUnusedCapacity(gpa, 1); + const cap = list.unusedCapacitySlice(); + if (cap.len == 0) return error.StreamTooLong; + vec[0] = cap; + read.* = .{ .file_read_streaming = .{ + .file = file, + .data = vec, + } }; + batch.add(op); + remaining += 1; + } + while (remaining > 0) { + try batch.wait(io, .none); + while (batch.next()) |op| { + const n = try reads[op].file_read_streaming.status.result; if (n == 0) { - done.* = true; + remaining -= 1; } else { - all_done = false; + lists[op].items.len += n; + if (lists[op].items.len > @intFromEnum(limits[op])) return error.StreamTooLong; + if (options.allocator) |gpa| try lists[op].ensureUnusedCapacity(gpa, 1); + const cap = lists[op].unusedCapacitySlice(); + if (cap.len == 0) return error.StreamTooLong; + vecs[op][0] = cap; + reads[op].file_read_streaming.status = .{ .unstarted = {} }; + batch.add(op); } - list.items.len += n; - if (list.items.len > @intFromEnum(limit)) other_err = error.StreamTooLong; } - if (any_canceled) return error.Canceled; - try other_err; - if (all_done) return; } } From 78a1476475047d0ae591fd838ec55b6b87561dd3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jan 2026 17:41:08 -0800 Subject: [PATCH 24/65] Build.WebServer: update concurrency API usage --- lib/std/Build/Step.zig | 1 + lib/std/Build/WebServer.zig | 36 +++++++++++++++++++++++------------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig 
index bacc81cbfa..e2c51cc6fe 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -542,6 +542,7 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. const stdout = &stdout_reader.interface; var body_buffer: std.ArrayList(u8) = .empty; + defer body_buffer.deinit(gpa); while (true) { const Header = std.zig.Server.Message.Header; diff --git a/lib/std/Build/WebServer.zig b/lib/std/Build/WebServer.zig index e1536fb8fa..1f380b6c50 100644 --- a/lib/std/Build/WebServer.zig +++ b/lib/std/Build/WebServer.zig @@ -588,11 +588,12 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim }); defer child.kill(io); - var poller = Io.poll(gpa, enum { stdout, stderr }, .{ - .stdout = child.stdout.?, - .stderr = child.stderr.?, - }); - defer poller.deinit(); + var stderr_task = try io.concurrent(readStreamAlloc, .{ gpa, io, child.stderr.?, .unlimited }); + defer if (stderr_task.cancel(io)) |slice| gpa.free(slice) else |_| {}; + + var stdout_buffer: [512]u8 = undefined; + var stdout_reader: Io.File.Reader = .initStreaming(child.stdout.?, io, &stdout_buffer); + const stdout = &stdout_reader.interface; try child.stdin.?.writeStreamingAll(io, @ptrCast(@as([]const std.zig.Client.Message.Header, &.{ .{ .tag = .update, .bytes_len = 0 }, @@ -600,16 +601,17 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim }))); const Header = std.zig.Server.Message.Header; + var result: ?Cache.Path = null; var result_error_bundle = std.zig.ErrorBundle.empty; + var body_buffer: std.ArrayList(u8) = .empty; + defer body_buffer.deinit(gpa); - const stdout = poller.reader(.stdout); - - poll: while (true) { - while (stdout.buffered().len < @sizeOf(Header)) if (!(try poller.poll())) break :poll; - const header = stdout.takeStruct(Header, .little) catch unreachable; - while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll; - const body = stdout.take(header.bytes_len) catch unreachable; + while (true) { + const header = try stdout.takeStruct(Header, .little); + body_buffer.clearRetainingCapacity(); + try stdout.appendExact(gpa, &body_buffer, header.bytes_len); + const body = body_buffer.items; switch (header.tag) { .zig_version => { @@ -636,7 +638,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim } } - const stderr_contents = try poller.toOwnedSlice(.stderr); + const stderr_contents = try stderr_task.await(io); if (stderr_contents.len > 0) { std.debug.print("{s}", .{stderr_contents}); } @@ -697,6 +699,14 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim return base_path.join(arena, bin_name); } +fn readStreamAlloc(gpa: Allocator, io: Io, file: Io.File, limit: Io.Limit) ![]u8 { + var file_reader: Io.File.Reader = .initStreaming(file, io, &.{}); + return file_reader.interface.allocRemaining(gpa, limit) catch |err| switch (err) { + error.ReadFailed => return file_reader.err.?, + else => |e| return e, + }; +} + pub fn updateTimeReportCompile(ws: *WebServer, opts: struct { compile: *Build.Step.Compile, From a0c2645948682b4bfdd2971f7512376605a45502 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jan 2026 18:04:56 -0800 Subject: [PATCH 25/65] std.Io.Threaded: delete dead code --- lib/std/Io/Threaded.zig | 207 +++++++++++++++------------------------- 1 file changed, 79 insertions(+), 128 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index fefa8fe84d..2167baa5e4 100644 --- a/lib/std/Io/Threaded.zig +++ 
b/lib/std/Io/Threaded.zig @@ -2474,6 +2474,7 @@ fn operate(userdata: ?*anyopaque, op: *Io.Operation) Io.Cancelable!void { fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.WaitError!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); + if (is_windows) return batchWaitWindows(t, b, timeout); const operations = b.operations; const len: u31 = @intCast(operations.len); const ring = b.ring[0..len]; @@ -2492,13 +2493,12 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch. b.impl.complete_tail = complete_tail; b.user.complete_tail = complete_tail; } - if (is_windows) @panic("TODO"); var poll_buffer: [poll_buffer_len]posix.pollfd = undefined; while (submit_head != submit_tail) : (submit_head = submit_head.next(len)) { const op = ring[submit_head.index(len)]; const operation = &operations[op]; switch (operation.*) { - else => { + .noop => { try operate(t, operation); ring[complete_tail.index(len)] = op; complete_tail = complete_tail.next(len); @@ -2597,149 +2597,100 @@ fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { b.user.complete_tail = complete_tail; } -fn batch(userdata: ?*anyopaque, operations: []Io.Operation) Io.ConcurrentError!void { - const t: *Threaded = @ptrCast(@alignCast(userdata)); +fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.ConcurrentError!void { + const operations = b.operations; + const len: u31 = @intCast(operations.len); + const ring = b.ring[0..len]; + var submit_head = b.impl.submit_head; + const submit_tail = b.user.submit_tail; + b.impl.submit_tail = submit_tail; + var complete_tail = b.impl.complete_tail; - if (operations.len == 1) { - @branchHint(.likely); - return operate(&operations[0]); + var overlapped_buffer: [poll_buffer_len]windows.OVERLAPPED = undefined; + var handles_buffer: [poll_buffer_len]windows.HANDLE = undefined; + var map_buffer: [poll_buffer_len]u32 = undefined; // handles_buffer index to operations index + var buffer_i: usize = 0; + + defer { + for (map_buffer[0..buffer_i]) |op| { + submit_head = submit_head.prev(len); + ring[submit_head.index(len)] = op; + } + b.impl.submit_head = submit_head; + b.impl.complete_tail = complete_tail; + b.user.complete_tail = complete_tail; } - if (is_windows) return batchWindows(t, operations); - - var poll_buffer: [poll_buffer_len]posix.pollfd = undefined; - var map_buffer: [poll_buffer_len]u8 = undefined; // poll_buffer index to operations index - var poll_i: usize = 0; - - for (operations, 0..) 
|*op, operation_index| switch (op.*) { - .noop => continue, - .file_read_streaming => |*o| { - if (poll_buffer.len - poll_i == 0) return error.ConcurrencyUnavailable; - poll_buffer[poll_i] = .{ - .fd = o.file.handle, - .events = posix.POLL.IN, - .revents = 0, - }; - map_buffer[poll_i] = @intCast(operation_index); - poll_i += 1; - }, - }; - - const polls = poll_buffer[0..poll_i]; - const map = map_buffer[0..poll_i]; - - var pending = poll_i; - while (pending > 0) { - const syscall = Syscall.start() catch |err| switch (err) { - error.Canceled => { - if (!setOperationsError(operations, polls, map, error.Canceled)) - recancelInner(); - return; + while (submit_head != submit_tail) : (submit_head = submit_head.next(len)) { + const op = ring[submit_head.index(len)]; + const operation = &operations[op]; + switch (operation.*) { + .noop => { + try operate(t, operation); + ring[complete_tail.index(len)] = op; + complete_tail = complete_tail.next(len); }, - }; - const rc = posix.system.poll(polls.ptr, polls.len, -1); - syscall.finish(); - switch (posix.errno(rc)) { - .SUCCESS => for (polls, map) |*poll_fd, i| { - if (poll_fd.revents == 0) continue; - poll_fd.fd = -1; - pending -= 1; - operate(&operations[i]); - }, - .INTR => continue, - .NOMEM => { - assert(setOperationsError(operations, polls, map, error.SystemResources)); - return; - }, - else => { - assert(setOperationsError(operations, polls, map, error.Unexpected)); - return; + .file_read_streaming => |*o| { + _ = o.status.unstarted; + if (handles_buffer.len - buffer_i == 0) return error.ConcurrencyUnavailable; + const overlapped = &overlapped_buffer[buffer_i]; + overlapped.* = .{ + .Internal = 0, + .InternalHigh = 0, + .DUMMYUNIONNAME = .{ + .DUMMYSTRUCTNAME = .{ + .Offset = 0, + .OffsetHigh = 0, + }, + .Pointer = null, + }, + .hEvent = null, + }; + var n: windows.DWORD = undefined; + const buf = o.data[0]; + if (windows.kernel32.ReadFile(o.file.handle, buf.ptr, buf.len, &n, overlapped) == 0) { + @panic("TODO"); + } + handles_buffer[buffer_i] = o.file.handle; + map_buffer[buffer_i] = op; + buffer_i += 1; }, } } -} -fn batchWindows(t: *Threaded, operations: []Io.Operation) Io.ConcurrentError!void { - _ = t; - var overlapped_buffer: [poll_buffer_len]windows.OVERLAPPED = undefined; - var handles_buffer: [poll_buffer_len]windows.HANDLE = undefined; - var map_buffer: [poll_buffer_len]u8 = undefined; // handles_buffer index to operations index - var buffer_i: usize = 0; - - for (operations, 0..) 
|*op, operation_index| switch (op.*) { - .noop => continue, - .file_read_streaming => |*o| { - if (handles_buffer.len - buffer_i == 0) return error.ConcurrencyUnavailable; - - const overlapped = &overlapped_buffer[buffer_i]; - overlapped.* = .{ - .Internal = 0, - .InternalHigh = 0, - .DUMMYUNIONNAME = .{ - .DUMMYSTRUCTNAME = .{ - .Offset = 0, - .OffsetHigh = 0, - }, - .Pointer = null, - }, - .hEvent = null, - }; - var n: windows.DWORD = undefined; - const buf = o.data[0]; - if (windows.kernel32.ReadFile(o.file.handle, buf.ptr, buf.len, &n, overlapped) == 0) { - @panic("TODO"); - } - handles_buffer[buffer_i] = o.file.handle; - map_buffer[buffer_i] = @intCast(operation_index); - buffer_i += 1; + switch (buffer_i) { + 0 => return, + 1 => if (timeout == .none) { + const op = map_buffer[0]; + try operate(t, &operations[op]); + ring[complete_tail.index(len)] = op; + complete_tail = complete_tail.next(len); + return; }, - }; + else => {}, + } const handles = handles_buffer[0..buffer_i]; const map = map_buffer[0..buffer_i]; - var pending = buffer_i; - while (pending > 0) { - const syscall: Syscall = try .start(); - const index = windows.WaitForMultipleObjectsEx(handles, false, windows.INFINITE, true); - syscall.finish(); - var n: windows.DWORD = undefined; - if (0 == windows.kernel32.GetOverlappedResult(handles[index], overlapped_buffer[index], &n, 0)) { - switch (windows.GetLastError()) { - .BROKEN_PIPE => @panic("TODO"), - .OPERATION_ABORTED => @panic("TODO"), - else => @panic("TODO"), - } - } else switch (operations[map[index]]) { - .noop => unreachable, - .file_read_streaming => |*o| { - o.status = .{ .result = n }; - pending -= 1; - }, + const syscall: Syscall = try .start(); + const index = windows.WaitForMultipleObjectsEx(handles, false, windows.INFINITE, true); + syscall.finish(); + var n: windows.DWORD = undefined; + if (0 == windows.kernel32.GetOverlappedResult(handles[index], overlapped_buffer[index], &n, 0)) { + switch (windows.GetLastError()) { + .BROKEN_PIPE => @panic("TODO"), + .OPERATION_ABORTED => @panic("TODO"), + else => @panic("TODO"), } + } else switch (operations[map[index]]) { + .noop => unreachable, + .file_read_streaming => |*o| { + o.status = .{ .result = n }; + }, } } -fn setOperationsError( - operations: []Io.Operation, - polls: []const posix.pollfd, - map: []const u8, - err: error{ Canceled, SystemResources, Unexpected }, -) bool { - var marked = false; - for (polls, map) |*poll_fd, i| { - if (poll_fd.fd == -1) continue; - switch (operations[i]) { - .noop => unreachable, - inline else => |*o| { - o.status = .{ .result = err }; - marked = true; - }, - } - } - return marked; -} - const dirCreateDir = switch (native_os) { .windows => dirCreateDirWindows, .wasi => dirCreateDirWasi, From 20cadd60aa71d500913fdf03f50ba2de01cf918b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jan 2026 23:21:55 -0800 Subject: [PATCH 26/65] std.Io.File: introduce MultiReader Concurrently read from multiple file streams, eliminating risk of deadlocking. 
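For example, a parent that parses the compiler-server protocol from a child's stdout while capturing its stderr can hang both pipes off one MultiReader (an illustrative sketch, not part of this commit; `readProtocol` and its arguments are hypothetical):

const std = @import("std");

// Assumes `child` was spawned with stdout and stderr set to .pipe.
fn readProtocol(gpa: std.mem.Allocator, io: std.Io, child: *std.process.Child) !void {
    var multi_reader_buffer: std.Io.File.MultiReader.Buffer(2) = undefined;
    var multi_reader: std.Io.File.MultiReader = undefined;
    multi_reader.init(gpa, io, multi_reader_buffer.toStreams(), &.{
        child.stdout.?, child.stderr.?,
    });
    defer multi_reader.deinit();

    // Waiting on stdout also drains stderr into its growing buffer, so the
    // child can never block on a full stderr pipe while we parse stdout.
    const stdout = multi_reader.reader(0);
    const header = try stdout.takeStruct(std.zig.Server.Message.Header, .little);
    const body = try stdout.take(header.bytes_len);
    _ = body;

    // Whatever accumulated on stderr in the meantime:
    const stderr_contents = multi_reader.reader(1).buffered();
    std.debug.print("{s}", .{stderr_contents});
}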
--- lib/std/Build/Step.zig | 29 ++-- lib/std/Io.zig | 4 +- lib/std/Io/File.zig | 3 + lib/std/Io/File/MultiReader.zig | 240 ++++++++++++++++++++++++++++++++ lib/std/Io/Reader.zig | 8 +- 5 files changed, 256 insertions(+), 28 deletions(-) create mode 100644 lib/std/Io/File/MultiReader.zig diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index e2c51cc6fe..40845f75c3 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -527,9 +527,6 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. const arena = b.allocator; const io = b.graph.io; - var stderr_task = try io.concurrent(readStreamAlloc, .{ gpa, io, zp.child.stderr.?, .unlimited }); - defer if (stderr_task.cancel(io)) |slice| gpa.free(slice) else |_| {}; - var timer = try std.time.Timer.start(); try sendMessage(io, zp.child.stdin.?, .update); @@ -537,19 +534,18 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. var result: ?Path = null; - var stdout_buffer: [512]u8 = undefined; - var stdout_reader: Io.File.Reader = .initStreaming(zp.child.stdout.?, io, &stdout_buffer); - const stdout = &stdout_reader.interface; + var multi_reader_buffer: Io.File.MultiReader.Buffer(2) = undefined; + var multi_reader: Io.File.MultiReader = undefined; + multi_reader.init(gpa, io, multi_reader_buffer.toStreams(), &.{ zp.child.stdout.?, zp.child.stderr.? }); + defer multi_reader.deinit(); - var body_buffer: std.ArrayList(u8) = .empty; - defer body_buffer.deinit(gpa); + const stdout = multi_reader.reader(0); + const stderr = multi_reader.reader(1); while (true) { const Header = std.zig.Server.Message.Header; const header = try stdout.takeStruct(Header, .little); - body_buffer.clearRetainingCapacity(); - try stdout.appendExact(gpa, &body_buffer, header.bytes_len); - const body = body_buffer.items; + const body = try stdout.take(header.bytes_len); switch (header.tag) { .zig_version => { if (!std.mem.eql(u8, builtin.zig_version_string, body)) { @@ -640,8 +636,7 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. s.result_duration_ns = timer.read(); - const stderr_contents = try stderr_task.await(io); - defer gpa.free(stderr_contents); + const stderr_contents = stderr.buffered(); if (stderr_contents.len > 0) { try s.result_error_msgs.append(arena, try arena.dupe(u8, stderr_contents)); } @@ -649,14 +644,6 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. return result; } -fn readStreamAlloc(gpa: Allocator, io: Io, file: Io.File, limit: Io.Limit) ![]u8 { - var file_reader: Io.File.Reader = .initStreaming(file, io, &.{}); - return file_reader.interface.allocRemaining(gpa, limit) catch |err| switch (err) { - error.ReadFailed => return file_reader.err.?, - else => |e| return e, - }; -} - pub fn getZigProcess(s: *Step) ?*ZigProcess { return switch (s.id) { .compile => s.cast(Compile).?.zig_process, diff --git a/lib/std/Io.zig b/lib/std/Io.zig index b503979fda..980379b72b 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -350,8 +350,6 @@ pub const Batch = struct { } }; - pub const WaitError = ConcurrentError || Cancelable || Timeout.Error; - pub fn init(operations: []Operation, ring: []u32) Batch { const len: u31 = @intCast(operations.len); assert(ring.len == len); @@ -405,6 +403,8 @@ pub const Batch = struct { return b.ring[0..len][head.index(len)]; } + pub const WaitError = ConcurrentError || Cancelable || Timeout.Error; + /// Starts work on any submitted operations and returns when at least one has completed.
/// /// Returns `error.Timeout` if `timeout` expires first. diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index cc7042d443..c545b60222 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -18,6 +18,9 @@ pub const Writer = @import("File/Writer.zig"); pub const Atomic = @import("File/Atomic.zig"); /// Memory intended to remain consistent with file contents. pub const MemoryMap = @import("File/MemoryMap.zig"); +/// Concurrently read from multiple file streams, eliminating risk of +/// deadlocking. +pub const MultiReader = @import("File/MultiReader.zig"); pub const INode = std.posix.ino_t; pub const NLink = std.posix.nlink_t; diff --git a/lib/std/Io/File/MultiReader.zig b/lib/std/Io/File/MultiReader.zig new file mode 100644 index 0000000000..1cf3f7b404 --- /dev/null +++ b/lib/std/Io/File/MultiReader.zig @@ -0,0 +1,240 @@ +const MultiReader = @This(); + +const std = @import("../../std.zig"); +const Io = std.Io; +const File = Io.File; +const Allocator = std.mem.Allocator; +const assert = std.debug.assert; + +gpa: Allocator, +streams: *Streams, +batch: Io.Batch, + +pub const Context = struct { + mr: *MultiReader, + fr: File.Reader, + vec: [1][]u8, + err: ?Error, + eos: bool, +}; + +pub const Error = Allocator.Error || File.Reader.Error || Io.ConcurrentError; + +/// Trailing: +/// * `contexts: [len]Context` +/// * `ring: [len]u32` +/// * `operations: [len]Io.Operation` +pub const Streams = extern struct { + len: u32, + + pub fn contexts(s: *Streams) []Context { + _ = s; + @panic("TODO"); + } + + pub fn ring(s: *Streams) []u32 { + _ = s; + @panic("TODO"); + } + + pub fn operations(s: *Streams) []Io.Operation { + _ = s; + @panic("TODO"); + } +}; + +pub fn Buffer(comptime n: usize) type { + return extern struct { + len: u32, + contexts: [n][@sizeOf(Context)]u8 align(@alignOf(Context)), + ring: [n]u32, + operations: [n][@sizeOf(Io.Operation)]u8 align(@alignOf(Io.Operation)), + + pub fn toStreams(b: *@This()) *Streams { + return @ptrCast(b); + } + }; +} + +/// See `Streams.Buffer` for convenience API to obtain the `streams` parameter. +pub fn init(mr: *MultiReader, gpa: Allocator, io: Io, streams: *Streams, files: []const File) void { + const contexts = streams.contexts(); + for (contexts, files) |*context, file| context.* = .{ + .mr = mr, + .fr = .{ + .io = io, + .file = file, + .mode = .streaming, + .interface = .{ + .vtable = &.{ + .stream = stream, + .discard = discard, + .readVec = readVec, + .rebase = rebase, + }, + .buffer = &.{}, + .seek = 0, + .end = 0, + }, + }, + .vec = .{&.{}}, + .err = null, + .eos = false, + }; + const operations = streams.operations(); + const ring = streams.ring(); + mr.* = .{ + .gpa = gpa, + .streams = streams, + .batch = .init(operations, ring), + }; + for (operations, contexts, files, 0..) 
|*op, *context, file, i| { + const r = &context.fr.interface; + op.* = .{ .file_read_streaming = .{ + .file = file, + .data = &context.vec, + } }; + rebaseGrowing(mr, context, 1) catch |err| { + context.err = err; + continue; + }; + context.vec[0] = r.buffer; + mr.batch.add(i); + } +} + +pub fn deinit(mr: *MultiReader) void { + const gpa = mr.gpa; + const contexts = mr.streams.contexts(); + const io = contexts[0].fr.io; + mr.batch.cancel(io); + for (contexts) |*context| { + gpa.free(context.fr.interface.buffer); + } +} + +pub fn reader(mr: *MultiReader, index: usize) *Io.Reader { + return &mr.streams.contexts()[index].fr.interface; +} + +pub fn toOwnedSlice(mr: *MultiReader, index: usize) Allocator.Error![]u8 { + const gpa = mr.gpa; + const r: *Io.Reader = reader(mr, index); + if (r.seek == 0) { + const new = try gpa.realloc(r.buffer, r.end); + r.buffer = &.{}; + r.end = 0; + return new; + } + const new = try gpa.dupe(u8, r.buffered()); + gpa.free(r.buffer); + r.buffer = &.{}; + r.seek = 0; + r.end = 0; + return new; +} + +fn stream(r: *Io.Reader, w: *Io.Writer, limit: Io.Limit) Io.Reader.StreamError!usize { + _ = limit; + _ = w; + const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); + const context: *Context = @fieldParentPtr("fr", fr); + const mr = context.mr; + return fill(mr, context); +} + +fn discard(r: *Io.Reader, limit: Io.Limit) Io.Reader.Error!usize { + _ = limit; + const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); + const context: *Context = @fieldParentPtr("fr", fr); + const mr = context.mr; + return fill(mr, context); +} + +fn readVec(r: *Io.Reader, data: [][]u8) Io.Reader.Error!usize { + _ = data; + const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); + const context: *Context = @fieldParentPtr("fr", fr); + const mr = context.mr; + return fill(mr, context); +} + +fn rebase(r: *Io.Reader, capacity: usize) Io.Reader.RebaseError!void { + const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); + const context: *Context = @fieldParentPtr("fr", fr); + const mr = context.mr; + + return rebaseGrowing(mr, context, capacity) catch |err| { + context.err = err; + return error.ReadFailed; + }; +} + +fn rebaseGrowing(mr: *MultiReader, context: *Context, capacity: usize) Allocator.Error!void { + const gpa = mr.gpa; + const r = &context.fr.interface; + if (r.buffer.len >= capacity) { + const data = r.buffer[r.seek..r.end]; + @memmove(r.buffer[0..data.len], data); + r.seek = 0; + r.end = data.len; + } else { + const adjusted_capacity = std.ArrayList(u8).growCapacity(capacity); + + if (r.seek == 0) { + if (gpa.remap(r.buffer, adjusted_capacity)) |new_memory| { + r.buffer = new_memory; + return; + } + } + + const data = r.buffer[r.seek..r.end]; + const new = try gpa.alloc(u8, adjusted_capacity); + @memcpy(new[0..data.len], data); + r.seek = 0; + r.end = data.len; + } +} + +fn fill(mr: *MultiReader, original_context: *Context) Io.Reader.Error!usize { + const contexts = mr.streams.contexts(); + const operations = mr.streams.operations(); + const io = contexts[0].fr.io; + + mr.batch.wait(io, .none) catch |err| switch (err) { + error.Timeout, error.UnsupportedClock => unreachable, + else => |e| { + original_context.err = e; + return error.ReadFailed; + }, + }; + + while (mr.batch.next()) |i| { + const context = &contexts[i]; + const operation = &operations[i]; + const n = operation.file_read_streaming.status.result catch |err| { + context.err = err; + continue; + }; + if (n == 0) { + context.eos = true; + continue; + } + const r = 
&context.fr.interface; + r.end += n; + if (r.buffer.len - r.end == 0) { + rebaseGrowing(mr, context, r.bufferedLen() + 1) catch |err| { + context.err = err; + continue; + }; + assert(r.seek == 0); + context.vec[0] = r.buffer; + } + operation.file_read_streaming.status = .{ .unstarted = {} }; + mr.batch.add(i); + } + + if (original_context.err != null) return error.ReadFailed; + if (original_context.eos) return error.EndOfStream; + return 0; +} diff --git a/lib/std/Io/Reader.zig b/lib/std/Io/Reader.zig index 9c5c762844..9ff025a637 100644 --- a/lib/std/Io/Reader.zig +++ b/lib/std/Io/Reader.zig @@ -127,9 +127,7 @@ pub const ShortError = error{ ReadFailed, }; -pub const RebaseError = error{ - EndOfStream, -}; +pub const RebaseError = Error; pub const failing: Reader = .{ .vtable = &.{ @@ -1402,7 +1400,7 @@ pub fn takeLeb128(r: *Reader, comptime T: type) TakeLeb128Error!T { } /// Ensures `capacity` data can be buffered without rebasing. -pub fn rebase(r: *Reader, capacity: usize) RebaseError!void { +pub fn rebase(r: *Reader, capacity: usize) Error!void { if (r.buffer.len - r.seek >= capacity) { @branchHint(.likely); return; @@ -1410,7 +1408,7 @@ pub fn rebase(r: *Reader, capacity: usize) RebaseError!void { return r.vtable.rebase(r, capacity); } -pub fn defaultRebase(r: *Reader, capacity: usize) RebaseError!void { +pub fn defaultRebase(r: *Reader, capacity: usize) Error!void { assert(r.buffer.len - r.seek < capacity); const data = r.buffer[r.seek..r.end]; @memmove(r.buffer[0..data.len], data); From 12cfc96e1b25d4c75fd08b9af72226502982195a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Jan 2026 18:42:00 -0800 Subject: [PATCH 27/65] std: update rest of build runner to new File.MultiReader API --- lib/std/Build/Step/Run.zig | 110 ++++++++++++++++++-------------- lib/std/Io/File/MultiReader.zig | 59 +++++++++++++---- 2 files changed, 109 insertions(+), 60 deletions(-) diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index d025e01af4..82c15531d0 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1669,39 +1669,44 @@ fn evalZigTest( while (true) { var child = try process.spawn(io, spawn_options); - var poller = std.Io.poll(gpa, StdioPollEnum, .{ - .stdout = child.stdout.?, - .stderr = child.stderr.?, - }); + var multi_reader_buffer: Io.File.MultiReader.Buffer(2) = undefined; + var multi_reader: Io.File.MultiReader = undefined; + multi_reader.init(gpa, io, multi_reader_buffer.toStreams(), &.{ child.stdout.?, child.stderr.? }); var child_killed = false; defer if (!child_killed) { child.kill(io); - poller.deinit(); + multi_reader.deinit(); run.step.result_peak_rss = @max( run.step.result_peak_rss, child.resource_usage_statistics.getMaxRss() orelse 0, ); }; - switch (try pollZigTest( + switch (try waitZigTest( run, &child, options, fuzz_context, - &poller, + &multi_reader, &test_metadata, &test_results, )) { .write_failed => |err| { // The runner unexpectedly closed a stdio pipe, which means a crash. Make sure we've captured // all available stderr to make our error output as useful as possible. - while (try poller.poll()) {} - run.step.result_stderr = try arena.dupe(u8, poller.reader(.stderr).buffered()); + const stderr_fr = multi_reader.fileReader(1); + while (true) { + stderr_fr.interface.fillMore() catch |e| switch (e) { + error.ReadFailed => return stderr_fr.err.?, + error.EndOfStream => break, + }; + } + run.step.result_stderr = try arena.dupe(u8, stderr_fr.interface.buffered()); // Clean up everything and wait for the child to exit. 
child.stdin.?.close(io); child.stdin = null; - poller.deinit(); + multi_reader.deinit(); child_killed = true; const term = try child.wait(io); run.step.result_peak_rss = @max( @@ -1716,13 +1721,14 @@ fn evalZigTest( .no_poll => |no_poll| { // This might be a success (we requested exit and the child dutifully closed stdout) or // a crash of some kind. Either way, the child will terminate by itself -- wait for it. - const stderr_owned = try arena.dupe(u8, poller.reader(.stderr).buffered()); - poller.reader(.stderr).tossBuffered(); + const stderr_reader = multi_reader.reader(1); + const stderr_owned = try arena.dupe(u8, stderr_reader.buffered()); + stderr_reader.tossBuffered(); // Clean up everything and wait for the child to exit. child.stdin.?.close(io); child.stdin = null; - poller.deinit(); + multi_reader.deinit(); child_killed = true; const term = try child.wait(io); run.step.result_peak_rss = @max( @@ -1770,8 +1776,9 @@ fn evalZigTest( return; }, .timeout => |timeout| { - const stderr = poller.reader(.stderr).buffered(); - poller.reader(.stderr).tossBuffered(); + const stderr_reader = multi_reader.reader(1); + const stderr = stderr_reader.buffered(); + stderr_reader.tossBuffered(); if (timeout.active_test_index) |test_index| { // A test was running. Report the timeout against that test, and continue on to // the next test. @@ -1796,16 +1803,16 @@ fn evalZigTest( } } -/// Polls stdout of a Zig test process until a termination condition is reached: +/// Reads stdout of a Zig test process until a termination condition is reached: /// * A write fails, indicating the child unexpectedly closed stdin /// * A test (or a response from the test runner) times out -/// * `poll` fails, indicating the child closed stdout and stderr -fn pollZigTest( +/// * The wait fails, indicating the child closed stdout and stderr +fn waitZigTest( run: *Run, child: *process.Child, options: Step.MakeOptions, fuzz_context: ?FuzzContext, - poller: *std.Io.Poller(StdioPollEnum), + multi_reader: *Io.File.MultiReader, opt_metadata: *?TestMetadata, results: *Step.TestResults, ) !union(enum) { @@ -1874,12 +1881,11 @@ fn pollZigTest( break :ns @max(options.unit_test_timeout_ns orelse 0, 60 * std.time.ns_per_s); }; - const stdout = poller.reader(.stdout); - const stderr = poller.reader(.stderr); + const stdout = multi_reader.reader(0); + const stderr = multi_reader.reader(1); + const Header = std.zig.Server.Message.Header; while (true) { - const Header = std.zig.Server.Message.Header; - // This block is exited when `stdout` contains enough bytes for a `Header`. 
header_ready: { if (stdout.buffered().len >= @sizeOf(Header)) { @@ -1894,18 +1900,22 @@ fn pollZigTest( break :ns options.unit_test_timeout_ns; }; - if (opt_timeout_ns) |timeout_ns| { - const remaining_ns = timeout_ns -| timer.?.read(); - if (!try poller.pollTimeout(remaining_ns)) return .{ .no_poll = .{ + const timeout: Io.Timeout = if (opt_timeout_ns) |timeout_ns| .{ .duration = .{ + .raw = .fromNanoseconds(timeout_ns -| timer.?.read()), + .clock = .awake, + } } else .none; + + multi_reader.fill(timeout) catch |err| switch (err) { + error.Timeout, error.EndOfStream => return .{ .no_poll = .{ .active_test_index = active_test_index, .ns_elapsed = if (timer) |*t| t.read() else 0, - } }; - } else { - if (!try poller.poll()) return .{ .no_poll = .{ - .active_test_index = active_test_index, - .ns_elapsed = if (timer) |*t| t.read() else 0, - } }; - } + } }, + error.UnsupportedClock => { + timer = null; + continue; + }, + else => |e| return e, + }; if (stdout.buffered().len >= @sizeOf(Header)) { // There wasn't a header before, but there is one after the `poll`. @@ -1923,11 +1933,8 @@ fn pollZigTest( } // There is definitely a header available now -- read it. const header = stdout.takeStruct(Header, .little) catch unreachable; + try stdout.fill(header.bytes_len); - while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) return .{ .no_poll = .{ - .active_test_index = active_test_index, - .ns_elapsed = if (timer) |*t| t.read() else 0, - } }; const body = stdout.take(header.bytes_len) catch unreachable; var body_r: std.Io.Reader = .fixed(body); switch (header.tag) { @@ -2164,6 +2171,7 @@ fn evalGeneric(run: *Run, spawn_options: process.SpawnOptions) !EvalGenericResul const b = run.step.owner; const io = b.graph.io; const arena = b.allocator; + const gpa = b.allocator; var child = try process.spawn(io, spawn_options); defer child.kill(io); @@ -2211,23 +2219,31 @@ fn evalGeneric(run: *Run, spawn_options: process.SpawnOptions) !EvalGenericResul if (child.stdout) |stdout| { if (child.stderr) |stderr| { - var poller = std.Io.poll(arena, enum { stdout, stderr }, .{ - .stdout = stdout, - .stderr = stderr, - }); - defer poller.deinit(); + var multi_reader_buffer: Io.File.MultiReader.Buffer(2) = undefined; + var multi_reader: Io.File.MultiReader = undefined; + multi_reader.init(gpa, io, multi_reader_buffer.toStreams(), &.{ stdout, stderr }); + defer multi_reader.deinit(); - while (try poller.poll()) { + const stdout_reader = multi_reader.reader(0); + const stderr_reader = multi_reader.reader(1); + + while (multi_reader.fill(.none)) |_| { if (run.stdio_limit.toInt()) |limit| { - if (poller.reader(.stderr).buffered().len > limit) + if (stdout_reader.buffered().len > limit) return error.StdoutStreamTooLong; - if (poller.reader(.stderr).buffered().len > limit) + if (stderr_reader.buffered().len > limit) return error.StderrStreamTooLong; } + } else |err| switch (err) { + error.UnsupportedClock, error.Timeout => unreachable, + error.EndOfStream => {}, + else => |e| return e, } - stdout_bytes = try poller.toOwnedSlice(.stdout); - stderr_bytes = try poller.toOwnedSlice(.stderr); + try multi_reader.checkAnyError(); + + stdout_bytes = try multi_reader.toOwnedSlice(0); + stderr_bytes = try multi_reader.toOwnedSlice(1); } else { var stdout_reader = stdout.readerStreaming(io, &.{}); stdout_bytes = stdout_reader.interface.allocRemaining(arena, run.stdio_limit) catch |err| switch (err) { diff --git a/lib/std/Io/File/MultiReader.zig b/lib/std/Io/File/MultiReader.zig index 1cf3f7b404..d4024ef914 100644 --- 
a/lib/std/Io/File/MultiReader.zig +++ b/lib/std/Io/File/MultiReader.zig @@ -113,10 +113,28 @@ pub fn deinit(mr: *MultiReader) void { } } +pub fn fileReader(mr: *MultiReader, index: usize) *File.Reader { + return &mr.streams.contexts()[index].fr; +} + pub fn reader(mr: *MultiReader, index: usize) *Io.Reader { return &mr.streams.contexts()[index].fr.interface; } +/// Checks for errors in all streams, prioritizing `error.Canceled` if it +/// occurred anywhere. +pub fn checkAnyError(mr: *const MultiReader) Error!void { + const contexts = mr.streams.contexts(); + var other: Error!void = {}; + for (contexts) |*context| { + if (context.err) |err| switch (err) { + error.Canceled => |e| return e, + else => |e| other = e, + }; + } + return other; +} + pub fn toOwnedSlice(mr: *MultiReader, index: usize) Allocator.Error![]u8 { const gpa = mr.gpa; const r: *Io.Reader = reader(mr, index); @@ -140,7 +158,7 @@ fn stream(r: *Io.Reader, w: *Io.Writer, limit: Io.Limit) Io.Reader.StreamError!u const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); const context: *Context = @fieldParentPtr("fr", fr); const mr = context.mr; - return fill(mr, context); + return fillUntimed(mr, context); } fn discard(r: *Io.Reader, limit: Io.Limit) Io.Reader.Error!usize { @@ -148,7 +166,7 @@ fn discard(r: *Io.Reader, limit: Io.Limit) Io.Reader.Error!usize { const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); const context: *Context = @fieldParentPtr("fr", fr); const mr = context.mr; - return fill(mr, context); + return fillUntimed(mr, context); } fn readVec(r: *Io.Reader, data: [][]u8) Io.Reader.Error!usize { @@ -156,7 +174,7 @@ fn readVec(r: *Io.Reader, data: [][]u8) Io.Reader.Error!usize { const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); const context: *Context = @fieldParentPtr("fr", fr); const mr = context.mr; - return fill(mr, context); + return fillUntimed(mr, context); } fn rebase(r: *Io.Reader, capacity: usize) Io.Reader.RebaseError!void { @@ -196,20 +214,23 @@ fn rebaseGrowing(mr: *MultiReader, context: *Context, capacity: usize) Allocator } } -fn fill(mr: *MultiReader, original_context: *Context) Io.Reader.Error!usize { +pub const FillError = Io.Batch.WaitError || error{ + /// `fill` was called when all streams already have failed or reached the + /// end. + EndOfStream, +}; + +/// Wait until at least one stream receives more data. 
+pub fn fill(mr: *MultiReader, timeout: Io.Timeout) FillError!void { const contexts = mr.streams.contexts(); const operations = mr.streams.operations(); const io = contexts[0].fr.io; + var any_completed = false; - mr.batch.wait(io, .none) catch |err| switch (err) { - error.Timeout, error.UnsupportedClock => unreachable, - else => |e| { - original_context.err = e; - return error.ReadFailed; - }, - }; + try mr.batch.wait(io, timeout); while (mr.batch.next()) |i| { + any_completed = true; const context = &contexts[i]; const operation = &operations[i]; const n = operation.file_read_streaming.status.result catch |err| { @@ -234,7 +255,19 @@ fn fill(mr: *MultiReader, original_context: *Context) Io.Reader.Error!usize { mr.batch.add(i); } - if (original_context.err != null) return error.ReadFailed; - if (original_context.eos) return error.EndOfStream; + if (!any_completed) return error.EndOfStream; +} + +fn fillUntimed(mr: *MultiReader, context: *Context) Io.Reader.Error!usize { + fill(mr, .none) catch |err| switch (err) { + error.Timeout, error.UnsupportedClock => unreachable, + error.Canceled, error.ConcurrencyUnavailable => |e| { + context.err = e; + return error.ReadFailed; + }, + error.EndOfStream => |e| return e, + }; + if (context.err != null) return error.ReadFailed; + if (context.eos) return error.EndOfStream; return 0; } From e56563ce3fb7ae2fb13f66ba6045ffb1f828ae08 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Jan 2026 21:23:44 -0800 Subject: [PATCH 28/65] std.Io.File.MultiReader: implementation fixes --- lib/std/Build/Step.zig | 34 +++++--- lib/std/Build/Step/Run.zig | 12 ++- lib/std/Io/File/MultiReader.zig | 150 ++++++++++++++++---------------- lib/std/crypto/tls/Client.zig | 3 +- 4 files changed, 103 insertions(+), 96 deletions(-) diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index 40845f75c3..37fc2ca023 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -381,13 +381,15 @@ pub fn addError(step: *Step, comptime fmt: []const u8, args: anytype) error{OutO pub const ZigProcess = struct { child: std.process.Child, + multi_reader_buffer: Io.File.MultiReader.Buffer(2), + multi_reader: Io.File.MultiReader, progress_ipc_fd: if (std.Progress.have_ipc) ?std.posix.fd_t else void, pub const StreamEnum = enum { stdout, stderr }; - pub fn deinit(zp: *ZigProcess, gpa: Allocator, io: Io) void { - _ = gpa; + pub fn deinit(zp: *ZigProcess, io: Io) void { zp.child.kill(io); + zp.multi_reader.deinit(); zp.* = undefined; } }; @@ -460,14 +462,18 @@ pub fn evalZigProcess( .request_resource_usage_statistics = true, .progress_node = prog_node, }) catch |err| return s.fail("failed to spawn zig compiler {s}: {t}", .{ argv[0], err }); - defer if (!watch) zp.child.kill(io); zp.* = .{ .child = zp.child, + .multi_reader_buffer = undefined, + .multi_reader = undefined, .progress_ipc_fd = if (std.Progress.have_ipc) prog_node.getIpcFd() else {}, }; + zp.multi_reader.init(gpa, io, zp.multi_reader_buffer.toStreams(), &.{ + zp.child.stdout.?, zp.child.stderr.?, + }); if (watch) s.setZigProcess(zp); - defer if (!watch) zp.deinit(gpa, io); + defer if (!watch) zp.deinit(io); const result = try zigProcessUpdate(s, zp, watch, web_server, gpa); @@ -534,18 +540,18 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. 
var result: ?Path = null; - var multi_reader_buffer: Io.File.MultiReader.Buffer(2) = undefined; - var multi_reader: Io.File.MultiReader = undefined; - multi_reader.init(gpa, io, multi_reader_buffer.toStreams(), &.{ zp.child.stdout.?, zp.child.stderr.? }); - defer multi_reader.deinit(); - - const stdout = multi_reader.reader(0); - const stderr = multi_reader.reader(1); + const stdout = zp.multi_reader.fileReader(0); while (true) { const Header = std.zig.Server.Message.Header; - const header = try stdout.takeStruct(Header, .little); - const body = try stdout.take(header.bytes_len); + const header = stdout.interface.takeStruct(Header, .little) catch |err| switch (err) { + error.EndOfStream => break, + error.ReadFailed => return stdout.err.?, + }; + const body = stdout.interface.take(header.bytes_len) catch |err| switch (err) { + error.EndOfStream => |e| return e, + error.ReadFailed => return stdout.err.?, + }; switch (header.tag) { .zig_version => { if (!std.mem.eql(u8, builtin.zig_version_string, body)) { @@ -636,7 +642,7 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build. s.result_duration_ns = timer.read(); - const stderr_contents = stderr.buffered(); + const stderr_contents = zp.multi_reader.reader(1).buffered(); if (stderr_contents.len > 0) { try s.result_error_msgs.append(arena, try arena.dupe(u8, stderr_contents)); } diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 82c15531d0..c74286f61b 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1695,11 +1695,9 @@ fn evalZigTest( // The runner unexpectedly closed a stdio pipe, which means a crash. Make sure we've captured // all available stderr to make our error output as useful as possible. const stderr_fr = multi_reader.fileReader(1); - while (true) { - stderr_fr.interface.fillMore() catch |e| switch (e) { - error.ReadFailed => return stderr_fr.err.?, - error.EndOfStream => break, - }; + while (stderr_fr.interface.fillMore()) |_| {} else |e| switch (e) { + error.ReadFailed => return stderr_fr.err.?, + error.EndOfStream => {}, } run.step.result_stderr = try arena.dupe(u8, stderr_fr.interface.buffered()); @@ -1905,7 +1903,7 @@ fn waitZigTest( .clock = .awake, } } else .none; - multi_reader.fill(timeout) catch |err| switch (err) { + multi_reader.fill(64, timeout) catch |err| switch (err) { error.Timeout, error.EndOfStream => return .{ .no_poll = .{ .active_test_index = active_test_index, .ns_elapsed = if (timer) |*t| t.read() else 0, @@ -2227,7 +2225,7 @@ fn evalGeneric(run: *Run, spawn_options: process.SpawnOptions) !EvalGenericResul const stdout_reader = multi_reader.reader(0); const stderr_reader = multi_reader.reader(1); - while (multi_reader.fill(.none)) |_| { + while (multi_reader.fill(64, .none)) |_| { if (run.stdio_limit.toInt()) |limit| { if (stdout_reader.buffered().len > limit) return error.StdoutStreamTooLong; diff --git a/lib/std/Io/File/MultiReader.zig b/lib/std/Io/File/MultiReader.zig index d4024ef914..a1ea42a7d8 100644 --- a/lib/std/Io/File/MultiReader.zig +++ b/lib/std/Io/File/MultiReader.zig @@ -28,18 +28,23 @@ pub const Streams = extern struct { len: u32, pub fn contexts(s: *Streams) []Context { - _ = s; - @panic("TODO"); + const base: usize = @intFromPtr(s); + const ptr: [*]Context = @ptrFromInt(std.mem.alignForward(usize, base + @sizeOf(Streams), @alignOf(Context))); + return ptr[0..s.len]; } pub fn ring(s: *Streams) []u32 { - _ = s; - @panic("TODO"); + const prev = contexts(s); + const end = prev.ptr + prev.len; + const ptr: [*]u32 = 
@ptrFromInt(std.mem.alignForward(usize, @intFromPtr(end), @alignOf(u32))); + return ptr[0..s.len]; } pub fn operations(s: *Streams) []Io.Operation { - _ = s; - @panic("TODO"); + const prev = ring(s); + const end = prev.ptr + prev.len; + const ptr: [*]Io.Operation = @ptrFromInt(std.mem.alignForward(usize, @intFromPtr(end), @alignOf(Io.Operation))); + return ptr[0..s.len]; } }; @@ -51,6 +56,7 @@ pub fn Buffer(comptime n: usize) type { operations: [n][@sizeOf(Io.Operation)]u8 align(@alignOf(Io.Operation)), pub fn toStreams(b: *@This()) *Streams { + b.len = n; return @ptrCast(b); } }; @@ -157,35 +163,87 @@ fn stream(r: *Io.Reader, w: *Io.Writer, limit: Io.Limit) Io.Reader.StreamError!u _ = w; const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); const context: *Context = @fieldParentPtr("fr", fr); - const mr = context.mr; - return fillUntimed(mr, context); + try fillUntimed(context, 1); + return 0; } fn discard(r: *Io.Reader, limit: Io.Limit) Io.Reader.Error!usize { _ = limit; const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); const context: *Context = @fieldParentPtr("fr", fr); - const mr = context.mr; - return fillUntimed(mr, context); + try fillUntimed(context, 1); + return 0; } fn readVec(r: *Io.Reader, data: [][]u8) Io.Reader.Error!usize { _ = data; const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); const context: *Context = @fieldParentPtr("fr", fr); - const mr = context.mr; - return fillUntimed(mr, context); + try fillUntimed(context, 1); + return 0; } fn rebase(r: *Io.Reader, capacity: usize) Io.Reader.RebaseError!void { const fr: *File.Reader = @alignCast(@fieldParentPtr("interface", r)); const context: *Context = @fieldParentPtr("fr", fr); - const mr = context.mr; + try fillUntimed(context, capacity); +} - return rebaseGrowing(mr, context, capacity) catch |err| { - context.err = err; - return error.ReadFailed; +fn fillUntimed(context: *Context, capacity: usize) Io.Reader.Error!void { + fill(context.mr, capacity, .none) catch |err| switch (err) { + error.Timeout, error.UnsupportedClock => unreachable, + error.Canceled, error.ConcurrencyUnavailable => |e| { + context.err = e; + return error.ReadFailed; + }, + error.EndOfStream => |e| return e, }; + if (context.err != null) return error.ReadFailed; + if (context.eos) return error.EndOfStream; +} + +pub const FillError = Io.Batch.WaitError || error{ + /// `fill` was called when all streams already have failed or reached the + /// end. + EndOfStream, +}; + +/// Wait until at least one stream receives more data. 
+pub fn fill(mr: *MultiReader, unused_capacity: usize, timeout: Io.Timeout) FillError!void { + const contexts = mr.streams.contexts(); + const operations = mr.streams.operations(); + const io = contexts[0].fr.io; + var any_completed = false; + + try mr.batch.wait(io, timeout); + + while (mr.batch.next()) |i| { + any_completed = true; + const context = &contexts[i]; + const operation = &operations[i]; + const n = operation.file_read_streaming.status.result catch |err| { + context.err = err; + continue; + }; + if (n == 0) { + context.eos = true; + continue; + } + const r = &context.fr.interface; + r.end += n; + if (r.buffer.len - r.end < unused_capacity) { + rebaseGrowing(mr, context, r.bufferedLen() + unused_capacity) catch |err| { + context.err = err; + continue; + }; + assert(r.seek == 0); + } + context.vec[0] = r.buffer[r.end..]; + operation.file_read_streaming.status = .{ .unstarted = {} }; + mr.batch.add(i); + } + + if (!any_completed) return error.EndOfStream; } fn rebaseGrowing(mr: *MultiReader, context: *Context, capacity: usize) Allocator.Error!void { @@ -209,65 +267,9 @@ fn rebaseGrowing(mr: *MultiReader, context: *Context, capacity: usize) Allocator const data = r.buffer[r.seek..r.end]; const new = try gpa.alloc(u8, adjusted_capacity); @memcpy(new[0..data.len], data); + gpa.free(r.buffer); + r.buffer = new; r.seek = 0; r.end = data.len; } } - -pub const FillError = Io.Batch.WaitError || error{ - /// `fill` was called when all streams already have failed or reached the - /// end. - EndOfStream, -}; - -/// Wait until at least one stream receives more data. -pub fn fill(mr: *MultiReader, timeout: Io.Timeout) FillError!void { - const contexts = mr.streams.contexts(); - const operations = mr.streams.operations(); - const io = contexts[0].fr.io; - var any_completed = false; - - try mr.batch.wait(io, timeout); - - while (mr.batch.next()) |i| { - any_completed = true; - const context = &contexts[i]; - const operation = &operations[i]; - const n = operation.file_read_streaming.status.result catch |err| { - context.err = err; - continue; - }; - if (n == 0) { - context.eos = true; - continue; - } - const r = &context.fr.interface; - r.end += n; - if (r.buffer.len - r.end == 0) { - rebaseGrowing(mr, context, r.bufferedLen() + 1) catch |err| { - context.err = err; - continue; - }; - assert(r.seek == 0); - context.vec[0] = r.buffer; - } - operation.file_read_streaming.status = .{ .unstarted = {} }; - mr.batch.add(i); - } - - if (!any_completed) return error.EndOfStream; -} - -fn fillUntimed(mr: *MultiReader, context: *Context) Io.Reader.Error!usize { - fill(mr, .none) catch |err| switch (err) { - error.Timeout, error.UnsupportedClock => unreachable, - error.Canceled, error.ConcurrencyUnavailable => |e| { - context.err = e; - return error.ReadFailed; - }, - error.EndOfStream => |e| return e, - }; - if (context.err != null) return error.ReadFailed; - if (context.eos) return error.EndOfStream; - return 0; -} diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index 44a73c344a..eeeb7d0537 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -336,10 +336,11 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client // Ensure the input buffer pointer is stable in this scope. input.rebase(tls.max_ciphertext_record_len) catch |err| switch (err) { error.EndOfStream => {}, // We have assurance the remainder of stream can be buffered. 
+        error.ReadFailed => |e| return e,
     };
     const record_header = input.peek(tls.record_header_len) catch |err| switch (err) {
         error.EndOfStream => return error.TlsConnectionTruncated,
-        error.ReadFailed => return error.ReadFailed,
+        error.ReadFailed => |e| return e,
     };
     const record_ct = input.takeEnumNonexhaustive(tls.ContentType, .big) catch unreachable; // already peeked
     input.toss(2); // legacy_version

From dd0153b91b55e3b32227562ed68fdaee31c5b844 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 14 Jan 2026 00:23:33 -0800
Subject: [PATCH 29/65] std.Io.operate: fix bogus catch

This used to have a different error set. It just goes to show how useful
switching on an error set is, even when there is only one prong.
---
 lib/std/Io.zig          | 2 +-
 lib/std/Io/Threaded.zig | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/std/Io.zig b/lib/std/Io.zig
index 980379b72b..a63a89e4ee 100644
--- a/lib/std/Io.zig
+++ b/lib/std/Io.zig
@@ -303,7 +303,7 @@ pub const Operation = union(enum) {
 
 /// Performs one `Operation`.
 pub fn operate(io: Io, operation: *Operation) Cancelable!void {
-    return io.vtable.operate(io.userdata, operation) catch unreachable;
+    return io.vtable.operate(io.userdata, operation);
 }
 
 /// Submits many operations together without waiting for all of them to
diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index 2167baa5e4..55a4c8aad7 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -2465,7 +2465,7 @@ fn operate(userdata: ?*anyopaque, op: *Io.Operation) Io.Cancelable!void {
         .file_read_streaming => |*o| {
             _ = o.status.unstarted;
             o.status = .{ .result = fileReadStreaming(o.file, o.data) catch |err| switch (err) {
-                error.Canceled => return error.Canceled,
+                error.Canceled => |e| return e,
                 else => |e| e,
             } };
         },

From 372e8e54d3d7d09bc8805262c4034b7779467842 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 14 Jan 2026 00:56:00 -0800
Subject: [PATCH 30/65] compiler: update for std.Io.File.MultiReader API

---
 lib/std/Build/Step.zig           | 11 ++++++-
 lib/std/Io/File/MultiReader.zig  |  8 +++++
 lib/std/process.zig              |  2 ++
 lib/std/process/Child.zig        |  3 +-
 lib/std/zig/LibCInstallation.zig |  6 ++--
 src/Compilation.zig              | 51 +++++++++++++++++++++-----------
 6 files changed, 59 insertions(+), 22 deletions(-)

diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index 37fc2ca023..0dd4b93280 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -539,6 +539,7 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.
     if (!watch) try sendMessage(io, zp.child.stdin.?, .exit);
 
     var result: ?Path = null;
+    var eos_err: error{EndOfStream}!void = {};
 
     const stdout = zp.multi_reader.fileReader(0);
 
@@ -549,7 +550,13 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.
             error.ReadFailed => return stdout.err.?,
         };
         const body = stdout.interface.take(header.bytes_len) catch |err| switch (err) {
-            error.EndOfStream => |e| return e,
+            error.EndOfStream => |e| {
+                // Better to report the crash with stderr below, but we set
+                // this in case the child exits successfully while violating
+                // this protocol.
+                eos_err = e;
+                break;
+            },
             error.ReadFailed => return stdout.err.?,
         };
         switch (header.tag) {
@@ -647,6 +654,8 @@ fn zigProcessUpdate
try s.result_error_msgs.append(arena, try arena.dupe(u8, stderr_contents)); } + try eos_err; + return result; } diff --git a/lib/std/Io/File/MultiReader.zig b/lib/std/Io/File/MultiReader.zig index a1ea42a7d8..0cfa777e96 100644 --- a/lib/std/Io/File/MultiReader.zig +++ b/lib/std/Io/File/MultiReader.zig @@ -246,6 +246,14 @@ pub fn fill(mr: *MultiReader, unused_capacity: usize, timeout: Io.Timeout) FillE if (!any_completed) return error.EndOfStream; } +/// Wait until all streams fail or reach the end. +pub fn fillRemaining(mr: *MultiReader, timeout: Io.Timeout) Io.Batch.WaitError!void { + while (fill(mr, 1, timeout)) |_| {} else |err| switch (err) { + error.EndOfStream => return, + else => |e| return e, + } +} + fn rebaseGrowing(mr: *MultiReader, context: *Context, capacity: usize) Allocator.Error!void { const gpa = mr.gpa; const r = &context.fr.interface; diff --git a/lib/std/process.zig b/lib/std/process.zig index b5de41f5d8..6f3c155f6d 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -488,6 +488,7 @@ pub const RunOptions = struct { create_no_window: bool = true, /// Darwin-only. Disable ASLR for the child process. disable_aslr: bool = false, + timeout: Io.Timeout = .none, }; pub const RunResult = struct { @@ -529,6 +530,7 @@ pub fn run(gpa: Allocator, io: Io, options: RunOptions) RunError!RunResult { .stderr = &stderr, .stdout_limit = options.stdout_limit, .stderr_limit = options.stderr_limit, + .timeout = options.timeout, }); const term = try child.wait(io); diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 19c974ff9f..fe6dfa389d 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -137,6 +137,7 @@ pub const CollectOutputOptions = struct { allocator: ?Allocator = null, stdout_limit: Io.Limit = .unlimited, stderr_limit: Io.Limit = .unlimited, + timeout: Io.Timeout = .none, }; /// Collect the output from the process's stdout and stderr. 
Will return once @@ -173,7 +174,7 @@ pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) remaining += 1; } while (remaining > 0) { - try batch.wait(io, .none); + try batch.wait(io, options.timeout); while (batch.next()) |op| { const n = try reads[op].file_read_streaming.status.result; if (n == 0) { diff --git a/lib/std/zig/LibCInstallation.zig b/lib/std/zig/LibCInstallation.zig index 02b3df54dc..6a3f4b4813 100644 --- a/lib/std/zig/LibCInstallation.zig +++ b/lib/std/zig/LibCInstallation.zig @@ -268,7 +268,8 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, gpa: Allocator, io: Io, ar }); const run_res = std.process.run(gpa, io, .{ - .max_output_bytes = 1024 * 1024, + .stdout_limit = .limited(1024 * 1024), + .stderr_limit = .limited(1024 * 1024), .argv = argv.items, .environ_map = &environ_map, // Some C compilers, such as Clang, are known to rely on argv[0] to find the path @@ -584,7 +585,8 @@ fn ccPrintFileName(gpa: Allocator, io: Io, args: CCPrintFileNameOptions) ![]u8 { try argv.append(arg1); const run_res = std.process.run(gpa, io, .{ - .max_output_bytes = 1024 * 1024, + .stdout_limit = .limited(1024 * 1024), + .stderr_limit = .limited(1024 * 1024), .argv = argv.items, .environ_map = &environ_map, // Some C compilers, such as Clang, are known to rely on argv[0] to find the path diff --git a/src/Compilation.zig b/src/Compilation.zig index 98c1b56e38..6b6021ab3c 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -6873,6 +6873,7 @@ fn spawnZigRc( child_progress_node: std.Progress.Node, ) !void { const io = comp.io; + const gpa = comp.gpa; var node_name: std.ArrayList(u8) = .empty; defer node_name.deinit(arena); @@ -6887,55 +6888,69 @@ fn spawnZigRc( }); defer child.kill(io); - var poller = std.Io.poll(comp.gpa, enum { stdout, stderr }, .{ - .stdout = child.stdout.?, - .stderr = child.stderr.?, - }); - defer poller.deinit(); + var multi_reader_buffer: Io.File.MultiReader.Buffer(2) = undefined; + var multi_reader: Io.File.MultiReader = undefined; + multi_reader.init(gpa, io, multi_reader_buffer.toStreams(), &.{ child.stdout.?, child.stderr.? }); + defer multi_reader.deinit(); - const stdout = poller.reader(.stdout); + const stdout = multi_reader.fileReader(0); + const MessageHeader = std.zig.Server.Message.Header; - poll: while (true) { - const MessageHeader = std.zig.Server.Message.Header; - while (stdout.buffered().len < @sizeOf(MessageHeader)) if (!try poller.poll()) break :poll; - const header = stdout.takeStruct(MessageHeader, .little) catch unreachable; - while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll; - const body = stdout.take(header.bytes_len) catch unreachable; + var eos_err: error{EndOfStream}!void = {}; + while (true) { + const header = stdout.interface.takeStruct(MessageHeader, .little) catch |err| switch (err) { + error.EndOfStream => break, + error.ReadFailed => return stdout.err.?, + }; + const body = stdout.interface.take(header.bytes_len) catch |err| switch (err) { + error.EndOfStream => |e| { + // Better to report the crash with stderr below, but we set + // this in case the child exits successfully while violating + // this protocol. + eos_err = e; + break; + }, + error.ReadFailed => return stdout.err.?, + }; switch (header.tag) { // We expect exactly one ErrorBundle, and if any error_bundle header is // sent then it's a fatal error. 
.error_bundle => { - const error_bundle = try std.zig.Server.allocErrorBundle(comp.gpa, body); + const error_bundle = try std.zig.Server.allocErrorBundle(gpa, body); return comp.failWin32ResourceWithOwnedBundle(win32_resource, error_bundle); }, else => {}, // ignore other messages } } - // Just in case there's a failure that didn't send an ErrorBundle (e.g. an error return trace) - const stderr = poller.reader(.stderr); + try multi_reader.fillRemaining(.none); + // Just in case there's a failure that didn't send an ErrorBundle (e.g. an error return trace) const term = child.wait(io) catch |err| { return comp.failWin32Resource(win32_resource, "unable to wait for {s} rc: {t}", .{ argv[0], err }); }; + const stderr = multi_reader.reader(1).buffered(); + switch (term) { .exited => |code| { if (code != 0) { - log.err("zig rc failed with stderr:\n{s}", .{stderr.buffered()}); + log.err("zig rc failed with stderr:\n{s}", .{stderr}); return comp.failWin32Resource(win32_resource, "zig rc exited with code {d}", .{code}); } }, .signal => |sig| { - log.err("zig rc signaled {t} with stderr:\n{s}", .{ sig, stderr.buffered() }); + log.err("zig rc signaled {t} with stderr:\n{s}", .{ sig, stderr }); return comp.failWin32Resource(win32_resource, "zig rc terminated unexpectedly", .{}); }, else => { - log.err("zig rc terminated with stderr:\n{s}", .{stderr.buffered()}); + log.err("zig rc terminated with stderr:\n{s}", .{stderr}); return comp.failWin32Resource(win32_resource, "zig rc terminated unexpectedly", .{}); }, } + + try eos_err; } pub fn tmpFilePath(comp: Compilation, ally: Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 { From 68a34df0257b4baec32617bf8c70c223849b59e2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jan 2026 21:07:59 -0800 Subject: [PATCH 31/65] std.Io.Threaded: fix error set --- lib/std/Io/Threaded.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 55a4c8aad7..0bddff2fc2 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2475,6 +2475,7 @@ fn operate(userdata: ?*anyopaque, op: *Io.Operation) Io.Cancelable!void { fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.WaitError!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); if (is_windows) return batchWaitWindows(t, b, timeout); + if (native_os == .wasi and !builtin.link_libc) @panic("TODO"); const operations = b.operations; const len: u31 = @intCast(operations.len); const ring = b.ring[0..len]; @@ -2597,7 +2598,7 @@ fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { b.user.complete_tail = complete_tail; } -fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.ConcurrentError!void { +fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.WaitError!void { const operations = b.operations; const len: u31 = @intCast(operations.len); const ring = b.ring[0..len]; From 9134430387bb99504c5813abc8b566a97ba5b1f0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jan 2026 21:21:56 -0800 Subject: [PATCH 32/65] std.Io.Threaded: fix batchWait impl --- lib/std/Io/Threaded.zig | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 0bddff2fc2..cfb690e634 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2524,6 +2524,7 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch. 
try operate(t, &operations[op]); ring[complete_tail.index(len)] = op; complete_tail = complete_tail.next(len); + poll_i = 0; return; }, else => {}, @@ -2548,24 +2549,20 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch. if (deadline == null) continue; return error.Timeout; } - var canceled = false; - for (poll_buffer[0..poll_i], map_buffer[0..poll_i]) |*poll_fd, op| { + while (poll_i != 0) { + poll_i -= 1; + const poll_fd = &poll_buffer[poll_i]; + const op = map_buffer[poll_i]; if (poll_fd.revents == 0) { submit_head = submit_head.prev(len); ring[submit_head.index(len)] = op; } else { - operate(t, &operations[op]) catch |err| switch (err) { - error.Canceled => { - canceled = true; - continue; - }, - }; + try operate(t, &operations[op]); ring[complete_tail.index(len)] = op; complete_tail = complete_tail.next(len); } } - poll_i = 0; - return if (canceled) error.Canceled; + return; }, .INTR => continue, else => return error.ConcurrencyUnavailable, From 54241bc770cbd610f48c365dce3c7ebf69336d73 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jan 2026 21:38:44 -0800 Subject: [PATCH 33/65] tools: update for std.process API changes --- tools/update_clang_options.zig | 1 - tools/update_cpu_features.zig | 1 - 2 files changed, 2 deletions(-) diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig index b52267a3fb..a073062d88 100644 --- a/tools/update_clang_options.zig +++ b/tools/update_clang_options.zig @@ -676,7 +676,6 @@ pub fn main(init: std.process.Init) !void { const child_result = try std.process.run(arena, io, .{ .argv = &child_args, - .max_output_bytes = 100 * 1024 * 1024, }); std.debug.print("{s}\n", .{child_result.stderr}); diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig index 3041ee6acc..eaa6a9afd2 100644 --- a/tools/update_cpu_features.zig +++ b/tools/update_cpu_features.zig @@ -1987,7 +1987,6 @@ fn processOneTarget(io: Io, job: Job) void { const child_result = try std.process.run(arena, io, .{ .argv = &child_args, - .max_output_bytes = 500 * 1024 * 1024, }); tblgen_progress.end(); if (child_result.stderr.len != 0) { From a4d438562d794023e64e50c6826eb20aeef20eab Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jan 2026 21:39:03 -0800 Subject: [PATCH 34/65] std.Io.Threaded: fix compilation failures on Windows it's still broken as hell tho --- lib/std/Io/Threaded.zig | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index cfb690e634..0adaa60a81 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -1323,7 +1323,10 @@ fn waitForApcOrAlert() void { const max_iovecs_len = 8; const splat_buffer_size = 64; -const poll_buffer_len = 32; +/// Happens to be the same number that matches maximum number of handles that +/// NtWaitForMultipleObjects accepts. We use this value also for poll() on +/// posix systems. 
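+/// (That limit is MAXIMUM_WAIT_OBJECTS, which is 64.)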
+const poll_buffer_len = 64; const default_PATH = "/usr/local/bin:/bin/:/usr/bin"; comptime { @@ -2635,18 +2638,13 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa overlapped.* = .{ .Internal = 0, .InternalHigh = 0, - .DUMMYUNIONNAME = .{ - .DUMMYSTRUCTNAME = .{ - .Offset = 0, - .OffsetHigh = 0, - }, - .Pointer = null, - }, + .DUMMYUNIONNAME = .{ .Pointer = null }, .hEvent = null, }; var n: windows.DWORD = undefined; const buf = o.data[0]; - if (windows.kernel32.ReadFile(o.file.handle, buf.ptr, buf.len, &n, overlapped) == 0) { + const buf_len = std.math.lossyCast(windows.DWORD, buf.len); + if (windows.kernel32.ReadFile(o.file.handle, buf.ptr, buf_len, &n, overlapped) == 0) { @panic("TODO"); } handles_buffer[buffer_i] = o.file.handle; @@ -2663,6 +2661,7 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa try operate(t, &operations[op]); ring[complete_tail.index(len)] = op; complete_tail = complete_tail.next(len); + buffer_i = 0; return; }, else => {}, @@ -2672,10 +2671,15 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa const map = map_buffer[0..buffer_i]; const syscall: Syscall = try .start(); - const index = windows.WaitForMultipleObjectsEx(handles, false, windows.INFINITE, true); + const index_result = windows.WaitForMultipleObjectsEx(handles, false, windows.INFINITE, true); syscall.finish(); + const index = index_result catch |err| switch (err) { + error.Unexpected => @panic("TODO"), + error.WaitAbandoned => @panic("TODO"), + error.WaitTimeOut => @panic("TODO"), + }; var n: windows.DWORD = undefined; - if (0 == windows.kernel32.GetOverlappedResult(handles[index], overlapped_buffer[index], &n, 0)) { + if (0 == windows.kernel32.GetOverlappedResult(handles[index], &overlapped_buffer[index], &n, 0)) { switch (windows.GetLastError()) { .BROKEN_PIPE => @panic("TODO"), .OPERATION_ABORTED => @panic("TODO"), From 15ca46d1e70f24a1b37ac332630f6949552e4bda Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jan 2026 21:50:54 -0800 Subject: [PATCH 35/65] std.Io.Threaded: fix compilation error on some systems --- lib/std/Io/Threaded.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 0adaa60a81..4024263f4f 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2486,8 +2486,8 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch. const submit_tail = b.user.submit_tail; b.impl.submit_tail = submit_tail; var complete_tail = b.impl.complete_tail; - var map_buffer: [poll_buffer_len]u32 = undefined; // poll_buffer index to operations index - var poll_i: usize = 0; + var map_buffer: [poll_buffer_len]u8 = undefined; // poll_buffer index to operations index + var poll_i: u8 = 0; defer { for (map_buffer[0..poll_i]) |op| { submit_head = submit_head.prev(len); @@ -2515,7 +2515,7 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch. 
.events = posix.POLL.IN, .revents = 0, }; - map_buffer[poll_i] = op; + map_buffer[poll_i] = @intCast(op); poll_i += 1; }, } From a901ea23b0a53f6feba0111886156262fcff469d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jan 2026 22:39:23 -0800 Subject: [PATCH 36/65] update doctest API usage --- tools/doctest.zig | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tools/doctest.zig b/tools/doctest.zig index 55b8ca7bfb..97a9a0be3f 100644 --- a/tools/doctest.zig +++ b/tools/doctest.zig @@ -201,7 +201,6 @@ fn printOutput( .argv = build_args.items, .cwd = tmp_dir_path, .environ_map = environ_map, - .max_output_bytes = max_doc_file_size, }); switch (result.term) { .exited => |exit_code| { @@ -257,7 +256,6 @@ fn printOutput( .argv = run_args, .environ_map = environ_map, .cwd = tmp_dir_path, - .max_output_bytes = max_doc_file_size, }); switch (result.term) { .exited => |exit_code| { @@ -376,7 +374,6 @@ fn printOutput( .argv = test_args.items, .environ_map = environ_map, .cwd = tmp_dir_path, - .max_output_bytes = max_doc_file_size, }); switch (result.term) { .exited => |exit_code| { @@ -432,7 +429,6 @@ fn printOutput( .argv = test_args.items, .environ_map = environ_map, .cwd = tmp_dir_path, - .max_output_bytes = max_doc_file_size, }); switch (result.term) { .exited => |exit_code| { @@ -508,7 +504,6 @@ fn printOutput( .argv = build_args.items, .environ_map = environ_map, .cwd = tmp_dir_path, - .max_output_bytes = max_doc_file_size, }); switch (result.term) { .exited => |exit_code| { @@ -1132,7 +1127,6 @@ fn run( .argv = args, .environ_map = environ_map, .cwd = cwd, - .max_output_bytes = max_doc_file_size, }); switch (result.term) { .exited => |exit_code| { From ec74d650fe5ba6b5fddd0277b06b43a94383a0e0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 19 Jan 2026 15:14:43 -0800 Subject: [PATCH 37/65] incr-check: update to std.Io.File.MultiReader from std.Io.poll --- tools/incr-check.zig | 80 ++++++++++++++++++++++++-------------------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/tools/incr-check.zig b/tools/incr-check.zig index c9564f85c2..840faf6f27 100644 --- a/tools/incr-check.zig +++ b/tools/incr-check.zig @@ -28,6 +28,7 @@ fn logImpl( } pub fn main(init: std.process.Init) !void { + const gpa = init.gpa; const fatal = std.process.fatal; const arena = init.arena.allocator(); const io = init.io; @@ -224,11 +225,10 @@ pub fn main(init: std.process.Init) !void { .enable_darling = enable_darling, }; - var poller = Io.poll(arena, Eval.StreamEnum, .{ - .stdout = child.stdout.?, - .stderr = child.stderr.?, - }); - defer poller.deinit(); + var multi_reader_buffer: Io.File.MultiReader.Buffer(2) = undefined; + var multi_reader: Io.File.MultiReader = undefined; + multi_reader.init(gpa, io, multi_reader_buffer.toStreams(), &.{ child.stdout.?, child.stderr.? }); + defer multi_reader.deinit(); for (case.updates) |update| { var update_node = target_prog_node.start(update.name, 0); @@ -243,10 +243,10 @@ pub fn main(init: std.process.Init) !void { eval.write(update); try eval.requestUpdate(); - try eval.check(&poller, update, update_node); + try eval.check(&multi_reader, update, update_node); } - try eval.end(&poller); + try eval.end(&multi_reader); waitChild(&child, &eval); } @@ -272,9 +272,6 @@ const Eval = struct { enable_wasmtime: bool, enable_darling: bool, - const StreamEnum = enum { stdout, stderr }; - const Poller = Io.Poller(StreamEnum); - /// Currently this function assumes the previous updates have already been written. 
fn write(eval: *Eval, update: Case.Update) void { const io = eval.io; @@ -293,23 +290,29 @@ const Eval = struct { } } - fn check(eval: *Eval, poller: *Poller, update: Case.Update, prog_node: std.Progress.Node) !void { + fn check(eval: *Eval, mr: *Io.File.MultiReader, update: Case.Update, prog_node: std.Progress.Node) !void { const arena = eval.arena; - const stdout = poller.reader(.stdout); - const stderr = poller.reader(.stderr); + const stdout = mr.fileReader(0); + const stderr = &mr.fileReader(1).interface; + const Header = std.zig.Server.Message.Header; - poll: while (true) { - const Header = std.zig.Server.Message.Header; - while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll; - const header = stdout.takeStruct(Header, .little) catch unreachable; - while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll; - const body = stdout.take(header.bytes_len) catch unreachable; + while (true) { + const header = stdout.interface.takeStruct(Header, .little) catch |err| switch (err) { + error.EndOfStream => break, + error.ReadFailed => return stdout.err.?, + }; + const body = stdout.interface.take(header.bytes_len) catch |err| switch (err) { + // If this panic triggers it might be helpful to rework this + // code to print the stderr from the abnormally terminated child. + error.EndOfStream => @panic("unexpected mid-message end of stream"), + error.ReadFailed => return stdout.err.?, + }; switch (header.tag) { .error_bundle => { const result_error_bundle = try std.zig.Server.allocErrorBundle(arena, body); if (stderr.bufferedLen() > 0) { - const stderr_data = try poller.toOwnedSlice(.stderr); + const stderr_data = try mr.toOwnedSlice(1); if (eval.allow_stderr) { std.log.info("error_bundle stderr:\n{s}", .{stderr_data}); } else { @@ -326,7 +329,7 @@ const Eval = struct { var r: std.Io.Reader = .fixed(body); _ = r.takeStruct(std.zig.Server.Message.EmitDigest, .little) catch unreachable; if (stderr.bufferedLen() > 0) { - const stderr_data = try poller.toOwnedSlice(.stderr); + const stderr_data = try mr.toOwnedSlice(1); if (eval.allow_stderr) { std.log.info("emit_digest stderr:\n{s}", .{stderr_data}); } else { @@ -358,11 +361,12 @@ const Eval = struct { } } - if (stderr.bufferedLen() > 0) { + const buffered_stderr = stderr.buffered(); + if (buffered_stderr.len > 0) { if (eval.allow_stderr) { - std.log.info("stderr:\n{s}", .{stderr.buffered()}); + std.log.info("stderr:\n{s}", .{buffered_stderr}); } else { - eval.fatal("unexpected stderr:\n{s}", .{stderr.buffered()}); + eval.fatal("unexpected stderr:\n{s}", .{buffered_stderr}); } } @@ -588,23 +592,27 @@ const Eval = struct { }; } - fn end(eval: *Eval, poller: *Poller) !void { + fn end(eval: *Eval, mr: *Io.File.MultiReader) !void { requestExit(eval.child, eval); - const stdout = poller.reader(.stdout); - const stderr = poller.reader(.stderr); + const stdout = mr.fileReader(0); + const Header = std.zig.Server.Message.Header; - poll: while (true) { - const Header = std.zig.Server.Message.Header; - while (stdout.buffered().len < @sizeOf(Header)) if (!try poller.poll()) break :poll; - const header = stdout.takeStruct(Header, .little) catch unreachable; - while (stdout.buffered().len < header.bytes_len) if (!try poller.poll()) break :poll; - stdout.toss(header.bytes_len); + while (true) { + const header = stdout.interface.takeStruct(Header, .little) catch |err| switch (err) { + error.EndOfStream => break, + error.ReadFailed => return stdout.err.?, + }; + stdout.interface.discardAll(header.bytes_len) 
catch |err| switch (err) { + error.ReadFailed => return stdout.err.?, + error.EndOfStream => |e| return e, + }; } - if (stderr.bufferedLen() > 0) { - eval.fatal("unexpected stderr:\n{s}", .{stderr.buffered()}); - } + try mr.fillRemaining(.none); + + const stderr = mr.reader(1).buffered(); + if (stderr.len > 0) eval.fatal("unexpected stderr:\n{s}", .{stderr}); } fn buildCOutput(eval: *Eval, c_path: []const u8, out_path: []const u8, prog_node: std.Progress.Node) !void { From 276ca77bf04f574a730923aa77757ac0d2a464df Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 19 Jan 2026 15:16:51 -0800 Subject: [PATCH 38/65] build: adjust max_rss for behavior tests observed error: memory usage peaked at 0.70GB (699138048 bytes), exceeding the declared upper bound of 0.66GB (659809075 bytes) --- build.zig | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/build.zig b/build.zig index 9835714efd..aee378bdb2 100644 --- a/build.zig +++ b/build.zig @@ -472,27 +472,7 @@ pub fn build(b: *std.Build) !void { .skip_linux = skip_linux, .skip_llvm = skip_llvm, .skip_libc = skip_libc, - .max_rss = switch (b.graph.host.result.os.tag) { - .freebsd => 2_000_000_000, - .linux => switch (b.graph.host.result.cpu.arch) { - .aarch64 => 659_809_075, - .loongarch64 => 598_902_374, - .powerpc64le => 627_431_833, - .riscv64 => 827_043_430, - .s390x => 580_596_121, - .x86_64 => 3_290_894_745, - else => 3_300_000_000, - }, - .macos => switch (b.graph.host.result.cpu.arch) { - .aarch64 => 767_736_217, - else => 800_000_000, - }, - .windows => switch (b.graph.host.result.cpu.arch) { - .x86_64 => 603_070_054, - else => 700_000_000, - }, - else => 3_300_000_000, - }, + .max_rss = 3_300_000_000, })); test_modules_step.dependOn(tests.addModuleTests(b, .{ From 37316a3cf61a0b193003a19b44116da346c6cd3e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 26 Jan 2026 16:25:58 -0800 Subject: [PATCH 39/65] std.Io.Threaded: resolve merge conflicts --- lib/std/Io/Threaded.zig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 4024263f4f..e60959abd0 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2459,7 +2459,6 @@ fn futexWake(userdata: ?*anyopaque, ptr: *const u32, max_waiters: u32) void { fn operate(userdata: ?*anyopaque, op: *Io.Operation) Io.Cancelable!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); - _ = t; switch (op.*) { .noop => |*o| { _ = o.status.unstarted; @@ -2467,7 +2466,7 @@ fn operate(userdata: ?*anyopaque, op: *Io.Operation) Io.Cancelable!void { }, .file_read_streaming => |*o| { _ = o.status.unstarted; - o.status = .{ .result = fileReadStreaming(o.file, o.data) catch |err| switch (err) { + o.status = .{ .result = fileReadStreaming(t, o.file, o.data) catch |err| switch (err) { error.Canceled => |e| return e, else => |e| e, } }; From 523aa213c9f7466bdff5c7030de7d221f1547621 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 26 Jan 2026 19:07:01 -0800 Subject: [PATCH 40/65] std.Io.Threaded: batchWait and batchCancel for Windows --- lib/std/Build/Step/Run.zig | 1 - lib/std/Io.zig | 8 + lib/std/Io/Threaded.zig | 350 +++++++++++++++++++++++-------------- 3 files changed, 226 insertions(+), 133 deletions(-) diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index c74286f61b..a2d678275a 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1721,7 +1721,6 @@ fn evalZigTest( // a crash of some kind. 
Either way, the child will terminate by itself -- wait for it.
         const stderr_reader = multi_reader.reader(1);
         const stderr_owned = try arena.dupe(u8, stderr_reader.buffered());
-        stderr_reader.tossBuffered();
 
         // Clean up everything and wait for the child to exit.
         child.stdin.?.close(io);
diff --git a/lib/std/Io.zig b/lib/std/Io.zig
index a63a89e4ee..438fb01529 100644
--- a/lib/std/Io.zig
+++ b/lib/std/Io.zig
@@ -350,6 +350,8 @@ pub const Batch = struct {
         }
     };
 
+    /// After calling this, it is safe to unconditionally defer a call to
+    /// `cancel`.
    pub fn init(operations: []Operation, ring: []u32) Batch {
        const len: u31 = @intCast(operations.len);
        assert(ring.len == len);
@@ -408,12 +410,18 @@ pub const Batch = struct {
    /// Starts work on any submitted operations and returns when at least one has completed.
    ///
    /// Returns `error.Timeout` if `timeout` expires first.
+    ///
+    /// Depending on the `Io` implementation, may allocate resources that are
+    /// freed with `cancel`, even if an error is returned.
    pub fn wait(b: *Batch, io: Io, timeout: Timeout) WaitError!void {
        return io.vtable.batchWait(io.userdata, b, timeout);
    }

    /// Returns after all `operations` have completed. Operations which have not completed
    /// after this function returns were successfully dropped and had no side effects.
+    ///
+    /// This function is idempotent with respect to itself and `wait`. It is
+    /// safe to unconditionally `defer` a call to this function after `init`.
    pub fn cancel(b: *Batch, io: Io) void {
        return io.vtable.batchCancel(io.userdata, b);
    }
diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index e60959abd0..0a17e58a67 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -1255,6 +1255,32 @@ const AlertableSyscall = struct {
         assert(is_windows);
     }
 
+    fn start() Io.Cancelable!AlertableSyscall {
+        const thread = Thread.current orelse return .{ .thread = null };
+        switch (thread.cancel_protection) {
+            .blocked => return .{ .thread = null },
+            .unblocked => {},
+        }
+        const old_status = thread.status.fetchOr(.{
+            .cancelation = @enumFromInt(0b010),
+            .awaitable = .null,
+        }, .monotonic);
+        switch (old_status.cancelation) {
+            .parked => unreachable,
+            .blocked => unreachable,
+            .blocked_alertable => unreachable,
+            .blocked_canceling => unreachable,
+            .blocked_alertable_canceling => unreachable,
+            .none => return .{ .thread = thread }, // new status is `.blocked_alertable`
+            .canceling => {
+                // Status is unchanged (still `.canceling`)---change to `.canceled` before return.
+                thread.status.store(.{ .cancelation = .canceled, .awaitable = old_status.awaitable }, .monotonic);
+                return error.Canceled;
+            },
+            .canceled => return .{ .thread = null }, // new status is `.canceled` (unchanged)
+        }
+    }
+
     fn checkCancel(s: AlertableSyscall) Io.Cancelable!void {
         comptime assert(is_windows);
         const thread = s.thread orelse return;
@@ -2501,10 +2527,10 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.
         const op = ring[submit_head.index(len)];
         const operation = &operations[op];
         switch (operation.*) {
-            .noop => {
-                try operate(t, operation);
-                ring[complete_tail.index(len)] = op;
-                complete_tail = complete_tail.next(len);
+            .noop => |*o| {
+                _ = o.status.unstarted;
+                o.status = .{ .result = {} };
+                submitComplete(ring, &complete_tail, op);
             },
             .file_read_streaming => |*o| {
                 _ = o.status.unstarted;
@@ -2524,8 +2550,7 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.
1 => if (timeout == .none) { const op = map_buffer[0]; try operate(t, &operations[op]); - ring[complete_tail.index(len)] = op; - complete_tail = complete_tail.next(len); + submitComplete(ring, &complete_tail, op); poll_i = 0; return; }, @@ -2560,8 +2585,7 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch. ring[submit_head.index(len)] = op; } else { try operate(t, &operations[op]); - ring[complete_tail.index(len)] = op; - complete_tail = complete_tail.next(len); + submitComplete(ring, &complete_tail, op); } } return; @@ -2584,19 +2608,49 @@ fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { while (submit_head != submit_tail) : (submit_head = submit_head.next(len)) { const op = ring[submit_head.index(len)]; switch (operations[op]) { - .noop => { - operate(t, &operations[op]) catch unreachable; - ring[complete_tail.index(len)] = op; - complete_tail = complete_tail.next(len); + .noop => |*o| { + _ = o.status.unstarted; + o.status = .{ .result = {} }; + submitComplete(ring, &complete_tail, op); }, .file_read_streaming => |*o| _ = o.status.unstarted, } } + if (is_windows) { + // Iterate over pending and issue cancelations, then free the allocation for IO_STATUS_BLOCK + if (b.impl.reserved) |reserved| { + const gpa = t.allocator; + const metadatas_ptr: [*]WinOpMetadata = @ptrCast(@alignCast(reserved)); + const metadatas = metadatas_ptr[0..b.operations.len]; + for (metadatas, 0..) |*metadata, op| { + const done = @atomicLoad(windows.NTSTATUS, &metadata.iosb.u.Status, .acquire) != .PENDING; + if (done) continue; + switch (operations[op]) { + .noop => unreachable, + .file_read_streaming => |*o| { + _ = windows.ntdll.NtCancelIoFile(o.file.handle, &metadata.iosb); + }, + } + } + for (metadatas) |*metadata| { + while (@atomicLoad(windows.NTSTATUS, &metadata.iosb.u.Status, .acquire) == .PENDING) { + waitForApcOrAlert(); + } + } + gpa.free(metadatas); + b.impl.reserved = null; + } + } b.impl.submit_head = submit_tail; b.impl.complete_tail = complete_tail; b.user.complete_tail = complete_tail; } +const WinOpMetadata = struct { + iosb: windows.IO_STATUS_BLOCK, + pending: bool, +}; + fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.WaitError!void { const operations = b.operations; const len: u31 = @intCast(operations.len); @@ -2606,16 +2660,16 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa b.impl.submit_tail = submit_tail; var complete_tail = b.impl.complete_tail; - var overlapped_buffer: [poll_buffer_len]windows.OVERLAPPED = undefined; - var handles_buffer: [poll_buffer_len]windows.HANDLE = undefined; - var map_buffer: [poll_buffer_len]u32 = undefined; // handles_buffer index to operations index - var buffer_i: usize = 0; + const metadatas_ptr: [*]WinOpMetadata = if (b.impl.reserved) |reserved| @ptrCast(@alignCast(reserved)) else a: { + const gpa = t.allocator; + const metadatas = gpa.alloc(WinOpMetadata, operations.len) catch return error.ConcurrencyUnavailable; + b.impl.reserved = metadatas.ptr; + @memset(metadatas, .{ .iosb = undefined, .pending = false }); + break :a metadatas.ptr; + }; + const metadatas = metadatas_ptr[0..operations.len]; defer { - for (map_buffer[0..buffer_i]) |op| { - submit_head = submit_head.prev(len); - ring[submit_head.index(len)] = op; - } b.impl.submit_head = submit_head; b.impl.complete_tail = complete_tail; b.user.complete_tail = complete_tail; @@ -2624,74 +2678,76 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa while (submit_head != 
submit_tail) : (submit_head = submit_head.next(len)) { const op = ring[submit_head.index(len)]; const operation = &operations[op]; + const metadata = &metadatas[op]; + metadata.* = .{ .iosb = undefined, .pending = false }; switch (operation.*) { - .noop => { - try operate(t, operation); - ring[complete_tail.index(len)] = op; - complete_tail = complete_tail.next(len); + .noop => |*o| { + _ = o.status.unstarted; + o.status = .{ .result = {} }; + submitComplete(ring, &complete_tail, op); }, .file_read_streaming => |*o| { _ = o.status.unstarted; - if (handles_buffer.len - buffer_i == 0) return error.ConcurrencyUnavailable; - const overlapped = &overlapped_buffer[buffer_i]; - overlapped.* = .{ - .Internal = 0, - .InternalHigh = 0, - .DUMMYUNIONNAME = .{ .Pointer = null }, - .hEvent = null, - }; - var n: windows.DWORD = undefined; - const buf = o.data[0]; - const buf_len = std.math.lossyCast(windows.DWORD, buf.len); - if (windows.kernel32.ReadFile(o.file.handle, buf.ptr, buf_len, &n, overlapped) == 0) { - @panic("TODO"); + switch (try ntReadFile(o.file.handle, o.data, &metadata.iosb)) { + .status => { + o.status = .{ .result = ntReadFileResult(&metadata.iosb) }; + submitComplete(ring, &complete_tail, op); + }, + .pending => { + o.status = .{ .pending = b }; + metadata.pending = true; + }, } - handles_buffer[buffer_i] = o.file.handle; - map_buffer[buffer_i] = op; - buffer_i += 1; }, } } - switch (buffer_i) { - 0 => return, - 1 => if (timeout == .none) { - const op = map_buffer[0]; - try operate(t, &operations[op]); - ring[complete_tail.index(len)] = op; - complete_tail = complete_tail.next(len); - buffer_i = 0; - return; - }, - else => {}, - } + var delay_interval: windows.LARGE_INTEGER = timeoutToWindowsInterval(timeout); - const handles = handles_buffer[0..buffer_i]; - const map = map_buffer[0..buffer_i]; - - const syscall: Syscall = try .start(); - const index_result = windows.WaitForMultipleObjectsEx(handles, false, windows.INFINITE, true); - syscall.finish(); - const index = index_result catch |err| switch (err) { - error.Unexpected => @panic("TODO"), - error.WaitAbandoned => @panic("TODO"), - error.WaitTimeOut => @panic("TODO"), - }; - var n: windows.DWORD = undefined; - if (0 == windows.kernel32.GetOverlappedResult(handles[index], &overlapped_buffer[index], &n, 0)) { - switch (windows.GetLastError()) { - .BROKEN_PIPE => @panic("TODO"), - .OPERATION_ABORTED => @panic("TODO"), - else => @panic("TODO"), + while (true) { + const alertable_syscall = try AlertableSyscall.start(); + const delay_rc = windows.ntdll.NtDelayExecution(windows.TRUE, &delay_interval); + alertable_syscall.finish(); + switch (delay_rc) { + .SUCCESS => { + // The thread woke due to the timeout. Although spurious + // timeouts are OK, when no deadline is passed we must not + // return `error.Timeout`. + if (timeout != .none) return error.Timeout; + }, + else => {}, } - } else switch (operations[map[index]]) { - .noop => unreachable, - .file_read_streaming => |*o| { - o.status = .{ .result = n }; - }, + var any_done = false; + var any_pending = false; + for (metadatas, 0..) 
|*metadata, op_usize| { + if (!metadata.pending) continue; + any_pending = true; + const op: u31 = @intCast(op_usize); + const done = @atomicLoad(windows.NTSTATUS, &metadata.iosb.u.Status, .acquire) != .PENDING; + switch (operations[op]) { + .noop => unreachable, + .file_read_streaming => |*o| { + assert(o.status.pending == b); + if (!done) continue; + o.status = .{ .result = ntReadFileResult(&metadata.iosb) }; + }, + } + any_done = true; + metadata.pending = false; + submitComplete(ring, &complete_tail, op); + } + if (any_done) return; + if (!any_pending) return; } } +fn submitComplete(ring: []u32, complete_tail: *Io.Batch.RingIndex, op: u32) void { + const ct = complete_tail.*; + const len: u31 = @intCast(ring.len); + ring[ct.index(len)] = op; + complete_tail.* = ct.next(len); +} + const dirCreateDir = switch (native_os) { .windows => dirCreateDirWindows, .wasi => dirCreateDirWasi, @@ -5529,7 +5585,7 @@ fn dirRealPathFileWindows(userdata: ?*anyopaque, dir: Dir, sub_path: []const u8, fn realPathWindows(h_file: windows.HANDLE, out_buffer: []u8) File.RealPathError!usize { var wide_buf: [windows.PATH_MAX_WIDE]u16 = undefined; - // TODO move GetFinalPathNameByHandle logic into std.Io.Threaded and add cancel checks + // TODO move GetFinalPathNameByHandle logic into Io.Threaded and add cancel checks try Thread.checkCancel(); const wide_slice = try windows.GetFinalPathNameByHandle(h_file, .{}, &wide_buf); @@ -8617,70 +8673,42 @@ fn fileReadStreamingPosix(file: File, data: []const []u8) File.Reader.Error!usiz } fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!usize { - var index: usize = 0; - while (index < data.len and data[index].len == 0) index += 1; - if (index == data.len) return 0; - const buffer = data[index]; - var io_status_block: windows.IO_STATUS_BLOCK = undefined; - const syscall: Syscall = try .start(); - while (true) { - io_status_block.u.Status = .PENDING; - switch (windows.ntdll.NtReadFile( - file.handle, - null, // event - noopApc, // apc callback - null, // apc context - &io_status_block, - buffer.ptr, - @min(std.math.maxInt(u32), buffer.len), - null, // byte offset - null, // key - )) { - .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => { - syscall.finish(); - return io_status_block.Information; - }, - .PENDING => break, - .CANCELLED => { - try syscall.checkCancel(); - continue; - }, - .INVALID_DEVICE_REQUEST => return syscall.fail(error.IsDir), - .LOCK_NOT_GRANTED => return syscall.fail(error.LockViolation), - .ACCESS_DENIED => return syscall.fail(error.AccessDenied), - .INVALID_PARAMETER => |err| return syscall.ntstatusBug(err), // streaming read of async mode file - else => |status| return syscall.unexpectedNtstatus(status), - } - } - { - // Once we get here we received PENDING so we must not return from the - // function until the operation completes. - defer while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { - waitForApcOrAlert(); - }; + if (ntReadFile(file.handle, data, &io_status_block)) |result| switch (result) { + .status => return ntReadFileResult(&io_status_block), + .pending => { + // Once we get here we received PENDING so we must not return from the + // function until the operation completes. 
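+            // Returning early would let the kernel complete the read into an
+            // io_status_block that no longer exists on the stack.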
+ defer while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { + waitForApcOrAlert(); + }; - const alertable_syscall = syscall.toAlertable() catch |err| switch (err) { - error.Canceled => |e| { - _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block); - return e; - }, - }; - defer alertable_syscall.finish(); - waitForApcOrAlert(); - while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { - alertable_syscall.checkCancel() catch |err| switch (err) { + const alertable_syscall = AlertableSyscall.start() catch |err| switch (err) { error.Canceled => |e| { _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block); return e; }, }; + defer alertable_syscall.finish(); waitForApcOrAlert(); - } - } + while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { + alertable_syscall.checkCancel() catch |err| switch (err) { + error.Canceled => |e| { + _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block); + return e; + }, + }; + waitForApcOrAlert(); + } + }, + } else |err| return err; + return ntReadFileResult(&io_status_block); +} + +fn ntReadFileResult(io_status_block: *windows.IO_STATUS_BLOCK) !usize { switch (io_status_block.u.Status) { .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => return io_status_block.Information, - .PENDING => unreachable, // cannot return until the operation completes + .PENDING => unreachable, .INVALID_DEVICE_REQUEST => return error.IsDir, .LOCK_NOT_GRANTED => return error.LockViolation, .ACCESS_DENIED => return error.AccessDenied, @@ -8688,6 +8716,47 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us } } +fn ntReadFile(handle: windows.HANDLE, data: []const []u8, iosb: *windows.IO_STATUS_BLOCK) Io.Cancelable!enum { status, pending } { + var index: usize = 0; + while (index < data.len and data[index].len == 0) index += 1; + if (index == data.len) { + iosb.u.Status = .SUCCESS; + iosb.Information = 0; + return .status; + } + const buffer = data[index]; + + const syscall: Syscall = try .start(); + while (true) { + iosb.u.Status = .PENDING; + switch (windows.ntdll.NtReadFile( + handle, + null, // event + noopApc, // apc callback + null, // apc context + iosb, + buffer.ptr, + @min(std.math.maxInt(u32), buffer.len), + null, // byte offset + null, // key + )) { + .PENDING => { + syscall.finish(); + return .pending; + }, + .CANCELLED => { + try syscall.checkCancel(); + continue; + }, + else => |status| { + syscall.finish(); + iosb.u.Status = status; + return .status; + }, + } + } +} + fn fileReadPositionalPosix(file: File, data: []const []u8, offset: u64) File.ReadPositionalError!usize { if (!have_preadv) @compileError("TODO implement fileReadPositionalPosix for cursed operating systems that don't support preadv (it's only Haiku)"); @@ -9318,7 +9387,7 @@ fn processExecutablePath(userdata: ?*anyopaque, out_buffer: []u8) process.Execut }; defer w.CloseHandle(h_file); - // TODO move GetFinalPathNameByHandle logic into std.Io.Threaded and add cancel checks + // TODO move GetFinalPathNameByHandle logic into Io.Threaded and add cancel checks try Thread.checkCancel(); const wide_slice = try w.GetFinalPathNameByHandle(h_file, .{}, &path_name_w_buf.data); @@ -12989,7 +13058,7 @@ fn processSetCurrentDir(userdata: ?*anyopaque, dir: Dir) process.SetCurrentDirEr if (is_windows) { var dir_path_buffer: [windows.PATH_MAX_WIDE]u16 = undefined; - // TODO move GetFinalPathNameByHandle logic into std.Io.Threaded and add cancel checks + // TODO move 
GetFinalPathNameByHandle logic into std.Io.Threaded and add cancel checks
+        // TODO move GetFinalPathNameByHandle logic into Io.Threaded and add cancel checks
         try Thread.checkCancel();
         const dir_path = try windows.GetFinalPathNameByHandle(dir.handle, .{}, &dir_path_buffer);
         const path_len_bytes = std.math.cast(u16, dir_path.len * 2) orelse return error.NameTooLong;
@@ -16657,7 +16726,7 @@ const parking_sleep = struct {
 /// Spurious wakeups are possible.
 ///
 /// `addr_hint` has no semantic effect, but may allow the OS to optimize this operation.
-fn park(opt_deadline: ?std.Io.Clock.Timestamp, addr_hint: ?*const anyopaque) error{Timeout}!void {
+fn park(opt_deadline: ?Io.Clock.Timestamp, addr_hint: ?*const anyopaque) error{Timeout}!void {
     comptime assert(use_parking_futex or use_parking_sleep);
     switch (native_os) {
         .windows => {
@@ -16713,6 +16782,23 @@ fn park(opt_deadline: ?std.Io.Clock.Timestamp, addr_hint: ?*const anyopaque) err
     }
 }
 
+fn timeoutToWindowsInterval(timeout: Io.Timeout) windows.LARGE_INTEGER {
+    switch (timeout) {
+        .none => {
+            return std.math.minInt(windows.LARGE_INTEGER); // infinite timeout
+        },
+        .deadline => |deadline| {
+            const nanoseconds = deadline.raw.nanoseconds;
+            return @intCast(@divTrunc(nanoseconds, 100));
+        },
+        .duration => |duration| {
+            const now_timestamp = nowWindows(duration.clock) catch unreachable;
+            const deadline_ns = now_timestamp.nanoseconds + duration.raw.nanoseconds;
+            return @intCast(@divTrunc(deadline_ns, 100));
+        },
+    }
+}
+
 const UnparkTid = switch (native_os) {
     // `NtAlertMultipleThreadByThreadId` is weird and wants 64-bit thread handles?
     .windows => usize,

From efa502a1cd0333860848c2b6e15978f6551e02f3 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 27 Jan 2026 11:29:03 -0800
Subject: [PATCH 41/65] std.Build.Step.Run: gracefully handle test runner
 misbehavior

Specifically, if it misbehaves after sending a message header but not
the body.
---
 lib/std/Build/Step/Run.zig | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig
index a2d678275a..4d3cda54c9 100644
--- a/lib/std/Build/Step/Run.zig
+++ b/lib/std/Build/Step/Run.zig
@@ -1930,7 +1930,28 @@ fn waitZigTest(
         }
         // There is definitely a header available now -- read it.
         const header = stdout.takeStruct(Header, .little) catch unreachable;
-        try stdout.fill(header.bytes_len);
+
+        while (stdout.buffered().len < header.bytes_len) {
+            const timeout: Io.Timeout = t: {
+                const t = if (timer) |*t| t else break :t .none;
+                if (response_timeout_ns) |timeout_ns| break :t .{ .duration = .{
+                    .raw = .fromNanoseconds(timeout_ns -| t.read()),
+                    .clock = .awake,
+                } };
+                break :t .none;
+            };
+            multi_reader.fill(64, timeout) catch |err| switch (err) {
+                error.Timeout, error.EndOfStream => return .{ .no_poll = .{
+                    .active_test_index = active_test_index,
+                    .ns_elapsed = if (timer) |*t| t.read() else 0,
+                } },
+                error.UnsupportedClock => {
+                    timer = null;
+                    continue;
+                },
+                else => |e| return e,
+            };
+        }
         const body = stdout.take(header.bytes_len) catch unreachable;
 
         var body_r: std.Io.Reader = .fixed(body);

From 2fb224cb845c18044186c1348ca9a1b2a3152948 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 27 Jan 2026 11:33:07 -0800
Subject: [PATCH 42/65] std.Io.Threaded: fix bad use of AlertableSyscall

The defer would cause two problems:

1. It kept the state active during the call to NtCancelIoFile.
2. It made an invalid state transition: after error.Canceled is returned
   from checkCancel, the new status is already canceled, and calling
   finish after that is illegal.
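
A sketch of the corrected shape (same names as in
fileReadStreamingWindows; the actual change is in the diff below):

    const alertable_syscall = AlertableSyscall.start() catch |err| switch (err) {
        error.Canceled => |e| {
            _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block);
            return e;
        },
    };
    waitForApcOrAlert();
    while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) {
        alertable_syscall.checkCancel() catch |err| switch (err) {
            error.Canceled => |e| {
                _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block);
                return e;
            },
        };
        waitForApcOrAlert();
    }
    // Only reached when the wait was not canceled.
    alertable_syscall.finish();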
---
 lib/std/Io/Threaded.zig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index 0a17e58a67..0f0b043ce0 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -8689,7 +8689,6 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us
                     return e;
                 },
             };
-            defer alertable_syscall.finish();
             waitForApcOrAlert();
             while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) {
                 alertable_syscall.checkCancel() catch |err| switch (err) {
@@ -8700,6 +8699,7 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us
                 };
                 waitForApcOrAlert();
             }
+            alertable_syscall.finish();
        },
    } else |err| return err;
    return ntReadFileResult(&io_status_block);

From fdf1ee973e9f3cb01f6fec9e460950622cdf92e4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 27 Jan 2026 13:24:27 -0800
Subject: [PATCH 43/65] std.Io.Threaded: move the NtDelayExecution later in
 batchWait

Also guard against receiving SUCCESS with a 0-byte read; MS docs say
that pipes can do this if there is a 0-byte write.
---
 lib/std/Io/Threaded.zig         | 42 +++++++++++++++++++++----------
 lib/std/os/windows/kernel32.zig |  3 ---
 2 files changed, 29 insertions(+), 16 deletions(-)

diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index 0f0b043ce0..00f14ed741 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -2705,18 +2705,6 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa
     var delay_interval: windows.LARGE_INTEGER = timeoutToWindowsInterval(timeout);
 
     while (true) {
-        const alertable_syscall = try AlertableSyscall.start();
-        const delay_rc = windows.ntdll.NtDelayExecution(windows.TRUE, &delay_interval);
-        alertable_syscall.finish();
-        switch (delay_rc) {
-            .SUCCESS => {
-                // The thread woke due to the timeout. Although spurious
-                // timeouts are OK, when no deadline is passed we must not
-                // return `error.Timeout`.
-                if (timeout != .none) return error.Timeout;
-            },
-            else => {},
-        }
         var any_done = false;
         var any_pending = false;
         for (metadatas, 0..) |*metadata, op_usize| {
@@ -2738,6 +2726,18 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa
         }
         if (any_done) return;
         if (!any_pending) return;
+        const alertable_syscall = try AlertableSyscall.start();
+        const delay_rc = windows.ntdll.NtDelayExecution(windows.TRUE, &delay_interval);
+        alertable_syscall.finish();
+        switch (delay_rc) {
+            .SUCCESS => {
+                // The thread woke due to the timeout. Although spurious
+                // timeouts are OK, when no deadline is passed we must not
+                // return `error.Timeout`.
+                if (timeout != .none) return error.Timeout;
+            },
+            else => {},
+        }
     }
 }
 
@@ -8707,7 +8707,11 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us
 
 fn ntReadFileResult(io_status_block: *windows.IO_STATUS_BLOCK) !usize {
     switch (io_status_block.u.Status) {
-        .SUCCESS, .END_OF_FILE, .PIPE_BROKEN => return io_status_block.Information,
+        .SUCCESS => {
+            assert(io_status_block.Information != 0);
+            return io_status_block.Information;
+        },
+        .END_OF_FILE, .PIPE_BROKEN => return 0,
         .PENDING => unreachable,
         .INVALID_DEVICE_REQUEST => return error.IsDir,
         .LOCK_NOT_GRANTED => return error.LockViolation,
@@ -8744,6 +8740,17 @@ fn ntReadFile(handle: windows.HANDLE, data: []const []u8, iosb: *windows.IO_STAT
                 syscall.finish();
                 return .pending;
             },
+            .SUCCESS => {
+                // Only END_OF_FILE is the true end.
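+                // A pipe can report SUCCESS with zero bytes read when a
+                // 0-byte write occurs, so treat that as spurious and retry.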
+                if (iosb.Information == 0) {
+                    try syscall.checkCancel();
+                    continue;
+                } else {
+                    syscall.finish();
+                    iosb.u.Status = .SUCCESS;
+                    return .status;
+                }
+            },
             .CANCELLED => {
                 try syscall.checkCancel();
                 continue;
@@ -9709,6 +9724,7 @@ fn writeFileStreamingWindows(
     handle: windows.HANDLE,
     bytes: []const u8,
 ) File.Writer.Error!usize {
+    assert(bytes.len != 0);
     var bytes_written: windows.DWORD = undefined;
     const adjusted_len = std.math.lossyCast(u32, bytes.len);
     const syscall: Syscall = try .start();
diff --git a/lib/std/os/windows/kernel32.zig b/lib/std/os/windows/kernel32.zig
index fe28e40cbb..b6785e4a33 100644
--- a/lib/std/os/windows/kernel32.zig
+++ b/lib/std/os/windows/kernel32.zig
@@ -188,9 +188,6 @@ pub extern "kernel32" fn PostQueuedCompletionStatus(
     lpOverlapped: ?*OVERLAPPED,
 ) callconv(.winapi) BOOL;
 
-// TODO:
-// GetOverlappedResultEx with bAlertable=false, which calls: GetStdHandle + WaitForSingleObjectEx.
-// Uses the SwitchBack system to run implementations for older programs; Do we care about this?
 pub extern "kernel32" fn GetOverlappedResult(
     hFile: HANDLE,
     lpOverlapped: *OVERLAPPED,

From 8a80b5464022a9fb4320e37f9dfc2aa83539ff07 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 27 Jan 2026 15:31:23 -0800
Subject: [PATCH 44/65] std: remove error.BrokenPipe from file reads, add
 error.EndOfStream

Also allow streaming file reads to return 0-byte reads. According to
Microsoft documentation, on Windows it is possible to get 0-byte reads
from pipes when 0-byte writes are made.
---
 lib/std/Io.zig                  |  8 ++--
 lib/std/Io/File.zig             |  5 +-
 lib/std/Io/File/MultiReader.zig | 14 ++----
 lib/std/Io/File/Reader.zig      | 34 +++++++------
 lib/std/Io/Threaded.zig         | 85 ++++++++++++++-------------------
 lib/std/Progress.zig            |  3 +-
 lib/std/process/Child.zig       | 28 ++++++-----
 lib/std/zig/system.zig          |  1 -
 8 files changed, 82 insertions(+), 96 deletions(-)

diff --git a/lib/std/Io.zig b/lib/std/Io.zig
index 438fb01529..c00e4619e8 100644
--- a/lib/std/Io.zig
+++ b/lib/std/Io.zig
@@ -187,7 +187,7 @@ pub const VTable = struct {
     fileWritePositional: *const fn (?*anyopaque, File, header: []const u8, data: []const []const u8, splat: usize, offset: u64) File.WritePositionalError!usize,
     fileWriteFileStreaming: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit) File.Writer.WriteFileError!usize,
     fileWriteFilePositional: *const fn (?*anyopaque, File, header: []const u8, *Io.File.Reader, Io.Limit, offset: u64) File.WriteFilePositionalError!usize,
-    /// Returns 0 on end of stream.
+    /// Returns 0 if reading at or past the end.
     fileReadPositional: *const fn (?*anyopaque, File, data: []const []u8, offset: u64) File.ReadPositionalError!usize,
     fileSeekBy: *const fn (?*anyopaque, File, relative_offset: i64) File.SeekError!void,
     fileSeekTo: *const fn (?*anyopaque, File, absolute_offset: u64) File.SeekError!void,
@@ -263,18 +263,18 @@ pub const Operation = union(enum) {
         status: Status(void) = .{ .unstarted = {} },
     };
 
-    /// Returns 0 on end of stream.
+    /// May return 0-byte reads, which is different from `error.EndOfStream`.
     pub const FileReadStreaming = struct {
         file: File,
         data: []const []u8,
         status: Status(Error!usize) = .{ .unstarted = {} },
 
-        pub const Error = error{
+        pub const Error = UnendingError || error{EndOfStream};
+        pub const UnendingError = error{
             InputOutput,
             SystemResources,
             /// Trying to read a directory file descriptor as if it were a file.
             IsDir,
-            BrokenPipe,
             ConnectionResetByPeer,
             /// File was not opened with read capability.
NotOpenForReading, diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index c545b60222..df5b3b5a53 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -552,11 +552,13 @@ pub fn setTimestampsNow(file: File, io: Io) SetTimestampsError!void { }); } +pub const ReadStreamingError = error{EndOfStream} || Reader.Error; + /// Returns 0 on stream end or if `buffer` has no space available for data. /// /// See also: /// * `reader` -pub fn readStreaming(file: File, io: Io, buffer: []const []u8) Reader.Error!usize { +pub fn readStreaming(file: File, io: Io, buffer: []const []u8) ReadStreamingError!usize { var operation: Io.Operation = .{ .file_read_streaming = .{ .file = file, .data = buffer, @@ -570,7 +572,6 @@ pub const ReadPositionalError = error{ SystemResources, /// Trying to read a directory file descriptor as if it were a file. IsDir, - BrokenPipe, /// Non-blocking has been enabled, and reading from the file descriptor /// would block. WouldBlock, diff --git a/lib/std/Io/File/MultiReader.zig b/lib/std/Io/File/MultiReader.zig index 0cfa777e96..08ad76000c 100644 --- a/lib/std/Io/File/MultiReader.zig +++ b/lib/std/Io/File/MultiReader.zig @@ -15,10 +15,9 @@ pub const Context = struct { fr: File.Reader, vec: [1][]u8, err: ?Error, - eos: bool, }; -pub const Error = Allocator.Error || File.Reader.Error || Io.ConcurrentError; +pub const Error = Allocator.Error || File.ReadStreamingError || Io.ConcurrentError; /// Trailing: /// * `contexts: [len]Context` @@ -85,7 +84,6 @@ pub fn init(mr: *MultiReader, gpa: Allocator, io: Io, streams: *Streams, files: }, .vec = .{&.{}}, .err = null, - .eos = false, }; const operations = streams.operations(); const ring = streams.ring(); @@ -198,8 +196,10 @@ fn fillUntimed(context: *Context, capacity: usize) Io.Reader.Error!void { }, error.EndOfStream => |e| return e, }; - if (context.err != null) return error.ReadFailed; - if (context.eos) return error.EndOfStream; + if (context.err) |err| switch (err) { + error.EndOfStream => |e| return e, + else => return error.ReadFailed, + }; } pub const FillError = Io.Batch.WaitError || error{ @@ -225,10 +225,6 @@ pub fn fill(mr: *MultiReader, unused_capacity: usize, timeout: Io.Timeout) FillE context.err = err; continue; }; - if (n == 0) { - context.eos = true; - continue; - } const r = &context.fr.interface; r.end += n; if (r.buffer.len - r.end < unused_capacity) { diff --git a/lib/std/Io/File/Reader.zig b/lib/std/Io/File/Reader.zig index 7703521d7e..effd000df8 100644 --- a/lib/std/Io/File/Reader.zig +++ b/lib/std/Io/File/Reader.zig @@ -26,7 +26,7 @@ size_err: ?SizeError = null, seek_err: ?SeekError = null, interface: Io.Reader, -pub const Error = Io.Operation.FileReadStreaming.Error || Io.Cancelable; +pub const Error = Io.Operation.FileReadStreaming.UnendingError || Io.Cancelable; pub const SizeError = File.StatError || error{ /// Occurs if, for example, the file handle is a network socket and therefore does not have a size. 
@@ -280,14 +280,16 @@ fn readVecStreaming(r: *Reader, data: [][]u8) Io.Reader.Error!usize { const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, data); const dest = iovecs_buffer[0..dest_n]; assert(dest[0].len > 0); - const n = r.file.readStreaming(io, dest) catch |err| { - r.err = err; - return error.ReadFailed; + const n = r.file.readStreaming(io, dest) catch |err| switch (err) { + error.EndOfStream => { + r.size = r.pos; + return error.EndOfStream; + }, + else => |e| { + r.err = e; + return error.ReadFailed; + }, }; - if (n == 0) { - r.size = r.pos; - return error.EndOfStream; - } r.pos += n; if (n > data_size) { r.interface.end += n - data_size; @@ -335,14 +337,16 @@ fn discard(io_reader: *Io.Reader, limit: Io.Limit) Io.Reader.Error!usize { const dest_n, const data_size = try r.interface.writableVector(&iovecs_buffer, &data); const dest = iovecs_buffer[0..dest_n]; assert(dest[0].len > 0); - const n = file.readStreaming(io, dest) catch |err| { - r.err = err; - return error.ReadFailed; + const n = file.readStreaming(io, dest) catch |err| switch (err) { + error.EndOfStream => { + r.size = r.pos; + return error.EndOfStream; + }, + else => |e| { + r.err = e; + return error.ReadFailed; + }, }; - if (n == 0) { - r.size = r.pos; - return error.EndOfStream; - } r.pos += n; if (n > data_size) { r.interface.end += n - data_size; diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 00f14ed741..c9c38c6b29 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -8583,14 +8583,14 @@ fn fileClose(userdata: ?*anyopaque, files: []const File) void { for (files) |file| posix.close(file.handle); } -fn fileReadStreaming(userdata: ?*anyopaque, file: File, data: []const []u8) File.Reader.Error!usize { +fn fileReadStreaming(userdata: ?*anyopaque, file: File, data: []const []u8) File.ReadStreamingError!usize { const t: *Threaded = @ptrCast(@alignCast(userdata)); _ = t; if (is_windows) return fileReadStreamingWindows(file, data); return fileReadStreamingPosix(file, data); } -fn fileReadStreamingPosix(file: File, data: []const []u8) File.Reader.Error!usize { +fn fileReadStreamingPosix(file: File, data: []const []u8) File.ReadStreamingError!usize { var iovecs_buffer: [max_iovecs_len]posix.iovec = undefined; var i: usize = 0; for (data) |buf| { @@ -8611,28 +8611,24 @@ fn fileReadStreamingPosix(file: File, data: []const []u8) File.Reader.Error!usiz switch (std.os.wasi.fd_read(file.handle, dest.ptr, dest.len, &nread)) { .SUCCESS => { syscall.finish(); + if (nread == 0) return error.EndOfStream; return nread; }, .INTR, .TIMEDOUT => { try syscall.checkCancel(); continue; }, - else => |e| { - syscall.finish(); - switch (e) { - .INVAL => |err| return errnoBug(err), - .FAULT => |err| return errnoBug(err), - .BADF => return error.IsDir, // File operation on directory. - .IO => return error.InputOutput, - .ISDIR => return error.IsDir, - .NOBUFS => return error.SystemResources, - .NOMEM => return error.SystemResources, - .NOTCONN => return error.SocketUnconnected, - .CONNRESET => return error.ConnectionResetByPeer, - .NOTCAPABLE => return error.AccessDenied, - else => |err| return posix.unexpectedErrno(err), - } - }, + .BADF => return syscall.fail(error.IsDir), // File operation on directory. 
+ .IO => return syscall.fail(error.InputOutput), + .ISDIR => return syscall.fail(error.IsDir), + .NOBUFS => return syscall.fail(error.SystemResources), + .NOMEM => return syscall.fail(error.SystemResources), + .NOTCONN => return syscall.fail(error.SocketUnconnected), + .CONNRESET => return syscall.fail(error.ConnectionResetByPeer), + .NOTCAPABLE => return syscall.fail(error.AccessDenied), + .INVAL => |err| return syscall.errnoBug(err), + .FAULT => |err| return syscall.errnoBug(err), + else => |err| return syscall.unexpectedErrno(err), } } } @@ -8643,36 +8639,33 @@ fn fileReadStreamingPosix(file: File, data: []const []u8) File.Reader.Error!usiz switch (posix.errno(rc)) { .SUCCESS => { syscall.finish(); + if (rc == 0) return error.EndOfStream; return @intCast(rc); }, .INTR, .TIMEDOUT => { try syscall.checkCancel(); continue; }, - else => |e| { + .BADF => { syscall.finish(); - switch (e) { - .INVAL => |err| return errnoBug(err), - .FAULT => |err| return errnoBug(err), - .AGAIN => return error.WouldBlock, - .BADF => { - if (native_os == .wasi) return error.IsDir; // File operation on directory. - return error.NotOpenForReading; - }, - .IO => return error.InputOutput, - .ISDIR => return error.IsDir, - .NOBUFS => return error.SystemResources, - .NOMEM => return error.SystemResources, - .NOTCONN => return error.SocketUnconnected, - .CONNRESET => return error.ConnectionResetByPeer, - else => |err| return posix.unexpectedErrno(err), - } + if (native_os == .wasi) return error.IsDir; // File operation on directory. + return error.NotOpenForReading; }, + .AGAIN => return syscall.fail(error.WouldBlock), + .IO => return syscall.fail(error.InputOutput), + .ISDIR => return syscall.fail(error.IsDir), + .NOBUFS => return syscall.fail(error.SystemResources), + .NOMEM => return syscall.fail(error.SystemResources), + .NOTCONN => return syscall.fail(error.SocketUnconnected), + .CONNRESET => return syscall.fail(error.ConnectionResetByPeer), + .INVAL => |err| return syscall.errnoBug(err), + .FAULT => |err| return syscall.errnoBug(err), + else => |err| return syscall.unexpectedErrno(err), } } } -fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!usize { +fn fileReadStreamingWindows(file: File, data: []const []u8) File.ReadStreamingError!usize { var io_status_block: windows.IO_STATUS_BLOCK = undefined; if (ntReadFile(file.handle, data, &io_status_block)) |result| switch (result) { .status => return ntReadFileResult(&io_status_block), @@ -8707,11 +8700,9 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.Reader.Error!us fn ntReadFileResult(io_status_block: *windows.IO_STATUS_BLOCK) !usize { switch (io_status_block.u.Status) { - .SUCCESS => { - assert(io_status_block.Information != 0); - return io_status_block.Information; - }, - .END_OF_FILE, .PIPE_BROKEN => return 0, + .SUCCESS => return io_status_block.Information, + .END_OF_FILE => return error.EndOfStream, + .PIPE_BROKEN => return error.EndOfStream, .PENDING => unreachable, .INVALID_DEVICE_REQUEST => return error.IsDir, .LOCK_NOT_GRANTED => return error.LockViolation, @@ -8749,15 +8740,9 @@ fn ntReadFile(handle: windows.HANDLE, data: []const []u8, iosb: *windows.IO_STAT return .pending; }, .SUCCESS => { - // Only END_OF_FILE is the true end. 
- if (iosb.Information == 0) { - try syscall.checkCancel(); - continue; - } else { - syscall.finish(); - iosb.u.Status = .SUCCESS; - return .status; - } + syscall.finish(); + iosb.u.Status = .SUCCESS; + return .status; }, .CANCELLED => { try syscall.checkCancel(); diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 5ccc46778b..d0ee9e556f 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -984,7 +984,7 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff var bytes_read: usize = 0; while (true) { const n = file.readStreaming(io, &.{pipe_buf[bytes_read..]}) catch |err| switch (err) { - error.WouldBlock => break, + error.WouldBlock, error.EndOfStream => break, else => |e| { std.log.debug("failed to read child progress data: {t}", .{e}); main_storage.completed_count = 0; @@ -992,7 +992,6 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff continue :main_loop; }, }; - if (n == 0) break; if (opt_saved_metadata) |m| { if (m.remaining_read_trash_bytes > 0) { assert(bytes_read == 0); diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index fe6dfa389d..e226fb7a9b 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -176,19 +176,21 @@ pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) while (remaining > 0) { try batch.wait(io, options.timeout); while (batch.next()) |op| { - const n = try reads[op].file_read_streaming.status.result; - if (n == 0) { - remaining -= 1; - } else { - lists[op].items.len += n; - if (lists[op].items.len > @intFromEnum(limits[op])) return error.StreamTooLong; - if (options.allocator) |gpa| try lists[op].ensureUnusedCapacity(gpa, 1); - const cap = lists[op].unusedCapacitySlice(); - if (cap.len == 0) return error.StreamTooLong; - vecs[op][0] = cap; - reads[op].file_read_streaming.status = .{ .unstarted = {} }; - batch.add(op); - } + const n = reads[op].file_read_streaming.status.result catch |err| switch (err) { + error.EndOfStream => { + remaining -= 1; + continue; + }, + else => |e| return e, + }; + lists[op].items.len += n; + if (lists[op].items.len > @intFromEnum(limits[op])) return error.StreamTooLong; + if (options.allocator) |gpa| try lists[op].ensureUnusedCapacity(gpa, 1); + const cap = lists[op].unusedCapacitySlice(); + if (cap.len == 0) return error.StreamTooLong; + vecs[op][0] = cap; + reads[op].file_read_streaming.status = .{ .unstarted = {} }; + batch.add(op); } } } diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 5046e2f51b..b32b554dee 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -420,7 +420,6 @@ pub fn resolveTargetQuery(io: Io, query: Target.Query) DetectError!Target { error.Canceled => |e| return e, error.Unexpected => |e| return e, error.WouldBlock => return error.Unexpected, - error.BrokenPipe => return error.Unexpected, error.ConnectionResetByPeer => return error.Unexpected, error.NotOpenForReading => return error.Unexpected, error.SocketUnconnected => return error.Unexpected, From 6a1fd3c69db486df7a805d211e484c60efe294ae Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Jan 2026 00:01:41 -0800 Subject: [PATCH 45/65] std.Io.File.MultiReader: make checkAnyError exclude EndOfStream --- lib/std/Build/Step/Run.zig | 6 ++---- lib/std/Io/File/MultiReader.zig | 10 ++++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index 4d3cda54c9..55fee9a286 100644 --- a/lib/std/Build/Step/Run.zig +++ 
b/lib/std/Build/Step/Run.zig @@ -1385,14 +1385,12 @@ fn runCommand( break :term spawnChildAndCollect(run, interp_argv.items, &environ_map, has_side_effects, options, fuzz_context) catch |e| { if (!run.failing_to_execute_foreign_is_an_error) return error.MakeSkipped; if (e == error.MakeFailed) return error.MakeFailed; // error already reported - return step.fail("unable to spawn interpreter {s}: {s}", .{ - interp_argv.items[0], @errorName(e), - }); + return step.fail("unable to spawn interpreter {s}: {t}", .{ interp_argv.items[0], e }); }; } if (err == error.MakeFailed) return error.MakeFailed; // error already reported - return step.fail("failed to spawn and capture stdio from {s}: {s}", .{ argv[0], @errorName(err) }); + return step.fail("failed to spawn and capture stdio from {s}: {t}", .{ argv[0], err }); }; const generic_result = opt_generic_result orelse { diff --git a/lib/std/Io/File/MultiReader.zig b/lib/std/Io/File/MultiReader.zig index 08ad76000c..7a0f8de068 100644 --- a/lib/std/Io/File/MultiReader.zig +++ b/lib/std/Io/File/MultiReader.zig @@ -17,7 +17,8 @@ pub const Context = struct { err: ?Error, }; -pub const Error = Allocator.Error || File.ReadStreamingError || Io.ConcurrentError; +pub const Error = UnendingError || error{EndOfStream}; +pub const UnendingError = Allocator.Error || File.Reader.Error || Io.ConcurrentError; /// Trailing: /// * `contexts: [len]Context` @@ -126,13 +127,14 @@ pub fn reader(mr: *MultiReader, index: usize) *Io.Reader { } /// Checks for errors in all streams, prioritizing `error.Canceled` if it -/// occurred anywhere. -pub fn checkAnyError(mr: *const MultiReader) Error!void { +/// occurred anywhere, and ignoring `error.EndOfStream`. +pub fn checkAnyError(mr: *const MultiReader) UnendingError!void { const contexts = mr.streams.contexts(); - var other: Error!void = {}; + var other: UnendingError!void = {}; for (contexts) |*context| { if (context.err) |err| switch (err) { error.Canceled => |e| return e, + error.EndOfStream => continue, else => |e| other = e, }; } From b2816f26980f7ac9f0d87c844320e29dfbb6269e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Jan 2026 02:27:20 -0800 Subject: [PATCH 46/65] build.zig: only-c implies no-lib --- build.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.zig b/build.zig index aee378bdb2..3ed237fa5e 100644 --- a/build.zig +++ b/build.zig @@ -29,7 +29,7 @@ pub fn build(b: *std.Build) !void { const use_zig_libcxx = b.option(bool, "use-zig-libcxx", "If libc++ is needed, use zig's bundled version, don't try to integrate with the system") orelse false; const test_step = b.step("test", "Run all the tests"); - const skip_install_lib_files = b.option(bool, "no-lib", "skip copying of lib/ files and langref to installation prefix. Useful for development") orelse false; + const skip_install_lib_files = b.option(bool, "no-lib", "skip copying of lib/ files and langref to installation prefix. 
Useful for development") orelse only_c; const skip_install_langref = b.option(bool, "no-langref", "skip copying of langref to the installation prefix") orelse skip_install_lib_files; const std_docs = b.option(bool, "std-docs", "include standard library autodocs") orelse false; const no_bin = b.option(bool, "no-bin", "skip emitting compiler binary") orelse false; From 687123a85eaac8b7c290b21c346e8aeb8470dfcb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Jan 2026 17:43:42 -0800 Subject: [PATCH 47/65] std.process.run: use Io.File.MultiReader and delete the special-cased function --- lib/std/process.zig | 49 +++++++++++++++------------ lib/std/process/Child.zig | 70 --------------------------------------- 2 files changed, 28 insertions(+), 91 deletions(-) diff --git a/lib/std/process.zig b/lib/std/process.zig index 6f3c155f6d..739027da07 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -453,16 +453,16 @@ pub fn spawnPath(io: Io, dir: Io.Dir, options: SpawnOptions) SpawnError!Child { return io.vtable.processSpawnPath(io.userdata, dir, options); } -pub const RunError = SpawnError || Child.CollectOutputError; +pub const RunError = SpawnError || error{ + StreamTooLong, +} || Io.ConcurrentError || Allocator.Error || Io.File.Reader.Error || Io.Timeout.Error; pub const RunOptions = struct { argv: []const []const u8, stderr_limit: Io.Limit = .unlimited, stdout_limit: Io.Limit = .unlimited, - /// How many bytes to initially allocate for stderr. - stderr_reserve_amount: usize = 1, - /// How many bytes to initially allocate for stdout. - stdout_reserve_amount: usize = 1, + /// How many bytes to initially allocate for stderr and stdout. + reserve_amount: usize = 64, /// Set to change the current working directory when spawning the child process. cwd: ?[]const u8 = null, @@ -516,29 +516,36 @@ pub fn run(gpa: Allocator, io: Io, options: RunOptions) RunError!RunResult { }); defer child.kill(io); - var stdout: std.ArrayList(u8) = .empty; - defer stdout.deinit(gpa); - var stderr: std.ArrayList(u8) = .empty; - defer stderr.deinit(gpa); + var multi_reader_buffer: Io.File.MultiReader.Buffer(2) = undefined; + var multi_reader: Io.File.MultiReader = undefined; + multi_reader.init(gpa, io, multi_reader_buffer.toStreams(), &.{ child.stdout.?, child.stderr.? 
}); + defer multi_reader.deinit(); - try stdout.ensureUnusedCapacity(gpa, options.stdout_reserve_amount); - try stderr.ensureUnusedCapacity(gpa, options.stderr_reserve_amount); + const stdout_reader = multi_reader.reader(0); + const stderr_reader = multi_reader.reader(1); - try child.collectOutput(io, .{ - .allocator = gpa, - .stdout = &stdout, - .stderr = &stderr, - .stdout_limit = options.stdout_limit, - .stderr_limit = options.stderr_limit, - .timeout = options.timeout, - }); + while (multi_reader.fill(options.reserve_amount, options.timeout)) |_| { + if (options.stdout_limit.toInt()) |limit| { + if (stdout_reader.buffered().len > limit) + return error.StreamTooLong; + } + if (options.stderr_limit.toInt()) |limit| { + if (stderr_reader.buffered().len > limit) + return error.StreamTooLong; + } + } else |err| switch (err) { + error.EndOfStream => {}, + else => |e| return e, + } + + try multi_reader.checkAnyError(); const term = try child.wait(io); - const stdout_slice = try stdout.toOwnedSlice(gpa); + const stdout_slice = try multi_reader.toOwnedSlice(0); errdefer gpa.free(stdout_slice); - const stderr_slice = try stderr.toOwnedSlice(gpa); + const stderr_slice = try multi_reader.toOwnedSlice(1); errdefer gpa.free(stderr_slice); return .{ diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index e226fb7a9b..c87d221a95 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -124,73 +124,3 @@ pub fn wait(child: *Child, io: Io) WaitError!Term { assert(child.id != null); return io.vtable.childWait(io.userdata, child); } - -pub const CollectOutputError = error{ - StreamTooLong, -} || Io.ConcurrentError || Allocator.Error || Io.File.Reader.Error || Io.Timeout.Error; - -pub const CollectOutputOptions = struct { - stdout: *std.ArrayList(u8), - stderr: *std.ArrayList(u8), - /// Used for `stdout` and `stderr`. If not provided, only the existing - /// capacity will be used. - allocator: ?Allocator = null, - stdout_limit: Io.Limit = .unlimited, - stderr_limit: Io.Limit = .unlimited, - timeout: Io.Timeout = .none, -}; - -/// Collect the output from the process's stdout and stderr. Will return once -/// all output has been collected. This does not mean that the process has -/// ended. `wait` should still be called to wait for and clean up the process. -/// -/// The process must have been started with stdout and stderr set to -/// `process.SpawnOptions.StdIo.pipe`. -pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) CollectOutputError!void { - const files: [2]Io.File = .{ child.stdout.?, child.stderr.? 
}; - const lists: [2]*std.ArrayList(u8) = .{ options.stdout, options.stderr }; - const limits: [2]Io.Limit = .{ options.stdout_limit, options.stderr_limit }; - var reads: [2]Io.Operation = undefined; - var vecs: [2][1][]u8 = undefined; - var ring: [2]u32 = undefined; - var batch: Io.Batch = .init(&reads, &ring); - defer { - batch.cancel(io); - while (batch.next()) |op| { - lists[op].items.len += reads[op].file_read_streaming.status.result catch continue; - } - } - var remaining: usize = 0; - for (0.., &reads, &lists, &files, &vecs) |op, *read, list, file, *vec| { - if (options.allocator) |gpa| try list.ensureUnusedCapacity(gpa, 1); - const cap = list.unusedCapacitySlice(); - if (cap.len == 0) return error.StreamTooLong; - vec[0] = cap; - read.* = .{ .file_read_streaming = .{ - .file = file, - .data = vec, - } }; - batch.add(op); - remaining += 1; - } - while (remaining > 0) { - try batch.wait(io, options.timeout); - while (batch.next()) |op| { - const n = reads[op].file_read_streaming.status.result catch |err| switch (err) { - error.EndOfStream => { - remaining -= 1; - continue; - }, - else => |e| return e, - }; - lists[op].items.len += n; - if (lists[op].items.len > @intFromEnum(limits[op])) return error.StreamTooLong; - if (options.allocator) |gpa| try lists[op].ensureUnusedCapacity(gpa, 1); - const cap = lists[op].unusedCapacitySlice(); - if (cap.len == 0) return error.StreamTooLong; - vecs[op][0] = cap; - reads[op].file_read_streaming.status = .{ .unstarted = {} }; - batch.add(op); - } - } -} From 7a13d57916aae2840047cc3461aa44c3a72ca546 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Jan 2026 17:59:46 -0800 Subject: [PATCH 48/65] std.Io.Threaded: add missing check for pending status in batchCancel --- lib/std/Io/Threaded.zig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index c9c38c6b29..05ff37b76e 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2623,6 +2623,7 @@ fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { const metadatas_ptr: [*]WinOpMetadata = @ptrCast(@alignCast(reserved)); const metadatas = metadatas_ptr[0..b.operations.len]; for (metadatas, 0..) 
|*metadata, op| { + if (!metadata.pending) continue; const done = @atomicLoad(windows.NTSTATUS, &metadata.iosb.u.Status, .acquire) != .PENDING; if (done) continue; switch (operations[op]) { @@ -2633,6 +2634,7 @@ fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { } } for (metadatas) |*metadata| { + if (!metadata.pending) continue; while (@atomicLoad(windows.NTSTATUS, &metadata.iosb.u.Status, .acquire) == .PENDING) { waitForApcOrAlert(); } From d770e14e001daaea9eb921c1630af69c518468a2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Jan 2026 18:19:12 -0800 Subject: [PATCH 49/65] std.Io.Threaded.batchWaitWindows: eager result sets any_done true Thanks jacobly for finding the bug --- lib/std/Io/Threaded.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 05ff37b76e..cfcb9f65df 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2677,6 +2677,8 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa b.user.complete_tail = complete_tail; } + var any_done = false; + while (submit_head != submit_tail) : (submit_head = submit_head.next(len)) { const op = ring[submit_head.index(len)]; const operation = &operations[op]; @@ -2686,6 +2688,7 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa .noop => |*o| { _ = o.status.unstarted; o.status = .{ .result = {} }; + any_done = true; submitComplete(ring, &complete_tail, op); }, .file_read_streaming => |*o| { @@ -2693,6 +2696,7 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa switch (try ntReadFile(o.file.handle, o.data, &metadata.iosb)) { .status => { o.status = .{ .result = ntReadFileResult(&metadata.iosb) }; + any_done = true; submitComplete(ring, &complete_tail, op); }, .pending => { @@ -2707,7 +2711,6 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa var delay_interval: windows.LARGE_INTEGER = timeoutToWindowsInterval(timeout); while (true) { - var any_done = false; var any_pending = false; for (metadatas, 0..) |*metadata, op_usize| { if (!metadata.pending) continue; From 3320e6a1ae453d40dd78ff3abf6c8543bec2555d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Jan 2026 18:40:48 -0800 Subject: [PATCH 50/65] std.Io.Threaded.batchWait better fix for any_done It is legal to call batchWait with already completed operations in the ring. In such case, we need to avoid waiting in the syscall. The any_done flag was a poor way of tracking state we already have: whether the completion queue is empty. This problem affects the posix poll implementation as well. Thanks again to jacobly for finding the problem. --- lib/std/Io/Threaded.zig | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index cfcb9f65df..29f5787574 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2560,17 +2560,31 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch. const deadline = timeout.toDeadline(t_io) catch return error.UnsupportedClock; const max_poll_ms = std.math.maxInt(i32); while (true) { - const timeout_ms: i32 = if (deadline) |d| t: { + const timeout_ms: i32 = t: { + if (b.user.complete_head != complete_tail) { + // It is legal to call batchWait with already completed + // operations in the ring. 
In such case, we need to avoid + // blocking in the poll syscall, but we can still take this + // opportunity to find additional ready operations. + break :t 0; + } + const d = deadline orelse break :t -1; const duration = d.durationFromNow(t_io) catch return error.UnsupportedClock; if (duration.raw.nanoseconds <= 0) return error.Timeout; break :t @intCast(@min(max_poll_ms, duration.raw.toMilliseconds())); - } else -1; + }; const syscall = try Syscall.start(); const rc = posix.system.poll(&poll_buffer, poll_i, timeout_ms); syscall.finish(); switch (posix.errno(rc)) { .SUCCESS => { if (rc == 0) { + if (b.user.complete_head != complete_tail) { + // Since there are already completions available in the + // queue, this is neither a timeout nor a case for + // retrying. + return; + } // Although spurious timeouts are OK, when no deadline is // passed we must not return `error.Timeout`. if (deadline == null) continue; @@ -2677,8 +2691,6 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa b.user.complete_tail = complete_tail; } - var any_done = false; - while (submit_head != submit_tail) : (submit_head = submit_head.next(len)) { const op = ring[submit_head.index(len)]; const operation = &operations[op]; @@ -2688,7 +2700,6 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa .noop => |*o| { _ = o.status.unstarted; o.status = .{ .result = {} }; - any_done = true; submitComplete(ring, &complete_tail, op); }, .file_read_streaming => |*o| { @@ -2696,7 +2707,6 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa switch (try ntReadFile(o.file.handle, o.data, &metadata.iosb)) { .status => { o.status = .{ .result = ntReadFileResult(&metadata.iosb) }; - any_done = true; submitComplete(ring, &complete_tail, op); }, .pending => { @@ -2725,11 +2735,10 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa o.status = .{ .result = ntReadFileResult(&metadata.iosb) }; }, } - any_done = true; metadata.pending = false; submitComplete(ring, &complete_tail, op); } - if (any_done) return; + if (b.user.complete_head != complete_tail) return; if (!any_pending) return; const alertable_syscall = try AlertableSyscall.start(); const delay_rc = windows.ntdll.NtDelayExecution(windows.TRUE, &delay_interval); From 4dd7fe90a2b541a44aafc3a9596445387a3aed49 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Jan 2026 21:02:43 -0800 Subject: [PATCH 51/65] std.Io.Threaded: compress ntReadFile logic Just use the ntstatus field rather than an additional enum --- lib/std/Io/Threaded.zig | 137 ++++++++++++++++++---------------------- 1 file changed, 61 insertions(+), 76 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 29f5787574..fbe12ada55 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2695,7 +2695,10 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa const op = ring[submit_head.index(len)]; const operation = &operations[op]; const metadata = &metadatas[op]; - metadata.* = .{ .iosb = undefined, .pending = false }; + metadata.* = .{ .iosb = .{ + .u = .{ .Status = .PENDING }, + .Information = 0, + }, .pending = false }; switch (operation.*) { .noop => |*o| { _ = o.status.unstarted; @@ -2704,15 +2707,13 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa }, .file_read_streaming => |*o| { _ = o.status.unstarted; - switch (try ntReadFile(o.file.handle, o.data, &metadata.iosb)) { - .status => { 
- o.status = .{ .result = ntReadFileResult(&metadata.iosb) }; - submitComplete(ring, &complete_tail, op); - }, - .pending => { - o.status = .{ .pending = b }; - metadata.pending = true; - }, + try ntReadFile(o.file.handle, o.data, &metadata.iosb); + if (@atomicLoad(windows.NTSTATUS, &metadata.iosb.u.Status, .acquire) == .PENDING) { + o.status = .{ .pending = b }; + metadata.pending = true; + } else { + o.status = .{ .result = ntReadFileResult(&metadata.iosb) }; + submitComplete(ring, &complete_tail, op); } }, } @@ -8680,44 +8681,36 @@ fn fileReadStreamingPosix(file: File, data: []const []u8) File.ReadStreamingErro } fn fileReadStreamingWindows(file: File, data: []const []u8) File.ReadStreamingError!usize { - var io_status_block: windows.IO_STATUS_BLOCK = undefined; - if (ntReadFile(file.handle, data, &io_status_block)) |result| switch (result) { - .status => return ntReadFileResult(&io_status_block), - .pending => { - // Once we get here we received PENDING so we must not return from the - // function until the operation completes. - defer while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { - waitForApcOrAlert(); - }; - - const alertable_syscall = AlertableSyscall.start() catch |err| switch (err) { - error.Canceled => |e| { - _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block); - return e; - }, - }; - waitForApcOrAlert(); - while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { - alertable_syscall.checkCancel() catch |err| switch (err) { - error.Canceled => |e| { - _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block); - return e; - }, - }; - waitForApcOrAlert(); - } - alertable_syscall.finish(); - }, - } else |err| return err; + var io_status_block: windows.IO_STATUS_BLOCK = .{ + .u = .{ .Status = .PENDING }, + .Information = 0, + }; + try ntReadFile(file.handle, data, &io_status_block); + while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { + // Once we get here we must not return from the function until the + // operation completes, thereby releasing reference to io_status_block. 
+ const alertable_syscall = AlertableSyscall.start() catch |err| switch (err) { + error.Canceled => |e| { + _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block); + while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { + waitForApcOrAlert(); + } + return e; + }, + }; + waitForApcOrAlert(); + alertable_syscall.finish(); + } return ntReadFileResult(&io_status_block); } -fn ntReadFileResult(io_status_block: *windows.IO_STATUS_BLOCK) !usize { +fn ntReadFileResult(io_status_block: *const windows.IO_STATUS_BLOCK) !usize { switch (io_status_block.u.Status) { + .PENDING => unreachable, + .CANCELLED => unreachable, .SUCCESS => return io_status_block.Information, .END_OF_FILE => return error.EndOfStream, .PIPE_BROKEN => return error.EndOfStream, - .PENDING => unreachable, .INVALID_DEVICE_REQUEST => return error.IsDir, .LOCK_NOT_GRANTED => return error.LockViolation, .ACCESS_DENIED => return error.AccessDenied, @@ -8725,50 +8718,42 @@ fn ntReadFileResult(io_status_block: *windows.IO_STATUS_BLOCK) !usize { } } -fn ntReadFile(handle: windows.HANDLE, data: []const []u8, iosb: *windows.IO_STATUS_BLOCK) Io.Cancelable!enum { status, pending } { +fn ntReadFile(handle: windows.HANDLE, data: []const []u8, iosb: *windows.IO_STATUS_BLOCK) Io.Cancelable!void { var index: usize = 0; while (index < data.len and data[index].len == 0) index += 1; if (index == data.len) { iosb.u.Status = .SUCCESS; iosb.Information = 0; - return .status; + return; } const buffer = data[index]; const syscall: Syscall = try .start(); - while (true) { - iosb.u.Status = .PENDING; - switch (windows.ntdll.NtReadFile( - handle, - null, // event - noopApc, // apc callback - null, // apc context - iosb, - buffer.ptr, - @min(std.math.maxInt(u32), buffer.len), - null, // byte offset - null, // key - )) { - .PENDING => { - syscall.finish(); - return .pending; - }, - .SUCCESS => { - syscall.finish(); - iosb.u.Status = .SUCCESS; - return .status; - }, - .CANCELLED => { - try syscall.checkCancel(); - continue; - }, - else => |status| { - syscall.finish(); - iosb.u.Status = status; - return .status; - }, - } - } + while (true) switch (windows.ntdll.NtReadFile( + handle, + null, // event + noopApc, // apc callback + null, // apc context + iosb, + buffer.ptr, + @min(std.math.maxInt(u32), buffer.len), + null, // byte offset + null, // key + )) { + .PENDING => { + syscall.finish(); + return; + }, + .CANCELLED => { + try syscall.checkCancel(); + continue; + }, + else => |status| { + syscall.finish(); + iosb.u.Status = status; + return; + }, + }; } fn fileReadPositionalPosix(file: File, data: []const []u8, offset: u64) File.ReadPositionalError!usize { From 8f8aa8346a8babe43fb85d7033db2a329d602366 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 29 Jan 2026 13:31:44 -0800 Subject: [PATCH 52/65] std.Io.Threaded: ntReadFileResult handles EOF + bytes available --- lib/std/Io/Threaded.zig | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index fbe12ada55..8e820f71d0 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -8709,8 +8709,10 @@ fn ntReadFileResult(io_status_block: *const windows.IO_STATUS_BLOCK) !usize { .PENDING => unreachable, .CANCELLED => unreachable, .SUCCESS => return io_status_block.Information, - .END_OF_FILE => return error.EndOfStream, - .PIPE_BROKEN => return error.EndOfStream, + .END_OF_FILE, .PIPE_BROKEN => { + if (io_status_block.Information == 0) return error.EndOfStream; + return 
io_status_block.Information; + }, .INVALID_DEVICE_REQUEST => return error.IsDir, .LOCK_NOT_GRANTED => return error.LockViolation, .ACCESS_DENIED => return error.AccessDenied, From c2679feaaab26cc76fc381c61d49488a96f1f63c Mon Sep 17 00:00:00 2001 From: Matthew Lugg Date: Fri, 30 Jan 2026 00:44:08 +0000 Subject: [PATCH 53/65] std.Io.Threaded: fix ntdll timeouts on Windows --- lib/std/Io/Threaded.zig | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 8e820f71d0..287bed5d09 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2719,7 +2719,13 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa } } - var delay_interval: windows.LARGE_INTEGER = timeoutToWindowsInterval(timeout); + const deadline: ?Io.Clock.Timestamp = timeout.toDeadline(ioBasic(t)) catch |err| switch (err) { + error.Unexpected => deadline: { + recoverableOsBugDetected(); + break :deadline .{ .raw = .{ .nanoseconds = 0 }, .clock = .awake }; + }, + error.UnsupportedClock => |e| return e, + }; while (true) { var any_pending = false; @@ -2741,6 +2747,16 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa } if (b.user.complete_head != complete_tail) return; if (!any_pending) return; + var delay_interval: windows.LARGE_INTEGER = interval: { + const d = deadline orelse break :interval std.math.minInt(windows.LARGE_INTEGER); + break :interval t.deadlineToWindowsInterval(d) catch |err| switch (err) { + error.UnsupportedClock => |e| return e, + error.Unexpected => { + recoverableOsBugDetected(); + break :interval -1; + }, + }; + }; const alertable_syscall = try AlertableSyscall.start(); const delay_rc = windows.ntdll.NtDelayExecution(windows.TRUE, &delay_interval); alertable_syscall.finish(); @@ -16784,19 +16800,18 @@ fn park(opt_deadline: ?Io.Clock.Timestamp, addr_hint: ?*const anyopaque) error{T } } -fn timeoutToWindowsInterval(timeout: Io.Timeout) windows.LARGE_INTEGER { - switch (timeout) { - .none => { - return std.math.minInt(windows.LARGE_INTEGER); // infinite timeout +fn deadlineToWindowsInterval(t: *Io.Threaded, deadline: Io.Clock.Timestamp) Io.Clock.Error!windows.LARGE_INTEGER { + // ntdll only supports two combinations: + // * real-time (`.real`) sleeps with absolute deadlines + // * monotonic (`.awake`/`.boot`) sleeps with relative durations + switch (deadline.clock) { + .cpu_process, .cpu_thread => unreachable, // cannot sleep for CPU time + .real => { + return @intCast(@max(@divTrunc(deadline.raw.nanoseconds, 100), 0)); }, - .deadline => |deadline| { - const nanoseconds = deadline.raw.nanoseconds; - return @intCast(@divTrunc(nanoseconds, 100)); - }, - .duration => |duration| { - const now_timestamp = nowWindows(duration.clock) catch unreachable; - const deadline_ns = now_timestamp.nanoseconds + duration.raw.nanoseconds; - return @intCast(@divTrunc(deadline_ns, 100)); + .awake, .boot => { + const duration = try deadline.durationFromNow(ioBasic(t)); + return @intCast(@min(@divTrunc(-duration.raw.nanoseconds, 100), -1)); }, } } From f8828e543ab6331395ef4d9f9a2b53770028b6b2 Mon Sep 17 00:00:00 2001 From: Matthew Lugg Date: Fri, 30 Jan 2026 00:44:29 +0000 Subject: [PATCH 54/65] std.Build: fully upgrade Step.Run to std.Io timing (and fix a typo) --- lib/std/Build/Step/Run.zig | 108 +++++++++++++++++-------------------- 1 file changed, 48 insertions(+), 60 deletions(-) diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig 
index 55fee9a286..68d0ec480c 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -1587,9 +1587,13 @@ fn spawnChildAndCollect( }; if (run.stdio == .zig_test) { - var timer = try std.time.Timer.start(); - defer run.step.result_duration_ns = timer.read(); - try evalZigTest(run, spawn_options, options, fuzz_context); + const started: Io.Clock.Timestamp = try .now(io, .awake); + const result = evalZigTest(run, spawn_options, options, fuzz_context) catch |err| switch (err) { + error.Canceled => |e| return e, + else => |e| e, + }; + run.step.result_duration_ns = @intCast((try started.untilNow(io)).raw.nanoseconds); + try result; return null; } else { const inherit = spawn_options.stdout == .inherit or spawn_options.stderr == .inherit; @@ -1602,10 +1606,14 @@ fn spawnChildAndCollect( } else .no_color; defer if (inherit) io.unlockStderr(); try setColorEnvironmentVariables(run, environ_map, terminal_mode); - var timer = try std.time.Timer.start(); - const res = try evalGeneric(run, spawn_options); - run.step.result_duration_ns = timer.read(); - return .{ .term = res.term, .stdout = res.stdout, .stderr = res.stderr }; + + const started: Io.Clock.Timestamp = try .now(io, .awake); + const result = evalGeneric(run, spawn_options) catch |err| switch (err) { + error.Canceled => |e| return e, + else => |e| e, + }; + run.step.result_duration_ns = @intCast((try started.untilNow(io)).raw.nanoseconds); + return try result; } } @@ -1861,9 +1869,7 @@ fn waitZigTest( var active_test_index: ?u32 = null; - // `null` means this host does not support `std.time.Timer`. This timer is `reset()` whenever we - // change `active_test_index`, i.e. whenever a test starts or finishes. - var timer: ?std.time.Timer = std.time.Timer.start() catch null; + var last_update: Io.Clock.Timestamp = try .now(io, .awake); var coverage_id: ?u64 = null; @@ -1871,16 +1877,27 @@ fn waitZigTest( // test. For instance, if the test runner leaves this much time between us requesting a test to // start and it acknowledging the test starting, we terminate the child and raise an error. This // *should* never happen, but could in theory be caused by some very unlucky IB in a test. - const response_timeout_ns: ?u64 = ns: { - if (fuzz_context != null) break :ns null; // don't timeout fuzz tests - break :ns @max(options.unit_test_timeout_ns orelse 0, 60 * std.time.ns_per_s); + const response_timeout: ?Io.Clock.Duration = t: { + if (fuzz_context != null) break :t null; // don't timeout fuzz tests + const ns = @max(options.unit_test_timeout_ns orelse 0, 60 * std.time.ns_per_s); + break :t .{ .clock = .awake, .raw = .fromNanoseconds(ns) }; }; + const test_timeout: ?Io.Clock.Duration = if (options.unit_test_timeout_ns) |ns| .{ + .clock = .awake, + .raw = .fromNanoseconds(ns), + } else null; const stdout = multi_reader.reader(0); const stderr = multi_reader.reader(1); const Header = std.zig.Server.Message.Header; while (true) { + const timeout: Io.Timeout = t: { + const opt_duration = if (active_test_index == null) response_timeout else test_timeout; + const duration = opt_duration orelse break :t .none; + break :t .{ .deadline = last_update.addDuration(duration) }; + }; + // This block is exited when `stdout` contains enough bytes for a `Header`. header_ready: { if (stdout.buffered().len >= @sizeOf(Header)) { @@ -1888,65 +1905,33 @@ fn waitZigTest( break :header_ready; } - // Always `null` if `timer` is `null`. 
-            const opt_timeout_ns: ?u64 = ns: {
-                if (timer == null) break :ns null;
-                if (active_test_index == null) break :ns response_timeout_ns;
-                break :ns options.unit_test_timeout_ns;
-            };
-
-            const timeout: Io.Timeout = if (opt_timeout_ns) |timeout_ns| .{ .duration = .{
-                .raw = .fromNanoseconds(timeout_ns -| timer.?.read()),
-                .clock = .awake,
-            } } else .none;
-
             multi_reader.fill(64, timeout) catch |err| switch (err) {
-                error.Timeout, error.EndOfStream => return .{ .no_poll = .{
+                error.Timeout => return .{ .timeout = .{
                     .active_test_index = active_test_index,
-                    .ns_elapsed = if (timer) |*t| t.read() else 0,
+                    .ns_elapsed = @intCast((try last_update.untilNow(io)).raw.nanoseconds),
+                } },
+                error.EndOfStream => return .{ .no_poll = .{
+                    .active_test_index = active_test_index,
+                    .ns_elapsed = @intCast((try last_update.untilNow(io)).raw.nanoseconds),
                 } },
-                error.UnsupportedClock => {
-                    timer = null;
-                    continue;
-                },
                 else => |e| return e,
             };
 
-            if (stdout.buffered().len >= @sizeOf(Header)) {
-                // There wasn't a header before, but there is one after the `poll`.
-                break :header_ready;
-            }
-
-            if (opt_timeout_ns) |timeout_ns| {
-                const cur_ns = timer.?.read();
-                if (cur_ns >= timeout_ns) return .{ .timeout = .{
-                    .active_test_index = active_test_index,
-                    .ns_elapsed = cur_ns,
-                } };
-            }
             continue;
         }
 
         // There is definitely a header available now -- read it.
         const header = stdout.takeStruct(Header, .little) catch unreachable;
 
         while (stdout.buffered().len < header.bytes_len) {
-            const timeout: Io.Timeout = t: {
-                const t = if (timer) |*t| t else break :t .none;
-                if (response_timeout_ns) |timeout_ns| break :t .{ .duration = .{
-                    .raw = .fromNanoseconds(timeout_ns -| t.read()),
-                    .clock = .awake,
-                } };
-                break :t .none;
-            };
             multi_reader.fill(64, timeout) catch |err| switch (err) {
-                error.Timeout, error.EndOfStream => return .{ .no_poll = .{
+                error.Timeout => return .{ .timeout = .{
                     .active_test_index = active_test_index,
-                    .ns_elapsed = if (timer) |*t| t.read() else 0,
+                    .ns_elapsed = @intCast((try last_update.untilNow(io)).raw.nanoseconds),
+                } },
+                error.EndOfStream => return .{ .no_poll = .{
+                    .active_test_index = active_test_index,
+                    .ns_elapsed = @intCast((try last_update.untilNow(io)).raw.nanoseconds),
                 } },
-                error.UnsupportedClock => {
-                    timer = null;
-                    continue;
-                },
                 else => |e| return e,
             };
         }
@@ -1991,13 +1976,13 @@ fn waitZigTest(
             @memset(opt_metadata.*.?.ns_per_test, std.math.maxInt(u64));
 
             active_test_index = null;
-            if (timer) |*t| t.reset();
+            last_update = try .now(io, .awake);
 
             requestNextTest(io, child.stdin.?, &opt_metadata.*.?, &sub_prog_node) catch |err|
                 return .{ .write_failed = err };
         },
         .test_started => {
             active_test_index = opt_metadata.*.?.next_index - 1;
-            if (timer) |*t| t.reset();
+            last_update = try .now(io, .awake);
         },
         .test_results => {
             assert(fuzz_context == null);
@@ -2040,7 +2025,10 @@ fn waitZigTest(
             }
 
             active_test_index = null;
-            if (timer) |*t| md.ns_per_test[tr_hdr.index] = t.lap();
+
+            const now: Io.Clock.Timestamp = try .now(io, .awake);
+            md.ns_per_test[tr_hdr.index] = @intCast(last_update.durationTo(now).raw.nanoseconds);
+            last_update = now;
 
             requestNextTest(io, child.stdin.?, md, &sub_prog_node) catch |err|
                 return .{ .write_failed = err };
         },

From 866ee4f1c52ae3e5a8ac95c9c5e61b019aa5eadb Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 29 Jan 2026 15:03:33 -0800
Subject: [PATCH 55/65] std.Io.Threaded: handle TIMEOUT from NtDelayExecution

---
 lib/std/Io/Threaded.zig      | 2 +-
 lib/std/os/windows/ntdll.zig | 7 +++++++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig
index 287bed5d09..bccaf24ecd 100644
--- a/lib/std/Io/Threaded.zig
+++ b/lib/std/Io/Threaded.zig
@@ -2761,7 +2761,7 @@ fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.Wa
         const delay_rc = windows.ntdll.NtDelayExecution(windows.TRUE, &delay_interval);
         alertable_syscall.finish();
         switch (delay_rc) {
-            .SUCCESS => {
+            .SUCCESS, .TIMEOUT => {
                 // The thread woke due to the timeout. Although spurious
                 // timeouts are OK, when no deadline is passed we must not
                 // return `error.Timeout`.
diff --git a/lib/std/os/windows/ntdll.zig b/lib/std/os/windows/ntdll.zig
index d68cd1494b..195a457d3b 100644
--- a/lib/std/os/windows/ntdll.zig
+++ b/lib/std/os/windows/ntdll.zig
@@ -594,6 +594,13 @@ pub extern "ntdll" fn NtCancelSynchronousIoFile(
    IoStatusBlock: *IO_STATUS_BLOCK,
 ) callconv(.winapi) NTSTATUS;
 
+/// This function has been observed to return SUCCESS on timeout on Windows 10
+/// and TIMEOUT on Wine 10.0.
+///
+/// On Windows 11, this function has been observed to treat a positive
+/// interval as absolute real time, so such waits can be interrupted by
+/// changes to the system time; negative (relative) intervals are not
+/// affected by system time changes.
 pub extern "ntdll" fn NtDelayExecution(
     Alertable: BOOLEAN,
     DelayInterval: *const LARGE_INTEGER,

From a41ee5994d7f1f8dbecf54cdba0d56864d95ef99 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 29 Jan 2026 17:13:17 -0800
Subject: [PATCH 56/65] std.Build.Step: evalZigProcess handles EndOfStream

and a happy little info log when the process needs to be restarted
---
 lib/std/Build/Step.zig | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
index 0dd4b93280..cfc263b770 100644
--- a/lib/std/Build/Step.zig
+++ b/lib/std/Build/Step.zig
@@ -416,7 +416,8 @@ pub fn evalZigProcess(
     assert(watch);
     if (std.Progress.have_ipc) if (zp.progress_ipc_fd) |fd| prog_node.setIpcFd(fd);
     const result = zigProcessUpdate(s, zp, watch, web_server, gpa) catch |err| switch (err) {
-        error.BrokenPipe => {
+        error.BrokenPipe, error.EndOfStream => |reason| {
+            std.log.info("{s} restart required: {t}", .{ argv[0], reason });
             // Process restart required.
const term = zp.child.wait(io) catch |e| { return s.fail("unable to wait for {s}: {t}", .{ argv[0], e }); From a520355e4ca8f190d5c470413308bb4c8cb8b526 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 29 Jan 2026 22:58:54 -0800 Subject: [PATCH 57/65] std.process: simplify RunError set --- lib/std/process.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/std/process.zig b/lib/std/process.zig index 739027da07..3b5a0ecebd 100644 --- a/lib/std/process.zig +++ b/lib/std/process.zig @@ -453,9 +453,9 @@ pub fn spawnPath(io: Io, dir: Io.Dir, options: SpawnOptions) SpawnError!Child { return io.vtable.processSpawnPath(io.userdata, dir, options); } -pub const RunError = SpawnError || error{ +pub const RunError = error{ StreamTooLong, -} || Io.ConcurrentError || Allocator.Error || Io.File.Reader.Error || Io.Timeout.Error; +} || SpawnError || Io.File.MultiReader.UnendingError || Io.Timeout.Error; pub const RunOptions = struct { argv: []const []const u8, From 2674acdb77f8a144eaedbfa57a1b7fdc70dc1e60 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 30 Jan 2026 01:44:07 -0500 Subject: [PATCH 58/65] Io.Batch: implement alternate API --- lib/std/Io.zig | 245 +++++++------ lib/std/Io/File.zig | 7 +- lib/std/Io/File/MultiReader.zig | 52 ++- lib/std/Io/Threaded.zig | 589 +++++++++++++++++++------------- 4 files changed, 517 insertions(+), 376 deletions(-) diff --git a/lib/std/Io.zig b/lib/std/Io.zig index c00e4619e8..ca77a9836a 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -149,8 +149,9 @@ pub const VTable = struct { futexWaitUncancelable: *const fn (?*anyopaque, ptr: *const u32, expected: u32) void, futexWake: *const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void, - operate: *const fn (?*anyopaque, *Operation) Cancelable!void, - batchWait: *const fn (?*anyopaque, *Batch, Timeout) Batch.WaitError!void, + operate: *const fn (?*anyopaque, Operation) Cancelable!Operation.Result, + batchAwaitAsync: *const fn (?*anyopaque, *Batch) Batch.AwaitAsyncError!void, + batchAwaitConcurrent: *const fn (?*anyopaque, *Batch, Timeout) Batch.AwaitConcurrentError!void, batchCancel: *const fn (?*anyopaque, *Batch) void, dirCreateDir: *const fn (?*anyopaque, Dir, []const u8, Dir.Permissions) Dir.CreateDirError!void, @@ -255,19 +256,14 @@ pub const VTable = struct { }; pub const Operation = union(enum) { - noop: Noop, file_read_streaming: FileReadStreaming, - pub const Noop = struct { - reserved: [2]usize = .{ 0, 0 }, - status: Status(void) = .{ .unstarted = {} }, - }; + pub const Tag = @typeInfo(Operation).@"union".tag_type.?; /// May return 0 reads which is different than `error.EndOfStream`. pub const FileReadStreaming = struct { file: File, data: []const []u8, - status: Status(Error!usize) = .{ .unstarted = {} }, pub const Error = UnendingError || error{EndOfStream}; pub const UnendingError = error{ @@ -290,19 +286,72 @@ pub const Operation = union(enum) { /// lock. 
LockViolation, } || Io.UnexpectedError; + + pub const Result = usize; }; - pub fn Status(Result: type) type { - return union { - unstarted: void, - pending: *Batch, + pub const Result = Result: { + const operation_fields = @typeInfo(Operation).@"union".fields; + var field_names: [operation_fields.len][]const u8 = undefined; + var field_types: [operation_fields.len]type = undefined; + for (operation_fields, &field_names, &field_types) |field, *field_name, *field_type| { + field_name.* = field.name; + field_type.* = field.type.Error!field.type.Result; + } + break :Result @Union(.auto, Tag, &field_names, &field_types, &@splat(.{})); + }; + + pub const Storage = union { + unused: List.DoubleNode, + submission: Submission, + pending: Pending, + completion: Completion, + + pub const Submission = struct { + node: List.SingleNode, + operation: Operation, + }; + + pub const Pending = struct { + node: List.DoubleNode, + tag: Tag, + context: [3]usize, + }; + + pub const Completion = struct { + node: List.SingleNode, result: Result, }; - } + }; + + pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, + + pub fn fromIndex(i: usize) OptionalIndex { + const oi: OptionalIndex = @enumFromInt(i); + assert(oi != .none); + return oi; + } + + pub fn toIndex(oi: OptionalIndex) u32 { + assert(oi != .none); + return @intFromEnum(oi); + } + }; + pub const List = struct { + head: OptionalIndex, + tail: OptionalIndex, + + pub const empty: List = .{ .head = .none, .tail = .none }; + + pub const SingleNode = struct { next: OptionalIndex }; + pub const DoubleNode = struct { prev: OptionalIndex, next: OptionalIndex }; + }; }; /// Performs one `Operation`. -pub fn operate(io: Io, operation: *Operation) Cancelable!void { +pub fn operate(io: Io, operation: Operation) Cancelable!Operation.Result { return io.vtable.operate(io.userdata, operation); } @@ -312,116 +361,96 @@ pub fn operate(io: Io, operation: *Operation) Cancelable!void { /// This is a low-level abstraction based on `Operation`. For a higher /// level API that operates on `Future`, see `Select`. pub const Batch = struct { - operations: []Operation, - ring: [*]u32, - user: struct { - submit_tail: RingIndex, - complete_head: RingIndex, - complete_tail: RingIndex, - }, - impl: struct { - submit_head: RingIndex, - submit_tail: RingIndex, - complete_tail: RingIndex, - reserved: ?*anyopaque, - }, - - pub const RingIndex = enum(u32) { - _, - - pub fn index(ri: RingIndex, len: u31) u31 { - const i = @intFromEnum(ri); - assert(i < @as(u32, len) * 2); - return @intCast(if (i < len) i else i - len); - } - - pub fn prev(ri: RingIndex, len: u31) RingIndex { - const i = @intFromEnum(ri); - const double_len = @as(u32, len) * 2; - assert(i <= double_len); - return @enumFromInt((if (i > 0) i else double_len) - 1); - } - - pub fn next(ri: RingIndex, len: u31) RingIndex { - const i = @intFromEnum(ri) + 1; - const double_len = @as(u32, len) * 2; - assert(i <= double_len); - return @enumFromInt(if (i < double_len) i else 0); - } - }; + storage: []Operation.Storage, + unused: Operation.List, + submissions: Operation.List, + pending: Operation.List, + completions: Operation.List, + context: ?*anyopaque, /// After calling this, it is safe to unconditionally defer a call to /// `cancel`. - pub fn init(operations: []Operation, ring: []u32) Batch { - const len: u31 = @intCast(operations.len); - assert(ring.len == len); + pub fn init(storage: []Operation.Storage) Batch { + var prev: Operation.OptionalIndex = .none; + for (storage, 0..) 
|*operation, index| { + operation.* = .{ .unused = .{ .prev = prev, .next = .fromIndex(index + 1) } }; + prev = .fromIndex(index); + } + storage[storage.len - 1].unused.next = .none; return .{ - .operations = operations, - .ring = ring.ptr, - .user = .{ - .submit_tail = @enumFromInt(0), - .complete_head = @enumFromInt(0), - .complete_tail = @enumFromInt(0), - }, - .impl = .{ - .submit_head = @enumFromInt(0), - .submit_tail = @enumFromInt(0), - .complete_tail = @enumFromInt(0), - .reserved = null, + .storage = storage, + .unused = .{ + .head = .fromIndex(0), + .tail = .fromIndex(storage.len - 1), }, + .submissions = .empty, + .pending = .empty, + .completions = .empty, + .context = null, }; } - /// Adds `b.operations[operation]` to the list of submitted operations - /// that will be performed when `wait` is called. - pub fn add(b: *Batch, operation: usize) void { - const tail = b.user.submit_tail; - const len: u31 = @intCast(b.operations.len); - b.user.submit_tail = tail.next(len); - b.ring[0..len][tail.index(len)] = @intCast(operation); + /// Adds an operation to be performed at the next await call. + /// Returns the index that will be returned by `next` after the operation completes. + /// Asserts that no more than `storage.len` operations are active at a time. + pub fn add(b: *Batch, operation: Operation) u32 { + const index = b.unused.next; + b.addAt(index.toIndex(), operation); + return index; } - fn flush(b: *Batch) void { - @atomicStore(RingIndex, &b.impl.submit_tail, b.user.submit_tail, .release); - } - - /// Returns `operation` such that `b.operations[operation]` has completed. - /// Returns `null` when `wait` should be called. - pub fn next(b: *Batch) ?u32 { - const head = b.user.complete_head; - if (head == b.user.complete_tail) { - @branchHint(.unlikely); - b.flush(); - const tail = @atomicLoad(RingIndex, &b.impl.complete_tail, .acquire); - if (head == tail) { - @branchHint(.unlikely); - return null; - } - assert(head != tail); - b.user.complete_tail = tail; + /// Adds an operation to be performed at the next await call. + /// After the operation completes, `next` will return `index`. + /// Asserts that the operation at `index` is not active. 
+ pub fn addAt(b: *Batch, index: u32, operation: Operation) void { + const storage = &b.storage[index]; + const unused = storage.unused; + switch (unused.prev) { + .none => b.unused.head = .none, + else => |prev_index| b.storage[prev_index.toIndex()].unused.next = unused.next, } - const len: u31 = @intCast(b.operations.len); - b.user.complete_head = head.next(len); - return b.ring[0..len][head.index(len)]; + switch (unused.next) { + .none => b.unused.tail = .none, + else => |next_index| b.storage[next_index.toIndex()].unused.prev = unused.prev, + } + + switch (b.submissions.tail) { + .none => b.submissions.head = .fromIndex(index), + else => |tail_index| b.storage[tail_index.toIndex()].submission.node.next = .fromIndex(index), + } + storage.* = .{ .submission = .{ .node = .{ .next = .none }, .operation = operation } }; + b.submissions.tail = .fromIndex(index); } - pub const WaitError = ConcurrentError || Cancelable || Timeout.Error; + pub fn next(b: *Batch) ?struct { index: u32, result: Operation.Result } { + const index = b.completions.head; + if (index == .none) return null; + const storage = &b.storage[index.toIndex()]; + const completion = storage.completion; + const next_index = completion.node.next; + b.completions.head = next_index; + if (next_index == .none) b.completions.tail = .none; - /// Starts work on any submitted operations and returns when at least one has completeed. - /// - /// Returns `error.Timeout` if `timeout` expires first. - /// - /// Depending on the `Io` implementation, may allocate resources that are - /// freed with `cancel`, even if an error is returned. - pub fn wait(b: *Batch, io: Io, timeout: Timeout) WaitError!void { - return io.vtable.batchWait(io.userdata, b, timeout); + const tail_index = b.unused.tail; + switch (tail_index) { + .none => b.unused.head = index, + else => b.storage[tail_index.toIndex()].unused.next = index, + } + storage.* = .{ .unused = .{ .prev = tail_index, .next = .none } }; + b.unused.tail = index; + return .{ .index = index.toIndex(), .result = completion.result }; + } + + pub const AwaitAsyncError = Cancelable; + pub fn awaitAsync(b: *Batch, io: Io) AwaitAsyncError!void { + return io.vtable.batchAwaitAsync(io.userdata, b); + } + + pub const AwaitConcurrentError = ConcurrentError || Cancelable || Timeout.Error; + pub fn awaitConcurrent(b: *Batch, io: Io, timeout: Timeout) AwaitConcurrentError!void { + return io.vtable.batchAwaitConcurrent(io.userdata, b, timeout); } - /// Returns after all `operations` have completed. Operations which have not completed - /// after this function returns were successfully dropped and had no side effects. - /// - /// This function is idempotent with respect to itself and `wait`. It is - /// safe to unconditionally `defer` a call to this function after `init`. 
pub fn cancel(b: *Batch, io: Io) void { return io.vtable.batchCancel(io.userdata, b); } diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index df5b3b5a53..5db6f81ac6 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -559,12 +559,11 @@ pub const ReadStreamingError = error{EndOfStream} || Reader.Error; /// See also: /// * `reader` pub fn readStreaming(file: File, io: Io, buffer: []const []u8) ReadStreamingError!usize { - var operation: Io.Operation = .{ .file_read_streaming = .{ + const result = try io.operate(.{ .file_read_streaming = .{ .file = file, .data = buffer, - } }; - try io.operate(&operation); - return operation.file_read_streaming.status.result; + } }); + return result.file_read_streaming; } pub const ReadPositionalError = error{ diff --git a/lib/std/Io/File/MultiReader.zig b/lib/std/Io/File/MultiReader.zig index 7a0f8de068..217215a363 100644 --- a/lib/std/Io/File/MultiReader.zig +++ b/lib/std/Io/File/MultiReader.zig @@ -22,8 +22,7 @@ pub const UnendingError = Allocator.Error || File.Reader.Error || Io.ConcurrentE /// Trailing: /// * `contexts: [len]Context` -/// * `ring: [len]u32` -/// * `operations: [len]Io.Operation` +/// * `storage: [len]Io.Operation.Storage` pub const Streams = extern struct { len: u32, @@ -33,17 +32,10 @@ pub const Streams = extern struct { return ptr[0..s.len]; } - pub fn ring(s: *Streams) []u32 { + pub fn storage(s: *Streams) []Io.Operation.Storage { const prev = contexts(s); const end = prev.ptr + prev.len; - const ptr: [*]u32 = @ptrFromInt(std.mem.alignForward(usize, @intFromPtr(end), @alignOf(u32))); - return ptr[0..s.len]; - } - - pub fn operations(s: *Streams) []Io.Operation { - const prev = ring(s); - const end = prev.ptr + prev.len; - const ptr: [*]Io.Operation = @ptrFromInt(std.mem.alignForward(usize, @intFromPtr(end), @alignOf(Io.Operation))); + const ptr: [*]Io.Operation.Storage = @ptrFromInt(std.mem.alignForward(usize, @intFromPtr(end), @alignOf(Io.Operation.Storage))); return ptr[0..s.len]; } }; @@ -52,8 +44,7 @@ pub fn Buffer(comptime n: usize) type { return extern struct { len: u32, contexts: [n][@sizeOf(Context)]u8 align(@alignOf(Context)), - ring: [n]u32, - operations: [n][@sizeOf(Io.Operation)]u8 align(@alignOf(Io.Operation)), + storage: [n][@sizeOf(Io.Operation.Storage)]u8 align(@alignOf(Io.Operation.Storage)), pub fn toStreams(b: *@This()) *Streams { b.len = n; @@ -86,25 +77,22 @@ pub fn init(mr: *MultiReader, gpa: Allocator, io: Io, streams: *Streams, files: .vec = .{&.{}}, .err = null, }; - const operations = streams.operations(); - const ring = streams.ring(); mr.* = .{ .gpa = gpa, .streams = streams, - .batch = .init(operations, ring), + .batch = .init(streams.storage()), }; - for (operations, contexts, files, 0..) |*op, *context, file, i| { + for (contexts, 0..) |*context, i| { const r = &context.fr.interface; - op.* = .{ .file_read_streaming = .{ - .file = file, - .data = &context.vec, - } }; rebaseGrowing(mr, context, 1) catch |err| { context.err = err; continue; }; context.vec[0] = r.buffer; - mr.batch.add(i); + mr.batch.addAt(@intCast(i), .{ .file_read_streaming = .{ + .file = context.fr.file, + .data = &context.vec, + } }); } } @@ -204,7 +192,7 @@ fn fillUntimed(context: *Context, capacity: usize) Io.Reader.Error!void { }; } -pub const FillError = Io.Batch.WaitError || error{ +pub const FillError = Io.Batch.AwaitConcurrentError || error{ /// `fill` was called when all streams already have failed or reached the /// end. 
EndOfStream, @@ -213,17 +201,15 @@ pub const FillError = Io.Batch.WaitError || error{ /// Wait until at least one stream receives more data. pub fn fill(mr: *MultiReader, unused_capacity: usize, timeout: Io.Timeout) FillError!void { const contexts = mr.streams.contexts(); - const operations = mr.streams.operations(); const io = contexts[0].fr.io; var any_completed = false; - try mr.batch.wait(io, timeout); + try mr.batch.awaitConcurrent(io, timeout); - while (mr.batch.next()) |i| { + while (mr.batch.next()) |operation| { any_completed = true; - const context = &contexts[i]; - const operation = &operations[i]; - const n = operation.file_read_streaming.status.result catch |err| { + const context = &contexts[operation.index]; + const n = operation.result.file_read_streaming catch |err| { context.err = err; continue; }; @@ -237,15 +223,17 @@ pub fn fill(mr: *MultiReader, unused_capacity: usize, timeout: Io.Timeout) FillE assert(r.seek == 0); } context.vec[0] = r.buffer[r.end..]; - operation.file_read_streaming.status = .{ .unstarted = {} }; - mr.batch.add(i); + mr.batch.addAt(operation.index, .{ .file_read_streaming = .{ + .file = context.fr.file, + .data = &context.vec, + } }); } if (!any_completed) return error.EndOfStream; } /// Wait until all streams fail or reach the end. -pub fn fillRemaining(mr: *MultiReader, timeout: Io.Timeout) Io.Batch.WaitError!void { +pub fn fillRemaining(mr: *MultiReader, timeout: Io.Timeout) Io.Batch.AwaitConcurrentError!void { while (fill(mr, 1, timeout)) |_| {} else |err| switch (err) { error.EndOfStream => return, else => |e| return e, diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index bccaf24ecd..18f24eb40c 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -1617,7 +1617,8 @@ pub fn io(t: *Threaded) Io { .futexWake = futexWake, .operate = operate, - .batchWait = batchWait, + .batchAwaitAsync = batchAwaitAsync, + .batchAwaitConcurrent = batchAwaitConcurrent, .batchCancel = batchCancel, .dirCreateDir = dirCreateDir, @@ -1780,7 +1781,8 @@ pub fn ioBasic(t: *Threaded) Io { .futexWake = futexWake, .operate = operate, - .batchWait = batchWait, + .batchAwaitAsync = batchAwaitAsync, + .batchAwaitConcurrent = batchAwaitConcurrent, .batchCancel = batchCancel, .dirCreateDir = dirCreateDir, @@ -2483,85 +2485,227 @@ fn futexWake(userdata: ?*anyopaque, ptr: *const u32, max_waiters: u32) void { Thread.futexWake(ptr, max_waiters); } -fn operate(userdata: ?*anyopaque, op: *Io.Operation) Io.Cancelable!void { +fn operate(userdata: ?*anyopaque, operation: Io.Operation) Io.Cancelable!Io.Operation.Result { const t: *Threaded = @ptrCast(@alignCast(userdata)); - switch (op.*) { - .noop => |*o| { - _ = o.status.unstarted; - o.status = .{ .result = {} }; - }, - .file_read_streaming => |*o| { - _ = o.status.unstarted; - o.status = .{ .result = fileReadStreaming(t, o.file, o.data) catch |err| switch (err) { + switch (operation) { + .file_read_streaming => |o| return .{ + .file_read_streaming = fileReadStreaming(t, o.file, o.data) catch |err| switch (err) { error.Canceled => |e| return e, else => |e| e, - } }; + }, }, } } -fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.WaitError!void { +fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Batch.AwaitAsyncError!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); - if (is_windows) return batchWaitWindows(t, b, timeout); + if (is_windows) { + try batchAwaitWindows(b); + const alertable_syscall = try AlertableSyscall.start(); + while (b.pending.head != .none 
and b.completions.head == .none) waitForApcOrAlert(); + alertable_syscall.finish(); + return; + } if (native_os == .wasi and !builtin.link_libc) @panic("TODO"); - const operations = b.operations; - const len: u31 = @intCast(operations.len); - const ring = b.ring[0..len]; - var submit_head = b.impl.submit_head; - const submit_tail = b.user.submit_tail; - b.impl.submit_tail = submit_tail; - var complete_tail = b.impl.complete_tail; - var map_buffer: [poll_buffer_len]u8 = undefined; // poll_buffer index to operations index - var poll_i: u8 = 0; - defer { - for (map_buffer[0..poll_i]) |op| { - submit_head = submit_head.prev(len); - ring[submit_head.index(len)] = op; - } - b.impl.submit_head = submit_head; - b.impl.complete_tail = complete_tail; - b.user.complete_tail = complete_tail; - } var poll_buffer: [poll_buffer_len]posix.pollfd = undefined; - while (submit_head != submit_tail) : (submit_head = submit_head.next(len)) { - const op = ring[submit_head.index(len)]; - const operation = &operations[op]; - switch (operation.*) { - .noop => |*o| { - _ = o.status.unstarted; - o.status = .{ .result = {} }; - submitComplete(ring, &complete_tail, op); - }, - .file_read_streaming => |*o| { - _ = o.status.unstarted; - if (poll_buffer.len - poll_i == 0) return error.ConcurrencyUnavailable; - poll_buffer[poll_i] = .{ - .fd = o.file.handle, - .events = posix.POLL.IN, - .revents = 0, - }; - map_buffer[poll_i] = @intCast(op); - poll_i += 1; - }, + var poll_len: u32 = 0; + { + var index = b.submissions.head; + while (index != .none and poll_len < poll_buffer_len) { + const submission = &b.storage[index.toIndex()].submission; + switch (submission.operation) { + .file_read_streaming => |o| { + poll_buffer[poll_len] = .{ .fd = o.file.handle, .events = posix.POLL.IN, .revents = 0 }; + poll_len += 1; + }, + } + index = submission.node.next; } } - switch (poll_i) { + switch (poll_len) { + 0 => return, + 1 => {}, + else => while (true) { + const timeout_ms: i32 = t: { + if (b.completions.head != .none) { + // It is legal to call batchWait with already completed + // operations in the ring. In such case, we need to avoid + // blocking in the poll syscall, but we can still take this + // opportunity to find additional ready operations. + break :t 0; + } + const max_poll_ms = std.math.maxInt(i32); + break :t max_poll_ms; + }; + const syscall = try Syscall.start(); + const rc = posix.system.poll(&poll_buffer, poll_len, timeout_ms); + syscall.finish(); + switch (posix.errno(rc)) { + .SUCCESS => { + if (rc == 0) { + if (b.completions.head != .none) { + // Since there are already completions available in the + // queue, this is neither a timeout nor a case for + // retrying. 
+ return; + } + continue; + } + var prev_index: Io.Operation.OptionalIndex = .none; + var index = b.submissions.head; + for (poll_buffer[0..poll_len]) |poll_entry| { + const storage = &b.storage[index.toIndex()]; + const submission = &storage.submission; + const next_index = submission.node.next; + if (poll_entry.revents != 0) { + const result = try operate(t, submission.operation); + + switch (prev_index) { + .none => b.submissions.head = next_index, + else => b.storage[prev_index.toIndex()].submission.node.next = next_index, + } + if (next_index == .none) b.submissions.tail = prev_index; + + switch (b.completions.tail) { + .none => b.completions.head = index, + else => |tail_index| b.storage[tail_index.toIndex()].completion.node.next = index, + } + storage.* = .{ .completion = .{ .node = .{ .next = .none }, .result = result } }; + b.completions.tail = index; + } else prev_index = index; + index = next_index; + } + assert(index == .none); + return; + }, + .INTR => continue, + else => break, + } + }, + } + { + var tail_index = b.completions.tail; + defer b.completions.tail = tail_index; + var index = b.submissions.head; + errdefer b.submissions.head = index; + while (index != .none) { + const storage = &b.storage[index.toIndex()]; + const submission = &storage.submission; + const next_index = submission.node.next; + const result = try operate(t, submission.operation); + + switch (tail_index) { + .none => b.completions.head = index, + else => b.storage[tail_index.toIndex()].completion.node.next = index, + } + storage.* = .{ .completion = .{ .node = .{ .next = .none }, .result = result } }; + tail_index = index; + index = next_index; + } + b.submissions = .{ .head = .none, .tail = .none }; + } +} + +fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.AwaitConcurrentError!void { + const t: *Threaded = @ptrCast(@alignCast(userdata)); + if (is_windows) { + const deadline: ?Io.Clock.Timestamp = timeout.toDeadline(ioBasic(t)) catch |err| switch (err) { + error.Unexpected => deadline: { + recoverableOsBugDetected(); + break :deadline .{ .raw = .{ .nanoseconds = 0 }, .clock = .awake }; + }, + error.UnsupportedClock => |e| return e, + }; + try batchAwaitWindows(b); + while (b.pending.head != .none and b.completions.head == .none) { + var delay_interval: windows.LARGE_INTEGER = interval: { + const d = deadline orelse break :interval std.math.minInt(windows.LARGE_INTEGER); + break :interval t.deadlineToWindowsInterval(d) catch |err| switch (err) { + error.UnsupportedClock => |e| return e, + error.Unexpected => { + recoverableOsBugDetected(); + break :interval -1; + }, + }; + }; + const alertable_syscall = try AlertableSyscall.start(); + const delay_rc = windows.ntdll.NtDelayExecution(windows.TRUE, &delay_interval); + alertable_syscall.finish(); + switch (delay_rc) { + .SUCCESS, .TIMEOUT => { + // The thread woke due to the timeout. Although spurious + // timeouts are OK, when no deadline is passed we must not + // return `error.Timeout`. 
+                    if (timeout != .none and b.completions.head == .none) return error.Timeout;
+                },
+                else => {},
+            }
+        }
+        return;
+    }
+    if (native_os == .wasi and !builtin.link_libc) @panic("TODO");
+    var poll_buffer: [poll_buffer_len]posix.pollfd = undefined;
+    var poll_storage: struct {
+        gpa: std.mem.Allocator,
+        b: *Io.Batch,
+        slice: []posix.pollfd,
+        len: u32,
+
+        fn add(storage: *@This(), file: Io.File, events: @FieldType(posix.pollfd, "events")) Io.ConcurrentError!void {
+            const len = storage.len;
+            if (len == poll_buffer_len) {
+                const slice: []posix.pollfd = if (storage.b.context) |context|
+                    @as([*]posix.pollfd, @ptrCast(@alignCast(context)))[0..storage.b.storage.len]
+                else allocation: {
+                    const allocation = storage.gpa.alloc(posix.pollfd, storage.b.storage.len) catch
+                        return error.ConcurrencyUnavailable;
+                    storage.b.context = allocation.ptr;
+                    break :allocation allocation;
+                };
+                @memcpy(slice[0..poll_buffer_len], storage.slice);
+                storage.slice = slice;
+            }
+            storage.slice[len] = .{
+                .fd = file.handle,
+                .events = events,
+                .revents = 0,
+            };
+            storage.len = len + 1;
+        }
+    } = .{ .gpa = t.allocator, .b = b, .slice = &poll_buffer, .len = 0 };
+    {
+        var index = b.submissions.head;
+        while (index != .none) {
+            const submission = &b.storage[index.toIndex()].submission;
+            switch (submission.operation) {
+                .file_read_streaming => |o| try poll_storage.add(o.file, posix.POLL.IN),
+            }
+            index = submission.node.next;
+        }
+    }
+    switch (poll_storage.len) {
         0 => return,
         1 => if (timeout == .none) {
-            const op = map_buffer[0];
-            try operate(t, &operations[op]);
-            submitComplete(ring, &complete_tail, op);
-            poll_i = 0;
+            const index = b.submissions.head;
+            const storage = &b.storage[index.toIndex()];
+            const result = try operate(t, storage.submission.operation);
+
+            b.submissions = .{ .head = .none, .tail = .none };
+
+            switch (b.completions.tail) {
+                .none => b.completions.head = index,
+                else => |tail_index| b.storage[tail_index.toIndex()].completion.node.next = index,
+            }
+            storage.* = .{ .completion = .{ .node = .{ .next = .none }, .result = result } };
+            b.completions.tail = index;
             return;
         },
         else => {},
     }
     const t_io = ioBasic(t);
     const deadline = timeout.toDeadline(t_io) catch return error.UnsupportedClock;
-    const max_poll_ms = std.math.maxInt(i32);
     while (true) {
         const timeout_ms: i32 = t: {
-            if (b.user.complete_head != complete_tail) {
+            if (b.completions.head != .none) {
                 // It is legal to call batchWait with already completed
                 // operations in the ring. In such case, we need to avoid
                 // blocking in the poll syscall, but we can still take this
                 // opportunity to find additional ready operations.
@@ -2571,15 +2715,16 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.
             const d = deadline orelse break :t -1;
             const duration = d.durationFromNow(t_io) catch return error.UnsupportedClock;
             if (duration.raw.nanoseconds <= 0) return error.Timeout;
+            const max_poll_ms = std.math.maxInt(i32);
             break :t @intCast(@min(max_poll_ms, duration.raw.toMilliseconds()));
         };
         const syscall = try Syscall.start();
-        const rc = posix.system.poll(&poll_buffer, poll_i, timeout_ms);
+        const rc = posix.system.poll(poll_storage.slice.ptr, poll_storage.len, timeout_ms);
         syscall.finish();
         switch (posix.errno(rc)) {
             .SUCCESS => {
                 if (rc == 0) {
-                    if (b.user.complete_head != complete_tail) {
+                    if (b.completions.head != .none) {
                         // Since there are already completions available in the
                         // queue, this is neither a timeout nor a case for
                         // retrying.
@@ -2590,18 +2735,30 @@
if (deadline == null) continue; return error.Timeout; } - while (poll_i != 0) { - poll_i -= 1; - const poll_fd = &poll_buffer[poll_i]; - const op = map_buffer[poll_i]; - if (poll_fd.revents == 0) { - submit_head = submit_head.prev(len); - ring[submit_head.index(len)] = op; - } else { - try operate(t, &operations[op]); - submitComplete(ring, &complete_tail, op); - } + var prev_index: Io.Operation.OptionalIndex = .none; + var index = b.submissions.head; + for (poll_storage.slice[0..poll_storage.len]) |poll_entry| { + const submission = &b.storage[index.toIndex()].submission; + const next_index = submission.node.next; + if (poll_entry.revents != 0) { + const result = try operate(t, submission.operation); + + switch (prev_index) { + .none => b.submissions.head = next_index, + else => b.storage[prev_index.toIndex()].submission.node.next = next_index, + } + if (next_index == .none) b.submissions.tail = prev_index; + + switch (b.completions.tail) { + .none => b.completions.head = index, + else => |tail_index| b.storage[tail_index.toIndex()].completion.node.next = index, + } + b.completions.tail = index; + b.storage[index.toIndex()] = .{ .completion = .{ .node = .{ .next = .none }, .result = result } }; + } else prev_index = index; + index = next_index; } + assert(index == .none); return; }, .INTR => continue, @@ -2610,166 +2767,126 @@ fn batchWait(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch. } } -fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { - const t: *Threaded = @ptrCast(@alignCast(userdata)); - const operations = b.operations; - const len: u31 = @intCast(operations.len); - const ring = b.ring[0..len]; - var submit_head = b.impl.submit_head; - const submit_tail = b.user.submit_tail; - b.impl.submit_tail = submit_tail; - var complete_tail = b.impl.complete_tail; - while (submit_head != submit_tail) : (submit_head = submit_head.next(len)) { - const op = ring[submit_head.index(len)]; - switch (operations[op]) { - .noop => |*o| { - _ = o.status.unstarted; - o.status = .{ .result = {} }; - submitComplete(ring, &complete_tail, op); - }, - .file_read_streaming => |*o| _ = o.status.unstarted, - } - } - if (is_windows) { - // Iterate over pending and issue cancelations, then free the allocation for IO_STATUS_BLOCK - if (b.impl.reserved) |reserved| { - const gpa = t.allocator; - const metadatas_ptr: [*]WinOpMetadata = @ptrCast(@alignCast(reserved)); - const metadatas = metadatas_ptr[0..b.operations.len]; - for (metadatas, 0..) 
|*metadata, op| { - if (!metadata.pending) continue; - const done = @atomicLoad(windows.NTSTATUS, &metadata.iosb.u.Status, .acquire) != .PENDING; - if (done) continue; - switch (operations[op]) { - .noop => unreachable, - .file_read_streaming => |*o| { - _ = windows.ntdll.NtCancelIoFile(o.file.handle, &metadata.iosb); - }, - } - } - for (metadatas) |*metadata| { - if (!metadata.pending) continue; - while (@atomicLoad(windows.NTSTATUS, &metadata.iosb.u.Status, .acquire) == .PENDING) { - waitForApcOrAlert(); - } - } - gpa.free(metadatas); - b.impl.reserved = null; - } - } - b.impl.submit_head = submit_tail; - b.impl.complete_tail = complete_tail; - b.user.complete_tail = complete_tail; -} - -const WinOpMetadata = struct { +const WindowsBatchPendingOperationContext = extern struct { + file: windows.HANDLE, iosb: windows.IO_STATUS_BLOCK, - pending: bool, + + const Erased = [3]usize; + + comptime { + assert(@sizeOf(Erased) <= @sizeOf(WindowsBatchPendingOperationContext)); + } + + fn toErased(context: *WindowsBatchPendingOperationContext) *Erased { + return @ptrCast(context); + } + + fn fromErased(erased: *Erased) *WindowsBatchPendingOperationContext { + return @ptrCast(erased); + } }; -fn batchWaitWindows(t: *Threaded, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.WaitError!void { - const operations = b.operations; - const len: u31 = @intCast(operations.len); - const ring = b.ring[0..len]; - var submit_head = b.impl.submit_head; - const submit_tail = b.user.submit_tail; - b.impl.submit_tail = submit_tail; - var complete_tail = b.impl.complete_tail; - - const metadatas_ptr: [*]WinOpMetadata = if (b.impl.reserved) |reserved| @ptrCast(@alignCast(reserved)) else a: { - const gpa = t.allocator; - const metadatas = gpa.alloc(WinOpMetadata, operations.len) catch return error.ConcurrencyUnavailable; - b.impl.reserved = metadatas.ptr; - @memset(metadatas, .{ .iosb = undefined, .pending = false }); - break :a metadatas.ptr; - }; - const metadatas = metadatas_ptr[0..operations.len]; - - defer { - b.impl.submit_head = submit_head; - b.impl.complete_tail = complete_tail; - b.user.complete_tail = complete_tail; - } - - while (submit_head != submit_tail) : (submit_head = submit_head.next(len)) { - const op = ring[submit_head.index(len)]; - const operation = &operations[op]; - const metadata = &metadatas[op]; - metadata.* = .{ .iosb = .{ - .u = .{ .Status = .PENDING }, - .Information = 0, - }, .pending = false }; - switch (operation.*) { - .noop => |*o| { - _ = o.status.unstarted; - o.status = .{ .result = {} }; - submitComplete(ring, &complete_tail, op); - }, - .file_read_streaming => |*o| { - _ = o.status.unstarted; - try ntReadFile(o.file.handle, o.data, &metadata.iosb); - if (@atomicLoad(windows.NTSTATUS, &metadata.iosb.u.Status, .acquire) == .PENDING) { - o.status = .{ .pending = b }; - metadata.pending = true; - } else { - o.status = .{ .result = ntReadFileResult(&metadata.iosb) }; - submitComplete(ring, &complete_tail, op); - } - }, - } - } - - const deadline: ?Io.Clock.Timestamp = timeout.toDeadline(ioBasic(t)) catch |err| switch (err) { - error.Unexpected => deadline: { - recoverableOsBugDetected(); - break :deadline .{ .raw = .{ .nanoseconds = 0 }, .clock = .awake }; - }, - error.UnsupportedClock => |e| return e, - }; - - while (true) { - var any_pending = false; - for (metadatas, 0..) 
|*metadata, op_usize| { - if (!metadata.pending) continue; - any_pending = true; - const op: u31 = @intCast(op_usize); - const done = @atomicLoad(windows.NTSTATUS, &metadata.iosb.u.Status, .acquire) != .PENDING; - switch (operations[op]) { - .noop => unreachable, - .file_read_streaming => |*o| { - assert(o.status.pending == b); - if (!done) continue; - o.status = .{ .result = ntReadFileResult(&metadata.iosb) }; - }, +fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { + const t: *Threaded = @ptrCast(@alignCast(userdata)); + { + var tail_index = b.unused.tail; + defer b.unused.tail = tail_index; + var index = b.submissions.head; + errdefer b.submissions.head = index; + while (index != .none) { + const next_index = b.storage[index.toIndex()].submission.node.next; + switch (tail_index) { + .none => b.unused.head = index, + else => b.storage[tail_index.toIndex()].unused.next = index, } - metadata.pending = false; - submitComplete(ring, &complete_tail, op); - } - if (b.user.complete_head != complete_tail) return; - if (!any_pending) return; - var delay_interval: windows.LARGE_INTEGER = interval: { - const d = deadline orelse break :interval std.math.minInt(windows.LARGE_INTEGER); - break :interval t.deadlineToWindowsInterval(d) catch |err| switch (err) { - error.UnsupportedClock => |e| return e, - error.Unexpected => { - recoverableOsBugDetected(); - break :interval -1; - }, - }; - }; - const alertable_syscall = try AlertableSyscall.start(); - const delay_rc = windows.ntdll.NtDelayExecution(windows.TRUE, &delay_interval); - alertable_syscall.finish(); - switch (delay_rc) { - .SUCCESS, .TIMEOUT => { - // The thread woke due to the timeout. Although spurious - // timeouts are OK, when no deadline is passed we must not - // return `error.Timeout`. - if (timeout != .none) return error.Timeout; - }, - else => {}, + b.storage[index.toIndex()] = .{ .unused = .{ .prev = tail_index, .next = .none } }; + tail_index = index; + index = next_index; } + b.submissions = .{ .head = .none, .tail = .none }; } + if (is_windows) { + var index = b.pending.head; + while (index != .none) { + const pending = &b.storage[index.toIndex()].pending; + const context: *WindowsBatchPendingOperationContext = .fromErased(&pending.context); + _ = windows.ntdll.NtCancelIoFile(context.file, &context.iosb); + index = pending.node.next; + } + while (b.pending.head != .none) waitForApcOrAlert(); + } else if (b.context) |context| { + t.allocator.free(@as([*]posix.pollfd, @ptrCast(@alignCast(context)))[0..b.storage.len]); + b.context = null; + } + assert(b.pending.head == .none); +} + +fn batchApc(apc_context: ?*anyopaque, iosb: *windows.IO_STATUS_BLOCK, _: windows.ULONG) callconv(.winapi) void { + const b: *Io.Batch = @ptrCast(@alignCast(apc_context)); + const context: *WindowsBatchPendingOperationContext = @fieldParentPtr("iosb", iosb); + const erased_context = context.toErased(); + const pending: *Io.Operation.Storage.Pending = @fieldParentPtr("context", erased_context); + switch (pending.node.prev) { + .none => b.pending.head = pending.node.next, + else => |prev_index| b.storage[prev_index.toIndex()].pending.node.next = pending.node.next, + } + switch (pending.node.next) { + .none => b.pending.tail = pending.node.prev, + else => |next_index| b.storage[next_index.toIndex()].pending.node.prev = pending.node.prev, + } + const storage: *Io.Operation.Storage = @fieldParentPtr("pending", pending); + const index = storage - b.storage.ptr; + switch (iosb.u.Status) { + .CANCELLED => { + const tail_index = b.unused.tail; + switch 
(tail_index) { + .none => b.unused.head = .fromIndex(index), + else => b.storage[tail_index.toIndex()].unused.next = .fromIndex(index), + } + storage.* = .{ .unused = .{ .prev = tail_index, .next = .none } }; + b.unused.tail = .fromIndex(index); + }, + else => { + switch (b.completions.tail) { + .none => b.completions.head = .fromIndex(index), + else => |tail_index| b.storage[tail_index.toIndex()].completion.node.next = .fromIndex(index), + } + b.completions.tail = .fromIndex(index); + const result: Io.Operation.Result = switch (pending.tag) { + .file_read_streaming => .{ .file_read_streaming = ntReadFileResult(iosb) }, + }; + storage.* = .{ .completion = .{ .node = .{ .next = .none }, .result = result } }; + }, + } +} + +fn batchAwaitWindows(b: *Io.Batch) Io.Cancelable!void { + var index = b.submissions.head; + errdefer b.submissions.head = index; + while (index != .none) { + const storage = &b.storage[index.toIndex()]; + const submission = storage.submission; + errdefer storage.* = .{ .submission = submission }; + storage.* = .{ .pending = .{ + .node = .{ .prev = b.pending.tail, .next = .none }, + .tag = submission.operation, + .context = undefined, + } }; + const context: *WindowsBatchPendingOperationContext = .fromErased(&storage.pending.context); + switch (submission.operation) { + .file_read_streaming => |o| { + context.file = o.file.handle; + try ntReadFile(o.file.handle, o.data, &batchApc, b, &context.iosb); + }, + } + switch (b.pending.tail) { + .none => b.pending.head = index, + else => |tail_index| b.storage[tail_index.toIndex()].pending.node.next = index, + } + b.pending.tail = index; + index = submission.node.next; + } + b.submissions = .{ .head = .none, .tail = .none }; } fn submitComplete(ring: []u32, complete_tail: *Io.Batch.RingIndex, op: u32) void { @@ -8701,7 +8818,7 @@ fn fileReadStreamingWindows(file: File, data: []const []u8) File.ReadStreamingEr .u = .{ .Status = .PENDING }, .Information = 0, }; - try ntReadFile(file.handle, data, &io_status_block); + try ntReadFile(file.handle, data, &noopApc, null, &io_status_block); while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { // Once we get here we must not return from the function until the // operation completes, thereby releasing reference to io_status_block. 
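The machinery above hinges on NT asynchronous procedure calls: an APC routine runs on the thread that issued the I/O, but only while that thread sits in an alertable wait. Reduced to a minimal sketch, assuming only the `windows.ntdll` bindings already used in this patch (`Ctx`, `myApc`, and `readWithApc` are illustrative names, not part of the change):

    const std = @import("std");
    const windows = std.os.windows;

    const Ctx = struct { done: bool = false };

    fn myApc(context: ?*anyopaque, iosb: *windows.IO_STATUS_BLOCK, reserved: windows.ULONG) callconv(.winapi) void {
        _ = iosb;
        _ = reserved;
        // Runs on the thread that issued the read, during its alertable wait.
        const ctx: *Ctx = @ptrCast(@alignCast(context));
        ctx.done = true;
    }

    fn readWithApc(handle: windows.HANDLE, buffer: []u8) void {
        var ctx: Ctx = .{};
        var iosb: windows.IO_STATUS_BLOCK = undefined;
        switch (windows.ntdll.NtReadFile(
            handle,
            null, // event
            &myApc,
            &ctx, // apc context
            &iosb,
            buffer.ptr,
            @min(std.math.maxInt(u32), buffer.len),
            null, // byte offset
            null, // key
        )) {
            .PENDING, .SUCCESS => {},
            else => return,
        }
        // The APC cannot fire while the thread runs normally; park the thread
        // in an alertable infinite sleep until the callback flips the flag.
        const infinite: windows.LARGE_INTEGER = std.math.minInt(windows.LARGE_INTEGER);
        while (!ctx.done) _ = windows.ntdll.NtDelayExecution(windows.TRUE, &infinite);
    }

`batchApc` is the same shape, except it recovers its context with `@fieldParentPtr` from the `IO_STATUS_BLOCK` and links the completion into the batch's queues instead of setting a flag.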
@@ -8736,12 +8853,20 @@ fn ntReadFileResult(io_status_block: *const windows.IO_STATUS_BLOCK) !usize { } } -fn ntReadFile(handle: windows.HANDLE, data: []const []u8, iosb: *windows.IO_STATUS_BLOCK) Io.Cancelable!void { +fn ntReadFile( + handle: windows.HANDLE, + data: []const []u8, + apcRoutine: ?*const windows.IO_APC_ROUTINE, + apc_context: ?*anyopaque, + iosb: *windows.IO_STATUS_BLOCK, +) Io.Cancelable!void { var index: usize = 0; while (index < data.len and data[index].len == 0) index += 1; if (index == data.len) { - iosb.u.Status = .SUCCESS; - iosb.Information = 0; + iosb.* = .{ .u = .{ .Status = .SUCCESS }, .Information = 0 }; + if (apcRoutine) |routine| if (routine != &noopApc) { + _ = windows.ntdll.NtQueueApcThread(windows.current_process, routine, apc_context, iosb, null); + }; return; } const buffer = data[index]; @@ -8750,8 +8875,8 @@ fn ntReadFile(handle: windows.HANDLE, data: []const []u8, iosb: *windows.IO_STAT while (true) switch (windows.ntdll.NtReadFile( handle, null, // event - noopApc, // apc callback - null, // apc context + apcRoutine, + apc_context, iosb, buffer.ptr, @min(std.math.maxInt(u32), buffer.len), From 62c97b745d508a5ed7011bf3b8400fde4677b839 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 30 Jan 2026 12:27:27 -0800 Subject: [PATCH 59/65] std.Io.Threaded: stop checking bytes read with END_OF_FILE --- lib/std/Io/Threaded.zig | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 18f24eb40c..45184e6d59 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -8816,9 +8816,10 @@ fn fileReadStreamingPosix(file: File, data: []const []u8) File.ReadStreamingErro fn fileReadStreamingWindows(file: File, data: []const []u8) File.ReadStreamingError!usize { var io_status_block: windows.IO_STATUS_BLOCK = .{ .u = .{ .Status = .PENDING }, - .Information = 0, + .Information = undefined, }; try ntReadFile(file.handle, data, &noopApc, null, &io_status_block); + while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { // Once we get here we must not return from the function until the // operation completes, thereby releasing reference to io_status_block. @@ -8842,10 +8843,7 @@ fn ntReadFileResult(io_status_block: *const windows.IO_STATUS_BLOCK) !usize { .PENDING => unreachable, .CANCELLED => unreachable, .SUCCESS => return io_status_block.Information, - .END_OF_FILE, .PIPE_BROKEN => { - if (io_status_block.Information == 0) return error.EndOfStream; - return io_status_block.Information; - }, + .END_OF_FILE, .PIPE_BROKEN => return error.EndOfStream, .INVALID_DEVICE_REQUEST => return error.IsDir, .LOCK_NOT_GRANTED => return error.LockViolation, .ACCESS_DENIED => return error.AccessDenied, From 39a6d5d1c5db32e9648fba6a46f3fef4ef83974a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 30 Jan 2026 17:59:48 -0800 Subject: [PATCH 60/65] std.Io.File: add non-blocking flag On Windows, we need to know ahead of time whether a file was opened in synchronous mode or asynchronous mode. There may be advantages to tracking this state for POSIX operating systems as well. 
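For call sites that wrap a raw descriptor, this means the mode must now be stated explicitly at construction. A minimal sketch (the helper name is illustrative):

    const std = @import("std");
    const Io = std.Io;

    /// Wrap a pipe read end that was opened in ordinary blocking mode.
    fn wrapBlockingFd(fd: std.posix.fd_t) Io.File {
        return .{
            .handle = fd,
            // Must match how the handle was actually opened: on Windows,
            // whether it uses MODE.IO.ASYNCHRONOUS; on POSIX, whether
            // O_NONBLOCK is set.
            .flags = .{ .nonblocking = false },
        };
    }

Every open and create path in `std.Io.Threaded` currently constructs its result this way with `.nonblocking = false`, as the hunks below show.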
--- lib/std/Io/File.zig | 18 +++++++++ lib/std/Io/Threaded.zig | 71 ++++++++++++++++++++++++++---------- lib/std/Io/Threaded/test.zig | 4 +- lib/std/Progress.zig | 1 + lib/std/posix/test.zig | 9 +++-- 5 files changed, 79 insertions(+), 24 deletions(-) diff --git a/lib/std/Io/File.zig b/lib/std/Io/File.zig index 5db6f81ac6..e0297e0573 100644 --- a/lib/std/Io/File.zig +++ b/lib/std/Io/File.zig @@ -10,6 +10,18 @@ const assert = std.debug.assert; const Dir = std.Io.Dir; handle: Handle, +flags: Flags, + +pub const Flags = struct { + /// * true: + /// - windows: opened with MODE.IO.ASYNCHRONOUS + /// - POSIX: O_NONBLOCK is set + /// * false: + /// - windows: opened with SYNCHRONOUS_ALERT or SYNCHRONOUS_NONALERT, or + /// not a file. + /// - POSIX: O_NONBLOCK is unset + nonblocking: bool, +}; pub const Handle = std.posix.fd_t; @@ -80,9 +92,11 @@ pub fn stdout() File { return switch (native_os) { .windows => .{ .handle = std.os.windows.peb().ProcessParameters.hStdOutput, + .flags = .{ .nonblocking = false }, }, else => .{ .handle = std.posix.STDOUT_FILENO, + .flags = .{ .nonblocking = false }, }, }; } @@ -91,9 +105,11 @@ pub fn stderr() File { return switch (native_os) { .windows => .{ .handle = std.os.windows.peb().ProcessParameters.hStdError, + .flags = .{ .nonblocking = false }, }, else => .{ .handle = std.posix.STDERR_FILENO, + .flags = .{ .nonblocking = false }, }, }; } @@ -102,9 +118,11 @@ pub fn stdin() File { return switch (native_os) { .windows => .{ .handle = std.os.windows.peb().ProcessParameters.hStdInput, + .flags = .{ .nonblocking = false }, }, else => .{ .handle = std.posix.STDIN_FILENO, + .flags = .{ .nonblocking = false }, }, }; } diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 45184e6d59..8fdd2a58e9 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -3215,8 +3215,10 @@ fn dirCreateDirPathOpenWasi( fn dirStat(userdata: ?*anyopaque, dir: Dir) Dir.StatError!Dir.Stat { const t: *Threaded = @ptrCast(@alignCast(userdata)); - const file: File = .{ .handle = dir.handle }; - return fileStat(t, file); + return fileStat(t, .{ + .handle = dir.handle, + .flags = .{ .nonblocking = false }, + }); } const dirStatFile = switch (native_os) { @@ -4008,7 +4010,10 @@ fn dirCreateFilePosix( } } - return .{ .handle = fd }; + return .{ + .handle = fd, + .flags = .{ .nonblocking = false }, + }; } fn dirCreateFileWindows( @@ -4138,7 +4143,10 @@ fn dirCreateFileWindows( errdefer windows.CloseHandle(handle); const exclusive = switch (flags.lock) { - .none => return .{ .handle = handle }, + .none => return .{ + .handle = handle, + .flags = .{ .nonblocking = false }, + }, .shared => false, .exclusive => true, }; @@ -4158,7 +4166,10 @@ fn dirCreateFileWindows( )) { .SUCCESS => { syscall.finish(); - return .{ .handle = handle }; + return .{ + .handle = handle, + .flags = .{ .nonblocking = false }, + }; }, .INSUFFICIENT_RESOURCES => return syscall.fail(error.SystemResources), .LOCK_NOT_GRANTED => return syscall.fail(error.WouldBlock), @@ -4207,7 +4218,10 @@ fn dirCreateFileWasi( switch (wasi.path_open(dir.handle, lookup_flags, sub_path.ptr, sub_path.len, oflags, base, inheriting, fdflags, &fd)) { .SUCCESS => { syscall.finish(); - return .{ .handle = fd }; + return .{ + .handle = fd, + .flags = .{ .nonblocking = false }, + }; }, .INTR => { try syscall.checkCancel(); @@ -4302,7 +4316,10 @@ fn dirCreateFileAtomic( .SUCCESS => { syscall.finish(); return .{ - .file = .{ .handle = @intCast(rc) }, + .file = .{ + .handle = @intCast(rc), + .flags = .{ .nonblocking = false }, + }, 
.file_basename_hex = 0, .dest_sub_path = dest_path, .file_open = true, @@ -4510,7 +4527,10 @@ fn dirOpenFilePosix( if (!flags.allow_directory) { const is_dir = is_dir: { - const stat = fileStat(t, .{ .handle = fd }) catch |err| switch (err) { + const stat = fileStat(t, .{ + .handle = fd, + .flags = .{ .nonblocking = false }, + }) catch |err| switch (err) { // The directory-ness is either unknown or unknowable error.Streaming => break :is_dir false, else => |e| return e, @@ -4596,7 +4616,10 @@ fn dirOpenFilePosix( } } - return .{ .handle = fd }; + return .{ + .handle = fd, + .flags = .{ .nonblocking = false }, + }; } fn dirOpenFileWindows( @@ -4729,7 +4752,10 @@ pub fn dirOpenFileWtf16( errdefer w.CloseHandle(handle); const exclusive = switch (flags.lock) { - .none => return .{ .handle = handle }, + .none => return .{ + .handle = handle, + .flags = .{ .nonblocking = false }, + }, .shared => false, .exclusive => true, }; @@ -4752,7 +4778,10 @@ pub fn dirOpenFileWtf16( .ACCESS_VIOLATION => |err| return syscall.ntstatusBug(err), // bad io_status_block pointer else => |status| return syscall.unexpectedNtstatus(status), }; - return .{ .handle = handle }; + return .{ + .handle = handle, + .flags = .{ .nonblocking = false }, + }; } fn dirOpenFileWasi( @@ -4834,7 +4863,7 @@ fn dirOpenFileWasi( if (!flags.allow_directory) { const is_dir = is_dir: { - const stat = fileStat(t, .{ .handle = fd }) catch |err| switch (err) { + const stat = fileStat(t, .{ .handle = fd, .flags = .{ .nonblocking = false } }) catch |err| switch (err) { // The directory-ness is either unknown or unknowable error.Streaming => break :is_dir false, else => |e| return e, @@ -4844,7 +4873,10 @@ fn dirOpenFileWasi( if (is_dir) return error.IsDir; } - return .{ .handle = fd }; + return .{ + .handle = fd, + .flags = .{ .nonblocking = false }, + }; } const dirOpenDir = switch (native_os) { @@ -14390,15 +14422,15 @@ fn spawnPosix(t: *Threaded, options: process.SpawnOptions) process.SpawnError!Sp .pid = pid, .err_fd = err_pipe[0], .stdin = switch (options.stdin) { - .pipe => .{ .handle = stdin_pipe[1] }, + .pipe => .{ .handle = stdin_pipe[1], .flags = .{ .nonblocking = false } }, else => null, }, .stdout = switch (options.stdout) { - .pipe => .{ .handle = stdout_pipe[0] }, + .pipe => .{ .handle = stdout_pipe[0], .flags = .{ .nonblocking = false } }, else => null, }, .stderr = switch (options.stderr) { - .pipe => .{ .handle = stderr_pipe[0] }, + .pipe => .{ .handle = stderr_pipe[0], .flags = .{ .nonblocking = false } }, else => null, }, }; @@ -15052,9 +15084,9 @@ fn processSpawnWindows(userdata: ?*anyopaque, options: process.SpawnOptions) pro return .{ .id = piProcInfo.hProcess, .thread_handle = piProcInfo.hThread, - .stdin = if (g_hChildStd_IN_Wr) |h| .{ .handle = h } else null, - .stdout = if (g_hChildStd_OUT_Rd) |h| .{ .handle = h } else null, - .stderr = if (g_hChildStd_ERR_Rd) |h| .{ .handle = h } else null, + .stdin = if (g_hChildStd_IN_Wr) |h| .{ .handle = h, .flags = .{ .nonblocking = false } } else null, + .stdout = if (g_hChildStd_OUT_Rd) |h| .{ .handle = h, .flags = .{ .nonblocking = true } } else null, + .stderr = if (g_hChildStd_ERR_Rd) |h| .{ .handle = h, .flags = .{ .nonblocking = true } } else null, .request_resource_usage_statistics = options.request_resource_usage_statistics, }; } @@ -16188,6 +16220,7 @@ fn progressParentFile(userdata: ?*anyopaque) std.Progress.ParentFileError!File { .pointer => @ptrFromInt(int), else => return error.UnsupportedOperation, }, + .flags = .{ .nonblocking = false }, }; } diff --git 
a/lib/std/Io/Threaded/test.zig b/lib/std/Io/Threaded/test.zig index ffda1e7601..593580d1f6 100644 --- a/lib/std/Io/Threaded/test.zig +++ b/lib/std/Io/Threaded/test.zig @@ -188,8 +188,8 @@ test "cancel blocked read from pipe" { }), else => { const pipe = try std.Io.Threaded.pipe2(.{}); - read_end = .{ .handle = pipe[0] }; - write_end = .{ .handle = pipe[1] }; + read_end = .{ .handle = pipe[0], .flags = .{ .nonblocking = false } }; + write_end = .{ .handle = pipe[1], .flags = .{ .nonblocking = false } }; }, } defer { diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index d0ee9e556f..0fefc77a32 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -979,6 +979,7 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff if (main_parent == .unused) continue; const file: Io.File = .{ .handle = main_storage.getIpcFd() orelse continue, + .flags = .{ .nonblocking = true }, }; const opt_saved_metadata = findOld(file.handle, old_ipc_metadata_fds, old_ipc_metadata); var bytes_read: usize = 0; diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 5838595fcf..e2a52473f3 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -126,8 +126,8 @@ test "pipe" { const io = testing.io; const fds = try std.Io.Threaded.pipe2(.{}); - const out: Io.File = .{ .handle = fds[0] }; - const in: Io.File = .{ .handle = fds[1] }; + const out: Io.File = .{ .handle = fds[0], .flags = .{ .nonblocking = false } }; + const in: Io.File = .{ .handle = fds[1], .flags = .{ .nonblocking = false } }; try in.writeStreamingAll(io, "hello"); var buf: [16]u8 = undefined; try expect((try out.readStreaming(io, &.{&buf})) == 5); @@ -150,7 +150,10 @@ test "memfd_create" { else => return error.SkipZigTest, } - const file: Io.File = .{ .handle = try posix.memfd_create("test", 0) }; + const file: Io.File = .{ + .handle = try posix.memfd_create("test", 0), + .flags = .{ .nonblocking = false }, + }; defer file.close(io); try file.writePositionalAll(io, "test", 0); From 25aef0dd8786c5b5342eda167a609529efa09353 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 30 Jan 2026 19:10:44 -0800 Subject: [PATCH 61/65] std.Io.Threaded: rework file reading to observe nonblocking flag - batchAwaitAsync does blocking reads with NtReadFile (no APC, no event) when the nonblocking flag is unset, but still takes advantage of APCs when nonblocking flag is set. - batchAwaitConcurrent returns error.ConcurrencyUnavailable when it encounters a file_read_streaming operation on a file in blocking mode. - fileReadStreaming avoids pointlessly checking sync cancelation status when nonblocking flag is set, uses an APC with a done flag, and waits on that value to change in NtDelayExecution before returning. 
- fix incorrect use of NtCancelIoFile (ntdll function prototype was wrong, leading to misuse) --- lib/std/Io/Threaded.zig | 224 +++++++++++++++++++++++------------ lib/std/os/windows/ntdll.zig | 2 +- 2 files changed, 147 insertions(+), 79 deletions(-) diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index 8fdd2a58e9..e9e5ece521 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -1340,8 +1340,6 @@ const AlertableSyscall = struct { } }; -fn noopApc(_: ?*anyopaque, _: *windows.IO_STATUS_BLOCK, _: windows.ULONG) callconv(.winapi) void {} - fn waitForApcOrAlert() void { const infinite_timeout: windows.LARGE_INTEGER = std.math.minInt(windows.LARGE_INTEGER); _ = windows.ntdll.NtDelayExecution(windows.TRUE, &infinite_timeout); @@ -2500,7 +2498,10 @@ fn operate(userdata: ?*anyopaque, operation: Io.Operation) Io.Cancelable!Io.Oper fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Batch.AwaitAsyncError!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); if (is_windows) { - try batchAwaitWindows(b); + batchAwaitWindows(b, false) catch |err| switch (err) { + error.ConcurrencyUnavailable => unreachable, // passed concurrency=false + else => |e| return e, + }; const alertable_syscall = try AlertableSyscall.start(); while (b.pending.head != .none and b.completions.head == .none) waitForApcOrAlert(); alertable_syscall.finish(); @@ -2616,7 +2617,7 @@ fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout }, error.UnsupportedClock => |e| return e, }; - try batchAwaitWindows(b); + try batchAwaitWindows(b, true); while (b.pending.head != .none and b.completions.head == .none) { var delay_interval: windows.LARGE_INTEGER = interval: { const d = deadline orelse break :interval std.math.minInt(windows.LARGE_INTEGER); @@ -2810,7 +2811,8 @@ fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void { while (index != .none) { const pending = &b.storage[index.toIndex()].pending; const context: *WindowsBatchPendingOperationContext = .fromErased(&pending.context); - _ = windows.ntdll.NtCancelIoFile(context.file, &context.iosb); + var cancel_iosb: windows.IO_STATUS_BLOCK = undefined; + _ = windows.ntdll.NtCancelIoFileEx(context.file, &context.iosb, &cancel_iosb); index = pending.node.next; } while (b.pending.head != .none) waitForApcOrAlert(); @@ -2860,30 +2862,94 @@ fn batchApc(apc_context: ?*anyopaque, iosb: *windows.IO_STATUS_BLOCK, _: windows } } -fn batchAwaitWindows(b: *Io.Batch) Io.Cancelable!void { +/// If `concurrency` is false, `error.ConcurrencyUnavailable` is unreachable. 
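+/// Submissions on nonblocking files are issued an APC-completed NtReadFile
+/// and moved to the pending list; submissions on blocking files are performed
+/// synchronously inline (or rejected with `error.ConcurrencyUnavailable`
+/// when `concurrency` is true).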
+fn batchAwaitWindows(b: *Io.Batch, concurrency: bool) error{ Canceled, ConcurrencyUnavailable }!void { var index = b.submissions.head; errdefer b.submissions.head = index; while (index != .none) { const storage = &b.storage[index.toIndex()]; const submission = storage.submission; - errdefer storage.* = .{ .submission = submission }; storage.* = .{ .pending = .{ .node = .{ .prev = b.pending.tail, .next = .none }, .tag = submission.operation, .context = undefined, } }; - const context: *WindowsBatchPendingOperationContext = .fromErased(&storage.pending.context); - switch (submission.operation) { - .file_read_streaming => |o| { - context.file = o.file.handle; - try ntReadFile(o.file.handle, o.data, &batchApc, b, &context.iosb); - }, - } switch (b.pending.tail) { .none => b.pending.head = index, else => |tail_index| b.storage[tail_index.toIndex()].pending.node.next = index, } b.pending.tail = index; + const context: *WindowsBatchPendingOperationContext = .fromErased(&storage.pending.context); + errdefer { + context.iosb.u.Status = .CANCELLED; + batchApc(b, &context.iosb, 0); + } + switch (submission.operation) { + .file_read_streaming => |o| o: { + var data_index: usize = 0; + while (o.data.len - data_index != 0 and o.data[data_index].len == 0) data_index += 1; + if (o.data.len - data_index == 0) { + context.iosb = .{ + .u = .{ .Status = .SUCCESS }, + .Information = 0, + }; + batchApc(b, &context.iosb, 0); + break :o; + } + const buffer = o.data[data_index]; + const short_buffer_len = @min(std.math.maxInt(u32), buffer.len); + + if (o.file.flags.nonblocking) { + context.file = o.file.handle; + switch (windows.ntdll.NtReadFile( + o.file.handle, + null, // event + &batchApc, + b, + &context.iosb, + buffer.ptr, + short_buffer_len, + null, // byte offset + null, // key + )) { + .PENDING, .SUCCESS => {}, + .CANCELLED => unreachable, + else => |status| { + context.iosb.u.Status = status; + batchApc(b, &context.iosb, 0); + }, + } + } else { + if (concurrency) return error.ConcurrencyUnavailable; + + const syscall: Syscall = try .start(); + while (true) switch (windows.ntdll.NtReadFile( + o.file.handle, + null, // event + null, // APC routine + null, // APC context + &context.iosb, + buffer.ptr, + short_buffer_len, + null, // byte offset + null, // key + )) { + .PENDING => unreachable, // unrecoverable: wrong File nonblocking flag + .CANCELLED => { + try syscall.checkCancel(); + continue; + }, + else => |status| { + syscall.finish(); + + context.iosb.u.Status = status; + batchApc(b, &context.iosb, 0); + break; + }, + }; + } + }, + } index = submission.node.next; } b.submissions = .{ .head = .none, .tail = .none }; @@ -8846,28 +8912,76 @@ fn fileReadStreamingPosix(file: File, data: []const []u8) File.ReadStreamingErro } fn fileReadStreamingWindows(file: File, data: []const []u8) File.ReadStreamingError!usize { - var io_status_block: windows.IO_STATUS_BLOCK = .{ - .u = .{ .Status = .PENDING }, - .Information = undefined, - }; - try ntReadFile(file.handle, data, &noopApc, null, &io_status_block); + var index: usize = 0; + while (data.len - index != 0 and data[index].len == 0) index += 1; + if (data.len - index == 0) return 0; + const buffer = data[index]; + const short_buffer_len = @min(std.math.maxInt(u32), buffer.len); - while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { - // Once we get here we must not return from the function until the - // operation completes, thereby releasing reference to io_status_block. 
- const alertable_syscall = AlertableSyscall.start() catch |err| switch (err) { - error.Canceled => |e| { - _ = windows.ntdll.NtCancelIoFile(file.handle, &io_status_block); - while (@atomicLoad(windows.NTSTATUS, &io_status_block.u.Status, .acquire) == .PENDING) { - waitForApcOrAlert(); - } - return e; + var iosb: windows.IO_STATUS_BLOCK = undefined; + + if (!file.flags.nonblocking) { + const syscall: Syscall = try .start(); + while (true) switch (windows.ntdll.NtReadFile( + file.handle, + null, // event + null, // APC routine + null, // APC context + &iosb, + buffer.ptr, + short_buffer_len, + null, // byte offset + null, // key + )) { + .PENDING => unreachable, // unrecoverable: wrong File nonblocking flag + .CANCELLED => { + try syscall.checkCancel(); + continue; + }, + else => |status| { + syscall.finish(); + iosb.u.Status = status; + return ntReadFileResult(&iosb); }, }; - waitForApcOrAlert(); - alertable_syscall.finish(); } - return ntReadFileResult(&io_status_block); + + var done: bool = false; + + switch (windows.ntdll.NtReadFile( + file.handle, + null, // event + flagApc, + &done, // APC context + &iosb, + buffer.ptr, + short_buffer_len, + null, // byte offset + null, // key + )) { + // We must wait for the APC routine. + .PENDING, .SUCCESS => while (!done) { + // Once we get here we must not return from the function until the + // operation completes, thereby releasing reference to io_status_block. + const alertable_syscall = AlertableSyscall.start() catch |err| switch (err) { + error.Canceled => |e| { + var cancel_iosb: windows.IO_STATUS_BLOCK = undefined; + _ = windows.ntdll.NtCancelIoFileEx(file.handle, &iosb, &cancel_iosb); + while (!done) waitForApcOrAlert(); + return e; + }, + }; + waitForApcOrAlert(); + alertable_syscall.finish(); + }, + else => |status| iosb.u.Status = status, + } + return ntReadFileResult(&iosb); +} + +fn flagApc(userdata: ?*anyopaque, _: *windows.IO_STATUS_BLOCK, _: windows.ULONG) callconv(.winapi) void { + const flag: *bool = @ptrCast(userdata); + flag.* = true; } fn ntReadFileResult(io_status_block: *const windows.IO_STATUS_BLOCK) !usize { @@ -8883,52 +8997,6 @@ fn ntReadFileResult(io_status_block: *const windows.IO_STATUS_BLOCK) !usize { } } -fn ntReadFile( - handle: windows.HANDLE, - data: []const []u8, - apcRoutine: ?*const windows.IO_APC_ROUTINE, - apc_context: ?*anyopaque, - iosb: *windows.IO_STATUS_BLOCK, -) Io.Cancelable!void { - var index: usize = 0; - while (index < data.len and data[index].len == 0) index += 1; - if (index == data.len) { - iosb.* = .{ .u = .{ .Status = .SUCCESS }, .Information = 0 }; - if (apcRoutine) |routine| if (routine != &noopApc) { - _ = windows.ntdll.NtQueueApcThread(windows.current_process, routine, apc_context, iosb, null); - }; - return; - } - const buffer = data[index]; - - const syscall: Syscall = try .start(); - while (true) switch (windows.ntdll.NtReadFile( - handle, - null, // event - apcRoutine, - apc_context, - iosb, - buffer.ptr, - @min(std.math.maxInt(u32), buffer.len), - null, // byte offset - null, // key - )) { - .PENDING => { - syscall.finish(); - return; - }, - .CANCELLED => { - try syscall.checkCancel(); - continue; - }, - else => |status| { - syscall.finish(); - iosb.u.Status = status; - return; - }, - }; -} - fn fileReadPositionalPosix(file: File, data: []const []u8, offset: u64) File.ReadPositionalError!usize { if (!have_preadv) @compileError("TODO implement fileReadPositionalPosix for cursed operating systems that don't support preadv (it's only Haiku)"); diff --git a/lib/std/os/windows/ntdll.zig 
b/lib/std/os/windows/ntdll.zig index 195a457d3b..d9e68e54f9 100644 --- a/lib/std/os/windows/ntdll.zig +++ b/lib/std/os/windows/ntdll.zig @@ -614,5 +614,5 @@ pub extern "ntdll" fn NtCancelIoFileEx( pub extern "ntdll" fn NtCancelIoFile( FileHandle: HANDLE, - IoRequestToCancel: ?*IO_STATUS_BLOCK, + IoStatusBlock: *IO_STATUS_BLOCK, ) callconv(.winapi) NTSTATUS; From b6f4bb91c41bb78276ad54725ae5c61a160defd6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 30 Jan 2026 11:45:08 -0800 Subject: [PATCH 62/65] std.Io: add documentation to Batch --- lib/std/Io.zig | 34 ++++++++++++++++++++++++++++++---- lib/std/Io/Threaded.zig | 2 +- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/lib/std/Io.zig b/lib/std/Io.zig index ca77a9836a..c3ba3575e4 100644 --- a/lib/std/Io.zig +++ b/lib/std/Io.zig @@ -150,7 +150,7 @@ pub const VTable = struct { futexWake: *const fn (?*anyopaque, ptr: *const u32, max_waiters: u32) void, operate: *const fn (?*anyopaque, Operation) Cancelable!Operation.Result, - batchAwaitAsync: *const fn (?*anyopaque, *Batch) Batch.AwaitAsyncError!void, + batchAwaitAsync: *const fn (?*anyopaque, *Batch) Cancelable!void, batchAwaitConcurrent: *const fn (?*anyopaque, *Batch, Timeout) Batch.AwaitConcurrentError!void, batchCancel: *const fn (?*anyopaque, *Batch) void, @@ -359,7 +359,7 @@ pub fn operate(io: Io, operation: Operation) Cancelable!Operation.Result { /// complete. /// /// This is a low-level abstraction based on `Operation`. For a higher -/// level API that operates on `Future`, see `Select`. +/// level API that operates on `Future`, see `Select` and `Group`. pub const Batch = struct { storage: []Operation.Storage, unused: Operation.List, @@ -422,6 +422,11 @@ pub const Batch = struct { b.submissions.tail = .fromIndex(index); } + /// After calling `awaitAsync`, `awaitConcurrent`, or `cancel`, this + /// function iterates over the completed operations. + /// + /// Each completion returned from this function dequeues from the `Batch`. + /// It is not required to dequeue all completions before awaiting again. pub fn next(b: *Batch) ?struct { index: u32, result: Operation.Result } { const index = b.completions.head; if (index == .none) return null; @@ -441,16 +446,37 @@ pub const Batch = struct { return .{ .index = index.toIndex(), .result = completion.result }; } - pub const AwaitAsyncError = Cancelable; - pub fn awaitAsync(b: *Batch, io: Io) AwaitAsyncError!void { + /// Waits for at least one of the submitted operations to complete. After + /// this function returns the completed operations can be iterated with + /// `next`. + /// + /// This function provides opportunity for the implementation to introduce + /// concurrency into the batched operations, but unlike `awaitConcurrent`, + /// does not require it, and therefore cannot fail with + /// `error.ConcurrencyUnavailable`. + pub fn awaitAsync(b: *Batch, io: Io) Cancelable!void { return io.vtable.batchAwaitAsync(io.userdata, b); } pub const AwaitConcurrentError = ConcurrentError || Cancelable || Timeout.Error; + + /// Waits for at least one of the submitted operations to complete. After + /// this function returns the completed operations can be iterated with + /// `next`. + /// + /// Unlike `awaitAsync`, this function requires the implementation to + /// perform the operations concurrently and therefore can fail with + /// `error.ConcurrencyUnavailable`. 
pub fn awaitConcurrent(b: *Batch, io: Io, timeout: Timeout) AwaitConcurrentError!void { return io.vtable.batchAwaitConcurrent(io.userdata, b, timeout); } + /// Requests all pending operations to be interrupted, then waits for all + /// pending operations to complete. After this returns, the `Batch` is in a + /// well-defined state, ready to be iterated with `next`. Successfully + /// canceled operations will be absent from the iteration. Some operations + /// may have successfully completed regardless of the cancel request and + /// will appear in the iteration. pub fn cancel(b: *Batch, io: Io) void { return io.vtable.batchCancel(io.userdata, b); } diff --git a/lib/std/Io/Threaded.zig b/lib/std/Io/Threaded.zig index e9e5ece521..f9002567a2 100644 --- a/lib/std/Io/Threaded.zig +++ b/lib/std/Io/Threaded.zig @@ -2495,7 +2495,7 @@ fn operate(userdata: ?*anyopaque, operation: Io.Operation) Io.Cancelable!Io.Oper } } -fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Batch.AwaitAsyncError!void { +fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Cancelable!void { const t: *Threaded = @ptrCast(@alignCast(userdata)); if (is_windows) { batchAwaitWindows(b, false) catch |err| switch (err) { From 43866f743978ef6cf4755760cc96d4c00665fde3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 30 Jan 2026 21:57:51 -0800 Subject: [PATCH 63/65] build.zig: bump max_rss encountered error: memory usage peaked at 0.66GB (656060416 bytes), exceeding the declared upper bound of 0.64GB (639565414 bytes) --- build.zig | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/build.zig b/build.zig index 3ed237fa5e..84cbba38bd 100644 --- a/build.zig +++ b/build.zig @@ -498,23 +498,7 @@ pub fn build(b: *std.Build) !void { .skip_llvm = skip_llvm, .skip_libc = true, .no_builtin = true, - .max_rss = switch (b.graph.host.result.os.tag) { - .freebsd => 800_000_000, - .linux => switch (b.graph.host.result.cpu.arch) { - .aarch64 => 639_565_414, - .loongarch64 => 598_884_352, - .powerpc64le => 597_897_625, - .riscv64 => 636_429_516, - .s390x => 574_166_630, - .x86_64 => 978_463_129, - else => 900_000_000, - }, - .macos => switch (b.graph.host.result.cpu.arch) { - .aarch64 => 701_413_785, - else => 800_000_000, - }, - else => 900_000_000, - }, + .max_rss = 900_000_000, })); test_modules_step.dependOn(tests.addModuleTests(b, .{ From 14e1e5f6d872a7a8a8d98fa48cc6f41f002a7d1f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 30 Jan 2026 22:00:02 -0800 Subject: [PATCH 64/65] std: IoUring test handles EINTR --- lib/std/os/linux/IoUring/test.zig | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/std/os/linux/IoUring/test.zig b/lib/std/os/linux/IoUring/test.zig index ac2d5ddea5..644b9b7c77 100644 --- a/lib/std/os/linux/IoUring/test.zig +++ b/lib/std/os/linux/IoUring/test.zig @@ -2736,8 +2736,9 @@ fn send(sockfd: posix.socket_t, buf: []const u8, flags: u32) !usize { } fn connect(sock: posix.socket_t, sock_addr: *const posix.sockaddr, len: posix.socklen_t) !void { - switch (posix.errno(posix.system.connect(sock, sock_addr, len))) { + while (true) switch (posix.errno(posix.system.connect(sock, sock_addr, len))) { .SUCCESS => return, + .INTR => continue, else => return error.ConnectFailed, - } + }; } From 9646801bed8f0f36b59deecff32ef02868ed72f2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 30 Jan 2026 22:06:33 -0800 Subject: [PATCH 65/65] std: fix Preopens compilation error --- lib/std/process/Preopens.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 
deletion(-) diff --git a/lib/std/process/Preopens.zig b/lib/std/process/Preopens.zig index 8223c29f83..3baf696ee9 100644 --- a/lib/std/process/Preopens.zig +++ b/lib/std/process/Preopens.zig @@ -29,7 +29,10 @@ pub fn get(p: *const Preopens, name: []const u8) ?Resource { switch (native_os) { .wasi => { const index = p.map.getIndex(name) orelse return null; - if (index <= 2) return .{ .file = .{ .handle = @intCast(index) } }; + if (index <= 2) return .{ .file = .{ + .handle = @intCast(index), + .flags = .{ .nonblocking = false }, + } }; return .{ .dir = .{ .handle = @intCast(index) } }; }, else => {
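Taken together, the reworked `Io.Batch` surface reads like this at a call site. A sketch based only on the signatures introduced in this series (`readBoth` and its buffer handling are illustrative, not code from the patches):

    const std = @import("std");
    const Io = std.Io;

    /// Read from two files, letting the Io implementation overlap the reads
    /// when it can; `awaitAsync` tolerates implementations that cannot.
    fn readBoth(io: Io, file_a: Io.File, file_b: Io.File, buf_a: []u8, buf_b: []u8) !void {
        var storage: [2]Io.Operation.Storage = undefined;
        var batch: Io.Batch = .init(&storage);
        // Safe to defer unconditionally; interrupts anything still pending.
        defer batch.cancel(io);

        var vec_a: [1][]u8 = .{buf_a};
        var vec_b: [1][]u8 = .{buf_b};
        _ = batch.add(.{ .file_read_streaming = .{ .file = file_a, .data = &vec_a } });
        _ = batch.add(.{ .file_read_streaming = .{ .file = file_b, .data = &vec_b } });

        var remaining: u32 = 2;
        while (remaining != 0) {
            // Blocks until at least one submitted operation completes.
            try batch.awaitAsync(io);
            // Drain completions; each call to `next` dequeues one of them.
            while (batch.next()) |completion| {
                remaining -= 1;
                // Only reads were submitted, so the result union holds a
                // `file_read_streaming` payload.
                const n = try completion.result.file_read_streaming;
                std.debug.print("slot {d}: read {d} bytes\n", .{ completion.index, n });
            }
        }
    }

Swapping `awaitAsync` for `awaitConcurrent(io, timeout)` demands true concurrency from the implementation, adding `error.Timeout` and `error.ConcurrencyUnavailable` to the failure set, per the documentation added in patch 62.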