compiler: fix most compilation errors from std.fs changes

This commit is contained in:
Andrew Kelley
2025-12-14 23:35:33 -08:00
parent 4458e423bf
commit 16bd2e137e
23 changed files with 177 additions and 156 deletions
+1 -1
View File
@@ -24,7 +24,7 @@ pub const Message = struct {
@"fatal error",
};
pub fn write(msg: Message, w: *std.Io.Writer, config: std.Io.tty.Config, details: bool) std.Io.tty.Config.SetColorError!void {
pub fn write(msg: Message, w: *std.Io.Writer, config: std.Io.File.Writer.Mode, details: bool) std.Io.tty.Config.SetColorError!void {
try config.setColor(w, .bold);
if (msg.location) |loc| {
try w.print("{s}:{d}:{d}: ", .{ loc.path, loc.line_no, loc.col });
+4 -2
View File
@@ -1065,11 +1065,13 @@ fn fatalNotFound(pp: *Preprocessor, tok: TokenWithExpansionLocs, filename: []con
fn verboseLog(pp: *Preprocessor, raw: RawToken, comptime fmt: []const u8, args: anytype) void {
@branchHint(.cold);
const source = pp.comp.getSource(raw.source);
const comp = pp.comp;
const io = comp.io;
const source = comp.getSource(raw.source);
const line_col = source.lineCol(.{ .id = raw.source, .line = raw.line, .byte_offset = raw.start });
var stderr_buf: [4096]u8 = undefined;
var stderr = Io.File.stderr().writer(&stderr_buf);
var stderr = Io.File.stderr().writer(io, &stderr_buf);
const w = &stderr.interface;
w.print("{s}:{d}:{d}: ", .{ source.path, line_col.line_no, line_col.col }) catch return;
+2 -1
View File
@@ -106,6 +106,7 @@ pub fn statFile(p: Path, io: Io, sub_path: []const u8) !Io.Dir.Stat {
pub fn atomicFile(
p: Path,
io: Io,
sub_path: []const u8,
options: Io.Dir.AtomicFileOptions,
buf: *[fs.max_path_bytes]u8,
@@ -115,7 +116,7 @@ pub fn atomicFile(
p.sub_path, sub_path,
}) catch return error.NameTooLong;
};
return p.root_dir.handle.atomicFile(joined_path, options);
return p.root_dir.handle.atomicFile(io, joined_path, options);
}
pub fn access(p: Path, io: Io, sub_path: []const u8, flags: Io.Dir.AccessOptions) !void {
+1 -1
View File
@@ -1645,7 +1645,7 @@ pub fn copyFile(
.permissions = permissions,
.write_buffer = &buffer,
});
defer atomic_file.deinit(io);
defer atomic_file.deinit();
_ = atomic_file.file_writer.interface.sendFileAll(&file_reader, .unlimited) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
+2 -1
View File
@@ -202,7 +202,7 @@ pub const AddCertsFromDirError = AddCertsFromFilePathError;
pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp, iterable_dir: Io.Dir) AddCertsFromDirError!void {
var it = iterable_dir.iterate();
while (try it.next()) |entry| {
while (try it.next(io)) |entry| {
switch (entry.kind) {
.file, .sym_link => {},
else => continue,
@@ -243,6 +243,7 @@ pub fn addCertsFromFilePath(
pub const AddCertsFromFileError = Allocator.Error ||
Io.File.Reader.Error ||
Io.File.Reader.SizeError ||
ParseCertError ||
std.base64.Error ||
error{ CertificateAuthorityBundleTooBig, MissingEndCertificateMarker, Streaming };
+1 -1
View File
@@ -1598,7 +1598,7 @@ test "AtomicFile" {
{
var buffer: [100]u8 = undefined;
var af = try ctx.dir.atomicFile(test_out_file, .{ .write_buffer = &buffer });
var af = try ctx.dir.atomicFile(io, test_out_file, .{ .write_buffer = &buffer });
defer af.deinit();
try af.file_writer.interface.writeAll(test_content);
try af.finish();
+1 -1
View File
@@ -648,7 +648,7 @@ pub fn printAstErrorsToStderr(gpa: Allocator, io: Io, tree: Ast, path: []const u
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
error_bundle.renderToStderr(io, .{}, color);
return error_bundle.renderToStderr(io, .{}, color);
}
pub fn putAstErrorsIntoBundle(
+2 -2
View File
@@ -162,14 +162,14 @@ pub const RenderOptions = struct {
include_log_text: bool = true,
};
pub const RenderToStderrError = Io.Cancelable || Io.File.Writer.Mode.SetColorError;
pub const RenderToStderrError = Io.Cancelable || Io.File.Writer.Error;
pub fn renderToStderr(eb: ErrorBundle, io: Io, options: RenderOptions, color: std.zig.Color) RenderToStderrError!void {
var buffer: [256]u8 = undefined;
const stderr = try io.lockStderrWriter(&buffer);
defer io.unlockStderrWriter();
renderToWriter(eb, options, &stderr.interface, color.getTtyConf(stderr.mode)) catch |err| switch (err) {
error.WriteFailed => return stderr.interface.err.?,
error.WriteFailed => return stderr.err.?,
else => |e| return e,
};
}
+11 -8
View File
@@ -1,3 +1,11 @@
const LibCDirs = @This();
const builtin = @import("builtin");
const std = @import("../std.zig");
const Io = std.Io;
const LibCInstallation = std.zig.LibCInstallation;
const Allocator = std.mem.Allocator;
libc_include_dir_list: []const []const u8,
libc_installation: ?*const LibCInstallation,
libc_framework_dir_list: []const []const u8,
@@ -14,6 +22,7 @@ pub const DarwinSdkLayout = enum {
pub fn detect(
arena: Allocator,
io: Io,
zig_lib_dir: []const u8,
target: *const std.Target,
is_native_abi: bool,
@@ -38,7 +47,7 @@ pub fn detect(
// using the system libc installation.
if (is_native_abi and !target.isMinGW()) {
const libc = try arena.create(LibCInstallation);
libc.* = LibCInstallation.findNative(.{ .allocator = arena, .target = target }) catch |err| switch (err) {
libc.* = LibCInstallation.findNative(arena, io, .{ .target = target }) catch |err| switch (err) {
error.CCompilerExitCode,
error.CCompilerCrashed,
error.CCompilerCannotFindHeaders,
@@ -75,7 +84,7 @@ pub fn detect(
if (use_system_abi) {
const libc = try arena.create(LibCInstallation);
libc.* = try LibCInstallation.findNative(.{ .allocator = arena, .verbose = true, .target = target });
libc.* = try LibCInstallation.findNative(arena, io, .{ .verbose = true, .target = target });
return detectFromInstallation(arena, target, libc);
}
@@ -265,9 +274,3 @@ fn libCGenericName(target: *const std.Target) [:0]const u8 {
=> unreachable,
}
}
const LibCDirs = @This();
const builtin = @import("builtin");
const std = @import("../std.zig");
const LibCInstallation = std.zig.LibCInstallation;
const Allocator = std.mem.Allocator;
+3 -3
View File
@@ -204,7 +204,7 @@ pub fn findNative(gpa: Allocator, io: Io, args: FindNativeOptions) FindError!Lib
try self.findNativeIncludeDirWindows(gpa, io, args, sdk);
try self.findNativeCrtDirWindows(gpa, io, args.target, sdk);
} else if (is_haiku) {
try self.findNativeIncludeDirPosix(args);
try self.findNativeIncludeDirPosix(gpa, io, args);
try self.findNativeGccDirHaiku(gpa, io, args);
self.crt_dir = try gpa.dupeZ(u8, "/system/develop/lib");
} else if (builtin.target.os.tag == .illumos) {
@@ -213,7 +213,7 @@ pub fn findNative(gpa: Allocator, io: Io, args: FindNativeOptions) FindError!Lib
self.sys_include_dir = try gpa.dupeZ(u8, "/usr/include");
self.crt_dir = try gpa.dupeZ(u8, "/usr/lib/64");
} else if (std.process.can_spawn) {
try self.findNativeIncludeDirPosix(args);
try self.findNativeIncludeDirPosix(gpa, io, args);
switch (builtin.target.os.tag) {
.freebsd, .netbsd, .openbsd, .dragonfly => self.crt_dir = try gpa.dupeZ(u8, "/usr/lib"),
.linux => try self.findNativeCrtDirPosix(gpa, io, args),
@@ -335,7 +335,7 @@ fn findNativeIncludeDirPosix(self: *LibCInstallation, gpa: Allocator, io: Io, ar
defer search_dir.close(io);
if (self.include_dir == null) {
if (search_dir.access(include_dir_example_file, .{})) |_| {
if (search_dir.access(io, include_dir_example_file, .{})) |_| {
self.include_dir = try gpa.dupeZ(u8, search_path);
} else |err| switch (err) {
error.FileNotFound => {},
+1 -1
View File
@@ -343,7 +343,7 @@ pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
}
// `make_path` matters because the dir hasn't actually been created yet.
var af = try root_dir.atomicFile(sub_path, .{ .make_path = true, .write_buffer = &.{} });
var af = try root_dir.atomicFile(io, sub_path, .{ .make_path = true, .write_buffer = &.{} });
defer af.deinit();
try af.file_writer.interface.writeAll(file.source.?);
af.finish() catch |err| switch (err) {
+68 -70
View File
@@ -771,8 +771,8 @@ pub const Directories = struct {
const zig_lib: Cache.Directory = d: {
if (override_zig_lib) |path| break :d openUnresolved(arena, io, cwd, path, .@"zig lib");
if (wasi) break :d openWasiPreopen(wasi_preopens, "/lib");
break :d introspect.findZigLibDirFromSelfExe(arena, cwd, self_exe_path) catch |err| {
fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
break :d introspect.findZigLibDirFromSelfExe(arena, io, cwd, self_exe_path) catch |err| {
fatal("unable to find zig installation directory '{s}': {t}", .{ self_exe_path, err });
};
};
@@ -780,7 +780,7 @@ pub const Directories = struct {
if (override_global_cache) |path| break :d openUnresolved(arena, io, cwd, path, .@"global cache");
if (wasi) break :d openWasiPreopen(wasi_preopens, "/cache");
const path = introspect.resolveGlobalCacheDir(arena) catch |err| {
fatal("unable to resolve zig cache directory: {s}", .{@errorName(err)});
fatal("unable to resolve zig cache directory: {t}", .{err});
};
break :d openUnresolved(arena, io, cwd, path, .@"global cache");
};
@@ -789,7 +789,7 @@ pub const Directories = struct {
.override => |path| openUnresolved(arena, io, cwd, path, .@"local cache"),
.search => d: {
const maybe_path = introspect.resolveSuitableLocalCacheDir(arena, io, cwd) catch |err| {
fatal("unable to resolve zig cache directory: {s}", .{@errorName(err)});
fatal("unable to resolve zig cache directory: {t}", .{err});
};
const path = maybe_path orelse break :d global_cache;
break :d openUnresolved(arena, io, cwd, path, .@"local cache");
@@ -919,8 +919,8 @@ pub const CrtFile = struct {
lock: Cache.Lock,
full_object_path: Cache.Path,
pub fn deinit(self: *CrtFile, gpa: Allocator) void {
self.lock.release();
pub fn deinit(self: *CrtFile, gpa: Allocator, io: Io) void {
self.lock.release(io);
gpa.free(self.full_object_path.sub_path);
self.* = undefined;
}
@@ -1317,7 +1317,7 @@ pub const CObject = struct {
};
/// Returns if there was failure.
pub fn clearStatus(self: *CObject, gpa: Allocator) bool {
pub fn clearStatus(self: *CObject, gpa: Allocator, io: Io) bool {
switch (self.status) {
.new => return false,
.failure, .failure_retryable => {
@@ -1326,15 +1326,15 @@ pub const CObject = struct {
},
.success => |*success| {
gpa.free(success.object_path.sub_path);
success.lock.release();
success.lock.release(io);
self.status = .new;
return false;
},
}
}
pub fn destroy(self: *CObject, gpa: Allocator) void {
_ = self.clearStatus(gpa);
pub fn destroy(self: *CObject, gpa: Allocator, io: Io) void {
_ = self.clearStatus(gpa, io);
gpa.destroy(self);
}
};
@@ -1364,7 +1364,7 @@ pub const Win32Resource = struct {
},
/// Returns true if there was failure.
pub fn clearStatus(self: *Win32Resource, gpa: Allocator) bool {
pub fn clearStatus(self: *Win32Resource, gpa: Allocator, io: Io) bool {
switch (self.status) {
.new => return false,
.failure, .failure_retryable => {
@@ -1373,15 +1373,15 @@ pub const Win32Resource = struct {
},
.success => |*success| {
gpa.free(success.res_path);
success.lock.release();
success.lock.release(io);
self.status = .new;
return false;
},
}
}
pub fn destroy(self: *Win32Resource, gpa: Allocator) void {
_ = self.clearStatus(gpa);
pub fn destroy(self: *Win32Resource, gpa: Allocator, io: Io) void {
_ = self.clearStatus(gpa, io);
gpa.destroy(self);
}
};
@@ -1610,9 +1610,9 @@ const CacheUse = union(CacheMode) {
/// Prevents other processes from clobbering files in the output directory.
lock: ?Cache.Lock,
fn releaseLock(whole: *Whole) void {
fn releaseLock(whole: *Whole, io: Io) void {
if (whole.lock) |*lock| {
lock.release();
lock.release(io);
whole.lock = null;
}
}
@@ -1634,7 +1634,7 @@ const CacheUse = union(CacheMode) {
},
.whole => |whole| {
assert(whole.tmp_artifact_directory == null);
whole.releaseLock();
whole.releaseLock(io);
},
}
}
@@ -1903,13 +1903,17 @@ pub const CreateDiagnostic = union(enum) {
return error.CreateFail;
}
};
pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, options: CreateOptions) error{
pub const CreateError = error{
OutOfMemory,
Canceled,
Unexpected,
CurrentWorkingDirectoryUnlinked,
/// An error has been stored to `diag`.
CreateFail,
}!*Compilation {
};
pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic, options: CreateOptions) CreateError!*Compilation {
const output_mode = options.config.output_mode;
const is_dyn_lib = switch (output_mode) {
.Obj, .Exe => false,
@@ -1957,6 +1961,7 @@ pub fn create(gpa: Allocator, arena: Allocator, io: Io, diag: *CreateDiagnostic,
const libc_dirs = std.zig.LibCDirs.detect(
arena,
io,
options.dirs.zig_lib.path.?,
target,
options.root_mod.resolved_target.is_native_abi,
@@ -2701,7 +2706,7 @@ pub fn destroy(comp: *Compilation) void {
if (comp.bin_file) |lf| lf.destroy();
if (comp.zcu) |zcu| zcu.deinit();
comp.cache_use.deinit();
comp.cache_use.deinit(io);
for (&comp.work_queues) |*work_queue| work_queue.deinit(gpa);
comp.c_object_work_queue.deinit(gpa);
@@ -2714,36 +2719,36 @@ pub fn destroy(comp: *Compilation) void {
var it = comp.crt_files.iterator();
while (it.next()) |entry| {
gpa.free(entry.key_ptr.*);
entry.value_ptr.deinit(gpa);
entry.value_ptr.deinit(gpa, io);
}
comp.crt_files.deinit(gpa);
}
if (comp.libcxx_static_lib) |*crt_file| crt_file.deinit(gpa);
if (comp.libcxxabi_static_lib) |*crt_file| crt_file.deinit(gpa);
if (comp.libunwind_static_lib) |*crt_file| crt_file.deinit(gpa);
if (comp.tsan_lib) |*crt_file| crt_file.deinit(gpa);
if (comp.ubsan_rt_lib) |*crt_file| crt_file.deinit(gpa);
if (comp.ubsan_rt_obj) |*crt_file| crt_file.deinit(gpa);
if (comp.zigc_static_lib) |*crt_file| crt_file.deinit(gpa);
if (comp.compiler_rt_lib) |*crt_file| crt_file.deinit(gpa);
if (comp.compiler_rt_obj) |*crt_file| crt_file.deinit(gpa);
if (comp.compiler_rt_dyn_lib) |*crt_file| crt_file.deinit(gpa);
if (comp.fuzzer_lib) |*crt_file| crt_file.deinit(gpa);
if (comp.libcxx_static_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.libcxxabi_static_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.libunwind_static_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.tsan_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.ubsan_rt_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.ubsan_rt_obj) |*crt_file| crt_file.deinit(gpa, io);
if (comp.zigc_static_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.compiler_rt_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.compiler_rt_obj) |*crt_file| crt_file.deinit(gpa, io);
if (comp.compiler_rt_dyn_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.fuzzer_lib) |*crt_file| crt_file.deinit(gpa, io);
if (comp.glibc_so_files) |*glibc_file| {
glibc_file.deinit(gpa);
glibc_file.deinit(gpa, io);
}
if (comp.freebsd_so_files) |*freebsd_file| {
freebsd_file.deinit(gpa);
freebsd_file.deinit(gpa, io);
}
if (comp.netbsd_so_files) |*netbsd_file| {
netbsd_file.deinit(gpa);
netbsd_file.deinit(gpa, io);
}
for (comp.c_object_table.keys()) |key| {
key.destroy(gpa);
key.destroy(gpa, io);
}
comp.c_object_table.deinit(gpa);
@@ -2753,7 +2758,7 @@ pub fn destroy(comp: *Compilation) void {
comp.failed_c_objects.deinit(gpa);
for (comp.win32_resource_table.keys()) |key| {
key.destroy(gpa);
key.destroy(gpa, io);
}
comp.win32_resource_table.deinit(gpa);
@@ -2906,7 +2911,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
.whole => |whole| {
assert(comp.bin_file == null);
// We are about to obtain this lock, so here we give other processes a chance first.
whole.releaseLock();
whole.releaseLock(io);
man = comp.cache_parent.obtain();
whole.cache_manifest = &man;
@@ -3092,17 +3097,12 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) UpdateE
}
if (build_options.enable_debug_extensions and comp.verbose_intern_pool) {
std.debug.print("intern pool stats for '{s}':\n", .{
comp.root_name,
});
zcu.intern_pool.dump();
std.debug.print("intern pool stats for '{s}':\n", .{comp.root_name});
zcu.intern_pool.dump(io);
}
if (build_options.enable_debug_extensions and comp.verbose_generic_instances) {
std.debug.print("generic instances for '{s}:0x{x}':\n", .{
comp.root_name,
@intFromPtr(zcu),
});
std.debug.print("generic instances for '{s}:0x{x}':\n", .{ comp.root_name, @intFromPtr(zcu) });
zcu.intern_pool.dumpGenericInstances(gpa);
}
}
@@ -3680,6 +3680,7 @@ pub fn saveState(comp: *Compilation) !void {
const lf = comp.bin_file orelse return;
const gpa = comp.gpa;
const io = comp.io;
var bufs = std.array_list.Managed([]const u8).init(gpa);
defer bufs.deinit();
@@ -3900,7 +3901,7 @@ pub fn saveState(comp: *Compilation) !void {
// Using an atomic file prevents a crash or power failure from corrupting
// the previous incremental compilation state.
var write_buffer: [1024]u8 = undefined;
var af = try lf.emit.root_dir.handle.atomicFile(basename, .{ .write_buffer = &write_buffer });
var af = try lf.emit.root_dir.handle.atomicFile(io, basename, .{ .write_buffer = &write_buffer });
defer af.deinit();
try af.file_writer.interface.writeVecAll(bufs.items);
try af.finish();
@@ -4258,8 +4259,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {
// However, we haven't reported any such error.
// This is a compiler bug.
print_ctx: {
const stderr = try io.lockStderrWriter(&.{});
defer io.unlockStderrWriter();
const stderr = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
const w = &stderr.interface;
w.writeAll("referenced transitive analysis errors, but none actually emitted\n") catch break :print_ctx;
w.print("{f} [transitive failure]\n", .{zcu.fmtAnalUnit(failed_unit)}) catch break :print_ctx;
@@ -5219,13 +5220,10 @@ fn processOneJob(
}
}
fn createDepFile(
comp: *Compilation,
depfile: []const u8,
binfile: Cache.Path,
) anyerror!void {
fn createDepFile(comp: *Compilation, depfile: []const u8, binfile: Cache.Path) anyerror!void {
const io = comp.io;
var buf: [4096]u8 = undefined;
var af = try Io.Dir.cwd().atomicFile(depfile, .{ .write_buffer = &buf });
var af = try Io.Dir.cwd().atomicFile(io, depfile, .{ .write_buffer = &buf });
defer af.deinit();
comp.writeDepFile(binfile, &af.file_writer.interface) catch return af.file_writer.err.?;
@@ -5280,13 +5278,8 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
for (&[_][]const u8{ "docs/main.js", "docs/index.html" }) |sub_path| {
const basename = fs.path.basename(sub_path);
comp.dirs.zig_lib.handle.copyFile(sub_path, out_dir, basename, .{}) catch |err| {
comp.lockAndSetMiscFailure(.docs_copy, "unable to copy {s}: {s}", .{
sub_path,
@errorName(err),
});
return;
};
comp.dirs.zig_lib.handle.copyFile(sub_path, out_dir, basename, io, .{}) catch |err|
return comp.lockAndSetMiscFailure(.docs_copy, "unable to copy {s}: {t}", .{ sub_path, err });
}
var tar_file = out_dir.createFile(io, "sources.tar", .{}) catch |err| {
@@ -5350,7 +5343,7 @@ fn docsCopyModule(
var buffer: [1024]u8 = undefined;
while (try walker.next()) |entry| {
while (try walker.next(io)) |entry| {
switch (entry.kind) {
.file => {
if (!std.mem.endsWith(u8, entry.basename, ".zig")) continue;
@@ -5505,7 +5498,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
try comp.updateSubCompilation(sub_compilation, .docs_wasm, prog_node);
var crt_file = try sub_compilation.toCrtFile();
defer crt_file.deinit(gpa);
defer crt_file.deinit(gpa, io);
const docs_bin_file = crt_file.full_object_path;
assert(docs_bin_file.sub_path.len > 0); // emitted binary is not a directory
@@ -5521,10 +5514,12 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) SubU
};
defer out_dir.close(io);
crt_file.full_object_path.root_dir.handle.copyFile(
Io.Dir.copyFile(
crt_file.full_object_path.root_dir.handle,
crt_file.full_object_path.sub_path,
out_dir,
"main.wasm",
io,
.{},
) catch |err| {
comp.lockAndSetMiscFailure(.docs_copy, "unable to copy '{f}' to '{f}': {t}", .{
@@ -5758,7 +5753,7 @@ pub fn translateC(
try argv.appendSlice(comp.global_cc_argv);
try argv.appendSlice(owner_mod.cc_argv);
try argv.appendSlice(&.{ source_path, "-o", translated_path });
if (comp.verbose_cimport) dumpArgv(io, argv.items);
if (comp.verbose_cimport) try dumpArgv(io, argv.items);
}
var stdout: []u8 = undefined;
@@ -6153,7 +6148,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
const gpa = comp.gpa;
const io = comp.io;
if (c_object.clearStatus(gpa)) {
if (c_object.clearStatus(gpa, io)) {
// There was previous failure.
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
@@ -6500,7 +6495,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
if (win32_resource.clearStatus(comp.gpa)) {
if (win32_resource.clearStatus(comp.gpa, io)) {
// There was previous failure.
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
@@ -6768,7 +6763,7 @@ fn spawnZigRc(
// Just in case there's a failure that didn't send an ErrorBundle (e.g. an error return trace)
const stderr = poller.reader(.stderr);
const term = child.wait() catch |err| {
const term = child.wait(io) catch |err| {
return comp.failWin32Resource(win32_resource, "unable to wait for {s} rc: {s}", .{ argv[0], @errorName(err) });
};
@@ -7781,7 +7776,10 @@ pub fn dumpArgv(io: Io, argv: []const []const u8) Io.Cancelable!void {
defer io.unlockStderrWriter();
const w = &stderr.interface;
return dumpArgvWriter(w, argv) catch |err| switch (err) {
error.WriteFailed => return stderr.err.?,
error.WriteFailed => switch (stderr.err.?) {
error.Canceled => return error.Canceled,
else => return,
},
};
}
+16 -21
View File
@@ -501,7 +501,7 @@ fn runResource(
.path = tmp_directory_path,
.handle = handle: {
const dir = cache_root.handle.makeOpenPath(io, tmp_dir_sub_path, .{
.iterate = true,
.open_options = .{ .iterate = true },
}) catch |err| {
try eb.addRootErrorMessage(.{
.msg = try eb.printString("unable to create temporary directory '{s}': {t}", .{
@@ -525,7 +525,7 @@ fn runResource(
// https://github.com/ziglang/zig/issues/17095
pkg_path.root_dir.handle.close(io);
pkg_path.root_dir.handle = cache_root.handle.makeOpenPath(io, tmp_dir_sub_path, .{
.iterate = true,
.open_options = .{ .iterate = true },
}) catch @panic("btrfs workaround failed");
}
@@ -1334,7 +1334,7 @@ fn unzip(
f.location_tok,
try eb.printString("failed writing temporary zip file: {t}", .{err}),
);
break :b zip_file_writer.moveToReader(io);
break :b zip_file_writer.moveToReader();
};
var diagnostics: std.zip.Diagnostics = .{ .allocator = f.arena.allocator() };
@@ -1376,7 +1376,7 @@ fn unpackGitPack(f: *Fetch, out_dir: Io.Dir, resource: *Resource.Git) anyerror!U
const fetch_reader = &resource.fetch_stream.reader;
_ = try fetch_reader.streamRemaining(&pack_file_writer.interface);
try pack_file_writer.interface.flush();
break :b pack_file_writer.moveToReader(io);
break :b pack_file_writer.moveToReader();
};
var index_file = try pack_dir.createFile(io, "pkg.idx", .{ .read = true });
@@ -1421,26 +1421,21 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: Io.Dir, tmp_dir: Io.Dir) anyerror!void
// Recursive directory copy.
var it = try dir.walk(gpa);
defer it.deinit();
while (try it.next()) |entry| {
while (try it.next(io)) |entry| {
switch (entry.kind) {
.directory => {}, // omit empty directories
.file => {
dir.copyFile(
entry.path,
tmp_dir,
entry.path,
.{},
) catch |err| switch (err) {
dir.copyFile(entry.path, tmp_dir, entry.path, io, .{}) catch |err| switch (err) {
error.FileNotFound => {
if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(io, dirname);
try dir.copyFile(entry.path, tmp_dir, entry.path, .{});
try dir.copyFile(entry.path, tmp_dir, entry.path, io, .{});
},
else => |e| return e,
};
},
.sym_link => {
var buf: [fs.max_path_bytes]u8 = undefined;
const link_name = try dir.readLink(io, entry.path, &buf);
const link_name = buf[0..try dir.readLink(io, entry.path, &buf)];
// TODO: if this would create a symlink to outside
// the destination directory, fail with an error instead.
tmp_dir.symLink(io, link_name, entry.path, .{}) catch |err| switch (err) {
@@ -1524,7 +1519,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
var group: Io.Group = .init;
defer group.wait(io);
while (walker.next() catch |err| {
while (walker.next(io) catch |err| {
try eb.addRootErrorMessage(.{ .msg = try eb.printString(
"unable to walk temporary directory '{f}': {s}",
.{ pkg_path, @errorName(err) },
@@ -1575,7 +1570,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
.failure = undefined, // to be populated by the worker
.size = undefined, // to be populated by the worker
};
group.async(io, workerHashFile, .{ root_dir, hashed_file });
group.async(io, workerHashFile, .{ io, root_dir, hashed_file });
try all_files.append(hashed_file);
}
}
@@ -1643,7 +1638,7 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
assert(!f.job_queue.recursive);
// Print something to stdout that can be text diffed to figure out why
// the package hash is different.
dumpHashInfo(all_files.items) catch |err| {
dumpHashInfo(io, all_files.items) catch |err| {
std.debug.print("unable to write to stdout: {s}\n", .{@errorName(err)});
std.process.exit(1);
};
@@ -1655,9 +1650,9 @@ fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!Compute
};
}
fn dumpHashInfo(all_files: []const *const HashedFile) !void {
fn dumpHashInfo(io: Io, all_files: []const *const HashedFile) !void {
var stdout_buffer: [1024]u8 = undefined;
var stdout_writer: Io.File.Writer = .initStreaming(.stdout(), &stdout_buffer);
var stdout_writer: Io.File.Writer = .initStreaming(.stdout(), io, &stdout_buffer);
const w = &stdout_writer.interface;
for (all_files) |hashed_file| {
try w.print("{t}: {x}: {s}\n", .{ hashed_file.kind, &hashed_file.hash, hashed_file.normalized_path });
@@ -1665,8 +1660,8 @@ fn dumpHashInfo(all_files: []const *const HashedFile) !void {
try w.flush();
}
fn workerHashFile(dir: Io.Dir, hashed_file: *HashedFile) void {
hashed_file.failure = hashFileFallible(dir, hashed_file);
fn workerHashFile(io: Io, dir: Io.Dir, hashed_file: *HashedFile) void {
hashed_file.failure = hashFileFallible(io, dir, hashed_file);
}
fn workerDeleteFile(io: Io, dir: Io.Dir, deleted_file: *DeletedFile) void {
@@ -1745,7 +1740,7 @@ const HashedFile = struct {
Io.File.OpenError ||
Io.File.Reader.Error ||
Io.File.StatError ||
Io.File.ChmodError ||
Io.File.SetPermissionsError ||
Io.Dir.ReadLinkError;
const Kind = enum { file, link };
+1 -1
View File
@@ -274,7 +274,7 @@ pub const Repository = struct {
continue;
};
defer file.close(io);
try file.writeAll(file_object.data);
try file.writePositionalAll(io, file_object.data, 0);
},
.symlink => {
try repository.odb.seekOid(entry.oid);
+1 -1
View File
@@ -2679,7 +2679,7 @@ pub fn failWithOwnedErrorMsg(sema: *Sema, block: ?*Block, err_msg: *Zcu.ErrorMsg
Compilation.addModuleErrorMsg(zcu, &wip_errors, err_msg.*, false) catch @panic("out of memory");
std.debug.print("compile error during Sema:\n", .{});
var error_bundle = wip_errors.toOwnedBundle("") catch @panic("out of memory");
error_bundle.renderToStderr(io, .{}, .auto);
error_bundle.renderToStderr(io, .{}, .auto) catch @panic("failed to print to stderr");
std.debug.panicExtra(@returnAddress(), "unexpected compile error occurred", .{});
}
+7 -10
View File
@@ -2988,11 +2988,10 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_br: *Io.Reader
pub fn saveZirCache(
gpa: Allocator,
io: Io,
cache_file: Io.File,
cache_file_writer: *Io.File.Writer,
stat: Io.File.Stat,
zir: Zir,
) (Io.File.WriteError || Allocator.Error)!void {
) (Io.File.Writer.Error || Allocator.Error)!void {
const safety_buffer = if (data_has_safety_tag)
try gpa.alloc([8]u8, zir.instructions.len)
else
@@ -3026,13 +3025,12 @@ pub fn saveZirCache(
zir.string_bytes,
@ptrCast(zir.extra),
};
var cache_fw = cache_file.writer(io, &.{});
cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
error.WriteFailed => return cache_fw.err.?,
cache_file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
error.WriteFailed => return cache_file_writer.err.?,
};
}
pub fn saveZoirCache(io: Io, cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir) Io.File.WriteError!void {
pub fn saveZoirCache(cache_file_writer: *Io.File.Writer, stat: Io.File.Stat, zoir: Zoir) Io.File.Writer.Error!void {
const header: Zoir.Header = .{
.nodes_len = @intCast(zoir.nodes.len),
.extra_len = @intCast(zoir.extra.len),
@@ -3056,9 +3054,8 @@ pub fn saveZoirCache(io: Io, cache_file: Io.File, stat: Io.File.Stat, zoir: Zoir
@ptrCast(zoir.compile_errors),
@ptrCast(zoir.error_notes),
};
var cache_fw = cache_file.writer(io, &.{});
cache_fw.interface.writeVecAll(&vecs) catch |err| switch (err) {
error.WriteFailed => return cache_fw.err.?,
cache_file_writer.interface.writeVecAll(&vecs) catch |err| switch (err) {
error.WriteFailed => return cache_file_writer.err.?,
};
}
+35 -15
View File
@@ -238,18 +238,13 @@ pub fn updateFile(
if (builtin.os.tag == .wasi or lock == .exclusive) break true;
// Otherwise, unlock to give someone a chance to get the exclusive lock
// and then upgrade to an exclusive lock.
cache_file.unlock();
cache_file.unlock(io);
lock = .exclusive;
try cache_file.lock(lock);
try cache_file.lock(io, lock);
};
if (need_update) {
// The cache is definitely stale so delete the contents to avoid an underwrite later.
cache_file.setLength(io, 0) catch |err| switch (err) {
error.FileTooBig => unreachable, // 0 is not too big
else => |e| return e,
};
try cache_file.seekTo(0);
var cache_file_writer: Io.File.Writer = .init(cache_file, io, &.{});
if (stat.size > std.math.maxInt(u32))
return error.FileTooBig;
@@ -278,7 +273,7 @@ pub fn updateFile(
switch (file.getMode()) {
.zig => {
file.zir = try AstGen.generate(gpa, file.tree.?);
Zcu.saveZirCache(gpa, io, cache_file, stat, file.zir.?) catch |err| switch (err) {
Zcu.saveZirCache(gpa, &cache_file_writer, stat, file.zir.?) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => log.warn("unable to write cached ZIR code for {f} to {f}{s}: {t}", .{
file.path.fmt(comp), cache_directory, &hex_digest, err,
@@ -287,13 +282,19 @@ pub fn updateFile(
},
.zon => {
file.zoir = try ZonGen.generate(gpa, file.tree.?, .{});
Zcu.saveZoirCache(io, cache_file, stat, file.zoir.?) catch |err| {
Zcu.saveZoirCache(&cache_file_writer, stat, file.zoir.?) catch |err| {
log.warn("unable to write cached ZOIR code for {f} to {f}{s}: {t}", .{
file.path.fmt(comp), cache_directory, &hex_digest, err,
});
};
},
}
cache_file_writer.end() catch |err| switch (err) {
error.WriteFailed => return cache_file_writer.err.?,
else => |e| return e,
};
if (timer.finish()) |ns_astgen| {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
@@ -4524,12 +4525,14 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) Ru
.stage2_llvm,
=> {},
},
error.Canceled => |e| return e,
}
return error.AlreadyReported;
};
}
fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) error{
OutOfMemory,
Canceled,
CodegenFail,
NoLinkFile,
BackendDoesNotProduceMir,
@@ -4555,13 +4558,16 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
null;
defer if (liveness) |*l| l.deinit(gpa);
if (build_options.enable_debug_extensions and comp.verbose_air) {
if (build_options.enable_debug_extensions and comp.verbose_air) p: {
const io = comp.io;
const stderr = try io.lockStderrWriter(&.{});
defer io.unlockStderrWriter();
stderr.print("# Begin Function AIR: {f}:\n", .{fqn.fmt(ip)}) catch {};
air.write(stderr, pt, liveness);
stderr.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)}) catch {};
printVerboseAir(pt, liveness, fqn, air, &stderr.interface) catch |err| switch (err) {
error.WriteFailed => switch (stderr.err.?) {
error.Canceled => |e| return e,
else => break :p,
},
};
}
if (std.debug.runtime_safety) verify_liveness: {
@@ -4576,7 +4582,7 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
verify.verify() catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => return zcu.codegenFail(nav, "invalid liveness: {s}", .{@errorName(err)}),
else => return zcu.codegenFail(nav, "invalid liveness: {t}", .{err}),
};
}
@@ -4612,3 +4618,17 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
=> return zcu.codegenFail(nav, "unable to codegen: {s}", .{@errorName(err)}),
};
}
/// Writes the AIR of the function named `fqn` to `w`, wrapped in
/// `# Begin Function AIR:` / `# End Function AIR:` marker lines.
/// Used by the caller's `comp.verbose_air` debug path; the caller is
/// responsible for mapping `error.WriteFailed` back to the concrete
/// error stored on the underlying stderr writer.
fn printVerboseAir(
    pt: Zcu.PerThread,
    liveness: ?Air.Liveness,
    fqn: InternPool.NullTerminatedString,
    air: *const Air,
    w: *Io.Writer,
) Io.Writer.Error!void {
    const zcu = pt.zcu;
    // Intern pool is needed to render the fully-qualified name via `fqn.fmt`.
    const ip = &zcu.intern_pool;
    try w.print("# Begin Function AIR: {f}:\n", .{fqn.fmt(ip)});
    try air.write(w, pt, liveness);
    try w.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)});
}
+6 -6
View File
@@ -124,7 +124,7 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
try wip_errors.addZirErrorMessages(zir, tree, source_code, "<stdin>");
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
error_bundle.renderToStderr(io, .{}, color);
error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(2);
}
} else {
@@ -138,12 +138,12 @@ pub fn run(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8) !
try wip_errors.addZoirErrorMessages(zoir, tree, source_code, "<stdin>");
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
error_bundle.renderToStderr(io, .{}, color);
error_bundle.renderToStderr(io, .{}, color) catch {};
process.exit(2);
}
}
} else if (tree.errors.len != 0) {
try std.zig.printAstErrorsToStderr(gpa, tree, "<stdin>", color);
std.zig.printAstErrorsToStderr(gpa, io, tree, "<stdin>", color) catch {};
process.exit(2);
}
const formatted = try tree.renderAlloc(gpa);
@@ -298,7 +298,7 @@ fn fmtPathFile(
defer tree.deinit(gpa);
if (tree.errors.len != 0) {
try std.zig.printAstErrorsToStderr(gpa, tree, file_path, fmt.color);
try std.zig.printAstErrorsToStderr(gpa, io, tree, file_path, fmt.color);
fmt.any_error = true;
return;
}
@@ -319,7 +319,7 @@ fn fmtPathFile(
try wip_errors.addZirErrorMessages(zir, tree, source_code, file_path);
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
error_bundle.renderToStderr(io, .{}, fmt.color);
try error_bundle.renderToStderr(io, .{}, fmt.color);
fmt.any_error = true;
}
},
@@ -334,7 +334,7 @@ fn fmtPathFile(
try wip_errors.addZoirErrorMessages(zoir, tree, source_code, file_path);
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
error_bundle.renderToStderr(io, .{}, fmt.color);
try error_bundle.renderToStderr(io, .{}, fmt.color);
fmt.any_error = true;
}
},
+2 -2
View File
@@ -401,8 +401,8 @@ pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: Path,
pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
self.lock.release();
pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
self.lock.release(io);
gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
+2 -2
View File
@@ -640,8 +640,8 @@ pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: Path,
pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
self.lock.release();
pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
self.lock.release(io);
gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
+2 -2
View File
@@ -346,8 +346,8 @@ pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: Path,
pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
self.lock.release();
pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator, io: Io) void {
self.lock.release(io);
gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
+2 -3
View File
@@ -2246,13 +2246,12 @@ fn resolvePathInputLib(
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(gpa);
error_bundle.renderToStderr(io, .{}, color);
error_bundle.renderToStderr(io, .{}, color) catch {};
std.process.exit(1);
}
var ld_script = ld_script_result catch |err|
fatal("{f}: failed to parse linker script: {s}", .{ test_path, @errorName(err) });
fatal("{f}: failed to parse linker script: {t}", .{ test_path, err });
defer ld_script.deinit(gpa);
try unresolved_inputs.ensureUnusedCapacity(gpa, ld_script.args.len);
+6 -1
View File
@@ -4598,6 +4598,8 @@ const UpdateModuleError = Compilation.UpdateError || error{
/// The update caused compile errors. The error bundle has already been
/// reported to the user by being rendered to stderr.
CompileErrorsReported,
/// Error occurred printing compilation errors to stderr.
PrintingErrorsFailed,
};
fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node) UpdateModuleError!void {
try comp.update(prog_node);
@@ -4607,7 +4609,10 @@ fn updateModule(comp: *Compilation, color: Color, prog_node: std.Progress.Node)
if (errors.errorMessageCount() > 0) {
const io = comp.io;
try errors.renderToStderr(io, .{}, color);
errors.renderToStderr(io, .{}, color) catch |err| switch (err) {
error.Canceled => |e| return e,
else => return error.PrintingErrorsFailed,
};
return error.CompileErrorsReported;
}
}