zig rc: Add COFF object file creation for CMake cross-compilation use case
In #22522 I said:

> RC="zig rc" will now work in combination with zig cc and CMake. Here's an example of cross-compiling a simple Windows GUI CMake project
>
>     $ RC="zig rc" CC="zig cc --target=x86_64-windows-gnu" cmake .. -DCMAKE_SYSTEM_NAME=Windows -G Ninja

However, I didn't realize at the time that this only works because of the `-G Ninja` part. When not using Ninja as the build tool, CMake adds a workaround for 'very long lists of object files' where it takes all object files and runs them through `ar` to combine them into one archive:

https://github.com/Kitware/CMake/blob/4a11fd8dde745789f66d6500412d7f56607e9218/Modules/Platform/Windows-GNU.cmake#L141-L158

This is a problem for the Windows resource use case, because `ar` doesn't know how to deal with `.res` files, so this object-combining step fails with:

    unknown file type: foo.rc.res

Only the linker knows what to do with `.res` files (since it has its own `.res` -> `.obj` ('cvtres') conversion mechanism). So, when using Ninja, this object file combining step is skipped, the `.res` file gets passed to the linker, and everyone is happy.

Note: When CMake thinks that it's using `windres` as the Windows resource compiler, it passes `-O coff` to windres, which causes windres to output a COFF object file instead of a `.res` file. That means the `ar` step can succeed, because it's only working on actual object files.

---

This commit gives `zig rc` the ability to output COFF object files directly when `/:output-format coff` is provided as an argument. This effectively matches what happens when CMake uses `windres` for resource compilation, but requires the argument to be provided explicitly.

So, after this change, the following CMake cross-compilation use case will work, even when not using Ninja as the generator:

    RC="zig rc /:output-format coff" CC="zig cc --target=x86_64-windows-gnu" cmake .. -DCMAKE_SYSTEM_NAME=Windows
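For reference, a sketch of what the new option looks like when invoking `zig rc` directly, based on the options and extension-inference rules added in this change (the `foo.rc`/`foo.obj` filenames are just placeholders):

    # Explicitly request a COFF object file; output name is inferred as foo.obj
    zig rc /:output-format coff foo.rc

    # Equivalent: the coff output format is inferred from the .obj extension
    zig rc /fo foo.obj foo.rc

    # Targeting a different architecture for the COFF object
    zig rc /:output-format coff /:target aarch64 /fo foo.obj foo.rc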
committed by Andrew Kelley
parent 8683f25d24
commit a502301b5e
+579 -64
@@ -5,6 +5,7 @@ const lang = @import("lang.zig");
const res = @import("res.zig");
const Allocator = std.mem.Allocator;
const lex = @import("lex.zig");
const cvtres = @import("cvtres.zig");

/// This is what /SL 100 will set the maximum string literal length to
pub const max_string_literal_length_100_percent = 8192;
@@ -59,6 +60,20 @@ pub const usage_string_after_command_name =
\\ the .rc includes or otherwise depends on.
\\ /:depfile-fmt <value> Output format of the depfile, if /:depfile is set.
\\ json (default) A top-level JSON array of paths
\\ /:input-format <value> If not specified, the input format is inferred.
\\ rc (default if input format cannot be inferred)
\\ res Compiled .rc file, implies /:output-format coff
\\ rcpp Preprocessed .rc file, implies /:no-preprocess
\\ /:output-format <value> If not specified, the output format is inferred.
\\ res (default if output format cannot be inferred)
\\ coff COFF object file (extension: .obj or .o)
\\ rcpp Preprocessed .rc file, implies /p
\\ /:target <arch> Set the target machine for COFF object files.
\\ Can be specified either as PE/COFF machine constant
\\ name (X64, ARM64, etc) or Zig/LLVM CPU name (x86_64,
\\ aarch64, etc). The default is X64 (aka x86_64).
\\ Also accepts a full Zig/LLVM triple, but everything
\\ except the architecture is ignored.
\\
\\Note: For compatibility reasons, all custom options start with :
\\
@@ -131,8 +146,8 @@ pub const Diagnostics = struct {

pub const Options = struct {
allocator: Allocator,
input_filename: []const u8 = &[_]u8{},
output_filename: []const u8 = &[_]u8{},
input_source: IoSource = .{ .filename = &[_]u8{} },
output_source: IoSource = .{ .filename = &[_]u8{} },
extra_include_paths: std.ArrayListUnmanaged([]const u8) = .empty,
ignore_include_env_var: bool = false,
preprocess: Preprocess = .yes,
@@ -149,9 +164,30 @@ pub const Options = struct {
auto_includes: AutoIncludes = .any,
depfile_path: ?[]const u8 = null,
depfile_fmt: DepfileFormat = .json,
input_format: InputFormat = .rc,
output_format: OutputFormat = .res,
coff_options: cvtres.CoffOptions = .{},

pub const IoSource = union(enum) {
stdio: std.fs.File,
filename: []const u8,
};
pub const AutoIncludes = enum { any, msvc, gnu, none };
pub const DepfileFormat = enum { json };
pub const InputFormat = enum { rc, res, rcpp };
pub const OutputFormat = enum {
res,
coff,
rcpp,

pub fn extension(format: OutputFormat) []const u8 {
return switch (format) {
.rcpp => ".rcpp",
.coff => ".obj",
.res => ".res",
};
}
};
pub const Preprocess = enum { no, yes, only };
pub const SymbolAction = enum { define, undefine };
pub const SymbolValue = union(SymbolAction) {
@@ -198,9 +234,10 @@ pub const Options = struct {
try self.symbols.put(self.allocator, duped_key, .{ .undefine = {} });
}

/// If the current input filename both:
/// If the current input filename:
/// - does not have an extension, and
/// - does not exist in the cwd
/// - does not exist in the cwd, and
/// - the input format is .rc
/// then this function will append `.rc` to the input filename
///
/// Note: This behavior is different from the Win32 compiler.
@@ -213,14 +250,18 @@ pub const Options = struct {
/// of the .rc extension being omitted from the CLI args, but still
/// work fine if the file itself does not have an extension.
pub fn maybeAppendRC(options: *Options, cwd: std.fs.Dir) !void {
if (std.fs.path.extension(options.input_filename).len == 0) {
cwd.access(options.input_filename, .{}) catch |err| switch (err) {
switch (options.input_source) {
.stdio => return,
.filename => {},
}
if (options.input_format == .rc and std.fs.path.extension(options.input_source.filename).len == 0) {
cwd.access(options.input_source.filename, .{}) catch |err| switch (err) {
error.FileNotFound => {
var filename_bytes = try options.allocator.alloc(u8, options.input_filename.len + 3);
@memcpy(filename_bytes[0..options.input_filename.len], options.input_filename);
var filename_bytes = try options.allocator.alloc(u8, options.input_source.filename.len + 3);
@memcpy(filename_bytes[0..options.input_source.filename.len], options.input_source.filename);
@memcpy(filename_bytes[filename_bytes.len - 3 ..], ".rc");
options.allocator.free(options.input_filename);
options.input_filename = filename_bytes;
options.allocator.free(options.input_source.filename);
options.input_source = .{ .filename = filename_bytes };
},
else => {},
};
@@ -232,8 +273,14 @@ pub const Options = struct {
self.allocator.free(extra_include_path);
}
self.extra_include_paths.deinit(self.allocator);
self.allocator.free(self.input_filename);
self.allocator.free(self.output_filename);
switch (self.input_source) {
.stdio => {},
.filename => |filename| self.allocator.free(filename),
}
switch (self.output_source) {
.stdio => {},
.filename => |filename| self.allocator.free(filename),
}
var symbol_it = self.symbols.iterator();
while (symbol_it.next()) |entry| {
self.allocator.free(entry.key_ptr.*);
@@ -243,11 +290,26 @@ pub const Options = struct {
if (self.depfile_path) |depfile_path| {
self.allocator.free(depfile_path);
}
if (self.coff_options.define_external_symbol) |symbol_name| {
self.allocator.free(symbol_name);
}
}

pub fn dumpVerbose(self: *const Options, writer: anytype) !void {
try writer.print("Input filename: {s}\n", .{self.input_filename});
try writer.print("Output filename: {s}\n", .{self.output_filename});
const input_source_name = switch (self.input_source) {
.stdio => "<stdin>",
.filename => |filename| filename,
};
const output_source_name = switch (self.output_source) {
.stdio => "<stdout>",
.filename => |filename| filename,
};
try writer.print("Input filename: {s} (format={s})\n", .{ input_source_name, @tagName(self.input_format) });
try writer.print("Output filename: {s} (format={s})\n", .{ output_source_name, @tagName(self.output_format) });
if (self.output_format == .coff) {
try writer.print(" Target machine type for COFF: {s}\n", .{@tagName(self.coff_options.target)});
}

if (self.extra_include_paths.items.len > 0) {
try writer.writeAll(" Extra include paths:\n");
for (self.extra_include_paths.items) |extra_include_path| {
@@ -331,6 +393,7 @@ pub const Arg = struct {
}

pub fn optionWithoutPrefix(self: Arg, option_len: usize) []const u8 {
if (option_len == 0) return self.name();
return self.name()[0..option_len];
}

@@ -380,6 +443,8 @@ pub const Arg = struct {

pub const Value = struct {
slice: []const u8,
/// Amount to increment the arg index to skip over both the option and the value arg(s)
/// e.g. 1 if /<option><value>, 2 if /<option> <value>
index_increment: u2 = 1,

pub fn argSpan(self: Value, arg: Arg) Diagnostics.ErrorDetails.ArgSpan {
@@ -414,6 +479,7 @@ pub const Arg = struct {

pub const Context = struct {
index: usize,
option_len: usize,
arg: Arg,
value: Value,
};
@@ -428,7 +494,18 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
errdefer options.deinit();

var output_filename: ?[]const u8 = null;
var output_filename_context: Arg.Context = undefined;
var output_filename_context: union(enum) {
unspecified: void,
positional: usize,
arg: Arg.Context,
} = .{ .unspecified = {} };
var output_format: ?Options.OutputFormat = null;
var output_format_context: Arg.Context = undefined;
var input_format: ?Options.InputFormat = null;
var input_format_context: Arg.Context = undefined;
var input_filename_arg_i: usize = undefined;
var preprocess_only_context: Arg.Context = undefined;
var depfile_context: Arg.Context = undefined;

var arg_i: usize = 0;
next_arg: while (arg_i < args.len) {
@@ -470,6 +547,25 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
if (std.ascii.startsWithIgnoreCase(arg_name, ":no-preprocess")) {
options.preprocess = .no;
arg.name_offset += ":no-preprocess".len;
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":output-format")) {
const value = arg.value(":output-format".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":output-format".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
output_format = std.meta.stringToEnum(Options.OutputFormat, value.slice) orelse blk: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid output format setting: {s} ", .{value.slice});
try diagnostics.append(err_details);
break :blk output_format;
};
output_format_context = .{ .index = arg_i, .option_len = ":output-format".len, .arg = arg, .value = value };
arg_i += value.index_increment;
continue :next_arg;
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":auto-includes")) {
const value = arg.value(":auto-includes".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
@@ -488,6 +584,25 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
};
arg_i += value.index_increment;
continue :next_arg;
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":input-format")) {
const value = arg.value(":input-format".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":input-format".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
input_format = std.meta.stringToEnum(Options.InputFormat, value.slice) orelse blk: {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid input format setting: {s} ", .{value.slice});
try diagnostics.append(err_details);
break :blk input_format;
};
input_format_context = .{ .index = arg_i, .option_len = ":input-format".len, .arg = arg, .value = value };
arg_i += value.index_increment;
continue :next_arg;
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":depfile-fmt")) {
const value = arg.value(":depfile-fmt".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
@@ -522,6 +637,31 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
const path = try allocator.dupe(u8, value.slice);
errdefer allocator.free(path);
options.depfile_path = path;
depfile_context = .{ .index = arg_i, .option_len = ":depfile".len, .arg = arg, .value = value };
arg_i += value.index_increment;
continue :next_arg;
} else if (std.ascii.startsWithIgnoreCase(arg_name, ":target")) {
const value = arg.value(":target".len, arg_i, args) catch {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = arg.missingSpan() };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("missing value after {s}{s} option", .{ arg.prefixSlice(), arg.optionWithoutPrefix(":target".len) });
try diagnostics.append(err_details);
arg_i += 1;
break :next_arg;
};
// Take the substring up to the first dash so that a full target triple
// can be used, e.g. x86_64-windows-gnu becomes x86_64
var target_it = std.mem.splitScalar(u8, value.slice, '-');
const arch_str = target_it.first();
const arch = cvtres.supported_targets.Arch.fromStringIgnoreCase(arch_str) orelse {
var err_details = Diagnostics.ErrorDetails{ .arg_index = arg_i, .arg_span = value.argSpan(arg) };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("invalid or unsupported target architecture: {s}", .{arch_str});
try diagnostics.append(err_details);
arg_i += value.index_increment;
continue :next_arg;
};
options.coff_options.target = arch.toCoffMachineType();
arg_i += value.index_increment;
continue :next_arg;
} else if (std.ascii.startsWithIgnoreCase(arg_name, "nologo")) {
@@ -620,7 +760,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
arg_i += 1;
break :next_arg;
};
output_filename_context = .{ .index = arg_i, .arg = arg, .value = value };
output_filename_context = .{ .arg = .{ .index = arg_i, .option_len = "fo".len, .arg = arg, .value = value } };
output_filename = value.slice;
arg_i += value.index_increment;
continue :next_arg;
@@ -812,6 +952,7 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
arg.name_offset += 1;
} else if (std.ascii.startsWithIgnoreCase(arg_name, "p")) {
options.preprocess = .only;
preprocess_only_context = .{ .index = arg_i, .option_len = "p".len, .arg = arg, .value = undefined };
arg.name_offset += 1;
} else if (std.ascii.startsWithIgnoreCase(arg_name, "i")) {
const value = arg.value(1, arg_i, args) catch {
@@ -920,10 +1061,10 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn

if (args.len > 0) {
const last_arg = args[args.len - 1];
if (arg_i > 0 and last_arg.len > 0 and last_arg[0] == '/' and std.ascii.endsWithIgnoreCase(last_arg, ".rc")) {
if (arg_i > 0 and last_arg.len > 0 and last_arg[0] == '/' and isSupportedInputExtension(std.fs.path.extension(last_arg))) {
var note_details = Diagnostics.ErrorDetails{ .type = .note, .print_args = true, .arg_index = arg_i - 1 };
var note_writer = note_details.msg.writer(allocator);
try note_writer.writeAll("if this argument was intended to be the input filename, then -- should be specified in front of it to exclude it from option parsing");
try note_writer.writeAll("if this argument was intended to be the input filename, adding -- in front of it will exclude it from option parsing");
try diagnostics.append(note_details);
}
}
@@ -932,7 +1073,28 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
// things after this rely on the value of the input filename.
return error.ParseError;
}
options.input_filename = try allocator.dupe(u8, positionals[0]);
options.input_source = .{ .filename = try allocator.dupe(u8, positionals[0]) };
input_filename_arg_i = arg_i;

const InputFormatSource = enum {
inferred_from_input_filename,
input_format_arg,
};

var input_format_source: InputFormatSource = undefined;
if (input_format == null) {
const ext = std.fs.path.extension(options.input_source.filename);
if (std.ascii.eqlIgnoreCase(ext, ".res")) {
input_format = .res;
} else if (std.ascii.eqlIgnoreCase(ext, ".rcpp")) {
input_format = .rcpp;
} else {
input_format = .rc;
}
input_format_source = .inferred_from_input_filename;
} else {
input_format_source = .input_format_arg;
}

if (positionals.len > 1) {
if (output_filename != null) {
@@ -942,53 +1104,233 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
try diagnostics.append(err_details);
var note_details = Diagnostics.ErrorDetails{
.type = .note,
.arg_index = output_filename_context.value.index(output_filename_context.index),
.arg_span = output_filename_context.value.argSpan(output_filename_context.arg),
.arg_index = output_filename_context.arg.index,
.arg_span = output_filename_context.arg.value.argSpan(output_filename_context.arg.arg),
};
var note_writer = note_details.msg.writer(allocator);
try note_writer.writeAll("output filename previously specified here");
try diagnostics.append(note_details);
} else {
output_filename = positionals[1];
output_filename_context = .{ .positional = arg_i + 1 };
}
}

const OutputFormatSource = enum {
inferred_from_input_filename,
inferred_from_output_filename,
output_format_arg,
unable_to_infer_from_input_filename,
unable_to_infer_from_output_filename,
inferred_from_preprocess_only,
};

var output_format_source: OutputFormatSource = undefined;
if (output_filename == null) {
var buf = std.ArrayList(u8).init(allocator);
errdefer buf.deinit();

if (std.fs.path.dirname(options.input_filename)) |dirname| {
var end_pos = dirname.len;
// We want to ensure that we write a path separator at the end, so if the dirname
// doesn't end with a path sep then include the char after the dirname
// which must be a path sep.
if (!std.fs.path.isSep(dirname[dirname.len - 1])) end_pos += 1;
try buf.appendSlice(options.input_filename[0..end_pos]);
if (output_format == null) {
output_format_source = .inferred_from_input_filename;
const input_ext = std.fs.path.extension(options.input_source.filename);
if (std.ascii.eqlIgnoreCase(input_ext, ".res")) {
output_format = .coff;
} else if (options.preprocess == .only and (input_format.? == .rc or std.ascii.eqlIgnoreCase(input_ext, ".rc"))) {
output_format = .rcpp;
output_format_source = .inferred_from_preprocess_only;
} else {
if (!std.ascii.eqlIgnoreCase(input_ext, ".res")) {
output_format_source = .unable_to_infer_from_input_filename;
}
output_format = .res;
}
}
try buf.appendSlice(std.fs.path.stem(options.input_filename));
if (options.preprocess == .only) {
try buf.appendSlice(".rcpp");
} else {
try buf.appendSlice(".res");
}

options.output_filename = try buf.toOwnedSlice();
options.output_source = .{ .filename = try filepathWithExtension(allocator, options.input_source.filename, output_format.?.extension()) };
} else {
options.output_filename = try allocator.dupe(u8, output_filename.?);
options.output_source = .{ .filename = try allocator.dupe(u8, output_filename.?) };
if (output_format == null) {
output_format_source = .inferred_from_output_filename;
const ext = std.fs.path.extension(options.output_source.filename);
if (std.ascii.eqlIgnoreCase(ext, ".obj") or std.ascii.eqlIgnoreCase(ext, ".o")) {
output_format = .coff;
} else if (std.ascii.eqlIgnoreCase(ext, ".rcpp")) {
output_format = .rcpp;
} else {
if (!std.ascii.eqlIgnoreCase(ext, ".res")) {
output_format_source = .unable_to_infer_from_output_filename;
}
output_format = .res;
}
} else {
output_format_source = .output_format_arg;
}
}

options.input_format = input_format.?;
options.output_format = output_format.?;

// Check for incompatible options
var print_input_format_source_note: bool = false;
var print_output_format_source_note: bool = false;
if (options.depfile_path != null and (options.input_format == .res or options.output_format == .rcpp)) {
var err_details = Diagnostics.ErrorDetails{ .type = .warning, .arg_index = depfile_context.index, .arg_span = depfile_context.value.argSpan(depfile_context.arg) };
var msg_writer = err_details.msg.writer(allocator);
if (options.input_format == .res) {
try msg_writer.print("the {s}{s} option was ignored because the input format is '{s}'", .{
depfile_context.arg.prefixSlice(),
depfile_context.arg.optionWithoutPrefix(depfile_context.option_len),
@tagName(options.input_format),
});
print_input_format_source_note = true;
} else if (options.output_format == .rcpp) {
try msg_writer.print("the {s}{s} option was ignored because the output format is '{s}'", .{
depfile_context.arg.prefixSlice(),
depfile_context.arg.optionWithoutPrefix(depfile_context.option_len),
@tagName(options.output_format),
});
print_output_format_source_note = true;
}
try diagnostics.append(err_details);
}
if (!isSupportedTransformation(options.input_format, options.output_format)) {
var err_details = Diagnostics.ErrorDetails{ .arg_index = input_filename_arg_i, .print_args = false };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("input format '{s}' cannot be converted to output format '{s}'", .{ @tagName(options.input_format), @tagName(options.output_format) });
try diagnostics.append(err_details);
print_input_format_source_note = true;
print_output_format_source_note = true;
}
if (options.preprocess == .only and options.output_format != .rcpp) {
var err_details = Diagnostics.ErrorDetails{ .arg_index = preprocess_only_context.index };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the {s}{s} option cannot be used with output format '{s}'", .{
preprocess_only_context.arg.prefixSlice(),
preprocess_only_context.arg.optionWithoutPrefix(preprocess_only_context.option_len),
@tagName(options.output_format),
});
try diagnostics.append(err_details);
print_output_format_source_note = true;
}
if (print_input_format_source_note) {
switch (input_format_source) {
.inferred_from_input_filename => {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .arg_index = input_filename_arg_i };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.writeAll("the input format was inferred from the input filename");
try diagnostics.append(err_details);
},
.input_format_arg => {
var err_details = Diagnostics.ErrorDetails{
.type = .note,
.arg_index = input_format_context.index,
.arg_span = input_format_context.value.argSpan(input_format_context.arg),
};
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.writeAll("the input format was specified here");
try diagnostics.append(err_details);
},
}
}
if (print_output_format_source_note) {
switch (output_format_source) {
.inferred_from_input_filename, .unable_to_infer_from_input_filename => {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .arg_index = input_filename_arg_i };
var msg_writer = err_details.msg.writer(allocator);
if (output_format_source == .inferred_from_input_filename) {
try msg_writer.writeAll("the output format was inferred from the input filename");
} else {
try msg_writer.writeAll("the output format was unable to be inferred from the input filename, so the default was used");
}
try diagnostics.append(err_details);
},
.inferred_from_output_filename, .unable_to_infer_from_output_filename => {
var err_details: Diagnostics.ErrorDetails = switch (output_filename_context) {
.positional => |i| .{ .type = .note, .arg_index = i },
.arg => |ctx| .{ .type = .note, .arg_index = ctx.index, .arg_span = ctx.value.argSpan(ctx.arg) },
.unspecified => unreachable,
};
var msg_writer = err_details.msg.writer(allocator);
if (output_format_source == .inferred_from_output_filename) {
try msg_writer.writeAll("the output format was inferred from the output filename");
} else {
try msg_writer.writeAll("the output format was unable to be inferred from the output filename, so the default was used");
}
try diagnostics.append(err_details);
},
.output_format_arg => {
var err_details = Diagnostics.ErrorDetails{
.type = .note,
.arg_index = output_format_context.index,
.arg_span = output_format_context.value.argSpan(output_format_context.arg),
};
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.writeAll("the output format was specified here");
try diagnostics.append(err_details);
},
.inferred_from_preprocess_only => {
var err_details = Diagnostics.ErrorDetails{ .type = .note, .arg_index = preprocess_only_context.index };
var msg_writer = err_details.msg.writer(allocator);
try msg_writer.print("the output format was inferred from the usage of the {s}{s} option", .{
preprocess_only_context.arg.prefixSlice(),
preprocess_only_context.arg.optionWithoutPrefix(preprocess_only_context.option_len),
});
try diagnostics.append(err_details);
},
}
}

if (diagnostics.hasError()) {
return error.ParseError;
}

// Implied settings from input/output formats
if (options.output_format == .rcpp) options.preprocess = .only;
if (options.input_format == .res) options.output_format = .coff;
if (options.input_format == .rcpp) options.preprocess = .no;

return options;
}

pub fn filepathWithExtension(allocator: Allocator, path: []const u8, ext: []const u8) ![]const u8 {
var buf = std.ArrayList(u8).init(allocator);
errdefer buf.deinit();
if (std.fs.path.dirname(path)) |dirname| {
var end_pos = dirname.len;
// We want to ensure that we write a path separator at the end, so if the dirname
// doesn't end with a path sep then include the char after the dirname
// which must be a path sep.
if (!std.fs.path.isSep(dirname[dirname.len - 1])) end_pos += 1;
try buf.appendSlice(path[0..end_pos]);
}
try buf.appendSlice(std.fs.path.stem(path));
try buf.appendSlice(ext);
return try buf.toOwnedSlice();
}

pub fn isSupportedInputExtension(ext: []const u8) bool {
if (std.ascii.eqlIgnoreCase(ext, ".rc")) return true;
if (std.ascii.eqlIgnoreCase(ext, ".res")) return true;
if (std.ascii.eqlIgnoreCase(ext, ".rcpp")) return true;
return false;
}

pub fn isSupportedTransformation(input: Options.InputFormat, output: Options.OutputFormat) bool {
return switch (input) {
.rc => switch (output) {
.res => true,
.coff => true,
.rcpp => true,
},
.res => switch (output) {
.res => false,
.coff => true,
.rcpp => false,
},
.rcpp => switch (output) {
.res => true,
.coff => true,
.rcpp => false,
},
};
}

/// Returns true if the str is a valid C identifier for use in a #define/#undef macro
pub fn isValidIdentifier(str: []const u8) bool {
for (str, 0..) |c, i| switch (c) {
@@ -1278,17 +1620,6 @@ test "parse errors: basic" {
\\
\\
);
try testParseError(&.{"/some/absolute/path/parsed/as/an/option.rc"},
\\<cli>: error: the /s option is unsupported
\\ ... /some/absolute/path/parsed/as/an/option.rc
\\ ~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
\\<cli>: error: missing input filename
\\
\\<cli>: note: if this argument was intended to be the input filename, then -- should be specified in front of it to exclude it from option parsing
\\ ... /some/absolute/path/parsed/as/an/option.rc
\\ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
\\
);
}

test "inferred absolute filepaths" {
@@ -1349,8 +1680,8 @@ test "parse: options" {
defer options.deinit();

try std.testing.expectEqual(true, options.verbose);
try std.testing.expectEqualStrings("foo.rc", options.input_filename);
try std.testing.expectEqualStrings("foo.res", options.output_filename);
try std.testing.expectEqualStrings("foo.rc", options.input_source.filename);
try std.testing.expectEqualStrings("foo.res", options.output_source.filename);
}
{
var options = try testParse(&.{ "/vx", "foo.rc" });
@@ -1358,8 +1689,8 @@ test "parse: options" {

try std.testing.expectEqual(true, options.verbose);
try std.testing.expectEqual(true, options.ignore_include_env_var);
try std.testing.expectEqualStrings("foo.rc", options.input_filename);
try std.testing.expectEqualStrings("foo.res", options.output_filename);
try std.testing.expectEqualStrings("foo.rc", options.input_source.filename);
try std.testing.expectEqualStrings("foo.res", options.output_source.filename);
}
{
var options = try testParse(&.{ "/xv", "foo.rc" });
@@ -1367,8 +1698,8 @@ test "parse: options" {

try std.testing.expectEqual(true, options.verbose);
try std.testing.expectEqual(true, options.ignore_include_env_var);
try std.testing.expectEqualStrings("foo.rc", options.input_filename);
try std.testing.expectEqualStrings("foo.res", options.output_filename);
try std.testing.expectEqualStrings("foo.rc", options.input_source.filename);
try std.testing.expectEqualStrings("foo.res", options.output_source.filename);
}
{
var options = try testParse(&.{ "/xvFObar.res", "foo.rc" });
@@ -1376,8 +1707,8 @@ test "parse: options" {

try std.testing.expectEqual(true, options.verbose);
try std.testing.expectEqual(true, options.ignore_include_env_var);
try std.testing.expectEqualStrings("foo.rc", options.input_filename);
try std.testing.expectEqualStrings("bar.res", options.output_filename);
try std.testing.expectEqualStrings("foo.rc", options.input_source.filename);
try std.testing.expectEqualStrings("bar.res", options.output_source.filename);
}
}

@@ -1541,24 +1872,208 @@ test "parse: unsupported LCX/LCE-related options" {
);
}

test "parse: output filename specified twice" {
try testParseError(&.{ "/fo", "foo.res", "foo.rc", "foo.res" },
\\<cli>: error: output filename already specified
\\ ... foo.res
\\ ^~~~~~~
\\<cli>: note: output filename previously specified here
\\ ... /fo foo.res ...
\\ ~~~~^~~~~~~
\\
);
}

test "parse: input and output formats" {
{
try testParseError(&.{ "/:output-format", "rcpp", "foo.res" },
\\<cli>: error: input format 'res' cannot be converted to output format 'rcpp'
\\
\\<cli>: note: the input format was inferred from the input filename
\\ ... foo.res
\\ ^~~~~~~
\\<cli>: note: the output format was specified here
\\ ... /:output-format rcpp ...
\\ ~~~~~~~~~~~~~~~~^~~~
\\
);
}
{
try testParseError(&.{ "foo.res", "foo.rcpp" },
\\<cli>: error: input format 'res' cannot be converted to output format 'rcpp'
\\
\\<cli>: note: the input format was inferred from the input filename
\\ ... foo.res ...
\\ ^~~~~~~
\\<cli>: note: the output format was inferred from the output filename
\\ ... foo.rcpp
\\ ^~~~~~~~
\\
);
}
{
try testParseError(&.{ "/:input-format", "res", "foo" },
\\<cli>: error: input format 'res' cannot be converted to output format 'res'
\\
\\<cli>: note: the input format was specified here
\\ ... /:input-format res ...
\\ ~~~~~~~~~~~~~~~^~~
\\<cli>: note: the output format was unable to be inferred from the input filename, so the default was used
\\ ... foo
\\ ^~~
\\
);
}
{
try testParseError(&.{ "/p", "/:input-format", "res", "foo" },
\\<cli>: error: input format 'res' cannot be converted to output format 'res'
\\
\\<cli>: error: the /p option cannot be used with output format 'res'
\\ ... /p ...
\\ ^~
\\<cli>: note: the input format was specified here
\\ ... /:input-format res ...
\\ ~~~~~~~~~~~~~~~^~~
\\<cli>: note: the output format was unable to be inferred from the input filename, so the default was used
\\ ... foo
\\ ^~~
\\
);
}
{
try testParseError(&.{ "/:output-format", "coff", "/p", "foo.rc" },
\\<cli>: error: the /p option cannot be used with output format 'coff'
\\ ... /p ...
\\ ^~
\\<cli>: note: the output format was specified here
\\ ... /:output-format coff ...
\\ ~~~~~~~~~~~~~~~~^~~~
\\
);
}
{
try testParseError(&.{ "/fo", "foo.res", "/p", "foo.rc" },
\\<cli>: error: the /p option cannot be used with output format 'res'
\\ ... /p ...
\\ ^~
\\<cli>: note: the output format was inferred from the output filename
\\ ... /fo foo.res ...
\\ ~~~~^~~~~~~
\\
);
}
{
try testParseError(&.{ "/p", "foo.rc", "foo.o" },
\\<cli>: error: the /p option cannot be used with output format 'coff'
\\ ... /p ...
\\ ^~
\\<cli>: note: the output format was inferred from the output filename
\\ ... foo.o
\\ ^~~~~
\\
);
}
{
var options = try testParse(&.{"foo.rc"});
defer options.deinit();

try std.testing.expectEqual(.rc, options.input_format);
try std.testing.expectEqual(.res, options.output_format);
}
{
var options = try testParse(&.{"foo.rcpp"});
defer options.deinit();

try std.testing.expectEqual(.no, options.preprocess);
try std.testing.expectEqual(.rcpp, options.input_format);
try std.testing.expectEqual(.res, options.output_format);
}
{
var options = try testParse(&.{ "foo.rc", "foo.rcpp" });
defer options.deinit();

try std.testing.expectEqual(.only, options.preprocess);
try std.testing.expectEqual(.rc, options.input_format);
try std.testing.expectEqual(.rcpp, options.output_format);
}
{
var options = try testParse(&.{ "foo.rc", "foo.obj" });
defer options.deinit();

try std.testing.expectEqual(.rc, options.input_format);
try std.testing.expectEqual(.coff, options.output_format);
}
{
var options = try testParse(&.{ "/fo", "foo.o", "foo.rc" });
defer options.deinit();

try std.testing.expectEqual(.rc, options.input_format);
try std.testing.expectEqual(.coff, options.output_format);
}
{
var options = try testParse(&.{"foo.res"});
defer options.deinit();

try std.testing.expectEqual(.res, options.input_format);
try std.testing.expectEqual(.coff, options.output_format);
}
{
var options = try testParseWarning(&.{ "/:depfile", "foo.json", "foo.rc", "foo.rcpp" },
\\<cli>: warning: the /:depfile option was ignored because the output format is 'rcpp'
\\ ... /:depfile foo.json ...
\\ ~~~~~~~~~~^~~~~~~~
\\<cli>: note: the output format was inferred from the output filename
\\ ... foo.rcpp
\\ ^~~~~~~~
\\
);
defer options.deinit();

try std.testing.expectEqual(.rc, options.input_format);
try std.testing.expectEqual(.rcpp, options.output_format);
}
{
var options = try testParseWarning(&.{ "/:depfile", "foo.json", "foo.res", "foo.o" },
\\<cli>: warning: the /:depfile option was ignored because the input format is 'res'
\\ ... /:depfile foo.json ...
\\ ~~~~~~~~~~^~~~~~~~
\\<cli>: note: the input format was inferred from the input filename
\\ ... foo.res ...
\\ ^~~~~~~
\\
);
defer options.deinit();

try std.testing.expectEqual(.res, options.input_format);
try std.testing.expectEqual(.coff, options.output_format);
}
}

test "maybeAppendRC" {
|
||||
var tmp = std.testing.tmpDir(.{});
|
||||
defer tmp.cleanup();
|
||||
|
||||
var options = try testParse(&.{"foo"});
|
||||
defer options.deinit();
|
||||
try std.testing.expectEqualStrings("foo", options.input_filename);
|
||||
try std.testing.expectEqualStrings("foo", options.input_source.filename);
|
||||
|
||||
// Create the file so that it's found. In this scenario, .rc should not get
|
||||
// appended.
|
||||
var file = try tmp.dir.createFile("foo", .{});
|
||||
file.close();
|
||||
try options.maybeAppendRC(tmp.dir);
|
||||
try std.testing.expectEqualStrings("foo", options.input_filename);
|
||||
try std.testing.expectEqualStrings("foo", options.input_source.filename);
|
||||
|
||||
// Now delete the file and try again. Since the verbatim name is no longer found
|
||||
// and the input filename does not have an extension, .rc should get appended.
|
||||
// Now delete the file and try again. But this time change the input format
|
||||
// to non-rc.
|
||||
try tmp.dir.deleteFile("foo");
|
||||
options.input_format = .res;
|
||||
try options.maybeAppendRC(tmp.dir);
|
||||
try std.testing.expectEqualStrings("foo.rc", options.input_filename);
|
||||
try std.testing.expectEqualStrings("foo", options.input_source.filename);
|
||||
|
||||
// Finally, reset the input format to rc. Since the verbatim name is no longer found
|
||||
// and the input filename does not have an extension, .rc should get appended.
|
||||
options.input_format = .rc;
|
||||
try options.maybeAppendRC(tmp.dir);
|
||||
try std.testing.expectEqualStrings("foo.rc", options.input_source.filename);
|
||||
}
|
||||
|
||||
@@ -0,0 +1,1125 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const res = @import("res.zig");
const NameOrOrdinal = res.NameOrOrdinal;
const MemoryFlags = res.MemoryFlags;
const Language = res.Language;
const numPaddingBytesNeeded = @import("compile.zig").Compiler.numPaddingBytesNeeded;

pub const Resource = struct {
type_value: NameOrOrdinal,
name_value: NameOrOrdinal,
data_version: u32,
memory_flags: MemoryFlags,
language: Language,
version: u32,
characteristics: u32,
data: []const u8,

pub fn deinit(self: Resource, allocator: Allocator) void {
self.name_value.deinit(allocator);
self.type_value.deinit(allocator);
allocator.free(self.data);
}

/// Returns true if all fields match the expected value of the resource at the
/// start of all .res files that distinguishes the .res file as 32-bit (as
/// opposed to 16-bit).
pub fn is32BitPreface(self: Resource) bool {
if (self.type_value != .ordinal or self.type_value.ordinal != 0) return false;
if (self.name_value != .ordinal or self.name_value.ordinal != 0) return false;
if (self.data_version != 0) return false;
if (@as(u16, @bitCast(self.memory_flags)) != 0) return false;
if (@as(u16, @bitCast(self.language)) != 0) return false;
if (self.version != 0) return false;
if (self.characteristics != 0) return false;
if (self.data.len != 0) return false;
return true;
}

pub fn isDlgInclude(resource: Resource) bool {
return resource.type_value == .ordinal and resource.type_value.ordinal == @intFromEnum(res.RT.DLGINCLUDE);
}
};

pub const ParsedResources = struct {
list: std.ArrayListUnmanaged(Resource) = .empty,
allocator: Allocator,

pub fn init(allocator: Allocator) ParsedResources {
return .{ .allocator = allocator };
}

pub fn deinit(self: *ParsedResources) void {
for (self.list.items) |*resource| {
resource.deinit(self.allocator);
}
self.list.deinit(self.allocator);
}
};

pub const ParseResOptions = struct {
skip_zero_data_resources: bool = true,
skip_dlginclude_resources: bool = true,
max_size: u64,
};

/// The returned ParsedResources should be freed by calling its `deinit` function.
pub fn parseRes(allocator: Allocator, reader: anytype, options: ParseResOptions) !ParsedResources {
var resources = ParsedResources.init(allocator);
errdefer resources.deinit();

try parseResInto(&resources, reader, options);

return resources;
}

pub fn parseResInto(resources: *ParsedResources, reader: anytype, options: ParseResOptions) !void {
const allocator = resources.allocator;
var bytes_remaining: u64 = options.max_size;
{
const first_resource_and_size = try parseResource(allocator, reader, bytes_remaining);
defer first_resource_and_size.resource.deinit(allocator);
if (!first_resource_and_size.resource.is32BitPreface()) return error.InvalidPreface;
bytes_remaining -= first_resource_and_size.total_size;
}

while (bytes_remaining != 0) {
const resource_and_size = try parseResource(allocator, reader, bytes_remaining);
if (options.skip_zero_data_resources and resource_and_size.resource.data.len == 0) {
resource_and_size.resource.deinit(allocator);
} else if (options.skip_dlginclude_resources and resource_and_size.resource.isDlgInclude()) {
resource_and_size.resource.deinit(allocator);
} else {
errdefer resource_and_size.resource.deinit(allocator);
try resources.list.append(allocator, resource_and_size.resource);
}
bytes_remaining -= resource_and_size.total_size;
}
}

pub const ResourceAndSize = struct {
resource: Resource,
total_size: u64,
};

pub fn parseResource(allocator: Allocator, reader: anytype, max_size: u64) !ResourceAndSize {
var header_counting_reader = std.io.countingReader(reader);
const header_reader = header_counting_reader.reader();
const data_size = try header_reader.readInt(u32, .little);
const header_size = try header_reader.readInt(u32, .little);
const total_size: u64 = @as(u64, header_size) + data_size;
if (total_size > max_size) return error.ImpossibleSize;

var header_bytes_available = header_size -| 8;
var type_reader = std.io.limitedReader(header_reader, header_bytes_available);
const type_value = try parseNameOrOrdinal(allocator, type_reader.reader());
errdefer type_value.deinit(allocator);

header_bytes_available -|= @intCast(type_value.byteLen());
var name_reader = std.io.limitedReader(header_reader, header_bytes_available);
const name_value = try parseNameOrOrdinal(allocator, name_reader.reader());
errdefer name_value.deinit(allocator);

const padding_after_name = numPaddingBytesNeeded(@intCast(header_counting_reader.bytes_read));
try header_reader.skipBytes(padding_after_name, .{ .buf_size = 3 });

std.debug.assert(header_counting_reader.bytes_read % 4 == 0);
const data_version = try header_reader.readInt(u32, .little);
const memory_flags: MemoryFlags = @bitCast(try header_reader.readInt(u16, .little));
const language: Language = @bitCast(try header_reader.readInt(u16, .little));
const version = try header_reader.readInt(u32, .little);
const characteristics = try header_reader.readInt(u32, .little);

const header_bytes_read = header_counting_reader.bytes_read;
if (header_size != header_bytes_read) return error.HeaderSizeMismatch;

const data = try allocator.alloc(u8, data_size);
errdefer allocator.free(data);
try reader.readNoEof(data);

const padding_after_data = numPaddingBytesNeeded(@intCast(data_size));
try reader.skipBytes(padding_after_data, .{ .buf_size = 3 });

return .{
.resource = .{
.name_value = name_value,
.type_value = type_value,
.language = language,
.memory_flags = memory_flags,
.version = version,
.characteristics = characteristics,
.data_version = data_version,
.data = data,
},
.total_size = header_size + data.len + padding_after_data,
};
}

pub fn parseNameOrOrdinal(allocator: Allocator, reader: anytype) !NameOrOrdinal {
const first_code_unit = try reader.readInt(u16, .little);
if (first_code_unit == 0xFFFF) {
const ordinal_value = try reader.readInt(u16, .little);
return .{ .ordinal = ordinal_value };
}
var name_buf = try std.ArrayListUnmanaged(u16).initCapacity(allocator, 16);
errdefer name_buf.deinit(allocator);
var code_unit = first_code_unit;
while (code_unit != 0) {
try name_buf.append(allocator, std.mem.nativeToLittle(u16, code_unit));
code_unit = try reader.readInt(u16, .little);
}
return .{ .name = try name_buf.toOwnedSliceSentinel(allocator, 0) };
}

pub const CoffOptions = struct {
target: std.coff.MachineType = .X64,
/// If true, zeroes will be written to all timestamp fields
reproducible: bool = true,
/// If true, the MEM_WRITE flag will not be set in the .rsrc section header
read_only: bool = false,
/// If non-null, a symbol with this name and storage class EXTERNAL will be added to the symbol table.
define_external_symbol: ?[]const u8 = null,
/// Re-use data offsets for resources with data that is identical.
fold_duplicate_data: bool = false,
};

pub const Diagnostics = union {
none: void,
/// Contains the index of the second resource in a duplicate resource pair.
duplicate_resource: usize,
/// Contains the index of the resource that either has data that's too long or
/// caused the total data to overflow.
overflow_resource: usize,
};

pub fn writeCoff(allocator: Allocator, writer: anytype, resources: []const Resource, options: CoffOptions, diagnostics: ?*Diagnostics) !void {
var resource_tree = ResourceTree.init(allocator, options);
defer resource_tree.deinit();

for (resources, 0..) |*resource, i| {
resource_tree.put(resource, i) catch |err| {
switch (err) {
error.DuplicateResource => {
if (diagnostics) |d_ptr| d_ptr.* = .{ .duplicate_resource = i };
},
error.ResourceDataTooLong, error.TotalResourceDataTooLong => {
if (diagnostics) |d_ptr| d_ptr.* = .{ .overflow_resource = i };
},
else => {},
}
return err;
};
}

const lengths = resource_tree.dataLengths();
const byte_size_of_relocation = 10;
const relocations_len: u32 = @intCast(byte_size_of_relocation * resources.len);
const pointer_to_rsrc01_data = @sizeOf(std.coff.CoffHeader) + (@sizeOf(std.coff.SectionHeader) * 2);
const pointer_to_relocations = pointer_to_rsrc01_data + lengths.rsrc01;
const pointer_to_rsrc02_data = pointer_to_relocations + relocations_len;
const pointer_to_symbol_table = pointer_to_rsrc02_data + lengths.rsrc02;

const timestamp: i64 = if (options.reproducible) 0 else std.time.timestamp();
const size_of_optional_header = 0;
const machine_type: std.coff.MachineType = options.target;
const flags = std.coff.CoffHeaderFlags{
.@"32BIT_MACHINE" = 1,
};
const number_of_symbols = 5 + @as(u32, @intCast(resources.len)) + @intFromBool(options.define_external_symbol != null);
const coff_header = std.coff.CoffHeader{
.machine = machine_type,
.number_of_sections = 2,
.time_date_stamp = @as(u32, @truncate(@as(u64, @bitCast(timestamp)))),
.pointer_to_symbol_table = pointer_to_symbol_table,
.number_of_symbols = number_of_symbols,
.size_of_optional_header = size_of_optional_header,
.flags = flags,
};

try writer.writeStructEndian(coff_header, .little);

const rsrc01_header = std.coff.SectionHeader{
.name = ".rsrc$01".*,
.virtual_size = 0,
.virtual_address = 0,
.size_of_raw_data = lengths.rsrc01,
.pointer_to_raw_data = pointer_to_rsrc01_data,
.pointer_to_relocations = if (relocations_len != 0) pointer_to_relocations else 0,
.pointer_to_linenumbers = 0,
.number_of_relocations = @intCast(resources.len),
.number_of_linenumbers = 0,
.flags = .{
.CNT_INITIALIZED_DATA = 1,
.MEM_WRITE = @intFromBool(!options.read_only),
.MEM_READ = 1,
},
};
try writer.writeStructEndian(rsrc01_header, .little);

const rsrc02_header = std.coff.SectionHeader{
.name = ".rsrc$02".*,
.virtual_size = 0,
.virtual_address = 0,
.size_of_raw_data = lengths.rsrc02,
.pointer_to_raw_data = pointer_to_rsrc02_data,
.pointer_to_relocations = 0,
.pointer_to_linenumbers = 0,
.number_of_relocations = 0,
.number_of_linenumbers = 0,
.flags = .{
.CNT_INITIALIZED_DATA = 1,
.MEM_WRITE = @intFromBool(!options.read_only),
.MEM_READ = 1,
},
};
try writer.writeStructEndian(rsrc02_header, .little);

// TODO: test surrogate pairs
try resource_tree.sort();

var string_table = StringTable{};
defer string_table.deinit(allocator);
const resource_symbols = try resource_tree.writeCoff(
allocator,
writer,
resources,
lengths,
&string_table,
);
defer allocator.free(resource_symbols);

try writeSymbol(writer, .{
.name = "@feat.00".*,
.value = 0x11,
.section_number = .ABSOLUTE,
.type = .{
.base_type = .NULL,
.complex_type = .NULL,
},
.storage_class = .STATIC,
.number_of_aux_symbols = 0,
});

try writeSymbol(writer, .{
.name = ".rsrc$01".*,
.value = 0,
.section_number = @enumFromInt(1),
.type = .{
.base_type = .NULL,
.complex_type = .NULL,
},
.storage_class = .STATIC,
.number_of_aux_symbols = 1,
});
try writeSectionDefinition(writer, .{
.length = lengths.rsrc01,
.number_of_relocations = @intCast(resources.len),
.number_of_linenumbers = 0,
.checksum = 0,
.number = 0,
.selection = .NONE,
.unused = .{0} ** 3,
});

try writeSymbol(writer, .{
.name = ".rsrc$02".*,
.value = 0,
.section_number = @enumFromInt(2),
.type = .{
.base_type = .NULL,
.complex_type = .NULL,
},
.storage_class = .STATIC,
.number_of_aux_symbols = 1,
});
try writeSectionDefinition(writer, .{
.length = lengths.rsrc02,
.number_of_relocations = 0,
.number_of_linenumbers = 0,
.checksum = 0,
.number = 0,
.selection = .NONE,
.unused = .{0} ** 3,
});

for (resource_symbols) |resource_symbol| {
try writeSymbol(writer, resource_symbol);
}

if (options.define_external_symbol) |external_symbol_name| {
const name_bytes: [8]u8 = name_bytes: {
if (external_symbol_name.len > 8) {
const string_table_offset: u32 = try string_table.put(allocator, external_symbol_name);
var bytes = [_]u8{0} ** 8;
std.mem.writeInt(u32, bytes[4..8], string_table_offset, .little);
break :name_bytes bytes;
} else {
var symbol_shortname = [_]u8{0} ** 8;
@memcpy(symbol_shortname[0..external_symbol_name.len], external_symbol_name);
break :name_bytes symbol_shortname;
}
};

try writeSymbol(writer, .{
.name = name_bytes,
.value = 0,
.section_number = .ABSOLUTE,
.type = .{
.base_type = .NULL,
.complex_type = .NULL,
},
.storage_class = .EXTERNAL,
.number_of_aux_symbols = 0,
});
}

try writer.writeInt(u32, string_table.totalByteLength(), .little);
try writer.writeAll(string_table.bytes.items);
}

fn writeSymbol(writer: anytype, symbol: std.coff.Symbol) !void {
    try writer.writeAll(&symbol.name);
    try writer.writeInt(u32, symbol.value, .little);
    try writer.writeInt(u16, @intFromEnum(symbol.section_number), .little);
    try writer.writeInt(u8, @intFromEnum(symbol.type.base_type), .little);
    try writer.writeInt(u8, @intFromEnum(symbol.type.complex_type), .little);
    try writer.writeInt(u8, @intFromEnum(symbol.storage_class), .little);
    try writer.writeInt(u8, symbol.number_of_aux_symbols, .little);
}

fn writeSectionDefinition(writer: anytype, def: std.coff.SectionDefinition) !void {
    try writer.writeInt(u32, def.length, .little);
    try writer.writeInt(u16, def.number_of_relocations, .little);
    try writer.writeInt(u16, def.number_of_linenumbers, .little);
    try writer.writeInt(u32, def.checksum, .little);
    try writer.writeInt(u16, def.number, .little);
    try writer.writeInt(u8, @intFromEnum(def.selection), .little);
    try writer.writeAll(&def.unused);
}

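// Sanity-check sketch (added here for illustration): every record in the COFF
// symbol table, including the auxiliary section definition records, is exactly
// 18 bytes on disk, which is what the fixed symbol/aux layout above relies on.
test "symbol table records are 18 bytes" {
    var buf: [32]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try writeSymbol(fbs.writer(), .{
        .name = "@feat.00".*,
        .value = 0x11,
        .section_number = .ABSOLUTE,
        .type = .{ .base_type = .NULL, .complex_type = .NULL },
        .storage_class = .STATIC,
        .number_of_aux_symbols = 0,
    });
    try std.testing.expectEqual(@as(usize, 18), fbs.pos);

    fbs.reset();
    try writeSectionDefinition(fbs.writer(), .{
        .length = 0,
        .number_of_relocations = 0,
        .number_of_linenumbers = 0,
        .checksum = 0,
        .number = 0,
        .selection = .NONE,
        .unused = .{0} ** 3,
    });
    try std.testing.expectEqual(@as(usize, 18), fbs.pos);
}
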
pub const ResourceDirectoryTable = extern struct {
    characteristics: u32,
    timestamp: u32,
    major_version: u16,
    minor_version: u16,
    number_of_name_entries: u16,
    number_of_id_entries: u16,
};

pub const ResourceDirectoryEntry = extern struct {
    entry: packed union {
        name_offset: packed struct(u32) {
            address: u31,
            /// This is undocumented in the PE/COFF spec, but the high bit
            /// is set by cvtres.exe for string addresses
            to_string: bool = true,
        },
        integer_id: u32,
    },
    offset: packed struct(u32) {
        address: u31,
        to_subdirectory: bool,
    },

    pub fn writeCoff(self: ResourceDirectoryEntry, writer: anytype) !void {
        try writer.writeInt(u32, @bitCast(self.entry), .little);
        try writer.writeInt(u32, @bitCast(self.offset), .little);
    }
};

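// Illustrative check of the encoding above: the high bit of the first u32
// distinguishes "offset to a UTF-16 name string" (the undocumented cvtres.exe
// behavior noted in the doc comment) from an integer ID, and the high bit of
// the second u32 distinguishes "offset to a subdirectory table" from "offset
// to a data entry".
test "resource directory entry high-bit encoding" {
    const entry = ResourceDirectoryEntry{
        .entry = .{ .name_offset = .{ .address = 0x48 } },
        .offset = .{ .address = 0x18, .to_subdirectory = true },
    };
    var buf: [8]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try entry.writeCoff(fbs.writer());
    try std.testing.expectEqual(@as(u32, 0x8000_0048), std.mem.readInt(u32, buf[0..4], .little));
    try std.testing.expectEqual(@as(u32, 0x8000_0018), std.mem.readInt(u32, buf[4..8], .little));
}
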
pub const ResourceDataEntry = extern struct {
    data_rva: u32,
    size: u32,
    codepage: u32,
    reserved: u32 = 0,
};

/// type -> name -> language
const ResourceTree = struct {
    type_to_name_map: std.ArrayHashMapUnmanaged(NameOrOrdinal, NameToLanguageMap, NameOrOrdinalHashContext, true),
    rsrc_string_table: std.ArrayHashMapUnmanaged(NameOrOrdinal, void, NameOrOrdinalHashContext, true),
    deduplicated_data: std.StringArrayHashMapUnmanaged(u32),
    data_offsets: std.ArrayListUnmanaged(u32),
    rsrc02_len: u32,
    coff_options: CoffOptions,
    allocator: Allocator,

    const RelocatableResource = struct {
        resource: *const Resource,
        original_index: usize,
    };
    const LanguageToResourceMap = std.AutoArrayHashMapUnmanaged(Language, RelocatableResource);
    const NameToLanguageMap = std.ArrayHashMapUnmanaged(NameOrOrdinal, LanguageToResourceMap, NameOrOrdinalHashContext, true);

    const NameOrOrdinalHashContext = struct {
        pub fn hash(self: @This(), v: NameOrOrdinal) u32 {
            _ = self;
            var hasher = std.hash.Wyhash.init(0);
            const tag = std.meta.activeTag(v);
            hasher.update(std.mem.asBytes(&tag));
            switch (v) {
                .name => |name| {
                    hasher.update(std.mem.sliceAsBytes(name));
                },
                .ordinal => |*ordinal| {
                    hasher.update(std.mem.asBytes(ordinal));
                },
            }
            return @truncate(hasher.final());
        }
        pub fn eql(self: @This(), a: NameOrOrdinal, b: NameOrOrdinal, b_index: usize) bool {
            _ = self;
            _ = b_index;
            const tag_a = std.meta.activeTag(a);
            const tag_b = std.meta.activeTag(b);
            if (tag_a != tag_b) return false;

            return switch (a) {
                .name => std.mem.eql(u16, a.name, b.name),
                .ordinal => a.ordinal == b.ordinal,
            };
        }
    };

    pub fn init(allocator: Allocator, coff_options: CoffOptions) ResourceTree {
        return .{
            .type_to_name_map = .empty,
            .rsrc_string_table = .empty,
            .deduplicated_data = .empty,
            .data_offsets = .empty,
            .rsrc02_len = 0,
            .coff_options = coff_options,
            .allocator = allocator,
        };
    }

    pub fn deinit(self: *ResourceTree) void {
        for (self.type_to_name_map.values()) |*name_to_lang_map| {
            for (name_to_lang_map.values()) |*lang_to_resources_map| {
                lang_to_resources_map.deinit(self.allocator);
            }
            name_to_lang_map.deinit(self.allocator);
        }
        self.type_to_name_map.deinit(self.allocator);
        self.rsrc_string_table.deinit(self.allocator);
        self.deduplicated_data.deinit(self.allocator);
        self.data_offsets.deinit(self.allocator);
    }

    pub fn put(self: *ResourceTree, resource: *const Resource, original_index: usize) !void {
        const name_to_lang_map = blk: {
            const gop_result = try self.type_to_name_map.getOrPut(self.allocator, resource.type_value);
            if (!gop_result.found_existing) {
                gop_result.value_ptr.* = .empty;
            }
            break :blk gop_result.value_ptr;
        };
        const lang_to_resources_map = blk: {
            const gop_result = try name_to_lang_map.getOrPut(self.allocator, resource.name_value);
            if (!gop_result.found_existing) {
                gop_result.value_ptr.* = .empty;
            }
            break :blk gop_result.value_ptr;
        };
        {
            const gop_result = try lang_to_resources_map.getOrPut(self.allocator, resource.language);
            if (gop_result.found_existing) return error.DuplicateResource;
            gop_result.value_ptr.* = .{
                .original_index = original_index,
                .resource = resource,
            };
        }

        // Resize the data_offsets list to accommodate the index, but only if necessary
        try self.data_offsets.resize(self.allocator, @max(self.data_offsets.items.len, original_index + 1));
        if (self.coff_options.fold_duplicate_data) {
            const gop_result = try self.deduplicated_data.getOrPut(self.allocator, resource.data);
            if (!gop_result.found_existing) {
                gop_result.value_ptr.* = self.rsrc02_len;
                try self.incrementRsrc02Len(resource);
            }
            self.data_offsets.items[original_index] = gop_result.value_ptr.*;
        } else {
            self.data_offsets.items[original_index] = self.rsrc02_len;
            try self.incrementRsrc02Len(resource);
        }

        if (resource.type_value == .name and !self.rsrc_string_table.contains(resource.type_value)) {
            try self.rsrc_string_table.putNoClobber(self.allocator, resource.type_value, {});
        }
        if (resource.name_value == .name and !self.rsrc_string_table.contains(resource.name_value)) {
            try self.rsrc_string_table.putNoClobber(self.allocator, resource.name_value, {});
        }
    }

    fn incrementRsrc02Len(self: *ResourceTree, resource: *const Resource) !void {
        // Note: This @intCast is only safe if we assume that the resource was parsed from a .res file,
        // since the maximum data length for a resource in the .res file format is maxInt(u32).
        // TODO: Either codify this properly or use std.math.cast and return an error.
        const data_len: u32 = @intCast(resource.data.len);
        const data_len_including_padding: u32 = std.math.cast(u32, std.mem.alignForward(u33, data_len, 8)) orelse {
            return error.ResourceDataTooLong;
        };
        // TODO: Verify that this corresponds to an actual PE/COFF limitation for resource data
        // in the final linked binary. The limit may turn out to be shorter than u32 max if both
        // the tree data and the resource data lengths together need to fit within a u32,
        // or it may be longer in which case we would want to add more .rsrc$NN sections
        // to the object file for the data that overflows .rsrc$02.
        self.rsrc02_len = std.math.add(u32, self.rsrc02_len, data_len_including_padding) catch {
            return error.TotalResourceDataTooLong;
        };
    }

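    // Sketch of the padding rule used above: resource data in .rsrc$02 is laid
    // out on 8-byte boundaries, and the u33 intermediate keeps alignForward
    // from overflowing for data lengths near maxInt(u32).
    test "rsrc$02 data lengths are padded to 8-byte boundaries" {
        try std.testing.expectEqual(@as(u33, 8), std.mem.alignForward(u33, 5, 8));
        try std.testing.expectEqual(@as(u33, 0x1_0000_0000), std.mem.alignForward(u33, std.math.maxInt(u32), 8));
    }
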
    const Lengths = struct {
        level1: u32,
        level2: u32,
        level3: u32,
        data_entries: u32,
        strings: u32,
        padding: u32,

        rsrc01: u32,
        rsrc02: u32,

        fn stringsStart(self: Lengths) u32 {
            return self.rsrc01 - self.strings - self.padding;
        }
    };

    pub fn dataLengths(self: *const ResourceTree) Lengths {
        var lengths: Lengths = .{
            .level1 = 0,
            .level2 = 0,
            .level3 = 0,
            .data_entries = 0,
            .strings = 0,
            .padding = 0,
            .rsrc01 = undefined,
            .rsrc02 = self.rsrc02_len,
        };
        lengths.level1 += @sizeOf(ResourceDirectoryTable);
        for (self.type_to_name_map.values()) |name_to_lang_map| {
            lengths.level1 += @sizeOf(ResourceDirectoryEntry);
            lengths.level2 += @sizeOf(ResourceDirectoryTable);
            for (name_to_lang_map.values()) |lang_to_resources_map| {
                lengths.level2 += @sizeOf(ResourceDirectoryEntry);
                lengths.level3 += @sizeOf(ResourceDirectoryTable);
                for (lang_to_resources_map.values()) |_| {
                    lengths.level3 += @sizeOf(ResourceDirectoryEntry);
                    lengths.data_entries += @sizeOf(ResourceDataEntry);
                }
            }
        }
        for (self.rsrc_string_table.keys()) |v| {
            lengths.strings += @sizeOf(u16); // string length
            lengths.strings += @intCast(v.name.len * @sizeOf(u16));
        }
        lengths.rsrc01 = lengths.level1 + lengths.level2 + lengths.level3 + lengths.data_entries + lengths.strings;
        lengths.padding = @intCast((4 -% lengths.rsrc01) % 4);
        lengths.rsrc01 += lengths.padding;
        return lengths;
    }

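    // Worked example of the `(4 -% len) % 4` trick above: wrapping subtraction
    // computes how many bytes are needed to round .rsrc$01 up to a 4-byte
    // boundary, with multiples of 4 (including 0) needing no padding.
    test "rsrc$01 padding rounds up to a 4-byte boundary" {
        try std.testing.expectEqual(@as(u32, 3), (4 -% @as(u32, 9)) % 4);
        try std.testing.expectEqual(@as(u32, 0), (4 -% @as(u32, 8)) % 4);
        try std.testing.expectEqual(@as(u32, 0), (4 -% @as(u32, 0)) % 4);
    }
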
    pub fn sort(self: *ResourceTree) !void {
        const NameOrOrdinalSortContext = struct {
            keys: []NameOrOrdinal,

            pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
                const a = ctx.keys[a_index];
                const b = ctx.keys[b_index];
                if (std.meta.activeTag(a) != std.meta.activeTag(b)) {
                    return if (a == .name) true else false;
                }
                switch (a) {
                    .name => {
                        const n = @min(a.name.len, b.name.len);
                        for (a.name[0..n], b.name[0..n]) |a_c, b_c| {
                            switch (std.math.order(std.mem.littleToNative(u16, a_c), std.mem.littleToNative(u16, b_c))) {
                                .eq => continue,
                                .lt => return true,
                                .gt => return false,
                            }
                        }
                        return a.name.len < b.name.len;
                    },
                    .ordinal => {
                        return a.ordinal < b.ordinal;
                    },
                }
            }
        };
        self.type_to_name_map.sortUnstable(NameOrOrdinalSortContext{ .keys = self.type_to_name_map.keys() });
        for (self.type_to_name_map.values()) |*name_to_lang_map| {
            name_to_lang_map.sortUnstable(NameOrOrdinalSortContext{ .keys = name_to_lang_map.keys() });
        }
        const LangSortContext = struct {
            keys: []Language,

            pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
                return @as(u16, @bitCast(ctx.keys[a_index])) < @as(u16, @bitCast(ctx.keys[b_index]));
            }
        };
        for (self.type_to_name_map.values()) |*name_to_lang_map| {
            for (name_to_lang_map.values()) |*lang_to_resource_map| {
                lang_to_resource_map.sortUnstable(LangSortContext{ .keys = lang_to_resource_map.keys() });
            }
        }
    }

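    // The ordering produced above matches the PE/COFF resource directory
    // convention: within each directory level, name entries come before ID
    // entries, names compare by UTF-16 code unit, and IDs sort in ascending
    // numeric order.
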
    pub fn writeCoff(
        self: *const ResourceTree,
        allocator: Allocator,
        writer: anytype,
        resources_in_data_order: []const Resource,
        lengths: Lengths,
        coff_string_table: *StringTable,
    ) ![]const std.coff.Symbol {
        if (self.type_to_name_map.count() == 0) {
            try writer.writeByteNTimes(0, 16);
            return &.{};
        }

        var counting_writer = std.io.countingWriter(writer);
        const w = counting_writer.writer();

        var level2_list: std.ArrayListUnmanaged(*const NameToLanguageMap) = .empty;
        defer level2_list.deinit(allocator);

        var level3_list: std.ArrayListUnmanaged(*const LanguageToResourceMap) = .empty;
        defer level3_list.deinit(allocator);

        var resources_list: std.ArrayListUnmanaged(*const RelocatableResource) = .empty;
        defer resources_list.deinit(allocator);

        var relocations = Relocations.init(allocator);
        defer relocations.deinit();

        var string_offsets = try allocator.alloc(u31, self.rsrc_string_table.count());
        const strings_start = lengths.stringsStart();
        defer allocator.free(string_offsets);
        {
            var string_address: u31 = @intCast(strings_start);
            for (self.rsrc_string_table.keys(), 0..) |v, i| {
                string_offsets[i] = string_address;
                string_address += @sizeOf(u16) + @as(u31, @intCast(v.name.len * @sizeOf(u16)));
            }
        }

        const level2_start = lengths.level1;
        var level2_address = level2_start;
        {
            const counts = entryTypeCounts(self.type_to_name_map.keys());
            const table = ResourceDirectoryTable{
                .characteristics = 0,
                .timestamp = 0,
                .major_version = 0,
                .minor_version = 0,
                .number_of_id_entries = counts.ids,
                .number_of_name_entries = counts.names,
            };
            try w.writeStructEndian(table, .little);

            var it = self.type_to_name_map.iterator();
            while (it.next()) |entry| {
                const type_value = entry.key_ptr;
                const dir_entry = ResourceDirectoryEntry{
                    .entry = switch (type_value.*) {
                        .name => .{ .name_offset = .{ .address = string_offsets[self.rsrc_string_table.getIndex(type_value.*).?] } },
                        .ordinal => .{ .integer_id = type_value.ordinal },
                    },
                    .offset = .{
                        .address = @intCast(level2_address),
                        .to_subdirectory = true,
                    },
                };
                try dir_entry.writeCoff(w);
                level2_address += @sizeOf(ResourceDirectoryTable) + @as(u32, @intCast(entry.value_ptr.count() * @sizeOf(ResourceDirectoryEntry)));

                const name_to_lang_map = entry.value_ptr;
                try level2_list.append(allocator, name_to_lang_map);
            }
        }
        std.debug.assert(counting_writer.bytes_written == level2_start);

        const level3_start = level2_start + lengths.level2;
        var level3_address = level3_start;
        for (level2_list.items) |name_to_lang_map| {
            const counts = entryTypeCounts(name_to_lang_map.keys());
            const table = ResourceDirectoryTable{
                .characteristics = 0,
                .timestamp = 0,
                .major_version = 0,
                .minor_version = 0,
                .number_of_id_entries = counts.ids,
                .number_of_name_entries = counts.names,
            };
            try w.writeStructEndian(table, .little);

            var it = name_to_lang_map.iterator();
            while (it.next()) |entry| {
                const name_value = entry.key_ptr;
                const dir_entry = ResourceDirectoryEntry{
                    .entry = switch (name_value.*) {
                        .name => .{ .name_offset = .{ .address = string_offsets[self.rsrc_string_table.getIndex(name_value.*).?] } },
                        .ordinal => .{ .integer_id = name_value.ordinal },
                    },
                    .offset = .{
                        .address = @intCast(level3_address),
                        .to_subdirectory = true,
                    },
                };
                try dir_entry.writeCoff(w);
                level3_address += @sizeOf(ResourceDirectoryTable) + @as(u32, @intCast(entry.value_ptr.count() * @sizeOf(ResourceDirectoryEntry)));

                const lang_to_resources_map = entry.value_ptr;
                try level3_list.append(allocator, lang_to_resources_map);
            }
        }
        std.debug.assert(counting_writer.bytes_written == level3_start);

        var reloc_addresses = try allocator.alloc(u32, resources_in_data_order.len);
        defer allocator.free(reloc_addresses);

        const data_entries_start = level3_start + lengths.level3;
        var data_entry_address = data_entries_start;
        for (level3_list.items) |lang_to_resources_map| {
            const counts = EntryTypeCounts{
                .names = 0,
                .ids = @intCast(lang_to_resources_map.count()),
            };
            const table = ResourceDirectoryTable{
                .characteristics = 0,
                .timestamp = 0,
                .major_version = 0,
                .minor_version = 0,
                .number_of_id_entries = counts.ids,
                .number_of_name_entries = counts.names,
            };
            try w.writeStructEndian(table, .little);

            var it = lang_to_resources_map.iterator();
            while (it.next()) |entry| {
                const lang = entry.key_ptr.*;
                const dir_entry = ResourceDirectoryEntry{
                    .entry = .{ .integer_id = lang.asInt() },
                    .offset = .{
                        .address = @intCast(data_entry_address),
                        .to_subdirectory = false,
                    },
                };

                const reloc_resource = entry.value_ptr;
                reloc_addresses[reloc_resource.original_index] = @intCast(data_entry_address);

                try dir_entry.writeCoff(w);
                data_entry_address += @sizeOf(ResourceDataEntry);

                try resources_list.append(allocator, reloc_resource);
            }
        }
        std.debug.assert(counting_writer.bytes_written == data_entries_start);

        for (resources_list.items, 0..) |reloc_resource, i| {
            // TODO: This logic works but is convoluted, would be good to clean this up
            const orig_resource = &resources_in_data_order[reloc_resource.original_index];
            const address: u32 = reloc_addresses[i];
            try relocations.add(address, self.data_offsets.items[i]);
            const data_entry = ResourceDataEntry{
                .data_rva = 0, // relocation
                .size = @intCast(orig_resource.data.len),
                .codepage = 0,
            };
            try w.writeStructEndian(data_entry, .little);
        }
        std.debug.assert(counting_writer.bytes_written == strings_start);

        for (self.rsrc_string_table.keys()) |v| {
            const str = v.name;
            try w.writeInt(u16, @intCast(str.len), .little);
            try w.writeAll(std.mem.sliceAsBytes(str));
        }

        try w.writeByteNTimes(0, lengths.padding);

        for (relocations.list.items) |relocation| {
            try writeRelocation(w, std.coff.Relocation{
                .virtual_address = relocation.relocation_address,
                .symbol_table_index = relocation.symbol_index,
                .type = supported_targets.rvaRelocationTypeIndicator(self.coff_options.target).?,
            });
        }

        if (self.coff_options.fold_duplicate_data) {
            for (self.deduplicated_data.keys()) |data| {
                const padding_bytes: u4 = @intCast((8 -% data.len) % 8);
                try w.writeAll(data);
                try w.writeByteNTimes(0, padding_bytes);
            }
        } else {
            for (resources_in_data_order) |resource| {
                const padding_bytes: u4 = @intCast((8 -% resource.data.len) % 8);
                try w.writeAll(resource.data);
                try w.writeByteNTimes(0, padding_bytes);
            }
        }

        var symbols = try allocator.alloc(std.coff.Symbol, resources_list.items.len);
        errdefer allocator.free(symbols);

        for (relocations.list.items, 0..) |relocation, i| {
            // cvtres.exe writes the symbol names as $R<data offset as hexadecimal>.
            //
            // When the data offset would exceed 6 hex digits in cvtres.exe, it
            // truncates the value down to 6 hex digits. This is bad behavior, since
            // e.g. an initial resource with exactly 16 MiB of data and the
            // resource following it would both have the symbol name $R000000.
            //
            // Instead, if the offset would exceed 6 hexadecimal digits,
            // we put the longer name in the string table.
            //
            // Another option would be to adopt llvm-cvtres' behavior
            // of $R000001, $R000002, etc. rather than using data offset values.
            var name_buf: [8]u8 = undefined;
            if (relocation.data_offset > std.math.maxInt(u24)) {
                const name_slice = try std.fmt.allocPrint(allocator, "$R{X}", .{relocation.data_offset});
                defer allocator.free(name_slice);
                const string_table_offset: u32 = try coff_string_table.put(allocator, name_slice);
                std.mem.writeInt(u32, name_buf[0..4], 0, .little);
                std.mem.writeInt(u32, name_buf[4..8], string_table_offset, .little);
            } else {
                const name_slice = std.fmt.bufPrint(&name_buf, "$R{X:0>6}", .{relocation.data_offset}) catch unreachable;
                std.debug.assert(name_slice.len == 8);
            }

            symbols[i] = .{
                .name = name_buf,
                .value = relocation.data_offset,
                .section_number = @enumFromInt(2),
                .type = .{
                    .base_type = .NULL,
                    .complex_type = .NULL,
                },
                .storage_class = .STATIC,
                .number_of_aux_symbols = 0,
            };
        }

        return symbols;
    }

    fn writeRelocation(writer: anytype, relocation: std.coff.Relocation) !void {
        try writer.writeInt(u32, relocation.virtual_address, .little);
        try writer.writeInt(u32, relocation.symbol_table_index, .little);
        try writer.writeInt(u16, relocation.type, .little);
    }

    const EntryTypeCounts = struct {
        names: u16,
        ids: u16,
    };

    fn entryTypeCounts(s: []const NameOrOrdinal) EntryTypeCounts {
        var names: u16 = 0;
        var ordinals: u16 = 0;
        for (s) |v| {
            switch (v) {
                .name => names += 1,
                .ordinal => ordinals += 1,
            }
        }
        return .{ .names = names, .ids = ordinals };
    }
};

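// Hedged usage sketch of the ResourceTree pipeline above (`resources`,
// `writer`, and `coff_options` stand in for the caller's values, and
// `resources` is assumed to be in its original .res data order):
//
//   var tree = ResourceTree.init(allocator, coff_options);
//   defer tree.deinit();
//   for (resources, 0..) |*resource, i| try tree.put(resource, i);
//   try tree.sort();
//   const lengths = tree.dataLengths();
//   var string_table = StringTable{};
//   defer string_table.deinit(allocator);
//   const symbols = try tree.writeCoff(allocator, writer, resources, lengths, &string_table);
//   defer allocator.free(symbols);
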
const Relocation = struct {
    symbol_index: u32,
    data_offset: u32,
    relocation_address: u32,
};

const Relocations = struct {
    allocator: Allocator,
    list: std.ArrayListUnmanaged(Relocation) = .empty,
    cur_symbol_index: u32 = 5,
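    // (Starts at 5 because symbol table indices 0-4 are always @feat.00,
    // .rsrc$01 plus its section-definition aux record, and .rsrc$02 plus its
    // aux record; see the symbol writing order above.)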

    pub fn init(allocator: Allocator) Relocations {
        return .{ .allocator = allocator };
    }

    pub fn deinit(self: *Relocations) void {
        self.list.deinit(self.allocator);
    }

    pub fn add(self: *Relocations, relocation_address: u32, data_offset: u32) !void {
        try self.list.append(self.allocator, .{
            .symbol_index = self.cur_symbol_index,
            .data_offset = data_offset,
            .relocation_address = relocation_address,
        });
        self.cur_symbol_index += 1;
    }
};

/// Does not do deduplication (only because there's no chance of duplicate strings in this
/// instance).
const StringTable = struct {
    bytes: std.ArrayListUnmanaged(u8) = .empty,

    pub fn deinit(self: *StringTable, allocator: Allocator) void {
        self.bytes.deinit(allocator);
    }

    /// Returns the byte offset of the string in the string table
    pub fn put(self: *StringTable, allocator: Allocator, string: []const u8) !u32 {
        const null_terminated_len = string.len + 1;
        const start_offset = self.totalByteLength();
        if (start_offset + null_terminated_len > std.math.maxInt(u32)) {
            return error.StringTableOverflow;
        }
        try self.bytes.ensureUnusedCapacity(allocator, null_terminated_len);
        self.bytes.appendSliceAssumeCapacity(string);
        self.bytes.appendAssumeCapacity(0);
        return start_offset;
    }

    /// Returns the total byte count of the string table, including the byte count of the size field
    pub fn totalByteLength(self: StringTable) u32 {
        return @intCast(4 + self.bytes.items.len);
    }
};

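// Sketch of the COFF string table layout produced above: the table begins
// with a u32 byte count that includes the count field itself, so the first
// string always lands at offset 4. The "$R1000000" name here is just an
// example of a symbol name too long for the 8-byte short-name field.
test StringTable {
    const allocator = std.testing.allocator;
    var table = StringTable{};
    defer table.deinit(allocator);
    try std.testing.expectEqual(@as(u32, 4), table.totalByteLength());
    try std.testing.expectEqual(@as(u32, 4), try table.put(allocator, "$R1000000"));
    // 4 (size field) + 9 string bytes + 1 NUL terminator
    try std.testing.expectEqual(@as(u32, 14), table.totalByteLength());
}
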
pub const supported_targets = struct {
    /// Enum containing a mixture of names that come from:
    /// - Machine Types constants in the PE format spec:
    ///   https://learn.microsoft.com/en-us/windows/win32/debug/pe-format#machine-types
    /// - cvtres.exe /machine options
    /// - Zig/LLVM arch names
    /// All field names are lowercase regardless of their casing used in the above origins.
    pub const Arch = enum {
        // cvtres.exe /machine names
        x64,
        x86,
        /// Note: Following cvtres.exe's lead, this corresponds to ARMNT, not ARM
        arm,
        arm64,
        arm64ec,
        arm64x,
        ia64,
        ebc,

        // PE/COFF MACHINE constant names not covered above
        amd64,
        i386,
        armnt,

        // Zig/LLVM names not already covered above
        x86_64,
        aarch64,

        pub fn toCoffMachineType(arch: Arch) std.coff.MachineType {
            return switch (arch) {
                .x64, .amd64, .x86_64 => .X64,
                .x86, .i386 => .I386,
                .arm, .armnt => .ARMNT,
                .arm64, .aarch64 => .ARM64,
                .arm64ec => .ARM64EC,
                .arm64x => .ARM64X,
                .ia64 => .IA64,
                .ebc => .EBC,
            };
        }

        pub fn description(arch: Arch) []const u8 {
            return switch (arch) {
                .x64, .amd64, .x86_64 => "64-bit X86",
                .x86, .i386 => "32-bit X86",
                .arm, .armnt => "ARM Thumb-2 little endian",
                .arm64, .aarch64 => "ARM64/AArch64 little endian",
                .arm64ec => "ARM64 \"Emulation Compatible\"",
                .arm64x => "ARM64 and ARM64EC together",
                .ia64 => "64-bit Intel Itanium",
                .ebc => "EFI Byte Code",
            };
        }

        pub const ordered_for_display: []const Arch = &.{
            .x64,
            .x86_64,
            .amd64,
            .x86,
            .i386,
            .arm64,
            .aarch64,
            .arm,
            .armnt,
            .arm64ec,
            .arm64x,
            .ia64,
            .ebc,
        };
        comptime {
            for (@typeInfo(Arch).@"enum".fields) |enum_field| {
                _ = std.mem.indexOfScalar(Arch, ordered_for_display, @enumFromInt(enum_field.value)) orelse {
                    @compileError(std.fmt.comptimePrint("'{s}' missing from ordered_for_display", .{enum_field.name}));
                };
            }
        }

        pub const longest_name = blk: {
            var len = 0;
            for (@typeInfo(Arch).@"enum".fields) |field| {
                if (field.name.len > len) len = field.name.len;
            }
            break :blk len;
        };

        pub fn fromStringIgnoreCase(str: []const u8) ?Arch {
            if (str.len > longest_name) return null;
            var lower_buf: [longest_name]u8 = undefined;
            const lower = std.ascii.lowerString(&lower_buf, str);
            return std.meta.stringToEnum(Arch, lower);
        }

        test fromStringIgnoreCase {
            try std.testing.expectEqual(.x64, Arch.fromStringIgnoreCase("x64").?);
            try std.testing.expectEqual(.x64, Arch.fromStringIgnoreCase("X64").?);
            try std.testing.expectEqual(.aarch64, Arch.fromStringIgnoreCase("Aarch64").?);
            try std.testing.expectEqual(null, Arch.fromStringIgnoreCase("armzzz"));
            try std.testing.expectEqual(null, Arch.fromStringIgnoreCase("long string that is longer than any field"));
        }
    };

    // https://learn.microsoft.com/en-us/windows/win32/debug/pe-format#type-indicators
    pub fn rvaRelocationTypeIndicator(target: std.coff.MachineType) ?u16 {
        return switch (target) {
            .X64 => 0x3, // IMAGE_REL_AMD64_ADDR32NB
            .I386 => 0x7, // IMAGE_REL_I386_DIR32NB
            .ARMNT => 0x2, // IMAGE_REL_ARM_ADDR32NB
            .ARM64, .ARM64EC, .ARM64X => 0x2, // IMAGE_REL_ARM64_ADDR32NB
            .IA64 => 0x10, // IMAGE_REL_IA64_DIR32NB
            .EBC => 0x1, // This is what cvtres.exe writes for this target, unsure where it comes from
            else => null,
        };
    }

    pub fn isSupported(target: std.coff.MachineType) bool {
        return rvaRelocationTypeIndicator(target) != null;
    }

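    // Illustrative usage: machine types without a known RVA relocation type
    // are rejected up front rather than producing an object file the linker
    // cannot relocate. (Assumes std.coff.MachineType has an UNKNOWN member,
    // which is used here as an arbitrary unsupported machine type.)
    test isSupported {
        try std.testing.expect(isSupported(.X64));
        try std.testing.expect(isSupported(.ARM64EC));
        try std.testing.expect(!isSupported(.UNKNOWN));
    }
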
    comptime {
        // Enforce two things:
        // 1. Arch enum field names are all lowercase (necessary for how fromStringIgnoreCase is implemented)
        // 2. All enum fields in Arch have an associated RVA relocation type when converted to a coff.MachineType
        for (@typeInfo(Arch).@"enum".fields) |enum_field| {
            const all_lower = all_lower: for (enum_field.name) |c| {
                if (std.ascii.isUpper(c)) break :all_lower false;
            } else break :all_lower true;
            if (!all_lower) @compileError(std.fmt.comptimePrint("Arch field is not all lowercase: {s}", .{enum_field.name}));
            const coff_machine = @field(Arch, enum_field.name).toCoffMachineType();
            _ = rvaRelocationTypeIndicator(coff_machine) orelse {
                @compileError(std.fmt.comptimePrint("No RVA relocation for Arch: {s}", .{enum_field.name}));
            };
        }
    }
};
+308
-90
@@ -7,7 +7,10 @@ const Diagnostics = @import("errors.zig").Diagnostics;
const cli = @import("cli.zig");
const preprocess = @import("preprocess.zig");
const renderErrorMessage = @import("utils.zig").renderErrorMessage;
const openFileNotDir = @import("utils.zig").openFileNotDir;
const cvtres = @import("cvtres.zig");
const hasDisjointCodePage = @import("disjoint_code_page.zig").hasDisjointCodePage;
const fmtResourceType = @import("res.zig").NameOrOrdinal.fmtResourceType;
const aro = @import("aro");

pub fn main() !void {
@@ -135,7 +138,10 @@ pub fn main() !void {

    try argv.append("arocc"); // dummy command name
    try preprocess.appendAroArgs(aro_arena, &argv, options, include_paths);
    try argv.append(options.input_filename);
    try argv.append(switch (options.input_source) {
        .stdio => "-",
        .filename => |filename| filename,
    });

    if (options.verbose) {
        try stdout_writer.writeAll("Preprocessor: arocc (built-in)\n");
@@ -164,121 +170,333 @@ pub fn main() !void {

            break :full_input try preprocessed_buf.toOwnedSlice();
        } else {
            break :full_input std.fs.cwd().readFileAlloc(allocator, options.input_filename, std.math.maxInt(usize)) catch |err| {
                try error_handler.emitMessage(allocator, .err, "unable to read input file path '{s}': {s}", .{ options.input_filename, @errorName(err) });
                std.process.exit(1);
            };
            switch (options.input_source) {
                .stdio => |file| {
                    break :full_input file.readToEndAlloc(allocator, std.math.maxInt(usize)) catch |err| {
                        try error_handler.emitMessage(allocator, .err, "unable to read input from stdin: {s}", .{@errorName(err)});
                        std.process.exit(1);
                    };
                },
                .filename => |input_filename| {
                    break :full_input std.fs.cwd().readFileAlloc(allocator, input_filename, std.math.maxInt(usize)) catch |err| {
                        try error_handler.emitMessage(allocator, .err, "unable to read input file path '{s}': {s}", .{ input_filename, @errorName(err) });
                        std.process.exit(1);
                    };
                },
            }
        }
    };
    defer allocator.free(full_input);

    if (options.preprocess == .only) {
        try std.fs.cwd().writeFile(.{ .sub_path = options.output_filename, .data = full_input });
        switch (options.output_source) {
            .stdio => |output_file| {
                try output_file.writeAll(full_input);
            },
            .filename => |output_filename| {
                try std.fs.cwd().writeFile(.{ .sub_path = output_filename, .data = full_input });
            },
        }
        return;
    }

    // Note: We still want to run this when no-preprocess is set because:
    // 1. We want to print accurate line numbers after removing multiline comments
    // 2. We want to be able to handle an already-preprocessed input with #line commands in it
    var mapping_results = parseAndRemoveLineCommands(allocator, full_input, full_input, .{ .initial_filename = options.input_filename }) catch |err| switch (err) {
        error.InvalidLineCommand => {
            // TODO: Maybe output the invalid line command
            try error_handler.emitMessage(allocator, .err, "invalid line command in the preprocessed source", .{});
            if (options.preprocess == .no) {
                try error_handler.emitMessage(allocator, .note, "line commands must be of the format: #line <num> \"<path>\"", .{});
            } else {
                try error_handler.emitMessage(allocator, .note, "this is likely to be a bug, please report it", .{});
    var resources = resources: {
        const need_intermediate_res = options.output_format == .coff and options.input_format != .res;
        var res_stream = if (need_intermediate_res)
            IoStream{
                .name = "<in-memory intermediate res>",
                .intermediate = true,
                .source = .{ .memory = .empty },
            }
            std.process.exit(1);
        },
        error.LineNumberOverflow => {
            // TODO: Better error message
            try error_handler.emitMessage(allocator, .err, "line number count exceeded maximum of {}", .{std.math.maxInt(usize)});
            std.process.exit(1);
        },
        error.OutOfMemory => |e| return e,
    };
    defer mapping_results.mappings.deinit(allocator);
        else if (options.input_format == .res)
            IoStream.fromIoSource(options.input_source, .input) catch |err| {
                try error_handler.emitMessage(allocator, .err, "unable to read res file path '{s}': {s}", .{ options.input_source.filename, @errorName(err) });
                std.process.exit(1);
            }
        else
            IoStream.fromIoSource(options.output_source, .output) catch |err| {
                try error_handler.emitMessage(allocator, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
                std.process.exit(1);
            };
        defer res_stream.deinit(allocator);

    const default_code_page = options.default_code_page orelse .windows1252;
    const has_disjoint_code_page = hasDisjointCodePage(mapping_results.result, &mapping_results.mappings, default_code_page);
        const res_data = res_data: {
            if (options.input_format != .res) {
                // Note: We still want to run this when no-preprocess is set because:
                // 1. We want to print accurate line numbers after removing multiline comments
                // 2. We want to be able to handle an already-preprocessed input with #line commands in it
                var mapping_results = parseAndRemoveLineCommands(allocator, full_input, full_input, .{ .initial_filename = options.input_source.filename }) catch |err| switch (err) {
                    error.InvalidLineCommand => {
                        // TODO: Maybe output the invalid line command
                        try error_handler.emitMessage(allocator, .err, "invalid line command in the preprocessed source", .{});
                        if (options.preprocess == .no) {
                            try error_handler.emitMessage(allocator, .note, "line commands must be of the format: #line <num> \"<path>\"", .{});
                        } else {
                            try error_handler.emitMessage(allocator, .note, "this is likely to be a bug, please report it", .{});
                        }
                        std.process.exit(1);
                    },
                    error.LineNumberOverflow => {
                        // TODO: Better error message
                        try error_handler.emitMessage(allocator, .err, "line number count exceeded maximum of {}", .{std.math.maxInt(usize)});
                        std.process.exit(1);
                    },
                    error.OutOfMemory => |e| return e,
                };
                defer mapping_results.mappings.deinit(allocator);

                const final_input = try removeComments(mapping_results.result, mapping_results.result, &mapping_results.mappings);
                const default_code_page = options.default_code_page orelse .windows1252;
                const has_disjoint_code_page = hasDisjointCodePage(mapping_results.result, &mapping_results.mappings, default_code_page);

    var output_file = std.fs.cwd().createFile(options.output_filename, .{}) catch |err| {
        try error_handler.emitMessage(allocator, .err, "unable to create output file '{s}': {s}", .{ options.output_filename, @errorName(err) });
        std.process.exit(1);
    };
    var output_file_closed = false;
    defer if (!output_file_closed) output_file.close();
    const final_input = try removeComments(mapping_results.result, mapping_results.result, &mapping_results.mappings);

    var diagnostics = Diagnostics.init(allocator);
    defer diagnostics.deinit();
                var diagnostics = Diagnostics.init(allocator);
                defer diagnostics.deinit();

    var output_buffered_stream = std.io.bufferedWriter(output_file.writer());
                const res_stream_writer = res_stream.source.writer(allocator);
                var output_buffered_stream = std.io.bufferedWriter(res_stream_writer);

    compile(allocator, final_input, output_buffered_stream.writer(), .{
        .cwd = std.fs.cwd(),
        .diagnostics = &diagnostics,
        .source_mappings = &mapping_results.mappings,
        .dependencies_list = maybe_dependencies_list,
        .ignore_include_env_var = options.ignore_include_env_var,
        .extra_include_paths = options.extra_include_paths.items,
        .system_include_paths = include_paths,
        .default_language_id = options.default_language_id,
        .default_code_page = default_code_page,
        .disjoint_code_page = has_disjoint_code_page,
        .verbose = options.verbose,
        .null_terminate_string_table_strings = options.null_terminate_string_table_strings,
        .max_string_literal_codepoints = options.max_string_literal_codepoints,
        .silent_duplicate_control_ids = options.silent_duplicate_control_ids,
        .warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page,
    }) catch |err| switch (err) {
        error.ParseError, error.CompileError => {
            try error_handler.emitDiagnostics(allocator, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings);
            // Delete the output file on error
            output_file.close();
            output_file_closed = true;
            // Failing to delete is not really a big deal, so swallow any errors
            std.fs.cwd().deleteFile(options.output_filename) catch {};
            std.process.exit(1);
        },
        else => |e| return e,
    };
                compile(allocator, final_input, output_buffered_stream.writer(), .{
                    .cwd = std.fs.cwd(),
                    .diagnostics = &diagnostics,
                    .source_mappings = &mapping_results.mappings,
                    .dependencies_list = maybe_dependencies_list,
                    .ignore_include_env_var = options.ignore_include_env_var,
                    .extra_include_paths = options.extra_include_paths.items,
                    .system_include_paths = include_paths,
                    .default_language_id = options.default_language_id,
                    .default_code_page = default_code_page,
                    .disjoint_code_page = has_disjoint_code_page,
                    .verbose = options.verbose,
                    .null_terminate_string_table_strings = options.null_terminate_string_table_strings,
                    .max_string_literal_codepoints = options.max_string_literal_codepoints,
                    .silent_duplicate_control_ids = options.silent_duplicate_control_ids,
                    .warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page,
                }) catch |err| switch (err) {
                    error.ParseError, error.CompileError => {
                        try error_handler.emitDiagnostics(allocator, std.fs.cwd(), final_input, &diagnostics, mapping_results.mappings);
                        // Delete the output file on error
                        res_stream.cleanupAfterError();
                        std.process.exit(1);
                    },
                    else => |e| return e,
                };

    try output_buffered_stream.flush();
                try output_buffered_stream.flush();

    // print any warnings/notes
    if (!zig_integration) {
        diagnostics.renderToStdErr(std.fs.cwd(), final_input, stderr_config, mapping_results.mappings);
    }
                // print any warnings/notes
                if (!zig_integration) {
                    diagnostics.renderToStdErr(std.fs.cwd(), final_input, stderr_config, mapping_results.mappings);
                }

    // write the depfile
    if (options.depfile_path) |depfile_path| {
        var depfile = std.fs.cwd().createFile(depfile_path, .{}) catch |err| {
            try error_handler.emitMessage(allocator, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) });
                // write the depfile
                if (options.depfile_path) |depfile_path| {
                    var depfile = std.fs.cwd().createFile(depfile_path, .{}) catch |err| {
                        try error_handler.emitMessage(allocator, .err, "unable to create depfile '{s}': {s}", .{ depfile_path, @errorName(err) });
                        std.process.exit(1);
                    };
                    defer depfile.close();

                    const depfile_writer = depfile.writer();
                    var depfile_buffered_writer = std.io.bufferedWriter(depfile_writer);
                    switch (options.depfile_fmt) {
                        .json => {
                            var write_stream = std.json.writeStream(depfile_buffered_writer.writer(), .{ .whitespace = .indent_2 });
                            defer write_stream.deinit();

                            try write_stream.beginArray();
                            for (dependencies_list.items) |dep_path| {
                                try write_stream.write(dep_path);
                            }
                            try write_stream.endArray();
                        },
                    }
                    try depfile_buffered_writer.flush();
                }
            }

            if (options.output_format != .coff) return;

            break :res_data res_stream.source.readAll(allocator) catch |err| {
                try error_handler.emitMessage(allocator, .err, "unable to read res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
                std.process.exit(1);
            };
        };
        // No need to keep the res_data around after parsing the resources from it
        defer res_data.deinit(allocator);

        std.debug.assert(options.output_format == .coff);

        // TODO: Maybe use a buffered file reader instead of reading file into memory -> fbs
        var fbs = std.io.fixedBufferStream(res_data.bytes);
        break :resources cvtres.parseRes(allocator, fbs.reader(), .{ .max_size = res_data.bytes.len }) catch |err| {
            // TODO: Better errors
            try error_handler.emitMessage(allocator, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
            std.process.exit(1);
        };
        defer depfile.close();
    };
    defer resources.deinit();

        const depfile_writer = depfile.writer();
        var depfile_buffered_writer = std.io.bufferedWriter(depfile_writer);
        switch (options.depfile_fmt) {
            .json => {
                var write_stream = std.json.writeStream(depfile_buffered_writer.writer(), .{ .whitespace = .indent_2 });
                defer write_stream.deinit();
    var coff_stream = IoStream.fromIoSource(options.output_source, .output) catch |err| {
        try error_handler.emitMessage(allocator, .err, "unable to create output file '{s}': {s}", .{ options.output_source.filename, @errorName(err) });
        std.process.exit(1);
    };
    defer coff_stream.deinit(allocator);

                try write_stream.beginArray();
                for (dependencies_list.items) |dep_path| {
                    try write_stream.write(dep_path);
                }
                try write_stream.endArray();
    var coff_output_buffered_stream = std.io.bufferedWriter(coff_stream.source.writer(allocator));

    var cvtres_diagnostics: cvtres.Diagnostics = .{ .none = {} };
    cvtres.writeCoff(allocator, coff_output_buffered_stream.writer(), resources.list.items, options.coff_options, &cvtres_diagnostics) catch |err| {
        switch (err) {
            error.DuplicateResource => {
                const duplicate_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
                try error_handler.emitMessage(allocator, .err, "duplicate resource [id: {}, type: {}, language: {}]", .{
                    duplicate_resource.name_value,
                    fmtResourceType(duplicate_resource.type_value),
                    duplicate_resource.language,
                });
            },
            error.ResourceDataTooLong => {
                const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
                try error_handler.emitMessage(allocator, .err, "resource has a data length that is too large to be written into a coff section", .{});
                try error_handler.emitMessage(allocator, .note, "the resource with the invalid size is [id: {}, type: {}, language: {}]", .{
                    overflow_resource.name_value,
                    fmtResourceType(overflow_resource.type_value),
                    overflow_resource.language,
                });
            },
            error.TotalResourceDataTooLong => {
                const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
                try error_handler.emitMessage(allocator, .err, "total resource data exceeds the maximum of the coff 'size of raw data' field", .{});
                try error_handler.emitMessage(allocator, .note, "size overflow occurred when attempting to write this resource: [id: {}, type: {}, language: {}]", .{
                    overflow_resource.name_value,
                    fmtResourceType(overflow_resource.type_value),
                    overflow_resource.language,
                });
            },
            else => {
                try error_handler.emitMessage(allocator, .err, "unable to write coff output file '{s}': {s}", .{ coff_stream.name, @errorName(err) });
            },
        }
        try depfile_buffered_writer.flush();
    }
        // Delete the output file on error
        coff_stream.cleanupAfterError();
        std.process.exit(1);
    };

    try coff_output_buffered_stream.flush();
}

const IoStream = struct {
    name: []const u8,
    intermediate: bool,
    source: Source,

    pub const IoDirection = enum { input, output };

    pub fn fromIoSource(source: cli.Options.IoSource, io: IoDirection) !IoStream {
        return .{
            .name = switch (source) {
                .filename => |filename| filename,
                .stdio => switch (io) {
                    .input => "<stdin>",
                    .output => "<stdout>",
                },
            },
            .intermediate = false,
            .source = try Source.fromIoSource(source, io),
        };
    }

    pub fn deinit(self: *IoStream, allocator: std.mem.Allocator) void {
        self.source.deinit(allocator);
    }

    pub fn cleanupAfterError(self: *IoStream) void {
        switch (self.source) {
            .file => |file| {
                // Delete the output file on error
                file.close();
                // Failing to delete is not really a big deal, so swallow any errors
                std.fs.cwd().deleteFile(self.name) catch {};
            },
            .stdio, .memory, .closed => return,
        }
    }

    pub const Source = union(enum) {
        file: std.fs.File,
        stdio: std.fs.File,
        memory: std.ArrayListUnmanaged(u8),
        /// The source has been closed and any usage of the Source in this state is illegal (except deinit).
        closed: void,

        pub fn fromIoSource(source: cli.Options.IoSource, io: IoDirection) !Source {
            switch (source) {
                .filename => |filename| return .{
                    .file = switch (io) {
                        .input => try openFileNotDir(std.fs.cwd(), filename, .{}),
                        .output => try std.fs.cwd().createFile(filename, .{}),
                    },
                },
                .stdio => |file| return .{ .stdio = file },
            }
        }

        pub fn deinit(self: *Source, allocator: std.mem.Allocator) void {
            switch (self.*) {
                .file => |file| file.close(),
                .stdio => {},
                .memory => |*list| list.deinit(allocator),
                .closed => {},
            }
        }

        pub const Data = struct {
            bytes: []const u8,
            needs_free: bool,

            pub fn deinit(self: Data, allocator: std.mem.Allocator) void {
                if (self.needs_free) {
                    allocator.free(self.bytes);
                }
            }
        };

        pub fn readAll(self: Source, allocator: std.mem.Allocator) !Data {
            return switch (self) {
                inline .file, .stdio => |file| .{
                    .bytes = try file.readToEndAlloc(allocator, std.math.maxInt(usize)),
                    .needs_free = true,
                },
                .memory => |list| .{ .bytes = list.items, .needs_free = false },
                .closed => unreachable,
            };
        }

        pub const WriterContext = struct {
            self: *Source,
            allocator: std.mem.Allocator,
        };
        pub const WriteError = std.mem.Allocator.Error || std.fs.File.WriteError;
        pub const Writer = std.io.Writer(WriterContext, WriteError, write);

        pub fn write(ctx: WriterContext, bytes: []const u8) WriteError!usize {
            switch (ctx.self.*) {
                inline .file, .stdio => |file| return file.write(bytes),
                .memory => |*list| {
                    try list.appendSlice(ctx.allocator, bytes);
                    return bytes.len;
                },
                .closed => unreachable,
            }
        }

        pub fn writer(self: *Source, allocator: std.mem.Allocator) Writer {
            return .{ .context = .{ .self = self, .allocator = allocator } };
        }
    };
};

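// Hedged usage sketch: the `memory` Source is what makes rc -> res -> coff
// work in a single invocation; the intermediate .res is written to and then
// read back from an in-memory buffer instead of a temporary file
// (`res_bytes` below is a stand-in for the compiled resource data):
//
//   var res_stream = IoStream{
//       .name = "<in-memory intermediate res>",
//       .intermediate = true,
//       .source = .{ .memory = .empty },
//   };
//   defer res_stream.deinit(allocator);
//   try res_stream.source.writer(allocator).writeAll(res_bytes);
//   const data = try res_stream.source.readAll(allocator);
//   defer data.deinit(allocator);
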
fn getIncludePaths(arena: std.mem.Allocator, auto_includes_option: cli.Options.AutoIncludes, zig_lib_dir: []const u8) ![]const []const u8 {
    var includes = auto_includes_option;
    if (builtin.target.os.tag != .windows) {