Dwarf: rework self-hosted debug info from scratch

This is in preparation for incremental and actually being able to debug
executables built by the x86_64 backend.
This commit is contained in:
Jacob Young
2024-08-06 11:22:37 -04:00
parent 90989be0e3
commit ef11bc9899
50 changed files with 5215 additions and 3590 deletions
+9
View File
@@ -549,6 +549,15 @@ pub fn build(b: *std.Build) !void {
test_step.dependOn(tests.addStackTraceTests(b, test_filters, optimization_modes));
test_step.dependOn(tests.addCliTests(b));
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filters, optimization_modes));
if (tests.addDebuggerTests(b, .{
.test_filters = test_filters,
.gdb = b.option([]const u8, "gdb", "path to gdb binary"),
.lldb = b.option([]const u8, "lldb", "path to lldb binary"),
.optimize_modes = optimization_modes,
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.skip_libc = skip_libc,
})) |test_debugger_step| test_step.dependOn(test_debugger_step);
try addWasiUpdateStep(b, version);
+1
View File
@@ -64,6 +64,7 @@ stage3-debug/bin/zig build \
stage3-debug/bin/zig build test docs \
--maxrss 21000000000 \
-Dlldb=$HOME/deps/lldb-zig/Debug/bin/lldb \
-fqemu \
-fwasmtime \
-Dstatic-llvm \
+1
View File
@@ -64,6 +64,7 @@ stage3-release/bin/zig build \
stage3-release/bin/zig build test docs \
--maxrss 21000000000 \
-Dlldb=$HOME/deps/lldb-zig/Release/bin/lldb \
-fqemu \
-fwasmtime \
-Dstatic-llvm \
+18
View File
@@ -359,6 +359,24 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
return m.len;
}
pub const FixedWriter = std.io.Writer(*Self, Allocator.Error, appendWriteFixed);
/// Returns a Writer that appends to the list without ever allocating:
/// writes that would overflow the current capacity fail with
/// `error.OutOfMemory` instead of growing the buffer.
pub fn fixedWriter(self: *Self) FixedWriter {
    const writer: FixedWriter = .{ .context = self };
    return writer;
}
/// Writer implementation backing `FixedWriter`; exists to match the
/// `std.io.Writer` API shape.
/// Appends `m` only if it fits in the list's existing capacity.
fn appendWriteFixed(self: *Self, m: []const u8) error{OutOfMemory}!usize {
    const remaining = self.capacity - self.items.len;
    if (remaining < m.len) return error.OutOfMemory;
    self.appendSliceAssumeCapacity(m);
    return m.len;
}
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
+36
View File
@@ -95,6 +95,9 @@ pub const LNE = struct {
pub const set_discriminator = 0x04;
pub const lo_user = 0x80;
pub const hi_user = 0xff;
// Zig extensions
pub const ZIG_set_decl = 0xec;
};
pub const UT = struct {
@@ -118,6 +121,8 @@ pub const LNCT = struct {
pub const lo_user = 0x2000;
pub const hi_user = 0x3fff;
pub const LLVM_source = 0x2001;
};
pub const RLE = struct {
@@ -142,6 +147,37 @@ pub const CC = enum(u8) {
GNU_renesas_sh = 0x40,
GNU_borland_fastcall_i386 = 0x41,
BORLAND_safecall = 0xb0,
BORLAND_stdcall = 0xb1,
BORLAND_pascal = 0xb2,
BORLAND_msfastcall = 0xb3,
BORLAND_msreturn = 0xb4,
BORLAND_thiscall = 0xb5,
BORLAND_fastcall = 0xb6,
LLVM_vectorcall = 0xc0,
LLVM_Win64 = 0xc1,
LLVM_X86_64SysV = 0xc2,
LLVM_AAPCS = 0xc3,
LLVM_AAPCS_VFP = 0xc4,
LLVM_IntelOclBicc = 0xc5,
LLVM_SpirFunction = 0xc6,
LLVM_OpenCLKernel = 0xc7,
LLVM_Swift = 0xc8,
LLVM_PreserveMost = 0xc9,
LLVM_PreserveAll = 0xca,
LLVM_X86RegCall = 0xcb,
LLVM_M68kRTD = 0xcc,
LLVM_PreserveNone = 0xcd,
LLVM_RISCVVectorCall = 0xce,
LLVM_SwiftTail = 0xcf,
pub const lo_user = 0x40;
pub const hi_user = 0xff;
};
pub const ACCESS = struct {
pub const public = 0x01;
pub const protected = 0x02;
pub const private = 0x03;
};
+9
View File
@@ -218,6 +218,15 @@ pub const VMS_rtnbeg_pd_address = 0x2201;
// See http://gcc.gnu.org/wiki/DW_AT_GNAT_descriptive_type .
pub const use_GNAT_descriptive_type = 0x2301;
pub const GNAT_descriptive_type = 0x2302;
// Zig extensions.
pub const ZIG_parent = 0x2ccd;
pub const ZIG_padding = 0x2cce;
pub const ZIG_relative_decl = 0x2cd0;
pub const ZIG_decl_line_relative = 0x2cd1;
pub const ZIG_is_allowzero = 0x2ce1;
pub const ZIG_sentinel = 0x2ce2;
// UPC extension.
pub const upc_threads_scaled = 0x3210;
// PGI (STMicroelectronics) extensions.
+24
View File
@@ -35,6 +35,30 @@ pub const Fortran03 = 0x0022;
pub const Fortran08 = 0x0023;
pub const RenderScript = 0x0024;
pub const BLISS = 0x0025;
pub const Kotlin = 0x0026;
pub const Zig = 0x0027;
pub const Crystal = 0x0028;
pub const C_plus_plus_17 = 0x002a;
pub const C_plus_plus_20 = 0x002b;
pub const C17 = 0x002c;
pub const Fortran18 = 0x002d;
pub const Ada2005 = 0x002e;
pub const Ada2012 = 0x002f;
pub const HIP = 0x0030;
pub const Assembly = 0x0031;
pub const C_sharp = 0x0032;
pub const Mojo = 0x0033;
pub const GLSL = 0x0034;
pub const GLSL_ES = 0x0035;
pub const HLSL = 0x0036;
pub const OpenCL_CPP = 0x0037;
pub const CPP_for_OpenCL = 0x0038;
pub const SYCL = 0x0039;
pub const C_plus_plus_23 = 0x003a;
pub const Odin = 0x003b;
pub const Ruby = 0x0040;
pub const Move = 0x0041;
pub const Hylo = 0x0042;
pub const lo_user = 0x8000;
pub const hi_user = 0xffff;
+1 -1
View File
@@ -419,7 +419,7 @@ pub const tty = @import("io/tty.zig");
/// A Writer that doesn't write to anything.
pub const null_writer: NullWriter = .{ .context = {} };
const NullWriter = Writer(void, error{}, dummyWrite);
pub const NullWriter = Writer(void, error{}, dummyWrite);
fn dummyWrite(context: void, data: []const u8) error{}!usize {
_ = context;
return data.len;
+35 -20
View File
@@ -36,10 +36,14 @@ pub fn readUleb128(comptime T: type, reader: anytype) !T {
pub const readULEB128 = readUleb128;
/// Write a single unsigned integer as unsigned LEB128 to the given writer.
pub fn writeUleb128(writer: anytype, uint_value: anytype) !void {
const T = @TypeOf(uint_value);
const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value: U = @intCast(uint_value);
pub fn writeUleb128(writer: anytype, arg: anytype) !void {
const Arg = @TypeOf(arg);
const Int = switch (Arg) {
comptime_int => std.math.IntFittingRange(arg, arg),
else => Arg,
};
const Value = if (@typeInfo(Int).Int.bits < 8) u8 else Int;
var value: Value = arg;
while (true) {
const byte: u8 = @truncate(value & 0x7f);
@@ -118,16 +122,19 @@ pub fn readIleb128(comptime T: type, reader: anytype) !T {
pub const readILEB128 = readIleb128;
/// Write a single signed integer as signed LEB128 to the given writer.
pub fn writeIleb128(writer: anytype, int_value: anytype) !void {
const T = @TypeOf(int_value);
const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
const U = std.meta.Int(.unsigned, @typeInfo(S).Int.bits);
var value: S = @intCast(int_value);
pub fn writeIleb128(writer: anytype, arg: anytype) !void {
const Arg = @TypeOf(arg);
const Int = switch (Arg) {
comptime_int => std.math.IntFittingRange(-arg - 1, arg),
else => Arg,
};
const Signed = if (@typeInfo(Int).Int.bits < 8) i8 else Int;
const Unsigned = std.meta.Int(.unsigned, @typeInfo(Signed).Int.bits);
var value: Signed = arg;
while (true) {
const uvalue: U = @bitCast(value);
const byte: u8 = @truncate(uvalue);
const unsigned: Unsigned = @bitCast(value);
const byte: u8 = @truncate(unsigned);
value >>= 6;
if (value == -1 or value == 0) {
try writer.writeByte(byte & 0x7F);
@@ -147,17 +154,25 @@ pub fn writeIleb128(writer: anytype, int_value: anytype) !void {
/// "relocatable", meaning that it becomes possible to later go back and patch the number to be a
/// different value without shifting all the following code.
pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(.unsigned, l * 7)) void {
const T = @TypeOf(int);
const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value: U = @intCast(int);
writeUnsignedExtended(ptr, int);
}
comptime var i = 0;
inline while (i < (l - 1)) : (i += 1) {
const byte = @as(u8, @truncate(value)) | 0b1000_0000;
/// Same as `writeUnsignedFixed` but with a runtime-known length.
/// Asserts `slice.len > 0`.
pub fn writeUnsignedExtended(slice: []u8, arg: anytype) void {
const Arg = @TypeOf(arg);
const Int = switch (Arg) {
comptime_int => std.math.IntFittingRange(arg, arg),
else => Arg,
};
const Value = if (@typeInfo(Int).Int.bits < 8) u8 else Int;
var value: Value = arg;
for (slice[0 .. slice.len - 1]) |*byte| {
byte.* = @truncate(0x80 | value);
value >>= 7;
ptr[i] = byte;
}
ptr[i] = @truncate(value);
slice[slice.len - 1] = @as(u7, @intCast(value));
}
/// Deprecated: use `writeIleb128`
+7 -3
View File
@@ -2092,6 +2092,12 @@ pub const Const = struct {
return bits;
}
/// Returns the number of bits required to represent the integer in
/// twos-complement form with the given signedness. A positive value
/// requested as signed needs one extra bit to hold the sign.
pub fn bitCountTwosCompForSignedness(self: Const, signedness: std.builtin.Signedness) usize {
    const extra_sign_bit: usize = if (self.positive and signedness == .signed) 1 else 0;
    return self.bitCountTwosComp() + extra_sign_bit;
}
/// @popCount with two's complement semantics.
///
/// This returns the number of 1 bits set when the value would be represented in
@@ -2147,9 +2153,7 @@ pub const Const = struct {
if (signedness == .unsigned and !self.positive) {
return false;
}
const req_bits = self.bitCountTwosComp() + @intFromBool(self.positive and signedness == .signed);
return bit_count >= req_bits;
return bit_count >= self.bitCountTwosCompForSignedness(signedness);
}
/// Returns whether self can fit into an integer of the requested type.
+14 -5
View File
@@ -128,7 +128,7 @@ pub fn alignAllocLen(full_len: usize, alloc_len: usize, len_align: u29) usize {
assert(full_len >= alloc_len);
if (len_align == 0)
return alloc_len;
const adjusted = alignBackwardAnyAlign(full_len, len_align);
const adjusted = alignBackwardAnyAlign(usize, full_len, len_align);
assert(adjusted >= alloc_len);
return adjusted;
}
@@ -4312,6 +4312,15 @@ test "sliceAsBytes preserves pointer attributes" {
try testing.expectEqual(in.alignment, out.alignment);
}
/// Round an address up to the next (or current) aligned address.
/// Unlike `alignForward`, `alignment` can be any positive number, not just a power of 2.
pub fn alignForwardAnyAlign(comptime T: type, addr: T, alignment: T) T {
    if (isValidAlignGeneric(T, alignment))
        return alignForward(T, addr, alignment);
    assert(alignment != 0);
    // Non-power-of-two alignment: overshoot by `alignment - 1`, then round down.
    return alignBackwardAnyAlign(T, addr + (alignment - 1), alignment);
}
/// Round an address up to the next (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0.
/// Asserts that rounding up the address does not cause integer overflow.
@@ -4433,11 +4442,11 @@ test alignForward {
/// Round an address down to the previous (or current) aligned address.
/// Unlike `alignBackward`, `alignment` can be any positive number, not just a power of 2.
pub fn alignBackwardAnyAlign(i: usize, alignment: usize) usize {
if (isValidAlign(alignment))
return alignBackward(usize, i, alignment);
pub fn alignBackwardAnyAlign(comptime T: type, addr: T, alignment: T) T {
if (isValidAlignGeneric(T, alignment))
return alignBackward(T, addr, alignment);
assert(alignment != 0);
return i - @mod(i, alignment);
return addr - @mod(addr, alignment);
}
/// Round an address down to the previous (or current) aligned address.
+4 -1
View File
@@ -4405,7 +4405,6 @@ fn globalVarDecl(
.decl_line = astgen.source_line,
.astgen = astgen,
.is_comptime = true,
.anon_name_strategy = .parent,
.instructions = gz.instructions,
.instructions_top = gz.instructions.items.len,
};
@@ -4463,6 +4462,8 @@ fn globalVarDecl(
else
.none;
block_scope.anon_name_strategy = .parent;
const init_inst = try expr(
&block_scope,
&block_scope.base,
@@ -4490,6 +4491,8 @@ fn globalVarDecl(
// Extern variable which has an explicit type.
const type_inst = try typeExpr(&block_scope, &block_scope.base, var_decl.ast.type_node);
block_scope.anon_name_strategy = .parent;
const var_inst = try block_scope.addVar(.{
.var_type = type_inst,
.lib_name = lib_name,
+10
View File
@@ -363,6 +363,7 @@ const Job = union(enum) {
/// It must be deinited when the job is processed.
air: Air,
},
codegen_type: InternPool.Index,
/// The `Cau` must be semantically analyzed (and possibly export itself).
/// This may be its first time being analyzed, or it may be outdated.
analyze_cau: InternPool.Cau.Index,
@@ -423,6 +424,7 @@ const CodegenJob = union(enum) {
/// It must be deinited when the job is processed.
air: Air,
},
type: InternPool.Index,
};
pub const CObject = struct {
@@ -3712,6 +3714,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
.air = func.air,
} });
},
.codegen_type => |ty| try comp.queueCodegenJob(tid, .{ .type = ty }),
.analyze_func => |func| {
const named_frame = tracy.namedFrame("analyze_func");
defer named_frame.end();
@@ -4001,6 +4004,13 @@ fn processOneCodegenJob(tid: usize, comp: *Compilation, codegen_job: CodegenJob)
// This call takes ownership of `func.air`.
try pt.linkerUpdateFunc(func.func, func.air);
},
.type => |ty| {
const named_frame = tracy.namedFrame("codegen_type");
defer named_frame.end();
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
try pt.linkerUpdateContainerType(ty);
},
}
}
+1 -1
View File
@@ -4003,7 +4003,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
}
}
const LoadedEnumType = struct {
pub const LoadedEnumType = struct {
// TODO: the non-fqn will be needed by the new dwarf structure
/// The name of this enum type.
name: NullTerminatedString,
+35
View File
@@ -2845,6 +2845,11 @@ fn zirStructDecl(
try pt.scanNamespace(new_namespace_index, decls);
try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
try sema.declareDependency(.{ .interned = wip_ty.index });
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
@@ -3213,6 +3218,11 @@ fn zirEnumDecl(
}
}
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
}
@@ -3323,6 +3333,11 @@ fn zirUnionDecl(
try pt.scanNamespace(new_namespace_index, decls);
try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
try sema.declareDependency(.{ .interned = wip_ty.index });
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
@@ -3396,6 +3411,11 @@ fn zirOpaqueDecl(
const decls = sema.code.bodySlice(extra_index, decls_len);
try pt.scanNamespace(new_namespace_index, decls);
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.finish(ip, .none, new_namespace_index));
}
@@ -22071,6 +22091,11 @@ fn reifyEnum(
return sema.fail(block, src, "non-exhaustive enum specified every value", .{});
}
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
}
@@ -22318,6 +22343,11 @@ fn reifyUnion(
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
}
@@ -22591,6 +22621,11 @@ fn reifyStruct(
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
try mod.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (mod.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
try mod.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addReferenceEntry(src, AnalUnit.wrap(.{ .cau = new_cau_index }));
return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index));
}
+1 -1
View File
@@ -2208,7 +2208,7 @@ pub fn errorSetHasField(ty: Type, name: []const u8, mod: *Module) bool {
const field_name_interned = ip.getString(name).unwrap() orelse return false;
return error_set_type.nameIndex(ip, field_name_interned) != null;
},
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
.anyerror_type => true,
.none => false,
else => |t| {
+9 -1
View File
@@ -2737,7 +2737,7 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit
pub fn errorSetBits(mod: *Zcu) u16 {
if (mod.error_limit == 0) return 0;
return std.math.log2_int_ceil(ErrorInt, mod.error_limit + 1); // +1 for no error
return @as(u16, std.math.log2_int(ErrorInt, mod.error_limit)) + 1;
}
pub fn errNote(
@@ -3005,6 +3005,14 @@ pub const UnionLayout = struct {
tag_align: Alignment,
tag_size: u64,
padding: u32,
/// Byte offset of the tag within the union's in-memory layout: the tag
/// follows the payload when its alignment is strictly smaller than the
/// payload's alignment, otherwise the tag comes first (offset 0).
pub fn tagOffset(layout: UnionLayout) u64 {
    if (layout.tag_align.compare(.lt, layout.payload_align)) return layout.payload_size;
    return 0;
}
/// Byte offset of the payload within the union's in-memory layout:
/// the mirror of `tagOffset` — payload first when the tag's alignment
/// is strictly smaller, otherwise the payload follows the tag.
pub fn payloadOffset(layout: UnionLayout) u64 {
    if (layout.tag_align.compare(.lt, layout.payload_align)) return 0;
    return layout.tag_size;
}
};
/// Returns the index of the active field, given the current tag value
+25 -1
View File
@@ -911,6 +911,11 @@ fn createFileRootStruct(
try pt.scanNamespace(namespace_index, decls);
try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (file.mod.strip) break :codegen_type;
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
zcu.setFileRootType(file_index, wip_ty.index);
return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index);
}
@@ -1332,7 +1337,10 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
// to the `codegen_nav` job.
try decl_ty.resolveFully(pt);
if (!decl_ty.isFnOrHasRuntimeBits(pt)) break :queue_codegen;
if (!decl_ty.isFnOrHasRuntimeBits(pt)) {
if (zcu.comp.config.use_llvm) break :queue_codegen;
if (file.mod.strip) break :queue_codegen;
}
try zcu.comp.queueJob(.{ .codegen_nav = nav_index });
}
@@ -2588,6 +2596,22 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void
}
}
/// Emits linker/debug-info updates for a newly created container type
/// (processed from the `.type` codegen job queued by Sema).
pub fn linkerUpdateContainerType(pt: Zcu.PerThread, ty: InternPool.Index) !void {
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const ip = &zcu.intern_pool;
    // Progress node is named after the container type so the user-visible
    // status line shows which type is being emitted.
    const codegen_prog_node = zcu.codegen_prog_node.start(Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), 0);
    defer codegen_prog_node.end();
    if (comp.bin_file) |lf| {
        lf.updateContainerType(pt, ty) catch |err| switch (err) {
            // OOM must propagate; all other errors are logged but do not
            // abort compilation — debug-info emission is best-effort here.
            error.OutOfMemory => return error.OutOfMemory,
            else => |e| log.err("codegen type failed: {s}", .{@errorName(e)}),
        };
    }
}
pub fn reportRetryableAstGenError(
pt: Zcu.PerThread,
src: Zcu.AstGenSrc,
+20 -31
View File
@@ -18,7 +18,6 @@ const ErrorMsg = Zcu.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
const trace = @import("../../tracy.zig").trace;
const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
@@ -181,11 +180,11 @@ const DbgInfoReloc = struct {
}
}
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void {
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) {
.register => |reg| .{ .register = reg.dwarfLocOp() },
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
.register => |reg| .{ .reg = reg.dwarfNum() },
.stack_offset,
.stack_argument_offset,
=> |offset| blk: {
@@ -194,15 +193,15 @@ const DbgInfoReloc = struct {
.stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{ .stack = .{
.fp_register = Register.x29.dwarfLocOpDeref(),
.offset = adjusted_offset,
break :blk .{ .plus = .{
&.{ .breg = Register.x29.dwarfNum() },
&.{ .consts = adjusted_offset },
} };
},
else => unreachable, // not a possible argument
};
try dw.genArgDbgInfo(reloc.name, reloc.ty, function.owner_nav, loc);
try dw.genVarDebugInfo(.local_arg, reloc.name, reloc.ty, loc);
},
.plan9 => {},
.none => {},
@@ -210,16 +209,10 @@ const DbgInfoReloc = struct {
}
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
const is_ptr = switch (reloc.tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
else => unreachable,
};
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) {
.register => |reg| .{ .register = reg.dwarfLocOp() },
.dwarf => |dwarf| {
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
.register => |reg| .{ .reg = reg.dwarfNum() },
.ptr_stack_offset,
.stack_offset,
.stack_argument_offset,
@@ -231,24 +224,20 @@ const DbgInfoReloc = struct {
.stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{
.stack = .{
.fp_register = Register.x29.dwarfLocOpDeref(),
.offset = adjusted_offset,
},
};
break :blk .{ .plus = .{
&.{ .reg = Register.x29.dwarfNum() },
&.{ .consts = adjusted_offset },
} };
},
.memory => |address| .{ .memory = address },
.linker_load => |linker_load| .{ .linker_load = linker_load },
.immediate => |x| .{ .immediate = x },
.undef => .undef,
.none => .none,
.memory => |address| .{ .constu = address },
.immediate => |x| .{ .constu = x },
.none => .empty,
else => blk: {
log.debug("TODO generate debug info for {}", .{reloc.mcv});
break :blk .nop;
break :blk .empty;
},
};
try dw.genVarDbgInfo(reloc.name, reloc.ty, function.owner_nav, is_ptr, loc);
try dwarf.genVarDebugInfo(.local_var, reloc.name, reloc.ty, loc);
},
.plan9 => {},
.none => {},
@@ -6207,7 +6196,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.memory => |addr| .{ .memory = addr },
.load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } },
.load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } },
.load_symbol, .load_tlv, .lea_symbol => unreachable, // TODO
.load_symbol, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO
},
.fail => |msg| {
self.err_msg = msg;
+2 -10
View File
@@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
@@ -295,15 +294,8 @@ pub const Register = enum(u8) {
};
}
pub fn dwarfLocOp(self: Register) u8 {
return @as(u8, self.enc()) + DW.OP.reg0;
}
/// DWARF encodings that push a value onto the DWARF stack that is either
/// the contents of a register or the result of adding the contents a given
/// register to a given signed offset.
pub fn dwarfLocOpDeref(self: Register) u8 {
return @as(u8, self.enc()) + DW.OP.breg0;
pub fn dwarfNum(self: Register) u5 {
return self.enc();
}
};
+18 -26
View File
@@ -18,7 +18,6 @@ const ErrorMsg = Zcu.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
const trace = @import("../../tracy.zig").trace;
const DW = std.dwarf;
const leb128 = std.leb;
const log = std.log.scoped(.codegen);
const build_options = @import("build_options");
@@ -259,11 +258,11 @@ const DbgInfoReloc = struct {
}
}
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void {
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) {
.register => |reg| .{ .register = reg.dwarfLocOp() },
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
.register => |reg| .{ .reg = reg.dwarfNum() },
.stack_offset,
.stack_argument_offset,
=> blk: {
@@ -272,15 +271,15 @@ const DbgInfoReloc = struct {
.stack_argument_offset => |offset| @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{ .stack = .{
.fp_register = DW.OP.breg11,
.offset = adjusted_stack_offset,
break :blk .{ .plus = .{
&.{ .reg = 11 },
&.{ .consts = adjusted_stack_offset },
} };
},
else => unreachable, // not a possible argument
};
try dw.genArgDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcInfo(function.func_index).owner_nav, loc);
try dw.genVarDebugInfo(.local_arg, reloc.name, reloc.ty, loc);
},
.plan9 => {},
.none => {},
@@ -288,16 +287,10 @@ const DbgInfoReloc = struct {
}
fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
const is_ptr = switch (reloc.tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
else => unreachable,
};
switch (function.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (reloc.mcv) {
.register => |reg| .{ .register = reg.dwarfLocOp() },
const loc: link.File.Dwarf.Loc = switch (reloc.mcv) {
.register => |reg| .{ .reg = reg.dwarfNum() },
.ptr_stack_offset,
.stack_offset,
.stack_argument_offset,
@@ -309,21 +302,20 @@ const DbgInfoReloc = struct {
.stack_argument_offset => @as(i32, @intCast(function.saved_regs_stack_space + offset)),
else => unreachable,
};
break :blk .{ .stack = .{
.fp_register = DW.OP.breg11,
.offset = adjusted_offset,
break :blk .{ .plus = .{
&.{ .reg = 11 },
&.{ .consts = adjusted_offset },
} };
},
.memory => |address| .{ .memory = address },
.immediate => |x| .{ .immediate = x },
.undef => .undef,
.none => .none,
.memory => |address| .{ .constu = address },
.immediate => |x| .{ .constu = x },
.none => .empty,
else => blk: {
log.debug("TODO generate debug info for {}", .{reloc.mcv});
break :blk .nop;
break :blk .empty;
},
};
try dw.genVarDbgInfo(reloc.name, reloc.ty, function.pt.zcu.funcInfo(function.func_index).owner_nav, is_ptr, loc);
try dw.genVarDebugInfo(.local_var, reloc.name, reloc.ty, loc);
},
.plan9 => {},
.none => {},
@@ -6170,7 +6162,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
.load_got, .load_symbol, .load_direct, .load_tlv, .lea_symbol => unreachable, // TODO
.load_got, .load_symbol, .load_direct, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO
.immediate => |imm| .{ .immediate = @truncate(imm) },
.memory => |addr| .{ .memory = addr },
},
+4 -5
View File
@@ -1,5 +1,4 @@
const std = @import("std");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
@@ -158,12 +157,12 @@ pub const Register = enum(u5) {
/// Returns the unique 4-bit ID of this register which is used in
/// the machine code
pub fn id(self: Register) u4 {
return @as(u4, @truncate(@intFromEnum(self)));
pub fn id(reg: Register) u4 {
return @truncate(@intFromEnum(reg));
}
pub fn dwarfLocOp(self: Register) u8 {
return @as(u8, self.id()) + DW.OP.reg0;
pub fn dwarfNum(reg: Register) u4 {
return reg.id();
}
};
+12 -27
View File
@@ -4677,9 +4677,7 @@ fn genArgDbgInfo(func: Func, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (func.debug_output) {
.dwarf => |dw| switch (mcv) {
.register => |reg| try dw.genArgDbgInfo(name, ty, func.owner.nav_index, .{
.register = reg.dwarfLocOp(),
}),
.register => |reg| try dw.genVarDebugInfo(.local_arg, name, ty, .{ .reg = reg.dwarfNum() }),
.load_frame => {},
else => {},
},
@@ -5184,43 +5182,30 @@ fn airDbgVar(func: *Func, inst: Air.Inst.Index) !void {
const name = func.air.nullTerminatedString(pl_op.payload);
const tag = func.air.instructions.items(.tag)[@intFromEnum(inst)];
try func.genVarDbgInfo(tag, ty, mcv, name);
try func.genVarDbgInfo(ty, mcv, name);
return func.finishAir(inst, .unreach, .{ operand, .none, .none });
}
fn genVarDbgInfo(
func: Func,
tag: Air.Inst.Tag,
ty: Type,
mcv: MCValue,
name: [:0]const u8,
name: []const u8,
) !void {
const is_ptr = switch (tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
else => unreachable,
};
switch (func.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (mcv) {
.register => |reg| .{ .register = reg.dwarfLocOp() },
.memory => |address| .{ .memory = address },
.load_symbol => |sym_off| loc: {
assert(sym_off.off == 0);
break :loc .{ .linker_load = .{ .type = .direct, .sym_index = sym_off.sym } };
},
.immediate => |x| .{ .immediate = x },
.undef => .undef,
.none => .none,
.dwarf => |dwarf| {
const loc: link.File.Dwarf.Loc = switch (mcv) {
.register => |reg| .{ .reg = reg.dwarfNum() },
.memory => |address| .{ .constu = address },
.immediate => |x| .{ .constu = x },
.none => .empty,
else => blk: {
// log.warn("TODO generate debug info for {}", .{mcv});
break :blk .nop;
break :blk .empty;
},
};
try dw.genVarDbgInfo(name, ty, func.owner.nav_index, is_ptr, loc);
try dwarf.genVarDebugInfo(.local_var, name, ty, loc);
},
.plan9 => {},
.none => {},
@@ -8031,7 +8016,7 @@ fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
.load_got, .load_direct => {
.load_got, .load_direct, .lea_direct => {
return func.fail("TODO: genTypedValue {s}", .{@tagName(mcv)});
},
},
+2 -3
View File
@@ -1,5 +1,4 @@
const std = @import("std");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
const Target = std.Target;
@@ -207,8 +206,8 @@ pub const Register = enum(u8) {
return @truncate(@intFromEnum(reg));
}
pub fn dwarfLocOp(reg: Register) u8 {
return @as(u8, reg.id());
pub fn dwarfNum(reg: Register) u8 {
return reg.id();
}
pub fn bitSize(reg: Register, zcu: *const Zcu) u32 {
+3 -6
View File
@@ -3579,18 +3579,15 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
}
fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
const pt = self.pt;
const mod = pt.zcu;
const arg = self.air.instructions.items(.data)[@intFromEnum(inst)].arg;
const ty = arg.ty.toType();
const owner_nav = mod.funcInfo(self.func_index).owner_nav;
if (arg.name == .none) return;
const name = self.air.nullTerminatedString(@intFromEnum(arg.name));
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
.register => |reg| try dw.genArgDbgInfo(name, ty, owner_nav, .{
.register = reg.dwarfLocOp(),
.register => |reg| try dw.genVarDebugInfo(.local_arg, name, ty, .{
.reg = reg.dwarfNum(),
}),
else => {},
},
@@ -4127,7 +4124,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
.load_got, .load_symbol, .load_direct, .load_tlv, .lea_symbol => unreachable, // TODO
.load_got, .load_symbol, .load_direct, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
},
+6 -7
View File
@@ -1,5 +1,4 @@
const std = @import("std");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
@@ -15,17 +14,17 @@ pub const Register = enum(u6) {
fp = 62, // frame pointer (i6)
// zig fmt: on
pub fn id(self: Register) u5 {
return @as(u5, @truncate(@intFromEnum(self)));
pub fn id(reg: Register) u5 {
return @truncate(@intFromEnum(reg));
}
pub fn enc(self: Register) u5 {
pub fn enc(reg: Register) u5 {
// For integer registers, enc() == id().
return self.id();
return reg.id();
}
pub fn dwarfLocOp(reg: Register) u8 {
return @as(u8, reg.id()) + DW.OP.reg0;
pub fn dwarfNum(reg: Register) u5 {
return reg.id();
}
};
+8 -7
View File
@@ -742,7 +742,7 @@ const InnerError = error{
CodegenFail,
/// Compiler implementation could not handle a large integer.
Overflow,
};
} || link.File.UpdateDebugInfoError;
pub fn deinit(func: *CodeGen) void {
// in case of an error and we still have branches
@@ -2588,8 +2588,8 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const name_nts = func.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
if (name_nts != .none) {
const name = func.air.nullTerminatedString(@intFromEnum(name_nts));
try dwarf.genArgDbgInfo(name, arg_ty, func.owner_nav, .{
.wasm_local = arg.local.value,
try dwarf.genVarDebugInfo(.local_arg, name, arg_ty, .{
.wasm_ext = .{ .local = arg.local.value },
});
}
},
@@ -6455,6 +6455,7 @@ fn airDbgInlineBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) InnerError!void {
_ = is_ptr;
if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
@@ -6466,14 +6467,14 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) InnerError!void
const name = func.air.nullTerminatedString(pl_op.payload);
log.debug(" var name = ({s})", .{name});
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (operand) {
.local => |local| .{ .wasm_local = local.value },
const loc: link.File.Dwarf.Loc = switch (operand) {
.local => |local| .{ .wasm_ext = .{ .local = local.value } },
else => blk: {
log.debug("TODO generate debug info for {}", .{operand});
break :blk .nop;
break :blk .empty;
},
};
try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.owner_nav, is_ptr, loc);
try func.debug_output.dwarf.genVarDebugInfo(.local_var, name, ty, loc);
return func.finishAir(inst, .none, &.{});
}
+3 -14
View File
@@ -1,5 +1,4 @@
const std = @import("std");
const DW = std.dwarf;
// zig fmt: off
pub const Register = enum(u8) {
@@ -44,18 +43,8 @@ pub const Register = enum(u8) {
return @enumFromInt(@as(u8, self.id()) + 16);
}
pub fn dwarfLocOp(reg: Register) u8 {
return switch (reg.to32()) {
.eax => DW.OP.reg0,
.ecx => DW.OP.reg1,
.edx => DW.OP.reg2,
.ebx => DW.OP.reg3,
.esp => DW.OP.reg4,
.ebp => DW.OP.reg5,
.esi => DW.OP.reg6,
.edi => DW.OP.reg7,
else => unreachable,
};
pub fn dwarfNum(reg: Register) u8 {
return @intFromEnum(reg.to32());
}
};
@@ -64,7 +53,7 @@ pub const Register = enum(u8) {
/// TODO this set is actually a set of caller-saved registers.
pub const callee_preserved_regs = [_]Register{ .eax, .ecx, .edx, .esi, .edi };
// TODO add these to Register enum and corresponding dwarfLocOp
// TODO add these to Register enum and corresponding dwarfNum
// // Return Address register. This is stored in `0(%esp, "")` and is not a physical register.
// RA = (8, "RA"),
//
+138 -95
View File
@@ -18,7 +18,6 @@ const Allocator = mem.Allocator;
const CodeGenError = codegen.CodeGenError;
const Compilation = @import("../../Compilation.zig");
const DebugInfoOutput = codegen.DebugInfoOutput;
const DW = std.dwarf;
const ErrorMsg = Zcu.ErrorMsg;
const Result = codegen.Result;
const Emit = @import("Emit.zig");
@@ -82,6 +81,9 @@ mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// MIR extra data
mir_extra: std.ArrayListUnmanaged(u32) = .{},
stack_args: std.ArrayListUnmanaged(StackVar) = .{},
stack_vars: std.ArrayListUnmanaged(StackVar) = .{},
/// Byte offset within the source file of the ending curly.
end_di_line: u32,
end_di_column: u32,
@@ -726,6 +728,12 @@ const InstTracking = struct {
}
};
const StackVar = struct {
name: []const u8,
type: Type,
frame_addr: FrameAddr,
};
const FrameAlloc = struct {
abi_size: u31,
spill_pad: u3,
@@ -831,6 +839,8 @@ pub fn generate(
function.exitlude_jump_relocs.deinit(gpa);
function.mir_instructions.deinit(gpa);
function.mir_extra.deinit(gpa);
function.stack_args.deinit(gpa);
function.stack_vars.deinit(gpa);
}
wip_mir_log.debug("{}:", .{fmtNav(func.owner_nav, ip)});
@@ -903,14 +913,17 @@ pub fn generate(
else => |e| return e,
};
var mir = Mir{
try function.genStackVarDebugInfo(.local_arg, function.stack_args.items);
try function.genStackVarDebugInfo(.local_var, function.stack_vars.items);
var mir: Mir = .{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = try function.mir_extra.toOwnedSlice(gpa),
.frame_locs = function.frame_locs.toOwnedSlice(),
};
defer mir.deinit(gpa);
var emit = Emit{
var emit: Emit = .{
.lower = .{
.bin_file = bin_file,
.allocator = gpa,
@@ -2425,7 +2438,7 @@ fn computeFrameLayout(self: *Self, cc: std.builtin.CallingConvention) !FrameLayo
const callee_preserved_regs =
abi.getCalleePreservedRegs(abi.resolveCallingConvention(cc, self.target.*));
for (callee_preserved_regs) |reg| {
if (self.register_manager.isRegAllocated(reg) or true) {
if (self.register_manager.isRegAllocated(reg)) {
save_reg_list.push(callee_preserved_regs, reg);
}
}
@@ -5985,10 +5998,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
switch (operand) {
.load_frame => |frame_addr| {
if (tag_abi_size <= 8) {
const off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size)
else
0;
const off: i32 = @intCast(layout.tagOffset());
break :blk try self.copyToRegisterWithInstTracking(inst, tag_ty, .{
.load_frame = .{ .index = frame_addr.index, .off = frame_addr.off + off },
});
@@ -6000,10 +6010,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
);
},
.register => {
const shift: u6 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size * 8)
else
0;
const shift: u6 = @intCast(layout.tagOffset() * 8);
const result = try self.copyToRegisterWithInstTracking(inst, union_ty, operand);
try self.genShiftBinOpMir(
.{ ._r, .sh },
@@ -11819,7 +11826,16 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
while (self.args[arg_index] == .none) arg_index += 1;
self.arg_index = arg_index + 1;
const result: MCValue = if (self.liveness.isUnused(inst)) .unreach else result: {
const result: MCValue = if (self.debug_output == .none and self.liveness.isUnused(inst)) .unreach else result: {
const name = switch (self.debug_output) {
.none => "",
else => name: {
const name_nts = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
break :name self.air.nullTerminatedString(@intFromEnum(name_nts));
},
};
if (name.len == 0 and self.liveness.isUnused(inst)) break :result .unreach;
const arg_ty = self.typeOfIndex(inst);
const src_mcv = self.args[arg_index];
const dst_mcv = switch (src_mcv) {
@@ -11922,90 +11938,86 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
else => return self.fail("TODO implement arg for {}", .{src_mcv}),
};
const name_nts = self.air.instructions.items(.data)[@intFromEnum(inst)].arg.name;
switch (name_nts) {
.none => {},
_ => try self.genArgDbgInfo(arg_ty, self.air.nullTerminatedString(@intFromEnum(name_nts)), src_mcv),
}
if (name.len > 0) try self.genVarDebugInfo(.local_arg, .dbg_var_val, name, arg_ty, dst_mcv);
if (self.liveness.isUnused(inst)) {
assert(self.debug_output != .none and name.len > 0);
try self.freeValue(dst_mcv);
break :result .none;
}
break :result dst_mcv;
};
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
fn genVarDebugInfo(
self: *Self,
var_tag: link.File.Dwarf.WipNav.VarTag,
tag: Air.Inst.Tag,
name: []const u8,
ty: Type,
mcv: MCValue,
) !void {
const stack_vars = switch (var_tag) {
.local_arg => &self.stack_args,
.local_var => &self.stack_vars,
};
switch (self.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (mcv) {
.register => |reg| .{ .register = reg.dwarfNum() },
.register_pair => |regs| .{ .register_pair = .{
regs[0].dwarfNum(), regs[1].dwarfNum(),
} },
// TODO use a frame index
.load_frame, .elementwise_regs_then_frame => return,
//.stack_offset => |off| .{
// .stack = .{
// // TODO handle -fomit-frame-pointer
// .fp_register = Register.rbp.dwarfNum(),
// .offset = -off,
// },
//},
else => unreachable, // not a valid function parameter
};
// TODO: this might need adjusting like the linkers do.
// Instead of flattening the owner and passing Decl.Index here we may
// want to special case LazySymbol in DWARF linker too.
try dw.genArgDbgInfo(name, ty, self.owner.nav_index, loc);
.dwarf => |dwarf| switch (tag) {
else => unreachable,
.dbg_var_ptr => {
const var_ty = ty.childType(self.pt.zcu);
switch (mcv) {
else => {
log.info("dbg_var_ptr({s}({}))", .{ @tagName(mcv), mcv });
unreachable;
},
.unreach, .dead, .elementwise_regs_then_frame, .reserved_frame, .air_ref => unreachable,
.lea_frame => |frame_addr| try stack_vars.append(self.gpa, .{
.name = name,
.type = var_ty,
.frame_addr = frame_addr,
}),
.lea_symbol => |sym_off| try dwarf.genVarDebugInfo(var_tag, name, var_ty, .{ .plus = .{
&.{ .addr = .{ .sym = sym_off.sym } },
&.{ .consts = sym_off.off },
} }),
}
},
.dbg_var_val => switch (mcv) {
.none => try dwarf.genVarDebugInfo(var_tag, name, ty, .empty),
.unreach, .dead, .elementwise_regs_then_frame, .reserved_frame, .air_ref => unreachable,
.immediate => |immediate| try dwarf.genVarDebugInfo(var_tag, name, ty, .{ .stack_value = &.{
.constu = immediate,
} }),
else => {
const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(ty, self.pt));
try self.genSetMem(.{ .frame = frame_index }, 0, ty, mcv, .{});
try stack_vars.append(self.gpa, .{
.name = name,
.type = ty,
.frame_addr = .{ .index = frame_index },
});
},
},
},
.plan9 => {},
.none => {},
}
}
fn genVarDbgInfo(
fn genStackVarDebugInfo(
self: Self,
tag: Air.Inst.Tag,
ty: Type,
mcv: MCValue,
name: [:0]const u8,
var_tag: link.File.Dwarf.WipNav.VarTag,
stack_vars: []const StackVar,
) !void {
const is_ptr = switch (tag) {
.dbg_var_ptr => true,
.dbg_var_val => false,
else => unreachable,
};
switch (self.debug_output) {
.dwarf => |dw| {
const loc: link.File.Dwarf.NavState.DbgInfoLoc = switch (mcv) {
.register => |reg| .{ .register = reg.dwarfNum() },
// TODO use a frame index
.load_frame, .lea_frame => return,
//=> |off| .{ .stack = .{
// .fp_register = Register.rbp.dwarfNum(),
// .offset = -off,
//} },
.memory => |address| .{ .memory = address },
.load_symbol => |sym_off| loc: {
assert(sym_off.off == 0);
break :loc .{ .linker_load = .{ .type = .direct, .sym_index = sym_off.sym } };
}, // TODO
.load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } },
.load_direct => |sym_index| .{
.linker_load = .{ .type = .direct, .sym_index = sym_index },
},
.immediate => |x| .{ .immediate = x },
.undef => .undef,
.none => .none,
else => blk: {
log.debug("TODO generate debug info for {}", .{mcv});
break :blk .nop;
},
};
// TODO: this might need adjusting like the linkers do.
// Instead of flattening the owner and passing Decl.Index here we may
// want to special case LazySymbol in DWARF linker too.
try dw.genVarDbgInfo(name, ty, self.owner.nav_index, is_ptr, loc);
.dwarf => |dwarf| for (stack_vars) |stack_var| {
const frame_loc = self.frame_locs.get(@intFromEnum(stack_var.frame_addr.index));
try dwarf.genVarDebugInfo(var_tag, stack_var.name, stack_var.type, .{ .plus = .{
&.{ .breg = frame_loc.base.dwarfNum() },
&.{ .consts = @as(i33, frame_loc.disp) + stack_var.frame_addr.off },
} });
},
.plan9 => {},
.none => {},
@@ -13045,7 +13057,7 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
const name = self.air.nullTerminatedString(pl_op.payload);
const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
try self.genVarDbgInfo(tag, ty, mcv, name);
try self.genVarDebugInfo(.local_var, tag, name, ty, mcv);
return self.finishAir(inst, .unreach, .{ operand, .none, .none });
}
@@ -13154,13 +13166,17 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.lea_symbol,
.elementwise_regs_then_frame,
.reserved_frame,
.air_ref,
=> unreachable,
.lea_frame => {
self.eflags_inst = null;
return .{ .immediate = @intFromBool(false) };
},
.register => |opt_reg| {
if (some_info.off == 0) {
const some_abi_size: u32 = @intCast(some_info.ty.abiSize(pt));
@@ -13402,7 +13418,8 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const ty = self.typeOf(un_op);
const result = switch (try self.isNull(inst, ty, operand)) {
const result: MCValue = switch (try self.isNull(inst, ty, operand)) {
.immediate => |imm| .{ .immediate = @intFromBool(imm == 0) },
.eflags => |cc| .{ .eflags = cc.negate() },
else => unreachable,
};
@@ -15156,7 +15173,7 @@ fn genSetMem(
})).write(
self,
.{ .base = base, .mod = .{ .rm = .{
.size = self.memSize(ty),
.size = Memory.Size.fromBitSize(@min(self.memSize(ty).bitSize(), src_alias.bitSize())),
.disp = disp,
} } },
src_alias,
@@ -15202,7 +15219,33 @@ fn genSetMem(
@tagName(src_mcv), ty.fmt(pt),
}),
},
.register_offset,
.register_offset => |reg_off| {
const src_reg = self.copyToTmpRegister(ty, src_mcv) catch |err| switch (err) {
error.OutOfRegisters => {
const src_reg = registerAlias(reg_off.reg, abi_size);
try self.asmRegisterMemory(.{ ._, .lea }, src_reg, .{
.base = .{ .reg = src_reg },
.mod = .{ .rm = .{
.size = .qword,
.disp = reg_off.off,
} },
});
try self.genSetMem(base, disp, ty, .{ .register = reg_off.reg }, opts);
return self.asmRegisterMemory(.{ ._, .lea }, src_reg, .{
.base = .{ .reg = src_reg },
.mod = .{ .rm = .{
.size = .qword,
.disp = -reg_off.off,
} },
});
},
else => |e| return e,
};
const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
defer self.register_manager.unlockReg(src_lock);
try self.genSetMem(base, disp, ty, .{ .register = src_reg }, opts);
},
.memory,
.indirect,
.load_direct,
@@ -15422,9 +15465,14 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const src_ty = self.typeOf(ty_op.operand);
const result = result: {
const src_mcv = try self.resolveInst(ty_op.operand);
if (dst_ty.isPtrAtRuntime(mod) and src_ty.isPtrAtRuntime(mod)) switch (src_mcv) {
.lea_frame => break :result src_mcv,
else => if (self.reuseOperand(inst, ty_op.operand, 0, src_mcv)) break :result src_mcv,
};
const dst_rc = self.regClassForType(dst_ty);
const src_rc = self.regClassForType(src_ty);
const src_mcv = try self.resolveInst(ty_op.operand);
const src_lock = if (src_mcv.getReg()) |reg| self.register_manager.lockReg(reg) else null;
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
@@ -18236,10 +18284,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
const tag_int_val = try tag_val.intFromEnum(tag_ty, pt);
const tag_int = tag_int_val.toUnsignedInt(pt);
const tag_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
@intCast(layout.payload_size)
else
0;
const tag_off: i32 = @intCast(layout.tagOffset());
try self.genCopy(
tag_ty,
dst_mcv.address().offset(tag_off).deref(),
@@ -18247,10 +18292,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
.{},
);
const pl_off: i32 = if (layout.tag_align.compare(.lt, layout.payload_align))
0
else
@intCast(layout.tag_size);
const pl_off: i32 = @intCast(layout.payloadOffset());
try self.genCopy(src_ty, dst_mcv.address().offset(pl_off).deref(), src_mcv, .{});
break :result dst_mcv;
@@ -18790,6 +18832,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.load_symbol => |sym_index| .{ .load_symbol = .{ .sym = sym_index } },
.lea_symbol => |sym_index| .{ .lea_symbol = .{ .sym = sym_index } },
.load_direct => |sym_index| .{ .load_direct = sym_index },
.lea_direct => |sym_index| .{ .lea_direct = sym_index },
.load_got => |sym_index| .{ .lea_got = sym_index },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
},
+1 -1
View File
@@ -14,7 +14,7 @@ relocs: std.ArrayListUnmanaged(Reloc) = .{},
pub const Error = Lower.Error || error{
EmitFail,
};
} || link.File.UpdateDebugInfoError;
pub fn emitMir(emit: *Emit) Error!void {
for (0..emit.lower.mir.instructions.len) |mir_i| {
+1 -1
View File
@@ -1204,7 +1204,7 @@ pub const FrameLoc = struct {
pub fn resolveFrameLoc(mir: Mir, mem: Memory) Memory {
return switch (mem.info.base) {
.none, .reg, .reloc => mem,
.frame => if (mir.frame_locs.len > 0) Memory{
.frame => if (mir.frame_locs.len > 0) .{
.info = .{
.base = .reg,
.mod = mem.info.mod,
-1
View File
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;
const DW = std.dwarf;
/// EFLAGS condition codes
pub const Condition = enum(u5) {
+47 -38
View File
@@ -36,10 +36,10 @@ pub const CodeGenError = error{
OutOfMemory,
Overflow,
CodegenFail,
};
} || link.File.UpdateDebugInfoError;
pub const DebugInfoOutput = union(enum) {
dwarf: *link.File.Dwarf.NavState,
dwarf: *link.File.Dwarf.WipNav,
plan9: *link.File.Plan9.DebugInfoOutput,
none,
};
@@ -819,6 +819,9 @@ pub const GenResult = union(enum) {
/// Decl with address deferred until the linker allocates everything in virtual memory.
/// Payload is a symbol index.
load_direct: u32,
/// Decl with address deferred until the linker allocates everything in virtual memory.
/// Payload is a symbol index.
lea_direct: u32,
/// Decl referenced via GOT with address deferred until the linker allocates
/// everything in virtual memory.
/// Payload is a symbol index.
@@ -833,10 +836,6 @@ pub const GenResult = union(enum) {
lea_symbol: u32,
};
fn mcv(val: MCValue) GenResult {
return .{ .mcv = val };
}
fn fail(
gpa: Allocator,
src_loc: Zcu.LazySrcLoc,
@@ -869,7 +868,7 @@ fn genNavRef(
8 => 0xaaaaaaaaaaaaaaaa,
else => unreachable,
};
return GenResult.mcv(.{ .immediate = imm });
return .{ .mcv = .{ .immediate = imm } };
}
const comp = lf.comp;
@@ -878,12 +877,12 @@ fn genNavRef(
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (ty.castPtrToFn(zcu)) |fn_ty| {
if (zcu.typeToFunc(fn_ty).?.is_generic) {
return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(pt).toByteUnits().? });
return .{ .mcv = .{ .immediate = fn_ty.abiAlignment(pt).toByteUnits().? } };
}
} else if (ty.zigTypeTag(zcu) == .Pointer) {
const elem_ty = ty.elemType2(zcu);
if (!elem_ty.hasRuntimeBits(pt)) {
return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(pt).toByteUnits().? });
return .{ .mcv = .{ .immediate = elem_ty.abiAlignment(pt).toByteUnits().? } };
}
}
@@ -900,40 +899,40 @@ fn genNavRef(
if (is_extern) {
const sym_index = try elf_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip));
zo.symbol(sym_index).flags.is_extern_ptr = true;
return GenResult.mcv(.{ .lea_symbol = sym_index });
return .{ .mcv = .{ .lea_symbol = sym_index } };
}
const sym_index = try zo.getOrCreateMetadataForNav(elf_file, nav_index);
if (!single_threaded and is_threadlocal) {
return GenResult.mcv(.{ .load_tlv = sym_index });
return .{ .mcv = .{ .load_tlv = sym_index } };
}
return GenResult.mcv(.{ .lea_symbol = sym_index });
return .{ .mcv = .{ .lea_symbol = sym_index } };
} else if (lf.cast(.macho)) |macho_file| {
const zo = macho_file.getZigObject().?;
if (is_extern) {
const sym_index = try macho_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip));
zo.symbols.items[sym_index].setSectionFlags(.{ .needs_got = true });
return GenResult.mcv(.{ .load_symbol = sym_index });
return .{ .mcv = .{ .load_symbol = sym_index } };
}
const sym_index = try zo.getOrCreateMetadataForNav(macho_file, nav_index);
const sym = zo.symbols.items[sym_index];
if (!single_threaded and is_threadlocal) {
return GenResult.mcv(.{ .load_tlv = sym.nlist_idx });
return .{ .mcv = .{ .load_tlv = sym.nlist_idx } };
}
return GenResult.mcv(.{ .load_symbol = sym.nlist_idx });
return .{ .mcv = .{ .load_symbol = sym.nlist_idx } };
} else if (lf.cast(.coff)) |coff_file| {
if (is_extern) {
// TODO audit this
const global_index = try coff_file.getGlobalSymbol(name.toSlice(ip), lib_name.toSlice(ip));
try coff_file.need_got_table.put(gpa, global_index, {}); // needs GOT
return GenResult.mcv(.{ .load_got = link.File.Coff.global_symbol_bit | global_index });
return .{ .mcv = .{ .load_got = link.File.Coff.global_symbol_bit | global_index } };
}
const atom_index = try coff_file.getOrCreateAtomForNav(nav_index);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return GenResult.mcv(.{ .load_got = sym_index });
return .{ .mcv = .{ .load_got = sym_index } };
} else if (lf.cast(.plan9)) |p9| {
const atom_index = try p9.seeNav(pt, nav_index);
const atom = p9.getAtom(atom_index);
return GenResult.mcv(.{ .memory = atom.getOffsetTableAddress(p9) });
return .{ .mcv = .{ .memory = atom.getOffsetTableAddress(p9) } };
} else {
return GenResult.fail(gpa, src_loc, "TODO genNavRef for target {}", .{target});
}
@@ -952,30 +951,40 @@ pub fn genTypedValue(
log.debug("genTypedValue: val = {}", .{val.fmtValue(pt)});
if (val.isUndef(zcu)) {
return GenResult.mcv(.undef);
}
if (!ty.isSlice(zcu)) switch (ip.indexToKey(val.toIntern())) {
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.nav => |nav| return genNavRef(lf, pt, src_loc, val, nav, target),
else => {},
},
else => {},
};
if (val.isUndef(zcu)) return .{ .mcv = .undef };
switch (ty.zigTypeTag(zcu)) {
.Void => return GenResult.mcv(.none),
.Void => return .{ .mcv = .none },
.Pointer => switch (ty.ptrSize(zcu)) {
.Slice => {},
else => switch (val.toIntern()) {
.null_value => {
return GenResult.mcv(.{ .immediate = 0 });
return .{ .mcv = .{ .immediate = 0 } };
},
.none => {},
else => switch (ip.indexToKey(val.toIntern())) {
.int => {
return GenResult.mcv(.{ .immediate = val.toUnsignedInt(pt) });
return .{ .mcv = .{ .immediate = val.toUnsignedInt(pt) } };
},
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.nav => |nav| return genNavRef(lf, pt, src_loc, val, nav, target),
.uav => |uav| if (Value.fromInterned(uav.val).typeOf(zcu).hasRuntimeBits(pt))
return switch (try lf.lowerUav(
pt,
uav.val,
Type.fromInterned(uav.orig_ty).ptrAlignment(pt),
src_loc,
)) {
.mcv => |mcv| return .{ .mcv = switch (mcv) {
.load_direct => |sym_index| .{ .lea_direct = sym_index },
.load_symbol => |sym_index| .{ .lea_symbol = sym_index },
else => unreachable,
} },
.fail => |em| return .{ .fail = em },
}
else
return .{ .mcv = .{ .immediate = Type.fromInterned(uav.orig_ty).ptrAlignment(pt)
.forward(@intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() | 1)) / 3)) } },
else => {},
},
else => {},
},
@@ -988,11 +997,11 @@ pub fn genTypedValue(
.signed => @bitCast(val.toSignedInt(pt)),
.unsigned => val.toUnsignedInt(pt),
};
return GenResult.mcv(.{ .immediate = unsigned });
return .{ .mcv = .{ .immediate = unsigned } };
}
},
.Bool => {
return GenResult.mcv(.{ .immediate = @intFromBool(val.toBool()) });
return .{ .mcv = .{ .immediate = @intFromBool(val.toBool()) } };
},
.Optional => {
if (ty.isPtrLikeOptional(zcu)) {
@@ -1000,11 +1009,11 @@ pub fn genTypedValue(
lf,
pt,
src_loc,
val.optionalValue(zcu) orelse return GenResult.mcv(.{ .immediate = 0 }),
val.optionalValue(zcu) orelse return .{ .mcv = .{ .immediate = 0 } },
target,
);
} else if (ty.abiSize(pt) == 1) {
return GenResult.mcv(.{ .immediate = @intFromBool(!val.isNull(zcu)) });
return .{ .mcv = .{ .immediate = @intFromBool(!val.isNull(zcu)) } };
}
},
.Enum => {
@@ -1020,7 +1029,7 @@ pub fn genTypedValue(
.ErrorSet => {
const err_name = ip.indexToKey(val.toIntern()).err.name;
const error_index = try pt.getErrorValue(err_name);
return GenResult.mcv(.{ .immediate = error_index });
return .{ .mcv = .{ .immediate = error_index } };
},
.ErrorUnion => {
const err_type = ty.errorUnionSet(zcu);
+15 -1
View File
@@ -329,6 +329,9 @@ pub const File = struct {
}
}
pub const UpdateDebugInfoError = Dwarf.UpdateError;
pub const FlushDebugInfoError = Dwarf.FlushError;
pub const UpdateNavError = error{
OutOfMemory,
Overflow,
@@ -365,7 +368,7 @@ pub const File = struct {
DeviceBusy,
InvalidArgument,
HotSwapUnavailableOnHostOperatingSystem,
};
} || UpdateDebugInfoError;
/// Called from within CodeGen to retrieve the symbol index of a global symbol.
/// If no symbol exists yet with this name, a new undefined global symbol will
@@ -398,6 +401,16 @@ pub const File = struct {
}
}
pub fn updateContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index) UpdateNavError!void {
switch (base.tag) {
else => {},
inline .elf => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).updateContainerType(pt, ty);
},
}
}
/// May be called before or after updateExports for any given Decl.
pub fn updateFunc(
base: *File,
@@ -570,6 +583,7 @@ pub const File = struct {
Unseekable,
UnsupportedCpuArchitecture,
UnsupportedVersion,
UnexpectedEndOfFile,
} ||
fs.File.WriteFileError ||
fs.File.OpenError ||
+31 -28
View File
@@ -1205,10 +1205,11 @@ pub fn updateNav(
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
const init_val = switch (ip.indexToKey(nav.status.resolved.val)) {
.variable => |variable| variable.init,
const nav_val = zcu.navValue(nav_index);
const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
.variable => |variable| Value.fromInterned(variable.init),
.@"extern" => |@"extern"| {
if (ip.isFunctionType(nav.typeOf(ip))) return;
if (ip.isFunctionType(@"extern".ty)) return;
// TODO make this part of getGlobalSymbol
const name = nav.name.toSlice(ip);
const lib_name = @"extern".lib_name.toSlice(ip);
@@ -1216,34 +1217,36 @@ pub fn updateNav(
try self.need_got_table.put(gpa, global_index, {});
return;
},
else => nav.status.resolved.val,
else => nav_val,
};
const atom_index = try self.getOrCreateAtomForNav(nav_index);
Atom.freeRelocations(self, atom_index);
const atom = self.getAtom(atom_index);
if (nav_init.typeOf(zcu).isFnOrHasRuntimeBits(pt)) {
const atom_index = try self.getOrCreateAtomForNav(nav_index);
Atom.freeRelocations(self, atom_index);
const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
const res = try codegen.generateSymbol(
&self.base,
pt,
zcu.navSrcLoc(nav_index),
Value.fromInterned(init_val),
&code_buffer,
.none,
.{ .parent_atom_index = atom.getSymbolIndex().? },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, nav_index, em);
return;
},
};
const res = try codegen.generateSymbol(
&self.base,
pt,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
.none,
.{ .parent_atom_index = atom.getSymbolIndex().? },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, nav_index, em);
return;
},
};
try self.updateNavCode(pt, nav_index, code, .NULL);
try self.updateNavCode(pt, nav_index, code, .NULL);
}
// Exports will be updated by `Zcu.processExports` after the update.
}
@@ -1290,10 +1293,10 @@ fn updateLazySymbolAtom(
},
};
const code_len = @as(u32, @intCast(code.len));
const code_len: u32 = @intCast(code.len);
const symbol = atom.getSymbolPtr(self);
try self.setSymbolName(symbol, name);
symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
symbol.section_number = @enumFromInt(section_index + 1);
symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));
+3655 -2755
View File
@@ -1,895 +1,1016 @@
allocator: Allocator,
bin_file: *File,
format: Format,
ptr_width: PtrWidth,
gpa: std.mem.Allocator,
bin_file: *link.File,
format: DW.Format,
endian: std.builtin.Endian,
address_size: AddressSize,
/// A list of `Atom`s whose Line Number Programs have surplus capacity.
/// This is the same concept as `Section.free_list` in Elf; see those doc comments.
src_fn_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{},
src_fn_first_index: ?Atom.Index = null,
src_fn_last_index: ?Atom.Index = null,
src_fns: std.ArrayListUnmanaged(Atom) = .{},
src_fn_navs: AtomTable = .{},
mods: std.AutoArrayHashMapUnmanaged(*Module, struct {
files: Files,
}),
types: std.AutoArrayHashMapUnmanaged(InternPool.Index, Entry.Index),
navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, Entry.Index),
/// A list of `Atom`s whose corresponding .debug_info tags have surplus capacity.
/// This is the same concept as `text_block_free_list`; see those doc comments.
di_atom_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{},
di_atom_first_index: ?Atom.Index = null,
di_atom_last_index: ?Atom.Index = null,
di_atoms: std.ArrayListUnmanaged(Atom) = .{},
di_atom_navs: AtomTable = .{},
debug_abbrev: DebugAbbrev,
debug_aranges: DebugAranges,
debug_info: DebugInfo,
debug_line: DebugLine,
debug_line_str: StringSection,
debug_loclists: DebugLocLists,
debug_rnglists: DebugRngLists,
debug_str: StringSection,
dbg_line_header: DbgLineHeader,
pub const UpdateError =
std.fs.File.OpenError ||
std.fs.File.SetEndPosError ||
std.fs.File.CopyRangeError ||
std.fs.File.PWriteError ||
error{ Overflow, Underflow, UnexpectedEndOfFile };
abbrev_table_offset: ?u64 = null,
pub const FlushError =
UpdateError ||
std.process.GetCwdError;
/// TODO replace with InternPool
/// Table of debug symbol names.
strtab: StringTable = .{},
pub const RelocError =
std.fs.File.PWriteError;
/// Quick lookup array of all defined source files referenced by at least one Nav.
/// They will end up in the DWARF debug_line header as two lists:
/// * []include_directory
/// * []file_names
di_files: std.AutoArrayHashMapUnmanaged(*const Zcu.File, void) = .{},
global_abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
const AtomTable = std.AutoHashMapUnmanaged(InternPool.Nav.Index, Atom.Index);
const Atom = struct {
/// Offset into .debug_info pointing to the tag for this Nav, or
/// offset from the beginning of the Debug Line Program header that contains this function.
off: u32,
/// Size of the .debug_info tag for this Nav, not including padding, or
/// size of the line number program component belonging to this function, not
/// including padding.
len: u32,
prev_index: ?Index,
next_index: ?Index,
pub const Index = u32;
pub const AddressSize = enum(u8) {
@"32" = 4,
@"64" = 8,
_,
};
const DbgLineHeader = struct {
minimum_instruction_length: u8,
maximum_operations_per_instruction: u8,
default_is_stmt: bool,
line_base: i8,
line_range: u8,
opcode_base: u8,
const Files = std.AutoArrayHashMapUnmanaged(Zcu.File.Index, void);
const DebugAbbrev = struct {
section: Section,
const unit: Unit.Index = @enumFromInt(0);
const entry: Entry.Index = @enumFromInt(0);
};
/// Represents state of the analysed Nav.
/// Includes Nav's abbrev table of type Types, matching arena
/// and a set of relocations that will be resolved once this
/// Nav's inner Atom is assigned an offset within the DWARF section.
pub const NavState = struct {
dwarf: *Dwarf,
pt: Zcu.PerThread,
di_atom_navs: *const AtomTable,
dbg_line_func: InternPool.Index,
dbg_line: std.ArrayList(u8),
dbg_info: std.ArrayList(u8),
abbrev_type_arena: std.heap.ArenaAllocator,
abbrev_table: std.ArrayListUnmanaged(AbbrevEntry),
abbrev_resolver: std.AutoHashMapUnmanaged(InternPool.Index, u32),
abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation),
exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation),
const DebugAranges = struct {
section: Section,
pub fn deinit(ns: *NavState) void {
const gpa = ns.dwarf.allocator;
ns.dbg_line.deinit();
ns.dbg_info.deinit();
ns.abbrev_type_arena.deinit();
ns.abbrev_table.deinit(gpa);
ns.abbrev_resolver.deinit(gpa);
ns.abbrev_relocs.deinit(gpa);
ns.exprloc_relocs.deinit(gpa);
fn headerBytes(dwarf: *Dwarf) u32 {
return std.mem.alignForwardAnyAlign(
u32,
dwarf.unitLengthBytes() + 2 + dwarf.sectionOffsetBytes() + 1 + 1,
@intFromEnum(dwarf.address_size) * 2,
);
}
/// Adds local type relocation of the form: @offset => @this + addend
/// @this signifies the offset within the .debug_abbrev section of the containing atom.
fn addTypeRelocLocal(self: *NavState, atom_index: Atom.Index, offset: u32, addend: u32) !void {
log.debug("{x}: @this + {x}", .{ offset, addend });
try self.abbrev_relocs.append(self.dwarf.allocator, .{
.target = null,
.atom_index = atom_index,
.offset = offset,
.addend = addend,
});
fn trailerBytes(dwarf: *Dwarf) u32 {
return @intFromEnum(dwarf.address_size) * 2;
}
};
const DebugInfo = struct {
section: Section,
fn headerBytes(dwarf: *Dwarf) u32 {
return dwarf.unitLengthBytes() + 2 + 1 + 1 + dwarf.sectionOffsetBytes() +
uleb128Bytes(@intFromEnum(AbbrevCode.compile_unit)) + 1 + dwarf.sectionOffsetBytes() * 6 + uleb128Bytes(0) +
uleb128Bytes(@intFromEnum(AbbrevCode.module)) + dwarf.sectionOffsetBytes() + uleb128Bytes(0);
}
/// Adds global type relocation of the form: @offset => @symbol + 0
/// @symbol signifies a type abbreviation posititioned somewhere in the .debug_abbrev section
/// which we use as our target of the relocation.
        fn addTypeRelocGlobal(self: *NavState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
            const gpa = self.dwarf.allocator;
            // Reuse the abbrev symbol for this type if one exists; otherwise
            // create it lazily and remember it in the resolver map.
            const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: {
                const sym_index: u32 = @intCast(self.abbrev_table.items.len);
                try self.abbrev_table.append(gpa, .{
                    .atom_index = atom_index,
                    .type = ty,
                    // Not known yet; presumably patched once the type's DIE is
                    // emitted — verify.
                    .offset = undefined,
                });
                log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.pt) });
                try self.abbrev_resolver.putNoClobber(gpa, ty.toIntern(), sym_index);
                break :blk sym_index;
            };
            log.debug("{x}: %{d} + 0", .{ offset, resolv });
            try self.abbrev_relocs.append(gpa, .{
                .target = resolv,
                .atom_index = atom_index,
                .offset = offset,
                .addend = 0,
            });
            // NOTE(review): closing brace appears displaced by interleaved lines below.
fn declEntryLineOff(dwarf: *Dwarf) u32 {
return AbbrevCode.decl_bytes + dwarf.sectionOffsetBytes();
}
        /// Appends a DWARF DIE describing `ty` to `self.dbg_info`.
        /// Unhandled type tags fall through to a zero-bit placeholder DIE
        /// (see the `else` prong at the end of the switch).
        fn addDbgInfoType(
            self: *NavState,
            pt: Zcu.PerThread,
            atom_index: Atom.Index,
            ty: Type,
        ) error{OutOfMemory}!void {
            const zcu = pt.zcu;
            const dbg_info_buffer = &self.dbg_info;
            const target = zcu.getTarget();
            const target_endian = target.cpu.arch.endian();
            const ip = &zcu.intern_pool;
            // NOTE(review): the next two lines look like an interleaving artifact
            // from a different declaration — verify against the original file.
            const trailer_bytes = 1 + 1;
        };
            switch (ty.zigTypeTag(zcu)) {
                .NoReturn => unreachable,
                .Void => {
                    try dbg_info_buffer.append(@intFromEnum(AbbrevCode.zero_bit_type));
                },
                .Bool => {
                    try dbg_info_buffer.ensureUnusedCapacity(12);
                    dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.base_type));
                    // DW.AT.encoding, DW.FORM.data1
                    dbg_info_buffer.appendAssumeCapacity(DW.ATE.boolean);
                    // DW.AT.byte_size, DW.FORM.udata
                    try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
                    // DW.AT.name, DW.FORM.string
                    try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
                },
                .Int => {
                    const info = ty.intInfo(zcu);
                    try dbg_info_buffer.ensureUnusedCapacity(12);
                    dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.base_type));
                    // DW.AT.encoding, DW.FORM.data1
                    dbg_info_buffer.appendAssumeCapacity(switch (info.signedness) {
                        .signed => DW.ATE.signed,
                        .unsigned => DW.ATE.unsigned,
                    });
                    // DW.AT.byte_size, DW.FORM.udata
                    try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
                    // DW.AT.name, DW.FORM.string
                    try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
                },
                // Pointer-like optionals (e.g. `?*T`) lower to a bare address base
                // type; other optionals lower to an anonymous two-member struct.
                .Optional => {
                    if (ty.isPtrLikeOptional(zcu)) {
                        try dbg_info_buffer.ensureUnusedCapacity(12);
                        dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.base_type));
                        // DW.AT.encoding, DW.FORM.data1
                        dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
                        // DW.AT.byte_size, DW.FORM.udata
                        try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
                        // DW.AT.name, DW.FORM.string
                        try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
                    } else {
                        // Non-pointer optionals are structs: struct { .maybe = *, .val = * }
                        const payload_ty = ty.optionalChild(zcu);
                        // DW.AT.structure_type
                        try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type));
                        // DW.AT.byte_size, DW.FORM.udata
                        const abi_size = ty.abiSize(pt);
                        try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size);
                        // DW.AT.name, DW.FORM.string
                        try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
                        // DW.AT.member
                        try dbg_info_buffer.ensureUnusedCapacity(21);
                        dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
                        // DW.AT.name, DW.FORM.string
                        dbg_info_buffer.appendSliceAssumeCapacity("maybe");
                        dbg_info_buffer.appendAssumeCapacity(0);
                        // DW.AT.type, DW.FORM.ref4
                        var index = dbg_info_buffer.items.len;
                        dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                        try self.addTypeRelocGlobal(atom_index, Type.bool, @intCast(index));
                        // DW.AT.data_member_location, DW.FORM.udata
                        dbg_info_buffer.appendAssumeCapacity(0);
                        // DW.AT.member
                        dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
                        // DW.AT.name, DW.FORM.string
                        dbg_info_buffer.appendSliceAssumeCapacity("val");
                        dbg_info_buffer.appendAssumeCapacity(0);
                        // DW.AT.type, DW.FORM.ref4
                        index = dbg_info_buffer.items.len;
                        dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                        try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(index));
                        // DW.AT.data_member_location, DW.FORM.udata
                        const offset = abi_size - payload_ty.abiSize(pt);
                        try leb128.writeUleb128(dbg_info_buffer.writer(), offset);
                        // DW.AT.structure_type delimit children
                        try dbg_info_buffer.append(0);
                    }
                },
                .Pointer => {
                    if (ty.isSlice(zcu)) {
                        // Slices are structs: struct { .ptr = *, .len = N }
                        const ptr_bits = target.ptrBitWidth();
                        const ptr_bytes: u8 = @intCast(@divExact(ptr_bits, 8));
                        // DW.AT.structure_type
                        try dbg_info_buffer.ensureUnusedCapacity(2);
                        dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_type));
                        // DW.AT.byte_size, DW.FORM.udata
                        try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
                        // DW.AT.name, DW.FORM.string
                        try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
                        // DW.AT.member
                        try dbg_info_buffer.ensureUnusedCapacity(21);
                        dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
                        // DW.AT.name, DW.FORM.string
                        dbg_info_buffer.appendSliceAssumeCapacity("ptr");
                        dbg_info_buffer.appendAssumeCapacity(0);
                        // DW.AT.type, DW.FORM.ref4
                        var index = dbg_info_buffer.items.len;
                        dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                        const ptr_ty = ty.slicePtrFieldType(zcu);
                        try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(index));
                        // DW.AT.data_member_location, DW.FORM.udata
                        dbg_info_buffer.appendAssumeCapacity(0);
                        // DW.AT.member
                        dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
                        // DW.AT.name, DW.FORM.string
                        dbg_info_buffer.appendSliceAssumeCapacity("len");
                        dbg_info_buffer.appendAssumeCapacity(0);
                        // DW.AT.type, DW.FORM.ref4
                        index = dbg_info_buffer.items.len;
                        dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                        try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(index));
                        // DW.AT.data_member_location, DW.FORM.udata
                        dbg_info_buffer.appendAssumeCapacity(ptr_bytes);
                        // DW.AT.structure_type delimit children
                        dbg_info_buffer.appendAssumeCapacity(0);
                    } else {
                        try dbg_info_buffer.ensureUnusedCapacity(9);
                        dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.ptr_type));
                        // DW.AT.type, DW.FORM.ref4
                        const index = dbg_info_buffer.items.len;
                        dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                        try self.addTypeRelocGlobal(atom_index, ty.childType(zcu), @intCast(index));
                    }
                },
                .Array => {
                    // DW.AT.array_type
                    try dbg_info_buffer.append(@intFromEnum(AbbrevCode.array_type));
                    // DW.AT.name, DW.FORM.string
                    try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
                    // DW.AT.type, DW.FORM.ref4
                    var index = dbg_info_buffer.items.len;
                    try dbg_info_buffer.ensureUnusedCapacity(9);
                    dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                    try self.addTypeRelocGlobal(atom_index, ty.childType(zcu), @intCast(index));
                    // DW.AT.subrange_type
                    dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.array_dim));
                    // DW.AT.type, DW.FORM.ref4
                    index = dbg_info_buffer.items.len;
                    dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                    try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(index));
                    // DW.AT.count, DW.FORM.udata
                    const len = ty.arrayLenIncludingSentinel(pt.zcu);
                    try leb128.writeUleb128(dbg_info_buffer.writer(), len);
                    // DW.AT.array_type delimit children
                    try dbg_info_buffer.append(0);
                },
                .Struct => {
                    // DW.AT.structure_type
                    try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type));
                    // DW.AT.byte_size, DW.FORM.udata
                    try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
            // NOTE(review): the next three lines look like an interleaving artifact
            // from a different declaration — verify against the original file.
            const DebugLine = struct {
                header: Header,
                section: Section,
            blk: {
                switch (ip.indexToKey(ty.ip_index)) {
                    // Anonymous (tuple-like) structs: fields are named by index.
                    .anon_struct_type => |fields| {
                        // DW.AT.name, DW.FORM.string
                        try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(pt)});
                        for (fields.types.get(ip), 0..) |field_ty, field_index| {
                            // DW.AT.member
                            try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));
                            // DW.AT.name, DW.FORM.string
                            try dbg_info_buffer.writer().print("{d}\x00", .{field_index});
                            // DW.AT.type, DW.FORM.ref4
                            const index = dbg_info_buffer.items.len;
                            try dbg_info_buffer.appendNTimes(0, 4);
                            try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
                            // DW.AT.data_member_location, DW.FORM.udata
                            const field_off = ty.structFieldOffset(field_index, pt);
                            try leb128.writeUleb128(dbg_info_buffer.writer(), field_off);
                        }
                    },
                    .struct_type => {
                        const struct_type = ip.loadStructType(ty.toIntern());
                        // DW.AT.name, DW.FORM.string
                        try ty.print(dbg_info_buffer.writer(), pt);
                        try dbg_info_buffer.append(0);
                        // Packed structs are not described yet; the DIE is left
                        // without members.
                        if (struct_type.layout == .@"packed") {
                            log.debug("TODO implement .debug_info for packed structs", .{});
                            break :blk;
                        }
                        if (struct_type.isTuple(ip)) {
                            for (struct_type.field_types.get(ip), struct_type.offsets.get(ip), 0..) |field_ty, field_off, field_index| {
                                if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
                                // DW.AT.member
                                try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));
                                // DW.AT.name, DW.FORM.string
                                try dbg_info_buffer.writer().print("{d}\x00", .{field_index});
                                // DW.AT.type, DW.FORM.ref4
                                const index = dbg_info_buffer.items.len;
                                try dbg_info_buffer.appendNTimes(0, 4);
                                try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
                                // DW.AT.data_member_location, DW.FORM.udata
                                try leb128.writeUleb128(dbg_info_buffer.writer(), field_off);
                            }
                        } else {
                            for (
                                struct_type.field_names.get(ip),
                                struct_type.field_types.get(ip),
                                struct_type.offsets.get(ip),
                            ) |field_name, field_ty, field_off| {
                                if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
                                const field_name_slice = field_name.toSlice(ip);
                                // DW.AT.member
                                try dbg_info_buffer.ensureUnusedCapacity(field_name_slice.len + 2);
                                dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
                                // DW.AT.name, DW.FORM.string
                                dbg_info_buffer.appendSliceAssumeCapacity(field_name_slice[0 .. field_name_slice.len + 1]);
                                // DW.AT.type, DW.FORM.ref4
                                const index = dbg_info_buffer.items.len;
                                try dbg_info_buffer.appendNTimes(0, 4);
                                try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
                                // DW.AT.data_member_location, DW.FORM.udata
                                try leb128.writeUleb128(dbg_info_buffer.writer(), field_off);
                            }
                        }
                    },
                    else => unreachable,
                }
            }
            // DW.AT.structure_type delimit children
            try dbg_info_buffer.append(0);
        },
            .Enum => {
                // DW.AT.enumeration_type
                try dbg_info_buffer.append(@intFromEnum(AbbrevCode.enum_type));
                // DW.AT.byte_size, DW.FORM.udata
                try leb128.writeUleb128(dbg_info_buffer.writer(), ty.abiSize(pt));
                // DW.AT.name, DW.FORM.string
                try ty.print(dbg_info_buffer.writer(), pt);
                try dbg_info_buffer.append(0);
                const enum_type = ip.loadEnumType(ty.ip_index);
                for (enum_type.names.get(ip), 0..) |field_name, field_i| {
                    const field_name_slice = field_name.toSlice(ip);
                    // DW.AT.enumerator
                    try dbg_info_buffer.ensureUnusedCapacity(field_name_slice.len + 2 + @sizeOf(u64));
                    dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.enum_variant));
                    // DW.AT.name, DW.FORM.string
                    dbg_info_buffer.appendSliceAssumeCapacity(field_name_slice[0 .. field_name_slice.len + 1]);
                    // DW.AT.const_value, DW.FORM.data8
                    const value: u64 = value: {
                        if (enum_type.values.len == 0) break :value field_i; // auto-numbered
                        const value = enum_type.values.get(ip)[field_i];
                        // TODO do not assume a 64bit enum value - could be bigger.
                        // See https://github.com/ziglang/zig/issues/645
                        const field_int_val = try Value.fromInterned(value).intFromEnum(ty, pt);
                        break :value @bitCast(field_int_val.toSignedInt(pt));
                    };
                    mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
                }
                // DW.AT.enumeration_type delimit children
                try dbg_info_buffer.append(0);
            },
            .Union => {
                const union_obj = zcu.typeToUnion(ty).?;
                const layout = pt.getUnionLayout(union_obj);
                // Payload and tag ordering depends on which has the larger alignment.
                const payload_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) layout.tag_size else 0;
                const tag_offset = if (layout.tag_align.compare(.gte, layout.payload_align)) 0 else layout.payload_size;
                // TODO this is temporary to match current state of unions in Zig - we don't yet have
                // safety checks implemented meaning the implicit tag is not yet stored and generated
                // for untagged unions.
                const is_tagged = layout.tag_size > 0;
                if (is_tagged) {
                    // Tagged unions wrap the inner union and the tag in a struct.
                    // DW.AT.structure_type
                    try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type));
                    // DW.AT.byte_size, DW.FORM.udata
                    try leb128.writeUleb128(dbg_info_buffer.writer(), layout.abi_size);
                    // DW.AT.name, DW.FORM.string
                    try ty.print(dbg_info_buffer.writer(), pt);
                    try dbg_info_buffer.append(0);
                    // DW.AT.member
                    try dbg_info_buffer.ensureUnusedCapacity(13);
                    dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
                    // DW.AT.name, DW.FORM.string
                    dbg_info_buffer.appendSliceAssumeCapacity("payload");
                    dbg_info_buffer.appendAssumeCapacity(0);
                    // DW.AT.type, DW.FORM.ref4
                    const inner_union_index = dbg_info_buffer.items.len;
                    dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                    try self.addTypeRelocLocal(atom_index, @intCast(inner_union_index), 5);
                    // DW.AT.data_member_location, DW.FORM.udata
                    try leb128.writeUleb128(dbg_info_buffer.writer(), payload_offset);
                }
                // DW.AT.union_type
                try dbg_info_buffer.append(@intFromEnum(AbbrevCode.union_type));
                // DW.AT.byte_size, DW.FORM.udata,
                try leb128.writeUleb128(dbg_info_buffer.writer(), layout.payload_size);
                // DW.AT.name, DW.FORM.string
                if (is_tagged) {
                    try dbg_info_buffer.writer().print("AnonUnion\x00", .{});
                } else {
                    try ty.print(dbg_info_buffer.writer(), pt);
                    try dbg_info_buffer.append(0);
                }
                for (union_obj.field_types.get(ip), union_obj.loadTagType(ip).names.get(ip)) |field_ty, field_name| {
                    if (!Type.fromInterned(field_ty).hasRuntimeBits(pt)) continue;
                    const field_name_slice = field_name.toSlice(ip);
                    // DW.AT.member
                    try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));
                    // DW.AT.name, DW.FORM.string
                    try dbg_info_buffer.appendSlice(field_name_slice[0 .. field_name_slice.len + 1]);
                    // DW.AT.type, DW.FORM.ref4
                    const index = dbg_info_buffer.items.len;
                    try dbg_info_buffer.appendNTimes(0, 4);
                    try self.addTypeRelocGlobal(atom_index, Type.fromInterned(field_ty), @intCast(index));
                    // DW.AT.data_member_location, DW.FORM.udata
                    try dbg_info_buffer.append(0);
                }
                // DW.AT.union_type delimit children
                try dbg_info_buffer.append(0);
                if (is_tagged) {
                    // DW.AT.member
                    try dbg_info_buffer.ensureUnusedCapacity(9);
                    dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
                    // DW.AT.name, DW.FORM.string
                    dbg_info_buffer.appendSliceAssumeCapacity("tag");
                    dbg_info_buffer.appendAssumeCapacity(0);
                    // DW.AT.type, DW.FORM.ref4
                    const index = dbg_info_buffer.items.len;
                    dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                    try self.addTypeRelocGlobal(atom_index, Type.fromInterned(union_obj.enum_tag_ty), @intCast(index));
                    // DW.AT.data_member_location, DW.FORM.udata
                    try leb128.writeUleb128(dbg_info_buffer.writer(), tag_offset);
                    // DW.AT.structure_type delimit children
                    try dbg_info_buffer.append(0);
                }
            },
            .ErrorSet => try addDbgInfoErrorSet(pt, ty, target, &self.dbg_info),
            // Error unions are described as a struct of { value, err }; which
            // member comes first depends on the larger alignment.
            .ErrorUnion => {
                const error_ty = ty.errorUnionSet(zcu);
                const payload_ty = ty.errorUnionPayload(zcu);
                const payload_align = if (payload_ty.isNoReturn(zcu)) .none else payload_ty.abiAlignment(pt);
                const error_align = Type.anyerror.abiAlignment(pt);
                const abi_size = ty.abiSize(pt);
                const payload_off = if (error_align.compare(.gte, payload_align)) Type.anyerror.abiSize(pt) else 0;
                const error_off = if (error_align.compare(.gte, payload_align)) 0 else payload_ty.abiSize(pt);
                // DW.AT.structure_type
                try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_type));
                // DW.AT.byte_size, DW.FORM.udata
                try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size);
                // DW.AT.name, DW.FORM.string
                try ty.print(dbg_info_buffer.writer(), pt);
                try dbg_info_buffer.append(0);
                // A noreturn payload has no value member at all.
                if (!payload_ty.isNoReturn(zcu)) {
                    // DW.AT.member
                    try dbg_info_buffer.ensureUnusedCapacity(11);
                    dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
                    // DW.AT.name, DW.FORM.string
                    dbg_info_buffer.appendSliceAssumeCapacity("value");
                    dbg_info_buffer.appendAssumeCapacity(0);
                    // DW.AT.type, DW.FORM.ref4
                    const index = dbg_info_buffer.items.len;
                    dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                    try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(index));
                    // DW.AT.data_member_location, DW.FORM.udata
                    try leb128.writeUleb128(dbg_info_buffer.writer(), payload_off);
                }
                {
                    // DW.AT.member
                    try dbg_info_buffer.ensureUnusedCapacity(9);
                    dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
                    // DW.AT.name, DW.FORM.string
                    dbg_info_buffer.appendSliceAssumeCapacity("err");
                    dbg_info_buffer.appendAssumeCapacity(0);
                    // DW.AT.type, DW.FORM.ref4
                    const index = dbg_info_buffer.items.len;
                    dbg_info_buffer.appendNTimesAssumeCapacity(0, 4);
                    try self.addTypeRelocGlobal(atom_index, error_ty, @intCast(index));
                    // DW.AT.data_member_location, DW.FORM.udata
                    try leb128.writeUleb128(dbg_info_buffer.writer(), error_off);
                }
                // DW.AT.structure_type delimit children
                try dbg_info_buffer.append(0);
            },
            else => {
                log.debug("TODO implement .debug_info for type '{}'", .{ty.fmt(pt)});
                try dbg_info_buffer.append(@intFromEnum(AbbrevCode.zero_bit_type));
            },
        }
    }
    /// Where a debugged value lives; consumed by `genArgDbgInfo`/`genVarDbgInfo`
    /// to choose the DW.AT.location expression.
    pub const DbgInfoLoc = union(enum) {
        /// DWARF register number.
        register: u8,
        /// Value split across two registers.
        register_pair: [2]u8,
        /// Offset from a frame-pointer register.
        stack: struct {
            fp_register: u8,
            offset: i32,
        },
        /// Index of a wasm local.
        wasm_local: u32,
        /// Absolute address.
        memory: u64,
        /// Address resolved by the linker via a symbol.
        linker_load: LinkerLoad,
        immediate: u64,
        undef,
        none,
        nop,
        /// Tunable fields of the .debug_line program header.
        const Header = struct {
            minimum_instruction_length: u8,
            maximum_operations_per_instruction: u8,
            default_is_stmt: bool,
            /// Smallest line advance encodable by a special opcode (signed).
            line_base: i8,
            /// Number of distinct line advances encodable by special opcodes.
            line_range: u8,
            /// First special opcode; opcodes below this are standard opcodes.
            opcode_base: u8,
        };
    /// Emits a DW.TAG.formal_parameter DIE for `name` of type `ty` at `loc`
    /// into this NavState's .debug_info buffer.
    pub fn genArgDbgInfo(
        self: *NavState,
        name: [:0]const u8,
        ty: Type,
        owner_nav: InternPool.Nav.Index,
        loc: DbgInfoLoc,
    ) error{OutOfMemory}!void {
        const pt = self.pt;
        const dbg_info = &self.dbg_info;
        const atom_index = self.di_atom_navs.get(owner_nav).?;
        // Include the sentinel so the string can be appended in one slice.
        const name_with_null = name.ptr[0 .. name.len + 1];
        switch (loc) {
            .register => |reg| {
                try dbg_info.ensureUnusedCapacity(4);
                dbg_info.appendAssumeCapacity(@intFromEnum(AbbrevCode.parameter));
                // DW.AT.location, DW.FORM.exprloc
                // First pass: measure the expression so its ULEB length prefix
                // can be written before the expression bytes.
                var expr_len = std.io.countingWriter(std.io.null_writer);
                if (reg < 32) {
                    expr_len.writer().writeByte(DW.OP.reg0 + reg) catch unreachable;
                } else {
                    expr_len.writer().writeByte(DW.OP.regx) catch unreachable;
                    leb128.writeUleb128(expr_len.writer(), reg) catch unreachable;
                }
                leb128.writeUleb128(dbg_info.writer(), expr_len.bytes_written) catch unreachable;
                if (reg < 32) {
                    dbg_info.appendAssumeCapacity(DW.OP.reg0 + reg);
                } else {
                    dbg_info.appendAssumeCapacity(DW.OP.regx);
                    leb128.writeUleb128(dbg_info.writer(), reg) catch unreachable;
                }
            },
            .register_pair => |regs| {
                const reg_bits = pt.zcu.getTarget().ptrBitWidth();
                const reg_bytes: u8 = @intCast(@divExact(reg_bits, 8));
                const abi_size = ty.abiSize(pt);
                try dbg_info.ensureUnusedCapacity(10);
                dbg_info.appendAssumeCapacity(@intFromEnum(AbbrevCode.parameter));
                // DW.AT.location, DW.FORM.exprloc
                // Measure first, then emit: each register contributes a reg op
                // plus a DW.OP.piece covering its byte span.
                var expr_len = std.io.countingWriter(std.io.null_writer);
                for (regs, 0..) |reg, reg_i| {
                    if (reg < 32) {
                        expr_len.writer().writeByte(DW.OP.reg0 + reg) catch unreachable;
                    } else {
                        expr_len.writer().writeByte(DW.OP.regx) catch unreachable;
                        leb128.writeUleb128(expr_len.writer(), reg) catch unreachable;
                    }
                    expr_len.writer().writeByte(DW.OP.piece) catch unreachable;
                    leb128.writeUleb128(
                        expr_len.writer(),
                        @min(abi_size - reg_i * reg_bytes, reg_bytes),
                    ) catch unreachable;
                }
                leb128.writeUleb128(dbg_info.writer(), expr_len.bytes_written) catch unreachable;
                for (regs, 0..) |reg, reg_i| {
                    if (reg < 32) {
                        dbg_info.appendAssumeCapacity(DW.OP.reg0 + reg);
                    } else {
                        dbg_info.appendAssumeCapacity(DW.OP.regx);
                        leb128.writeUleb128(dbg_info.writer(), reg) catch unreachable;
                    }
                    dbg_info.appendAssumeCapacity(DW.OP.piece);
                    leb128.writeUleb128(
                        dbg_info.writer(),
                        @min(abi_size - reg_i * reg_bytes, reg_bytes),
                    ) catch unreachable;
                }
            },
            .stack => |info| {
                try dbg_info.ensureUnusedCapacity(9);
                dbg_info.appendAssumeCapacity(@intFromEnum(AbbrevCode.parameter));
                // DW.AT.location, DW.FORM.exprloc
                var expr_len = std.io.countingWriter(std.io.null_writer);
                if (info.fp_register < 32) {
                    expr_len.writer().writeByte(DW.OP.breg0 + info.fp_register) catch unreachable;
                } else {
                    expr_len.writer().writeByte(DW.OP.bregx) catch unreachable;
                    leb128.writeUleb128(expr_len.writer(), info.fp_register) catch unreachable;
                }
                leb128.writeIleb128(expr_len.writer(), info.offset) catch unreachable;
                leb128.writeUleb128(dbg_info.writer(), expr_len.bytes_written) catch unreachable;
                if (info.fp_register < 32) {
                    dbg_info.appendAssumeCapacity(DW.OP.breg0 + info.fp_register);
                } else {
                    dbg_info.appendAssumeCapacity(DW.OP.bregx);
                    leb128.writeUleb128(dbg_info.writer(), info.fp_register) catch unreachable;
                }
                leb128.writeIleb128(dbg_info.writer(), info.offset) catch unreachable;
            },
            .wasm_local => |value| {
                @import("../dev.zig").check(.wasm_linker);
                const leb_size = link.File.Wasm.getUleb128Size(value);
                try dbg_info.ensureUnusedCapacity(3 + leb_size);
                // wasm locations are encoded as follow:
                // DW_OP_WASM_location wasm-op
                // where wasm-op is defined as
                // wasm-op := wasm-local | wasm-global | wasm-operand_stack
                // where each argument is encoded as
                // <opcode> i:uleb128
                dbg_info.appendSliceAssumeCapacity(&.{
                    @intFromEnum(AbbrevCode.parameter),
                    DW.OP.WASM_location,
                    DW.OP.WASM_local,
                });
                leb128.writeUleb128(dbg_info.writer(), value) catch unreachable;
            },
            else => unreachable,
        }
        try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
        const index = dbg_info.items.len;
        dbg_info.appendNTimesAssumeCapacity(0, 4);
        try self.addTypeRelocGlobal(atom_index, ty, @intCast(index)); // DW.AT.type, DW.FORM.ref4
        dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
        // NOTE(review): closing brace appears displaced by interleaved lines below.
fn headerBytes(dwarf: *Dwarf, file_count: u32) u32 {
return dwarf.unitLengthBytes() + 2 + 1 + 1 + dwarf.sectionOffsetBytes() + 1 + 1 + 1 + 1 + 1 + 1 + 1 * (dwarf.debug_line.header.opcode_base - 1) +
1 + uleb128Bytes(DW.LNCT.path) + uleb128Bytes(DW.FORM.line_strp) + uleb128Bytes(1) + (dwarf.sectionOffsetBytes()) * 1 +
1 + uleb128Bytes(DW.LNCT.path) + uleb128Bytes(DW.FORM.line_strp) + uleb128Bytes(DW.LNCT.LLVM_source) + uleb128Bytes(DW.FORM.line_strp) + uleb128Bytes(file_count) + (dwarf.sectionOffsetBytes() + dwarf.sectionOffsetBytes()) * file_count;
}
    /// Emits a DW.TAG.variable DIE for `name` of type `ty` at `loc` into this
    /// NavState's .debug_info buffer. When `is_ptr` is set, `ty` is a pointer
    /// and the described value is its pointee.
    pub fn genVarDbgInfo(
        self: *NavState,
        name: [:0]const u8,
        ty: Type,
        owner_nav: InternPool.Nav.Index,
        is_ptr: bool,
        loc: DbgInfoLoc,
    ) error{OutOfMemory}!void {
        const dbg_info = &self.dbg_info;
        const atom_index = self.di_atom_navs.get(owner_nav).?;
        // Include the sentinel so the string can be appended in one slice.
        const name_with_null = name.ptr[0 .. name.len + 1];
        try dbg_info.append(@intFromEnum(AbbrevCode.variable));
        const gpa = self.dwarf.allocator;
        const pt = self.pt;
        const target = pt.zcu.getTarget();
        const endian = target.cpu.arch.endian();
        const child_ty = if (is_ptr) ty.childType(pt.zcu) else ty;
        // NOTE(review): the next three lines look like an interleaving artifact
        // from a different declaration — verify against the original file.
        const trailer_bytes = 1 + uleb128Bytes(0) +
            1 + uleb128Bytes(1) + 1;
        };
        switch (loc) {
            .register => |reg| {
                try dbg_info.ensureUnusedCapacity(3);
                // DW.AT.location, DW.FORM.exprloc
                // Measure first so the ULEB length prefix precedes the expression.
                var expr_len = std.io.countingWriter(std.io.null_writer);
                if (reg < 32) {
                    expr_len.writer().writeByte(DW.OP.reg0 + reg) catch unreachable;
                } else {
                    expr_len.writer().writeByte(DW.OP.regx) catch unreachable;
                    leb128.writeUleb128(expr_len.writer(), reg) catch unreachable;
                }
                leb128.writeUleb128(dbg_info.writer(), expr_len.bytes_written) catch unreachable;
                if (reg < 32) {
                    dbg_info.appendAssumeCapacity(DW.OP.reg0 + reg);
                } else {
                    dbg_info.appendAssumeCapacity(DW.OP.regx);
                    leb128.writeUleb128(dbg_info.writer(), reg) catch unreachable;
                }
            },
        // NOTE(review): the next two lines look like an interleaving artifact
        // from a different declaration — verify against the original file.
        const DebugLocLists = struct {
            section: Section,
            .register_pair => |regs| {
                const reg_bits = pt.zcu.getTarget().ptrBitWidth();
                const reg_bytes: u8 = @intCast(@divExact(reg_bits, 8));
                const abi_size = child_ty.abiSize(pt);
                try dbg_info.ensureUnusedCapacity(9);
                // DW.AT.location, DW.FORM.exprloc
                // Each register contributes a reg op plus a DW.OP.piece span.
                var expr_len = std.io.countingWriter(std.io.null_writer);
                for (regs, 0..) |reg, reg_i| {
                    if (reg < 32) {
                        expr_len.writer().writeByte(DW.OP.reg0 + reg) catch unreachable;
                    } else {
                        expr_len.writer().writeByte(DW.OP.regx) catch unreachable;
                        leb128.writeUleb128(expr_len.writer(), reg) catch unreachable;
                    }
                    expr_len.writer().writeByte(DW.OP.piece) catch unreachable;
                    leb128.writeUleb128(
                        expr_len.writer(),
                        @min(abi_size - reg_i * reg_bytes, reg_bytes),
                    ) catch unreachable;
                }
                leb128.writeUleb128(dbg_info.writer(), expr_len.bytes_written) catch unreachable;
                for (regs, 0..) |reg, reg_i| {
                    if (reg < 32) {
                        dbg_info.appendAssumeCapacity(DW.OP.reg0 + reg);
                    } else {
                        dbg_info.appendAssumeCapacity(DW.OP.regx);
                        leb128.writeUleb128(dbg_info.writer(), reg) catch unreachable;
                    }
                    dbg_info.appendAssumeCapacity(DW.OP.piece);
                    leb128.writeUleb128(
                        dbg_info.writer(),
                        @min(abi_size - reg_i * reg_bytes, reg_bytes),
                    ) catch unreachable;
                }
            },
fn baseOffset(dwarf: *Dwarf) u32 {
return dwarf.unitLengthBytes() + 2 + 1 + 1 + 4;
}
            .stack => |info| {
                try dbg_info.ensureUnusedCapacity(9);
                // DW.AT.location, DW.FORM.exprloc
                // Frame-pointer-relative: breg<n>/bregx followed by a signed offset.
                var expr_len = std.io.countingWriter(std.io.null_writer);
                if (info.fp_register < 32) {
                    expr_len.writer().writeByte(DW.OP.breg0 + info.fp_register) catch unreachable;
                } else {
                    expr_len.writer().writeByte(DW.OP.bregx) catch unreachable;
                    leb128.writeUleb128(expr_len.writer(), info.fp_register) catch unreachable;
                }
                leb128.writeIleb128(expr_len.writer(), info.offset) catch unreachable;
                leb128.writeUleb128(dbg_info.writer(), expr_len.bytes_written) catch unreachable;
                if (info.fp_register < 32) {
                    dbg_info.appendAssumeCapacity(DW.OP.breg0 + info.fp_register);
                } else {
                    dbg_info.appendAssumeCapacity(DW.OP.bregx);
                    leb128.writeUleb128(dbg_info.writer(), info.fp_register) catch unreachable;
                }
                leb128.writeIleb128(dbg_info.writer(), info.offset) catch unreachable;
            },
fn headerBytes(dwarf: *Dwarf) u32 {
return baseOffset(dwarf);
}
            .wasm_local => |value| {
                const leb_size = link.File.Wasm.getUleb128Size(value);
                try dbg_info.ensureUnusedCapacity(2 + leb_size);
                // wasm locals are encoded as follow:
                // DW_OP_WASM_location wasm-op
                // where wasm-op is defined as
                // wasm-op := wasm-local | wasm-global | wasm-operand_stack
                // where wasm-local is encoded as
                // wasm-local := 0x00 i:uleb128
                dbg_info.appendSliceAssumeCapacity(&.{
                    DW.OP.WASM_location,
                    DW.OP.WASM_local,
                });
                leb128.writeUleb128(dbg_info.writer(), value) catch unreachable;
            },
        // NOTE(review): the next two lines look like an interleaving artifact
        // from a different declaration — verify against the original file.
            const trailer_bytes = 0;
        };
            .memory,
            .linker_load,
            => {
                const ptr_width: u8 = @intCast(@divExact(target.ptrBitWidth(), 8));
                try dbg_info.ensureUnusedCapacity(2 + ptr_width);
                dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
                    1 + ptr_width + @intFromBool(is_ptr),
                    DW.OP.addr, // literal address
                });
                const offset: u32 = @intCast(dbg_info.items.len);
                // For .linker_load a zero placeholder is written; a relocation
                // recorded below fills in the real address.
                const addr = switch (loc) {
                    .memory => |x| x,
                    else => 0,
                };
                switch (ptr_width) {
                    0...4 => {
                        try dbg_info.writer().writeInt(u32, @intCast(addr), endian);
                    },
                    5...8 => {
                        try dbg_info.writer().writeInt(u64, addr, endian);
                    },
                    else => unreachable,
                }
                if (is_ptr) {
                    // We need deref the address as we point to the value via GOT entry.
                    try dbg_info.append(DW.OP.deref);
                }
                switch (loc) {
                    .linker_load => |load_struct| switch (load_struct.type) {
                        .direct => {
                            log.debug("{x}: target sym %{d}", .{ offset, load_struct.sym_index });
                            try self.exprloc_relocs.append(gpa, .{
                                .type = .direct_load,
                                .target = load_struct.sym_index,
                                .offset = offset,
                            });
                        },
                        .got => {
                            log.debug("{x}: target sym %{d} via GOT", .{ offset, load_struct.sym_index });
                            try self.exprloc_relocs.append(gpa, .{
                                .type = .got_load,
                                .target = load_struct.sym_index,
                                .offset = offset,
                            });
                        },
                        else => {}, // TODO
                    },
                    else => {},
                }
            },
        // NOTE(review): the next two lines look like an interleaving artifact
        // from a different declaration — verify against the original file.
        const DebugRngLists = struct {
            section: Section,
            .immediate => |x| {
                try dbg_info.ensureUnusedCapacity(2);
                // Remember where the one-byte expression length was written so it
                // can be fixed up after the LEB-encoded constant is appended.
                const fixup = dbg_info.items.len;
                dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
                    1,
                    if (child_ty.isSignedInt(pt.zcu)) DW.OP.consts else DW.OP.constu,
                });
                if (child_ty.isSignedInt(pt.zcu)) {
                    try leb128.writeIleb128(dbg_info.writer(), @as(i64, @bitCast(x)));
                } else {
                    try leb128.writeUleb128(dbg_info.writer(), x);
                }
                try dbg_info.append(DW.OP.stack_value);
                dbg_info.items[fixup] += @intCast(dbg_info.items.len - fixup - 2);
            },
        // NOTE(review): the next line looks like an interleaving artifact from a
        // different declaration — verify against the original file.
            const baseOffset = DebugLocLists.baseOffset;
            .undef => {
                // DW.AT.location, DW.FORM.exprloc
                // uleb128(exprloc_len)
                // DW.OP.implicit_value uleb128(len_of_bytes) bytes
                const abi_size: u32 = @intCast(child_ty.abiSize(self.pt));
                var implicit_value_len = std.ArrayList(u8).init(gpa);
                defer implicit_value_len.deinit();
                try leb128.writeUleb128(implicit_value_len.writer(), abi_size);
                const total_exprloc_len = 1 + implicit_value_len.items.len + abi_size;
                try leb128.writeUleb128(dbg_info.writer(), total_exprloc_len);
                try dbg_info.ensureUnusedCapacity(total_exprloc_len);
                dbg_info.appendAssumeCapacity(DW.OP.implicit_value);
                dbg_info.appendSliceAssumeCapacity(implicit_value_len.items);
                // 0xaa matches Zig's debug pattern for undefined memory.
                dbg_info.appendNTimesAssumeCapacity(0xaa, abi_size);
            },
fn headerBytes(dwarf: *Dwarf) u32 {
return baseOffset(dwarf) + dwarf.sectionOffsetBytes() * 1;
}
            // No location: a constant-0 stack value stands in for the variable.
            .none => {
                try dbg_info.ensureUnusedCapacity(3);
                dbg_info.appendSliceAssumeCapacity(&[3]u8{ // DW.AT.location, DW.FORM.exprloc
                    2, DW.OP.lit0, DW.OP.stack_value,
                });
            },
        // NOTE(review): the next two lines look like an interleaving artifact
        // from a different declaration — verify against the original file.
            const trailer_bytes = 1;
        };
            .nop => {
                try dbg_info.ensureUnusedCapacity(2);
                dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
                    1, DW.OP.nop,
                });
            },
    /// A de-duplicating string table section (.debug_str / .debug_line_str).
    const StringSection = struct {
        /// Concatenated null-terminated strings, in insertion order.
        contents: std.ArrayListUnmanaged(u8),
        /// Index-only map; keys/values live in `contents`/`section` (see Adapter).
        map: std.AutoArrayHashMapUnmanaged(void, void),
        section: Section,
        /// String sections always use a single unit.
        const unit: Unit.Index = @enumFromInt(0);
        const init: StringSection = .{
            .contents = .{},
            .map = .{},
            .section = Section.init,
        };
fn deinit(str_sec: *StringSection, gpa: std.mem.Allocator) void {
str_sec.contents.deinit(gpa);
str_sec.map.deinit(gpa);
str_sec.section.deinit(gpa);
}
        /// Interns `str`, returning the existing entry if the string was seen
        /// before, otherwise appending it (null-terminated) to `contents` and
        /// allocating a new entry in the section.
        fn addString(str_sec: *StringSection, dwarf: *Dwarf, str: []const u8) UpdateError!Entry.Index {
            const gop = try str_sec.map.getOrPutAdapted(dwarf.gpa, str, Adapter{ .str_sec = str_sec });
            // On failure below, undo the map insertion so the map and section
            // entry lists stay in sync.
            errdefer _ = str_sec.map.pop();
            const entry: Entry.Index = @enumFromInt(gop.index);
            if (!gop.found_existing) {
                // Map index and section entry index must stay identical.
                assert(try str_sec.section.addEntry(unit, dwarf) == entry);
                errdefer _ = str_sec.section.getUnit(unit).entries.pop();
                const entry_ptr = str_sec.section.getUnit(unit).getEntry(entry);
                assert(entry_ptr.off == str_sec.contents.items.len);
                // +1 for the null terminator.
                entry_ptr.len = @intCast(str.len + 1);
                try str_sec.contents.ensureUnusedCapacity(dwarf.gpa, str.len + 1);
                str_sec.contents.appendSliceAssumeCapacity(str);
                str_sec.contents.appendAssumeCapacity(0);
                str_sec.section.dirty = true;
            }
            return entry;
        }
        /// Hash-map adapter that lets `map` (with void keys) compare candidate
        /// strings against the bytes stored in `contents`.
        const Adapter = struct {
            str_sec: *StringSection,
            pub fn hash(_: Adapter, key: []const u8) u32 {
                return @truncate(std.hash.Wyhash.hash(0, key));
            }
        // NOTE(review): the next five lines look like an interleaving artifact
        // from a different declaration — verify against the original file.
            try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
            const index = dbg_info.items.len;
            dbg_info.appendNTimesAssumeCapacity(0, 4); // dw.at.type, dw.form.ref4
            try self.addTypeRelocGlobal(atom_index, child_ty, @intCast(index));
            dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
            pub fn eql(adapter: Adapter, key: []const u8, _: void, rhs_index: usize) bool {
                // Compare against the stored string, excluding its null terminator.
                const entry = adapter.str_sec.section.getUnit(unit).getEntry(@enumFromInt(rhs_index));
                return std.mem.eql(u8, key, adapter.str_sec.contents.items[entry.off..][0 .. entry.len - 1 :0]);
            }
        };
    };
    /// A linker section containing a sequence of `Unit`s.
    const Section = struct {
        /// True when on-disk contents are out of date and must be rewritten.
        dirty: bool,
        /// Whether unit lengths are padded via `padToIdeal` to leave growth room.
        pad_to_ideal: bool,
        alignment: InternPool.Alignment,
        /// Output-file section index (e.g. ELF section header index).
        index: u32,
        /// Head of the intrusive doubly-linked list of units.
        first: Unit.Index.Optional,
        /// Tail of the intrusive doubly-linked list of units.
        last: Unit.Index.Optional,
        /// File offset of this section's data.
        off: u64,
        len: u64,
        units: std.ArrayListUnmanaged(Unit),
        /// Identifies each DWARF section this linker emits.
        const Index = enum {
            debug_abbrev,
            debug_info,
            debug_line,
            debug_line_str,
            debug_loclists,
            debug_rnglists,
            debug_str,
        };
        /// Default state for a fresh section: empty, dirty, and with an
        /// obviously-invalid output-section index until one is assigned.
        const init: Section = .{
            .dirty = true,
            .pad_to_ideal = true,
            .alignment = .@"1",
            .index = std.math.maxInt(u32),
            .first = .none,
            .last = .none,
            .off = 0,
            .len = 0,
            .units = .{},
        };
fn deinit(sec: *Section, gpa: std.mem.Allocator) void {
for (sec.units.items) |*unit| unit.deinit(gpa);
sec.units.deinit(gpa);
sec.* = undefined;
}
        /// Appends a new unit to the section, links it at the tail of the unit
        /// list, and grows the section to cover it. On failure the unit is
        /// popped again so the section is left unchanged.
        fn addUnit(sec: *Section, header_len: u32, trailer_len: u32, dwarf: *Dwarf) UpdateError!Unit.Index {
            const unit: Unit.Index = @enumFromInt(sec.units.items.len);
            const unit_ptr = try sec.units.addOne(dwarf.gpa);
            errdefer sec.popUnit();
            unit_ptr.* = .{
                .prev = sec.last,
                .next = .none,
                .first = .none,
                .last = .none,
                .off = 0,
                .header_len = header_len,
                .trailer_len = trailer_len,
                // A fresh unit contains only header and trailer.
                .len = header_len + trailer_len,
                .entries = .{},
                .cross_entry_relocs = .{},
                .cross_unit_relocs = .{},
                .cross_section_relocs = .{},
                .external_relocs = .{},
            };
            if (sec.last.unwrap()) |last_unit| {
                const last_unit_ptr = sec.getUnit(last_unit);
                last_unit_ptr.next = unit.toOptional();
                // Place the new unit after the (padded) previous one.
                unit_ptr.off = last_unit_ptr.off + sec.padToIdeal(last_unit_ptr.len);
            }
            if (sec.first == .none)
                sec.first = unit.toOptional();
            sec.last = unit.toOptional();
            try sec.resize(dwarf, unit_ptr.off + sec.padToIdeal(unit_ptr.len));
            return unit;
        }
fn unlinkUnit(sec: *Section, unit: Unit.Index) void {
const unit_ptr = sec.getUnit(unit);
if (unit_ptr.prev.unwrap()) |prev_unit| sec.getUnit(prev_unit).next = unit_ptr.next;
if (unit_ptr.next.unwrap()) |next_unit| sec.getUnit(next_unit).prev = unit_ptr.prev;
if (sec.first.unwrap().? == unit) sec.first = unit_ptr.next;
if (sec.last.unwrap().? == unit) sec.last = unit_ptr.prev;
}
fn popUnit(sec: *Section) void {
const unit: Unit.Index = @enumFromInt(sec.units.items.len - 1);
sec.unlinkUnit(unit);
_ = sec.units.pop();
}
fn addEntry(sec: *Section, unit: Unit.Index, dwarf: *Dwarf) UpdateError!Entry.Index {
return sec.getUnit(unit).addEntry(sec, dwarf);
}
fn getUnit(sec: *Section, unit: Unit.Index) *Unit {
return &sec.units.items[@intFromEnum(unit)];
}
fn replaceEntry(sec: *Section, unit: Unit.Index, entry: Entry.Index, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
const unit_ptr = sec.getUnit(unit);
try unit_ptr.getEntry(entry).replace(unit_ptr, sec, dwarf, contents);
}
fn resize(sec: *Section, dwarf: *Dwarf, len: u64) UpdateError!void {
if (dwarf.bin_file.cast(.elf)) |elf_file| {
try elf_file.growNonAllocSection(sec.index, len, @intCast(sec.alignment.toByteUnits().?), true);
const shdr = &elf_file.shdrs.items[sec.index];
sec.off = shdr.sh_offset;
sec.len = shdr.sh_size;
} else if (dwarf.bin_file.cast(.macho)) |macho_file| {
const header = if (macho_file.d_sym) |*d_sym| header: {
try d_sym.growSection(@intCast(sec.index), len, true, macho_file);
break :header &d_sym.sections.items[sec.index];
} else header: {
try macho_file.growSection(@intCast(sec.index), len);
break :header &macho_file.sections.items(.header)[sec.index];
};
sec.off = header.offset;
sec.len = header.size;
}
}
fn trim(sec: *Section, dwarf: *Dwarf) void {
const len = sec.getUnit(sec.first.unwrap() orelse return).off;
if (len == 0) return;
for (sec.units.items) |*unit| unit.off -= len;
sec.off += len;
sec.len -= len;
if (dwarf.bin_file.cast(.elf)) |elf_file| {
const shdr = &elf_file.shdrs.items[sec.index];
shdr.sh_offset = sec.off;
shdr.sh_size = sec.len;
} else if (dwarf.bin_file.cast(.macho)) |macho_file| {
const header = if (macho_file.d_sym) |*d_sym|
&d_sym.sections.items[sec.index]
else
&macho_file.sections.items(.header)[sec.index];
header.offset = @intCast(sec.off);
header.size = sec.len;
}
}
fn resolveRelocs(sec: *Section, dwarf: *Dwarf) RelocError!void {
for (sec.units.items) |*unit| try unit.resolveRelocs(sec, dwarf);
}
fn padToIdeal(sec: *Section, actual_size: anytype) @TypeOf(actual_size) {
return if (sec.pad_to_ideal) Dwarf.padToIdeal(actual_size) else actual_size;
}
};
/// A unit within a `Section` containing a sequence of `Entry`s.
const Unit = struct {
    /// Intrusive links ordering units by file offset within the section.
    prev: Index.Optional,
    next: Index.Optional,
    /// Head/tail of the intrusive entry list, in offset order.
    first: Entry.Index.Optional,
    last: Entry.Index.Optional,
    /// offset within containing section
    off: u32,
    /// Size of the unit header preceding the entries.
    header_len: u32,
    /// Size of the trailer written after the last entry.
    trailer_len: u32,
    /// data length in bytes
    len: u32,
    entries: std.ArrayListUnmanaged(Entry),
    /// Relocations whose source and target both lie within this unit.
    cross_entry_relocs: std.ArrayListUnmanaged(CrossEntryReloc),
    /// Relocations targeting another unit in the same section.
    cross_unit_relocs: std.ArrayListUnmanaged(CrossUnitReloc),
    /// Relocations targeting a different section.
    cross_section_relocs: std.ArrayListUnmanaged(CrossSectionReloc),
    /// Relocations against output-file symbols (absolute addresses).
    external_relocs: std.ArrayListUnmanaged(ExternalReloc),
    const Index = enum(u32) {
        // Index 0 is the main unit; referenced elsewhere as `.main`.
        main,
        _,
        const Optional = enum(u32) {
            none = std.math.maxInt(u32),
            _,
            fn unwrap(uio: Optional) ?Index {
                return if (uio != .none) @enumFromInt(@intFromEnum(uio)) else null;
            }
        };
        fn toOptional(ui: Index) Optional {
            return @enumFromInt(@intFromEnum(ui));
        }
    };
    fn deinit(unit: *Unit, gpa: std.mem.Allocator) void {
        unit.entries.deinit(gpa);
        unit.cross_entry_relocs.deinit(gpa);
        unit.cross_unit_relocs.deinit(gpa);
        unit.cross_section_relocs.deinit(gpa);
        unit.external_relocs.deinit(gpa);
        unit.* = undefined;
    }
    /// Appends a new zero-length entry at the tail of the entry list. The
    /// unit itself is not resized here; that happens when the entry is
    /// given contents via `Entry.replace`.
    fn addEntry(unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!Entry.Index {
        const entry: Entry.Index = @enumFromInt(unit.entries.items.len);
        const entry_ptr = try unit.entries.addOne(dwarf.gpa);
        entry_ptr.* = .{
            .prev = unit.last,
            .next = .none,
            .off = 0,
            .len = 0,
        };
        if (unit.last.unwrap()) |last_entry| {
            const last_entry_ptr = unit.getEntry(last_entry);
            last_entry_ptr.next = entry.toOptional();
            // Place after the (padded) previous last entry.
            entry_ptr.off = last_entry_ptr.off + sec.padToIdeal(last_entry_ptr.len);
        }
        if (unit.first == .none)
            unit.first = entry.toOptional();
        unit.last = entry.toOptional();
        return entry;
    }
    fn getEntry(unit: *Unit, entry: Entry.Index) *Entry {
        return &unit.entries.items[@intFromEnum(entry)];
    }
    /// Grows this unit to `len` bytes plus `extra_header_len` new header
    /// bytes in front. If the unit no longer fits before its successor, it is
    /// unlinked and relocated to the end of the section.
    fn resize(unit_ptr: *Unit, sec: *Section, dwarf: *Dwarf, extra_header_len: u32, len: u32) UpdateError!void {
        // Space available before the start of the next unit (or section end).
        const end = if (unit_ptr.next.unwrap()) |next_unit|
            sec.getUnit(next_unit).off
        else
            sec.len;
        if (extra_header_len > 0 or unit_ptr.off + len > end) {
            unit_ptr.len = @min(unit_ptr.len, len);
            var new_off = unit_ptr.off;
            if (unit_ptr.next.unwrap()) |next_unit| {
                // Unlink from the current position and relink at the list tail,
                // where there is room to grow.
                const next_unit_ptr = sec.getUnit(next_unit);
                if (unit_ptr.prev.unwrap()) |prev_unit|
                    sec.getUnit(prev_unit).next = unit_ptr.next
                else
                    sec.first = unit_ptr.next;
                const unit = next_unit_ptr.prev;
                next_unit_ptr.prev = unit_ptr.prev;
                const last_unit_ptr = sec.getUnit(sec.last.unwrap().?);
                last_unit_ptr.next = unit;
                unit_ptr.prev = sec.last;
                unit_ptr.next = .none;
                new_off = last_unit_ptr.off + sec.padToIdeal(last_unit_ptr.len);
                sec.last = unit;
                sec.dirty = true;
            } else if (extra_header_len > 0) {
                // `copyRangeAll` in `move` does not support overlapping ranges
                // so make sure new location is disjoint from current location.
                new_off += unit_ptr.len -| extra_header_len;
            }
            try sec.resize(dwarf, new_off + len);
            // Move the existing data, then claim the space just before it as
            // the enlarged header.
            try unit_ptr.move(sec, dwarf, new_off + extra_header_len);
            unit_ptr.off -= extra_header_len;
            unit_ptr.header_len += extra_header_len;
            sec.trim(dwarf);
        }
        unit_ptr.len = len;
    }
    /// Copies this unit's bytes to `new_off` within the section on disk.
    fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void {
        if (unit.off == new_off) return;
        if (try dwarf.getFile().?.copyRangeAll(
            sec.off + unit.off,
            dwarf.getFile().?,
            sec.off + new_off,
            unit.len,
        ) != unit.len) return error.InputOutput;
        unit.off = new_off;
    }
    /// Changes the header to exactly `len` bytes by consuming/releasing the
    /// free space before this unit, growing the whole unit first if needed.
    fn resizeHeader(unit: *Unit, sec: *Section, dwarf: *Dwarf, len: u32) UpdateError!void {
        if (unit.header_len == len) return;
        // Free bytes between the previous unit's end and this unit's start.
        const available_len = if (unit.prev.unwrap()) |prev_unit| prev_excess: {
            const prev_unit_ptr = sec.getUnit(prev_unit);
            break :prev_excess unit.off - prev_unit_ptr.off - prev_unit_ptr.len;
        } else 0;
        if (available_len + unit.header_len < len)
            try unit.resize(sec, dwarf, len - unit.header_len, unit.len - unit.header_len + len);
        if (unit.header_len > len) {
            const excess_header_len = unit.header_len - len;
            unit.off += excess_header_len;
            unit.header_len -= excess_header_len;
            unit.len -= excess_header_len;
        } else if (unit.header_len < len) {
            const needed_header_len = len - unit.header_len;
            unit.off -= needed_header_len;
            unit.header_len += needed_header_len;
            unit.len += needed_header_len;
        }
        assert(unit.header_len == len);
        sec.trim(dwarf);
    }
    /// Writes a fully formed header; must match the reserved `header_len`.
    fn replaceHeader(unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
        assert(contents.len == unit.header_len);
        try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off);
    }
    /// Writes the section-specific trailer after the last entry, filling any
    /// remaining space up to the next unit with a harmless fill byte.
    fn writeTrailer(unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!void {
        const start = unit.off + unit.header_len + if (unit.last.unwrap()) |last_entry| end: {
            const last_entry_ptr = unit.getEntry(last_entry);
            break :end last_entry_ptr.off + last_entry_ptr.len;
        } else 0;
        const end = if (unit.next.unwrap()) |next_unit|
            sec.getUnit(next_unit).off
        else
            sec.len;
        const trailer_len: usize = @intCast(end - start);
        assert(trailer_len >= unit.trailer_len);
        var trailer = try std.ArrayList(u8).initCapacity(dwarf.gpa, trailer_len);
        defer trailer.deinit();
        // Each section has its own terminator encoding and padding byte.
        const fill_byte: u8 = if (sec == &dwarf.debug_aranges.section) fill: {
            trailer.appendNTimesAssumeCapacity(0, @intFromEnum(dwarf.address_size) * 2);
            break :fill 0;
        } else if (sec == &dwarf.debug_info.section) fill: {
            assert(uleb128Bytes(@intFromEnum(AbbrevCode.null)) == 1);
            trailer.appendNTimesAssumeCapacity(@intFromEnum(AbbrevCode.null), 2);
            break :fill @intFromEnum(AbbrevCode.null);
        } else if (sec == &dwarf.debug_line.section) fill: {
            unit.len -= unit.trailer_len;
            // Keep the trailer length parity consistent with the padding.
            const extra_len: u32 = @intCast((trailer_len - DebugLine.trailer_bytes) & 1);
            unit.trailer_len = DebugLine.trailer_bytes + extra_len;
            unit.len += unit.trailer_len;
            // prevent end sequence from emitting an invalid file index
            trailer.appendAssumeCapacity(DW.LNS.set_file);
            uleb128(trailer.fixedWriter(), 0) catch unreachable;
            trailer.appendAssumeCapacity(DW.LNS.extended_op);
            std.leb.writeUnsignedExtended(trailer.addManyAsSliceAssumeCapacity(uleb128Bytes(1) + extra_len), 1);
            trailer.appendAssumeCapacity(DW.LNE.end_sequence);
            break :fill DW.LNS.extended_op;
        } else if (sec == &dwarf.debug_rnglists.section) fill: {
            trailer.appendAssumeCapacity(DW.RLE.end_of_list);
            break :fill DW.RLE.end_of_list;
        } else unreachable;
        assert(trailer.items.len == unit.trailer_len);
        trailer.appendNTimesAssumeCapacity(fill_byte, trailer_len - trailer.items.len);
        assert(trailer.items.len == trailer_len);
        try dwarf.getFile().?.pwriteAll(trailer.items, sec.off + start);
    }
    /// Patches every recorded relocation into the output file. Cross-entry
    /// and cross-unit/section targets become section-relative offsets;
    /// external targets become symbol addresses.
    fn resolveRelocs(unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void {
        for (unit.cross_entry_relocs.items) |reloc| {
            try dwarf.resolveReloc(
                // Source byte position within the output file; a `.none`
                // source entry means the offset is header-relative.
                sec.off + unit.off + (if (reloc.source_entry.unwrap()) |source_entry|
                    unit.header_len + unit.getEntry(source_entry).off
                else
                    0) + reloc.source_off,
                unit.off + unit.header_len + unit.getEntry(reloc.target_entry).assertNonEmpty(unit, sec, dwarf).off + reloc.target_off,
                dwarf.sectionOffsetBytes(),
            );
        }
        for (unit.cross_unit_relocs.items) |reloc| {
            const target_unit = sec.getUnit(reloc.target_unit);
            try dwarf.resolveReloc(
                sec.off + unit.off + (if (reloc.source_entry.unwrap()) |source_entry|
                    unit.header_len + unit.getEntry(source_entry).off
                else
                    0) + reloc.source_off,
                target_unit.off + (if (reloc.target_entry.unwrap()) |target_entry|
                    target_unit.header_len + target_unit.getEntry(target_entry).assertNonEmpty(unit, sec, dwarf).off
                else
                    0) + reloc.target_off,
                dwarf.sectionOffsetBytes(),
            );
        }
        for (unit.cross_section_relocs.items) |reloc| {
            // Map the target section tag to the corresponding Dwarf field.
            const target_sec = switch (reloc.target_sec) {
                inline else => |target_sec| &@field(dwarf, @tagName(target_sec)).section,
            };
            const target_unit = target_sec.getUnit(reloc.target_unit);
            try dwarf.resolveReloc(
                sec.off + unit.off + (if (reloc.source_entry.unwrap()) |source_entry|
                    unit.header_len + unit.getEntry(source_entry).off
                else
                    0) + reloc.source_off,
                target_unit.off + (if (reloc.target_entry.unwrap()) |target_entry|
                    target_unit.header_len + target_unit.getEntry(target_entry).assertNonEmpty(unit, sec, dwarf).off
                else
                    0) + reloc.target_off,
                dwarf.sectionOffsetBytes(),
            );
        }
        if (dwarf.bin_file.cast(.elf)) |elf_file| {
            const zo = elf_file.zigObjectPtr().?;
            for (unit.external_relocs.items) |reloc| {
                const symbol = zo.symbol(reloc.target_sym);
                try dwarf.resolveReloc(
                    sec.off + unit.off + unit.header_len + unit.getEntry(reloc.source_entry).off + reloc.source_off,
                    // TLS symbols are stored relative to the DTP base.
                    @bitCast(symbol.address(.{}, elf_file) + @as(i64, @intCast(reloc.target_off)) -
                        if (symbol.flags.is_tls) elf_file.dtpAddress() else 0),
                    @intFromEnum(dwarf.address_size),
                );
            }
        } else if (dwarf.bin_file.cast(.macho)) |macho_file| {
            const zo = macho_file.getZigObject().?;
            for (unit.external_relocs.items) |reloc| {
                const ref = zo.getSymbolRef(reloc.target_sym, macho_file);
                try dwarf.resolveReloc(
                    sec.off + unit.off + unit.header_len + unit.getEntry(reloc.source_entry).off + reloc.source_off,
                    ref.getSymbol(macho_file).?.getAddress(.{}, macho_file),
                    @intFromEnum(dwarf.address_size),
                );
            }
        }
    }
    /// Reloc within one unit: both entries belong to this unit.
    const CrossEntryReloc = struct {
        source_entry: Entry.Index.Optional = .none,
        source_off: u32 = 0,
        target_entry: Entry.Index,
        target_off: u32 = 0,
    };
    /// Reloc to another unit in the same section.
    const CrossUnitReloc = struct {
        source_entry: Entry.Index.Optional = .none,
        source_off: u32 = 0,
        target_unit: Unit.Index,
        target_entry: Entry.Index.Optional = .none,
        target_off: u32 = 0,
    };
    /// Reloc to a unit/entry in a different section.
    const CrossSectionReloc = struct {
        source_entry: Entry.Index.Optional = .none,
        source_off: u32 = 0,
        target_sec: Section.Index,
        target_unit: Unit.Index,
        target_entry: Entry.Index.Optional = .none,
        target_off: u32 = 0,
    };
    /// Reloc against an output-file symbol address.
    const ExternalReloc = struct {
        source_entry: Entry.Index,
        source_off: u32 = 0,
        target_sym: u32,
        target_off: u64 = 0,
    };
};
/// An indivisible entry within a `Unit` containing section-specific data.
const Entry = struct {
    /// Intrusive links ordering entries by offset within the unit.
    prev: Index.Optional,
    next: Index.Optional,
    /// offset from end of containing unit header
    off: u32,
    /// data length in bytes
    len: u32,
    const Index = enum(u32) {
        _,
        const Optional = enum(u32) {
            none = std.math.maxInt(u32),
            _,
            fn unwrap(eio: Optional) ?Index {
                return if (eio != .none) @enumFromInt(@intFromEnum(eio)) else null;
            }
        };
        fn toOptional(ei: Index) Optional {
            return @enumFromInt(@intFromEnum(ei));
        }
    };
    /// Overwrites the gap between this entry's end and the next entry's start
    /// with bytes that are harmless to a DWARF consumer (padding abbrevs in
    /// `.debug_info`, `const_add_pc` in `.debug_line`).
    fn pad(entry: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!void {
        const start = entry.off + entry.len;
        const len = unit.getEntry(entry.next.unwrap() orelse return).off - start;
        if (sec == &dwarf.debug_info.section) {
            var buf: [
                @max(
                    uleb128Bytes(@intFromEnum(AbbrevCode.pad_1)),
                    uleb128Bytes(@intFromEnum(AbbrevCode.pad_n)) + uleb128Bytes(std.math.maxInt(u32)),
                )
            ]u8 = undefined;
            var fbs = std.io.fixedBufferStream(&buf);
            switch (len) {
                0 => {},
                1 => uleb128(fbs.writer(), @intFromEnum(AbbrevCode.pad_1)) catch unreachable,
                else => {
                    // `pad_n` is a block form: the gap is the abbrev code, a
                    // uleb128 block length, then the block itself. Find the
                    // smallest length encoding consistent with the gap size.
                    uleb128(fbs.writer(), @intFromEnum(AbbrevCode.pad_n)) catch unreachable;
                    const abbrev_code_bytes = fbs.pos;
                    var block_len_bytes: u5 = 1;
                    while (true) switch (std.math.order(len - abbrev_code_bytes - block_len_bytes, @as(u32, 1) << 7 * block_len_bytes)) {
                        .lt => break uleb128(fbs.writer(), len - abbrev_code_bytes - block_len_bytes) catch unreachable,
                        .eq => {
                            // no length will ever work, so undercount and futz with the leb encoding to make up the missing byte
                            block_len_bytes += 1;
                            std.leb.writeUnsignedExtended(buf[fbs.pos..][0..block_len_bytes], len - abbrev_code_bytes - block_len_bytes);
                            fbs.pos += block_len_bytes;
                            break;
                        },
                        .gt => block_len_bytes += 1,
                    };
                    assert(fbs.pos == abbrev_code_bytes + block_len_bytes);
                },
            }
            assert(fbs.pos <= len);
            try dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off + unit.off + unit.header_len + start);
        } else if (sec == &dwarf.debug_line.section) {
            const buf = try dwarf.gpa.alloc(u8, len);
            defer dwarf.gpa.free(buf);
            @memset(buf, DW.LNS.const_add_pc);
            try dwarf.getFile().?.pwriteAll(buf, sec.off + unit.off + unit.header_len + start);
        } else assert(!sec.pad_to_ideal and len == 0);
    }
    /// Replaces this entry's contents. If the new contents don't fit before
    /// the next entry, the entry is moved to the end of the unit (growing the
    /// unit if necessary), and surrounding padding is rewritten.
    fn replace(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
        const end = if (entry_ptr.next.unwrap()) |next_entry|
            unit.getEntry(next_entry).off
        else
            unit.len -| (unit.header_len + unit.trailer_len);
        if (entry_ptr.off + contents.len > end) {
            if (entry_ptr.next.unwrap()) |next_entry| {
                // Unlink and relink at the tail of the entry list.
                if (entry_ptr.prev.unwrap()) |prev_entry| {
                    const prev_entry_ptr = unit.getEntry(prev_entry);
                    prev_entry_ptr.next = entry_ptr.next;
                    // Re-pad across the hole this entry leaves behind.
                    try prev_entry_ptr.pad(unit, sec, dwarf);
                } else unit.first = entry_ptr.next;
                const next_entry_ptr = unit.getEntry(next_entry);
                const entry = next_entry_ptr.prev;
                next_entry_ptr.prev = entry_ptr.prev;
                const last_entry_ptr = unit.getEntry(unit.last.unwrap().?);
                last_entry_ptr.next = entry;
                entry_ptr.prev = unit.last;
                entry_ptr.next = .none;
                entry_ptr.off = last_entry_ptr.off + sec.padToIdeal(last_entry_ptr.len);
                unit.last = entry;
            }
            try unit.resize(sec, dwarf, 0, @intCast(unit.header_len + entry_ptr.off + sec.padToIdeal(contents.len) + unit.trailer_len));
        }
        entry_ptr.len = @intCast(contents.len);
        {
            // Re-pad from the nearest non-empty predecessor up to this entry.
            var prev_entry_ptr = entry_ptr;
            while (prev_entry_ptr.prev.unwrap()) |prev_entry| {
                prev_entry_ptr = unit.getEntry(prev_entry);
                if (prev_entry_ptr.len == 0) continue;
                try prev_entry_ptr.pad(unit, sec, dwarf);
                break;
            }
        }
        try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off + unit.header_len + entry_ptr.off);
        try entry_ptr.pad(unit, sec, dwarf);
        // Disabled debugging aid: dump the entire section layout and bytes.
        if (false) {
            const buf = try dwarf.gpa.alloc(u8, sec.len);
            defer dwarf.gpa.free(buf);
            _ = try dwarf.getFile().?.preadAll(buf, sec.off);
            log.info("Section{{ .first = {}, .last = {}, .off = 0x{x}, .len = 0x{x} }}", .{
                @intFromEnum(sec.first),
                @intFromEnum(sec.last),
                sec.off,
                sec.len,
            });
            for (sec.units.items) |*unit_ptr| {
                log.info("  Unit{{ .prev = {}, .next = {}, .first = {}, .last = {}, .off = 0x{x}, .header_len = 0x{x}, .trailer_len = 0x{x}, .len = 0x{x} }}", .{
                    @intFromEnum(unit_ptr.prev),
                    @intFromEnum(unit_ptr.next),
                    @intFromEnum(unit_ptr.first),
                    @intFromEnum(unit_ptr.last),
                    unit_ptr.off,
                    unit_ptr.header_len,
                    unit_ptr.trailer_len,
                    unit_ptr.len,
                });
                for (unit_ptr.entries.items) |*entry| {
                    log.info("    Entry{{ .prev = {}, .next = {}, .off = 0x{x}, .len = 0x{x} }}", .{
                        @intFromEnum(entry.prev),
                        @intFromEnum(entry.next),
                        entry.off,
                        entry.len,
                    });
                }
            }
            std.debug.dumpHex(buf);
        }
    }
    /// Asserts this relocation target has been given contents; on failure,
    /// logs which section and which type/nav the missing entry belongs to
    /// before panicking.
    fn assertNonEmpty(entry: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf) *Entry {
        if (entry.len > 0) return entry;
        if (std.debug.runtime_safety) {
            log.err("missing {} from {s}", .{
                @as(Entry.Index, @enumFromInt(entry - unit.entries.items.ptr)),
                std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file|
                    elf_file.shstrtab.items[elf_file.shdrs.items[sec.index].sh_name..]
                else if (dwarf.bin_file.cast(.macho)) |macho_file|
                    if (macho_file.d_sym) |*d_sym|
                        &d_sym.sections.items[sec.index].segname
                    else
                        &macho_file.sections.items(.header)[sec.index].segname
                else
                    "?", 0),
            });
            const zcu = dwarf.bin_file.comp.module.?;
            const ip = &zcu.intern_pool;
            // Search the type and nav maps for whichever one owns this entry.
            for (dwarf.types.keys(), dwarf.types.values()) |ty, other_entry| {
                const ty_unit: Unit.Index = if (Type.fromInterned(ty).typeDeclInst(zcu)) |inst_index|
                    dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFull(ip).file).mod) catch unreachable
                else
                    .main;
                if (sec.getUnit(ty_unit) == unit and unit.getEntry(other_entry) == entry)
                    log.err("missing Type({}({d}))", .{
                        Type.fromInterned(ty).fmt(.{ .tid = .main, .zcu = zcu }),
                        @intFromEnum(ty),
                    });
            }
            for (dwarf.navs.keys(), dwarf.navs.values()) |nav, other_entry| {
                const nav_unit = dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav).srcInst(ip).resolveFull(ip).file).mod) catch unreachable;
                if (sec.getUnit(nav_unit) == unit and unit.getEntry(other_entry) == entry)
                    log.err("missing Nav({}({d}))", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) });
            }
        }
        @panic("missing dwarf relocation target");
    }
};
/// A DWARF location description, written out as a DWARF expression by
/// `write`. Encodings are chosen to minimize the emitted byte count.
pub const Loc = union(enum) {
    empty,
    /// An address; currently only expressible via an output-file symbol.
    addr: union(enum) { sym: u32 },
    /// Unsigned constant pushed on the expression stack.
    constu: u64,
    /// Signed constant pushed on the expression stack.
    consts: i64,
    /// Sum of two sub-locations.
    plus: Bin,
    /// Value lives in the given register.
    reg: u32,
    /// Base-register addressing (offset supplied via `plus`).
    breg: u32,
    push_object_address,
    form_tls_address: *const Loc,
    /// Value is the given literal bytes (DW.OP.implicit_value).
    implicit_value: []const u8,
    stack_value: *const Loc,
    /// WebAssembly location extensions (DW.OP.WASM_location).
    wasm_ext: union(enum) {
        local: u32,
        global: u32,
        operand_stack: u32,
    },
    pub const Bin = struct { *const Loc, *const Loc };
    /// Returns the constant payload if it fits in `Int`, else null.
    fn getConst(loc: Loc, comptime Int: type) ?Int {
        return switch (loc) {
            .constu => |constu| std.math.cast(Int, constu),
            .consts => |consts| std.math.cast(Int, consts),
            else => null,
        };
    }
    fn getBaseReg(loc: Loc) ?u32 {
        return switch (loc) {
            .breg => |breg| breg,
            else => null,
        };
    }
    /// Emits a register operand: the compact single-byte form (`op0` + reg)
    /// for registers 0..31, otherwise `opx` followed by a uleb128 register.
    fn writeReg(reg: u32, op0: u8, opx: u8, writer: anytype) @TypeOf(writer).Error!void {
        if (std.math.cast(u5, reg)) |small_reg| {
            try writer.writeByte(op0 + small_reg);
        } else {
            try writer.writeByte(opx);
            try uleb128(writer, reg);
        }
    }
    /// Writes this location as DWARF expression bytes through `wip`, which
    /// must provide `infoWriter()` and `addrSym()` (see `WipNav.exprloc`).
    fn write(loc: Loc, wip: anytype) UpdateError!void {
        const writer = wip.infoWriter();
        switch (loc) {
            .empty => unreachable,
            .addr => |addr| {
                try writer.writeByte(DW.OP.addr);
                switch (addr) {
                    .sym => |sym_index| try wip.addrSym(sym_index),
                }
            },
            // Pick the shortest encoding: lit0..lit31, then fixed-size forms,
            // with uleb128 `constu` where it beats the next fixed size
            // (u21 fits in 3 leb bytes, u49 in 7).
            .constu => |constu| if (std.math.cast(u5, constu)) |lit| {
                try writer.writeByte(@as(u8, DW.OP.lit0) + lit);
            } else if (std.math.cast(u8, constu)) |const1u| {
                try writer.writeAll(&.{ DW.OP.const1u, const1u });
            } else if (std.math.cast(u16, constu)) |const2u| {
                try writer.writeByte(DW.OP.const2u);
                try writer.writeInt(u16, const2u, wip.dwarf.endian);
            } else if (std.math.cast(u21, constu)) |const3u| {
                try writer.writeByte(DW.OP.constu);
                try uleb128(writer, const3u);
            } else if (std.math.cast(u32, constu)) |const4u| {
                try writer.writeByte(DW.OP.const4u);
                try writer.writeInt(u32, const4u, wip.dwarf.endian);
            } else if (std.math.cast(u49, constu)) |const7u| {
                try writer.writeByte(DW.OP.constu);
                try uleb128(writer, const7u);
            } else {
                try writer.writeByte(DW.OP.const8u);
                try writer.writeInt(u64, constu, wip.dwarf.endian);
            },
            // Same idea for signed constants using sleb128 `consts`.
            .consts => |consts| if (std.math.cast(i8, consts)) |const1s| {
                try writer.writeAll(&.{ DW.OP.const1s, @bitCast(const1s) });
            } else if (std.math.cast(i16, consts)) |const2s| {
                try writer.writeByte(DW.OP.const2s);
                try writer.writeInt(i16, const2s, wip.dwarf.endian);
            } else if (std.math.cast(i21, consts)) |const3s| {
                try writer.writeByte(DW.OP.consts);
                try sleb128(writer, const3s);
            } else if (std.math.cast(i32, consts)) |const4s| {
                try writer.writeByte(DW.OP.const4s);
                try writer.writeInt(i32, const4s, wip.dwarf.endian);
            } else if (std.math.cast(i49, consts)) |const7s| {
                try writer.writeByte(DW.OP.consts);
                try sleb128(writer, const7s);
            } else {
                try writer.writeByte(DW.OP.const8s);
                try writer.writeInt(i64, consts, wip.dwarf.endian);
            },
            .plus => |plus| done: {
                // x + 0 => x (u0 can only hold zero).
                if (plus[0].getConst(u0)) |_| {
                    try plus[1].write(wip);
                    break :done;
                }
                if (plus[1].getConst(u0)) |_| {
                    try plus[0].write(wip);
                    break :done;
                }
                // breg + const folds into a single breg op with offset.
                if (plus[0].getBaseReg()) |breg| {
                    if (plus[1].getConst(i65)) |offset| {
                        try writeReg(breg, DW.OP.breg0, DW.OP.bregx, writer);
                        try sleb128(writer, offset);
                        break :done;
                    }
                }
                if (plus[1].getBaseReg()) |breg| {
                    if (plus[0].getConst(i65)) |offset| {
                        try writeReg(breg, DW.OP.breg0, DW.OP.bregx, writer);
                        try sleb128(writer, offset);
                        break :done;
                    }
                }
                // x + uconst uses plus_uconst to avoid a stack push.
                if (plus[0].getConst(u64)) |uconst| {
                    try plus[1].write(wip);
                    try writer.writeByte(DW.OP.plus_uconst);
                    try uleb128(writer, uconst);
                    break :done;
                }
                if (plus[1].getConst(u64)) |uconst| {
                    try plus[0].write(wip);
                    try writer.writeByte(DW.OP.plus_uconst);
                    try uleb128(writer, uconst);
                    break :done;
                }
                // General case: push both operands, then add.
                try plus[0].write(wip);
                try plus[1].write(wip);
                try writer.writeByte(DW.OP.plus);
            },
            .reg => |reg| try writeReg(reg, DW.OP.reg0, DW.OP.regx, writer),
            .breg => |breg| {
                try writeReg(breg, DW.OP.breg0, DW.OP.bregx, writer);
                try sleb128(writer, 0);
            },
            .push_object_address => try writer.writeByte(DW.OP.push_object_address),
            .form_tls_address => |addr| {
                try addr.write(wip);
                try writer.writeByte(DW.OP.form_tls_address);
            },
            .implicit_value => |value| {
                try writer.writeByte(DW.OP.implicit_value);
                try uleb128(writer, value.len);
                try writer.writeAll(value);
            },
            .stack_value => |value| {
                try value.write(wip);
                try writer.writeByte(DW.OP.stack_value);
            },
            .wasm_ext => |wasm_ext| {
                try writer.writeByte(DW.OP.WASM_location);
                switch (wasm_ext) {
                    .local => |local| {
                        try writer.writeByte(DW.OP.WASM_local);
                        try uleb128(writer, local);
                    },
                    .global => |global| if (std.math.cast(u21, global)) |global_u21| {
                        try writer.writeByte(DW.OP.WASM_global);
                        try uleb128(writer, global_u21);
                    } else {
                        try writer.writeByte(DW.OP.WASM_global_u32);
                        try writer.writeInt(u32, global, wip.dwarf.endian);
                    },
                    .operand_stack => |operand_stack| {
                        try writer.writeByte(DW.OP.WASM_operand_stack);
                        try uleb128(writer, operand_stack);
                    },
                }
            },
        }
    }
};
/// In-progress debug info for a single nav (declaration) being emitted.
/// Bytes accumulate in the in-memory buffers below before being flushed
/// into their respective sections.
pub const WipNav = struct {
    dwarf: *Dwarf,
    pt: Zcu.PerThread,
    /// Unit that owns this nav's entries.
    unit: Unit.Index,
    /// The `.debug_info` entry being built.
    entry: Entry.Index,
    /// Set once any child DIE has been emitted (see `genVarDebugInfo`).
    any_children: bool,
    /// Function whose line program is being emitted; compared/updated by
    /// `setInlineFunc` and asserted non-`.none` by `genVarDebugInfo`.
    func: InternPool.Index,
    /// NOTE(review): presumably the index of the high_pc relocation for
    /// `func` — not used within this view; confirm against callers.
    func_high_reloc: u32,
    /// Pending `.debug_info` bytes for this nav.
    debug_info: std.ArrayListUnmanaged(u8),
    /// Pending `.debug_line` program bytes for this nav.
    debug_line: std.ArrayListUnmanaged(u8),
    /// Pending `.debug_loclists` bytes for this nav.
    debug_loclists: std.ArrayListUnmanaged(u8),
    /// Types referenced during emission that still need their own DIEs
    /// emitted (see `getTypeEntry`).
    pending_types: std.ArrayListUnmanaged(InternPool.Index),
/// Frees every scratch buffer owned by this in-progress nav.
pub fn deinit(wip_nav: *WipNav) void {
    const allocator = wip_nav.dwarf.gpa;
    inline for (.{
        &wip_nav.debug_info,
        &wip_nav.debug_line,
        &wip_nav.debug_loclists,
        &wip_nav.pending_types,
    }) |list| list.deinit(allocator);
}
/// Returns a writer that appends to the in-memory `.debug_info` buffer,
/// allocating through the Dwarf instance's general-purpose allocator.
pub fn infoWriter(wip_nav: *WipNav) std.ArrayListUnmanaged(u8).Writer {
    const gpa = wip_nav.dwarf.gpa;
    return wip_nav.debug_info.writer(gpa);
}
/// Distinguishes function parameters from local variables; selects the
/// matching `AbbrevCode` in `genVarDebugInfo`.
pub const VarTag = enum { local_arg, local_var };
/// Emits a child DIE for a local argument or variable: abbrev code, name
/// string, type reference, and location expression, in that order.
/// Must only be called while emitting a function.
pub fn genVarDebugInfo(
    wip_nav: *WipNav,
    tag: VarTag,
    name: []const u8,
    ty: Type,
    loc: Loc,
) UpdateError!void {
    assert(wip_nav.func != .none);
    wip_nav.any_children = true;
    // `VarTag` field names deliberately mirror `AbbrevCode` member names.
    const abbrev_code = switch (tag) {
        inline else => |comptime_tag| @field(AbbrevCode, @tagName(comptime_tag)),
    };
    const info_writer = wip_nav.debug_info.writer(wip_nav.dwarf.gpa);
    try uleb128(info_writer, @intFromEnum(abbrev_code));
    try wip_nav.strp(name);
    try wip_nav.refType(ty);
    try wip_nav.exprloc(loc);
}
/// Advances the DWARF line-program state machine by `delta_line` lines and
/// `delta_pc` bytes, preferring a single special opcode and falling back to
/// explicit `advance_line` / `advance_pc` / `const_add_pc` standard opcodes
/// for deltas that do not fit, ending with a row-emitting opcode.
/// NOTE(review): this span contained interleaved old/new diff-hunk lines
/// (duplicate params and `header` decls, `@@` markers); reconstructed from
/// the new-side (`wip_nav`) lines — confirm against upstream.
pub fn advancePCAndLine(
    wip_nav: *WipNav,
    delta_line: i33,
    delta_pc: u64,
) error{OutOfMemory}!void {
    const dlw = wip_nav.debug_line.writer(wip_nav.dwarf.gpa);
    const header = wip_nav.dwarf.debug_line.header;
    // Simplifies the op_advance computation below.
    assert(header.maximum_operations_per_instruction == 1);
    const delta_op: u64 = 0;
    // If the line delta is not representable in a special opcode, emit an
    // explicit advance_line and fold the remaining line delta to zero.
    const remaining_delta_line: i9 = @intCast(if (delta_line < header.line_base or
        delta_line - header.line_base >= header.line_range)
    remaining: {
        assert(delta_line != 0);
        try dlw.writeByte(DW.LNS.advance_line);
        try sleb128(dlw, delta_line);
        break :remaining 0;
    } else delta_line);
    const op_advance = delta_pc * header.maximum_operations_per_instruction + delta_op;
    // Largest operation advance encodable in one special opcode.
    const max_op_advance: u9 = (std.math.maxInt(u8) - header.opcode_base) / header.line_range;
    const remaining_op_advance: u8 = @intCast(if (op_advance >= 2 * max_op_advance) remaining: {
        try dlw.writeByte(DW.LNS.advance_pc);
        try uleb128(dlw, op_advance);
        break :remaining 0;
    } else if (op_advance >= max_op_advance) remaining: {
        try dlw.writeByte(DW.LNS.const_add_pc);
        break :remaining op_advance - max_op_advance;
    } else op_advance);
    if (remaining_delta_line == 0 and remaining_op_advance == 0)
        try dlw.writeByte(DW.LNS.copy)
    else
        try dlw.writeByte(@intCast((remaining_delta_line - header.line_base) +
            (header.line_range * remaining_op_advance) + header.opcode_base));
}
/// Emits `DW.LNS.set_column` for the line program.
/// Defect fixed: this span contained both the removed `NavState` version
/// and the new `WipNav` version interleaved by the diff; only the new
/// definition is kept.
pub fn setColumn(wip_nav: *WipNav, column: u32) error{OutOfMemory}!void {
    const dlw = wip_nav.debug_line.writer(wip_nav.dwarf.gpa);
    try dlw.writeByte(DW.LNS.set_column);
    // DWARF columns are one-based; zero means "no column".
    try uleb128(dlw, column + 1);
}
/// Emits `DW.LNS.set_prologue_end`, flagging the next row as the
/// recommended breakpoint location after the function prologue.
/// Defect fixed: removed the interleaved old `NavState` definition left
/// behind by the diff; only the new `WipNav` version is kept.
pub fn setPrologueEnd(wip_nav: *WipNav) error{OutOfMemory}!void {
    const dlw = wip_nav.debug_line.writer(wip_nav.dwarf.gpa);
    try dlw.writeByte(DW.LNS.set_prologue_end);
}
/// Emits `DW.LNS.set_epilogue_begin`, flagging the next row as the start
/// of the function epilogue.
/// Defect fixed: removed the interleaved old `NavState` definition left
/// behind by the diff; only the new `WipNav` version is kept.
pub fn setEpilogueBegin(wip_nav: *WipNav) error{OutOfMemory}!void {
    const dlw = wip_nav.debug_line.writer(wip_nav.dwarf.gpa);
    try dlw.writeByte(DW.LNS.set_epilogue_begin);
}
/// Switches the line program to a different (inlined) function, emitting
/// either a `ZIG_set_decl` extended op plus relocation (incremental mode)
/// or `set_file` / `advance_line` ops describing the transition.
/// NOTE(review): this span contained interleaved old/new diff-hunk lines;
/// reconstructed from the new-side (`wip_nav`) lines — confirm upstream.
pub fn setInlineFunc(wip_nav: *WipNav, func: InternPool.Index) UpdateError!void {
    const zcu = wip_nav.pt.zcu;
    const dwarf = wip_nav.dwarf;
    if (wip_nav.func == func) return;
    const new_func_info = zcu.funcInfo(func);
    const new_file = zcu.navFileScopeIndex(new_func_info.owner_nav);
    const new_unit = try dwarf.getUnit(zcu.fileByIndex(new_file).mod);
    const dlw = wip_nav.debug_line.writer(dwarf.gpa);
    if (dwarf.incremental()) {
        // Refer to the target nav's entry so the line program stays valid
        // when navs move; reserve section-offset bytes for the relocation.
        const new_nav_gop = try dwarf.navs.getOrPut(dwarf.gpa, new_func_info.owner_nav);
        errdefer _ = dwarf.navs.pop();
        if (!new_nav_gop.found_existing) new_nav_gop.value_ptr.* = try dwarf.addCommonEntry(new_unit);
        try dlw.writeByte(DW.LNS.extended_op);
        try uleb128(dlw, 1 + dwarf.sectionOffsetBytes());
        try dlw.writeByte(DW.LNE.ZIG_set_decl);
        try dwarf.debug_line.section.getUnit(wip_nav.unit).cross_section_relocs.append(dwarf.gpa, .{
            .source_entry = wip_nav.entry.toOptional(),
            .source_off = @intCast(wip_nav.debug_line.items.len),
            .target_sec = .debug_info,
            .target_unit = new_unit,
            .target_entry = new_nav_gop.value_ptr.toOptional(),
        });
        try dlw.writeByteNTimes(0, dwarf.sectionOffsetBytes());
        return;
    }
    const old_func_info = zcu.funcInfo(wip_nav.func);
    const old_file = zcu.navFileScopeIndex(old_func_info.owner_nav);
    if (old_file != new_file) {
        const new_file_gop = try dwarf.getUnitFiles(new_unit).getOrPut(dwarf.gpa, new_file);
        try dlw.writeByte(DW.LNS.set_file);
        try uleb128(dlw, new_file_gop.index);
    }
    const old_src_line: i33 = zcu.navSrcLine(old_func_info.owner_nav);
    const new_src_line: i33 = zcu.navSrcLine(new_func_info.owner_nav);
    if (new_src_line != old_src_line) {
        try dlw.writeByte(DW.LNS.advance_line);
        try sleb128(dlw, new_src_line - old_src_line);
    }
    wip_nav.func = func;
}
/// Records a relocation from the current `.debug_info` position to `off`
/// within the given section/unit/entry, then reserves section-offset-sized
/// zero bytes that relocation resolution will later fill in.
fn infoSectionOffset(wip_nav: *WipNav, sec: Section.Index, unit: Unit.Index, entry: Entry.Index, off: u32) UpdateError!void {
    const dwarf = wip_nav.dwarf;
    const gpa = dwarf.gpa;
    const info_unit = dwarf.debug_info.section.getUnit(wip_nav.unit);
    const source_entry = wip_nav.entry.toOptional();
    const source_off: u32 = @intCast(wip_nav.debug_info.items.len);
    if (sec != .debug_info) {
        // Target lives in a different section entirely.
        try info_unit.cross_section_relocs.append(gpa, .{
            .source_entry = source_entry,
            .source_off = source_off,
            .target_sec = sec,
            .target_unit = unit,
            .target_entry = entry.toOptional(),
            .target_off = off,
        });
    } else if (unit == wip_nav.unit) {
        // Same section and same unit: an entry-relative reloc suffices.
        try info_unit.cross_entry_relocs.append(gpa, .{
            .source_entry = source_entry,
            .source_off = source_off,
            .target_entry = entry,
            .target_off = off,
        });
    } else {
        // Same section, different unit.
        try info_unit.cross_unit_relocs.append(gpa, .{
            .source_entry = source_entry,
            .source_off = source_off,
            .target_unit = unit,
            .target_entry = entry.toOptional(),
            .target_off = off,
        });
    }
    try wip_nav.debug_info.appendNTimes(gpa, 0, dwarf.sectionOffsetBytes());
}
/// Emits a section offset into `.debug_str` referring to `str`,
/// interning the string first if it is not already present.
fn strp(wip_nav: *WipNav, str: []const u8) UpdateError!void {
    const dwarf = wip_nav.dwarf;
    const str_entry = try dwarf.debug_str.addString(dwarf, str);
    try wip_nav.infoSectionOffset(.debug_str, StringSection.unit, str_entry, 0);
}
/// Reserves an address-sized zero field in `.debug_info` and records an
/// external relocation against symbol `sym_index` that will fill it.
fn addrSym(wip_nav: *WipNav, sym_index: u32) UpdateError!void {
    const dwarf = wip_nav.dwarf;
    const gpa = dwarf.gpa;
    const info_unit = dwarf.debug_info.section.getUnit(wip_nav.unit);
    try info_unit.external_relocs.append(gpa, .{
        .source_entry = wip_nav.entry,
        .source_off = @intCast(wip_nav.debug_info.items.len),
        .target_sym = sym_index,
    });
    try wip_nav.debug_info.appendNTimes(gpa, 0, @intFromEnum(dwarf.address_size));
}
/// Emits `loc` as a DWARF exprloc: a uleb128 length prefix followed by the
/// expression bytes. The length is determined by a dry run through a
/// counting writer before the real bytes are written.
fn exprloc(wip_nav: *WipNav, loc: Loc) UpdateError!void {
    if (loc == .empty) return;
    // Stand-in "wip" satisfying `Loc.write`'s interface (`infoWriter` and
    // `addrSym`) that discards bytes and only counts them.
    var wip: struct {
        const Info = std.io.CountingWriter(std.io.NullWriter);
        dwarf: *Dwarf,
        debug_info: Info,
        fn infoWriter(wip: *@This()) Info.Writer {
            return wip.debug_info.writer();
        }
        fn addrSym(wip: *@This(), _: u32) error{}!void {
            // An address symbol contributes exactly address-size bytes.
            wip.debug_info.bytes_written += @intFromEnum(wip.dwarf.address_size);
        }
    } = .{
        .dwarf = wip_nav.dwarf,
        .debug_info = std.io.countingWriter(std.io.null_writer),
    };
    try loc.write(&wip);
    try uleb128(wip_nav.debug_info.writer(wip_nav.dwarf.gpa), wip.debug_info.bytes_written);
    // Second pass writes the real bytes (and real relocations) for the
    // length just emitted.
    try loc.write(wip_nav);
}
/// Returns the (unit, entry) pair identifying `ty`'s `.debug_info` entry,
/// creating the entry on first use. Types declared by a source instruction go
/// in the unit of the module that declares them; all other types go in the
/// main unit and are queued on `pending_types` so `flush` emits their bodies.
fn getTypeEntry(wip_nav: *WipNav, ty: Type) UpdateError!struct { Unit.Index, Entry.Index } {
const zcu = wip_nav.pt.zcu;
const ip = &zcu.intern_pool;
const maybe_inst_index = ty.typeDeclInst(zcu);
const unit = if (maybe_inst_index) |inst_index|
try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFull(ip).file).mod)
else
.main;
// Cache hit: the entry was already allocated for this type.
const gop = try wip_nav.dwarf.types.getOrPut(wip_nav.dwarf.gpa, ty.toIntern());
if (gop.found_existing) return .{ unit, gop.value_ptr.* };
const entry = try wip_nav.dwarf.addCommonEntry(unit);
gop.value_ptr.* = entry;
// Declared types are emitted by their declaration; anonymous ones must be
// emitted by us, so defer them to `flush`.
if (maybe_inst_index == null) try wip_nav.pending_types.append(wip_nav.dwarf.gpa, ty.toIntern());
return .{ unit, entry };
}
/// Emits a `.debug_info` offset reference to the entry for `ty`, creating the
/// entry if necessary (see `getTypeEntry`).
fn refType(wip_nav: *WipNav, ty: Type) UpdateError!void {
const unit, const entry = try wip_nav.getTypeEntry(ty);
try wip_nav.infoSectionOffset(.debug_info, unit, entry, 0);
}
/// Emits a forward reference within the current unit: appends a placeholder
/// offset plus an unfinished cross-entry relocation, and returns the reloc's
/// index. The caller must later resolve it with `finishForward`.
fn refForward(wip_nav: *WipNav) std.mem.Allocator.Error!u32 {
const dwarf = wip_nav.dwarf;
const cross_entry_relocs = &dwarf.debug_info.section.getUnit(wip_nav.unit).cross_entry_relocs;
const reloc_index: u32 = @intCast(cross_entry_relocs.items.len);
try cross_entry_relocs.append(dwarf.gpa, .{
.source_entry = wip_nav.entry.toOptional(),
.source_off = @intCast(wip_nav.debug_info.items.len),
// Target is unknown until `finishForward` fills these in.
.target_entry = undefined,
.target_off = undefined,
});
try wip_nav.debug_info.appendNTimes(dwarf.gpa, 0, dwarf.sectionOffsetBytes());
return reloc_index;
}
/// Resolves a forward reference created by `refForward`: the target becomes the
/// current entry at the current `.debug_info` write position.
fn finishForward(wip_nav: *WipNav, reloc_index: u32) void {
const reloc = &wip_nav.dwarf.debug_info.section.getUnit(wip_nav.unit).cross_entry_relocs.items[reloc_index];
reloc.target_entry = wip_nav.entry;
reloc.target_off = @intCast(wip_nav.debug_info.items.len);
}
/// Emits the abbrev code selected by the enum tag type's signedness, followed
/// by the LEB128-encoded constant value of enum field `field_index`. Enums with
/// no explicit values use the field index itself as the value.
fn enumConstValue(
wip_nav: *WipNav,
loaded_enum: InternPool.LoadedEnumType,
abbrev_code: std.enums.EnumFieldStruct(std.builtin.Signedness, AbbrevCode, null),
field_index: usize,
) std.mem.Allocator.Error!void {
const zcu = wip_nav.pt.zcu;
const ip = &zcu.intern_pool;
const diw = wip_nav.debug_info.writer(wip_nav.dwarf.gpa);
// comptime_int tags have no fixed int info; treat them as signed.
const signedness = switch (loaded_enum.tag_ty) {
.comptime_int_type => .signed,
else => Type.fromInterned(loaded_enum.tag_ty).intInfo(zcu).signedness,
};
// Select the signed or unsigned abbrev variant at comptime.
try uleb128(diw, @intFromEnum(switch (signedness) {
inline .signed, .unsigned => |ct_signedness| @field(abbrev_code, @tagName(ct_signedness)),
}));
if (loaded_enum.values.len > 0) switch (ip.indexToKey(loaded_enum.values.get(ip)[field_index]).int.storage) {
.u64 => |value| switch (signedness) {
.signed => try sleb128(diw, value),
.unsigned => try uleb128(diw, value),
},
.i64 => |value| switch (signedness) {
.signed => try sleb128(diw, value),
// A negative i64 value cannot appear in an unsigned tag type.
.unsigned => unreachable,
},
.big_int => |big_int| {
// Manual LEB128 of a big integer: emit 7 bits per output byte,
// converting a negative magnitude to two's complement on the fly
// via the rippling `carry` (~abs + 1).
const bits = big_int.bitCountTwosCompForSignedness(signedness);
// One output byte per 7 bits of the value.
try wip_nav.debug_info.ensureUnusedCapacity(wip_nav.dwarf.gpa, std.math.divCeil(usize, bits, 7) catch unreachable);
var bit: usize = 0;
var carry: u1 = 1;
while (bit < bits) : (bit += 7) {
const limb_bits = @typeInfo(std.math.big.Limb).Int.bits;
const limb_index = bit / limb_bits;
const limb_shift: std.math.Log2Int(std.math.big.Limb) = @intCast(bit % limb_bits);
const low_abs_part: u7 = @truncate(big_int.limbs[limb_index] >> limb_shift);
// A 7-bit group may straddle a limb boundary; in that case pull
// the missing high bits from the next limb (or from the sign
// extension past the final limb).
const abs_part = if (limb_shift > limb_bits - 7) abs_part: {
const next_limb: std.math.big.Limb = if (limb_index + 1 < big_int.limbs.len)
big_int.limbs[limb_index + 1]
else if (big_int.positive) 0 else std.math.maxInt(std.math.big.Limb);
// `-%limb_shift` wraps to `limb_bits - limb_shift`, placing the
// next limb's low bits just above the bits taken from this limb.
const high_abs_part: u7 = @truncate(next_limb << -%limb_shift);
break :abs_part high_abs_part | low_abs_part;
} else low_abs_part;
const twos_comp_part = if (big_int.positive) abs_part else twos_comp_part: {
const twos_comp_part, carry = @addWithOverflow(~abs_part, carry);
break :twos_comp_part twos_comp_part;
};
// Continuation bit (0x80) on all bytes except the last.
wip_nav.debug_info.appendAssumeCapacity(@as(u8, if (bit + 7 < bits) 0x80 else 0x00) | twos_comp_part);
}
},
.lazy_align, .lazy_size => unreachable,
} else switch (signedness) {
// No explicit values: the field index is the tag value.
.signed => try sleb128(diw, field_index),
.unsigned => try uleb128(diw, field_index),
}
}
/// Drains the pending type queue, emitting debug info for each queued type.
/// `updateType` may enqueue further types, so loop until the queue empties.
fn flush(wip_nav: *WipNav) UpdateError!void {
while (wip_nav.pending_types.popOrNull()) |ty| try wip_nav.dwarf.updateType(wip_nav.pt, ty, &wip_nav.pending_types);
}
};
/// An entry in a per-Nav abbrev table: a type whose DIE is (or will be)
/// emitted into a `.debug_info` atom.
pub const AbbrevEntry = struct {
// Atom that holds this entry's bytes.
atom_index: Atom.Index,
// The Zig type this entry describes.
type: Type,
// Byte offset of the entry within the atom's debug info buffer;
// assigned while the DIE is emitted (see `commitNavState`).
offset: u32,
};
/// A pending patch of a 4-byte type reference inside a `.debug_info` atom.
pub const AbbrevRelocation = struct {
/// If target is null, we deal with a local relocation that is based on simple offset + addend
/// only.
target: ?u32,
// Atom whose buffer contains the bytes to patch.
atom_index: Atom.Index,
// Byte offset within the atom's buffer where the u32 is written.
offset: u32,
// Added to the resolved target offset before writing.
addend: u32,
};
/// A pending address patch inside a DWARF expression (exprloc), resolved
/// against the linker's symbol table.
pub const ExprlocRelocation = struct {
/// Type of the relocation: direct load ref, or GOT load ref (via GOT table)
type: enum {
direct_load,
got_load,
},
/// Index of the target in the linker's locals symbol table.
target: u32,
/// Offset within the debug info buffer where to patch up the address value.
offset: u32,
};
pub const PtrWidth = enum { p32, p64 };
/// Abbreviation codes for the `.debug_abbrev` table. Each DIE in `.debug_info`
/// begins with one of these values (ULEB128-encoded); `null` (0) terminates a
/// sibling list. The declaration order here must match the order the abbrevs
/// are written in `writeDbgAbbrev`.
pub const AbbrevCode = enum(u8) {
null,
padding,
compile_unit,
subprogram,
subprogram_retvoid,
base_type,
ptr_type,
struct_type,
struct_member,
enum_type,
enum_variant,
union_type,
zero_bit_type,
parameter,
variable,
array_type,
array_dim,
};
/// The reloc offset for the virtual address of a function in its Line Number Program.
/// Size is a virtual address integer.
const dbg_line_vaddr_reloc_index = 3;
/// The reloc offset for the virtual address of a function in its .debug_info TAG.subprogram.
/// Size is a virtual address integer.
const dbg_info_low_pc_reloc_index = 1;
/// Smallest gap (in bytes) that can be filled with DWARF nop padding; an atom
/// that would leave less room than this before its neighbor is moved instead.
const min_nop_size = 2;
/// When allocating, the ideal_capacity is calculated by
/// actual_capacity + (actual_capacity / ideal_factor)
const ideal_factor = 3;
/// Constructs a `Dwarf` for the given output file in the requested format.
/// The pointer width is derived from the target address space, and the
/// `.debug_line` header parameters from the target architecture: x86_64 and
/// aarch64 use a line_base/line_range pair suited to special opcodes, while
/// every other architecture gets a degenerate range so that only standard
/// opcodes are emitted.
pub fn init(lf: *File, format: Format) Dwarf {
    const comp = lf.comp;
    const target = comp.root_mod.resolved_target.result;
    // Only 32-bit and 64-bit address spaces are supported.
    const bit_width = target.ptrBitWidth();
    const ptr_width: PtrWidth = if (bit_width <= 32)
        .p32
    else if (bit_width <= 64)
        .p64
    else
        unreachable;
    // Architectures whose line number programs benefit from special opcodes.
    const tuned_line_encoding = switch (target.cpu.arch) {
        .x86_64, .aarch64 => true,
        else => false,
    };
    return .{
        .allocator = comp.gpa,
        .bin_file = lf,
        .format = format,
        .ptr_width = ptr_width,
        .dbg_line_header = .{
            .minimum_instruction_length = 1,
            .maximum_operations_per_instruction = 1,
            .default_is_stmt = true,
            .line_base = if (tuned_line_encoding) -5 else 1,
            .line_range = if (tuned_line_encoding) 14 else 1,
            .opcode_base = DW.LNS.set_isa + 1,
        },
    };
}
/// Releases all memory owned by this `Dwarf`. Call exactly once; the instance
/// is invalid afterwards.
pub fn deinit(self: *Dwarf) void {
    const allocator = self.allocator;
    // Line number program atoms and their bookkeeping.
    self.src_fn_free_list.deinit(allocator);
    self.src_fns.deinit(allocator);
    self.src_fn_navs.deinit(allocator);
    // .debug_info atoms and their bookkeeping.
    self.di_atom_free_list.deinit(allocator);
    self.di_atoms.deinit(allocator);
    self.di_atom_navs.deinit(allocator);
    // Shared tables.
    self.strtab.deinit(allocator);
    self.di_files.deinit(allocator);
    self.global_abbrev_relocs.deinit(allocator);
}
/// Initializes Nav's state and its matching output buffers.
/// Call this before `commitNavState`.
/// For functions this also emits the fixed-layout prologue of the Nav's
/// `.debug_line` fragment and the start of its TAG.subprogram DIE; several
/// byte positions within those buffers are asserted because `commitNavState`
/// and `updateNavLineNumber` later patch them in place.
pub fn initNavState(self: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !NavState {
const tracy = trace(@src());
defer tracy.end();
const nav = pt.zcu.intern_pool.getNav(nav_index);
log.debug("initNavState {}", .{nav.fqn.fmt(&pt.zcu.intern_pool)});
const gpa = self.allocator;
var nav_state: NavState = .{
.dwarf = self,
.pt = pt,
.di_atom_navs = &self.di_atom_navs,
.dbg_line_func = undefined,
.dbg_line = std.ArrayList(u8).init(gpa),
.dbg_info = std.ArrayList(u8).init(gpa),
.abbrev_type_arena = std.heap.ArenaAllocator.init(gpa),
.abbrev_table = .{},
.abbrev_resolver = .{},
.abbrev_relocs = .{},
.exprloc_relocs = .{},
};
errdefer nav_state.deinit();
const dbg_line_buffer = &nav_state.dbg_line;
const dbg_info_buffer = &nav_state.dbg_info;
const di_atom_index = try self.getOrCreateAtomForNav(.di_atom, nav_index);
const nav_val = Value.fromInterned(nav.status.resolved.val);
switch (nav_val.typeOf(pt.zcu).zigTypeTag(pt.zcu)) {
.Fn => {
_ = try self.getOrCreateAtomForNav(.src_fn, nav_index);
// For functions we need to add a prologue to the debug line program.
const ptr_width_bytes = self.ptrWidthBytes();
// Capacity for: set_address op, advance_line, set_file, set_column, copy.
try dbg_line_buffer.ensureTotalCapacity((3 + ptr_width_bytes) + (1 + 4) + (1 + 4) + (1 + 5) + 1);
nav_state.dbg_line_func = nav_val.toIntern();
const func = nav_val.getFunction(pt.zcu).?;
log.debug("src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
pt.zcu.navSrcLine(nav_index),
func.lbrace_line,
func.rbrace_line,
});
const line: u28 = @intCast(pt.zcu.navSrcLine(nav_index) + func.lbrace_line);
dbg_line_buffer.appendSliceAssumeCapacity(&.{
DW.LNS.extended_op,
ptr_width_bytes + 1,
DW.LNE.set_address,
});
// This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`.
assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len);
dbg_line_buffer.appendNTimesAssumeCapacity(0, ptr_width_bytes);
dbg_line_buffer.appendAssumeCapacity(DW.LNS.advance_line);
// This is the "relocatable" relative line offset from the previous function's end curly
// to this function's begin curly.
assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len);
// Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later.
leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line);
dbg_line_buffer.appendAssumeCapacity(DW.LNS.set_file);
assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len);
// Once we support more than one source file, this will have the ability to be more
// than one possible value.
const file_index = try self.addDIFile(pt.zcu, nav_index);
leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index);
dbg_line_buffer.appendAssumeCapacity(DW.LNS.set_column);
// Column fits in the 5 bytes reserved above, so this cannot fail.
leb128.writeUleb128(dbg_line_buffer.writer(), func.lbrace_column + 1) catch unreachable;
// Emit a line for the begin curly with prologue_end=false. The codegen will
// do the work of setting prologue_end=true and epilogue_begin=true.
dbg_line_buffer.appendAssumeCapacity(DW.LNS.copy);
// .debug_info subprogram
const nav_name_slice = nav.name.toSlice(&pt.zcu.intern_pool);
const nav_linkage_name_slice = nav.fqn.toSlice(&pt.zcu.intern_pool);
// Capacity for: abbrev code, low_pc, high_pc, type ref, and the two
// null-terminated name strings.
try dbg_info_buffer.ensureUnusedCapacity(1 + ptr_width_bytes + 4 + 4 +
(nav_name_slice.len + 1) + (nav_linkage_name_slice.len + 1));
const fn_ret_type = nav_val.typeOf(pt.zcu).fnReturnType(pt.zcu);
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(pt);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(
@as(AbbrevCode, if (fn_ret_has_bits) .subprogram else .subprogram_retvoid),
));
// These get overwritten after generating the machine code. These values are
// "relocations" and have to be in this fixed place so that functions can be
// moved in virtual address space.
assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len);
dbg_info_buffer.appendNTimesAssumeCapacity(0, ptr_width_bytes); // DW.AT.low_pc, DW.FORM.addr
assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len);
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4); // DW.AT.high_pc, DW.FORM.data4
if (fn_ret_has_bits) {
try nav_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(dbg_info_buffer.items.len));
dbg_info_buffer.appendNTimesAssumeCapacity(0, 4); // DW.AT.type, DW.FORM.ref4
}
dbg_info_buffer.appendSliceAssumeCapacity(
nav_name_slice[0 .. nav_name_slice.len + 1],
); // DW.AT.name, DW.FORM.string
dbg_info_buffer.appendSliceAssumeCapacity(
nav_linkage_name_slice[0 .. nav_linkage_name_slice.len + 1],
); // DW.AT.linkage_name, DW.FORM.string
},
else => {
// TODO implement .debug_info for global variables
},
}
return nav_state;
}
/// Finalizes the debug info for a Nav after codegen: patches the function's
/// low_pc/high_pc and line-program address with its final `sym_addr`/`sym_size`,
/// allocates (or relocates) the Nav's `.debug_line` and `.debug_info` atoms,
/// resolves the Nav-local abbrev and exprloc relocations, and writes the result
/// to the output file. `nav_state` must have been produced by `initNavState`.
pub fn commitNavState(
self: *Dwarf,
pt: Zcu.PerThread,
nav_index: InternPool.Nav.Index,
sym_addr: u64,
sym_size: u64,
nav_state: *NavState,
) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = self.allocator;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
const target = zcu.navFileScope(nav_index).mod.resolved_target.result;
const target_endian = target.cpu.arch.endian();
var dbg_line_buffer = &nav_state.dbg_line;
var dbg_info_buffer = &nav_state.dbg_info;
const nav_val = Value.fromInterned(nav.status.resolved.val);
switch (nav_val.typeOf(zcu).zigTypeTag(zcu)) {
.Fn => {
try nav_state.setInlineFunc(nav_val.toIntern());
// Since the Nav is a function, we need to update the .debug_line program.
// Perform the relocations based on vaddr.
// The reloc indices below were asserted in `initNavState`.
switch (self.ptr_width) {
.p32 => {
{
const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4];
mem.writeInt(u32, ptr, @intCast(sym_addr), target_endian);
}
{
const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4];
mem.writeInt(u32, ptr, @intCast(sym_addr), target_endian);
}
},
.p64 => {
{
const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8];
mem.writeInt(u64, ptr, sym_addr, target_endian);
}
{
const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8];
mem.writeInt(u64, ptr, sym_addr, target_endian);
}
},
}
{
// high_pc is stored as a 4-byte length relative to low_pc.
log.debug("relocating subprogram high PC value: {x} => {x}", .{
self.getRelocDbgInfoSubprogramHighPC(),
sym_size,
});
const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4];
mem.writeInt(u32, ptr, @intCast(sym_size), target_endian);
}
// Terminate this function's line number program.
try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS.extended_op, 1, DW.LNE.end_sequence });
// Now we have the full contents and may allocate a region to store it.
// This logic is nearly identical to the logic below in `updateNavDebugInfo` for
// `TextBlock` and the .debug_info. If you are editing this logic, you
// probably need to edit that logic too.
const src_fn_index = self.src_fn_navs.get(nav_index).?;
const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
src_fn.len = @intCast(dbg_line_buffer.items.len);
if (self.src_fn_last_index) |last_index| blk: {
if (src_fn_index == last_index) break :blk;
if (src_fn.next_index) |next_index| {
const next = self.getAtomPtr(.src_fn, next_index);
// Update existing function - non-last item.
if (src_fn.off + src_fn.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
if (src_fn.prev_index) |prev_index| {
self.src_fn_free_list.put(gpa, prev_index, {}) catch {};
self.getAtomPtr(.src_fn, prev_index).next_index = src_fn.next_index;
}
next.prev_index = src_fn.prev_index;
src_fn.next_index = null;
// Populate where it used to be with NOPs.
if (self.bin_file.cast(.elf)) |elf_file| {
const debug_line_sect = &elf_file.shdrs.items[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len);
} else if (self.bin_file.cast(.macho)) |macho_file| {
if (macho_file.base.isRelocatable()) {
const debug_line_sect = &macho_file.sections.items(.header)[macho_file.debug_line_sect_index.?];
const file_pos = debug_line_sect.offset + src_fn.off;
try pwriteDbgLineNops(macho_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len);
} else {
// Non-relocatable MachO writes into the separate dSYM file.
const d_sym = macho_file.getDebugSymbols().?;
const debug_line_sect = d_sym.getSectionPtr(d_sym.debug_line_section_index.?);
const file_pos = debug_line_sect.offset + src_fn.off;
try pwriteDbgLineNops(d_sym.file, file_pos, 0, &[0]u8{}, src_fn.len);
}
} else if (self.bin_file.cast(.wasm)) |wasm_file| {
_ = wasm_file;
// const debug_line = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
// writeDbgLineNopsBuffered(debug_line.items, src_fn.off, 0, &.{}, src_fn.len);
} else unreachable;
// TODO Look at the free list before appending at the end.
src_fn.prev_index = last_index;
const last = self.getAtomPtr(.src_fn, last_index);
last.next_index = src_fn_index;
self.src_fn_last_index = src_fn_index;
src_fn.off = last.off + padToIdeal(last.len);
}
} else if (src_fn.prev_index == null) {
// Append new function.
// TODO Look at the free list before appending at the end.
src_fn.prev_index = last_index;
const last = self.getAtomPtr(.src_fn, last_index);
last.next_index = src_fn_index;
self.src_fn_last_index = src_fn_index;
src_fn.off = last.off + padToIdeal(last.len);
}
} else {
// This is the first function of the Line Number Program.
self.src_fn_first_index = src_fn_index;
self.src_fn_last_index = src_fn_index;
src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes(&[0][]u8{}, &[0][]u8{}));
}
const last_src_fn_index = self.src_fn_last_index.?;
const last_src_fn = self.getAtom(.src_fn, last_src_fn_index);
const needed_size = last_src_fn.off + last_src_fn.len;
// Gaps to the neighbors are filled with line-program NOPs below.
const prev_padding_size: u32 = if (src_fn.prev_index) |prev_index| blk: {
const prev = self.getAtom(.src_fn, prev_index);
break :blk src_fn.off - (prev.off + prev.len);
} else 0;
const next_padding_size: u32 = if (src_fn.next_index) |next_index| blk: {
const next = self.getAtom(.src_fn, next_index);
break :blk next.off - (src_fn.off + src_fn.len);
} else 0;
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_line section.
if (self.bin_file.cast(.elf)) |elf_file| {
const shdr_index = elf_file.debug_line_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
const debug_line_sect = elf_file.shdrs.items[shdr_index];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(
elf_file.base.file.?,
file_pos,
prev_padding_size,
dbg_line_buffer.items,
next_padding_size,
);
} else if (self.bin_file.cast(.macho)) |macho_file| {
if (macho_file.base.isRelocatable()) {
const sect_index = macho_file.debug_line_sect_index.?;
try macho_file.growSection(sect_index, needed_size);
const sect = macho_file.sections.items(.header)[sect_index];
const file_pos = sect.offset + src_fn.off;
try pwriteDbgLineNops(
macho_file.base.file.?,
file_pos,
prev_padding_size,
dbg_line_buffer.items,
next_padding_size,
);
} else {
const d_sym = macho_file.getDebugSymbols().?;
const sect_index = d_sym.debug_line_section_index.?;
try d_sym.growSection(sect_index, needed_size, true, macho_file);
const sect = d_sym.getSection(sect_index);
const file_pos = sect.offset + src_fn.off;
try pwriteDbgLineNops(
d_sym.file,
file_pos,
prev_padding_size,
dbg_line_buffer.items,
next_padding_size,
);
}
} else if (self.bin_file.cast(.wasm)) |wasm_file| {
_ = wasm_file;
// const atom = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?);
// const debug_line = &atom.code;
// const segment_size = debug_line.items.len;
// if (needed_size != segment_size) {
// log.debug(" needed size does not equal allocated size: {d}", .{needed_size});
// if (needed_size > segment_size) {
// log.debug(" allocating {d} bytes for 'debug line' information", .{needed_size - segment_size});
// try debug_line.resize(self.allocator, needed_size);
// @memset(debug_line.items[segment_size..], 0);
// }
// debug_line.items.len = needed_size;
// }
// writeDbgLineNopsBuffered(
// debug_line.items,
// src_fn.off,
// prev_padding_size,
// dbg_line_buffer.items,
// next_padding_size,
// );
} else unreachable;
// .debug_info - End the TAG.subprogram children.
try dbg_info_buffer.append(0);
},
else => {},
}
// Non-function Navs currently emit no .debug_info (see initNavState).
if (dbg_info_buffer.items.len == 0)
return;
const di_atom_index = self.di_atom_navs.get(nav_index).?;
if (nav_state.abbrev_table.items.len > 0) {
// Now we emit the .debug_info types of the Nav. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
// relocations yet.
var sym_index: usize = 0;
while (sym_index < nav_state.abbrev_table.items.len) : (sym_index += 1) {
const symbol = &nav_state.abbrev_table.items[sym_index];
const ty = symbol.type;
// Error set types are resolved globally at flush time instead.
if (ip.isErrorSetType(ty.toIntern())) continue;
symbol.offset = @intCast(dbg_info_buffer.items.len);
try nav_state.addDbgInfoType(pt, di_atom_index, ty);
}
}
try self.updateNavDebugInfoAllocation(di_atom_index, @intCast(dbg_info_buffer.items.len));
// Resolve the type references recorded while emitting DIEs above.
while (nav_state.abbrev_relocs.popOrNull()) |reloc| {
if (reloc.target) |reloc_target| {
const symbol = nav_state.abbrev_table.items[reloc_target];
const ty = symbol.type;
if (ip.isErrorSetType(ty.toIntern())) {
log.debug("resolving %{d} deferred until flush", .{reloc_target});
try self.global_abbrev_relocs.append(gpa, .{
.target = null,
.offset = reloc.offset,
.atom_index = reloc.atom_index,
.addend = reloc.addend,
});
} else {
const atom = self.getAtom(.di_atom, symbol.atom_index);
const value = atom.off + symbol.offset + reloc.addend;
log.debug("{x}: [() => {x}] (%{d}, '{}')", .{
reloc.offset,
value,
reloc_target,
ty.fmt(pt),
});
mem.writeInt(
u32,
dbg_info_buffer.items[reloc.offset..][0..@sizeOf(u32)],
value,
target_endian,
);
}
} else {
// Local relocation: atom-relative offset + addend, no target symbol.
const atom = self.getAtom(.di_atom, reloc.atom_index);
mem.writeInt(
u32,
dbg_info_buffer.items[reloc.offset..][0..@sizeOf(u32)],
atom.off + reloc.offset + reloc.addend,
target_endian,
);
}
}
// Hand exprloc address relocations to the output format's relocation machinery.
while (nav_state.exprloc_relocs.popOrNull()) |reloc| {
if (self.bin_file.cast(.elf)) |elf_file| {
_ = elf_file; // TODO
} else if (self.bin_file.cast(.macho)) |macho_file| {
if (macho_file.base.isRelocatable()) {
// TODO
} else {
const d_sym = macho_file.getDebugSymbols().?;
try d_sym.relocs.append(d_sym.allocator, .{
.type = switch (reloc.type) {
.direct_load => .direct_load,
.got_load => .got_load,
},
.target = reloc.target,
.offset = reloc.offset + self.getAtom(.di_atom, di_atom_index).off,
.addend = 0,
});
}
} else unreachable;
}
try self.writeNavDebugInfo(di_atom_index, dbg_info_buffer.items);
}
/// Allocates or relocates a Nav's `.debug_info` atom so that `len` bytes fit.
/// Atoms form a doubly linked list ordered by file offset; an atom that grows
/// past its successor is unlinked, its old bytes are replaced with DWARF NOP
/// padding, and it is re-appended at the end of the list.
fn updateNavDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) !void {
const tracy = trace(@src());
defer tracy.end();
// This logic is nearly identical to the logic above in `updateNav` for
// `SrcFn` and the line number programs. If you are editing this logic, you
// probably need to edit that logic too.
const gpa = self.allocator;
const atom = self.getAtomPtr(.di_atom, atom_index);
atom.len = len;
if (self.di_atom_last_index) |last_index| blk: {
// Already the last atom: it can grow freely in place.
if (atom_index == last_index) break :blk;
if (atom.next_index) |next_index| {
const next = self.getAtomPtr(.di_atom, next_index);
// Update existing Nav - non-last item.
if (atom.off + atom.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
if (atom.prev_index) |prev_index| {
self.di_atom_free_list.put(gpa, prev_index, {}) catch {};
self.getAtomPtr(.di_atom, prev_index).next_index = atom.next_index;
}
next.prev_index = atom.prev_index;
atom.next_index = null;
// Populate where it used to be with NOPs.
if (self.bin_file.cast(.elf)) |elf_file| {
const debug_info_sect = &elf_file.shdrs.items[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false);
} else if (self.bin_file.cast(.macho)) |macho_file| {
if (macho_file.base.isRelocatable()) {
const debug_info_sect = macho_file.sections.items(.header)[macho_file.debug_info_sect_index.?];
const file_pos = debug_info_sect.offset + atom.off;
try pwriteDbgInfoNops(macho_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false);
} else {
// Non-relocatable MachO writes into the separate dSYM file.
const d_sym = macho_file.getDebugSymbols().?;
const debug_info_sect = d_sym.getSectionPtr(d_sym.debug_info_section_index.?);
const file_pos = debug_info_sect.offset + atom.off;
try pwriteDbgInfoNops(d_sym.file, file_pos, 0, &[0]u8{}, atom.len, false);
}
} else if (self.bin_file.cast(.wasm)) |wasm_file| {
_ = wasm_file;
// const debug_info_index = wasm_file.debug_info_atom.?;
// const debug_info = &wasm_file.getAtomPtr(debug_info_index).code;
// try writeDbgInfoNopsToArrayList(gpa, debug_info, atom.off, 0, &.{0}, atom.len, false);
} else unreachable;
// TODO Look at the free list before appending at the end.
atom.prev_index = last_index;
const last = self.getAtomPtr(.di_atom, last_index);
last.next_index = atom_index;
self.di_atom_last_index = atom_index;
atom.off = last.off + padToIdeal(last.len);
}
} else if (atom.prev_index == null) {
// Append new Nav.
// TODO Look at the free list before appending at the end.
atom.prev_index = last_index;
const last = self.getAtomPtr(.di_atom, last_index);
last.next_index = atom_index;
self.di_atom_last_index = atom_index;
atom.off = last.off + padToIdeal(last.len);
}
} else {
// This is the first Nav of the .debug_info
self.di_atom_first_index = atom_index;
self.di_atom_last_index = atom_index;
atom.off = @intCast(padToIdeal(self.dbgInfoHeaderBytes()));
}
}
/// Writes a Nav's finished `.debug_info` bytes to the output file at the
/// atom's allocated offset, surrounding them with DWARF NOP padding to cover
/// the gaps to the neighboring atoms. Must be called after
/// `updateNavDebugInfoAllocation` has positioned the atom.
fn writeNavDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []const u8) !void {
const tracy = trace(@src());
defer tracy.end();
// This logic is nearly identical to the logic above in `updateNav` for
// `SrcFn` and the line number programs. If you are editing this logic, you
// probably need to edit that logic too.
const atom = self.getAtom(.di_atom, atom_index);
const last_nav_index = self.di_atom_last_index.?;
const last_nav = self.getAtom(.di_atom, last_nav_index);
// +1 for a trailing zero to end the children of the nav tag.
const needed_size = last_nav.off + last_nav.len + 1;
const prev_padding_size: u32 = if (atom.prev_index) |prev_index| blk: {
const prev = self.getAtom(.di_atom, prev_index);
break :blk atom.off - (prev.off + prev.len);
} else 0;
const next_padding_size: u32 = if (atom.next_index) |next_index| blk: {
const next = self.getAtom(.di_atom, next_index);
break :blk next.off - (atom.off + atom.len);
} else 0;
// To end the children of the nav tag.
const trailing_zero = atom.next_index == null;
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_info section.
if (self.bin_file.cast(.elf)) |elf_file| {
const shdr_index = elf_file.debug_info_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
const debug_info_sect = &elf_file.shdrs.items[shdr_index];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(
elf_file.base.file.?,
file_pos,
prev_padding_size,
dbg_info_buf,
next_padding_size,
trailing_zero,
);
} else if (self.bin_file.cast(.macho)) |macho_file| {
if (macho_file.base.isRelocatable()) {
const sect_index = macho_file.debug_info_sect_index.?;
try macho_file.growSection(sect_index, needed_size);
const sect = macho_file.sections.items(.header)[sect_index];
const file_pos = sect.offset + atom.off;
try pwriteDbgInfoNops(
macho_file.base.file.?,
file_pos,
prev_padding_size,
dbg_info_buf,
next_padding_size,
trailing_zero,
);
} else {
// Non-relocatable MachO writes into the separate dSYM file.
const d_sym = macho_file.getDebugSymbols().?;
const sect_index = d_sym.debug_info_section_index.?;
try d_sym.growSection(sect_index, needed_size, true, macho_file);
const sect = d_sym.getSection(sect_index);
const file_pos = sect.offset + atom.off;
try pwriteDbgInfoNops(
d_sym.file,
file_pos,
prev_padding_size,
dbg_info_buf,
next_padding_size,
trailing_zero,
);
}
} else if (self.bin_file.cast(.wasm)) |wasm_file| {
_ = wasm_file;
// const info_atom = wasm_file.debug_info_atom.?;
// const debug_info = &wasm_file.getAtomPtr(info_atom).code;
// const segment_size = debug_info.items.len;
// if (needed_size != segment_size) {
// log.debug(" needed size does not equal allocated size: {d}", .{needed_size});
// if (needed_size > segment_size) {
// log.debug(" allocating {d} bytes for 'debug info' information", .{needed_size - segment_size});
// try debug_info.resize(self.allocator, needed_size);
// @memset(debug_info.items[segment_size..], 0);
// }
// debug_info.items.len = needed_size;
// }
// log.debug(" writeDbgInfoNopsToArrayList debug_info_len={d} offset={d} content_len={d} next_padding_size={d}", .{
// debug_info.items.len, atom.off, dbg_info_buf.len, next_padding_size,
// });
// try writeDbgInfoNopsToArrayList(
// gpa,
// debug_info,
// atom.off,
// prev_padding_size,
// dbg_info_buf,
// next_padding_size,
// trailing_zero,
// );
} else unreachable;
}
/// Patches the source line number of a function's line number program in
/// place. The line was written as a fixed-width 4-byte ULEB128 during
/// `initNavState` at offset `getRelocDbgLineOff()` precisely so it can be
/// overwritten here without re-emitting the whole program.
pub fn updateNavLineNumber(self: *Dwarf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !void {
const tracy = trace(@src());
defer tracy.end();
const atom_index = try self.getOrCreateAtomForNav(.src_fn, nav_index);
const atom = self.getAtom(.src_fn, atom_index);
// Nothing was ever emitted for this Nav; nothing to patch.
if (atom.len == 0) return;
const nav = zcu.intern_pool.getNav(nav_index);
const nav_val = Value.fromInterned(nav.status.resolved.val);
const func = nav_val.getFunction(zcu).?;
log.debug("src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
zcu.navSrcLine(nav_index),
func.lbrace_line,
func.rbrace_line,
});
const line: u28 = @intCast(zcu.navSrcLine(nav_index) + func.lbrace_line);
// Re-encode with the same fixed 4-byte width used at emit time.
var data: [4]u8 = undefined;
leb128.writeUnsignedFixed(4, &data, line);
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr = elf_file.shdrs.items[elf_file.debug_line_section_index.?];
const file_pos = shdr.sh_offset + atom.off + self.getRelocDbgLineOff();
try elf_file.base.file.?.pwriteAll(&data, file_pos);
},
.macho => {
const macho_file = self.bin_file.cast(File.MachO).?;
if (macho_file.base.isRelocatable()) {
const sect = macho_file.sections.items(.header)[macho_file.debug_line_sect_index.?];
const file_pos = sect.offset + atom.off + self.getRelocDbgLineOff();
try macho_file.base.file.?.pwriteAll(&data, file_pos);
} else {
// Non-relocatable MachO writes into the separate dSYM file.
const d_sym = macho_file.getDebugSymbols().?;
const sect = d_sym.getSection(d_sym.debug_line_section_index.?);
const file_pos = sect.offset + atom.off + self.getRelocDbgLineOff();
try d_sym.file.pwriteAll(&data, file_pos);
}
},
.wasm => {
// const wasm_file = self.bin_file.cast(File.Wasm).?;
// const offset = atom.off + self.getRelocDbgLineOff();
// const line_atom_index = wasm_file.debug_line_atom.?;
// wasm_file.getAtomPtr(line_atom_index).code.items[offset..][0..data.len].* = data;
},
else => unreachable,
}
}
/// Removes a Nav's debug info state: unlinks its SrcFn atom (line number
/// program) and its DI atom (.debug_info) from their respective linked lists
/// and updates the first/last pointers. The bytes in the output file are not
/// reclaimed here.
pub fn freeNav(self: *Dwarf, nav_index: InternPool.Nav.Index) void {
const gpa = self.allocator;
// Free SrcFn atom
if (self.src_fn_navs.fetchRemove(nav_index)) |kv| {
const src_fn_index = kv.value;
const src_fn = self.getAtom(.src_fn, src_fn_index);
_ = self.src_fn_free_list.remove(src_fn_index);
if (src_fn.prev_index) |prev_index| {
// The predecessor now owns the freed space; make it reusable.
self.src_fn_free_list.put(gpa, prev_index, {}) catch {};
const prev = self.getAtomPtr(.src_fn, prev_index);
prev.next_index = src_fn.next_index;
if (src_fn.next_index) |next_index| {
self.getAtomPtr(.src_fn, next_index).prev_index = prev_index;
} else {
self.src_fn_last_index = prev_index;
}
} else if (src_fn.next_index) |next_index| {
// No predecessor: the successor becomes the first atom.
self.src_fn_first_index = next_index;
self.getAtomPtr(.src_fn, next_index).prev_index = null;
}
if (self.src_fn_first_index == src_fn_index) {
self.src_fn_first_index = src_fn.next_index;
}
if (self.src_fn_last_index == src_fn_index) {
self.src_fn_last_index = src_fn.prev_index;
}
}
// Free DI atom
// NOTE(review): this branch intentionally differs from the SrcFn one above
// (no free-list maintenance yet, per the TODO below); the trailing
// `prev_index = null` / `next_index = null` else-arms are no-ops since the
// respective index is already null in those cases.
if (self.di_atom_navs.fetchRemove(nav_index)) |kv| {
const di_atom_index = kv.value;
const di_atom = self.getAtomPtr(.di_atom, di_atom_index);
if (self.di_atom_first_index == di_atom_index) {
self.di_atom_first_index = di_atom.next_index;
}
if (self.di_atom_last_index == di_atom_index) {
// TODO shrink the .debug_info section size here
self.di_atom_last_index = di_atom.prev_index;
}
if (di_atom.prev_index) |prev_index| {
self.getAtomPtr(.di_atom, prev_index).next_index = di_atom.next_index;
// TODO the free list logic like we do for SrcFn above
} else {
di_atom.prev_index = null;
}
if (di_atom.next_index) |next_index| {
self.getAtomPtr(.di_atom, next_index).prev_index = di_atom.prev_index;
} else {
di_atom.next_index = null;
}
}
}
/// Writes the complete `.debug_abbrev` section contents: a fixed table with
/// one abbreviation declaration per `AbbrevCode`, terminated by a lone 0 byte.
/// The DIE encodings emitted elsewhere (e.g. in `writeDbgInfoHeader`) must
/// match the attribute/form pairs declared here.
pub fn writeDbgAbbrev(self: *Dwarf) !void {
    // These are LEB encoded but since the values are all less than 127
    // we can simply append these bytes.
    // The padding tag is the exception: `DW.TAG.ZIG_padding` does not fit in
    // 7 bits, so it is spelled out below as an explicit 3-byte ULEB128
    // (continuation bit set on the first two bytes).
    // zig fmt: off
    const abbrev_buf = [_]u8{
        @intFromEnum(AbbrevCode.padding),
        @as(u8, 0x80) | @as(u7, @truncate(DW.TAG.ZIG_padding >> 0)),
        @as(u8, 0x80) | @as(u7, @truncate(DW.TAG.ZIG_padding >> 7)),
        @as(u8, 0x00) | @as(u7, @intCast(DW.TAG.ZIG_padding >> 14)),
        DW.CHILDREN.no,
        0, 0, // attribute list terminator
        @intFromEnum(AbbrevCode.compile_unit),
        DW.TAG.compile_unit,
        DW.CHILDREN.yes,
        DW.AT.stmt_list, DW.FORM.sec_offset,
        DW.AT.low_pc, DW.FORM.addr,
        DW.AT.high_pc, DW.FORM.addr,
        DW.AT.name, DW.FORM.strp,
        DW.AT.comp_dir, DW.FORM.strp,
        DW.AT.producer, DW.FORM.strp,
        DW.AT.language, DW.FORM.data2,
        0, 0,
        @intFromEnum(AbbrevCode.subprogram),
        DW.TAG.subprogram,
        DW.CHILDREN.yes,
        DW.AT.low_pc, DW.FORM.addr,
        DW.AT.high_pc, DW.FORM.data4,
        DW.AT.type, DW.FORM.ref4,
        DW.AT.name, DW.FORM.string,
        DW.AT.linkage_name, DW.FORM.string,
        0, 0,
        @intFromEnum(AbbrevCode.subprogram_retvoid),
        DW.TAG.subprogram,
        DW.CHILDREN.yes,
        DW.AT.low_pc, DW.FORM.addr,
        DW.AT.high_pc, DW.FORM.data4,
        DW.AT.name, DW.FORM.string,
        DW.AT.linkage_name, DW.FORM.string,
        0, 0,
        @intFromEnum(AbbrevCode.base_type),
        DW.TAG.base_type, DW.CHILDREN.no,
        DW.AT.encoding, DW.FORM.data1,
        DW.AT.byte_size, DW.FORM.udata,
        DW.AT.name, DW.FORM.string,
        0, 0,
        @intFromEnum(AbbrevCode.ptr_type),
        DW.TAG.pointer_type, DW.CHILDREN.no,
        DW.AT.type, DW.FORM.ref4,
        0, 0,
        @intFromEnum(AbbrevCode.struct_type),
        DW.TAG.structure_type, DW.CHILDREN.yes,
        DW.AT.byte_size, DW.FORM.udata,
        DW.AT.name, DW.FORM.string,
        0, 0,
        @intFromEnum(AbbrevCode.struct_member),
        DW.TAG.member,
        DW.CHILDREN.no,
        DW.AT.name, DW.FORM.string,
        DW.AT.type, DW.FORM.ref4,
        DW.AT.data_member_location, DW.FORM.udata,
        0, 0,
        @intFromEnum(AbbrevCode.enum_type),
        DW.TAG.enumeration_type,
        DW.CHILDREN.yes,
        DW.AT.byte_size, DW.FORM.udata,
        DW.AT.name, DW.FORM.string,
        0, 0,
        @intFromEnum(AbbrevCode.enum_variant),
        DW.TAG.enumerator, DW.CHILDREN.no,
        DW.AT.name, DW.FORM.string,
        DW.AT.const_value, DW.FORM.data8,
        0, 0,
        @intFromEnum(AbbrevCode.union_type),
        DW.TAG.union_type, DW.CHILDREN.yes,
        DW.AT.byte_size, DW.FORM.udata,
        DW.AT.name, DW.FORM.string,
        0, 0,
        @intFromEnum(AbbrevCode.zero_bit_type),
        DW.TAG.unspecified_type,
        DW.CHILDREN.no,
        0, 0,
        @intFromEnum(AbbrevCode.parameter),
        DW.TAG.formal_parameter,
        DW.CHILDREN.no,
        DW.AT.location, DW.FORM.exprloc,
        DW.AT.type, DW.FORM.ref4,
        DW.AT.name, DW.FORM.string,
        0, 0,
        @intFromEnum(AbbrevCode.variable),
        DW.TAG.variable,
        DW.CHILDREN.no,
        DW.AT.location, DW.FORM.exprloc,
        DW.AT.type, DW.FORM.ref4,
        DW.AT.name, DW.FORM.string,
        0, 0,
        @intFromEnum(AbbrevCode.array_type),
        DW.TAG.array_type,
        DW.CHILDREN.yes,
        DW.AT.name, DW.FORM.string,
        DW.AT.type, DW.FORM.ref4,
        0, 0,
        @intFromEnum(AbbrevCode.array_dim),
        DW.TAG.subrange_type,
        DW.CHILDREN.no,
        DW.AT.type, DW.FORM.ref4,
        DW.AT.count, DW.FORM.udata,
        0, 0,
        0, // table sentinel
    };
    // zig fmt: on
    // The table always lives at the start of .debug_abbrev.
    const abbrev_offset = 0;
    self.abbrev_table_offset = abbrev_offset;
    const needed_size = abbrev_buf.len;
    // Grow the container's .debug_abbrev section and write the table;
    // dispatch on output file format.
    if (self.bin_file.cast(.elf)) |elf_file| {
        const shdr_index = elf_file.debug_abbrev_section_index.?;
        try elf_file.growNonAllocSection(shdr_index, needed_size, 1, false);
        const debug_abbrev_sect = &elf_file.shdrs.items[shdr_index];
        const file_pos = debug_abbrev_sect.sh_offset + abbrev_offset;
        try elf_file.base.file.?.pwriteAll(&abbrev_buf, file_pos);
    } else if (self.bin_file.cast(.macho)) |macho_file| {
        if (macho_file.base.isRelocatable()) {
            // Relocatable object: debug sections live in the Mach-O file itself.
            const sect_index = macho_file.debug_abbrev_sect_index.?;
            try macho_file.growSection(sect_index, needed_size);
            const sect = macho_file.sections.items(.header)[sect_index];
            const file_pos = sect.offset + abbrev_offset;
            try macho_file.base.file.?.pwriteAll(&abbrev_buf, file_pos);
        } else {
            // Linked output: debug info goes into the separate dSYM bundle.
            const d_sym = macho_file.getDebugSymbols().?;
            const sect_index = d_sym.debug_abbrev_section_index.?;
            try d_sym.growSection(sect_index, needed_size, false, macho_file);
            const sect = d_sym.getSection(sect_index);
            const file_pos = sect.offset + abbrev_offset;
            try d_sym.file.pwriteAll(&abbrev_buf, file_pos);
        }
    } else if (self.bin_file.cast(.wasm)) |wasm_file| {
        // TODO: wasm debug output is currently stubbed out.
        _ = wasm_file;
        // const debug_abbrev = &wasm_file.getAtomPtr(wasm_file.debug_abbrev_atom.?).code;
        // try debug_abbrev.resize(gpa, needed_size);
        // debug_abbrev.items[0..abbrev_buf.len].* = abbrev_buf;
    } else unreachable;
}
/// Upper bound, in bytes, on the size of the .debug_info compile unit header
/// written by `writeDbgInfoHeader`.
fn dbgInfoHeaderBytes(self: *Dwarf) usize {
    _ = self;
    const upper_bound: usize = 120;
    return upper_bound;
}
/// Writes the `.debug_info` compile unit header (initial length, version,
/// abbrev offset, address size, and the compile_unit DIE), then NOP-pads the
/// gap between the end of the header and the first DI atom.
pub fn writeDbgInfoHeader(self: *Dwarf, zcu: *Zcu, low_pc: u64, high_pc: u64) !void {
    // If this value is null it means there is an error in the module;
    // leave debug_info_header_dirty=true.
    const first_dbg_info_off = self.getDebugInfoOff() orelse return;
    // We have a function to compute the upper bound size, because it's needed
    // for determining where to put the offset of the first `LinkBlock`.
    const needed_bytes = self.dbgInfoHeaderBytes();
    var di_buf = try std.ArrayList(u8).initCapacity(self.allocator, needed_bytes);
    defer di_buf.deinit();
    const comp = self.bin_file.comp;
    const target = comp.root_mod.resolved_target.result;
    const target_endian = target.cpu.arch.endian();
    // Size of the initial length field itself: 4 bytes for DWARF32, or the
    // 4-byte 0xffffffff escape plus an 8-byte length for DWARF64.
    const init_len_size: usize = switch (self.format) {
        .dwarf32 => 4,
        .dwarf64 => 12,
    };
    // initial length - length of the .debug_info contribution for this compilation unit,
    // not including the initial length itself.
    // We have to come back and write it later after we know the size.
    const after_init_len = di_buf.items.len + init_len_size;
    const dbg_info_end = self.getDebugInfoEnd().?;
    // NOTE(review): nothing in this function accounts for the `+ 1`
    // (the header is written with trailing_zero=false below); presumably it
    // covers a unit terminator byte written elsewhere — TODO confirm.
    const init_len = dbg_info_end - after_init_len + 1;
    if (self.format == .dwarf64) di_buf.appendNTimesAssumeCapacity(0xff, 4);
    self.writeOffsetAssumeCapacity(&di_buf, init_len);
    mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), 4, target_endian); // DWARF version
    const abbrev_offset = self.abbrev_table_offset.?;
    self.writeOffsetAssumeCapacity(&di_buf, abbrev_offset);
    di_buf.appendAssumeCapacity(self.ptrWidthBytes()); // address size
    // Write the form for the compile unit, which must match the abbrev table above.
    const name_strp = try self.strtab.insert(self.allocator, zcu.root_mod.root_src_path);
    var compile_unit_dir_buffer: [std.fs.max_path_bytes]u8 = undefined;
    const compile_unit_dir = resolveCompilationDir(zcu, &compile_unit_dir_buffer);
    const comp_dir_strp = try self.strtab.insert(self.allocator, compile_unit_dir);
    const producer_strp = try self.strtab.insert(self.allocator, link.producer_string);
    di_buf.appendAssumeCapacity(@intFromEnum(AbbrevCode.compile_unit));
    self.writeOffsetAssumeCapacity(&di_buf, 0); // DW.AT.stmt_list, DW.FORM.sec_offset
    self.writeAddrAssumeCapacity(&di_buf, low_pc);
    self.writeAddrAssumeCapacity(&di_buf, high_pc);
    self.writeOffsetAssumeCapacity(&di_buf, name_strp);
    self.writeOffsetAssumeCapacity(&di_buf, comp_dir_strp);
    self.writeOffsetAssumeCapacity(&di_buf, producer_strp);
    // We are still waiting on dwarf-std.org to assign DW_LANG_Zig a number:
    // http://dwarfstd.org/ShowIssue.php?issue=171115.1
    // Until then we say it is C99.
    mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), DW.LANG.C99, target_endian);
    if (di_buf.items.len > first_dbg_info_off) {
        // Move the first N navs to the end to make more padding for the header.
        @panic("TODO: handle .debug_info header exceeding its padding");
    }
    // NOP-fill the gap between the end of the header and the first DI atom.
    const jmp_amt = first_dbg_info_off - di_buf.items.len;
    if (self.bin_file.cast(.elf)) |elf_file| {
        const debug_info_sect = &elf_file.shdrs.items[elf_file.debug_info_section_index.?];
        const file_pos = debug_info_sect.sh_offset;
        try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false);
    } else if (self.bin_file.cast(.macho)) |macho_file| {
        if (macho_file.base.isRelocatable()) {
            const debug_info_sect = macho_file.sections.items(.header)[macho_file.debug_info_sect_index.?];
            const file_pos = debug_info_sect.offset;
            try pwriteDbgInfoNops(macho_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false);
        } else {
            const d_sym = macho_file.getDebugSymbols().?;
            const debug_info_sect = d_sym.getSection(d_sym.debug_info_section_index.?);
            const file_pos = debug_info_sect.offset;
            try pwriteDbgInfoNops(d_sym.file, file_pos, 0, di_buf.items, jmp_amt, false);
        }
    } else if (self.bin_file.cast(.wasm)) |wasm_file| {
        // TODO: wasm debug output is currently stubbed out.
        _ = wasm_file;
        // const debug_info = &wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
        // try writeDbgInfoNopsToArrayList(self.allocator, debug_info, 0, 0, di_buf.items, jmp_amt, false);
    } else unreachable;
}
/// Produces the absolute compilation directory (root dir joined with the
/// module sub path), built inside `buffer`. Falls back to the unresolved
/// root directory path when resolution fails or the result would not fit.
fn resolveCompilationDir(zcu: *Zcu, buffer: *[std.fs.max_path_bytes]u8) []const u8 {
    // We fully resolve all paths at this point to avoid lack of source line info in stack
    // traces or lack of debugging information which, if relative paths were used, would
    // be very location dependent.
    // TODO: the only concern I have with this is WASI as either host or target, should
    // we leave the paths as relative then?
    const root_dir_path = zcu.root_mod.root.root_dir.path orelse ".";
    const sub_path = zcu.root_mod.root.sub_path;
    const resolved: []const u8 = if (std.fs.path.isAbsolute(root_dir_path)) blk: {
        // Already absolute: copy into `buffer` so the sub path can be
        // appended in place below.
        @memcpy(buffer[0..root_dir_path.len], root_dir_path);
        break :blk root_dir_path;
    } else std.fs.realpath(root_dir_path, buffer) catch return root_dir_path;
    const total_len = resolved.len + 1 + sub_path.len;
    if (total_len > buffer.len) return root_dir_path;
    buffer[resolved.len] = '/';
    @memcpy(buffer[resolved.len + 1 ..][0..sub_path.len], sub_path);
    return buffer[0..total_len];
}
/// Appends `addr` to `buf` as a target-endian integer whose width matches the
/// target pointer size (4 or 8 bytes). Caller must have reserved capacity.
fn writeAddrAssumeCapacity(self: *Dwarf, buf: *std.ArrayList(u8), addr: u64) void {
    const endian = self.bin_file.comp.root_mod.resolved_target.result.cpu.arch.endian();
    switch (self.ptr_width) {
        .p32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @intCast(addr), endian),
        .p64 => mem.writeInt(u64, buf.addManyAsArrayAssumeCapacity(8), addr, endian),
    }
}
/// Appends `off` to `buf` as a target-endian integer sized by the DWARF
/// format: 4 bytes for DWARF32, 8 bytes for DWARF64. Caller must have
/// reserved capacity.
fn writeOffsetAssumeCapacity(self: *Dwarf, buf: *std.ArrayList(u8), off: u64) void {
    const endian = self.bin_file.comp.root_mod.resolved_target.result.cpu.arch.endian();
    switch (self.format) {
        .dwarf32 => mem.writeInt(u32, buf.addManyAsArrayAssumeCapacity(4), @intCast(off), endian),
        .dwarf64 => mem.writeInt(u64, buf.addManyAsArrayAssumeCapacity(8), off, endian),
    }
}
/// Writes to the file a buffer, prefixed and suffixed by the specified number of
/// bytes of NOPs. Asserts each padding size is at least `min_nop_size` and total padding bytes
/// are less than 1044480 bytes (if this limit is ever reached, this function can be
/// improved to make more than one pwritev call, or the limit can be raised by a fixed
/// amount by increasing the length of `vecs`).
fn pwriteDbgLineNops(
    file: fs.File,
    offset: u64,
    prev_padding_size: usize,
    buf: []const u8,
    next_padding_size: usize,
) !void {
    const tracy = trace(@src());
    defer tracy.end();
    // `negate_stmt` toggles the line program's is_stmt register, so padding
    // must emit it an even number of times to be a net no-op. Odd-sized runs
    // are evened out with a 3-byte NOP: `advance_pc` whose operand is the
    // two-byte ULEB128 encoding of zero (0x80, 0x00).
    const page_of_nops = [1]u8{DW.LNS.negate_stmt} ** 4096;
    const three_byte_nop = [3]u8{ DW.LNS.advance_pc, 0b1000_0000, 0 };
    var vecs: [512]std.posix.iovec_const = undefined;
    var vec_index: usize = 0;
    {
        // Padding run immediately before `buf`, in up-to-4096-byte chunks.
        var padding_left = prev_padding_size;
        if (padding_left % 2 != 0) {
            vecs[vec_index] = .{
                .base = &three_byte_nop,
                .len = three_byte_nop.len,
            };
            vec_index += 1;
            padding_left -= three_byte_nop.len;
        }
        while (padding_left > page_of_nops.len) {
            vecs[vec_index] = .{
                .base = &page_of_nops,
                .len = page_of_nops.len,
            };
            vec_index += 1;
            padding_left -= page_of_nops.len;
        }
        if (padding_left > 0) {
            vecs[vec_index] = .{
                .base = &page_of_nops,
                .len = padding_left,
            };
            vec_index += 1;
        }
    }
    // The payload itself; only counted when non-empty.
    vecs[vec_index] = .{
        .base = buf.ptr,
        .len = buf.len,
    };
    if (buf.len > 0) vec_index += 1;
    {
        // Padding run immediately after `buf`; same scheme as above.
        var padding_left = next_padding_size;
        if (padding_left % 2 != 0) {
            vecs[vec_index] = .{
                .base = &three_byte_nop,
                .len = three_byte_nop.len,
            };
            vec_index += 1;
            padding_left -= three_byte_nop.len;
        }
        while (padding_left > page_of_nops.len) {
            vecs[vec_index] = .{
                .base = &page_of_nops,
                .len = page_of_nops.len,
            };
            vec_index += 1;
            padding_left -= page_of_nops.len;
        }
        if (padding_left > 0) {
            vecs[vec_index] = .{
                .base = &page_of_nops,
                .len = padding_left,
            };
            vec_index += 1;
        }
    }
    // The write begins `prev_padding_size` bytes before `offset`.
    try file.pwritevAll(vecs[0..vec_index], offset - prev_padding_size);
}
/// In-memory analog of `pwriteDbgLineNops`: copies `content` into `buf` at
/// `offset`, filling the `prev_padding_size` bytes before it and the
/// `next_padding_size` bytes after it with line number program NOPs.
/// `negate_stmt` toggles the is_stmt register, so it is emitted an even
/// number of times; odd-sized runs start with a 3-byte `advance_pc 0` NOP
/// (operand is the two-byte ULEB128 encoding of zero). Assumes odd padding
/// runs are at least 3 bytes, mirroring `pwriteDbgLineNops`'s min_nop_size.
fn writeDbgLineNopsBuffered(
    buf: []u8,
    offset: u32,
    prev_padding_size: usize,
    content: []const u8,
    next_padding_size: usize,
) void {
    assert(buf.len >= content.len + prev_padding_size + next_padding_size);
    const tracy = trace(@src());
    defer tracy.end();
    const three_byte_nop = [3]u8{ DW.LNS.advance_pc, 0b1000_0000, 0 };
    {
        // Fill buf[offset - prev_padding_size .. offset].
        var padding_left = prev_padding_size;
        if (padding_left % 2 != 0) {
            buf[offset - padding_left ..][0..3].* = three_byte_nop;
            padding_left -= 3;
        }
        while (padding_left > 0) : (padding_left -= 1) {
            buf[offset - padding_left] = DW.LNS.negate_stmt;
        }
    }
    @memcpy(buf[offset..][0..content.len], content);
    {
        // Fill buf[offset + content.len ..][0..next_padding_size], walking
        // forward from the start of the padding region. The previous code
        // indexed `offset + content.len + padding_left`, which left the first
        // padding byte stale and wrote one byte past the end of the region
        // (past the asserted bound when the buffer is exactly sized), and
        // placed the odd-case 3-byte NOP entirely beyond the region.
        var i: usize = offset + content.len;
        var padding_left = next_padding_size;
        if (padding_left % 2 != 0) {
            buf[i..][0..3].* = three_byte_nop;
            i += 3;
            padding_left -= 3;
        }
        while (padding_left > 0) : (padding_left -= 1) {
            buf[i] = DW.LNS.negate_stmt;
            i += 1;
        }
    }
}
/// Writes to the file a buffer, prefixed and suffixed by the specified number of
/// bytes of padding. The padding byte is the `padding` abbrev code so that
/// .debug_info consumers skip over it. Optionally appends a single trailing
/// zero byte after the suffix padding.
fn pwriteDbgInfoNops(
    file: fs.File,
    offset: u64,
    prev_padding_size: usize,
    buf: []const u8,
    next_padding_size: usize,
    trailing_zero: bool,
) !void {
    const tracy = trace(@src());
    defer tracy.end();
    const page_of_nops = [1]u8{@intFromEnum(AbbrevCode.padding)} ** 4096;
    // Backing storage for the optional trailing zero byte. This must live at
    // function scope: the iovec entry below stores a pointer to it that is
    // dereferenced by `pwritevAll` at the end of the function. Previously it
    // was declared inside the `if (trailing_zero)` block, so the stored
    // pointer referenced a variable whose scope had already ended.
    const zero = [1]u8{0};
    var vecs: [32]std.posix.iovec_const = undefined;
    var vec_index: usize = 0;
    {
        // Padding before `buf`, emitted in up-to-4096-byte chunks.
        var padding_left = prev_padding_size;
        while (padding_left > page_of_nops.len) {
            vecs[vec_index] = .{
                .base = &page_of_nops,
                .len = page_of_nops.len,
            };
            vec_index += 1;
            padding_left -= page_of_nops.len;
        }
        if (padding_left > 0) {
            vecs[vec_index] = .{
                .base = &page_of_nops,
                .len = padding_left,
            };
            vec_index += 1;
        }
    }
    // The payload itself; only counted when non-empty.
    vecs[vec_index] = .{
        .base = buf.ptr,
        .len = buf.len,
    };
    if (buf.len > 0) vec_index += 1;
    {
        // Padding after `buf`.
        var padding_left = next_padding_size;
        while (padding_left > page_of_nops.len) {
            vecs[vec_index] = .{
                .base = &page_of_nops,
                .len = page_of_nops.len,
            };
            vec_index += 1;
            padding_left -= page_of_nops.len;
        }
        if (padding_left > 0) {
            vecs[vec_index] = .{
                .base = &page_of_nops,
                .len = padding_left,
            };
            vec_index += 1;
        }
    }
    if (trailing_zero) {
        vecs[vec_index] = .{
            .base = &zero,
            .len = zero.len,
        };
        vec_index += 1;
    }
    // The write begins `prev_padding_size` bytes before `offset`.
    try file.pwritevAll(vecs[0..vec_index], offset - prev_padding_size);
}
/// In-memory analog of `pwriteDbgInfoNops`: writes `content` at `offset`
/// into `buffer`, surrounded by `prev_padding_size`/`next_padding_size`
/// bytes of the `padding` abbrev code, with an optional trailing zero byte.
/// Grows (never shrinks) the buffer as needed. Assumes the region before
/// `offset - prev_padding_size` is already populated by the caller.
fn writeDbgInfoNopsToArrayList(
    gpa: Allocator,
    buffer: *std.ArrayListUnmanaged(u8),
    offset: u32,
    prev_padding_size: usize,
    content: []const u8,
    next_padding_size: usize,
    trailing_zero: bool,
) Allocator.Error!void {
    const end: usize = offset + content.len + next_padding_size;
    // Only reserve the extra byte when a trailing zero is actually written;
    // unconditionally adding 1 (as before) could leave an undefined byte at
    // the end of `buffer.items` when `trailing_zero` is false.
    try buffer.resize(gpa, @max(
        buffer.items.len,
        end + @intFromBool(trailing_zero),
    ));
    @memset(buffer.items[offset - prev_padding_size .. offset], @intFromEnum(AbbrevCode.padding));
    @memcpy(buffer.items[offset..][0..content.len], content);
    @memset(buffer.items[offset + content.len ..][0..next_padding_size], @intFromEnum(AbbrevCode.padding));
    if (trailing_zero) {
        buffer.items[end] = 0;
    }
}
/// Writes the entire `.debug_aranges` section describing the single
/// compilation unit's address range `[addr, addr + size)`, followed by the
/// zero/zero sentinel tuple.
pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
    const comp = self.bin_file.comp;
    const target = comp.root_mod.resolved_target.result;
    const target_endian = target.cpu.arch.endian();
    const ptr_width_bytes = self.ptrWidthBytes();
    // Enough for all the data without resizing. When support for more compilation units
    // is added, the size of this section will become more variable.
    var di_buf = try std.ArrayList(u8).initCapacity(self.allocator, 100);
    defer di_buf.deinit();
    // initial length - length of the .debug_aranges contribution for this compilation unit,
    // not including the initial length itself.
    // We have to come back and write it later after we know the size.
    if (self.format == .dwarf64) di_buf.appendNTimesAssumeCapacity(0xff, 4);
    const init_len_index = di_buf.items.len;
    self.writeOffsetAssumeCapacity(&di_buf, 0); // placeholder, backpatched below
    const after_init_len = di_buf.items.len;
    mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), 2, target_endian); // version
    // When more than one compilation unit is supported, this will be the offset to it.
    // For now it is always at offset 0 in .debug_info.
    self.writeOffsetAssumeCapacity(&di_buf, 0); // .debug_info offset
    di_buf.appendAssumeCapacity(ptr_width_bytes); // address_size
    di_buf.appendAssumeCapacity(0); // segment_selector_size
    // Zero-pad so the address/length tuples start aligned to twice the
    // address size, matching the alignment requested in `initMetadata`.
    const end_header_offset = di_buf.items.len;
    const begin_entries_offset = mem.alignForward(usize, end_header_offset, ptr_width_bytes * 2);
    di_buf.appendNTimesAssumeCapacity(0, begin_entries_offset - end_header_offset);
    // Currently only one compilation unit is supported, so the address range is simply
    // identical to the main program header virtual address and memory size.
    self.writeAddrAssumeCapacity(&di_buf, addr);
    self.writeAddrAssumeCapacity(&di_buf, size);
    // Sentinel.
    self.writeAddrAssumeCapacity(&di_buf, 0);
    self.writeAddrAssumeCapacity(&di_buf, 0);
    // Go back and populate the initial length.
    const init_len = di_buf.items.len - after_init_len;
    switch (self.format) {
        .dwarf32 => mem.writeInt(u32, di_buf.items[init_len_index..][0..4], @intCast(init_len), target_endian),
        .dwarf64 => mem.writeInt(u64, di_buf.items[init_len_index..][0..8], init_len, target_endian),
    }
    const needed_size: u32 = @intCast(di_buf.items.len);
    // Grow the container's .debug_aranges section and write the contents.
    if (self.bin_file.cast(.elf)) |elf_file| {
        const shdr_index = elf_file.debug_aranges_section_index.?;
        try elf_file.growNonAllocSection(shdr_index, needed_size, 16, false);
        const debug_aranges_sect = &elf_file.shdrs.items[shdr_index];
        const file_pos = debug_aranges_sect.sh_offset;
        try elf_file.base.file.?.pwriteAll(di_buf.items, file_pos);
    } else if (self.bin_file.cast(.macho)) |macho_file| {
        if (macho_file.base.isRelocatable()) {
            const sect_index = macho_file.debug_aranges_sect_index.?;
            try macho_file.growSection(sect_index, needed_size);
            const sect = macho_file.sections.items(.header)[sect_index];
            const file_pos = sect.offset;
            try macho_file.base.file.?.pwriteAll(di_buf.items, file_pos);
        } else {
            const d_sym = macho_file.getDebugSymbols().?;
            const sect_index = d_sym.debug_aranges_section_index.?;
            try d_sym.growSection(sect_index, needed_size, false, macho_file);
            const sect = d_sym.getSection(sect_index);
            const file_pos = sect.offset;
            try d_sym.file.pwriteAll(di_buf.items, file_pos);
        }
    } else if (self.bin_file.cast(.wasm)) |wasm_file| {
        // TODO: wasm debug output is currently stubbed out.
        _ = wasm_file;
        // const debug_ranges = &wasm_file.getAtomPtr(wasm_file.debug_ranges_atom.?).code;
        // try debug_ranges.resize(gpa, needed_size);
        // @memcpy(debug_ranges.items[0..di_buf.items.len], di_buf.items);
    } else unreachable;
}
/// Rewrites the `.debug_line` unit header (opcode table, include directories,
/// file table). If the header has outgrown the offset of the first function's
/// line program, every src_fn atom is shifted forward to make room. Finally,
/// the unit's initial length is backpatched and the gap between the header
/// and the first atom is NOP-filled.
pub fn writeDbgLineHeader(self: *Dwarf) !void {
    const comp = self.bin_file.comp;
    const gpa = self.allocator;
    const target = comp.root_mod.resolved_target.result;
    const target_endian = target.cpu.arch.endian();
    // Size of the initial length field: 4 for DWARF32, 12 (escape + u64) for DWARF64.
    const init_len_size: usize = switch (self.format) {
        .dwarf32 => 4,
        .dwarf64 => 12,
    };
    const dbg_line_prg_off = self.getDebugLineProgramOff() orelse return;
    assert(self.getDebugLineProgramEnd().? != 0);
    // Convert all input DI files into a set of include dirs and file names.
    var arena = std.heap.ArenaAllocator.init(gpa);
    defer arena.deinit();
    const paths = try self.genIncludeDirsAndFileNames(arena.allocator());
    // The size of this header is variable, depending on the number of directories,
    // files, and padding. We have a function to compute the upper bound size, however,
    // because it's needed for determining where to put the offset of the first `SrcFn`.
    const needed_bytes = self.dbgLineNeededHeaderBytes(paths.dirs, paths.files);
    var di_buf = try std.ArrayList(u8).initCapacity(gpa, needed_bytes);
    defer di_buf.deinit();
    if (self.format == .dwarf64) di_buf.appendNTimesAssumeCapacity(0xff, 4);
    self.writeOffsetAssumeCapacity(&di_buf, 0); // initial length; backpatched below
    mem.writeInt(u16, di_buf.addManyAsArrayAssumeCapacity(2), 4, target_endian); // version
    // Empirically, debug info consumers do not respect this field, or otherwise
    // consider it to be an error when it does not point exactly to the end of the header.
    // Therefore we rely on the NOP jump at the beginning of the Line Number Program for
    // padding rather than this field.
    const before_header_len = di_buf.items.len;
    self.writeOffsetAssumeCapacity(&di_buf, 0); // We will come back and write this.
    const after_header_len = di_buf.items.len;
    assert(self.dbg_line_header.opcode_base == DW.LNS.set_isa + 1);
    di_buf.appendSliceAssumeCapacity(&[_]u8{
        self.dbg_line_header.minimum_instruction_length,
        self.dbg_line_header.maximum_operations_per_instruction,
        @intFromBool(self.dbg_line_header.default_is_stmt),
        @bitCast(self.dbg_line_header.line_base),
        self.dbg_line_header.line_range,
        self.dbg_line_header.opcode_base,
        // Standard opcode lengths. The number of items here is based on `opcode_base`.
        // The value is the number of LEB128 operands the instruction takes.
        0, // `DW.LNS.copy`
        1, // `DW.LNS.advance_pc`
        1, // `DW.LNS.advance_line`
        1, // `DW.LNS.set_file`
        1, // `DW.LNS.set_column`
        0, // `DW.LNS.negate_stmt`
        0, // `DW.LNS.set_basic_block`
        0, // `DW.LNS.const_add_pc`
        1, // `DW.LNS.fixed_advance_pc`
        0, // `DW.LNS.set_prologue_end`
        0, // `DW.LNS.set_epilogue_begin`
        1, // `DW.LNS.set_isa`
    });
    for (paths.dirs, 0..) |dir, i| {
        log.debug("adding new include dir at {d} of '{s}'", .{ i + 1, dir });
        di_buf.appendSliceAssumeCapacity(dir);
        di_buf.appendAssumeCapacity(0);
    }
    di_buf.appendAssumeCapacity(0); // include directories sentinel
    for (paths.files, 0..) |file, i| {
        const dir_index = paths.files_dirs_indexes[i];
        log.debug("adding new file name at {d} of '{s}' referencing directory {d}", .{
            i + 1,
            file,
            dir_index + 1,
        });
        di_buf.appendSliceAssumeCapacity(file);
        di_buf.appendSliceAssumeCapacity(&[_]u8{
            0, // null byte for the relative path name
            @intCast(dir_index), // directory_index; NOTE(review): traps if >= 256 — TODO confirm bound
            0, // mtime (TODO supply this)
            0, // file size bytes (TODO supply this)
        });
    }
    di_buf.appendAssumeCapacity(0); // file names sentinel
    // Backpatch the header-length field written above.
    const header_len = di_buf.items.len - after_header_len;
    switch (self.format) {
        .dwarf32 => mem.writeInt(u32, di_buf.items[before_header_len..][0..4], @intCast(header_len), target_endian),
        .dwarf64 => mem.writeInt(u64, di_buf.items[before_header_len..][0..8], header_len, target_endian),
    }
    assert(needed_bytes == di_buf.items.len);
    if (di_buf.items.len > dbg_line_prg_off) {
        // The header no longer fits before the first line program: shift the
        // entire contiguous run of src_fn line programs forward by `delta`
        // bytes (read back, then rewrite at the new position), then bump every
        // atom's recorded offset to match.
        const needed_with_padding = padToIdeal(needed_bytes);
        const delta = needed_with_padding - dbg_line_prg_off;
        const first_fn_index = self.src_fn_first_index.?;
        const first_fn = self.getAtom(.src_fn, first_fn_index);
        const last_fn_index = self.src_fn_last_index.?;
        const last_fn = self.getAtom(.src_fn, last_fn_index);
        var src_fn_index = first_fn_index;
        const buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - first_fn.off);
        defer gpa.free(buffer);
        if (self.bin_file.cast(.elf)) |elf_file| {
            const shdr_index = elf_file.debug_line_section_index.?;
            const needed_size = elf_file.shdrs.items[shdr_index].sh_size + delta;
            try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
            const file_pos = elf_file.shdrs.items[shdr_index].sh_offset + first_fn.off;
            const amt = try elf_file.base.file.?.preadAll(buffer, file_pos);
            if (amt != buffer.len) return error.InputOutput;
            try elf_file.base.file.?.pwriteAll(buffer, file_pos + delta);
        } else if (self.bin_file.cast(.macho)) |macho_file| {
            if (macho_file.base.isRelocatable()) {
                const sect_index = macho_file.debug_line_sect_index.?;
                const needed_size: u32 = @intCast(macho_file.sections.items(.header)[sect_index].size + delta);
                try macho_file.growSection(sect_index, needed_size);
                const file_pos = macho_file.sections.items(.header)[sect_index].offset + first_fn.off;
                const amt = try macho_file.base.file.?.preadAll(buffer, file_pos);
                if (amt != buffer.len) return error.InputOutput;
                try macho_file.base.file.?.pwriteAll(buffer, file_pos + delta);
            } else {
                const d_sym = macho_file.getDebugSymbols().?;
                const sect_index = d_sym.debug_line_section_index.?;
                const needed_size: u32 = @intCast(d_sym.getSection(sect_index).size + delta);
                try d_sym.growSection(sect_index, needed_size, true, macho_file);
                const file_pos = d_sym.getSection(sect_index).offset + first_fn.off;
                const amt = try d_sym.file.preadAll(buffer, file_pos);
                if (amt != buffer.len) return error.InputOutput;
                try d_sym.file.pwriteAll(buffer, file_pos + delta);
            }
        } else if (self.bin_file.cast(.wasm)) |wasm_file| {
            // TODO: wasm debug output is currently stubbed out.
            _ = wasm_file;
            // const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
            // {
            //     const src = debug_line.items[first_fn.off..];
            //     @memcpy(buffer[0..src.len], src);
            // }
            // try debug_line.resize(self.allocator, debug_line.items.len + delta);
            // @memcpy(debug_line.items[first_fn.off + delta ..][0..buffer.len], buffer);
        } else unreachable;
        // Walk the atom list and shift every recorded offset.
        while (true) {
            const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
            src_fn.off += delta;
            if (src_fn.next_index) |next_index| {
                src_fn_index = next_index;
            } else break;
        }
    }
    // Backpatch actual length of the debug line program
    const init_len = self.getDebugLineProgramEnd().? - init_len_size;
    switch (self.format) {
        .dwarf32 => {
            mem.writeInt(u32, di_buf.items[0..4], @intCast(init_len), target_endian);
        },
        .dwarf64 => {
            mem.writeInt(u64, di_buf.items[4..][0..8], init_len, target_endian);
        },
    }
    // We use NOPs because consumers empirically do not respect the header length field.
    const jmp_amt = self.getDebugLineProgramOff().? - di_buf.items.len;
    if (self.bin_file.cast(.elf)) |elf_file| {
        const debug_line_sect = &elf_file.shdrs.items[elf_file.debug_line_section_index.?];
        const file_pos = debug_line_sect.sh_offset;
        try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt);
    } else if (self.bin_file.cast(.macho)) |macho_file| {
        if (macho_file.base.isRelocatable()) {
            const debug_line_sect = macho_file.sections.items(.header)[macho_file.debug_line_sect_index.?];
            const file_pos = debug_line_sect.offset;
            try pwriteDbgLineNops(macho_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt);
        } else {
            const d_sym = macho_file.getDebugSymbols().?;
            const debug_line_sect = d_sym.getSection(d_sym.debug_line_section_index.?);
            const file_pos = debug_line_sect.offset;
            try pwriteDbgLineNops(d_sym.file, file_pos, 0, di_buf.items, jmp_amt);
        }
    } else if (self.bin_file.cast(.wasm)) |wasm_file| {
        // TODO: wasm debug output is currently stubbed out.
        _ = wasm_file;
        // const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
        // writeDbgLineNopsBuffered(debug_line.items, 0, 0, di_buf.items, jmp_amt);
    } else unreachable;
}
/// Section-relative offset of the first DI atom, or null when none exist.
fn getDebugInfoOff(self: Dwarf) ?u32 {
    if (self.di_atom_first_index) |first_index| {
        return self.getAtom(.di_atom, first_index).off;
    }
    return null;
}
/// Section-relative end offset of the last DI atom, or null when none exist.
fn getDebugInfoEnd(self: Dwarf) ?u32 {
    if (self.di_atom_last_index) |last_index| {
        const last = self.getAtom(.di_atom, last_index);
        return last.off + last.len;
    }
    return null;
}
/// Section-relative offset of the first src_fn line program, or null when
/// none exist.
fn getDebugLineProgramOff(self: Dwarf) ?u32 {
    if (self.src_fn_first_index) |first_index| {
        return self.getAtom(.src_fn, first_index).off;
    }
    return null;
}
/// Section-relative end offset of the last src_fn line program, or null when
/// none exist.
fn getDebugLineProgramEnd(self: Dwarf) ?u32 {
    if (self.src_fn_last_index) |last_index| {
        const last = self.getAtom(.src_fn, last_index);
        return last.off + last.len;
    }
    return null;
}
/// Always 4 or 8 depending on whether this is 32-bit or 64-bit format.
fn ptrWidthBytes(self: Dwarf) u8 {
    switch (self.ptr_width) {
        .p32 => return 4,
        .p64 => return 8,
    }
}
/// Exact size in bytes of the .debug_line unit header that
/// `writeDbgLineHeader` produces for the given directory and file lists.
fn dbgLineNeededHeaderBytes(self: Dwarf, dirs: []const []const u8, files: []const []const u8) u32 {
    // Fixed-size portion: initial length field plus end-of-header offset
    // (format-dependent), the u16 version field, and the 18 opcode bytes.
    var total: usize = @sizeOf(u16) + 18 + switch (self.format) {
        .dwarf32 => 4 + 4,
        .dwarf64 => 12 + 8,
    };
    // Each include directory is a null-terminated string.
    for (dirs) |dir| total += dir.len + 1;
    total += 1; // include dirs sentinel
    // Each file entry: null-terminated name plus three single-byte fields
    // (directory index, mtime, size) as written in `writeDbgLineHeader`.
    for (files) |file| total += file.len + 4;
    total += 1; // file names sentinel
    return @intCast(total);
}
/// The reloc offset for the line offset of a function from the previous
/// function's line. It's a fixed-size 4-byte ULEB128.
fn getRelocDbgLineOff(self: Dwarf) usize {
    const addr_bytes: usize = self.ptrWidthBytes();
    return dbg_line_vaddr_reloc_index + addr_bytes + 1;
}
/// Reloc offset of the file index, 5 bytes past the line-offset reloc
/// (presumably the fixed 4-byte ULEB128 plus one opcode byte — see
/// `getRelocDbgLineOff`).
fn getRelocDbgFileIndex(self: Dwarf) usize {
    const line_off_reloc = self.getRelocDbgLineOff();
    return line_off_reloc + 5;
}
/// Reloc offset of a subprogram DIE's high_pc value: it directly follows the
/// pointer-sized low_pc at `dbg_info_low_pc_reloc_index`.
fn getRelocDbgInfoSubprogramHighPC(self: Dwarf) u32 {
    const low_pc_bytes: u32 = self.ptrWidthBytes();
    return dbg_info_low_pc_reloc_index + low_pc_bytes;
}
/// Returns `actual_size` plus proportional slack (1/ideal_factor of it),
/// using a saturating add so the result never overflows.
fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
    const slack = actual_size / ideal_factor;
    return actual_size +| slack;
}
pub fn flushModule(self: *Dwarf, pt: Zcu.PerThread) !void {
const comp = self.bin_file.comp;
pub fn init(lf: *link.File, format: DW.Format) Dwarf {
const comp = lf.comp;
const gpa = comp.gpa;
const target = comp.root_mod.resolved_target.result;
return .{
.gpa = gpa,
.bin_file = lf,
.format = format,
.address_size = switch (target.ptrBitWidth()) {
0...32 => .@"32",
33...64 => .@"64",
else => unreachable,
},
.endian = target.cpu.arch.endian(),
if (self.global_abbrev_relocs.items.len > 0) {
const gpa = self.allocator;
var arena_alloc = std.heap.ArenaAllocator.init(gpa);
defer arena_alloc.deinit();
const arena = arena_alloc.allocator();
.mods = .{},
.types = .{},
.navs = .{},
var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSetNames(
pt,
Type.anyerror,
pt.zcu.intern_pool.global_error_set.getNamesFromMainThread(),
target,
&dbg_info_buffer,
.debug_abbrev = .{ .section = Section.init },
.debug_aranges = .{ .section = Section.init },
.debug_info = .{ .section = Section.init },
.debug_line = .{
.header = switch (target.cpu.arch) {
.x86_64, .aarch64 => .{
.minimum_instruction_length = 1,
.maximum_operations_per_instruction = 1,
.default_is_stmt = true,
.line_base = -5,
.line_range = 14,
.opcode_base = DW.LNS.set_isa + 1,
},
else => .{
.minimum_instruction_length = 1,
.maximum_operations_per_instruction = 1,
.default_is_stmt = true,
.line_base = 0,
.line_range = 1,
.opcode_base = DW.LNS.set_isa + 1,
},
},
.section = Section.init,
},
.debug_line_str = StringSection.init,
.debug_loclists = .{ .section = Section.init },
.debug_rnglists = .{ .section = Section.init },
.debug_str = StringSection.init,
};
}
/// Refreshes each tracked DWARF section's index, file offset, and length from
/// the authoritative section headers of the output file. Call whenever the
/// container (ELF shdrs, Mach-O sections, or dSYM) may have relocated them.
/// No-op for containers without native DWARF sections (e.g. wasm).
pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
    // Single authoritative list of the tracked sections; the per-format index
    // lists below must stay in the same order. Previously this list was
    // duplicated verbatim in all three branches, which invited drift.
    const secs = [_]*Section{
        &dwarf.debug_abbrev.section,
        &dwarf.debug_aranges.section,
        &dwarf.debug_info.section,
        &dwarf.debug_line.section,
        &dwarf.debug_line_str.section,
        &dwarf.debug_loclists.section,
        &dwarf.debug_rnglists.section,
        &dwarf.debug_str.section,
    };
    if (dwarf.bin_file.cast(.elf)) |elf_file| {
        for (secs, [_]u32{
            elf_file.debug_abbrev_section_index.?,
            elf_file.debug_aranges_section_index.?,
            elf_file.debug_info_section_index.?,
            elf_file.debug_line_section_index.?,
            elf_file.debug_line_str_section_index.?,
            elf_file.debug_loclists_section_index.?,
            elf_file.debug_rnglists_section_index.?,
            elf_file.debug_str_section_index.?,
        }) |sec, section_index| {
            const shdr = &elf_file.shdrs.items[section_index];
            sec.index = section_index;
            sec.off = shdr.sh_offset;
            sec.len = shdr.sh_size;
        }
    } else if (dwarf.bin_file.cast(.macho)) |macho_file| {
        if (macho_file.d_sym) |*d_sym| {
            // Debug info lives in the separate dSYM bundle.
            for (secs, [_]u8{
                d_sym.debug_abbrev_section_index.?,
                d_sym.debug_aranges_section_index.?,
                d_sym.debug_info_section_index.?,
                d_sym.debug_line_section_index.?,
                d_sym.debug_line_str_section_index.?,
                d_sym.debug_loclists_section_index.?,
                d_sym.debug_rnglists_section_index.?,
                d_sym.debug_str_section_index.?,
            }) |sec, sect_index| {
                const header = &d_sym.sections.items[sect_index];
                sec.index = sect_index;
                sec.off = header.offset;
                sec.len = header.size;
            }
        } else {
            // Debug sections live in the Mach-O file itself.
            for (secs, [_]u8{
                macho_file.debug_abbrev_sect_index.?,
                macho_file.debug_aranges_sect_index.?,
                macho_file.debug_info_sect_index.?,
                macho_file.debug_line_sect_index.?,
                macho_file.debug_line_str_sect_index.?,
                macho_file.debug_loclists_sect_index.?,
                macho_file.debug_rnglists_sect_index.?,
                macho_file.debug_str_sect_index.?,
            }) |sec, sect_index| {
                const header = &macho_file.sections.items(.header)[sect_index];
                sec.index = sect_index;
                sec.off = header.offset;
                sec.len = header.size;
            }
        }
    }
}
/// One-time setup of per-section metadata: loads section info from the
/// output file, then registers the fixed singleton units for .debug_abbrev
/// and the string sections, and configures per-section layout policy.
pub fn initMetadata(dwarf: *Dwarf) UpdateError!void {
    dwarf.reloadSectionMetadata();
    // .debug_abbrev has one fixed unit containing one fixed entry; the
    // asserts pin the well-known indices declared in `DebugAbbrev`.
    dwarf.debug_abbrev.section.pad_to_ideal = false;
    assert(try dwarf.debug_abbrev.section.addUnit(0, 0, dwarf) == DebugAbbrev.unit);
    errdefer dwarf.debug_abbrev.section.popUnit();
    assert(try dwarf.debug_abbrev.section.addEntry(DebugAbbrev.unit, dwarf) == DebugAbbrev.entry);
    dwarf.debug_aranges.section.pad_to_ideal = false;
    // .debug_aranges tuples are address/length pairs, hence alignment of
    // twice the address size.
    dwarf.debug_aranges.section.alignment = InternPool.Alignment.fromNonzeroByteUnits(@intFromEnum(dwarf.address_size) * 2);
    // Both string sections get a single fixed unit; no ideal-size padding.
    dwarf.debug_line_str.section.pad_to_ideal = false;
    assert(try dwarf.debug_line_str.section.addUnit(0, 0, dwarf) == StringSection.unit);
    errdefer dwarf.debug_line_str.section.popUnit();
    dwarf.debug_str.section.pad_to_ideal = false;
    assert(try dwarf.debug_str.section.addUnit(0, 0, dwarf) == StringSection.unit);
    errdefer dwarf.debug_str.section.popUnit();
    dwarf.debug_loclists.section.pad_to_ideal = false;
    dwarf.debug_rnglists.section.pad_to_ideal = false;
}
/// Frees all memory owned by `dwarf`. Deallocation cannot fail; `dwarf` is
/// invalid after this call.
pub fn deinit(dwarf: *Dwarf) void {
    const gpa = dwarf.gpa;
    // Per-module file sets first, then the maps themselves.
    for (dwarf.mods.values()) |*mod_info| mod_info.files.deinit(gpa);
    dwarf.mods.deinit(gpa);
    dwarf.types.deinit(gpa);
    dwarf.navs.deinit(gpa);
    // Section payloads. Note the string sections (`debug_line_str`,
    // `debug_str`) own extra state and are deinitialized as a whole rather
    // than via their `.section` field.
    dwarf.debug_abbrev.section.deinit(gpa);
    dwarf.debug_aranges.section.deinit(gpa);
    dwarf.debug_info.section.deinit(gpa);
    dwarf.debug_line.section.deinit(gpa);
    dwarf.debug_line_str.deinit(gpa);
    dwarf.debug_loclists.section.deinit(gpa);
    dwarf.debug_rnglists.section.deinit(gpa);
    dwarf.debug_str.deinit(gpa);
    // Poison to catch use-after-deinit in safe builds.
    dwarf.* = undefined;
}
/// Returns the DWARF unit index for module `mod`, lazily creating a matching
/// unit in every per-unit section on first use. Unit indices are the indices
/// into `dwarf.mods`, so each section's unit list must stay in lockstep with
/// that map — hence the asserts, and the errdefer chain that unwinds all
/// partial progress (including the map entry itself) if any `addUnit` fails.
fn getUnit(dwarf: *Dwarf, mod: *Module) UpdateError!Unit.Index {
    const mod_gop = try dwarf.mods.getOrPut(dwarf.gpa, mod);
    const unit: Unit.Index = @enumFromInt(mod_gop.index);
    if (!mod_gop.found_existing) {
        errdefer _ = dwarf.mods.pop();
        mod_gop.value_ptr.* = .{
            .files = .{},
        };
        assert(try dwarf.debug_aranges.section.addUnit(
            DebugAranges.headerBytes(dwarf),
            DebugAranges.trailerBytes(dwarf),
            dwarf,
        ) == unit);
        errdefer dwarf.debug_aranges.section.popUnit();
        assert(try dwarf.debug_info.section.addUnit(
            DebugInfo.headerBytes(dwarf),
            DebugInfo.trailer_bytes,
            dwarf,
        ) == unit);
        errdefer dwarf.debug_info.section.popUnit();
        assert(try dwarf.debug_line.section.addUnit(
            // 25: capacity hint passed to headerBytes — presumably an upper
            // bound on per-unit file table size; TODO confirm its meaning.
            DebugLine.headerBytes(dwarf, 25),
            DebugLine.trailer_bytes,
            dwarf,
        ) == unit);
        errdefer dwarf.debug_line.section.popUnit();
        assert(try dwarf.debug_loclists.section.addUnit(
            DebugLocLists.headerBytes(dwarf),
            DebugLocLists.trailer_bytes,
            dwarf,
        ) == unit);
        errdefer dwarf.debug_loclists.section.popUnit();
        assert(try dwarf.debug_rnglists.section.addUnit(
            DebugRngLists.headerBytes(dwarf),
            DebugRngLists.trailer_bytes,
            dwarf,
        ) == unit);
        errdefer dwarf.debug_rnglists.section.popUnit();
    }
    return unit;
}
/// Returns a pointer into `dwarf.mods` at the per-module file map for `unit`.
/// The pointer is invalidated if `dwarf.mods` grows.
fn getUnitFiles(dwarf: *Dwarf, unit: Unit.Index) *Files {
    // Unit indices are indices into `mods` (see `getUnit`).
    const mod_info = &dwarf.mods.values()[@intFromEnum(unit)];
    return &mod_info.files;
}
/// Begins emitting debug information for nav `nav_index`, whose storage is
/// the linker symbol `sym_index`. Writes the nav's declaration DIE into the
/// returned `WipNav`'s buffers; for functions, also starts the `.debug_line`
/// program. The caller must eventually `finishWipNav` (or `deinit` on
/// failure). The return type is optional per the signature, though every
/// path visible here returns a value.
pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, sym_index: u32) UpdateError!?WipNav {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const nav = ip.getNav(nav_index);
    log.debug("initWipNav({})", .{nav.fqn.fmt(ip)});
    const inst_info = nav.srcInst(ip).resolveFull(ip);
    const file = zcu.fileByIndex(inst_info.file);
    // One DWARF compile unit per module, created lazily.
    const unit = try dwarf.getUnit(file.mod);
    const nav_gop = try dwarf.navs.getOrPut(dwarf.gpa, nav_index);
    errdefer _ = dwarf.navs.pop();
    if (!nav_gop.found_existing) nav_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
    const nav_val = zcu.navValue(nav_index);
    var wip_nav: WipNav = .{
        .dwarf = dwarf,
        .pt = pt,
        .unit = unit,
        .entry = nav_gop.value_ptr.*,
        .any_children = false,
        .func = .none,
        .func_high_reloc = undefined,
        .debug_info = .{},
        .debug_line = .{},
        .debug_loclists = .{},
        .pending_types = .{},
    };
    errdefer wip_nav.deinit();
    switch (ip.indexToKey(nav_val.toIntern())) {
        // Constants and any other navs that are neither variables nor
        // functions: emitted as a `decl_var` DIE whose type reference points
        // forward to a trailing `is_const` wrapper entry.
        else => {
            assert(file.zir_loaded);
            const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
            assert(decl_inst.tag == .declaration);
            const tree = try file.getTree(dwarf.gpa);
            const loc = tree.tokenLocation(0, tree.nodes.items(.main_token)[decl_inst.data.declaration.src_node]);
            assert(loc.line == zcu.navSrcLine(nav_index));
            // Parent type + DW_AT_accessibility: tests/comptime decls are
            // private; otherwise derived from pub/priv decl membership.
            const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: {
                const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index).data;
                const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace);
                break :parent .{
                    parent_namespace_ptr.owner_type,
                    switch (decl_extra.name) {
                        .@"comptime",
                        .@"usingnamespace",
                        .unnamed_test,
                        .decltest,
                        => DW.ACCESS.private,
                        _ => if (decl_extra.name.isNamedTest(file.zir))
                            DW.ACCESS.private
                        else if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu }))
                            DW.ACCESS.public
                        else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu }))
                            DW.ACCESS.private
                        else
                            unreachable,
                    },
                };
            } else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private };
            const diw = wip_nav.debug_info.writer(dwarf.gpa);
            // Attribute order below must match the `decl_var` abbreviation.
            try uleb128(diw, @intFromEnum(AbbrevCode.decl_var));
            try wip_nav.refType(Type.fromInterned(parent_type));
            // The line attribute must sit at a fixed offset so it can be
            // patched in place on incremental updates.
            assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
            try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
            try uleb128(diw, loc.column + 1);
            try diw.writeByte(accessibility);
            try wip_nav.strp(nav.name.toSlice(ip));
            try wip_nav.strp(nav.fqn.toSlice(ip));
            const ty = nav_val.typeOf(zcu);
            // Forward reference: resolved below to the `is_const` entry that
            // wraps the actual type.
            const ty_reloc_index = try wip_nav.refForward();
            try wip_nav.exprloc(.{ .addr = .{ .sym = sym_index } });
            try uleb128(diw, nav.status.resolved.alignment.toByteUnits() orelse
                ty.abiAlignment(pt).toByteUnits().?);
            // NOTE(review): wraps as `.func` even though this prong is not a
            // function — confirm the intended AnalUnit key for export lookup.
            const func_unit = InternPool.AnalUnit.wrap(.{ .func = nav_val.toIntern() });
            // DW_AT_external: whether any export refers to this nav.
            try diw.writeByte(@intFromBool(for (if (zcu.single_exports.get(func_unit)) |export_index|
                zcu.all_exports.items[export_index..][0..1]
            else if (zcu.multi_exports.get(func_unit)) |export_range|
                zcu.all_exports.items[export_range.index..][0..export_range.len]
            else
                &.{}) |@"export"|
            {
                if (@"export".exported == .nav and @"export".exported.nav == nav_index) break true;
            } else false));
            wip_nav.finishForward(ty_reloc_index);
            try uleb128(diw, @intFromEnum(AbbrevCode.is_const));
            try wip_nav.refType(ty);
        },
        // Runtime `var` declarations: same `decl_var` layout, but the type is
        // referenced directly and threadlocals get a TLS address expression.
        .variable => |variable| {
            assert(file.zir_loaded);
            const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
            assert(decl_inst.tag == .declaration);
            const tree = try file.getTree(dwarf.gpa);
            const loc = tree.tokenLocation(0, tree.nodes.items(.main_token)[decl_inst.data.declaration.src_node]);
            assert(loc.line == zcu.navSrcLine(nav_index));
            const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: {
                const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index).data;
                const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace);
                break :parent .{
                    parent_namespace_ptr.owner_type,
                    switch (decl_extra.name) {
                        .@"comptime",
                        .@"usingnamespace",
                        .unnamed_test,
                        .decltest,
                        => DW.ACCESS.private,
                        _ => if (decl_extra.name.isNamedTest(file.zir))
                            DW.ACCESS.private
                        else if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu }))
                            DW.ACCESS.public
                        else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu }))
                            DW.ACCESS.private
                        else
                            unreachable,
                    },
                };
            } else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private };
            const diw = wip_nav.debug_info.writer(dwarf.gpa);
            try uleb128(diw, @intFromEnum(AbbrevCode.decl_var));
            try wip_nav.refType(Type.fromInterned(parent_type));
            assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
            try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
            try uleb128(diw, loc.column + 1);
            try diw.writeByte(accessibility);
            try wip_nav.strp(nav.name.toSlice(ip));
            try wip_nav.strp(nav.fqn.toSlice(ip));
            const ty = Type.fromInterned(variable.ty);
            try wip_nav.refType(ty);
            // Threadlocal variables need DW_OP_form_tls_address around the
            // symbol address.
            const addr: Loc = .{ .addr = .{ .sym = sym_index } };
            try wip_nav.exprloc(if (variable.is_threadlocal) .{ .form_tls_address = &addr } else addr);
            try uleb128(diw, nav.status.resolved.alignment.toByteUnits() orelse
                ty.abiAlignment(pt).toByteUnits().?);
            // NOTE(review): `.func` wrapping of a variable value — confirm
            // the intended AnalUnit key for export lookup.
            const func_unit = InternPool.AnalUnit.wrap(.{ .func = nav_val.toIntern() });
            try diw.writeByte(@intFromBool(for (if (zcu.single_exports.get(func_unit)) |export_index|
                zcu.all_exports.items[export_index..][0..1]
            else if (zcu.multi_exports.get(func_unit)) |export_range|
                zcu.all_exports.items[export_range.index..][0..export_range.len]
            else
                &.{}) |@"export"|
            {
                if (@"export".exported == .nav and @"export".exported.nav == nav_index) break true;
            } else false));
        },
        // Functions: emit a `decl_func` DIE with low_pc/high_pc relocations
        // against `sym_index`, then start the `.debug_line` program.
        .func => |func| {
            assert(file.zir_loaded);
            const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
            assert(decl_inst.tag == .declaration);
            const tree = try file.getTree(dwarf.gpa);
            const loc = tree.tokenLocation(0, tree.nodes.items(.main_token)[decl_inst.data.declaration.src_node]);
            assert(loc.line == zcu.navSrcLine(nav_index));
            const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: {
                const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index).data;
                const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace);
                break :parent .{
                    parent_namespace_ptr.owner_type,
                    switch (decl_extra.name) {
                        .@"comptime",
                        .@"usingnamespace",
                        .unnamed_test,
                        .decltest,
                        => DW.ACCESS.private,
                        _ => if (decl_extra.name.isNamedTest(file.zir))
                            DW.ACCESS.private
                        else if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu }))
                            DW.ACCESS.public
                        else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu }))
                            DW.ACCESS.private
                        else
                            unreachable,
                    },
                };
            } else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private };
            const func_type = ip.indexToKey(func.ty).func_type;
            // Marks this WipNav as a function for `finishWipNav`.
            wip_nav.func = nav_val.toIntern();
            const diw = wip_nav.debug_info.writer(dwarf.gpa);
            try uleb128(diw, @intFromEnum(AbbrevCode.decl_func));
            try wip_nav.refType(Type.fromInterned(parent_type));
            assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
            try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
            try uleb128(diw, loc.column + 1);
            try diw.writeByte(accessibility);
            try wip_nav.strp(nav.name.toSlice(ip));
            try wip_nav.strp(nav.fqn.toSlice(ip));
            try wip_nav.refType(Type.fromInterned(func_type.return_type));
            // low_pc: address of the function symbol; bytes are zero
            // placeholders patched via the external relocation.
            const external_relocs = &dwarf.debug_info.section.getUnit(unit).external_relocs;
            try external_relocs.append(dwarf.gpa, .{
                .source_entry = wip_nav.entry,
                .source_off = @intCast(wip_nav.debug_info.items.len),
                .target_sym = sym_index,
            });
            try diw.writeByteNTimes(0, @intFromEnum(dwarf.address_size));
            // high_pc: same symbol; its target_off is filled in with the
            // final function size by `finishWipNav`.
            wip_nav.func_high_reloc = @intCast(external_relocs.items.len);
            try external_relocs.append(dwarf.gpa, .{
                .source_entry = wip_nav.entry,
                .source_off = @intCast(wip_nav.debug_info.items.len),
                .target_sym = sym_index,
            });
            try diw.writeByteNTimes(0, @intFromEnum(dwarf.address_size));
            try uleb128(diw, nav.status.resolved.alignment.toByteUnits() orelse
                target_info.defaultFunctionAlignment(file.mod.resolved_target.result).toByteUnits().?);
            const func_unit = InternPool.AnalUnit.wrap(.{ .func = nav_val.toIntern() });
            try diw.writeByte(@intFromBool(for (if (zcu.single_exports.get(func_unit)) |export_index|
                zcu.all_exports.items[export_index..][0..1]
            else if (zcu.multi_exports.get(func_unit)) |export_range|
                zcu.all_exports.items[export_range.index..][0..export_range.len]
            else
                &.{}) |@"export"|
            {
                if (@"export".exported == .nav and @"export".exported.nav == nav_index) break true;
            } else false));
            try diw.writeByte(@intFromBool(func_type.return_type == .noreturn_type));
            // Start the line program positioned at the function's lbrace.
            const dlw = wip_nav.debug_line.writer(dwarf.gpa);
            try dlw.writeByte(DW.LNS.extended_op);
            if (dwarf.incremental()) {
                // Incremental mode uses a Zig-specific extension that refers
                // back to the decl's .debug_info entry via a cross-section
                // relocation, so file/line come from the entry itself.
                try uleb128(dlw, 1 + dwarf.sectionOffsetBytes());
                try dlw.writeByte(DW.LNE.ZIG_set_decl);
                try dwarf.debug_line.section.getUnit(wip_nav.unit).cross_section_relocs.append(dwarf.gpa, .{
                    .source_entry = wip_nav.entry.toOptional(),
                    .source_off = @intCast(wip_nav.debug_line.items.len),
                    .target_sec = .debug_info,
                    .target_unit = wip_nav.unit,
                    .target_entry = wip_nav.entry.toOptional(),
                });
                try dlw.writeByteNTimes(0, dwarf.sectionOffsetBytes());
                try dlw.writeByte(DW.LNS.set_column);
                try uleb128(dlw, func.lbrace_column + 1);
                try wip_nav.advancePCAndLine(func.lbrace_line, 0);
            } else {
                // Non-incremental mode: standard DW_LNE_set_address (patched
                // via relocation) plus explicit file/column/line state.
                try uleb128(dlw, 1 + @intFromEnum(dwarf.address_size));
                try dlw.writeByte(DW.LNE.set_address);
                try dwarf.debug_line.section.getUnit(wip_nav.unit).external_relocs.append(dwarf.gpa, .{
                    .source_entry = wip_nav.entry,
                    .source_off = @intCast(wip_nav.debug_line.items.len),
                    .target_sym = sym_index,
                });
                try dlw.writeByteNTimes(0, @intFromEnum(dwarf.address_size));
                try dwarf.getUnitFiles(unit).getOrPut(dwarf.gpa, inst_info.file) |file_gop| {} else {}
                try dlw.writeByte(DW.LNS.set_file);
                try uleb128(dlw, file_gop.index);
                try dlw.writeByte(DW.LNS.set_column);
                try uleb128(dlw, func.lbrace_column + 1);
                try wip_nav.advancePCAndLine(@intCast(loc.line + func.lbrace_line), 0);
            }
        },
    }
    return wip_nav;
}
/// Completes the debug information started by `initWipNav` once the nav's
/// machine code has been emitted and its symbol (`sym`) has a final size.
/// For functions this patches the high_pc relocation, closes the DIE's
/// children, and emits the `.debug_aranges` and `.debug_rnglists` entries;
/// for all navs it commits the accumulated `.debug_info`, `.debug_line`, and
/// `.debug_loclists` bytes into their sections and flushes pending types.
///
/// BUG FIX: the previous revision had fragments of the pre-rework,
/// atom-based implementation spliced into this function (references to a
/// nonexistent `self`, `dbg_info_buffer`, `file_pos`, `buf`, and
/// `global_abbrev_relocs`), which both failed to compile and broke the brace
/// structure. Those fragments are removed; only the unit/entry-based flow
/// remains.
pub fn finishWipNav(
    dwarf: *Dwarf,
    pt: Zcu.PerThread,
    nav_index: InternPool.Nav.Index,
    sym: struct { index: u32, addr: u64, size: u64 },
    wip_nav: *WipNav,
) UpdateError!void {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const nav = ip.getNav(nav_index);
    log.debug("finishWipNav({})", .{nav.fqn.fmt(ip)});
    if (wip_nav.func != .none) {
        // Patch the function's high_pc relocation now that the size is known.
        dwarf.debug_info.section.getUnit(wip_nav.unit).external_relocs.items[wip_nav.func_high_reloc].target_off = sym.size;
        if (wip_nav.any_children) {
            // Terminate the children list of the decl_func DIE.
            const diw = wip_nav.debug_info.writer(dwarf.gpa);
            try uleb128(diw, @intFromEnum(AbbrevCode.null));
        } else std.leb.writeUnsignedFixed(
            // No children were emitted: rewrite the fixed-width abbrev code
            // in place to the childless variant.
            AbbrevCode.decl_bytes,
            wip_nav.debug_info.items[0..AbbrevCode.decl_bytes],
            @intFromEnum(AbbrevCode.decl_func_empty),
        );
        // .debug_aranges tuple: (address, length). The address bytes stay
        // zero and are fixed up through the external relocation.
        var aranges_entry = [1]u8{0} ** (8 + 8);
        try dwarf.debug_aranges.section.getUnit(wip_nav.unit).external_relocs.append(dwarf.gpa, .{
            .source_entry = wip_nav.entry,
            .target_sym = sym.index,
        });
        dwarf.writeInt(aranges_entry[0..@intFromEnum(dwarf.address_size)], 0);
        dwarf.writeInt(aranges_entry[@intFromEnum(dwarf.address_size)..][0..@intFromEnum(dwarf.address_size)], sym.size);
        try dwarf.debug_aranges.section.replaceEntry(
            wip_nav.unit,
            wip_nav.entry,
            dwarf,
            aranges_entry[0 .. @intFromEnum(dwarf.address_size) * 2],
        );
        // .debug_rnglists DW_RLE_start_end: both bounds relocated against
        // the function symbol, the end offset by the function size.
        try dwarf.debug_rnglists.section.getUnit(wip_nav.unit).external_relocs.appendSlice(dwarf.gpa, &.{
            .{
                .source_entry = wip_nav.entry,
                .source_off = 1,
                .target_sym = sym.index,
            },
            .{
                .source_entry = wip_nav.entry,
                .source_off = 1 + @intFromEnum(dwarf.address_size),
                .target_sym = sym.index,
                .target_off = sym.size,
            },
        });
        try dwarf.debug_rnglists.section.replaceEntry(
            wip_nav.unit,
            wip_nav.entry,
            dwarf,
            ([1]u8{DW.RLE.start_end} ++ [1]u8{0} ** (8 + 8))[0 .. 1 + @intFromEnum(dwarf.address_size) + @intFromEnum(dwarf.address_size)],
        );
    }
    try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items);
    if (wip_nav.debug_line.items.len > 0) {
        if (!dwarf.incremental()) {
            // Non-incremental line programs are closed with an explicit
            // DW_LNE_end_sequence.
            const dlw = wip_nav.debug_line.writer(dwarf.gpa);
            try dlw.writeByte(DW.LNS.extended_op);
            try uleb128(dlw, 1);
            try dlw.writeByte(DW.LNE.end_sequence);
        }
        try dwarf.debug_line.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_line.items);
    }
    try dwarf.debug_loclists.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_loclists.items);
    try wip_nav.flush();
}
pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) UpdateError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav_val = zcu.navValue(nav_index);
const nav = ip.getNav(nav_index);
log.debug("updateComptimeNav({})", .{nav.fqn.fmt(ip)});
const inst_info = nav.srcInst(ip).resolveFull(ip);
const file = zcu.fileByIndex(inst_info.file);
assert(file.zir_loaded);
const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
assert(decl_inst.tag == .declaration);
const tree = try file.getTree(dwarf.gpa);
const loc = tree.tokenLocation(0, tree.nodes.items(.main_token)[decl_inst.data.declaration.src_node]);
assert(loc.line == zcu.navSrcLine(nav_index));
const unit = try dwarf.getUnit(file.mod);
var wip_nav: WipNav = .{
.dwarf = dwarf,
.pt = pt,
.unit = unit,
.entry = undefined,
.any_children = false,
.func = .none,
.func_high_reloc = undefined,
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
.pending_types = .{},
};
defer wip_nav.deinit();
const nav_gop = try dwarf.navs.getOrPut(dwarf.gpa, nav_index);
errdefer _ = dwarf.navs.pop();
switch (ip.indexToKey(nav_val.toIntern())) {
.struct_type => done: {
const loaded_struct = ip.loadStructType(nav_val.toIntern());
const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: {
const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace);
break :parent .{
parent_namespace_ptr.owner_type,
if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu }))
DW.ACCESS.public
else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu }))
DW.ACCESS.private
else
unreachable,
};
} else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private };
decl_struct: {
if (loaded_struct.zir_index == .none) break :decl_struct;
const value_inst = value_inst: {
const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index);
const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body;
const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1]));
if (break_inst.tag != .break_inline) break :value_inst null;
assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst);
var value_inst = break_inst.data.@"break".operand.toIndex();
while (value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) {
else => break,
.as_node => value_inst = file.zir.extraData(
Zir.Inst.As,
file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index,
).data.operand.toIndex(),
};
break :value_inst value_inst;
};
const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip);
if (type_inst_info.inst != value_inst) break :decl_struct;
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
if (type_gop.found_existing) nav_gop.value_ptr.* = type_gop.value_ptr.* else {
if (!nav_gop.found_existing) nav_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
type_gop.value_ptr.* = nav_gop.value_ptr.*;
}
} else if (self.bin_file.cast(.wasm)) |wasm_file| {
_ = wasm_file;
// const debug_info = wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
// debug_info.items[atom.off + reloc.offset ..][0..buf.len].* = buf;
} else unreachable;
}
wip_nav.entry = nav_gop.value_ptr.*;
const diw = wip_nav.debug_info.writer(dwarf.gpa);
switch (loaded_struct.layout) {
.auto, .@"extern" => {
try uleb128(diw, @intFromEnum(@as(AbbrevCode, if (loaded_struct.field_types.len == 0)
.decl_namespace_struct
else
.decl_struct)));
try wip_nav.refType(Type.fromInterned(parent_type));
assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
try uleb128(diw, loc.column + 1);
try diw.writeByte(accessibility);
try wip_nav.strp(nav.name.toSlice(ip));
if (loaded_struct.field_types.len == 0) try diw.writeByte(@intFromBool(false)) else {
try uleb128(diw, nav_val.toType().abiSize(pt));
try uleb128(diw, nav_val.toType().abiAlignment(pt).toByteUnits().?);
for (0..loaded_struct.field_types.len) |field_index| {
const is_comptime = loaded_struct.fieldIsComptime(ip, field_index);
try uleb128(diw, @intFromEnum(@as(AbbrevCode, if (is_comptime) .struct_field_comptime else .struct_field)));
if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index});
defer dwarf.gpa.free(field_name);
try wip_nav.strp(field_name);
}
const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
if (!is_comptime) {
try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]);
try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse
field_type.abiAlignment(pt).toByteUnits().?);
}
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
},
.@"packed" => {
try uleb128(diw, @intFromEnum(AbbrevCode.decl_packed_struct));
try wip_nav.refType(Type.fromInterned(parent_type));
assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
try uleb128(diw, loc.column + 1);
try diw.writeByte(accessibility);
try wip_nav.strp(nav.name.toSlice(ip));
try wip_nav.refType(Type.fromInterned(loaded_struct.backingIntTypeUnordered(ip)));
var field_bit_offset: u16 = 0;
for (0..loaded_struct.field_types.len) |field_index| {
try uleb128(diw, @intFromEnum(@as(AbbrevCode, .packed_struct_field)));
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).unwrap().?.toSlice(ip));
const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
try uleb128(diw, field_bit_offset);
field_bit_offset += @intCast(field_type.bitSize(pt));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
}
break :done;
}
if (!nav_gop.found_existing) nav_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
wip_nav.entry = nav_gop.value_ptr.*;
const diw = wip_nav.debug_info.writer(dwarf.gpa);
try uleb128(diw, @intFromEnum(AbbrevCode.decl_alias));
try wip_nav.refType(Type.fromInterned(parent_type));
assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
try uleb128(diw, loc.column + 1);
try diw.writeByte(accessibility);
try wip_nav.strp(nav.name.toSlice(ip));
try wip_nav.refType(nav_val.toType());
},
.enum_type => done: {
const loaded_enum = ip.loadEnumType(nav_val.toIntern());
const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: {
const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace);
break :parent .{
parent_namespace_ptr.owner_type,
if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu }))
DW.ACCESS.public
else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu }))
DW.ACCESS.private
else
unreachable,
};
} else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private };
decl_enum: {
if (loaded_enum.zir_index == .none) break :decl_enum;
const value_inst = value_inst: {
const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index);
const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body;
const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1]));
if (break_inst.tag != .break_inline) break :value_inst null;
assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst);
var value_inst = break_inst.data.@"break".operand.toIndex();
while (value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) {
else => break,
.as_node => value_inst = file.zir.extraData(
Zir.Inst.As,
file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index,
).data.operand.toIndex(),
};
break :value_inst value_inst;
};
const type_inst_info = loaded_enum.zir_index.unwrap().?.resolveFull(ip);
if (type_inst_info.inst != value_inst) break :decl_enum;
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
if (type_gop.found_existing) nav_gop.value_ptr.* = type_gop.value_ptr.* else {
if (!nav_gop.found_existing) nav_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
type_gop.value_ptr.* = nav_gop.value_ptr.*;
}
wip_nav.entry = nav_gop.value_ptr.*;
const diw = wip_nav.debug_info.writer(dwarf.gpa);
try uleb128(diw, @intFromEnum(AbbrevCode.decl_enum));
try wip_nav.refType(Type.fromInterned(parent_type));
assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
try uleb128(diw, loc.column + 1);
try diw.writeByte(accessibility);
try wip_nav.strp(nav.name.toSlice(ip));
try wip_nav.refType(Type.fromInterned(loaded_enum.tag_ty));
for (0..loaded_enum.names.len) |field_index| {
try wip_nav.enumConstValue(loaded_enum, .{
.signed = .signed_enum_field,
.unsigned = .unsigned_enum_field,
}, field_index);
try wip_nav.strp(loaded_enum.names.get(ip)[field_index].toSlice(ip));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
break :done;
}
if (!nav_gop.found_existing) nav_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
wip_nav.entry = nav_gop.value_ptr.*;
const diw = wip_nav.debug_info.writer(dwarf.gpa);
try uleb128(diw, @intFromEnum(AbbrevCode.decl_alias));
try wip_nav.refType(Type.fromInterned(parent_type));
assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
try uleb128(diw, loc.column + 1);
try diw.writeByte(accessibility);
try wip_nav.strp(nav.name.toSlice(ip));
try wip_nav.refType(nav_val.toType());
},
.union_type => done: {
const loaded_union = ip.loadUnionType(nav_val.toIntern());
const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: {
const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace);
break :parent .{
parent_namespace_ptr.owner_type,
if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu }))
DW.ACCESS.public
else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu }))
DW.ACCESS.private
else
unreachable,
};
} else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private };
decl_union: {
const value_inst = value_inst: {
const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index);
const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body;
const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1]));
if (break_inst.tag != .break_inline) break :value_inst null;
assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst);
var value_inst = break_inst.data.@"break".operand.toIndex();
while (value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) {
else => break,
.as_node => value_inst = file.zir.extraData(
Zir.Inst.As,
file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index,
).data.operand.toIndex(),
};
break :value_inst value_inst;
};
const type_inst_info = loaded_union.zir_index.resolveFull(ip);
if (type_inst_info.inst != value_inst) break :decl_union;
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
if (type_gop.found_existing) nav_gop.value_ptr.* = type_gop.value_ptr.* else {
if (!nav_gop.found_existing) nav_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
type_gop.value_ptr.* = nav_gop.value_ptr.*;
}
wip_nav.entry = nav_gop.value_ptr.*;
const diw = wip_nav.debug_info.writer(dwarf.gpa);
try uleb128(diw, @intFromEnum(AbbrevCode.decl_union));
try wip_nav.refType(Type.fromInterned(parent_type));
assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
try uleb128(diw, loc.column + 1);
try diw.writeByte(accessibility);
try wip_nav.strp(nav.name.toSlice(ip));
const union_layout = pt.getUnionLayout(loaded_union);
try uleb128(diw, union_layout.abi_size);
try uleb128(diw, union_layout.abi_align.toByteUnits().?);
const loaded_tag = loaded_union.loadTagType(ip);
if (loaded_union.hasTag(ip)) {
try uleb128(diw, @intFromEnum(AbbrevCode.tagged_union));
try wip_nav.infoSectionOffset(
.debug_info,
wip_nav.unit,
wip_nav.entry,
@intCast(wip_nav.debug_info.items.len + dwarf.sectionOffsetBytes()),
);
{
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("tag");
try wip_nav.refType(Type.fromInterned(loaded_union.enum_tag_ty));
try uleb128(diw, union_layout.tagOffset());
for (0..loaded_union.field_types.len) |field_index| {
try wip_nav.enumConstValue(loaded_tag, .{
.signed = .signed_tagged_union_field,
.unsigned = .unsigned_tagged_union_field,
}, field_index);
{
try uleb128(diw, @intFromEnum(AbbrevCode.struct_field));
try wip_nav.strp(loaded_tag.names.get(ip)[field_index].toSlice(ip));
const field_type = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
try uleb128(diw, union_layout.payloadOffset());
try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse
if (field_type.isNoReturn(zcu)) 1 else field_type.abiAlignment(pt).toByteUnits().?);
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
if (ip.indexToKey(loaded_union.enum_tag_ty).enum_type == .generated_tag)
try wip_nav.pending_types.append(dwarf.gpa, loaded_union.enum_tag_ty);
} else for (0..loaded_union.field_types.len) |field_index| {
try uleb128(diw, @intFromEnum(AbbrevCode.untagged_union_field));
try wip_nav.strp(loaded_tag.names.get(ip)[field_index].toSlice(ip));
const field_type = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse
field_type.abiAlignment(pt).toByteUnits().?);
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
break :done;
}
if (!nav_gop.found_existing) nav_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
wip_nav.entry = nav_gop.value_ptr.*;
const diw = wip_nav.debug_info.writer(dwarf.gpa);
try uleb128(diw, @intFromEnum(AbbrevCode.decl_alias));
try wip_nav.refType(Type.fromInterned(parent_type));
assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
try uleb128(diw, loc.column + 1);
try diw.writeByte(accessibility);
try wip_nav.strp(nav.name.toSlice(ip));
try wip_nav.refType(nav_val.toType());
},
.opaque_type => done: {
const loaded_opaque = ip.loadOpaqueType(nav_val.toIntern());
const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: {
const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace);
break :parent .{
parent_namespace_ptr.owner_type,
if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu }))
DW.ACCESS.public
else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu }))
DW.ACCESS.private
else
unreachable,
};
} else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private };
decl_opaque: {
const value_inst = value_inst: {
const decl_extra = file.zir.extraData(Zir.Inst.Declaration, decl_inst.data.declaration.payload_index);
const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body;
const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1]));
if (break_inst.tag != .break_inline) break :value_inst null;
assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst);
var value_inst = break_inst.data.@"break".operand.toIndex();
while (value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) {
else => break,
.as_node => value_inst = file.zir.extraData(
Zir.Inst.As,
file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index,
).data.operand.toIndex(),
};
break :value_inst value_inst;
};
const type_inst_info = loaded_opaque.zir_index.resolveFull(ip);
if (type_inst_info.inst != value_inst) break :decl_opaque;
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
if (type_gop.found_existing) nav_gop.value_ptr.* = type_gop.value_ptr.* else {
if (!nav_gop.found_existing) nav_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
type_gop.value_ptr.* = nav_gop.value_ptr.*;
}
wip_nav.entry = nav_gop.value_ptr.*;
const diw = wip_nav.debug_info.writer(dwarf.gpa);
try uleb128(diw, @intFromEnum(AbbrevCode.decl_namespace_struct));
try wip_nav.refType(Type.fromInterned(parent_type));
assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
try uleb128(diw, loc.column + 1);
try diw.writeByte(accessibility);
try wip_nav.strp(nav.name.toSlice(ip));
try diw.writeByte(@intFromBool(false));
break :done;
}
if (!nav_gop.found_existing) nav_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
wip_nav.entry = nav_gop.value_ptr.*;
const diw = wip_nav.debug_info.writer(dwarf.gpa);
try uleb128(diw, @intFromEnum(AbbrevCode.decl_alias));
try wip_nav.refType(Type.fromInterned(parent_type));
assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf));
try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian);
try uleb128(diw, loc.column + 1);
try diw.writeByte(accessibility);
try wip_nav.strp(nav.name.toSlice(ip));
try wip_nav.refType(nav_val.toType());
},
else => {
_ = dwarf.navs.pop();
return;
},
}
try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items);
try dwarf.debug_loclists.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_loclists.items);
try wip_nav.flush();
}
fn addDIFile(self: *Dwarf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !u28 {
const file_scope = zcu.navFileScope(nav_index);
const gop = try self.di_files.getOrPut(self.allocator, file_scope);
if (!gop.found_existing) {
if (self.bin_file.cast(.elf)) |elf_file| {
elf_file.markDirty(elf_file.debug_line_section_index.?);
} else if (self.bin_file.cast(.macho)) |macho_file| {
if (macho_file.base.isRelocatable()) {
macho_file.markDirty(macho_file.debug_line_sect_index.?);
fn updateType(
dwarf: *Dwarf,
pt: Zcu.PerThread,
type_index: InternPool.Index,
pending_types: *std.ArrayListUnmanaged(InternPool.Index),
) UpdateError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = Type.fromInterned(type_index);
switch (type_index) {
.generic_poison_type => log.debug("updateType({s})", .{"anytype"}),
else => log.debug("updateType({})", .{ty.fmt(pt)}),
}
var wip_nav: WipNav = .{
.dwarf = dwarf,
.pt = pt,
.unit = .main,
.entry = dwarf.types.get(type_index).?,
.any_children = false,
.func = .none,
.func_high_reloc = undefined,
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
.pending_types = pending_types.*,
};
defer {
pending_types.* = wip_nav.pending_types;
wip_nav.pending_types = .{};
wip_nav.deinit();
}
const diw = wip_nav.debug_info.writer(dwarf.gpa);
const name = switch (type_index) {
.generic_poison_type => "",
else => try std.fmt.allocPrint(dwarf.gpa, "{}", .{ty.fmt(pt)}),
};
defer dwarf.gpa.free(name);
switch (ip.indexToKey(type_index)) {
.int_type => |int_type| {
try uleb128(diw, @intFromEnum(AbbrevCode.numeric_type));
try wip_nav.strp(name);
try diw.writeByte(switch (int_type.signedness) {
inline .signed, .unsigned => |signedness| @field(DW.ATE, @tagName(signedness)),
});
try uleb128(diw, int_type.bits);
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
},
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => {
const ptr_child_type = Type.fromInterned(ptr_type.child);
try uleb128(diw, @intFromEnum(AbbrevCode.ptr_type));
try wip_nav.strp(name);
try diw.writeByte(@intFromBool(ptr_type.flags.is_allowzero));
try uleb128(diw, ptr_type.flags.alignment.toByteUnits() orelse
ptr_child_type.abiAlignment(pt).toByteUnits().?);
try diw.writeByte(@intFromEnum(ptr_type.flags.address_space));
if (ptr_type.flags.is_const or ptr_type.flags.is_volatile) try wip_nav.infoSectionOffset(
.debug_info,
wip_nav.unit,
wip_nav.entry,
@intCast(wip_nav.debug_info.items.len + dwarf.sectionOffsetBytes()),
) else try wip_nav.refType(ptr_child_type);
if (ptr_type.flags.is_const) {
try uleb128(diw, @intFromEnum(AbbrevCode.is_const));
if (ptr_type.flags.is_volatile) try wip_nav.infoSectionOffset(
.debug_info,
wip_nav.unit,
wip_nav.entry,
@intCast(wip_nav.debug_info.items.len + dwarf.sectionOffsetBytes()),
) else try wip_nav.refType(ptr_child_type);
}
if (ptr_type.flags.is_volatile) {
try uleb128(diw, @intFromEnum(AbbrevCode.is_volatile));
try wip_nav.refType(ptr_child_type);
}
},
.Slice => {
try uleb128(diw, @intFromEnum(AbbrevCode.struct_type));
try wip_nav.strp(name);
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("ptr");
const ptr_field_type = ty.slicePtrFieldType(zcu);
try wip_nav.refType(ptr_field_type);
try uleb128(diw, 0);
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("len");
const len_field_type = Type.usize;
try wip_nav.refType(len_field_type);
try uleb128(diw, len_field_type.abiAlignment(pt).forward(ptr_field_type.abiSize(pt)));
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
},
inline .array_type, .vector_type => |array_type, ty_tag| {
try uleb128(diw, @intFromEnum(AbbrevCode.array_type));
try wip_nav.strp(name);
try wip_nav.refType(Type.fromInterned(array_type.child));
try diw.writeByte(@intFromBool(ty_tag == .vector_type));
try uleb128(diw, @intFromEnum(AbbrevCode.array_index));
try wip_nav.refType(Type.usize);
try uleb128(diw, array_type.len);
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.opt_type => |opt_child_type_index| {
const opt_child_type = Type.fromInterned(opt_child_type_index);
try uleb128(diw, @intFromEnum(AbbrevCode.union_type));
try wip_nav.strp(name);
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
if (opt_child_type.isNoReturn(zcu)) {
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("null");
try wip_nav.refType(Type.null);
try uleb128(diw, 0);
} else {
const d_sym = macho_file.getDebugSymbols().?;
d_sym.markDirty(d_sym.debug_line_section_index.?, macho_file);
try uleb128(diw, @intFromEnum(AbbrevCode.tagged_union));
try wip_nav.infoSectionOffset(
.debug_info,
wip_nav.unit,
wip_nav.entry,
@intCast(wip_nav.debug_info.items.len + dwarf.sectionOffsetBytes()),
);
{
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("has_value");
const repr: enum { unpacked, error_set, pointer } = switch (opt_child_type_index) {
.anyerror_type => .error_set,
else => switch (ip.indexToKey(opt_child_type_index)) {
else => .unpacked,
.error_set_type, .inferred_error_set_type => .error_set,
.ptr_type => |ptr_type| if (ptr_type.flags.is_allowzero) .unpacked else .pointer,
},
};
switch (repr) {
.unpacked => {
try wip_nav.refType(Type.bool);
try uleb128(diw, if (opt_child_type.hasRuntimeBits(pt))
opt_child_type.abiSize(pt)
else
0);
},
.error_set => {
try wip_nav.refType(Type.fromInterned(try pt.intern(.{ .int_type = .{
.signedness = .unsigned,
.bits = pt.zcu.errorSetBits(),
} })));
try uleb128(diw, 0);
},
.pointer => {
try wip_nav.refType(Type.usize);
try uleb128(diw, 0);
},
}
try uleb128(diw, @intFromEnum(AbbrevCode.unsigned_tagged_union_field));
try uleb128(diw, 0);
{
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("null");
try wip_nav.refType(Type.null);
try uleb128(diw, 0);
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
try uleb128(diw, @intFromEnum(AbbrevCode.tagged_union_default_field));
{
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("?");
try wip_nav.refType(opt_child_type);
try uleb128(diw, 0);
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
} else if (self.bin_file.cast(.wasm)) |_| {} else unreachable;
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.anyframe_type => unreachable,
.error_union_type => |error_union_type| {
const error_union_error_set_type = Type.fromInterned(error_union_type.error_set_type);
const error_union_payload_type = Type.fromInterned(error_union_type.payload_type);
const error_union_error_set_offset = codegen.errUnionErrorOffset(error_union_payload_type, pt);
const error_union_payload_offset = codegen.errUnionPayloadOffset(error_union_payload_type, pt);
try uleb128(diw, @intFromEnum(AbbrevCode.union_type));
try wip_nav.strp(name);
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
{
try uleb128(diw, @intFromEnum(AbbrevCode.tagged_union));
try wip_nav.infoSectionOffset(
.debug_info,
wip_nav.unit,
wip_nav.entry,
@intCast(wip_nav.debug_info.items.len + dwarf.sectionOffsetBytes()),
);
{
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("is_error");
const is_error_field_type = Type.fromInterned(try pt.intern(.{
.opt_type = error_union_type.error_set_type,
}));
try wip_nav.refType(is_error_field_type);
try uleb128(diw, error_union_error_set_offset);
try uleb128(diw, @intFromEnum(AbbrevCode.unsigned_tagged_union_field));
try uleb128(diw, 0);
{
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("value");
try wip_nav.refType(error_union_payload_type);
try uleb128(diw, error_union_payload_offset);
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
try uleb128(diw, @intFromEnum(AbbrevCode.tagged_union_default_field));
{
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("error");
try wip_nav.refType(error_union_error_set_type);
try uleb128(diw, error_union_error_set_offset);
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.simple_type => |simple_type| switch (simple_type) {
.f16,
.f32,
.f64,
.f80,
.f128,
.usize,
.isize,
.c_char,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.bool,
=> {
try uleb128(diw, @intFromEnum(AbbrevCode.numeric_type));
try wip_nav.strp(name);
try diw.writeByte(if (type_index == .bool_type)
DW.ATE.boolean
else if (ty.isRuntimeFloat())
DW.ATE.float
else if (ty.isSignedInt(zcu))
DW.ATE.signed
else if (ty.isUnsignedInt(zcu))
DW.ATE.unsigned
else
unreachable);
try uleb128(diw, ty.bitSize(pt));
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
},
.anyopaque,
.void,
.type,
.comptime_int,
.comptime_float,
.noreturn,
.null,
.undefined,
.enum_literal,
.generic_poison,
=> {
try uleb128(diw, @intFromEnum(AbbrevCode.void_type));
try wip_nav.strp(if (type_index == .generic_poison_type) "anytype" else name);
},
.anyerror => return, // delay until flush
.atomic_order,
.atomic_rmw_op,
.calling_convention,
.address_space,
.float_mode,
.reduce_op,
.call_modifier,
.prefetch_options,
.export_options,
.extern_options,
.type_info,
.adhoc_inferred_error_set,
=> unreachable,
},
.struct_type,
.union_type,
.opaque_type,
=> unreachable,
.anon_struct_type => |anon_struct_type| {
try uleb128(diw, @intFromEnum(AbbrevCode.struct_type));
try wip_nav.strp(name);
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
var field_byte_offset: u64 = 0;
for (0..anon_struct_type.types.len) |field_index| {
const comptime_value = anon_struct_type.values.get(ip)[field_index];
try uleb128(diw, @intFromEnum(@as(AbbrevCode, if (comptime_value != .none) .struct_field_comptime else .struct_field)));
if (anon_struct_type.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index});
defer dwarf.gpa.free(field_name);
try wip_nav.strp(field_name);
}
const field_type = Type.fromInterned(anon_struct_type.types.get(ip)[field_index]);
try wip_nav.refType(field_type);
if (comptime_value == .none) {
const field_align = field_type.abiAlignment(pt);
field_byte_offset = field_align.forward(field_byte_offset);
try uleb128(diw, field_byte_offset);
try uleb128(diw, field_type.abiAlignment(pt).toByteUnits().?);
field_byte_offset += field_type.abiSize(pt);
}
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.enum_type => {
const loaded_enum = ip.loadEnumType(type_index);
try uleb128(diw, @intFromEnum(AbbrevCode.enum_type));
try wip_nav.strp(name);
try wip_nav.refType(Type.fromInterned(loaded_enum.tag_ty));
for (0..loaded_enum.names.len) |field_index| {
try wip_nav.enumConstValue(loaded_enum, .{
.signed = .signed_enum_field,
.unsigned = .unsigned_enum_field,
}, field_index);
try wip_nav.strp(loaded_enum.names.get(ip)[field_index].toSlice(ip));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.func_type => |func_type| {
const is_nullary = func_type.param_types.len == 0 and !func_type.is_var_args;
try uleb128(diw, @intFromEnum(@as(AbbrevCode, if (is_nullary) .nullary_func_type else .func_type)));
try wip_nav.strp(name);
try diw.writeByte(@intFromEnum(@as(DW.CC, switch (func_type.cc) {
.Unspecified, .C => .normal,
.Naked, .Async, .Inline => .nocall,
.Interrupt, .Signal => .nocall,
.Stdcall => .BORLAND_stdcall,
.Fastcall => .BORLAND_fastcall,
.Vectorcall => .LLVM_vectorcall,
.Thiscall => .BORLAND_thiscall,
.APCS => .nocall,
.AAPCS => .LLVM_AAPCS,
.AAPCSVFP => .LLVM_AAPCS_VFP,
.SysV => .LLVM_X86_64SysV,
.Win64 => .LLVM_Win64,
.Kernel, .Fragment, .Vertex => .nocall,
})));
try wip_nav.refType(Type.fromInterned(func_type.return_type));
if (!is_nullary) {
for (0..func_type.param_types.len) |param_index| {
try uleb128(diw, @intFromEnum(AbbrevCode.func_type_param));
try wip_nav.refType(Type.fromInterned(func_type.param_types.get(ip)[param_index]));
}
if (func_type.is_var_args) try uleb128(diw, @intFromEnum(AbbrevCode.is_var_args));
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
},
.error_set_type => |error_set_type| {
try uleb128(diw, @intFromEnum(@as(AbbrevCode, if (error_set_type.names.len > 0) .enum_type else .empty_enum_type)));
try wip_nav.strp(name);
try wip_nav.refType(Type.fromInterned(try pt.intern(.{ .int_type = .{
.signedness = .unsigned,
.bits = pt.zcu.errorSetBits(),
} })));
for (0..error_set_type.names.len) |field_index| {
const field_name = error_set_type.names.get(ip)[field_index];
try uleb128(diw, @intFromEnum(AbbrevCode.unsigned_enum_field));
try uleb128(diw, ip.getErrorValueIfExists(field_name).?);
try wip_nav.strp(field_name.toSlice(ip));
}
if (error_set_type.names.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.inferred_error_set_type => |func| {
try uleb128(diw, @intFromEnum(AbbrevCode.inferred_error_set_type));
try wip_nav.strp(name);
try wip_nav.refType(Type.fromInterned(ip.funcIesResolvedUnordered(func)));
},
// values, not types
.undef,
.simple_value,
.variable,
.@"extern",
.func,
.int,
.err,
.error_union,
.enum_literal,
.enum_tag,
.empty_enum_value,
.float,
.ptr,
.slice,
.opt,
.aggregate,
.un,
// memoization, not types
.memoized_call,
=> unreachable,
}
return @intCast(gop.index + 1);
try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items);
}
fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
dirs: []const []const u8,
files: []const []const u8,
files_dirs_indexes: []u28,
} {
var dirs = std.StringArrayHashMap(void).init(arena);
try dirs.ensureTotalCapacity(self.di_files.count());
pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternPool.Index) UpdateError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = Type.fromInterned(type_index);
log.debug("updateContainerType({}({d}))", .{ ty.fmt(pt), @intFromEnum(type_index) });
var files = std.ArrayList([]const u8).init(arena);
try files.ensureTotalCapacityPrecise(self.di_files.count());
var files_dir_indexes = std.ArrayList(u28).init(arena);
try files_dir_indexes.ensureTotalCapacity(self.di_files.count());
for (self.di_files.keys()) |dif| {
const full_path = try dif.mod.root.joinString(arena, dif.sub_file_path);
const dir_path = std.fs.path.dirname(full_path) orelse ".";
const sub_file_path = std.fs.path.basename(full_path);
// https://github.com/ziglang/zig/issues/19353
var buffer: [std.fs.max_path_bytes]u8 = undefined;
const resolved = if (!std.fs.path.isAbsolute(dir_path))
std.posix.realpath(dir_path, &buffer) catch dir_path
else
dir_path;
const dir_index: u28 = index: {
const dirs_gop = dirs.getOrPutAssumeCapacity(try arena.dupe(u8, resolved));
break :index @intCast(dirs_gop.index + 1);
const inst_info = ty.typeDeclInst(zcu).?.resolveFull(ip);
const file = zcu.fileByIndex(inst_info.file);
if (inst_info.inst == .main_struct_inst) {
const unit = try dwarf.getUnit(file.mod);
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, type_index);
if (!type_gop.found_existing) type_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
var wip_nav: WipNav = .{
.dwarf = dwarf,
.pt = pt,
.unit = unit,
.entry = type_gop.value_ptr.*,
.any_children = false,
.func = .none,
.func_high_reloc = undefined,
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
.pending_types = .{},
};
defer wip_nav.deinit();
files_dir_indexes.appendAssumeCapacity(dir_index);
files.appendAssumeCapacity(sub_file_path);
}
const loaded_struct = ip.loadStructType(type_index);
return .{
.dirs = dirs.keys(),
.files = files.items,
.files_dirs_indexes = files_dir_indexes.items,
};
}
/// Emits DWARF debug info for an error set type by enumerating the error
/// names it declares. Thin wrapper that fetches the name list and delegates
/// to `addDbgInfoErrorSetNames`.
fn addDbgInfoErrorSet(
    pt: Zcu.PerThread,
    ty: Type,
    target: std.Target,
    dbg_info_buffer: *std.ArrayList(u8),
) !void {
    const ip = &pt.zcu.intern_pool;
    const names = ty.errorSetNames(pt.zcu).get(ip);
    return addDbgInfoErrorSetNames(pt, ty, names, target, dbg_info_buffer);
}
/// Emits a DWARF enumeration-type DIE for an error set into `dbg_info_buffer`:
/// one enumerator per name in `error_names`, plus a synthetic "(no error)"
/// enumerator with value 0, terminated by a null child delimiter.
fn addDbgInfoErrorSetNames(
    pt: Zcu.PerThread,
    /// Used for printing the type name only.
    ty: Type,
    error_names: []const InternPool.NullTerminatedString,
    target: std.Target,
    dbg_info_buffer: *std.ArrayList(u8),
) !void {
    const target_endian = target.cpu.arch.endian();
    // DW.AT.enumeration_type
    try dbg_info_buffer.append(@intFromEnum(AbbrevCode.enum_type));
    // DW.AT.byte_size, DW.FORM.udata
    // Every error set shares `anyerror`'s ABI size.
    const abi_size = Type.anyerror.abiSize(pt);
    try leb128.writeUleb128(dbg_info_buffer.writer(), abi_size);
    // DW.AT.name, DW.FORM.string
    try ty.print(dbg_info_buffer.writer(), pt);
    // NUL terminator for the printed type name (DW.FORM.string is C-string).
    try dbg_info_buffer.append(0);
    // DW.AT.enumerator
    // Synthetic entry for the 0 value, which no real error ever takes.
    const no_error = "(no error)";
    // +2 = one abbrev-code byte + one string NUL; @sizeOf(u64) = const_value.
    try dbg_info_buffer.ensureUnusedCapacity(no_error.len + 2 + @sizeOf(u64));
    dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.enum_variant));
    // DW.AT.name, DW.FORM.string
    dbg_info_buffer.appendSliceAssumeCapacity(no_error);
    dbg_info_buffer.appendAssumeCapacity(0);
    // DW.AT.const_value, DW.FORM.data8
    mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian);
    for (error_names) |error_name| {
        const int = try pt.getErrorValue(error_name);
        const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool);
        // DW.AT.enumerator
        try dbg_info_buffer.ensureUnusedCapacity(error_name_slice.len + 2 + @sizeOf(u64));
        dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.enum_variant));
        // DW.AT.name, DW.FORM.string
        // `len + 1` intentionally includes the trailing sentinel of the
        // null-terminated name so the string is written NUL-terminated in
        // a single append. (NOTE(review): relies on `toSlice` returning a
        // 0-sentinel slice — confirm.)
        dbg_info_buffer.appendSliceAssumeCapacity(error_name_slice[0 .. error_name_slice.len + 1]);
        // DW.AT.const_value, DW.FORM.data8
        mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), int, target_endian);
    }
    // DW.AT.enumeration_type delimit children
    try dbg_info_buffer.append(0);
}
const Kind = enum { src_fn, di_atom };
/// Allocates a fresh, zero-initialized atom of the requested kind and
/// returns its index into the corresponding list (`src_fns` or `di_atoms`).
fn createAtom(self: *Dwarf, comptime kind: Kind) !Atom.Index {
    const index = blk: {
        switch (kind) {
            .src_fn => {
                // The new atom's index is the pre-append length.
                const index: Atom.Index = @intCast(self.src_fns.items.len);
                _ = try self.src_fns.addOne(self.allocator);
                break :blk index;
            },
            .di_atom => {
                const index: Atom.Index = @intCast(self.di_atoms.items.len);
                _ = try self.di_atoms.addOne(self.allocator);
                break :blk index;
            },
            // NOTE(review): the lines below (struct DIE emission through
            // `wip_nav`/`loaded_struct`/`diw`) do not belong in `createAtom`;
            // they appear to have been spliced here from `updateContainerType`
            // by a bad merge or diff rendering. Confirm against upstream and
            // remove — none of these identifiers are in scope here.
            const diw = wip_nav.debug_info.writer(dwarf.gpa);
            try uleb128(diw, @intFromEnum(@as(AbbrevCode, if (loaded_struct.field_types.len == 0) .namespace_file else .file)));
            const file_gop = try dwarf.getUnitFiles(unit).getOrPut(dwarf.gpa, inst_info.file);
            try uleb128(diw, file_gop.index);
            try wip_nav.strp(loaded_struct.name.toSlice(ip));
            if (loaded_struct.field_types.len > 0) {
                try uleb128(diw, ty.abiSize(pt));
                try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
                for (0..loaded_struct.field_types.len) |field_index| {
                    const is_comptime = loaded_struct.fieldIsComptime(ip, field_index);
                    try uleb128(diw, @intFromEnum(@as(AbbrevCode, if (is_comptime) .struct_field_comptime else .struct_field)));
                    if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
                        const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index});
                        defer dwarf.gpa.free(field_name);
                        try wip_nav.strp(field_name);
                    }
                    const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
                    try wip_nav.refType(field_type);
                    if (!is_comptime) {
                        try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]);
                        try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse
                            field_type.abiAlignment(pt).toByteUnits().?);
                    }
                }
                try uleb128(diw, @intFromEnum(AbbrevCode.null));
            }
    };
    const atom = self.getAtomPtr(kind, index);
    // A freshly created atom has no file offset, no length, and is not yet
    // linked into the atom chain.
    atom.* = .{
        .off = 0,
        .len = 0,
        .prev_index = null,
        .next_index = null,
    };
    return index;
}
fn getOrCreateAtomForNav(self: *Dwarf, comptime kind: Kind, nav_index: InternPool.Nav.Index) !Atom.Index {
switch (kind) {
.src_fn => {
const gop = try self.src_fn_navs.getOrPut(self.allocator, nav_index);
if (!gop.found_existing) {
gop.value_ptr.* = try self.createAtom(kind);
}
return gop.value_ptr.*;
},
.di_atom => {
const gop = try self.di_atom_navs.getOrPut(self.allocator, nav_index);
if (!gop.found_existing) {
gop.value_ptr.* = try self.createAtom(kind);
}
return gop.value_ptr.*;
},
try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items);
try wip_nav.flush();
} else {
const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
assert(decl_inst.tag == .extended);
if (switch (decl_inst.data.extended.opcode) {
.struct_decl => @as(Zir.Inst.StructDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
.enum_decl => @as(Zir.Inst.EnumDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
.union_decl => @as(Zir.Inst.UnionDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
.opaque_decl => @as(Zir.Inst.OpaqueDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
.reify => @as(Zir.Inst.NameStrategy, @enumFromInt(decl_inst.data.extended.small)),
else => unreachable,
} == .parent) return;
const unit = try dwarf.getUnit(file.mod);
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, type_index);
if (!type_gop.found_existing) type_gop.value_ptr.* = try dwarf.addCommonEntry(unit);
var wip_nav: WipNav = .{
.dwarf = dwarf,
.pt = pt,
.unit = unit,
.entry = type_gop.value_ptr.*,
.any_children = false,
.func = .none,
.func_high_reloc = undefined,
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
.pending_types = .{},
};
defer wip_nav.deinit();
const diw = wip_nav.debug_info.writer(dwarf.gpa);
const name = try std.fmt.allocPrint(dwarf.gpa, "{}", .{ty.fmt(pt)});
defer dwarf.gpa.free(name);
switch (ip.indexToKey(type_index)) {
.struct_type => {
const loaded_struct = ip.loadStructType(type_index);
switch (loaded_struct.layout) {
.auto, .@"extern" => {
try uleb128(diw, @intFromEnum(@as(AbbrevCode, if (loaded_struct.field_types.len == 0)
.namespace_struct_type
else
.struct_type)));
try wip_nav.strp(name);
if (loaded_struct.field_types.len == 0) try diw.writeByte(@intFromBool(false)) else {
try uleb128(diw, ty.abiSize(pt));
try uleb128(diw, ty.abiAlignment(pt).toByteUnits().?);
for (0..loaded_struct.field_types.len) |field_index| {
const is_comptime = loaded_struct.fieldIsComptime(ip, field_index);
try uleb128(diw, @intFromEnum(@as(AbbrevCode, if (is_comptime) .struct_field_comptime else .struct_field)));
if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index});
defer dwarf.gpa.free(field_name);
try wip_nav.strp(field_name);
}
const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
if (!is_comptime) {
try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]);
try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse
field_type.abiAlignment(pt).toByteUnits().?);
}
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
},
.@"packed" => {
try uleb128(diw, @intFromEnum(AbbrevCode.packed_struct_type));
try wip_nav.strp(name);
try wip_nav.refType(Type.fromInterned(loaded_struct.backingIntTypeUnordered(ip)));
var field_bit_offset: u16 = 0;
for (0..loaded_struct.field_types.len) |field_index| {
try uleb128(diw, @intFromEnum(@as(AbbrevCode, .packed_struct_field)));
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).unwrap().?.toSlice(ip));
const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
try uleb128(diw, field_bit_offset);
field_bit_offset += @intCast(field_type.bitSize(pt));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
}
},
.enum_type => {
const loaded_enum = ip.loadEnumType(type_index);
try uleb128(diw, @intFromEnum(AbbrevCode.enum_type));
try wip_nav.strp(name);
try wip_nav.refType(Type.fromInterned(loaded_enum.tag_ty));
for (0..loaded_enum.names.len) |field_index| {
try wip_nav.enumConstValue(loaded_enum, .{
.signed = .signed_enum_field,
.unsigned = .unsigned_enum_field,
}, field_index);
try wip_nav.strp(loaded_enum.names.get(ip)[field_index].toSlice(ip));
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.union_type => {
const loaded_union = ip.loadUnionType(type_index);
try uleb128(diw, @intFromEnum(AbbrevCode.union_type));
try wip_nav.strp(name);
const union_layout = pt.getUnionLayout(loaded_union);
try uleb128(diw, union_layout.abi_size);
try uleb128(diw, union_layout.abi_align.toByteUnits().?);
const loaded_tag = loaded_union.loadTagType(ip);
if (loaded_union.hasTag(ip)) {
try uleb128(diw, @intFromEnum(AbbrevCode.tagged_union));
try wip_nav.infoSectionOffset(
.debug_info,
wip_nav.unit,
wip_nav.entry,
@intCast(wip_nav.debug_info.items.len + dwarf.sectionOffsetBytes()),
);
{
try uleb128(diw, @intFromEnum(AbbrevCode.generated_field));
try wip_nav.strp("tag");
try wip_nav.refType(Type.fromInterned(loaded_union.enum_tag_ty));
try uleb128(diw, union_layout.tagOffset());
for (0..loaded_union.field_types.len) |field_index| {
try wip_nav.enumConstValue(loaded_tag, .{
.signed = .signed_tagged_union_field,
.unsigned = .unsigned_tagged_union_field,
}, field_index);
{
try uleb128(diw, @intFromEnum(AbbrevCode.struct_field));
try wip_nav.strp(loaded_tag.names.get(ip)[field_index].toSlice(ip));
const field_type = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
try uleb128(diw, union_layout.payloadOffset());
try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse
if (field_type.isNoReturn(zcu)) 1 else field_type.abiAlignment(pt).toByteUnits().?);
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
if (ip.indexToKey(loaded_union.enum_tag_ty).enum_type == .generated_tag)
try wip_nav.pending_types.append(dwarf.gpa, loaded_union.enum_tag_ty);
} else for (0..loaded_union.field_types.len) |field_index| {
try uleb128(diw, @intFromEnum(AbbrevCode.untagged_union_field));
try wip_nav.strp(loaded_tag.names.get(ip)[field_index].toSlice(ip));
const field_type = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse
field_type.abiAlignment(pt).toByteUnits().?);
}
try uleb128(diw, @intFromEnum(AbbrevCode.null));
},
.opaque_type => {
try uleb128(diw, @intFromEnum(AbbrevCode.namespace_struct_type));
try wip_nav.strp(name);
try diw.writeByte(@intFromBool(true));
},
else => unreachable,
}
try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items);
try dwarf.debug_loclists.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_loclists.items);
try wip_nav.flush();
}
}
fn getAtom(self: *const Dwarf, comptime kind: Kind, index: Atom.Index) Atom {
return switch (kind) {
.src_fn => self.src_fns.items[index],
.di_atom => self.di_atoms.items[index],
};
/// Patches, in place on disk, the line-number attribute of the debug-info
/// entry previously emitted for `nav_index`, without re-emitting the entry.
/// Navs without an analysis owner have no entry and are ignored.
pub fn updateNavLineNumber(dwarf: *Dwarf, zcu: *Zcu, nav_index: InternPool.Nav.Index) UpdateError!void {
    const ip = &zcu.intern_pool;
    const zir_index = ip.getCau(ip.getNav(nav_index).analysis_owner.unwrap() orelse return).zir_index;
    const inst_info = zir_index.resolveFull(ip);
    assert(inst_info.inst != .main_struct_inst);
    const file = zcu.fileByIndex(inst_info.file);
    const inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
    assert(inst.tag == .declaration);
    const line = file.zir.extraData(Zir.Inst.Declaration, inst.data.declaration.payload_index).data.src_line;
    var line_buf: [4]u8 = undefined;
    // The entry was emitted with `loc.line + 1` (DWARF lines are 1-based,
    // ZIR `src_line` is 0-based), so the patched value must also be `+ 1`.
    std.mem.writeInt(u32, &line_buf, line + 1, dwarf.endian);
    // The line attribute lives in .debug_info at `declEntryLineOff` (see the
    // asserts at emission time), so the patch targets the debug_info section,
    // not debug_line.
    const unit = dwarf.debug_info.section.getUnit(dwarf.mods.get(file.mod).?);
    const entry = unit.getEntry(dwarf.navs.get(nav_index).?);
    // Fix: write the serialized bytes (`&line_buf`); `pwriteAll` takes a byte
    // slice, not a pointer to the integer.
    try dwarf.getFile().?.pwriteAll(&line_buf, dwarf.debug_info.section.off + unit.off + unit.header_len + entry.off + DebugInfo.declEntryLineOff(dwarf));
}
fn getAtomPtr(self: *Dwarf, comptime kind: Kind, index: Atom.Index) *Atom {
return switch (kind) {
.src_fn => &self.src_fns.items[index],
.di_atom => &self.di_atoms.items[index],
};
/// Currently a no-op; per-nav resources are not yet reclaimed individually.
pub fn freeNav(dwarf: *Dwarf, nav_index: InternPool.Nav.Index) void {
    _ = .{ dwarf, nav_index };
}
pub const Format = enum {
dwarf32,
dwarf64,
pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
const ip = &pt.zcu.intern_pool;
if (dwarf.types.get(.anyerror_type)) |entry| {
var wip_nav: WipNav = .{
.dwarf = dwarf,
.pt = pt,
.unit = .main,
.entry = entry,
.any_children = false,
.func = .none,
.func_high_reloc = undefined,
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
.pending_types = .{},
};
defer wip_nav.deinit();
const diw = wip_nav.debug_info.writer(dwarf.gpa);
const global_error_set_names = ip.global_error_set.getNamesFromMainThread();
try uleb128(diw, @intFromEnum(@as(AbbrevCode, if (global_error_set_names.len > 0) .enum_type else .empty_enum_type)));
try wip_nav.strp("anyerror");
try wip_nav.refType(Type.fromInterned(try pt.intern(.{ .int_type = .{
.signedness = .unsigned,
.bits = pt.zcu.errorSetBits(),
} })));
for (global_error_set_names, 1..) |name, value| {
try uleb128(diw, @intFromEnum(AbbrevCode.unsigned_enum_field));
try uleb128(diw, value);
try wip_nav.strp(name.toSlice(ip));
}
if (global_error_set_names.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null));
try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items);
}
const cwd = try std.process.getCwdAlloc(dwarf.gpa);
defer dwarf.gpa.free(cwd);
var header = std.ArrayList(u8).init(dwarf.gpa);
defer header.deinit();
if (dwarf.debug_abbrev.section.dirty) {
for (1.., &AbbrevCode.abbrevs) |code, *abbrev| {
try uleb128(header.writer(), code);
try uleb128(header.writer(), @intFromEnum(abbrev.tag));
try header.append(if (abbrev.children) DW.CHILDREN.yes else DW.CHILDREN.no);
for (abbrev.attrs) |*attr| {
try uleb128(header.writer(), @intFromEnum(attr[0]));
try uleb128(header.writer(), @intFromEnum(attr[1]));
}
try header.appendSlice(&.{ 0, 0 });
}
try header.append(@intFromEnum(AbbrevCode.null));
try dwarf.debug_abbrev.section.replaceEntry(DebugAbbrev.unit, DebugAbbrev.entry, dwarf, header.items);
dwarf.debug_abbrev.section.dirty = false;
}
if (dwarf.debug_aranges.section.dirty) {
for (dwarf.debug_aranges.section.units.items, 0..) |*unit_ptr, unit_index| {
const unit: Unit.Index = @enumFromInt(unit_index);
try unit_ptr.cross_section_relocs.ensureUnusedCapacity(dwarf.gpa, 1);
header.clearRetainingCapacity();
try header.ensureTotalCapacity(unit_ptr.header_len);
const unit_len = (if (unit_ptr.next.unwrap()) |next_unit|
dwarf.debug_aranges.section.getUnit(next_unit).off
else
dwarf.debug_aranges.section.len) - unit_ptr.off - dwarf.unitLengthBytes();
switch (dwarf.format) {
.@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), @intCast(unit_len), dwarf.endian),
.@"64" => {
std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), std.math.maxInt(u32), dwarf.endian);
std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(@sizeOf(u64)), unit_len, dwarf.endian);
},
}
std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(@sizeOf(u16)), 2, dwarf.endian);
unit_ptr.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_info,
.target_unit = unit,
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
header.appendSliceAssumeCapacity(&.{ @intFromEnum(dwarf.address_size), 0 });
header.appendNTimesAssumeCapacity(0, unit_ptr.header_len - header.items.len);
try unit_ptr.replaceHeader(&dwarf.debug_aranges.section, dwarf, header.items);
try unit_ptr.writeTrailer(&dwarf.debug_aranges.section, dwarf);
}
dwarf.debug_aranges.section.dirty = false;
}
if (dwarf.debug_info.section.dirty) {
for (dwarf.mods.keys(), dwarf.debug_info.section.units.items, 0..) |mod, *unit_ptr, unit_index| {
const unit: Unit.Index = @enumFromInt(unit_index);
try unit_ptr.cross_unit_relocs.ensureUnusedCapacity(dwarf.gpa, 1);
try unit_ptr.cross_section_relocs.ensureUnusedCapacity(dwarf.gpa, 7);
header.clearRetainingCapacity();
try header.ensureTotalCapacity(unit_ptr.header_len);
const unit_len = (if (unit_ptr.next.unwrap()) |next_unit|
dwarf.debug_info.section.getUnit(next_unit).off
else
dwarf.debug_info.section.len) - unit_ptr.off - dwarf.unitLengthBytes();
switch (dwarf.format) {
.@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), @intCast(unit_len), dwarf.endian),
.@"64" => {
std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), std.math.maxInt(u32), dwarf.endian);
std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(@sizeOf(u64)), unit_len, dwarf.endian);
},
}
std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(@sizeOf(u16)), 5, dwarf.endian);
header.appendSliceAssumeCapacity(&.{ DW.UT.compile, @intFromEnum(dwarf.address_size) });
unit_ptr.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_abbrev,
.target_unit = DebugAbbrev.unit,
.target_entry = DebugAbbrev.entry.toOptional(),
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
const compile_unit_off: u32 = @intCast(header.items.len);
uleb128(header.fixedWriter(), @intFromEnum(AbbrevCode.compile_unit)) catch unreachable;
header.appendAssumeCapacity(DW.LANG.Zig);
unit_ptr.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_line_str,
.target_unit = StringSection.unit,
.target_entry = (try dwarf.debug_line_str.addString(dwarf, "zig " ++ @import("build_options").version)).toOptional(),
});
{
const mod_root_path = try std.fs.path.resolve(dwarf.gpa, &.{
cwd,
mod.root.root_dir.path orelse "",
mod.root.sub_path,
});
defer dwarf.gpa.free(mod_root_path);
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
unit_ptr.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_line_str,
.target_unit = StringSection.unit,
.target_entry = (try dwarf.debug_line_str.addString(dwarf, mod_root_path)).toOptional(),
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
}
unit_ptr.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_line_str,
.target_unit = StringSection.unit,
.target_entry = (try dwarf.debug_line_str.addString(dwarf, mod.root_src_path)).toOptional(),
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
unit_ptr.cross_unit_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_unit = .main,
.target_off = compile_unit_off,
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
unit_ptr.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_line,
.target_unit = unit,
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
unit_ptr.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_rnglists,
.target_unit = unit,
.target_off = DebugRngLists.baseOffset(dwarf),
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
uleb128(header.fixedWriter(), 0) catch unreachable;
uleb128(header.fixedWriter(), @intFromEnum(AbbrevCode.module)) catch unreachable;
unit_ptr.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_str,
.target_unit = StringSection.unit,
.target_entry = (try dwarf.debug_str.addString(dwarf, mod.fully_qualified_name)).toOptional(),
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
uleb128(header.fixedWriter(), 0) catch unreachable;
try unit_ptr.replaceHeader(&dwarf.debug_info.section, dwarf, header.items);
try unit_ptr.writeTrailer(&dwarf.debug_info.section, dwarf);
}
dwarf.debug_info.section.dirty = false;
}
if (dwarf.debug_str.section.dirty) {
const contents = dwarf.debug_str.contents.items;
try dwarf.debug_str.section.resize(dwarf, contents.len);
try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off);
dwarf.debug_str.section.dirty = false;
}
if (dwarf.debug_line.section.dirty) {
for (dwarf.mods.values(), dwarf.debug_line.section.units.items) |mod_info, *unit|
try unit.resizeHeader(&dwarf.debug_line.section, dwarf, DebugLine.headerBytes(dwarf, @intCast(mod_info.files.count())));
for (dwarf.mods.keys(), dwarf.mods.values(), dwarf.debug_line.section.units.items) |mod, mod_info, *unit| {
try unit.cross_section_relocs.ensureUnusedCapacity(dwarf.gpa, 2 * (1 + mod_info.files.count()));
header.clearRetainingCapacity();
try header.ensureTotalCapacity(unit.header_len);
const unit_len = (if (unit.next.unwrap()) |next_unit|
dwarf.debug_line.section.getUnit(next_unit).off
else
dwarf.debug_line.section.len) - unit.off - dwarf.unitLengthBytes();
switch (dwarf.format) {
.@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), @intCast(unit_len), dwarf.endian),
.@"64" => {
std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), std.math.maxInt(u32), dwarf.endian);
std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(@sizeOf(u64)), unit_len, dwarf.endian);
},
}
std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(@sizeOf(u16)), 5, dwarf.endian);
header.appendSliceAssumeCapacity(&.{ @intFromEnum(dwarf.address_size), 0 });
switch (dwarf.format) {
inline .@"32", .@"64" => |format| std.mem.writeInt(
SectionOffset(format),
header.addManyAsArrayAssumeCapacity(@sizeOf(SectionOffset(format))),
@intCast(unit.header_len - header.items.len),
dwarf.endian,
),
}
const StandardOpcode = DeclValEnum(DW.LNS);
header.appendSliceAssumeCapacity(&[_]u8{
dwarf.debug_line.header.minimum_instruction_length,
dwarf.debug_line.header.maximum_operations_per_instruction,
@intFromBool(dwarf.debug_line.header.default_is_stmt),
@bitCast(dwarf.debug_line.header.line_base),
dwarf.debug_line.header.line_range,
dwarf.debug_line.header.opcode_base,
});
header.appendSliceAssumeCapacity(std.enums.EnumArray(StandardOpcode, u8).init(.{
.extended_op = undefined,
.copy = 0,
.advance_pc = 1,
.advance_line = 1,
.set_file = 1,
.set_column = 1,
.negate_stmt = 0,
.set_basic_block = 0,
.const_add_pc = 0,
.fixed_advance_pc = 1,
.set_prologue_end = 0,
.set_epilogue_begin = 0,
.set_isa = 1,
}).values[1..dwarf.debug_line.header.opcode_base]);
header.appendAssumeCapacity(1);
uleb128(header.fixedWriter(), DW.LNCT.path) catch unreachable;
uleb128(header.fixedWriter(), DW.FORM.line_strp) catch unreachable;
uleb128(header.fixedWriter(), 1) catch unreachable;
{
const mod_root_path = try std.fs.path.resolve(dwarf.gpa, &.{
cwd,
mod.root.root_dir.path orelse "",
mod.root.sub_path,
});
defer dwarf.gpa.free(mod_root_path);
unit.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_line_str,
.target_unit = StringSection.unit,
.target_entry = (try dwarf.debug_line_str.addString(dwarf, mod_root_path)).toOptional(),
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
}
header.appendAssumeCapacity(2);
uleb128(header.fixedWriter(), DW.LNCT.path) catch unreachable;
uleb128(header.fixedWriter(), DW.FORM.line_strp) catch unreachable;
uleb128(header.fixedWriter(), DW.LNCT.LLVM_source) catch unreachable;
uleb128(header.fixedWriter(), DW.FORM.line_strp) catch unreachable;
uleb128(header.fixedWriter(), mod_info.files.count()) catch unreachable;
for (mod_info.files.keys()) |file_index| {
const file = pt.zcu.fileByIndex(file_index);
unit.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_line_str,
.target_unit = StringSection.unit,
.target_entry = (try dwarf.debug_line_str.addString(dwarf, file.sub_file_path)).toOptional(),
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
unit.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_line_str,
.target_unit = StringSection.unit,
.target_entry = (try dwarf.debug_line_str.addString(
dwarf,
if (file.mod.builtin_file == file) file.source else "",
)).toOptional(),
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
}
try unit.replaceHeader(&dwarf.debug_line.section, dwarf, header.items);
try unit.writeTrailer(&dwarf.debug_line.section, dwarf);
}
dwarf.debug_line.section.dirty = false;
}
if (dwarf.debug_line_str.section.dirty) {
const contents = dwarf.debug_line_str.contents.items;
try dwarf.debug_line_str.section.resize(dwarf, contents.len);
try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off);
dwarf.debug_line_str.section.dirty = false;
}
if (dwarf.debug_rnglists.section.dirty) {
for (dwarf.debug_rnglists.section.units.items) |*unit| {
header.clearRetainingCapacity();
try header.ensureTotalCapacity(unit.header_len);
const unit_len = (if (unit.next.unwrap()) |next_unit|
dwarf.debug_rnglists.section.getUnit(next_unit).off
else
dwarf.debug_rnglists.section.len) - unit.off - dwarf.unitLengthBytes();
switch (dwarf.format) {
.@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), @intCast(unit_len), dwarf.endian),
.@"64" => {
std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), std.math.maxInt(u32), dwarf.endian);
std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(@sizeOf(u64)), unit_len, dwarf.endian);
},
}
std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(@sizeOf(u16)), 5, dwarf.endian);
header.appendSliceAssumeCapacity(&.{ @intFromEnum(dwarf.address_size), 0 });
std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), 1, dwarf.endian);
switch (dwarf.format) {
inline .@"32", .@"64" => |format| std.mem.writeInt(
SectionOffset(format),
header.addManyAsArrayAssumeCapacity(@sizeOf(SectionOffset(format))),
@sizeOf(SectionOffset(format)),
dwarf.endian,
),
}
try unit.replaceHeader(&dwarf.debug_rnglists.section, dwarf, header.items);
try unit.writeTrailer(&dwarf.debug_rnglists.section, dwarf);
}
dwarf.debug_rnglists.section.dirty = false;
}
}
/// Applies every pending relocation in all DWARF output sections, patching
/// the bytes already written to the output file.
pub fn resolveRelocs(dwarf: *Dwarf) RelocError!void {
    const sections = [_]*Section{
        &dwarf.debug_abbrev.section,
        &dwarf.debug_aranges.section,
        &dwarf.debug_info.section,
        &dwarf.debug_line.section,
        &dwarf.debug_line_str.section,
        &dwarf.debug_loclists.section,
        &dwarf.debug_rnglists.section,
        &dwarf.debug_str.section,
    };
    for (sections) |section| try section.resolveRelocs(dwarf);
}
/// Builds an exhaustive enum type from the public integer constants declared
/// on `T` (e.g. `DW.AT`, `DW.FORM`), skipping vendor extensions ("HP_" prefix
/// or "_user" suffix). The tag type is the smallest integer that fits every
/// remaining value.
fn DeclValEnum(comptime T: type) type {
    const decls = @typeInfo(T).Struct.decls;
    @setEvalBranchQuota(7 * decls.len);
    var fields: [decls.len]std.builtin.Type.EnumField = undefined;
    var count = 0;
    var min: ?comptime_int = null;
    var max: ?comptime_int = null;
    for (decls) |decl| {
        const name = decl.name;
        // Vendor-specific codes would collide or bloat the tag range; skip them.
        if (std.mem.startsWith(u8, name, "HP_") or std.mem.endsWith(u8, name, "_user")) continue;
        const value = @field(T, name);
        fields[count] = .{ .name = name, .value = value };
        count += 1;
        if (min == null or value < min.?) min = value;
        if (max == null or value > max.?) max = value;
    }
    return @Type(.{ .Enum = .{
        .tag_type = std.math.IntFittingRange(min orelse 0, max orelse 0),
        .fields = fields[0..count],
        .decls = &.{},
        .is_exhaustive = true,
    } });
}
/// Codes for the DIE abbreviations this backend emits into `.debug_abbrev`.
/// The integer value of each tag is the uleb128 abbreviation code written
/// before the corresponding DIE in `.debug_info`.
const AbbrevCode = enum(u8) {
    null,
    // padding codes must be one byte uleb128 values to function
    pad_1,
    pad_n,
    // decl codes are assumed to all have the same uleb128 length
    decl_alias,
    decl_enum,
    decl_namespace_struct,
    decl_struct,
    decl_packed_struct,
    decl_union,
    decl_var,
    decl_func,
    decl_func_empty,
    // the rest are unrestricted
    compile_unit,
    module,
    namespace_file,
    file,
    signed_enum_field,
    unsigned_enum_field,
    generated_field,
    struct_field,
    struct_field_comptime,
    packed_struct_field,
    untagged_union_field,
    tagged_union,
    signed_tagged_union_field,
    unsigned_tagged_union_field,
    tagged_union_default_field,
    void_type,
    numeric_type,
    inferred_error_set_type,
    ptr_type,
    is_const,
    is_volatile,
    array_type,
    array_index,
    nullary_func_type,
    func_type,
    func_type_param,
    is_var_args,
    enum_type,
    empty_enum_type,
    namespace_struct_type,
    struct_type,
    packed_struct_type,
    union_type,
    local_arg,
    local_var,

    /// Encoded size shared by the decl codes: `decl_func_empty` is the largest
    /// decl code, so every decl code fits in this many uleb128 bytes.
    const decl_bytes = uleb128Bytes(@intFromEnum(AbbrevCode.decl_func_empty));

    /// One attribute specification: a (DW_AT, DW_FORM) pair.
    const Attr = struct {
        DeclValEnum(DW.AT),
        DeclValEnum(DW.FORM),
    };

    /// Attributes common to every `decl_*` abbreviation.
    const decl_abbrev_common_attrs = &[_]Attr{
        .{ .ZIG_parent, .ref_addr },
        .{ .decl_line, .data4 },
        .{ .decl_column, .udata },
        .{ .accessibility, .data1 },
        .{ .name, .strp },
    };

    /// Abbreviation declarations, indexed by code. `.values[1..]` drops the
    /// `.null` entry, so element 0 corresponds to abbreviation code 1.
    const abbrevs = std.EnumArray(AbbrevCode, struct {
        tag: DeclValEnum(DW.TAG),
        children: bool = false,
        attrs: []const Attr = &.{},
    }).init(.{
        .pad_1 = .{
            .tag = .ZIG_padding,
        },
        .pad_n = .{
            .tag = .ZIG_padding,
            .attrs = &.{
                .{ .ZIG_padding, .block },
            },
        },
        .decl_alias = .{
            .tag = .imported_declaration,
            .attrs = decl_abbrev_common_attrs ++ .{
                .{ .import, .ref_addr },
            },
        },
        .decl_enum = .{
            .tag = .enumeration_type,
            .children = true,
            .attrs = decl_abbrev_common_attrs ++ .{
                .{ .type, .ref_addr },
            },
        },
        .decl_namespace_struct = .{
            .tag = .structure_type,
            .attrs = decl_abbrev_common_attrs ++ .{
                .{ .declaration, .flag },
            },
        },
        .decl_struct = .{
            .tag = .structure_type,
            .children = true,
            .attrs = decl_abbrev_common_attrs ++ .{
                .{ .byte_size, .udata },
                .{ .alignment, .udata },
            },
        },
        .decl_packed_struct = .{
            .tag = .structure_type,
            .children = true,
            .attrs = decl_abbrev_common_attrs ++ .{
                .{ .type, .ref_addr },
            },
        },
        .decl_union = .{
            .tag = .union_type,
            .children = true,
            .attrs = decl_abbrev_common_attrs ++ .{
                .{ .byte_size, .udata },
                .{ .alignment, .udata },
            },
        },
        .decl_var = .{
            .tag = .variable,
            .attrs = decl_abbrev_common_attrs ++ .{
                .{ .linkage_name, .strp },
                .{ .type, .ref_addr },
                .{ .location, .exprloc },
                .{ .alignment, .udata },
                .{ .external, .flag },
            },
        },
        .decl_func = .{
            .tag = .subprogram,
            .children = true,
            .attrs = decl_abbrev_common_attrs ++ .{
                .{ .linkage_name, .strp },
                .{ .type, .ref_addr },
                .{ .low_pc, .addr },
                .{ .high_pc, .addr },
                .{ .alignment, .udata },
                .{ .external, .flag },
                .{ .noreturn, .flag },
            },
        },
        .decl_func_empty = .{
            .tag = .subprogram,
            .attrs = decl_abbrev_common_attrs ++ .{
                .{ .linkage_name, .strp },
                .{ .type, .ref_addr },
                .{ .low_pc, .addr },
                .{ .high_pc, .addr },
                .{ .alignment, .udata },
                .{ .external, .flag },
                .{ .noreturn, .flag },
            },
        },
        .compile_unit = .{
            .tag = .compile_unit,
            .children = true,
            .attrs = &.{
                .{ .language, .data1 },
                .{ .producer, .line_strp },
                .{ .comp_dir, .line_strp },
                .{ .name, .line_strp },
                .{ .base_types, .ref_addr },
                .{ .stmt_list, .sec_offset },
                .{ .rnglists_base, .sec_offset },
                .{ .ranges, .rnglistx },
            },
        },
        .module = .{
            .tag = .module,
            .children = true,
            .attrs = &.{
                .{ .name, .strp },
                .{ .ranges, .rnglistx },
            },
        },
        .namespace_file = .{
            .tag = .structure_type,
            .attrs = &.{
                .{ .decl_file, .udata },
                .{ .name, .strp },
            },
        },
        .file = .{
            .tag = .structure_type,
            .children = true,
            .attrs = &.{
                .{ .decl_file, .udata },
                .{ .name, .strp },
                .{ .byte_size, .udata },
                .{ .alignment, .udata },
            },
        },
        .signed_enum_field = .{
            .tag = .enumerator,
            .attrs = &.{
                .{ .const_value, .sdata },
                .{ .name, .strp },
            },
        },
        .unsigned_enum_field = .{
            .tag = .enumerator,
            .attrs = &.{
                .{ .const_value, .udata },
                .{ .name, .strp },
            },
        },
        .generated_field = .{
            .tag = .member,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
                .{ .data_member_location, .udata },
                .{ .artificial, .flag_present },
            },
        },
        .struct_field = .{
            .tag = .member,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
                .{ .data_member_location, .udata },
                .{ .alignment, .udata },
            },
        },
        .struct_field_comptime = .{
            .tag = .member,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
                .{ .const_expr, .flag_present },
            },
        },
        .packed_struct_field = .{
            .tag = .member,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
                .{ .data_bit_offset, .udata },
            },
        },
        .untagged_union_field = .{
            .tag = .member,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
                .{ .alignment, .udata },
            },
        },
        .tagged_union = .{
            .tag = .variant_part,
            .children = true,
            .attrs = &.{
                .{ .discr, .ref_addr },
            },
        },
        .signed_tagged_union_field = .{
            .tag = .variant,
            .children = true,
            .attrs = &.{
                .{ .discr_value, .sdata },
            },
        },
        .unsigned_tagged_union_field = .{
            .tag = .variant,
            .children = true,
            .attrs = &.{
                .{ .discr_value, .udata },
            },
        },
        .tagged_union_default_field = .{
            .tag = .variant,
            .children = true,
            .attrs = &.{},
        },
        .void_type = .{
            .tag = .unspecified_type,
            .attrs = &.{
                .{ .name, .strp },
            },
        },
        .numeric_type = .{
            .tag = .base_type,
            .attrs = &.{
                .{ .name, .strp },
                .{ .encoding, .data1 },
                .{ .bit_size, .udata },
                .{ .byte_size, .udata },
                .{ .alignment, .udata },
            },
        },
        .inferred_error_set_type = .{
            .tag = .typedef,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
            },
        },
        .ptr_type = .{
            .tag = .pointer_type,
            .attrs = &.{
                .{ .name, .strp },
                .{ .ZIG_is_allowzero, .flag },
                .{ .alignment, .udata },
                .{ .address_class, .data1 },
                .{ .type, .ref_addr },
            },
        },
        .is_const = .{
            .tag = .const_type,
            .attrs = &.{
                .{ .type, .ref_addr },
            },
        },
        .is_volatile = .{
            .tag = .volatile_type,
            .attrs = &.{
                .{ .type, .ref_addr },
            },
        },
        .array_type = .{
            .tag = .array_type,
            .children = true,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
                .{ .GNU_vector, .flag },
            },
        },
        .array_index = .{
            .tag = .subrange_type,
            .attrs = &.{
                .{ .type, .ref_addr },
                .{ .count, .udata },
            },
        },
        .nullary_func_type = .{
            .tag = .subroutine_type,
            .attrs = &.{
                .{ .name, .strp },
                .{ .calling_convention, .data1 },
                .{ .type, .ref_addr },
            },
        },
        .func_type = .{
            .tag = .subroutine_type,
            .children = true,
            .attrs = &.{
                .{ .name, .strp },
                .{ .calling_convention, .data1 },
                .{ .type, .ref_addr },
            },
        },
        .func_type_param = .{
            .tag = .formal_parameter,
            .attrs = &.{
                .{ .type, .ref_addr },
            },
        },
        .is_var_args = .{
            .tag = .unspecified_parameters,
        },
        .enum_type = .{
            .tag = .enumeration_type,
            .children = true,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
            },
        },
        .empty_enum_type = .{
            .tag = .enumeration_type,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
            },
        },
        .namespace_struct_type = .{
            .tag = .structure_type,
            .attrs = &.{
                .{ .name, .strp },
                .{ .declaration, .flag },
            },
        },
        .struct_type = .{
            .tag = .structure_type,
            .children = true,
            .attrs = &.{
                .{ .name, .strp },
                .{ .byte_size, .udata },
                .{ .alignment, .udata },
            },
        },
        .packed_struct_type = .{
            .tag = .structure_type,
            .children = true,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
            },
        },
        .union_type = .{
            .tag = .union_type,
            .children = true,
            .attrs = &.{
                .{ .name, .strp },
                .{ .byte_size, .udata },
                .{ .alignment, .udata },
            },
        },
        .local_arg = .{
            .tag = .formal_parameter,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
                .{ .location, .exprloc },
            },
        },
        .local_var = .{
            .tag = .variable,
            .attrs = &.{
                .{ .name, .strp },
                .{ .type, .ref_addr },
                .{ .location, .exprloc },
            },
        },
        // `.null` is never emitted as a DIE; its slot is sliced off below.
        .null = undefined,
    }).values[1..].*;
};
const Dwarf = @This();
/// Returns the file that debug info is written into: the MachO dSYM bundle
/// file when one exists, otherwise the linker's main output file.
fn getFile(dwarf: *Dwarf) ?std.fs.File {
    if (dwarf.bin_file.cast(.macho)) |macho_file| {
        if (macho_file.d_sym) |*d_sym| return d_sym.file;
    }
    return dwarf.bin_file.file;
}
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const fs = std.fs;
const leb128 = std.leb;
const log = std.log.scoped(.dwarf);
const mem = std.mem;
/// Adds an entry to each per-unit debug section, asserting that every section
/// hands back the same index, so one `Entry.Index` addresses the matching
/// entry in all of them.
fn addCommonEntry(dwarf: *Dwarf, unit: Unit.Index) UpdateError!Entry.Index {
    const entry = try dwarf.debug_aranges.section.addEntry(unit, dwarf);
    const info_entry = try dwarf.debug_info.section.addEntry(unit, dwarf);
    assert(info_entry == entry);
    const line_entry = try dwarf.debug_line.section.addEntry(unit, dwarf);
    assert(line_entry == entry);
    const loclists_entry = try dwarf.debug_loclists.section.addEntry(unit, dwarf);
    assert(loclists_entry == entry);
    const rnglists_entry = try dwarf.debug_rnglists.section.addEntry(unit, dwarf);
    assert(rnglists_entry == entry);
    return entry;
}
const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
/// Writes `int` into `buf` as an unsigned integer of exactly `buf.len` bytes
/// (0 through 8), using the target byte order in `dwarf.endian`. Asserts that
/// `int` fits in that many bytes.
fn writeInt(dwarf: *Dwarf, buf: []u8, int: u64) void {
    switch (buf.len) {
        inline 0...8 => |bytes| {
            // Synthesize the exact-width integer type for this byte count.
            const Int = @Type(.{ .Int = .{
                .signedness = .unsigned,
                .bits = bytes * 8,
            } });
            std.mem.writeInt(Int, buf[0..bytes], @intCast(int), dwarf.endian);
        },
        else => unreachable,
    }
}
/// Patches one relocation: writes the `size`-byte `target` value directly
/// into the output file at offset `source`.
fn resolveReloc(dwarf: *Dwarf, source: u64, target: u64, size: u32) RelocError!void {
    var bytes: [8]u8 = undefined;
    const patch = bytes[0..size];
    dwarf.writeInt(patch, target);
    try dwarf.getFile().?.pwriteAll(patch, source);
}
/// Size of a unit's initial length field: 4 bytes in DWARF32, or a 4-byte
/// 0xffffffff escape followed by an 8-byte length in DWARF64.
fn unitLengthBytes(dwarf: *Dwarf) u32 {
    return if (dwarf.format == .@"64") 4 + 8 else 4;
}
/// Size of a section offset for the active DWARF format (4 or 8 bytes).
fn sectionOffsetBytes(dwarf: *Dwarf) u32 {
    return if (dwarf.format == .@"64") 8 else 4;
}
/// The unsigned integer type that holds a section offset in the given DWARF
/// format: `u32` for DWARF32, `u64` for DWARF64.
fn SectionOffset(comptime format: DW.Format) type {
    return if (format == .@"64") u64 else u32;
}
/// Returns the number of bytes `value` occupies when encoded as uleb128.
fn uleb128Bytes(value: anytype) u32 {
    var cw = std.io.countingWriter(std.io.null_writer);
    // Bug fix: `try` is a compile error here because the return type is `u32`,
    // not an error union. The null writer cannot fail, so assert that instead.
    uleb128(cw.writer(), value) catch unreachable;
    return @intCast(cw.bytes_written);
}
/// overrides `-fno-incremental` for testing incremental debug info until `-fincremental` is functional
const force_incremental = false;
/// Whether incremental debug info updates are in effect for this compilation.
inline fn incremental(dwarf: Dwarf) bool {
    if (force_incremental) return true;
    return dwarf.bin_file.comp.incremental;
}
const Allocator = mem.Allocator;
const DW = std.dwarf;
const File = link.File;
const LinkBlock = File.LinkBlock;
const LinkFn = File.LinkFn;
const LinkerLoad = @import("../codegen.zig").LinkerLoad;
const Zcu = @import("../Zcu.zig");
const Dwarf = @This();
const InternPool = @import("../InternPool.zig");
const StringTable = @import("StringTable.zig");
const Module = @import("../Package.zig").Module;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Zcu = @import("../Zcu.zig");
const Zir = std.zig.Zir;
const assert = std.debug.assert;
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
const log = std.log.scoped(.dwarf);
const sleb128 = std.leb.writeIleb128;
const std = @import("std");
const target_info = @import("../target.zig");
const uleb128 = std.leb.writeUleb128;
+138 -94
View File
@@ -143,6 +143,9 @@ debug_abbrev_section_index: ?u32 = null,
debug_str_section_index: ?u32 = null,
debug_aranges_section_index: ?u32 = null,
debug_line_section_index: ?u32 = null,
debug_line_str_section_index: ?u32 = null,
debug_loclists_section_index: ?u32 = null,
debug_rnglists_section_index: ?u32 = null,
copy_rel_section_index: ?u32 = null,
dynamic_section_index: ?u32 = null,
@@ -492,12 +495,13 @@ pub fn getUavVAddr(self: *Elf, uav: InternPool.Index, reloc_info: link.File.Relo
}
/// Returns end pos of collision, if any.
fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
fn detectAllocCollision(self: *Elf, start: u64, size: u64) !?u64 {
const small_ptr = self.ptr_width == .p32;
const ehdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Ehdr) else @sizeOf(elf.Elf64_Ehdr);
if (start < ehdr_size)
return ehdr_size;
var at_end = true;
const end = start + padToIdeal(size);
if (self.shdr_table_offset) |off| {
@@ -505,8 +509,9 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
const tight_size = self.shdrs.items.len * shdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off +| increased_size;
if (end > off and start < test_end) {
return test_end;
if (start < test_end) {
if (end > off) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
@@ -514,8 +519,9 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
const increased_size = padToIdeal(shdr.sh_size);
const test_end = shdr.sh_offset +| increased_size;
if (end > shdr.sh_offset and start < test_end) {
return test_end;
if (start < test_end) {
if (end > shdr.sh_offset) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
@@ -523,11 +529,13 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (phdr.p_type != elf.PT_LOAD) continue;
const increased_size = padToIdeal(phdr.p_filesz);
const test_end = phdr.p_offset +| increased_size;
if (end > phdr.p_offset and start < test_end) {
return test_end;
if (start < test_end) {
if (end > phdr.p_offset) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
if (at_end) try self.base.file.?.setEndPos(end);
return null;
}
@@ -558,9 +566,9 @@ fn allocatedVirtualSize(self: *Elf, start: u64) u64 {
return min_pos - start;
}
pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) u64 {
pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 {
var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| {
while (try self.detectAllocCollision(start, object_size)) |item_end| {
start = mem.alignForward(u64, item_end, min_alignment);
}
return start;
@@ -580,9 +588,9 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
const zig_object = self.zigObjectPtr().?;
const fillSection = struct {
fn fillSection(elf_file: *Elf, shdr: *elf.Elf64_Shdr, size: u64, phndx: ?u16) void {
fn fillSection(elf_file: *Elf, shdr: *elf.Elf64_Shdr, size: u64, phndx: ?u16) !void {
if (elf_file.base.isRelocatable()) {
const off = elf_file.findFreeSpace(size, shdr.sh_addralign);
const off = try elf_file.findFreeSpace(size, shdr.sh_addralign);
shdr.sh_offset = off;
shdr.sh_size = size;
} else {
@@ -599,7 +607,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
if (!self.base.isRelocatable()) {
if (self.phdr_zig_load_re_index == null) {
const filesz = options.program_code_size_hint;
const off = self.findFreeSpace(filesz, self.page_size);
const off = try self.findFreeSpace(filesz, self.page_size);
self.phdr_zig_load_re_index = try self.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
@@ -614,7 +622,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
if (self.phdr_zig_load_ro_index == null) {
const alignment = self.page_size;
const filesz: u64 = 1024;
const off = self.findFreeSpace(filesz, alignment);
const off = try self.findFreeSpace(filesz, alignment);
self.phdr_zig_load_ro_index = try self.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
@@ -629,7 +637,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
if (self.phdr_zig_load_rw_index == null) {
const alignment = self.page_size;
const filesz: u64 = 1024;
const off = self.findFreeSpace(filesz, alignment);
const off = try self.findFreeSpace(filesz, alignment);
self.phdr_zig_load_rw_index = try self.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
@@ -662,7 +670,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.zig_text_section_index.?];
fillSection(self, shdr, options.program_code_size_hint, self.phdr_zig_load_re_index);
try fillSection(self, shdr, options.program_code_size_hint, self.phdr_zig_load_re_index);
if (self.base.isRelocatable()) {
const rela_shndx = try self.addRelaShdr(try self.insertShString(".rela.text.zig"), self.zig_text_section_index.?);
try self.output_rela_sections.putNoClobber(gpa, self.zig_text_section_index.?, .{
@@ -688,7 +696,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.zig_data_rel_ro_section_index.?];
fillSection(self, shdr, 1024, self.phdr_zig_load_ro_index);
try fillSection(self, shdr, 1024, self.phdr_zig_load_ro_index);
if (self.base.isRelocatable()) {
const rela_shndx = try self.addRelaShdr(
try self.insertShString(".rela.data.rel.ro.zig"),
@@ -717,7 +725,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.zig_data_section_index.?];
fillSection(self, shdr, 1024, self.phdr_zig_load_rw_index);
try fillSection(self, shdr, 1024, self.phdr_zig_load_rw_index);
if (self.base.isRelocatable()) {
const rela_shndx = try self.addRelaShdr(
try self.insertShString(".rela.data.zig"),
@@ -758,24 +766,16 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_bss_section_index.?, .{});
}
if (zig_object.dwarf) |*dw| {
if (zig_object.dwarf) |*dwarf| {
if (self.debug_str_section_index == null) {
assert(dw.strtab.buffer.items.len == 0);
try dw.strtab.buffer.append(gpa, 0);
self.debug_str_section_index = try self.addSection(.{
.name = try self.insertShString(".debug_str"),
.flags = elf.SHF_MERGE | elf.SHF_STRINGS,
.entsize = 1,
.type = elf.SHT_PROGBITS,
.addralign = 1,
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.debug_str_section_index.?];
const size = @as(u64, @intCast(dw.strtab.buffer.items.len));
const off = self.findFreeSpace(size, 1);
shdr.sh_offset = off;
shdr.sh_size = size;
zig_object.debug_strtab_dirty = true;
zig_object.debug_str_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_str_section_index.?, .{});
}
@@ -784,14 +784,8 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.name = try self.insertShString(".debug_info"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.debug_info_section_index.?];
const size: u64 = 200;
const off = self.findFreeSpace(size, 1);
shdr.sh_offset = off;
shdr.sh_size = size;
zig_object.debug_info_header_dirty = true;
zig_object.debug_info_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_info_section_index.?, .{});
}
@@ -800,13 +794,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.name = try self.insertShString(".debug_abbrev"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.debug_abbrev_section_index.?];
const size: u64 = 128;
const off = self.findFreeSpace(size, 1);
shdr.sh_offset = off;
shdr.sh_size = size;
zig_object.debug_abbrev_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_abbrev_section_index.?, .{});
}
@@ -816,13 +804,7 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.name = try self.insertShString(".debug_aranges"),
.type = elf.SHT_PROGBITS,
.addralign = 16,
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.debug_aranges_section_index.?];
const size: u64 = 160;
const off = self.findFreeSpace(size, 16);
shdr.sh_offset = off;
shdr.sh_size = size;
zig_object.debug_aranges_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_aranges_section_index.?, .{});
}
@@ -832,62 +814,83 @@ pub fn initMetadata(self: *Elf, options: InitMetadataOptions) !void {
.name = try self.insertShString(".debug_line"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.offset = std.math.maxInt(u64),
});
const shdr = &self.shdrs.items[self.debug_line_section_index.?];
const size: u64 = 250;
const off = self.findFreeSpace(size, 1);
shdr.sh_offset = off;
shdr.sh_size = size;
zig_object.debug_line_header_dirty = true;
zig_object.debug_line_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_line_section_index.?, .{});
}
}
// We need to find current max assumed file offset, and actually write to file to make it a reality.
var end_pos: u64 = 0;
for (self.shdrs.items) |shdr| {
if (shdr.sh_offset == std.math.maxInt(u64)) continue;
end_pos = @max(end_pos, shdr.sh_offset + shdr.sh_size);
if (self.debug_line_str_section_index == null) {
self.debug_line_str_section_index = try self.addSection(.{
.name = try self.insertShString(".debug_line_str"),
.flags = elf.SHF_MERGE | elf.SHF_STRINGS,
.entsize = 1,
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
zig_object.debug_line_str_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_line_str_section_index.?, .{});
}
if (self.debug_loclists_section_index == null) {
self.debug_loclists_section_index = try self.addSection(.{
.name = try self.insertShString(".debug_loclists"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
zig_object.debug_loclists_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_loclists_section_index.?, .{});
}
if (self.debug_rnglists_section_index == null) {
self.debug_rnglists_section_index = try self.addSection(.{
.name = try self.insertShString(".debug_rnglists"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
zig_object.debug_rnglists_section_dirty = true;
try self.output_sections.putNoClobber(gpa, self.debug_rnglists_section_index.?, .{});
}
try dwarf.initMetadata();
}
try self.base.file.?.pwriteAll(&[1]u8{0}, end_pos);
}
pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
const shdr = &self.shdrs.items[shdr_index];
const maybe_phdr = if (self.phdr_to_shdr_table.get(shdr_index)) |phndx| &self.phdrs.items[phndx] else null;
const is_zerofill = shdr.sh_type == elf.SHT_NOBITS;
log.debug("allocated size {x} of {s}, needed size {x}", .{
self.allocatedSize(shdr.sh_offset),
self.getShString(shdr.sh_name),
needed_size,
});
if (needed_size > self.allocatedSize(shdr.sh_offset) and !is_zerofill) {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Must move the entire section.
const alignment = if (maybe_phdr) |phdr| phdr.p_align else shdr.sh_addralign;
const new_offset = self.findFreeSpace(needed_size, alignment);
if (shdr.sh_type != elf.SHT_NOBITS) {
const allocated_size = self.allocatedSize(shdr.sh_offset);
if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
} else if (needed_size > allocated_size) {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Must move the entire section.
const alignment = if (maybe_phdr) |phdr| phdr.p_align else shdr.sh_addralign;
const new_offset = try self.findFreeSpace(needed_size, alignment);
log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
new_offset,
new_offset + existing_size,
});
log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
new_offset,
new_offset + existing_size,
});
const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, existing_size);
// TODO figure out what to about this error condition - how to communicate it up.
if (amt != existing_size) return error.InputOutput;
const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, existing_size);
// TODO figure out what to about this error condition - how to communicate it up.
if (amt != existing_size) return error.InputOutput;
shdr.sh_offset = new_offset;
if (maybe_phdr) |phdr| phdr.p_offset = new_offset;
}
shdr.sh_size = needed_size;
if (!is_zerofill) {
shdr.sh_offset = new_offset;
if (maybe_phdr) |phdr| phdr.p_offset = new_offset;
}
if (maybe_phdr) |phdr| phdr.p_filesz = needed_size;
}
shdr.sh_size = needed_size;
if (maybe_phdr) |phdr| {
const mem_capacity = self.allocatedVirtualSize(phdr.p_vaddr);
@@ -915,11 +918,14 @@ pub fn growNonAllocSection(
) !void {
const shdr = &self.shdrs.items[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
const allocated_size = self.allocatedSize(shdr.sh_offset);
if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
} else if (needed_size > allocated_size) {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, min_alignment);
const new_offset = try self.findFreeSpace(needed_size, min_alignment);
log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
@@ -939,7 +945,6 @@ pub fn growNonAllocSection(
shdr.sh_offset = new_offset;
}
shdr.sh_size = needed_size;
self.markDirty(shdr_index);
@@ -949,15 +954,21 @@ pub fn markDirty(self: *Elf, shdr_index: u32) void {
const zig_object = self.zigObjectPtr().?;
if (zig_object.dwarf) |_| {
if (self.debug_info_section_index.? == shdr_index) {
zig_object.debug_info_header_dirty = true;
} else if (self.debug_line_section_index.? == shdr_index) {
zig_object.debug_line_header_dirty = true;
zig_object.debug_info_section_dirty = true;
} else if (self.debug_abbrev_section_index.? == shdr_index) {
zig_object.debug_abbrev_section_dirty = true;
} else if (self.debug_str_section_index.? == shdr_index) {
zig_object.debug_strtab_dirty = true;
zig_object.debug_str_section_dirty = true;
} else if (self.debug_aranges_section_index.? == shdr_index) {
zig_object.debug_aranges_section_dirty = true;
} else if (self.debug_line_section_index.? == shdr_index) {
zig_object.debug_line_section_dirty = true;
} else if (self.debug_line_str_section_index.? == shdr_index) {
zig_object.debug_line_str_section_dirty = true;
} else if (self.debug_loclists_section_index.? == shdr_index) {
zig_object.debug_loclists_section_dirty = true;
} else if (self.debug_rnglists_section_index.? == shdr_index) {
zig_object.debug_rnglists_section_dirty = true;
}
}
}
@@ -1306,6 +1317,8 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
try self.base.file.?.pwriteAll(code, file_offset);
}
if (zo.dwarf) |*dwarf| try dwarf.resolveRelocs();
if (has_reloc_errors) return error.FlushFailure;
}
@@ -2667,7 +2680,7 @@ pub fn writeShdrTable(self: *Elf) !void {
if (needed_size > self.allocatedSize(shoff)) {
self.shdr_table_offset = null;
self.shdr_table_offset = self.findFreeSpace(needed_size, shalign);
self.shdr_table_offset = try self.findFreeSpace(needed_size, shalign);
}
log.debug("writing section headers from 0x{x} to 0x{x}", .{
@@ -2900,6 +2913,18 @@ pub fn updateNav(
return self.zigObjectPtr().?.updateNav(self, pt, nav);
}
pub fn updateContainerType(
self: *Elf,
pt: Zcu.PerThread,
ty: InternPool.Index,
) link.File.UpdateNavError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (self.llvm_object) |_| return;
return self.zigObjectPtr().?.updateContainerType(pt, ty);
}
pub fn updateExports(
self: *Elf,
pt: Zcu.PerThread,
@@ -3658,11 +3683,14 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) !void {
&self.zig_data_rel_ro_section_index,
&self.zig_data_section_index,
&self.zig_bss_section_index,
&self.debug_str_section_index,
&self.debug_info_section_index,
&self.debug_abbrev_section_index,
&self.debug_str_section_index,
&self.debug_aranges_section_index,
&self.debug_line_section_index,
&self.debug_line_str_section_index,
&self.debug_loclists_section_index,
&self.debug_rnglists_section_index,
}) |maybe_index| {
if (maybe_index.*) |*index| {
index.* = backlinks[index.*];
@@ -3787,6 +3815,7 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) !void {
const atom_ptr = zo.atom(atom_index) orelse continue;
atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index];
}
if (zo.dwarf) |*dwarf| dwarf.reloadSectionMetadata();
}
for (self.output_rela_sections.keys(), self.output_rela_sections.values()) |shndx, sec| {
@@ -3992,7 +4021,7 @@ fn allocatePhdrTable(self: *Elf) error{OutOfMemory}!void {
/// Allocates alloc sections and creates load segments for sections
/// extracted from input object files.
pub fn allocateAllocSections(self: *Elf) error{OutOfMemory}!void {
pub fn allocateAllocSections(self: *Elf) !void {
// We use this struct to track maximum alignment of all TLS sections.
// According to https://github.com/rui314/mold/commit/bd46edf3f0fe9e1a787ea453c4657d535622e61f in mold,
// in-file offsets have to be aligned against the start of TLS program header.
@@ -4112,7 +4141,7 @@ pub fn allocateAllocSections(self: *Elf) error{OutOfMemory}!void {
}
const first = self.shdrs.items[cover.items[0]];
var off = self.findFreeSpace(filesz, @"align");
var off = try self.findFreeSpace(filesz, @"align");
const phndx = try self.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
@@ -4147,7 +4176,7 @@ pub fn allocateNonAllocSections(self: *Elf) !void {
const needed_size = shdr.sh_size;
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
shdr.sh_size = 0;
const new_offset = self.findFreeSpace(needed_size, shdr.sh_addralign);
const new_offset = try self.findFreeSpace(needed_size, shdr.sh_addralign);
if (self.isDebugSection(@intCast(shndx))) {
log.debug("moving {s} from 0x{x} to 0x{x}", .{
@@ -4167,6 +4196,12 @@ pub fn allocateNonAllocSections(self: *Elf) !void {
break :blk zig_object.debug_aranges_section_zig_size;
if (shndx == self.debug_line_section_index.?)
break :blk zig_object.debug_line_section_zig_size;
if (shndx == self.debug_line_str_section_index.?)
break :blk zig_object.debug_line_str_section_zig_size;
if (shndx == self.debug_loclists_section_index.?)
break :blk zig_object.debug_loclists_section_zig_size;
if (shndx == self.debug_rnglists_section_index.?)
break :blk zig_object.debug_rnglists_section_zig_size;
unreachable;
};
const amt = try self.base.file.?.copyRangeAll(
@@ -4275,6 +4310,12 @@ fn writeAtoms(self: *Elf) !void {
break :blk zig_object.debug_aranges_section_zig_size;
if (shndx == self.debug_line_section_index.?)
break :blk zig_object.debug_line_section_zig_size;
if (shndx == self.debug_line_str_section_index.?)
break :blk zig_object.debug_line_str_section_zig_size;
if (shndx == self.debug_loclists_section_index.?)
break :blk zig_object.debug_loclists_section_zig_size;
if (shndx == self.debug_rnglists_section_index.?)
break :blk zig_object.debug_rnglists_section_zig_size;
unreachable;
} else 0;
const sh_offset = shdr.sh_offset + base_offset;
@@ -5044,6 +5085,9 @@ pub fn isDebugSection(self: Elf, shndx: u32) bool {
self.debug_str_section_index,
self.debug_aranges_section_index,
self.debug_line_section_index,
self.debug_line_str_section_index,
self.debug_loclists_section_index,
self.debug_rnglists_section_index,
}) |maybe_index| {
if (maybe_index) |index| {
if (index == shndx) return true;
@@ -5109,7 +5153,7 @@ pub const AddSectionOpts = struct {
pub fn addSection(self: *Elf, opts: AddSectionOpts) !u32 {
const gpa = self.base.comp.gpa;
const index = @as(u32, @intCast(self.shdrs.items.len));
const index: u32 = @intCast(self.shdrs.items.len);
const shdr = try self.shdrs.addOne(gpa);
shdr.* = .{
.sh_name = opts.name,
+2 -1
View File
@@ -201,11 +201,12 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
// The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
// range of the compilation unit. When we expand the text section, this range changes,
// so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
zig_object.debug_info_header_dirty = true;
zig_object.debug_info_section_dirty = true;
// This becomes dirty for the same reason. We could potentially make this more
// fine-grained with the addition of support for more compilation units. It is planned to
// model each package as a different compilation unit.
zig_object.debug_aranges_section_dirty = true;
zig_object.debug_rnglists_section_dirty = true;
}
}
shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?);
+104 -102
View File
@@ -41,11 +41,14 @@ tls_variables: TlsTable = .{},
/// Table of tracked `Uav`s.
uavs: UavTable = .{},
debug_strtab_dirty: bool = false,
debug_info_section_dirty: bool = false,
debug_abbrev_section_dirty: bool = false,
debug_aranges_section_dirty: bool = false,
debug_info_header_dirty: bool = false,
debug_line_header_dirty: bool = false,
debug_str_section_dirty: bool = false,
debug_line_section_dirty: bool = false,
debug_line_str_section_dirty: bool = false,
debug_loclists_section_dirty: bool = false,
debug_rnglists_section_dirty: bool = false,
/// Size contribution of Zig's metadata to each debug section.
/// Used to track start of metadata from input object files.
@@ -54,6 +57,9 @@ debug_abbrev_section_zig_size: u64 = 0,
debug_str_section_zig_size: u64 = 0,
debug_aranges_section_zig_size: u64 = 0,
debug_line_section_zig_size: u64 = 0,
debug_line_str_section_zig_size: u64 = 0,
debug_loclists_section_zig_size: u64 = 0,
debug_rnglists_section_zig_size: u64 = 0,
pub const global_symbol_bit: u32 = 0x80000000;
pub const symbol_mask: u32 = 0x7fffffff;
@@ -76,10 +82,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf) !void {
switch (comp.config.debug_format) {
.strip => {},
.dwarf => |v| {
assert(v == .@"32");
self.dwarf = Dwarf.init(&elf_file.base, .dwarf32);
},
.dwarf => |v| self.dwarf = Dwarf.init(&elf_file.base, v),
.code_view => unreachable,
}
}
@@ -119,8 +122,8 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
}
self.tls_variables.deinit(allocator);
if (self.dwarf) |*dw| {
dw.deinit();
if (self.dwarf) |*dwarf| {
dwarf.deinit();
}
}
@@ -165,44 +168,14 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
}
}
if (self.dwarf) |*dw| {
if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .{ .zcu = elf_file.base.comp.module.?, .tid = tid };
try dw.flushModule(pt);
try dwarf.flushModule(pt);
// TODO I need to re-think how to handle ZigObject's debug sections AND debug sections
// extracted from input object files correctly.
if (self.debug_abbrev_section_dirty) {
try dw.writeDbgAbbrev();
self.debug_abbrev_section_dirty = false;
}
if (self.debug_info_header_dirty) {
const text_shdr = elf_file.shdrs.items[elf_file.zig_text_section_index.?];
const low_pc = text_shdr.sh_addr;
const high_pc = text_shdr.sh_addr + text_shdr.sh_size;
try dw.writeDbgInfoHeader(pt.zcu, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
if (self.debug_aranges_section_dirty) {
const text_shdr = elf_file.shdrs.items[elf_file.zig_text_section_index.?];
try dw.writeDbgAranges(text_shdr.sh_addr, text_shdr.sh_size);
self.debug_aranges_section_dirty = false;
}
if (self.debug_line_header_dirty) {
try dw.writeDbgLineHeader();
self.debug_line_header_dirty = false;
}
if (elf_file.debug_str_section_index) |shndx| {
if (self.debug_strtab_dirty or dw.strtab.buffer.items.len != elf_file.shdrs.items[shndx].sh_size) {
try elf_file.growNonAllocSection(shndx, dw.strtab.buffer.items.len, 1, false);
const shdr = elf_file.shdrs.items[shndx];
try elf_file.base.file.?.pwriteAll(dw.strtab.buffer.items, shdr.sh_offset);
self.debug_strtab_dirty = false;
}
}
self.debug_abbrev_section_dirty = false;
self.debug_aranges_section_dirty = false;
self.debug_rnglists_section_dirty = false;
self.debug_str_section_dirty = false;
self.saveDebugSectionsSizes(elf_file);
}
@@ -213,7 +186,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
// such as debug_line_header_dirty and debug_info_header_dirty.
assert(!self.debug_abbrev_section_dirty);
assert(!self.debug_aranges_section_dirty);
assert(!self.debug_strtab_dirty);
assert(!self.debug_rnglists_section_dirty);
assert(!self.debug_str_section_dirty);
}
fn saveDebugSectionsSizes(self: *ZigObject, elf_file: *Elf) void {
@@ -232,6 +206,15 @@ fn saveDebugSectionsSizes(self: *ZigObject, elf_file: *Elf) void {
if (elf_file.debug_line_section_index) |shndx| {
self.debug_line_section_zig_size = elf_file.shdrs.items[shndx].sh_size;
}
if (elf_file.debug_line_str_section_index) |shndx| {
self.debug_line_str_section_zig_size = elf_file.shdrs.items[shndx].sh_size;
}
if (elf_file.debug_loclists_section_index) |shndx| {
self.debug_loclists_section_zig_size = elf_file.shdrs.items[shndx].sh_size;
}
if (elf_file.debug_rnglists_section_index) |shndx| {
self.debug_rnglists_section_zig_size = elf_file.shdrs.items[shndx].sh_size;
}
}
fn newSymbol(self: *ZigObject, allocator: Allocator, name_off: u32, st_bind: u4) !Symbol.Index {
@@ -783,8 +766,8 @@ pub fn freeNav(self: *ZigObject, elf_file: *Elf, nav_index: InternPool.Nav.Index
kv.value.exports.deinit(gpa);
}
if (self.dwarf) |*dw| {
dw.freeNav(nav_index);
if (self.dwarf) |*dwarf| {
dwarf.freeNav(nav_index);
}
}
@@ -1034,8 +1017,8 @@ pub fn updateFunc(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var dwarf_state = if (self.dwarf) |*dw| try dw.initNavState(pt, func.owner_nav) else null;
defer if (dwarf_state) |*ds| ds.deinit();
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
const res = try codegen.generateFunction(
&elf_file.base,
@@ -1045,7 +1028,7 @@ pub fn updateFunc(
air,
liveness,
&code_buffer,
if (dwarf_state) |*ds| .{ .dwarf = ds } else .none,
if (debug_wip_nav) |*dn| .{ .dwarf = dn } else .none,
);
const code = switch (res) {
@@ -1072,14 +1055,17 @@ pub fn updateFunc(
break :blk .{ atom_ptr.value, atom_ptr.alignment };
};
if (dwarf_state) |*ds| {
if (debug_wip_nav) |*wip_nav| {
const sym = self.symbol(sym_index);
try self.dwarf.?.commitNavState(
try self.dwarf.?.finishWipNav(
pt,
func.owner_nav,
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
ds,
.{
.index = sym_index,
.addr = @intCast(sym.address(.{}, elf_file)),
.size = sym.atom(elf_file).?.size,
},
wip_nav,
);
}
@@ -1152,59 +1138,75 @@ pub fn updateNav(
else => nav_val,
};
const sym_index = try self.getOrCreateMetadataForNav(elf_file, nav_index);
self.symbol(sym_index).atom(elf_file).?.freeRelocs(elf_file);
if (nav_init.typeOf(zcu).isFnOrHasRuntimeBits(pt)) {
const sym_index = try self.getOrCreateMetadataForNav(elf_file, nav_index);
self.symbol(sym_index).atom(elf_file).?.freeRelocs(elf_file);
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
defer code_buffer.deinit();
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
defer code_buffer.deinit();
var nav_state: ?Dwarf.NavState = if (self.dwarf) |*dw| try dw.initNavState(pt, nav_index) else null;
defer if (nav_state) |*ns| ns.deinit();
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
// TODO implement .debug_info for global variables
const res = try codegen.generateSymbol(
&elf_file.base,
pt,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
if (nav_state) |*ns| .{ .dwarf = ns } else .none,
.{ .parent_atom_index = sym_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code);
log.debug("setting shdr({x},{s}) for {}", .{
shndx,
elf_file.getShString(elf_file.shdrs.items[shndx].sh_name),
nav.fqn.fmt(ip),
});
if (elf_file.shdrs.items[shndx].sh_flags & elf.SHF_TLS != 0)
try self.updateTlv(elf_file, pt, nav_index, sym_index, shndx, code)
else
try self.updateNavCode(elf_file, pt, nav_index, sym_index, shndx, code, elf.STT_OBJECT);
if (nav_state) |*ns| {
const sym = self.symbol(sym_index);
try self.dwarf.?.commitNavState(
// TODO implement .debug_info for global variables
const res = try codegen.generateSymbol(
&elf_file.base,
pt,
nav_index,
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
ns,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none,
.{ .parent_atom_index = sym_index },
);
}
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const shndx = try self.getNavShdrIndex(elf_file, zcu, nav_index, sym_index, code);
log.debug("setting shdr({x},{s}) for {}", .{
shndx,
elf_file.getShString(elf_file.shdrs.items[shndx].sh_name),
nav.fqn.fmt(ip),
});
if (elf_file.shdrs.items[shndx].sh_flags & elf.SHF_TLS != 0)
try self.updateTlv(elf_file, pt, nav_index, sym_index, shndx, code)
else
try self.updateNavCode(elf_file, pt, nav_index, sym_index, shndx, code, elf.STT_OBJECT);
if (debug_wip_nav) |*wip_nav| {
const sym = self.symbol(sym_index);
try self.dwarf.?.finishWipNav(
pt,
nav_index,
.{
.index = sym_index,
.addr = @intCast(sym.address(.{}, elf_file)),
.size = sym.atom(elf_file).?.size,
},
wip_nav,
);
}
} else if (self.dwarf) |*dwarf| try dwarf.updateComptimeNav(pt, nav_index);
// Exports will be updated by `Zcu.processExports` after the update.
}
pub fn updateContainerType(
self: *ZigObject,
pt: Zcu.PerThread,
ty: InternPool.Index,
) link.File.UpdateNavError!void {
const tracy = trace(@src());
defer tracy.end();
if (self.dwarf) |*dwarf| try dwarf.updateContainerType(pt, ty);
}
fn updateLazySymbol(
self: *ZigObject,
elf_file: *Elf,
@@ -1441,8 +1443,8 @@ pub fn updateNavLineNumber(
log.debug("updateNavLineNumber {}({d})", .{ nav.fqn.fmt(ip), nav_index });
if (self.dwarf) |*dw| {
try dw.updateNavLineNumber(pt.zcu, nav_index);
if (self.dwarf) |*dwarf| {
try dwarf.updateNavLineNumber(pt.zcu, nav_index);
}
}
+7 -1
View File
@@ -401,7 +401,7 @@ fn allocateAllocSections(elf_file: *Elf) !void {
const needed_size = shdr.sh_size;
if (needed_size > elf_file.allocatedSize(shdr.sh_offset)) {
shdr.sh_size = 0;
const new_offset = elf_file.findFreeSpace(needed_size, shdr.sh_addralign);
const new_offset = try elf_file.findFreeSpace(needed_size, shdr.sh_addralign);
shdr.sh_offset = new_offset;
shdr.sh_size = needed_size;
}
@@ -434,6 +434,12 @@ fn writeAtoms(elf_file: *Elf) !void {
break :blk zig_object.debug_aranges_section_zig_size;
if (shndx == elf_file.debug_line_section_index.?)
break :blk zig_object.debug_line_section_zig_size;
if (shndx == elf_file.debug_line_str_section_index.?)
break :blk zig_object.debug_line_str_section_zig_size;
if (shndx == elf_file.debug_loclists_section_index.?)
break :blk zig_object.debug_loclists_section_zig_size;
if (shndx == elf_file.debug_rnglists_section_index.?)
break :blk zig_object.debug_rnglists_section_zig_size;
unreachable;
} else 0;
const sh_offset = shdr.sh_offset + base_offset;
+127 -120
View File
@@ -94,6 +94,9 @@ debug_abbrev_sect_index: ?u8 = null,
debug_str_sect_index: ?u8 = null,
debug_aranges_sect_index: ?u8 = null,
debug_line_sect_index: ?u8 = null,
debug_line_str_sect_index: ?u8 = null,
debug_loclists_sect_index: ?u8 = null,
debug_rnglists_sect_index: ?u8 = null,
has_tlv: AtomicBool = AtomicBool.init(false),
binds_to_weak: AtomicBool = AtomicBool.init(false),
@@ -1789,12 +1792,42 @@ pub fn sortSections(self: *MachO) !void {
self.sections.appendAssumeCapacity(slice.get(sorted.index));
}
for (&[_]*?u8{
&self.data_sect_index,
&self.got_sect_index,
&self.zig_text_sect_index,
&self.zig_got_sect_index,
&self.zig_const_sect_index,
&self.zig_data_sect_index,
&self.zig_bss_sect_index,
&self.stubs_sect_index,
&self.stubs_helper_sect_index,
&self.la_symbol_ptr_sect_index,
&self.tlv_ptr_sect_index,
&self.eh_frame_sect_index,
&self.unwind_info_sect_index,
&self.objc_stubs_sect_index,
&self.debug_str_sect_index,
&self.debug_info_sect_index,
&self.debug_abbrev_sect_index,
&self.debug_aranges_sect_index,
&self.debug_line_sect_index,
&self.debug_line_str_sect_index,
&self.debug_loclists_sect_index,
&self.debug_rnglists_sect_index,
}) |maybe_index| {
if (maybe_index.*) |*index| {
index.* = backlinks[index.*];
}
}
if (self.getZigObject()) |zo| {
for (zo.getAtoms()) |atom_index| {
const atom = zo.getAtom(atom_index) orelse continue;
if (!atom.isAlive()) continue;
atom.out_n_sect = backlinks[atom.out_n_sect];
}
if (zo.dwarf) |*dwarf| dwarf.reloadSectionMetadata();
}
for (self.objects.items) |index| {
@@ -1813,32 +1846,6 @@ pub fn sortSections(self: *MachO) !void {
atom.out_n_sect = backlinks[atom.out_n_sect];
}
}
for (&[_]*?u8{
&self.data_sect_index,
&self.got_sect_index,
&self.zig_text_sect_index,
&self.zig_got_sect_index,
&self.zig_const_sect_index,
&self.zig_data_sect_index,
&self.zig_bss_sect_index,
&self.stubs_sect_index,
&self.stubs_helper_sect_index,
&self.la_symbol_ptr_sect_index,
&self.tlv_ptr_sect_index,
&self.eh_frame_sect_index,
&self.unwind_info_sect_index,
&self.objc_stubs_sect_index,
&self.debug_info_sect_index,
&self.debug_str_sect_index,
&self.debug_line_sect_index,
&self.debug_abbrev_sect_index,
&self.debug_info_sect_index,
}) |maybe_index| {
if (maybe_index.*) |*index| {
index.* = backlinks[index.*];
}
}
}
pub fn addAtomsToSections(self: *MachO) !void {
@@ -2189,7 +2196,7 @@ fn allocateSections(self: *MachO) !void {
header.size = 0;
// Must move the entire section.
const new_offset = self.findFreeSpace(existing_size, page_size);
const new_offset = try self.findFreeSpace(existing_size, page_size);
log.debug("moving '{s},{s}' from 0x{x} to 0x{x}", .{
header.segName(),
@@ -3066,32 +3073,36 @@ pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
return actual_size +| (actual_size / ideal_factor);
}
fn detectAllocCollision(self: *MachO, start: u64, size: u64) ?u64 {
fn detectAllocCollision(self: *MachO, start: u64, size: u64) !?u64 {
// Conservatively commit one page size as reserved space for the headers as we
// expect it to grow and everything else be moved in flush anyhow.
const header_size = self.getPageSize();
if (start < header_size)
return header_size;
var at_end = true;
const end = start + padToIdeal(size);
for (self.sections.items(.header)) |header| {
if (header.isZerofill()) continue;
const increased_size = padToIdeal(header.size);
const test_end = header.offset +| increased_size;
if (end > header.offset and start < test_end) {
return test_end;
if (start < test_end) {
if (end > header.offset) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
for (self.segments.items) |seg| {
const increased_size = padToIdeal(seg.filesize);
const test_end = seg.fileoff +| increased_size;
if (end > seg.fileoff and start < test_end) {
return test_end;
if (start < test_end) {
if (end > seg.fileoff) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
if (at_end) try self.base.file.?.setEndPos(end);
return null;
}
@@ -3159,9 +3170,9 @@ pub fn allocatedSizeVirtual(self: *MachO, start: u64) u64 {
return min_pos - start;
}
pub fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 {
pub fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) !u64 {
var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| {
while (try self.detectAllocCollision(start, object_size)) |item_end| {
start = mem.alignForward(u64, item_end, min_alignment);
}
return start;
@@ -3210,7 +3221,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
{
const filesize = options.program_code_size_hint;
const off = self.findFreeSpace(filesize, self.getPageSize());
const off = try self.findFreeSpace(filesize, self.getPageSize());
self.zig_text_seg_index = try self.addSegment("__TEXT_ZIG", .{
.fileoff = off,
.filesize = filesize,
@@ -3222,7 +3233,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
{
const filesize = options.symbol_count_hint * @sizeOf(u64);
const off = self.findFreeSpace(filesize, self.getPageSize());
const off = try self.findFreeSpace(filesize, self.getPageSize());
self.zig_got_seg_index = try self.addSegment("__GOT_ZIG", .{
.fileoff = off,
.filesize = filesize,
@@ -3234,7 +3245,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
{
const filesize: u64 = 1024;
const off = self.findFreeSpace(filesize, self.getPageSize());
const off = try self.findFreeSpace(filesize, self.getPageSize());
self.zig_const_seg_index = try self.addSegment("__CONST_ZIG", .{
.fileoff = off,
.filesize = filesize,
@@ -3246,7 +3257,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
{
const filesize: u64 = 1024;
const off = self.findFreeSpace(filesize, self.getPageSize());
const off = try self.findFreeSpace(filesize, self.getPageSize());
self.zig_data_seg_index = try self.addSegment("__DATA_ZIG", .{
.fileoff = off,
.filesize = filesize,
@@ -3265,7 +3276,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
});
}
if (options.zo.dwarf) |_| {
if (options.zo.dwarf) |*dwarf| {
// Create dSYM bundle.
log.debug("creating {s}.dSYM bundle", .{options.emit.sub_path});
@@ -3288,6 +3299,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
self.d_sym = .{ .allocator = gpa, .file = d_sym_file };
try self.d_sym.?.initMetadata(self);
try dwarf.initMetadata();
}
}
@@ -3307,7 +3319,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
const sect = &macho_file.sections.items(.header)[sect_id];
const alignment = try math.powi(u32, 2, sect.@"align");
if (!sect.isZerofill()) {
sect.offset = math.cast(u32, macho_file.findFreeSpace(size, alignment)) orelse
sect.offset = math.cast(u32, try macho_file.findFreeSpace(size, alignment)) orelse
return error.Overflow;
}
sect.addr = macho_file.findFreeSpaceVirtual(size, alignment);
@@ -3367,43 +3379,34 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
}
}
if (self.base.isRelocatable() and options.zo.dwarf != null) {
{
self.debug_str_sect_index = try self.addSection("__DWARF", "__debug_str", .{
.flags = macho.S_ATTR_DEBUG,
});
try allocSect(self, self.debug_str_sect_index.?, 200);
}
{
self.debug_info_sect_index = try self.addSection("__DWARF", "__debug_info", .{
.flags = macho.S_ATTR_DEBUG,
});
try allocSect(self, self.debug_info_sect_index.?, 200);
}
{
self.debug_abbrev_sect_index = try self.addSection("__DWARF", "__debug_abbrev", .{
.flags = macho.S_ATTR_DEBUG,
});
try allocSect(self, self.debug_abbrev_sect_index.?, 128);
}
{
self.debug_aranges_sect_index = try self.addSection("__DWARF", "__debug_aranges", .{
.alignment = 4,
.flags = macho.S_ATTR_DEBUG,
});
try allocSect(self, self.debug_aranges_sect_index.?, 160);
}
{
self.debug_line_sect_index = try self.addSection("__DWARF", "__debug_line", .{
.flags = macho.S_ATTR_DEBUG,
});
try allocSect(self, self.debug_line_sect_index.?, 250);
}
}
if (self.base.isRelocatable()) if (options.zo.dwarf) |*dwarf| {
self.debug_str_sect_index = try self.addSection("__DWARF", "__debug_str", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_info_sect_index = try self.addSection("__DWARF", "__debug_info", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_abbrev_sect_index = try self.addSection("__DWARF", "__debug_abbrev", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_aranges_sect_index = try self.addSection("__DWARF", "__debug_aranges", .{
.alignment = 4,
.flags = macho.S_ATTR_DEBUG,
});
self.debug_line_sect_index = try self.addSection("__DWARF", "__debug_line", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_line_str_sect_index = try self.addSection("__DWARF", "__debug_line_str", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_loclists_sect_index = try self.addSection("__DWARF", "__debug_loclists", .{
.flags = macho.S_ATTR_DEBUG,
});
self.debug_rnglists_sect_index = try self.addSection("__DWARF", "__debug_rnglists", .{
.flags = macho.S_ATTR_DEBUG,
});
try dwarf.initMetadata();
};
}
pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void {
@@ -3417,35 +3420,36 @@ pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void {
fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
const sect = &self.sections.items(.header)[sect_index];
if (needed_size > self.allocatedSize(sect.offset) and !sect.isZerofill()) {
const existing_size = sect.size;
sect.size = 0;
// Must move the entire section.
const alignment = self.getPageSize();
const new_offset = self.findFreeSpace(needed_size, alignment);
log.debug("moving '{s},{s}' from 0x{x} to 0x{x}", .{
sect.segName(),
sect.sectName(),
sect.offset,
new_offset,
});
try self.copyRangeAllZeroOut(sect.offset, new_offset, existing_size);
sect.offset = @intCast(new_offset);
}
sect.size = needed_size;
const seg_id = self.sections.items(.segment_id)[sect_index];
const seg = &self.segments.items[seg_id];
seg.fileoff = sect.offset;
if (!sect.isZerofill()) {
const allocated_size = self.allocatedSize(sect.offset);
if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(sect.offset + needed_size);
} else if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0;
// Must move the entire section.
const alignment = self.getPageSize();
const new_offset = try self.findFreeSpace(needed_size, alignment);
log.debug("moving '{s},{s}' from 0x{x} to 0x{x}", .{
sect.segName(),
sect.sectName(),
sect.offset,
new_offset,
});
try self.copyRangeAllZeroOut(sect.offset, new_offset, existing_size);
sect.offset = @intCast(new_offset);
}
seg.filesize = needed_size;
}
sect.size = needed_size;
seg.fileoff = sect.offset;
const mem_capacity = self.allocatedSizeVirtual(seg.vmaddr);
if (needed_size > mem_capacity) {
@@ -3464,30 +3468,34 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
const sect = &self.sections.items(.header)[sect_index];
if (needed_size > self.allocatedSize(sect.offset) and !sect.isZerofill()) {
const existing_size = sect.size;
sect.size = 0;
if (!sect.isZerofill()) {
const allocated_size = self.allocatedSize(sect.offset);
if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.base.file.?.setEndPos(sect.offset + needed_size);
} else if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0;
// Must move the entire section.
const alignment = try math.powi(u32, 2, sect.@"align");
const new_offset = self.findFreeSpace(needed_size, alignment);
const new_addr = self.findFreeSpaceVirtual(needed_size, alignment);
// Must move the entire section.
const alignment = try math.powi(u32, 2, sect.@"align");
const new_offset = try self.findFreeSpace(needed_size, alignment);
const new_addr = self.findFreeSpaceVirtual(needed_size, alignment);
log.debug("new '{s},{s}' file offset 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
sect.segName(),
sect.sectName(),
new_offset,
new_offset + existing_size,
new_addr,
new_addr + existing_size,
});
log.debug("new '{s},{s}' file offset 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
sect.segName(),
sect.sectName(),
new_offset,
new_offset + existing_size,
new_addr,
new_addr + existing_size,
});
try self.copyRangeAll(sect.offset, new_offset, existing_size);
try self.copyRangeAll(sect.offset, new_offset, existing_size);
sect.offset = @intCast(new_offset);
sect.addr = new_addr;
sect.offset = @intCast(new_offset);
sect.addr = new_addr;
}
}
sect.size = needed_size;
}
@@ -4591,7 +4599,6 @@ const std = @import("std");
const build_options = @import("build_options");
const builtin = @import("builtin");
const assert = std.debug.assert;
const dwarf = std.dwarf;
const fs = std.fs;
const log = std.log.scoped(.link);
const state_log = std.log.scoped(.link_state);
+36 -31
View File
@@ -15,6 +15,9 @@ debug_abbrev_section_index: ?u8 = null,
debug_str_section_index: ?u8 = null,
debug_aranges_section_index: ?u8 = null,
debug_line_section_index: ?u8 = null,
debug_line_str_section_index: ?u8 = null,
debug_loclists_section_index: ?u8 = null,
debug_rnglists_section_index: ?u8 = null,
relocs: std.ArrayListUnmanaged(Reloc) = .{},
@@ -56,13 +59,16 @@ pub fn initMetadata(self: *DebugSymbols, macho_file: *MachO) !void {
});
}
self.debug_str_section_index = try self.allocateSection("__debug_str", 200, 0);
self.debug_info_section_index = try self.allocateSection("__debug_info", 200, 0);
self.debug_abbrev_section_index = try self.allocateSection("__debug_abbrev", 128, 0);
self.debug_aranges_section_index = try self.allocateSection("__debug_aranges", 160, 4);
self.debug_line_section_index = try self.allocateSection("__debug_line", 250, 0);
self.debug_str_section_index = try self.createSection("__debug_str", 0);
self.debug_info_section_index = try self.createSection("__debug_info", 0);
self.debug_abbrev_section_index = try self.createSection("__debug_abbrev", 0);
self.debug_aranges_section_index = try self.createSection("__debug_aranges", 4);
self.debug_line_section_index = try self.createSection("__debug_line", 0);
self.debug_line_str_section_index = try self.createSection("__debug_line_str", 0);
self.debug_loclists_section_index = try self.createSection("__debug_loclists", 0);
self.debug_rnglists_section_index = try self.createSection("__debug_rnglists", 0);
self.linkedit_segment_cmd_index = @as(u8, @intCast(self.segments.items.len));
self.linkedit_segment_cmd_index = @intCast(self.segments.items.len);
try self.segments.append(self.allocator, .{
.segname = makeStaticString("__LINKEDIT"),
.maxprot = macho.PROT.READ,
@@ -71,27 +77,17 @@ pub fn initMetadata(self: *DebugSymbols, macho_file: *MachO) !void {
});
}
fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignment: u16) !u8 {
fn createSection(self: *DebugSymbols, sectname: []const u8, alignment: u16) !u8 {
const segment = self.getDwarfSegmentPtr();
var sect = macho.section_64{
.sectname = makeStaticString(sectname),
.segname = segment.segname,
.size = @as(u32, @intCast(size)),
.@"align" = alignment,
};
const alignment_pow_2 = try math.powi(u32, 2, alignment);
const off = self.findFreeSpace(size, alignment_pow_2);
log.debug("found {s},{s} section free space 0x{x} to 0x{x}", .{
sect.segName(),
sect.sectName(),
off,
off + size,
});
log.debug("create {s},{s} section", .{ sect.segName(), sect.sectName() });
sect.offset = @as(u32, @intCast(off));
const index = @as(u8, @intCast(self.sections.items.len));
const index: u8 = @intCast(self.sections.items.len);
try self.sections.append(self.allocator, sect);
segment.cmdsize += @sizeOf(macho.section_64);
segment.nsects += 1;
@@ -102,16 +98,19 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
pub fn growSection(
self: *DebugSymbols,
sect_index: u8,
needed_size: u32,
needed_size: u64,
requires_file_copy: bool,
macho_file: *MachO,
) !void {
const sect = self.getSectionPtr(sect_index);
if (needed_size > self.allocatedSize(sect.offset)) {
const allocated_size = self.allocatedSize(sect.offset);
if (sect.offset + allocated_size == std.math.maxInt(u64)) {
try self.file.setEndPos(sect.offset + needed_size);
} else if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0; // free the space
const new_offset = self.findFreeSpace(needed_size, 1);
const new_offset = try self.findFreeSpace(needed_size, 1);
log.debug("moving {s} section: {} bytes from 0x{x} to 0x{x}", .{
sect.sectName(),
@@ -130,7 +129,7 @@ pub fn growSection(
if (amt != existing_size) return error.InputOutput;
}
sect.offset = @as(u32, @intCast(new_offset));
sect.offset = @intCast(new_offset);
}
sect.size = needed_size;
@@ -153,22 +152,27 @@ pub fn markDirty(self: *DebugSymbols, sect_index: u8, macho_file: *MachO) void {
}
}
fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) ?u64 {
fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) !?u64 {
var at_end = true;
const end = start + padToIdeal(size);
for (self.sections.items) |section| {
const increased_size = padToIdeal(section.size);
const test_end = section.offset + increased_size;
if (end > section.offset and start < test_end) {
return test_end;
if (start < test_end) {
if (end > section.offset) return test_end;
if (test_end < std.math.maxInt(u64)) at_end = false;
}
}
if (at_end) try self.file.setEndPos(end);
return null;
}
fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 {
fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) !u64 {
const segment = self.getDwarfSegmentPtr();
var offset: u64 = segment.fileoff;
while (self.detectAllocCollision(offset, object_size)) |item_end| {
while (try self.detectAllocCollision(offset, object_size)) |item_end| {
offset = mem.alignForward(u64, item_end, min_alignment);
}
return offset;
@@ -346,6 +350,7 @@ fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds
}
fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
if (start == 0) return 0;
const seg = self.getDwarfSegmentPtr();
assert(start >= seg.fileoff);
var min_pos: u64 = std.math.maxInt(u64);
@@ -413,9 +418,9 @@ pub fn writeStrtab(self: *DebugSymbols, off: u32) !u32 {
pub fn getSectionIndexes(self: *DebugSymbols, segment_index: u8) struct { start: u8, end: u8 } {
var start: u8 = 0;
const nsects = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @as(u8, @intCast(seg.nsects));
start += @as(u8, @intCast(seg.nsects));
const nsects: u8 = for (self.segments.items, 0..) |seg, i| {
if (i == segment_index) break @intCast(seg.nsects);
start += @intCast(seg.nsects);
} else 0;
return .{ .start = start, .end = start + nsects };
}
+64 -99
View File
@@ -55,8 +55,7 @@ pub fn init(self: *ZigObject, macho_file: *MachO) !void {
switch (comp.config.debug_format) {
.strip => {},
.dwarf => |v| {
assert(v == .@"32");
self.dwarf = Dwarf.init(&macho_file.base, .dwarf32);
self.dwarf = Dwarf.init(&macho_file.base, v);
self.debug_strtab_dirty = true;
self.debug_abbrev_dirty = true;
self.debug_aranges_dirty = true;
@@ -101,8 +100,8 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
}
self.tlv_initializers.deinit(allocator);
if (self.dwarf) |*dw| {
dw.deinit();
if (self.dwarf) |*dwarf| {
dwarf.deinit();
}
}
@@ -595,56 +594,13 @@ pub fn flushModule(self: *ZigObject, macho_file: *MachO, tid: Zcu.PerThread.Id)
if (metadata.const_state != .unused) metadata.const_state = .flushed;
}
if (self.dwarf) |*dw| {
if (self.dwarf) |*dwarf| {
const pt: Zcu.PerThread = .{ .zcu = macho_file.base.comp.module.?, .tid = tid };
try dw.flushModule(pt);
try dwarf.flushModule(pt);
if (self.debug_abbrev_dirty) {
try dw.writeDbgAbbrev();
self.debug_abbrev_dirty = false;
}
if (self.debug_info_header_dirty) {
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_section = macho_file.sections.items(.header)[macho_file.zig_text_sect_index.?];
const low_pc = text_section.addr;
const high_pc = text_section.addr + text_section.size;
try dw.writeDbgInfoHeader(pt.zcu, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
if (self.debug_aranges_dirty) {
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_section = macho_file.sections.items(.header)[macho_file.zig_text_sect_index.?];
try dw.writeDbgAranges(text_section.addr, text_section.size);
self.debug_aranges_dirty = false;
}
if (self.debug_line_header_dirty) {
try dw.writeDbgLineHeader();
self.debug_line_header_dirty = false;
}
if (!macho_file.base.isRelocatable()) {
const d_sym = macho_file.getDebugSymbols().?;
const sect_index = d_sym.debug_str_section_index.?;
if (self.debug_strtab_dirty or dw.strtab.buffer.items.len != d_sym.getSection(sect_index).size) {
const needed_size = @as(u32, @intCast(dw.strtab.buffer.items.len));
try d_sym.growSection(sect_index, needed_size, false, macho_file);
try d_sym.file.pwriteAll(dw.strtab.buffer.items, d_sym.getSection(sect_index).offset);
self.debug_strtab_dirty = false;
}
} else {
const sect_index = macho_file.debug_str_sect_index.?;
if (self.debug_strtab_dirty or dw.strtab.buffer.items.len != macho_file.sections.items(.header)[sect_index].size) {
const needed_size = @as(u32, @intCast(dw.strtab.buffer.items.len));
try macho_file.growSection(sect_index, needed_size);
try macho_file.base.file.?.pwriteAll(dw.strtab.buffer.items, macho_file.sections.items(.header)[sect_index].offset);
self.debug_strtab_dirty = false;
}
}
self.debug_abbrev_dirty = false;
self.debug_aranges_dirty = false;
self.debug_strtab_dirty = false;
}
// The point of flushModule() is to commit changes, so in theory, nothing should
@@ -816,8 +772,8 @@ pub fn updateFunc(
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
var dwarf_state = if (self.dwarf) |*dw| try dw.initNavState(pt, func.owner_nav) else null;
defer if (dwarf_state) |*ds| ds.deinit();
var dwarf_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, func.owner_nav, sym_index) else null;
defer if (dwarf_wip_nav) |*wip_nav| wip_nav.deinit();
const res = try codegen.generateFunction(
&macho_file.base,
@@ -827,7 +783,7 @@ pub fn updateFunc(
air,
liveness,
&code_buffer,
if (dwarf_state) |*ds| .{ .dwarf = ds } else .none,
if (dwarf_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none,
);
const code = switch (res) {
@@ -841,14 +797,17 @@ pub fn updateFunc(
const sect_index = try self.getNavOutputSection(macho_file, zcu, func.owner_nav, code);
try self.updateNavCode(macho_file, pt, func.owner_nav, sym_index, sect_index, code);
if (dwarf_state) |*ds| {
if (dwarf_wip_nav) |*wip_nav| {
const sym = self.symbols.items[sym_index];
try self.dwarf.?.commitNavState(
try self.dwarf.?.finishWipNav(
pt,
func.owner_nav,
sym.getAddress(.{}, macho_file),
sym.getAtom(macho_file).?.size,
ds,
.{
.index = sym_index,
.addr = sym.getAddress(.{}, macho_file),
.size = sym.getAtom(macho_file).?.size,
},
wip_nav,
);
}
@@ -866,6 +825,7 @@ pub fn updateNav(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const nav_val = zcu.navValue(nav_index);
const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
.variable => |variable| Value.fromInterned(variable.init),
@@ -882,48 +842,53 @@ pub fn updateNav(
else => nav_val,
};
const sym_index = try self.getOrCreateMetadataForNav(macho_file, nav_index);
self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);
if (nav_init.typeOf(zcu).isFnOrHasRuntimeBits(pt)) {
const sym_index = try self.getOrCreateMetadataForNav(macho_file, nav_index);
self.symbols.items[sym_index].getAtom(macho_file).?.freeRelocs(macho_file);
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
defer code_buffer.deinit();
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
defer code_buffer.deinit();
var nav_state: ?Dwarf.NavState = if (self.dwarf) |*dw| try dw.initNavState(pt, nav_index) else null;
defer if (nav_state) |*ns| ns.deinit();
var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null;
defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit();
const res = try codegen.generateSymbol(
&macho_file.base,
pt,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
if (nav_state) |*ns| .{ .dwarf = ns } else .none,
.{ .parent_atom_index = sym_index },
);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code);
if (isThreadlocal(macho_file, nav_index))
try self.updateTlv(macho_file, pt, nav_index, sym_index, sect_index, code)
else
try self.updateNavCode(macho_file, pt, nav_index, sym_index, sect_index, code);
if (nav_state) |*ns| {
const sym = self.symbols.items[sym_index];
try self.dwarf.?.commitNavState(
const res = try codegen.generateSymbol(
&macho_file.base,
pt,
nav_index,
sym.getAddress(.{}, macho_file),
sym.getAtom(macho_file).?.size,
ns,
zcu.navSrcLoc(nav_index),
nav_init,
&code_buffer,
if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none,
.{ .parent_atom_index = sym_index },
);
}
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code);
if (isThreadlocal(macho_file, nav_index))
try self.updateTlv(macho_file, pt, nav_index, sym_index, sect_index, code)
else
try self.updateNavCode(macho_file, pt, nav_index, sym_index, sect_index, code);
if (debug_wip_nav) |*wip_nav| {
const sym = self.symbols.items[sym_index];
try self.dwarf.?.finishWipNav(
pt,
nav_index,
.{
.index = sym_index,
.addr = sym.getAddress(.{}, macho_file),
.size = sym.getAtom(macho_file).?.size,
},
wip_nav,
);
}
} else if (self.dwarf) |*dwarf| try dwarf.updateComptimeNav(pt, nav_index);
// Exports will be updated by `Zcu.processExports` after the update.
}
@@ -1435,8 +1400,8 @@ pub fn updateNavLineNumber(
pt: Zcu.PerThread,
nav_index: InternPool.Nav.Index,
) !void {
if (self.dwarf) |*dw| {
try dw.updateNavLineNumber(pt.zcu, nav_index);
if (self.dwarf) |*dwarf| {
try dwarf.updateNavLineNumber(pt.zcu, nav_index);
}
}
+1 -1
View File
@@ -465,7 +465,7 @@ fn allocateSections(macho_file: *MachO) !void {
const alignment = try math.powi(u32, 2, header.@"align");
if (!header.isZerofill()) {
if (needed_size > macho_file.allocatedSize(header.offset)) {
header.offset = math.cast(u32, macho_file.findFreeSpace(needed_size, alignment)) orelse
header.offset = math.cast(u32, try macho_file.findFreeSpace(needed_size, alignment)) orelse
return error.Overflow;
}
}
+23 -20
View File
@@ -454,28 +454,31 @@ pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde
},
else => nav_val,
};
const atom_idx = try self.seeNav(pt, nav_index);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
// TODO we need the symbol index for symbol in the table of locals for the containing atom
const res = try codegen.generateSymbol(&self.base, pt, zcu.navSrcLoc(nav_index), nav_init, &code_buffer, .none, .{
.parent_atom_index = @intCast(atom_idx),
});
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, nav_index, em);
return;
},
};
try self.data_nav_table.ensureUnusedCapacity(gpa, 1);
const duped_code = try gpa.dupe(u8, code);
self.getAtomPtr(self.navs.get(nav_index).?.index).code = .{ .code_ptr = null, .other = .{ .nav_index = nav_index } };
if (self.data_nav_table.fetchPutAssumeCapacity(nav_index, duped_code)) |old_entry| {
gpa.free(old_entry.value);
if (nav_init.typeOf(zcu).isFnOrHasRuntimeBits(pt)) {
const atom_idx = try self.seeNav(pt, nav_index);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
// TODO we need the symbol index for symbol in the table of locals for the containing atom
const res = try codegen.generateSymbol(&self.base, pt, zcu.navSrcLoc(nav_index), nav_init, &code_buffer, .none, .{
.parent_atom_index = @intCast(atom_idx),
});
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
try zcu.failed_codegen.put(gpa, nav_index, em);
return;
},
};
try self.data_nav_table.ensureUnusedCapacity(gpa, 1);
const duped_code = try gpa.dupe(u8, code);
self.getAtomPtr(self.navs.get(nav_index).?.index).code = .{ .code_ptr = null, .other = .{ .nav_index = nav_index } };
if (self.data_nav_table.fetchPutAssumeCapacity(nav_index, duped_code)) |old_entry| {
gpa.free(old_entry.value);
}
try self.updateFinish(pt, nav_index);
}
return self.updateFinish(pt, nav_index);
}
/// called at the end of update{Decl,Func}
+32 -29
View File
@@ -248,46 +248,49 @@ pub fn updateNav(
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
const is_extern, const lib_name, const nav_init = switch (ip.indexToKey(nav.status.resolved.val)) {
.variable => |variable| .{ false, variable.lib_name, variable.init },
const nav_val = zcu.navValue(nav_index);
const is_extern, const lib_name, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
.variable => |variable| .{ false, variable.lib_name, Value.fromInterned(variable.init) },
.func => return,
.@"extern" => |@"extern"| if (ip.isFunctionType(nav.typeOf(ip)))
return
else
.{ true, @"extern".lib_name, nav.status.resolved.val },
else => .{ false, .none, nav.status.resolved.val },
.{ true, @"extern".lib_name, nav_val },
else => .{ false, .none, nav_val },
};
const gpa = wasm_file.base.comp.gpa;
const atom_index = try zig_object.getOrCreateAtomForNav(wasm_file, pt, nav_index);
const atom = wasm_file.getAtomPtr(atom_index);
atom.clear();
if (nav_init.typeOf(zcu).isFnOrHasRuntimeBits(pt)) {
const gpa = wasm_file.base.comp.gpa;
const atom_index = try zig_object.getOrCreateAtomForNav(wasm_file, pt, nav_index);
const atom = wasm_file.getAtomPtr(atom_index);
atom.clear();
if (is_extern)
return zig_object.addOrUpdateImport(wasm_file, nav.name.toSlice(ip), atom.sym_index, lib_name.toSlice(ip), null);
if (is_extern)
return zig_object.addOrUpdateImport(wasm_file, nav.name.toSlice(ip), atom.sym_index, lib_name.toSlice(ip), null);
var code_writer = std.ArrayList(u8).init(gpa);
defer code_writer.deinit();
var code_writer = std.ArrayList(u8).init(gpa);
defer code_writer.deinit();
const res = try codegen.generateSymbol(
&wasm_file.base,
pt,
zcu.navSrcLoc(nav_index),
Value.fromInterned(nav_init),
&code_writer,
.none,
.{ .parent_atom_index = @intFromEnum(atom.sym_index) },
);
const res = try codegen.generateSymbol(
&wasm_file.base,
pt,
zcu.navSrcLoc(nav_index),
nav_init,
&code_writer,
.none,
.{ .parent_atom_index = @intFromEnum(atom.sym_index) },
);
const code = switch (res) {
.ok => code_writer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
const code = switch (res) {
.ok => code_writer.items,
.fail => |em| {
try zcu.failed_codegen.put(zcu.gpa, nav_index, em);
return;
},
};
return zig_object.finishUpdateNav(wasm_file, pt, nav_index, code);
try zig_object.finishUpdateNav(wasm_file, pt, nav_index, code);
}
}
pub fn updateFunc(
+1 -1
View File
@@ -746,7 +746,7 @@ const Writer = struct {
fn writeIntBig(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].str;
const byte_count = inst_data.len * @sizeOf(std.math.big.Limb);
const limb_bytes = self.code.nullTerminatedString(inst_data.start)[0..byte_count];
const limb_bytes = self.code.string_bytes[@intFromEnum(inst_data.start)..][0..byte_count];
// limb_bytes is not aligned properly; we must allocate and copy the bytes
// in order to accomplish this.
const limbs = try self.gpa.alloc(std.math.big.Limb, inst_data.len);
+2 -1
View File
@@ -10,6 +10,7 @@ const Zcu = @import("Zcu.zig");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualSlices = std.testing.expectEqualSlices;
const link = @import("link.zig");
const log = std.log.scoped(.register_manager);
@@ -25,7 +26,7 @@ pub const AllocateRegistersError = error{
/// Can happen when spilling an instruction triggers a codegen
/// error, so we propagate that error
CodegenFail,
};
} || link.File.UpdateDebugInfoError;
pub fn RegisterManager(
comptime Function: type,
+438
View File
@@ -0,0 +1,438 @@
/// The build graph that owns every step this context creates.
b: *std.Build,
/// User-facing configuration passed down from the top-level `build.zig`.
options: Options,
/// Step that every generated debugger-test run step is attached to.
root_step: *std.Build.Step,

pub const Options = struct {
    /// Substrings matched against case names to filter which tests run.
    test_filters: []const []const u8,
    /// Path to a gdb binary, or null to skip gdb-driven cases.
    gdb: ?[]const u8,
    /// Path to an lldb binary, or null to skip lldb-driven cases.
    lldb: ?[]const u8,
    optimize_modes: []const std.builtin.OptimizeMode,
    skip_single_threaded: bool,
    skip_non_native: bool,
    skip_libc: bool,
};

/// One concrete target/configuration combination to generate test cases for.
pub const Target = struct {
    resolved: std.Build.ResolvedTarget,
    optimize_mode: std.builtin.OptimizeMode = .Debug,
    link_libc: ?bool = null,
    single_threaded: ?bool = null,
    pic: ?bool = null,
    /// Appended to run-step names so the same case is distinguishable across targets.
    test_name_suffix: []const u8,
};
/// Registers every debugger test case for `target`. Each case compiles a small
/// program with the self-hosted backend (`use_llvm = false`), drives lldb over
/// it with a scripted command file, and matches the expected output.
pub fn addTestsForTarget(db: *Debugger, target: Target) void {
    // Scalar smoke test: one struct field per basic integer/float type,
    // inspected with `frame variable --show-types`.
    db.addLldbTest(
        "basic",
        target,
        &.{
            .{
                .path = "basic.zig",
                .source =
                \\const Basic = struct {
                \\ void: void = {},
                \\ bool_false: bool = false,
                \\ bool_true: bool = true,
                \\ u0_0: u0 = 0,
                \\ u1_0: u1 = 0,
                \\ u1_1: u1 = 1,
                \\ u2_0: u2 = 0,
                \\ u2_3: u2 = 3,
                \\ u3_0: u3 = 0,
                \\ u3_7: u3 = 7,
                \\ u4_0: u4 = 0,
                \\ u4_15: u4 = 15,
                \\ u5_0: u5 = 0,
                \\ u5_31: u5 = 31,
                \\ u6_0: u6 = 0,
                \\ u6_63: u6 = 63,
                \\ u7_0: u7 = 0,
                \\ u7_127: u7 = 127,
                \\ u8_0: u8 = 0,
                \\ u8_255: u8 = 255,
                \\ u16_0: u16 = 0,
                \\ u16_65535: u16 = 65535,
                \\ u24_0: u24 = 0,
                \\ u24_16777215: u24 = 16777215,
                \\ u32_0: u32 = 0,
                \\ u32_4294967295: u32 = 4294967295,
                \\ i0_0: i0 = 0,
                \\ @"i1_-1": i1 = -1,
                \\ i1_0: i1 = 0,
                \\ @"i2_-2": i2 = -2,
                \\ i2_0: i2 = 0,
                \\ i2_1: i2 = 1,
                \\ @"i3_-4": i3 = -4,
                \\ i3_0: i3 = 0,
                \\ i3_3: i3 = 3,
                \\ @"i4_-8": i4 = -8,
                \\ i4_0: i4 = 0,
                \\ i4_7: i4 = 7,
                \\ @"i5_-16": i5 = -16,
                \\ i5_0: i5 = 0,
                \\ i5_15: i5 = 15,
                \\ @"i6_-32": i6 = -32,
                \\ i6_0: i6 = 0,
                \\ i6_31: i6 = 31,
                \\ @"i7_-64": i7 = -64,
                \\ i7_0: i7 = 0,
                \\ i7_63: i7 = 63,
                \\ @"i8_-128": i8 = -128,
                \\ i8_0: i8 = 0,
                \\ i8_127: i8 = 127,
                \\ @"i16_-32768": i16 = -32768,
                \\ i16_0: i16 = 0,
                \\ i16_32767: i16 = 32767,
                \\ @"i24_-8388608": i24 = -8388608,
                \\ i24_0: i24 = 0,
                \\ i24_8388607: i24 = 8388607,
                \\ @"i32_-2147483648": i32 = -2147483648,
                \\ i32_0: i32 = 0,
                \\ i32_2147483647: i32 = 2147483647,
                \\ @"f16_42.625": f16 = 42.625,
                \\ @"f32_-2730.65625": f32 = -2730.65625,
                \\ @"f64_357913941.33203125": f64 = 357913941.33203125,
                \\ @"f80_-91625968981.3330078125": f80 = -91625968981.3330078125,
                \\ @"f128_384307168202282325.333332061767578125": f128 = 384307168202282325.333332061767578125,
                \\};
                \\fn testBasic(basic: Basic) void {
                \\ _ = basic;
                \\}
                \\pub fn main() void {
                \\ testBasic(.{});
                \\}
                \\
                ,
            },
        },
        \\breakpoint set --file basic.zig --source-pattern-regexp '_ = basic;'
        \\process launch
        \\frame variable --show-types basic
        \\breakpoint delete --force
        ,
        &.{
            \\(lldb) frame variable --show-types basic
            \\(root.basic.Basic) basic = {
            \\ (void) void = {}
            \\ (bool) bool_false = false
            \\ (bool) bool_true = true
            \\ (u0) u0_0 = 0
            \\ (u1) u1_0 = 0
            \\ (u1) u1_1 = 1
            \\ (u2) u2_0 = 0
            \\ (u2) u2_3 = 3
            \\ (u3) u3_0 = 0
            \\ (u3) u3_7 = 7
            \\ (u4) u4_0 = 0
            \\ (u4) u4_15 = 15
            \\ (u5) u5_0 = 0
            \\ (u5) u5_31 = 31
            \\ (u6) u6_0 = 0
            \\ (u6) u6_63 = 63
            \\ (u7) u7_0 = 0
            \\ (u7) u7_127 = 127
            \\ (u8) u8_0 = 0
            \\ (u8) u8_255 = 255
            \\ (u16) u16_0 = 0
            \\ (u16) u16_65535 = 65535
            \\ (u24) u24_0 = 0
            \\ (u24) u24_16777215 = 16777215
            \\ (u32) u32_0 = 0
            \\ (u32) u32_4294967295 = 4294967295
            \\ (i0) i0_0 = 0
            \\ (i1) i1_-1 = -1
            \\ (i1) i1_0 = 0
            \\ (i2) i2_-2 = -2
            \\ (i2) i2_0 = 0
            \\ (i2) i2_1 = 1
            \\ (i3) i3_-4 = -4
            \\ (i3) i3_0 = 0
            \\ (i3) i3_3 = 3
            \\ (i4) i4_-8 = -8
            \\ (i4) i4_0 = 0
            \\ (i4) i4_7 = 7
            \\ (i5) i5_-16 = -16
            \\ (i5) i5_0 = 0
            \\ (i5) i5_15 = 15
            \\ (i6) i6_-32 = -32
            \\ (i6) i6_0 = 0
            \\ (i6) i6_31 = 31
            \\ (i7) i7_-64 = -64
            \\ (i7) i7_0 = 0
            \\ (i7) i7_63 = 63
            \\ (i8) i8_-128 = -128
            \\ (i8) i8_0 = 0
            \\ (i8) i8_127 = 127
            \\ (i16) i16_-32768 = -32768
            \\ (i16) i16_0 = 0
            \\ (i16) i16_32767 = 32767
            \\ (i24) i24_-8388608 = -8388608
            \\ (i24) i24_0 = 0
            \\ (i24) i24_8388607 = 8388607
            \\ (i32) i32_-2147483648 = -2147483648
            \\ (i32) i32_0 = 0
            \\ (i32) i32_2147483647 = 2147483647
            \\ (f16) f16_42.625 = 42.625
            \\ (f32) f32_-2730.65625 = -2730.65625
            \\ (f64) f64_357913941.33203125 = 357913941.33203125
            \\ (f80) f80_-91625968981.3330078125 = -91625968981.3330078125
            \\ (f128) f128_384307168202282325.333332061767578125 = 384307168202282325.333332061767578125
            \\}
        },
    );
    // Storage-class test: globals, threadlocals, C-ABI parameters, comptime
    // and runtime locals; the XOR chains force every value to be live.
    db.addLldbTest(
        "storage",
        target,
        &.{
            .{
                .path = "storage.zig",
                .source =
                \\const global_const: u64 = 0x19e50dc8d6002077;
                \\var global_var: u64 = 0xcc423cec08622e32;
                \\threadlocal var global_threadlocal1: u64 = 0xb4d643528c042121;
                \\threadlocal var global_threadlocal2: u64 = 0x43faea1cf5ad7a22;
                \\fn testStorage(
                \\ param1: u64,
                \\ param2: u64,
                \\ param3: u64,
                \\ param4: u64,
                \\ param5: u64,
                \\ param6: u64,
                \\ param7: u64,
                \\ param8: u64,
                \\) callconv(.C) void {
                \\ const local_comptime_val: u64 = global_const *% global_const;
                \\ const local_comptime_ptr: struct { u64 } = .{ local_comptime_val *% local_comptime_val };
                \\ const local_const: u64 = global_var ^ global_threadlocal1 ^ global_threadlocal2 ^
                \\ param1 ^ param2 ^ param3 ^ param4 ^ param5 ^ param6 ^ param7 ^ param8;
                \\ var local_var: u64 = local_comptime_ptr[0] ^ local_const;
                \\ local_var = local_var;
                \\}
                \\pub fn main() void {
                \\ testStorage(
                \\ 0x6a607e08125c7e00,
                \\ 0x98944cb2a45a8b51,
                \\ 0xa320cf10601ee6fb,
                \\ 0x691ed3535bad3274,
                \\ 0x63690e6867a5799f,
                \\ 0x8e163f0ec76067f2,
                \\ 0xf9a252c455fb4c06,
                \\ 0xc88533722601e481,
                \\ );
                \\}
                \\
                ,
            },
        },
        \\breakpoint set --file storage.zig --source-pattern-regexp 'local_var = local_var;'
        \\process launch
        \\target variable --show-types --format hex global_const global_var global_threadlocal1 global_threadlocal2
        \\frame variable --show-types --format hex param1 param2 param3 param4 param5 param6 param7 param8 local_comptime_val local_comptime_ptr.0 local_const local_var
        \\breakpoint delete --force
        ,
        &.{
            \\(lldb) target variable --show-types --format hex global_const global_var global_threadlocal1 global_threadlocal2
            \\(u64) global_const = 0x19e50dc8d6002077
            \\(u64) global_var = 0xcc423cec08622e32
            \\(u64) global_threadlocal1 = 0xb4d643528c042121
            \\(u64) global_threadlocal2 = 0x43faea1cf5ad7a22
            \\(lldb) frame variable --show-types --format hex param1 param2 param3 param4 param5 param6 param7 param8 local_comptime_val local_comptime_ptr.0 local_const local_var
            \\(u64) param1 = 0x6a607e08125c7e00
            \\(u64) param2 = 0x98944cb2a45a8b51
            \\(u64) param3 = 0xa320cf10601ee6fb
            \\(u64) param4 = 0x691ed3535bad3274
            \\(u64) param5 = 0x63690e6867a5799f
            \\(u64) param6 = 0x8e163f0ec76067f2
            \\(u64) param7 = 0xf9a252c455fb4c06
            \\(u64) param8 = 0xc88533722601e481
            \\(u64) local_comptime_val = 0x69490636f81df751
            \\(u64) local_comptime_ptr.0 = 0x82e834dae74767a1
            \\(u64) local_const = 0xdffceb8b2f41e205
            \\(u64) local_var = 0x5d14df51c80685a4
        },
    );
    // Slice test: a []u32 view of a stack array must render the same elements
    // as the array itself.
    db.addLldbTest(
        "slices",
        target,
        &.{
            .{
                .path = "slices.zig",
                .source =
                \\pub fn main() void {
                \\ {
                \\ var array: [4]u32 = .{ 1, 2, 4, 8 };
                \\ const slice: []u32 = &array;
                \\ _ = slice;
                \\ }
                \\}
                \\
                ,
            },
        },
        \\breakpoint set --file slices.zig --source-pattern-regexp '_ = slice;'
        \\process launch
        \\frame variable --show-types array slice
        \\breakpoint delete --force
        ,
        &.{
            \\(lldb) frame variable --show-types array slice
            \\([4]u32) array = {
            \\ (u32) [0] = 1
            \\ (u32) [1] = 2
            \\ (u32) [2] = 4
            \\ (u32) [3] = 8
            \\}
            \\([]u32) slice = {
            \\ (u32) [0] = 1
            \\ (u32) [1] = 2
            \\ (u32) [2] = 4
            \\ (u32) [3] = 8
            \\}
        },
    );
    // Optional test: two breakpoints observe ?u32 values before and after
    // `maybe_u32` is assigned, covering both null and payload rendering.
    db.addLldbTest(
        "optionals",
        target,
        &.{
            .{
                .path = "optionals.zig",
                .source =
                \\pub fn main() void {
                \\ {
                \\ var null_u32: ?u32 = null;
                \\ var maybe_u32: ?u32 = null;
                \\ var nonnull_u32: ?u32 = 456;
                \\ maybe_u32 = 123;
                \\ _ = .{ &null_u32, &nonnull_u32 };
                \\ }
                \\}
                \\
                ,
            },
        },
        \\breakpoint set --file optionals.zig --source-pattern-regexp 'maybe_u32 = 123;'
        \\process launch
        \\frame variable null_u32 maybe_u32 nonnull_u32
        \\breakpoint delete --force
        \\
        \\breakpoint set --file optionals.zig --source-pattern-regexp '_ = .{ &null_u32, &nonnull_u32 };'
        \\process continue
        \\frame variable --show-types null_u32 maybe_u32 nonnull_u32
        \\breakpoint delete --force
        ,
        &.{
            \\(lldb) frame variable null_u32 maybe_u32 nonnull_u32
            \\(?u32) null_u32 = null
            \\(?u32) maybe_u32 = null
            \\(?u32) nonnull_u32 = (nonnull_u32.? = 456)
            ,
            \\(lldb) frame variable --show-types null_u32 maybe_u32 nonnull_u32
            \\(?u32) null_u32 = null
            \\(?u32) maybe_u32 = {
            \\ (u32) maybe_u32.? = 123
            \\}
            \\(?u32) nonnull_u32 = {
            \\ (u32) nonnull_u32.? = 456
            \\}
        },
    );
}
/// One source file of a test case: `path` is the name given to the write-files
/// step and `source` is the complete file contents. The first `File` of a case
/// becomes the root module of the test executable.
const File = struct { path: []const u8, source: []const u8 };
/// Registers one gdb-driven test case. Silently skipped when no gdb path was
/// configured (`-Dgdb`).
fn addGdbTest(
    db: *Debugger,
    name: []const u8,
    target: Target,
    files: []const File,
    commands: []const u8,
    expected_output: []const []const u8,
) void {
    // Without a configured debugger binary there is nothing to drive the test.
    const gdb_path = db.options.gdb orelse return;
    db.addTest(
        name,
        target,
        files,
        &.{ gdb_path, "--batch", "--command" },
        commands,
        &.{"--args"},
        expected_output,
    );
}
/// Registers one lldb-driven test case. Silently skipped when no lldb path was
/// configured (`-Dlldb`).
fn addLldbTest(
    db: *Debugger,
    name: []const u8,
    target: Target,
    files: []const File,
    commands: []const u8,
    expected_output: []const []const u8,
) void {
    // Without a configured debugger binary there is nothing to drive the test.
    const lldb_path = db.options.lldb orelse return;
    db.addTest(
        name,
        target,
        files,
        &.{ lldb_path, "--batch", "--source" },
        commands,
        &.{"--"},
        expected_output,
    );
}
/// Exit status that a debugger command script returns to signal it ran to completion.
/// After a failure while running a script, the debugger starts accepting commands from stdin, and
/// because stdin is set to be empty, the debugger then exits normally with status 0. Having the
/// script itself exit with this non-zero status instead makes a successful run distinguishable
/// from a script that failed partway through.
const success = 99;
/// Registers one debugger test: builds an executable from `files`, invokes the
/// debugger as `db_argv1 ++ {command script} ++ db_argv2 ++ {exe}`, and checks
/// that every entry of `expected_output` appears on stdout and that the script
/// ran to completion (see `success`).
fn addTest(
    db: *Debugger,
    name: []const u8,
    target: Target,
    files: []const File,
    db_argv1: []const []const u8,
    commands: []const u8,
    db_argv2: []const []const u8,
    expected_output: []const []const u8,
) void {
    // Honor the test filters: when any filters are given, run only tests whose
    // name contains at least one of them. (The previous logic was inverted --
    // it returned on a *match*, skipping exactly the tests the user asked for.)
    for (db.options.test_filters) |test_filter| {
        if (std.mem.indexOf(u8, name, test_filter)) |_| break;
    } else if (db.options.test_filters.len > 0) return;
    const files_wf = db.b.addWriteFiles();
    // files[0] is the root source file; the remaining files are written
    // alongside it so the root module can reference them.
    const exe = db.b.addExecutable(.{
        .name = name,
        .target = target.resolved,
        .root_source_file = files_wf.add(files[0].path, files[0].source),
        .optimize = target.optimize_mode,
        .link_libc = target.link_libc,
        .single_threaded = target.single_threaded,
        .pic = target.pic,
        .strip = false,
        // These tests debug executables produced without LLVM or LLD.
        .use_llvm = false,
        .use_lld = false,
    });
    for (files[1..]) |file| _ = files_wf.add(file.path, file.source);
    const commands_wf = db.b.addWriteFiles();
    const run = std.Build.Step.Run.create(db.b, db.b.fmt("run {s} {s}", .{ name, target.test_name_suffix }));
    run.addArgs(db_argv1);
    // Append a trailing `quit <success>` so that a script which runs to the end
    // exits with the sentinel status; a failure mid-script instead drops the
    // debugger into the (empty) stdin and it exits 0.
    run.addFileArg(commands_wf.add(db.b.fmt("{s}.cmd", .{name}), db.b.fmt("{s}\n\nquit {d}\n", .{ commands, success })));
    run.addArgs(db_argv2);
    run.addArtifactArg(exe);
    for (expected_output) |expected| run.addCheck(.{ .expect_stdout_match = db.b.fmt("{s}\n", .{expected}) });
    run.addCheck(.{ .expect_term = .{ .Exited = success } });
    run.setStdIn(.{ .bytes = "" });
    db.root_step.dependOn(&run.step);
}
const Debugger = @This();
const std = @import("std");
+34
View File
@@ -17,6 +17,7 @@ pub const TranslateCContext = @import("src/TranslateC.zig");
pub const RunTranslatedCContext = @import("src/RunTranslatedC.zig");
pub const CompareOutputContext = @import("src/CompareOutput.zig");
pub const StackTracesContext = @import("src/StackTrace.zig");
pub const DebuggerContext = @import("src/Debugger.zig");
const TestTarget = struct {
target: std.Target.Query = .{},
@@ -1283,3 +1284,36 @@ pub fn addCases(
test_filters,
);
}
/// Creates the `test-debugger` step and populates it with debugger test cases
/// for x86_64-linux. Returns null (after attaching a failure to the step) when
/// neither -Dgdb nor -Dlldb was supplied, since there is then no debugger
/// available to drive the tests.
pub fn addDebuggerTests(b: *std.Build, options: DebuggerContext.Options) ?*Step {
    const step = b.step("test-debugger", "Run the debugger tests");
    if (options.gdb == null and options.lldb == null) {
        step.dependOn(&b.addFail("test-debugger requires -Dgdb and/or -Dlldb").step);
        return null;
    }
    var context: DebuggerContext = .{
        .b = b,
        .options = options,
        .root_step = step,
    };
    // Same query twice: exercise both non-PIC and PIC code generation.
    context.addTestsForTarget(.{
        .resolved = b.resolveTargetQuery(.{
            .cpu_arch = .x86_64,
            .os_tag = .linux,
            .abi = .none,
        }),
        .pic = false,
        .test_name_suffix = "x86_64-linux",
    });
    context.addTestsForTarget(.{
        .resolved = b.resolveTargetQuery(.{
            .cpu_arch = .x86_64,
            .os_tag = .linux,
            .abi = .none,
        }),
        .pic = true,
        .test_name_suffix = "x86_64-linux-pic",
    });
    return step;
}