Merge pull request 'Parses inline callers when generating stack traces from PDBs' (#31814) from MasonRemaley/zig:pdb-backtrace-inlines into master

Reviewed-on: https://codeberg.org/ziglang/zig/pulls/31814
Reviewed-by: Andrew Kelley <andrew@ziglang.org>
This commit is contained in:
Andrew Kelley
2026-04-13 18:26:53 +02:00
22 changed files with 1387 additions and 264 deletions
+1 -1
View File
@@ -3263,7 +3263,7 @@ fn createFoo(param: i32) !Foo {
<ul>
<li>Return an error from main</li>
<li>An error makes its way to {#syntax#}catch unreachable{#endsyntax#} and you have not overridden the default panic handler</li>
<li>Use {#link|errorReturnTrace#} to access the current return trace. You can use {#syntax#}std.debug.dumpStackTrace{#endsyntax#} to print it. This function returns comptime-known {#link|null#} when building without error return tracing support.</li>
<li>Use {#link|errorReturnTrace#} to access the current return trace. You can use {#syntax#}std.debug.dumpErrorReturnTrace{#endsyntax#} to print it. This function returns comptime-known {#link|null#} when building without error return tracing support.</li>
</ul>
{#header_open|Implementation Details#}
<p>
+3 -3
View File
@@ -144,7 +144,7 @@ fn mainServer(init: std.process.Init.Minimal) !void {
error.SkipZigTest => .skip,
else => s: {
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace);
std.debug.dumpErrorReturnTrace(trace);
}
break :s .fail;
},
@@ -312,7 +312,7 @@ fn mainTerminal(init: std.process.Init.Minimal) void {
std.debug.print("FAIL ({t})\n", .{err});
}
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace);
std.debug.dumpErrorReturnTrace(trace);
}
test_node.end();
},
@@ -438,7 +438,7 @@ var fuzz_runner: if (builtin.fuzz) struct {
error.SkipZigTest => return,
else => {
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace);
std.debug.dumpErrorReturnTrace(trace);
}
std.debug.print("failed with error.{t}\n", .{err});
std.process.exit(1);
+2 -2
View File
@@ -67,7 +67,7 @@ test_results: TestResults,
/// The return address associated with creation of this step that can be useful
/// to print along with debugging messages.
debug_stack_trace: std.builtin.StackTrace,
debug_stack_trace: std.debug.StackTrace,
pub const TestResults = struct {
/// The total number of tests in the step. Every test has a "status" from the following:
@@ -328,7 +328,7 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
/// For debugging purposes, prints identifying information about this Step.
pub fn dump(step: *Step, t: Io.Terminal) void {
const w = t.writer;
if (step.debug_stack_trace.instruction_addresses.len > 0) {
if (step.debug_stack_trace.return_addresses.len > 0) {
w.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {};
std.debug.writeStackTrace(&step.debug_stack_trace, t) catch {};
} else {
+2 -2
View File
@@ -442,7 +442,7 @@ fn callFn(comptime f: anytype, args: anytype) switch (Impl) {
@call(.auto, f, args) catch |err| {
std.debug.print("error: {s}\n", .{@errorName(err)});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace);
std.debug.dumpErrorReturnTrace(trace);
}
};
@@ -932,7 +932,7 @@ const WasiThreadImpl = struct {
@call(.auto, f, w.args) catch |err| {
std.debug.print("error: {s}\n", .{@errorName(err)});
if (@errorReturnTrace()) |trace| {
std.debug.dumpStackTrace(trace);
std.debug.dumpErrorReturnTrace(trace);
}
};
},
+179 -65
View File
@@ -13,7 +13,6 @@ const windows = std.os.windows;
const builtin = @import("builtin");
const native_arch = builtin.cpu.arch;
const native_os = builtin.os.tag;
const StackTrace = std.builtin.StackTrace;
const root = @import("root");
@@ -39,8 +38,8 @@ pub const cpu_context = @import("debug/cpu_context.zig");
/// pub const init: SelfInfo;
/// pub fn deinit(si: *SelfInfo, io: Io) void;
///
/// /// Returns the symbol and source location of the instruction at `address`.
/// pub fn getSymbol(si: *SelfInfo, io: Io, address: usize) SelfInfoError!Symbol;
/// /// Appends the symbols for the instruction at `address` to `symbols`.
/// pub fn getSymbols(si: *SelfInfo, io: Io, symbol_allocator: Allocator, text_arena: Allocator, address: usize, include_inline_callers: bool, symbols: *std.ArrayList(Symbol)) SelfInfoError!void;
/// /// Returns a name for the "module" (e.g. shared library or executable image) containing `address`.
/// pub fn getModuleName(si: *SelfInfo, io: Io, address: usize) SelfInfoError![]const u8;
/// pub fn getModuleSlide(si: *SelfInfo, io: Io, address: usize) SelfInfoError!usize;
@@ -563,7 +562,7 @@ pub fn defaultPanic(msg: []const u8, first_trace_addr: ?usize) noreturn {
if (@errorReturnTrace()) |t| if (t.index > 0) {
writer.writeAll("error return context:\n") catch break :trace;
writeStackTrace(t, stderr) catch break :trace;
writeErrorReturnTrace(t, stderr) catch break :trace;
writer.writeAll("\nstack trace:\n") catch break :trace;
};
writeCurrentStackTrace(.{
@@ -602,6 +601,35 @@ fn waitForOtherThreadToFinishPanicking() void {
}
}
/// A captured stack trace, as produced by `captureCurrentStackTrace` and
/// printed by `writeStackTrace`.
pub const StackTrace = struct {
    /// Each element is the "return address" of a function call, meaning the instruction address
    /// which control flow will return to when the function returns.
    ///
    /// The first slice element corresponds to the innermost stack frame, and the last element to
    /// the outermost.
    ///
    /// Inlined function calls do not have meaningful return addresses and are therefore not
    /// included in this slice. Instead, when printing the stack trace, the source locations of
    /// inline calls should be read from debug information and the corresponding "inline frames"
    /// printed in the appropriate locations.
    return_addresses: []usize,
    /// Indicates whether any stack frames were omitted from `return_addresses`.
    skipped: SkippedAddresses,
};
/// Indicates how many addresses were skipped in a trace.
///
/// This is a non-exhaustive enum: tag values other than `none` and `unknown`
/// directly encode the exact number of omitted addresses.
pub const SkippedAddresses = enum(usize) {
    /// No addresses were omitted: `return_addresses` contains all stack frames, including the
    /// outermost.
    none = 0,
    /// It is not known whether any frames were omitted.
    unknown = std.math.maxInt(usize),
    /// The full stack trace was available, but some frames are not included in
    /// `return_addresses` due to buffer size limitations. The enum value is the exact number of
    /// addresses which were omitted.
    _,
};
pub const StackUnwindOptions = struct {
/// If not `null`, we will ignore all frames up until this return address. This is typically
/// used to omit intermediate handling code (for instance, a panic handler and its machinery)
@@ -621,7 +649,10 @@ pub const StackUnwindOptions = struct {
///
/// See `writeCurrentStackTrace` to immediately print the trace instead of capturing it.
pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: []usize) StackTrace {
const empty_trace: StackTrace = .{ .index = 0, .instruction_addresses = &.{} };
const empty_trace: StackTrace = .{
.return_addresses = &.{},
.skipped = .none,
};
if (!std.options.allow_stack_tracing) return empty_trace;
var it: StackIterator = .init(options.context);
defer it.deinit();
@@ -632,17 +663,17 @@ pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf:
var total_frames: usize = 0;
var index: usize = 0;
var wait_for = options.first_address;
// Ideally, we would iterate the whole stack so that the `index` in the returned trace was
// Ideally, we would iterate the whole stack so that the `index - min(buf.len, index)` would be
// indicative of how many frames were skipped. However, this has a significant runtime cost
// in some cases, so at least for now, we don't do that.
while (index < addr_buf.len) switch (it.next(io)) {
.switch_to_fp => if (!it.stratOk(options.allow_unsafe_unwind)) break,
.end => break,
const skipped: SkippedAddresses = while (index < addr_buf.len) switch (it.next(io)) {
.switch_to_fp => if (!it.stratOk(options.allow_unsafe_unwind)) break .unknown,
.end => break .none,
.frame => |ret_addr| {
if (total_frames > 10_000) {
// Limit the number of frames in case of (e.g.) broken debug information which is
// getting unwinding stuck in a loop.
break;
break .unknown;
}
total_frames += 1;
if (wait_for) |target| {
@@ -652,10 +683,10 @@ pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf:
addr_buf[index] = ret_addr;
index += 1;
},
};
} else .unknown;
return .{
.index = index,
.instruction_addresses = addr_buf[0..index],
.return_addresses = addr_buf[0..index],
.skipped = skipped,
};
}
/// Write the current stack trace to `writer`, annotated with source locations.
@@ -663,6 +694,10 @@ pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf:
/// See `captureCurrentStackTrace` to capture the trace addresses into a buffer instead of printing.
pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, t: Io.Terminal) Writer.Error!void {
const writer = t.writer;
var text_arena: std.heap.ArenaAllocator = .init(getDebugInfoAllocator());
defer text_arena.deinit();
if (!std.options.allow_stack_tracing) {
t.setColor(.dim) catch {};
try writer.print("Cannot print stack trace: stack tracing is disabled\n", .{});
@@ -740,7 +775,10 @@ pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, t: Io.Termin
}
// `ret_addr` is the return address, which is *after* the function call.
// Subtract 1 to get an address *in* the function call for a better source location.
try printSourceAtAddress(io, di, t, ret_addr -| StackIterator.ra_call_offset);
try printSourceAtAddress(io, &text_arena, di, t, .{
.address = ret_addr -| StackIterator.ra_call_offset,
.resolve_inline_callers = true,
});
printed_any_frame = true;
},
};
@@ -773,8 +811,29 @@ pub const FormatStackTrace = struct {
}
};
/// Write a previously captured error return trace to `t`, annotated with source locations.
///
/// Error return traces use a fixed-size ring of addresses: `index` counts every
/// frame ever pushed and may exceed `instruction_addresses.len`, in which case
/// the excess is reported as skipped frames. Inline callers are not resolved
/// for error return traces (unlike `writeStackTrace`).
pub fn writeErrorReturnTrace(et: *const std.builtin.StackTrace, t: Io.Terminal) Writer.Error!void {
    // We take the slice by value, preventing the length from being mutated if an error occurs while
    // writing the stack trace.
    const len = @min(et.instruction_addresses.len, et.index);
    // Frames that did not fit in the buffer become the exact skipped count,
    // which `SkippedAddresses` encodes directly as its integer tag value.
    const skipped = et.index - len;
    try writeTrace(et.instruction_addresses[0..len], @enumFromInt(skipped), t, false);
}
/// Write a previously captured stack trace to `t`, annotated with source locations.
/// Inline callers are resolved from debug information where available.
pub fn writeStackTrace(st: *const StackTrace, t: Io.Terminal) Writer.Error!void {
    const addresses = st.return_addresses;
    const skipped = st.skipped;
    return writeTrace(addresses, skipped, t, true);
}
fn writeTrace(
addresses: []const usize,
skipped: SkippedAddresses,
t: Io.Terminal,
resolve_inline_callers: bool,
) Writer.Error!void {
var text_arena: std.heap.ArenaAllocator = .init(getDebugInfoAllocator());
defer text_arena.deinit();
const writer = t.writer;
if (!std.options.allow_stack_tracing) {
t.setColor(.dim) catch {};
@@ -783,10 +842,7 @@ pub fn writeStackTrace(st: *const StackTrace, t: Io.Terminal) Writer.Error!void
return;
}
// Fetch `st.index` straight away. Aside from avoiding redundant loads, this prevents issues if
// `st` is `@errorReturnTrace()` and errors are encountered while writing the stack trace.
const n_frames = st.index;
if (n_frames == 0) return writer.writeAll("(empty stack trace)\n");
if (addresses.len == 0) return writer.writeAll("(empty stack trace)\n");
const di = getSelfDebugInfo() catch |err| switch (err) {
error.UnsupportedTarget => {
t.setColor(.dim) catch {};
@@ -796,16 +852,26 @@ pub fn writeStackTrace(st: *const StackTrace, t: Io.Terminal) Writer.Error!void
},
};
const io = std.Options.debug_io;
const captured_frames = @min(n_frames, st.instruction_addresses.len);
for (st.instruction_addresses[0..captured_frames]) |ret_addr| {
// `ret_addr` is the return address, which is *after* the function call.
for (addresses) |addr| {
// `addr` is the return address, which is *after* the function call.
// Subtract 1 to get an address *in* the function call for a better source location.
try printSourceAtAddress(io, di, t, ret_addr -| StackIterator.ra_call_offset);
try printSourceAtAddress(io, &text_arena, di, t, .{
.address = addr -| StackIterator.ra_call_offset,
.resolve_inline_callers = resolve_inline_callers,
});
}
if (n_frames > captured_frames) {
t.setColor(.bold) catch {};
try writer.print("({d} additional stack frames skipped...)\n", .{n_frames - captured_frames});
t.setColor(.reset) catch {};
switch (skipped) {
.none => {},
.unknown => {
t.setColor(.bold) catch {};
try writer.writeAll("(additional stack frames may have been skipped...)\n");
t.setColor(.reset) catch {};
},
else => |n| {
t.setColor(.bold) catch {};
try writer.print("({d} additional stack frames skipped due to buffer size limitations...)\n", .{n});
t.setColor(.reset) catch {};
},
}
}
/// A thin wrapper around `writeStackTrace` which writes to stderr and ignores write errors.
@@ -817,6 +883,15 @@ pub fn dumpStackTrace(st: *const StackTrace) void {
};
}
/// A thin wrapper around `writeErrorReturnTrace` which writes to stderr and ignores write errors.
pub fn dumpErrorReturnTrace(et: *const std.builtin.StackTrace) void {
    const terminal = lockStderr(&.{}).terminal();
    defer unlockStderr();
    // Best-effort output: a failed write to stderr has nowhere to be reported.
    // The exhaustive switch keeps this a compile error if new error cases appear.
    writeErrorReturnTrace(et, terminal) catch |write_err| switch (write_err) {
        error.WriteFailed => {},
    };
}
const StackIterator = union(enum) {
/// We will first report the current PC of this `CpuContextPtr`, then we will switch to a
/// different strategy to actually unwind.
@@ -1106,48 +1181,77 @@ pub inline fn stripInstructionPtrAuthCode(ptr: usize) usize {
return ptr;
}
fn printSourceAtAddress(io: Io, debug_info: *SelfInfo, t: Io.Terminal, address: usize) Writer.Error!void {
const symbol: Symbol = debug_info.getSymbol(io, address) catch |err| switch (err) {
error.MissingDebugInfo,
error.UnsupportedDebugInfo,
error.InvalidDebugInfo,
=> .unknown,
error.ReadFailed, error.Unexpected, error.Canceled => s: {
t.setColor(.dim) catch {};
try t.writer.print("Failed to read debug info from filesystem, trace may be incomplete\n\n", .{});
t.setColor(.reset) catch {};
break :s .unknown;
},
error.OutOfMemory => s: {
t.setColor(.dim) catch {};
try t.writer.print("Ran out of memory loading debug info, trace may be incomplete\n\n", .{});
t.setColor(.reset) catch {};
break :s .unknown;
},
};
defer if (symbol.source_location) |sl| getDebugInfoAllocator().free(sl.file_name);
return printLineInfo(
const PrintSourceAddressOptions = struct {
address: usize,
resolve_inline_callers: bool,
};
fn printSourceAtAddress(
io: Io,
text_arena: *std.heap.ArenaAllocator,
debug_info: *SelfInfo,
t: Io.Terminal,
options: PrintSourceAddressOptions,
) Writer.Error!void {
defer _ = text_arena.reset(.retain_capacity);
// Initialize the symbol array with space for at least one element, allocating this on the stack
// in the common case where only one element is needed
var symbol_fallback_allocator = std.heap.stackFallback(@sizeOf(Symbol) + @alignOf(Symbol) - 1, getDebugInfoAllocator());
const symbol_allocator = symbol_fallback_allocator.get();
var symbols = std.ArrayList(Symbol).initCapacity(symbol_allocator, 1) catch unreachable;
defer symbols.deinit(symbol_allocator);
debug_info.getSymbols(
io,
t,
symbol.source_location,
address,
symbol.name orelse "???",
symbol.compile_unit_name orelse debug_info.getModuleName(io, address) catch "???",
);
symbol_allocator,
text_arena.allocator(),
options.address,
options.resolve_inline_callers,
&symbols,
) catch |err| {
t.setColor(.dim) catch {};
defer t.setColor(.reset) catch {};
switch (err) {
error.MissingDebugInfo,
error.UnsupportedDebugInfo,
error.InvalidDebugInfo,
=> {},
error.ReadFailed, error.Unexpected, error.Canceled => {
try t.writer.print("Failed to read debug info from filesystem, trace may be incomplete\n\n", .{});
},
error.OutOfMemory => {
t.setColor(.dim) catch {};
try t.writer.print("Ran out of memory loading debug info, trace may be incomplete\n\n", .{});
t.setColor(.reset) catch {};
},
}
};
// If we failed to write any symbols, at least write the unknown symbol. Can't fail since we
// initialized with a capacity of 1.
if (symbols.items.len == 0) symbols.appendAssumeCapacity(.unknown);
for (symbols.items) |symbol| {
try printLineInfo(io, t, debug_info, options.address, symbol);
}
}
fn printLineInfo(
io: Io,
t: Io.Terminal,
source_location: ?SourceLocation,
debug_info: *SelfInfo,
address: usize,
symbol_name: []const u8,
compile_unit_name: []const u8,
symbol: Symbol,
) Writer.Error!void {
const writer = t.writer;
t.setColor(.bold) catch {};
if (source_location) |*sl| {
try writer.print("{s}:{d}:{d}", .{ sl.file_name, sl.line, sl.column });
if (symbol.source_location) |*sl| {
if (sl.column == 0) {
try writer.print("{s}:{d}", .{ sl.file_name, sl.line });
} else {
try writer.print("{s}:{d}:{d}", .{ sl.file_name, sl.line, sl.column });
}
} else {
try writer.writeAll("???:?:?");
}
@@ -1155,12 +1259,16 @@ fn printLineInfo(
t.setColor(.reset) catch {};
try writer.writeAll(": ");
t.setColor(.dim) catch {};
try writer.print("0x{x} in {s} ({s})", .{ address, symbol_name, compile_unit_name });
try writer.print("0x{x} in {s} ({s})", .{
address,
symbol.name orelse "???",
symbol.compile_unit_name orelse debug_info.getModuleName(io, address) catch "???",
});
t.setColor(.reset) catch {};
try writer.writeAll("\n");
// Show the matching source code line if possible
if (source_location) |sl| {
if (symbol.source_location) |sl| {
if (printLineFromFile(io, writer, sl)) {
if (sl.column > 0) {
// The caret already takes one char
@@ -1599,7 +1707,12 @@ test "manage resources correctly" {
var di: SelfInfo = .init;
defer di.deinit(io);
const t: Io.Terminal = .{ .writer = &discarding.writer, .mode = .no_color };
try printSourceAtAddress(io, &di, t, S.showMyTrace());
var text_arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer text_arena.deinit();
try printSourceAtAddress(io, &text_arena, &di, t, .{
.address = S.showMyTrace(),
.resolve_inline_callers = true,
});
}
/// This API helps you track where a value originated and where it was mutated,
@@ -1648,8 +1761,8 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
t.notes[t.index] = note;
const addrs = &t.addrs[t.index];
const st = captureCurrentStackTrace(.{ .first_address = addr }, addrs);
if (st.index < addrs.len) {
@memset(addrs[st.index..], 0); // zero unused frames to indicate end of trace
if (st.return_addresses.len < addrs.len) {
@memset(addrs[st.return_addresses.len..], 0); // zero unused frames to indicate end of trace
}
}
// Keep counting even if the end is reached so that the
@@ -1667,9 +1780,10 @@ pub fn ConfigurableTrace(comptime size: usize, comptime stack_frame_count: usize
stderr.writer.print("{s}:\n", .{t.notes[i]}) catch return;
var frames_array_mutable = frames_array;
const frames = mem.sliceTo(frames_array_mutable[0..], 0);
const len = @min(t.index, frames.len);
const stack_trace: StackTrace = .{
.index = frames.len,
.instruction_addresses = frames,
.return_addresses = frames[0..len],
.skipped = if (len < frames.len) .none else .unknown,
};
writeStackTrace(&stack_trace, stderr) catch return;
}
+29 -9
View File
@@ -22,7 +22,9 @@ const cast = std.math.cast;
const maxInt = std.math.maxInt;
const ArrayList = std.ArrayList;
const Endian = std.builtin.Endian;
const Reader = std.Io.Reader;
const Io = std.Io;
const Reader = Io.Reader;
const Error = std.debug.SelfInfoError;
const Dwarf = @This();
@@ -1218,6 +1220,7 @@ pub fn populateSrcLocCache(d: *Dwarf, gpa: Allocator, endian: Endian, cu: *Compi
pub fn getLineNumberInfo(
d: *Dwarf,
gpa: Allocator,
text_arena: Allocator,
endian: Endian,
compile_unit: *CompileUnit,
target_address: u64,
@@ -1230,7 +1233,7 @@ pub fn getLineNumberInfo(
const file_entry = &slc.files[file_index];
if (file_entry.dir_index >= slc.directories.len) return bad();
const dir_name = slc.directories[file_entry.dir_index].path;
const file_name = try std.fs.path.join(gpa, &.{ dir_name, file_entry.path });
const file_name = try std.fs.path.join(text_arena, &.{ dir_name, file_entry.path });
return .{
.line = entry.line,
.column = entry.column,
@@ -1543,21 +1546,38 @@ fn getStringGeneric(opt_str: ?[]const u8, offset: u64) ![:0]const u8 {
return str[casted_offset..last :0];
}
pub fn getSymbol(di: *Dwarf, gpa: Allocator, endian: Endian, address: u64) !std.debug.Symbol {
pub fn getSymbols(
di: *Dwarf,
symbol_allocator: Allocator,
text_arena: Allocator,
endian: Endian,
address: u64,
resolve_inline_callers: bool,
symbols: *std.ArrayList(std.debug.Symbol),
) std.debug.SelfInfoError!void {
_ = resolve_inline_callers;
const gpa = std.debug.getDebugInfoAllocator();
const compile_unit = di.findCompileUnit(endian, address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => return .unknown,
else => return err,
error.EndOfStream => return error.MissingDebugInfo,
error.Overflow => return error.InvalidDebugInfo,
error.ReadFailed, error.InvalidDebugInfo, error.MissingDebugInfo => |e| return e,
};
return .{
try symbols.append(symbol_allocator, .{
.name = di.getSymbolName(address),
.compile_unit_name = compile_unit.die.getAttrString(di, endian, std.dwarf.AT.name, di.section(.debug_str), compile_unit) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => null,
},
.source_location = di.getLineNumberInfo(gpa, endian, compile_unit, address) catch |err| switch (err) {
.source_location = di.getLineNumberInfo(gpa, text_arena, endian, compile_unit, address) catch |err| switch (err) {
error.MissingDebugInfo, error.InvalidDebugInfo => null,
else => return err,
error.ReadFailed,
error.EndOfStream,
error.Overflow,
error.StreamTooLong,
=> return error.InvalidDebugInfo,
else => |e| return e,
},
};
});
}
/// DWARF5 7.4: "In the 32-bit DWARF format, all values that represent lengths of DWARF sections and
+540 -37
View File
@@ -1,5 +1,6 @@
const std = @import("../std.zig");
const File = std.Io.File;
const Io = std.Io;
const File = Io.File;
const Allocator = std.mem.Allocator;
const pdb = std.pdb;
const assert = std.debug.assert;
@@ -10,7 +11,7 @@ file_reader: *File.Reader,
msf: Msf,
allocator: Allocator,
string_table: ?*MsfStream,
dbi: ?*MsfStream,
ipi: ?[]u8,
modules: []Module,
sect_contribs: []pdb.SectionContribEntry,
guid: [16]u8,
@@ -25,6 +26,10 @@ pub const Module = struct {
symbols: []u8,
subsect_info: []u8,
checksum_offset: ?usize,
/// The inlinee source lines, sorted by inlinee. This saves us from repeatedly doing linear
/// searches over all inlinees. We prefer binary search over a hashmap as LLVM sometimes outputs
/// multiple entries for a single inlinee ID, see `getInlineeSourceLines` for more info.
inlinee_source_lines: []InlineeSourceLine,
pub fn deinit(self: *Module, allocator: Allocator) void {
allocator.free(self.module_name);
@@ -32,6 +37,7 @@ pub const Module = struct {
if (self.populated) {
allocator.free(self.symbols);
allocator.free(self.subsect_info);
allocator.free(self.inlinee_source_lines);
}
}
};
@@ -41,7 +47,7 @@ pub fn init(gpa: Allocator, file_reader: *File.Reader) !Pdb {
.file_reader = file_reader,
.allocator = gpa,
.string_table = null,
.dbi = null,
.ipi = null,
.msf = try Msf.init(gpa, file_reader),
.modules = &.{},
.sect_contribs = &.{},
@@ -53,6 +59,7 @@ pub fn init(gpa: Allocator, file_reader: *File.Reader) !Pdb {
pub fn deinit(self: *Pdb) void {
const gpa = self.allocator;
self.msf.deinit(gpa);
if (self.ipi) |ipi| gpa.free(ipi);
for (self.modules) |*module| {
module.deinit(gpa);
}
@@ -67,7 +74,7 @@ pub fn parseDbiStream(self: *Pdb) !void {
const gpa = self.allocator;
const reader = &stream.interface;
const header = try reader.takeStruct(std.pdb.DbiStreamHeader, .little);
const header = try reader.takeStruct(pdb.DbiStreamHeader, .little);
if (header.version_header != 19990903) // V70, only value observed by LLVM team
return error.UnknownPDBVersion;
// if (header.Age != age)
@@ -85,14 +92,14 @@ pub fn parseDbiStream(self: *Pdb) !void {
const mod_info = try reader.takeStruct(pdb.ModInfo, .little);
var this_record_len: usize = @sizeOf(pdb.ModInfo);
var module_name: std.Io.Writer.Allocating = .init(gpa);
var module_name: Io.Writer.Allocating = .init(gpa);
defer module_name.deinit();
this_record_len += try reader.streamDelimiterLimit(&module_name.writer, 0, .limited(1024));
assert(reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
reader.toss(1);
this_record_len += 1;
var obj_file_name: std.Io.Writer.Allocating = .init(gpa);
var obj_file_name: Io.Writer.Allocating = .init(gpa);
defer obj_file_name.deinit();
this_record_len += try reader.streamDelimiterLimit(&obj_file_name.writer, 0, .limited(1024));
assert(reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
@@ -115,6 +122,7 @@ pub fn parseDbiStream(self: *Pdb) !void {
.symbols = undefined,
.subsect_info = undefined,
.checksum_offset = null,
.inlinee_source_lines = undefined,
});
mod_info_offset += this_record_len;
@@ -128,7 +136,7 @@ pub fn parseDbiStream(self: *Pdb) !void {
var sect_cont_offset: usize = 0;
if (section_contrib_size != 0) {
const version = reader.takeEnum(std.pdb.SectionContrSubstreamVersion, .little) catch |err| switch (err) {
const version = reader.takeEnum(pdb.SectionContrSubstreamVersion, .little) catch |err| switch (err) {
error.InvalidEnumTag, error.EndOfStream => return error.InvalidDebugInfo,
error.ReadFailed => return error.ReadFailed,
};
@@ -148,6 +156,15 @@ pub fn parseDbiStream(self: *Pdb) !void {
self.sect_contribs = try sect_contribs.toOwnedSlice();
}
/// Loads the raw IPI stream into `self.ipi`.
///
/// Returns without error when the PDB has no IPI stream; `self.ipi` then stays
/// null. The header is peeked rather than consumed, so the allocated buffer
/// spans the header plus all type record bytes, keeping record offsets
/// relative to the start of the stream.
pub fn parseIpiStream(self: *Pdb) !void {
    const gpa = self.allocator;
    const stream = self.getStream(.ipi) orelse return;
    const header = try stream.interface.peekStruct(pdb.IpiStreamHeader, .little);
    if (header.version != .v80) // only value observed by LLVM team
        return error.UnknownPDBVersion;
    self.ipi = try stream.interface.readAlloc(gpa, @sizeOf(pdb.IpiStreamHeader) + header.type_record_bytes);
}
pub fn parseInfoStream(self: *Pdb) !void {
var stream = self.getStream(pdb.StreamType.pdb) orelse return error.InvalidDebugInfo;
const reader = &stream.interface;
@@ -212,38 +229,500 @@ pub fn parseInfoStream(self: *Pdb) !void {
return error.MissingDebugInfo;
}
pub fn getSymbolName(self: *Pdb, module: *Module, address: u64) ?[]const u8 {
pub fn getProcSym(self: *Pdb, module: *Module, address: u64) ?*align(1) pdb.ProcSym {
_ = self;
std.debug.assert(module.populated);
var symbol_i: usize = 0;
while (symbol_i != module.symbols.len) {
const prefix: *align(1) pdb.RecordPrefix = @ptrCast(&module.symbols[symbol_i]);
var reader: Io.Reader = .fixed(module.symbols);
while (true) {
const prefix = reader.takeStructPointer(pdb.RecordPrefix) catch return null;
if (prefix.record_len < 2)
return null;
reader.discardAll(prefix.record_len - @sizeOf(u16)) catch return null;
switch (prefix.record_kind) {
.lproc32, .gproc32 => {
const proc_sym: *align(1) pdb.ProcSym = @ptrCast(&module.symbols[symbol_i + @sizeOf(pdb.RecordPrefix)]);
const proc_sym: *align(1) pdb.ProcSym = @ptrCast(prefix);
if (address >= proc_sym.code_offset and address < proc_sym.code_offset + proc_sym.code_size) {
return std.mem.sliceTo(@as([*:0]u8, @ptrCast(&proc_sym.name[0])), 0);
return proc_sym;
}
},
else => {},
}
symbol_i += prefix.record_len + @sizeOf(u16);
}
return null;
}
pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !std.debug.SourceLocation {
/// Iterates the inline-site symbol records (`inlinesite`/`inlinesite2`) within
/// a module's symbol stream, skipping records that belong to nested procedures.
pub const InlineSiteSymIterator = struct {
    module_index: usize,
    /// Byte offset of the next record within `module.symbols`.
    offset: usize,
    /// Exclusive byte offset at which iteration stops.
    end: usize,

    const empty: InlineSiteSymIterator = .{
        .module_index = 0,
        .offset = 0,
        .end = 0,
    };

    /// Returns the next inline site record, or `null` when the range is
    /// exhausted or a malformed record is encountered.
    pub fn next(iter: *InlineSiteSymIterator, module: *Module) ?*align(1) pdb.InlineSiteSym {
        while (iter.offset < iter.end) {
            const inline_prefix: *align(1) pdb.RecordPrefix = @ptrCast(&module.symbols[iter.offset]);
            const record_end = iter.offset + inline_prefix.record_len + @sizeOf(u16);
            if (record_end > iter.end) return null;
            switch (inline_prefix.record_kind) {
                // Skip nested procedures by jumping to their end record.
                //
                // The offset is advanced explicitly per branch: a single
                // `defer iter.offset = record_end;` would run on every path out
                // of the loop body and immediately clobber this jump, leaving
                // nested procedures un-skipped.
                .lproc32,
                .lproc32_st,
                .gproc32,
                .gproc32_st,
                .lproc32_id,
                .gproc32_id,
                .lproc32_dpc,
                .lproc32_dpc_id,
                => {
                    const skip: *align(1) pdb.ProcSym = @ptrCast(inline_prefix);
                    // NOTE(review): assumes `ProcSym.end` is an offset into
                    // `module.symbols` — confirm against the CodeView spec.
                    // Refuse to move backwards so broken debug info cannot
                    // loop forever.
                    if (skip.end <= iter.offset) return null;
                    iter.offset = skip.end;
                },
                .inlinesite,
                .inlinesite2,
                => {
                    iter.offset = record_end;
                    return @ptrCast(inline_prefix);
                },
                else => iter.offset = record_end,
            }
        }
        return null;
    }
};
pub const BinaryAnnotation = union(enum) {
code_offset: u32,
change_code_offset_base: u32,
change_code_offset: u32,
change_code_length: u32,
change_file: u32,
change_line_offset: i32,
change_line_end_delta: u32,
change_range_kind: RangeKind,
change_column_start: u32,
change_column_end_delta: i32,
change_code_offset_and_line_offset: struct { code_delta: u32, line_delta: i32 },
change_code_length_and_code_offset: struct { length: u32, delta: u32 },
change_column_end: u32,
pub const RangeKind = enum(u32) { expression = 0, statement = 1 };
/// A virtual machine that processes binary annotations.
pub const RangeIterator = struct {
annotations: Iterator,
curr: PartialRange,
/// The previous range is tracked as the code length is sometimes implied by the subsequent
/// range.
prev: ?PartialRange,
const PartialRange = struct {
line_offset: i32,
file_id: ?u32,
code_offset: u32,
code_length: ?u32,
/// Resolves a partial range to a range with a definite length, or returns null if this
/// is not possible.
fn resolve(self: PartialRange, next_code_offset: ?u32) ?Range {
return .{
.line_offset = self.line_offset,
.file_id = self.file_id,
.code_offset = self.code_offset,
.code_length = b: {
if (self.code_length) |l| break :b l;
const end = next_code_offset orelse return null;
break :b end - self.code_offset;
},
};
}
};
pub fn init(annotations: Iterator) RangeIterator {
return .{
.annotations = annotations,
.curr = .{
.line_offset = 0,
.file_id = null,
.code_offset = 0,
.code_length = null,
},
.prev = null,
};
}
pub const Range = struct {
line_offset: i32,
file_id: ?u32,
code_offset: u32,
code_length: u32,
pub fn contains(self: Range, offset_in_func: usize) bool {
return self.code_offset <= offset_in_func and
offset_in_func < self.code_offset + self.code_length;
}
};
pub fn next(self: *RangeIterator) error{InvalidDebugInfo}!?Range {
while (try self.annotations.next()) |annotation| {
switch (annotation) {
.change_code_offset => |delta| {
self.curr.code_offset += delta;
},
.change_code_length => |length| {
if (self.prev) |*prev| prev.code_length = prev.code_length orelse length;
self.curr.code_offset += length;
},
// LLVM has code to emit these, but I wasn't able to figure out how to trigger it
// so this logic is untested.
.change_file => |file_id| {
self.curr.file_id = file_id;
},
// LLVM never emits this opcode, but it's clear enough how to interpret it so we
// may as well handle it in case they emit it in the future
.change_code_length_and_code_offset => |info| {
self.curr.code_length = info.length;
self.curr.code_offset += info.delta;
},
.change_line_offset => |delta| {
self.curr.line_offset += delta;
},
.change_code_offset_and_line_offset => |info| {
self.curr.code_offset += info.code_delta;
self.curr.line_offset += info.line_delta;
},
// Not emitted by LLVM at the time of writing, and we don't want to add support
// without a test case. Safe to ignore since we don't use this info right now.
.change_line_end_delta,
.change_column_start,
.change_column_end_delta,
.change_column_end,
=> {},
// Not emitted by LLVM at the time of writing. Various sources conflict on how
// these opcodes should be interpreted, so we make no attempt to handle them.
.code_offset,
.change_code_offset_base,
.change_range_kind,
=> {
self.annotations = .empty;
self.prev = null;
return null;
},
}
// If we have a new code offset, return the previous range if it exists, resolving
// its length if necessary.
switch (annotation) {
.change_code_offset,
.change_code_offset_and_line_offset,
.change_code_length_and_code_offset,
=> {},
else => continue,
}
defer self.prev = self.curr;
const prev = self.prev orelse continue;
return prev.resolve(self.curr.code_offset);
}
// If we've processed all the binary operations but still have a previous range leftover
// with a known length, return it.
const prev = self.prev orelse return null;
defer self.prev = null;
return prev.resolve(null);
}
};
pub const Iterator = struct {
reader: Io.Reader,
pub const empty: Iterator = .{ .reader = .ending_instance };
pub fn next(self: *Iterator) error{InvalidDebugInfo}!?BinaryAnnotation {
return take(&self.reader) catch |err| switch (err) {
error.ReadFailed => return error.InvalidDebugInfo,
error.EndOfStream => return null,
};
}
};
/// Decodes the next binary annotation instruction (opcode plus its operands) from
/// `reader`. Returns `error.EndOfStream` both at the true end of input and on the
/// `invalid` padding opcode; returns `error.ReadFailed` on any malformed encoding.
pub fn take(reader: *Io.Reader) Io.Reader.Error!BinaryAnnotation {
    // Opcodes outside the known set are rejected as malformed rather than skipped,
    // since their operand count would be unknown.
    const op = std.enums.fromInt(
        pdb.BinaryAnnotationOpcode,
        try takePackedU32(reader),
    ) orelse return error.ReadFailed;
    switch (op) {
        // Microsoft's docs say that invalid is used as padding, though it is left ambiguous
        // whether padding is allowed internally or only after all instructions are complete.
        // Empirically, the latter appears to be the case, at least with the output from LLVM
        // that I've tested.
        .invalid => return error.EndOfStream,
        .code_offset => return .{
            .code_offset = try expect(takePackedU32(reader)),
        },
        .change_code_offset_base => return .{
            .change_code_offset_base = try expect(takePackedU32(reader)),
        },
        .change_code_offset => return .{
            .change_code_offset = try expect(takePackedU32(reader)),
        },
        .change_code_length => return .{
            .change_code_length = try expect(takePackedU32(reader)),
        },
        .change_file => return .{
            .change_file = try expect(takePackedU32(reader)),
        },
        // Line deltas are signed; see `decodeI32` for the sign-in-low-bit encoding.
        .change_line_offset => return .{
            .change_line_offset = try expect(takePackedI32(reader)),
        },
        .change_line_end_delta => return .{
            .change_line_end_delta = try expect(takePackedU32(reader)),
        },
        .change_range_kind => return .{
            .change_range_kind = std.enums.fromInt(
                RangeKind,
                try expect(takePackedU32(reader)),
            ) orelse return error.ReadFailed,
        },
        .change_column_start => return .{
            .change_column_start = try expect(takePackedU32(reader)),
        },
        .change_column_end_delta => return .{
            .change_column_end_delta = try expect(takePackedI32(reader)),
        },
        .change_code_offset_and_line_offset => {
            // Both operands are packed into a single compressed u32: the low 4 bits
            // hold the code delta, the rest a sign-in-low-bit encoded line delta.
            const EncodedArgs = packed struct(u32) {
                code_delta: u4,
                encoded_line_delta: u28,
            };
            const args: EncodedArgs = @bitCast(try expect(takePackedU32(reader)));
            return .{
                .change_code_offset_and_line_offset = .{
                    .code_delta = args.code_delta,
                    .line_delta = decodeI32(args.encoded_line_delta),
                },
            };
        },
        .change_code_length_and_code_offset => return .{
            .change_code_length_and_code_offset = .{
                .length = try expect(takePackedU32(reader)),
                .delta = try expect(takePackedU32(reader)),
            },
        },
        .change_column_end => return .{
            .change_column_end = try expect(takePackedU32(reader)),
        },
    }
}
// Decompresses a CodeView variable-length unsigned integer. Adapted from:
// https://github.com/microsoft/microsoft-pdb/blob/805655a28bd8198004be2ac27e6e0290121a5e89/include/cvinfo.h#L4942
//
// The top bits of the first byte select the total width: 0b0... is 1 byte,
// 0b10... is 2 bytes, 0b110... is 4 bytes; any other prefix is malformed.
pub fn takePackedU32(reader: *Io.Reader) Io.Reader.Error!u32 {
    const first: u32 = try reader.takeByte();
    if (first < 0x80) return first;
    const second: u32 = try reader.takeByte();
    if (first & 0xC0 == 0x80) return ((first & 0x3F) << 8) | second;
    const third: u32 = try reader.takeByte();
    const fourth: u32 = try reader.takeByte();
    if (first & 0xE0 != 0xC0) return error.ReadFailed;
    return ((first & 0x1F) << 24) | (second << 16) | (third << 8) | fourth;
}
/// Reads a compressed unsigned value and reinterprets it as a signed delta
/// (sign carried in the low bit; see `decodeI32`).
pub fn takePackedI32(reader: *Io.Reader) Io.Reader.Error!i32 {
    return decodeI32(try takePackedU32(reader));
}
/// Decodes a compressed signed value: the low bit carries the sign and the
/// remaining bits the magnitude, matching `DecodeSignedInt32` in Microsoft's
/// cvinfo.h reference implementation.
pub fn decodeI32(u: u32) i32 {
    // Shift the *unsigned* value so the magnitude is always non-negative, matching
    // the reference (which shifts before casting to signed). The previous code
    // bit-cast first and used an arithmetic shift, which diverged from the
    // reference for inputs with bit 31 set. `u >> 1` always fits in an i32.
    const magnitude: i32 = @intCast(u >> 1);
    return if (u & 1 != 0) -magnitude else magnitude;
}
/// Collapses a reader error union to the single `error.ReadFailed`, so callers in
/// `take` can treat a short read mid-instruction as a malformed encoding rather
/// than a clean end of stream.
fn expect(value: anytype) error{ReadFailed}!@typeInfo(@TypeOf(value)).error_union.payload {
    // Restrict usage to reader results so no other error set is silently swallowed.
    comptime assert(@typeInfo(@TypeOf(value)).error_union.error_set == Io.Reader.Error);
    return value catch error.ReadFailed;
}
};
/// Looks up the name of an inlined function by its inlinee id, which is a type
/// index into the IPI stream. Returns `null` when the IPI stream is absent, the
/// record cannot be found, or parsing fails.
///
/// The returned slice points into `self.ipi`; it is only valid for that buffer's
/// lifetime and must not be freed by the caller.
pub fn findInlineeName(self: *const Pdb, inlinee: u32) ?[]const u8 {
    // According to LLVM, the high bit *can* be used to indicate that a type index comes from the
    // ipi stream in which case that bit needs to be cleared. LLVM doesn't generate data in this
    // manner, but we may as well handle it since it just involves a single bitwise and.
    // https://llvm.org/docs/PDB/TpiStream.html#type-indices
    const type_index = inlinee & 0x7FFFFFFF;
    var reader: Io.Reader = .fixed(self.ipi orelse return null);
    const header = reader.takeStructPointer(pdb.IpiStreamHeader) catch return null;
    // Records are identified by position: the n-th record after the header has type
    // index `type_index_begin + n`, so walk them in order until we reach ours.
    for (header.type_index_begin..header.type_index_end) |curr_type_index| {
        const prefix = reader.takeStructPointer(pdb.LfRecordPrefix) catch return null;
        // `len` counts everything after the length field itself, so it must at
        // least cover the 2-byte `kind`.
        if (prefix.len < 2) return null;
        // Advance past the record body; `prefix` still points at it inside the
        // fixed buffer, so the casts below remain valid.
        reader.discardAll(prefix.len - @sizeOf(u16)) catch return null;
        if (curr_type_index == type_index) {
            switch (prefix.kind) {
                .func_id => {
                    // NOTE(review): assumes the record body is at least
                    // @sizeOf(pdb.LfFuncId) bytes and the name is null-terminated;
                    // only `prefix.len < 2` is checked above — TODO confirm.
                    const func: *align(1) pdb.LfFuncId = @ptrCast(prefix);
                    return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(&func.name[0])), 0);
                },
                .mfunc_id => {
                    const func: *align(1) pdb.LfMFuncId = @ptrCast(prefix);
                    return std.mem.sliceTo(@as([*:0]const u8, @ptrCast(&func.name[0])), 0);
                },
                // Any other record kind cannot name a function.
                else => return null,
            }
        }
    }
    return null;
}
/// Returns an iterator over the inline-site symbol records nested inside
/// `proc_sym`, i.e. the records between the procedure record and its `end` offset.
pub fn getInlinees(self: *Pdb, module: *Module, proc_sym: *align(1) const pdb.ProcSym) InlineSiteSymIterator {
    const module_index = module - self.modules.ptr;
    // Offset of the first record following `proc_sym`, relative to the start of the
    // module's symbol stream (`record_len` counts from the `record_kind` field, so
    // the full record is `record_len + @sizeOf(u16)` bytes).
    const offset = @intFromPtr(proc_sym) -
        @intFromPtr(module.symbols.ptr) +
        proc_sym.record_len +
        @sizeOf(u16);
    const symbols_end = @intFromPtr(module.symbols.ptr) + module.symbols.len;
    // NOTE(review): `offset` is stream-relative and `proc_sym.end` is a u32 stream
    // offset, but `symbols_end` is an absolute address, so these comparisons can
    // essentially never trigger; the intended bound looks like `module.symbols.len`.
    // TODO confirm against InlineSiteSymIterator's interpretation of offset/end.
    if (offset > symbols_end or proc_sym.end > symbols_end) return .empty;
    return .{
        .module_index = module_index,
        .offset = offset,
        .end = proc_sym.end,
    };
}
/// Returns an iterator over the compressed binary annotations trailing an
/// inline-site symbol record. Returns `.empty` for unrecognized record kinds or
/// when the computed span escapes the module's symbol stream.
pub fn getBinaryAnnotations(self: *Pdb, module: *Module, site: *align(1) const pdb.InlineSiteSym) BinaryAnnotation.Iterator {
    _ = self;
    // Annotations begin right after the fixed-size portion of the record.
    var start: usize = @intFromPtr(site) + @sizeOf(pdb.InlineSiteSym);
    // `record_len` counts from the `record_kind` field, so the record ends at
    // site + record_len + @sizeOf(u16); expressed here relative to `start`.
    var end = start + site.record_len + @sizeOf(u16) - @sizeOf(pdb.InlineSiteSym);
    switch (site.record_kind) {
        .inlinesite => {},
        // inlinesite2 carries one extra fixed field (invocation count) before the
        // annotation bytes.
        .inlinesite2 => start += @sizeOf(pdb.InlineSiteSym2) - @sizeOf(pdb.InlineSiteSym),
        else => end = start,
    }
    // Reject spans that escape the symbol stream backing `site`.
    // NOTE(review): a malformed record with record_len + 2 < @sizeOf(pdb.InlineSiteSym)
    // yields end < start, and `end - start` below would underflow — TODO confirm
    // record lengths are validated upstream.
    if (start < @intFromPtr(module.symbols.ptr) or end > @intFromPtr(module.symbols.ptr) + module.symbols.len) return .empty;
    const len = end - start;
    const ptr: [*]const u8 = @ptrFromInt(start);
    const slice = ptr[0..len];
    return .{ .reader = Io.Reader.fixed(slice) };
}
/// Resolves `offset_in_func` within the inline site `site` to a source location,
/// using the binary-annotation ranges attached to the site. Returns `null` when no
/// range covers the offset.
///
/// `inlinee_src_line` supplies the defaults (file id, starting line) that the
/// annotation ranges are relative to. On success the returned `file_name` is
/// allocated with `gpa` and owned by the caller.
pub fn getInlineSiteSourceLocation(
    self: *Pdb,
    gpa: Allocator,
    mod: *Module,
    site: *align(1) const pdb.InlineSiteSym,
    inlinee_src_line: *align(1) const pdb.InlineeSourceLine,
    offset_in_func: usize,
) !?std.debug.SourceLocation {
    var ranges: BinaryAnnotation.RangeIterator = .init(self.getBinaryAnnotations(mod, site));
    while (try ranges.next()) |range| {
        if (!range.contains(offset_in_func)) continue;
        const file_id = range.file_id orelse inlinee_src_line.file_id;
        const file_name = try self.getFileName(gpa, mod, file_id);
        // Free with the allocator that performed the allocation: callers may pass
        // an arena distinct from `self.allocator` (the previous code freed with
        // `self.allocator`, which would be an invalid free in that case).
        errdefer gpa.free(file_name);
        return .{
            // The range stores a signed line delta as a u32 bit pattern; wrapping
            // addition applies it to the inlinee's starting line.
            .line = inlinee_src_line.source_line_num +% @as(u32, @bitCast(range.line_offset)),
            // LLVM doesn't currently emit column information for inlined calls in PDBs.
            .column = 0,
            .file_name = file_name,
        };
    }
    return null;
}
/// Resolves `file_id` (an offset into the module's file-checksum subsection) to a
/// file name read out of the PDB string table. The returned slice is allocated
/// with `gpa` and owned by the caller.
pub fn getFileName(self: *Pdb, gpa: Allocator, mod: *Module, file_id: u32) ![]const u8 {
    const checksum_offset = mod.checksum_offset orelse return error.MissingDebugInfo;
    // NOTE(review): `subsect_index` is not checked against `mod.subsect_info.len`;
    // a corrupt `file_id` would trip the bounds check below in safe builds — TODO
    // confirm callers validate file ids.
    const subsect_index = checksum_offset + file_id;
    const chksum_hdr: *align(1) pdb.FileChecksumEntryHeader = @ptrCast(&mod.subsect_info[subsect_index]);
    const strtab_offset = @sizeOf(pdb.StringTableHeader) + chksum_hdr.file_name_offset;
    self.string_table.?.seekTo(strtab_offset) catch return error.InvalidDebugInfo;
    const string_reader = &self.string_table.?.interface;
    var source_file_name: Io.Writer.Allocating = .init(gpa);
    defer source_file_name.deinit();
    // Copy bytes up to the string's null terminator, capped at 1024 bytes.
    _ = try string_reader.streamDelimiterLimit(&source_file_name.writer, 0, .limited(1024));
    assert(string_reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
    string_reader.toss(1);
    return try source_file_name.toOwnedSlice();
}
/// Returns the null-terminated procedure name stored inline at the end of a
/// `ProcSym` record. The slice borrows from the module's symbol stream.
pub fn getSymbolName(self: *Pdb, proc_sym: *align(1) const pdb.ProcSym) []const u8 {
    _ = self;
    const name: [*:0]const u8 = @ptrCast(&proc_sym.name[0]);
    return std.mem.sliceTo(name, 0);
}
/// Pairs a subsection signature with a pointer into the raw inlinee source-line
/// record (note: distinct from `pdb.InlineeSourceLine`, which is the on-disk
/// record itself). Elements are kept sorted by inlinee id.
pub const InlineeSourceLine = struct {
    signature: pdb.InlineeSourceLineSignature,
    info: *align(1) const pdb.InlineeSourceLine,

    // Sort order for `std.mem.sortUnstable`: ascending by inlinee id.
    fn lessThan(_: void, lhs: InlineeSourceLine, rhs: InlineeSourceLine) bool {
        return std.math.order(lhs.info.inlinee, rhs.info.inlinee) == .lt;
    }

    // Comparator for `std.sort.binarySearch`, keyed by inlinee id.
    fn compare(inlinee: u32, self: InlineeSourceLine) std.math.Order {
        if (inlinee < self.info.inlinee) return .lt;
        if (inlinee > self.info.inlinee) return .gt;
        return .eq;
    }
};
/// Returns every `InlineeSourceLine` in `mod` whose inlinee id equals `inlinee`.
/// Ideally there would be exactly one entry per inlinee, but LLVM appears to
/// assign all functions that share a name the same inlinee id. This appears to be
/// a bug, so the best the caller can do right now is handle all the results.
pub fn getInlineeSourceLines(
    self: *Pdb,
    mod: *Module,
    inlinee: u32,
) []const InlineeSourceLine {
    _ = self;
    const lines = mod.inlinee_source_lines;
    // The list is sorted by inlinee id, so a binary search lands on an arbitrary
    // match and every other match sits immediately adjacent to it.
    const any = std.sort.binarySearch(
        InlineeSourceLine,
        lines,
        inlinee,
        InlineeSourceLine.compare,
    ) orelse return &.{};
    // Widen the hit to the full run of equal entries on both sides.
    var begin = any;
    while (begin > 0 and lines[begin - 1].info.inlinee == inlinee) begin -= 1;
    var end = any + 1;
    while (end < lines.len and lines[end].info.inlinee == inlinee) end += 1;
    return lines[begin..end];
}
pub fn getLineNumberInfo(self: *Pdb, gpa: Allocator, module: *Module, address: u64) !std.debug.SourceLocation {
std.debug.assert(module.populated);
const subsect_info = module.subsect_info;
const gpa = self.allocator;
var sect_offset: usize = 0;
var skip_len: usize = undefined;
const checksum_offset = module.checksum_offset orelse return error.MissingDebugInfo;
while (sect_offset != subsect_info.len) : (sect_offset += skip_len) {
const subsect_hdr: *align(1) pdb.DebugSubsectionHeader = @ptrCast(&subsect_info[sect_offset]);
skip_len = subsect_hdr.length;
@@ -290,20 +769,8 @@ pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !std.debug.S
// line_i == 0 would mean that no matching pdb.LineNumberEntry was found.
if (line_i > 0) {
const subsect_index = checksum_offset + block_hdr.name_index;
const chksum_hdr: *align(1) pdb.FileChecksumEntryHeader = @ptrCast(&module.subsect_info[subsect_index]);
const strtab_offset = @sizeOf(pdb.StringTableHeader) + chksum_hdr.file_name_offset;
try self.string_table.?.seekTo(strtab_offset);
const source_file_name = s: {
const string_reader = &self.string_table.?.interface;
var source_file_name: std.Io.Writer.Allocating = .init(gpa);
defer source_file_name.deinit();
_ = try string_reader.streamDelimiterLimit(&source_file_name.writer, 0, .limited(1024));
assert(string_reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
string_reader.toss(1);
break :s try source_file_name.toOwnedSlice();
};
errdefer gpa.free(source_file_name);
const file_name = try self.getFileName(gpa, module, block_hdr.name_index);
errdefer gpa.free(file_name);
const line_entry_idx = line_i - 1;
@@ -318,7 +785,7 @@ pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !std.debug.S
const line_num_entry: *align(1) pdb.LineNumberEntry = @ptrCast(&subsect_info[found_line_index]);
return .{
.file_name = source_file_name,
.file_name = file_name,
.line = line_num_entry.flags.start,
.column = column,
};
@@ -366,7 +833,43 @@ pub fn getModule(self: *Pdb, index: usize) !?*Module {
const gpa = self.allocator;
mod.symbols = try reader.readAlloc(gpa, mod.mod_info.sym_byte_size - 4);
errdefer gpa.free(mod.symbols);
mod.subsect_info = try reader.readAlloc(gpa, mod.mod_info.c13_byte_size);
errdefer gpa.free(mod.subsect_info);
mod.inlinee_source_lines = b: {
var inlinee_source_lines: std.ArrayList(InlineeSourceLine) = .empty;
defer inlinee_source_lines.deinit(gpa);
var subsects: Io.Reader = .fixed(mod.subsect_info);
while (subsects.takeStructPointer(pdb.DebugSubsectionHeader) catch null) |subsect_hdr| {
var subsect: Io.Reader = .fixed(subsects.take(subsect_hdr.length) catch return null);
if (subsect_hdr.kind == .inlinee_lines) {
const inlinee_source_line_signature = subsect.takeEnum(pdb.InlineeSourceLineSignature, .little) catch return error.InvalidDebugInfo;
const has_extra_files = switch (inlinee_source_line_signature) {
.normal => false,
.ex => true,
else => continue,
};
while (subsect.takeStructPointer(pdb.InlineeSourceLine) catch null) |info| {
if (has_extra_files) {
const file_count = subsect.takeInt(u32, .little) catch
return error.InvalidDebugInfo;
const file_bytes = std.math.mul(usize, file_count, @sizeOf(u32)) catch return error.InvalidDebugInfo;
subsect.discardAll(file_bytes) catch
return error.InvalidDebugInfo;
}
try inlinee_source_lines.append(gpa, .{
.signature = inlinee_source_line_signature,
.info = info,
});
}
}
}
std.mem.sortUnstable(InlineeSourceLine, inlinee_source_lines.items, {}, InlineeSourceLine.lessThan);
break :b try inlinee_source_lines.toOwnedSlice(gpa);
};
errdefer gpa.free(mod.inlinee_source_lines);
var sect_offset: usize = 0;
var skip_len: usize = undefined;
@@ -497,7 +1000,7 @@ const MsfStream = struct {
next_read_pos: u64,
blocks: []u32,
block_size: u32,
interface: std.Io.Reader,
interface: Io.Reader,
err: ?Error,
const Error = File.Reader.SeekError;
@@ -527,7 +1030,7 @@ const MsfStream = struct {
};
}
fn stream(r: *std.Io.Reader, w: *std.Io.Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
fn stream(r: *Io.Reader, w: *Io.Writer, limit: Io.Limit) Io.Reader.StreamError!usize {
const ms: *MsfStream = @alignCast(@fieldParentPtr("interface", r));
var block_id: usize = @intCast(ms.next_read_pos / ms.block_size);
@@ -595,7 +1098,7 @@ const MsfStream = struct {
}
};
fn readSparseBitVector(reader: *std.Io.Reader, allocator: Allocator) ![]u32 {
fn readSparseBitVector(reader: *Io.Reader, allocator: Allocator) ![]u32 {
const num_words = try reader.takeInt(u32, .little);
var list = std.array_list.Managed(u32).init(allocator);
errdefer list.deinit();
+19 -18
View File
@@ -30,7 +30,15 @@ pub fn deinit(si: *SelfInfo, io: Io) void {
if (si.unwind_cache) |cache| gpa.free(cache);
}
pub fn getSymbol(si: *SelfInfo, io: Io, address: usize) Error!std.debug.Symbol {
pub fn getSymbols(
si: *SelfInfo,
io: Io,
symbol_allocator: Allocator,
text_arena: Allocator,
address: usize,
resolve_inline_callers: bool,
symbols: *std.ArrayList(std.debug.Symbol),
) Error!void {
const gpa = std.debug.getDebugInfoAllocator();
const module = try si.findModule(gpa, io, address, .exclusive);
defer si.rwlock.unlock(io);
@@ -53,28 +61,21 @@ pub fn getSymbol(si: *SelfInfo, io: Io, address: usize) Error!std.debug.Symbol {
};
loaded_elf.scanned_dwarf = true;
}
if (dwarf.getSymbol(gpa, native_endian, vaddr)) |sym| {
return sym;
} else |err| switch (err) {
error.MissingDebugInfo => {},
error.InvalidDebugInfo,
error.OutOfMemory,
=> |e| return e,
error.ReadFailed,
error.EndOfStream,
error.Overflow,
error.StreamTooLong,
=> return error.InvalidDebugInfo,
}
return dwarf.getSymbols(
symbol_allocator,
text_arena,
native_endian,
vaddr,
resolve_inline_callers,
symbols,
);
}
// When DWARF is unavailable, fall back to searching the symtab.
return loaded_elf.file.searchSymtab(gpa, vaddr) catch |err| switch (err) {
try symbols.append(symbol_allocator, loaded_elf.file.searchSymtab(gpa, vaddr) catch |err| switch (err) {
error.NoSymtab, error.NoStrtab => return error.MissingDebugInfo,
error.BadSymtab => return error.InvalidDebugInfo,
error.OutOfMemory => |e| return e,
};
});
}
pub fn getModuleName(si: *SelfInfo, io: Io, address: usize) Error![]const u8 {
const gpa = std.debug.getDebugInfoAllocator();
+18 -7
View File
@@ -22,8 +22,18 @@ pub fn deinit(si: *SelfInfo, io: Io) void {
si.modules.deinit(gpa);
}
pub fn getSymbol(si: *SelfInfo, io: Io, address: usize) Error!std.debug.Symbol {
pub fn getSymbols(
si: *SelfInfo,
io: Io,
symbol_allocator: Allocator,
text_arena: Allocator,
address: usize,
resolve_inline_callers: bool,
symbols: *std.ArrayList(std.debug.Symbol),
) Error!void {
_ = resolve_inline_callers;
const gpa = std.debug.getDebugInfoAllocator();
const module = try si.findModule(gpa, io, address);
defer si.mutex.unlock(io);
@@ -43,23 +53,23 @@ pub fn getSymbol(si: *SelfInfo, io: Io, address: usize) Error!std.debug.Symbol {
const ofile_dwarf, const ofile_vaddr = file.getDwarfForAddress(gpa, io, vaddr) catch {
// Return at least the symbol name if available.
return .{
return symbols.append(symbol_allocator, .{
.name = try file.lookupSymbolName(vaddr),
.compile_unit_name = null,
.source_location = null,
};
});
};
const compile_unit = ofile_dwarf.findCompileUnit(native_endian, ofile_vaddr) catch {
// Return at least the symbol name if available.
return .{
return symbols.append(symbol_allocator, .{
.name = try file.lookupSymbolName(vaddr),
.compile_unit_name = null,
.source_location = null,
};
});
};
return .{
try symbols.append(symbol_allocator, .{
.name = ofile_dwarf.getSymbolName(ofile_vaddr) orelse
try file.lookupSymbolName(vaddr),
.compile_unit_name = compile_unit.die.getAttrString(
@@ -73,11 +83,12 @@ pub fn getSymbol(si: *SelfInfo, io: Io, address: usize) Error!std.debug.Symbol {
},
.source_location = ofile_dwarf.getLineNumberInfo(
gpa,
text_arena,
native_endian,
compile_unit,
ofile_vaddr,
) catch null,
};
});
}
pub fn getModuleName(si: *SelfInfo, io: Io, address: usize) Error![]const u8 {
_ = si;
+137 -36
View File
@@ -1,10 +1,10 @@
mutex: Io.Mutex,
lock: Io.RwLock,
ntdll_handle: ?if (load_dll_notification_procs) *anyopaque else noreturn,
notification_cookie: ?LDR.DLL_NOTIFICATION.COOKIE,
modules: std.ArrayList(Module),
pub const init: SelfInfo = .{
.mutex = .init,
.lock = .init,
.ntdll_handle = null,
.notification_cookie = null,
.modules = .empty,
@@ -25,18 +25,33 @@ pub fn deinit(si: *SelfInfo, io: Io) void {
si.modules.deinit(gpa);
}
pub fn getSymbol(si: *SelfInfo, io: Io, address: usize) Error!std.debug.Symbol {
pub fn getSymbols(
si: *SelfInfo,
io: Io,
symbol_allocator: Allocator,
text_arena: Allocator,
address: usize,
resolve_inline_callers: bool,
symbols: *std.ArrayList(std.debug.Symbol),
) Error!void {
const gpa = std.debug.getDebugInfoAllocator();
try si.mutex.lock(io);
defer si.mutex.unlock(io);
try si.lock.lockShared(io);
defer si.lock.unlockShared(io);
const module = try si.findModule(gpa, address);
const di = try module.getDebugInfo(gpa, io);
return di.getSymbol(gpa, address - @intFromPtr(module.entry.DllBase));
return di.getSymbols(
symbol_allocator,
text_arena,
address - @intFromPtr(module.entry.DllBase),
resolve_inline_callers,
symbols,
);
}
pub fn getModuleName(si: *SelfInfo, io: Io, address: usize) Error![]const u8 {
const gpa = std.debug.getDebugInfoAllocator();
try si.mutex.lock(io);
defer si.mutex.unlock(io);
try si.lock.lockShared(io);
defer si.lock.unlockShared(io);
const module = try si.findModule(gpa, address);
return module.name orelse {
const name = try std.unicode.wtf16LeToWtf8Alloc(gpa, module.entry.BaseDllName.slice());
@@ -46,8 +61,8 @@ pub fn getModuleName(si: *SelfInfo, io: Io, address: usize) Error![]const u8 {
}
pub fn getModuleSlide(si: *SelfInfo, io: Io, address: usize) Error!usize {
const gpa = std.debug.getDebugInfoAllocator();
try si.mutex.lock(io);
defer si.mutex.unlock(io);
try si.lock.lockShared(io);
defer si.lock.unlockShared(io);
const module = try si.findModule(gpa, address);
return module.base_address;
}
@@ -240,7 +255,14 @@ const Module = struct {
arena.deinit();
}
fn getSymbol(di: *DebugInfo, gpa: Allocator, vaddr: usize) Error!std.debug.Symbol {
fn getSymbols(
di: *DebugInfo,
symbol_allocator: Allocator,
text_arena: Allocator,
vaddr: usize,
resolve_inline_callers: bool,
symbols: *std.ArrayList(std.debug.Symbol),
) Error!void {
pdb: {
const pdb = &(di.pdb orelse break :pdb);
var coff_section: *align(1) const coff.SectionHeader = undefined;
@@ -270,32 +292,101 @@ const Module = struct {
} orelse {
return error.InvalidDebugInfo; // bad module index
};
return .{
.name = pdb.getSymbolName(module, vaddr - coff_section.virtual_address),
.compile_unit_name = fs.path.basename(module.obj_file_name),
.source_location = pdb.getLineNumberInfo(
module,
vaddr - coff_section.virtual_address,
) catch null,
};
const addr = vaddr - coff_section.virtual_address;
const maybe_proc = pdb.getProcSym(module, addr);
const compile_unit_name = fs.path.basename(module.obj_file_name);
const symbols_top = symbols.items.len;
if (maybe_proc) |proc| {
const offset_in_func = addr - proc.code_offset;
var last_inlinee: ?u32 = null;
var iter = pdb.getInlinees(module, proc);
while (iter.next(module)) |inline_site| {
// Filter out duplicate inline sites. Tools like llvm-addr2line output
// duplicate sites in the same cases as us if we elide this check,
// implying that they exist in the underlying data and are not indicative
// of a parser bug. No useful information is lost here since an inline site
// can't actually reference itself.
if (inline_site.inlinee == last_inlinee) continue;
// If our address points into this site, get the source location(s) it
// points at
for (pdb.getInlineeSourceLines(
module,
inline_site.inlinee,
)) |inlinee_src_line| {
const maybe_loc = pdb.getInlineSiteSourceLocation(
text_arena,
module,
inline_site,
inlinee_src_line.info,
offset_in_func,
) catch continue;
const loc = maybe_loc orelse continue;
// If we aren't trying to resolve inline callers, and we've matched a
// new inline site, we want to overwrite the previously appended
// results.
if (!resolve_inline_callers and inline_site.inlinee != last_inlinee) {
symbols.items.len = symbols_top;
}
// Only resolve the name if we're resolving inline callers, otherwise
// wait until we're done to avoid duplicated work.
const name = if (resolve_inline_callers)
pdb.findInlineeName(inline_site.inlinee)
else
null;
try symbols.append(symbol_allocator, .{
.name = name,
.compile_unit_name = compile_unit_name,
.source_location = loc,
});
last_inlinee = inline_site.inlinee;
}
}
if (resolve_inline_callers) {
// Inline sites are stored in the pdb in reverse order, so we reverse the
// matching sites here. We could alternatively use the parent fields to
// determine the order, but this would introduce seemingly unnecessary
// complexity.
std.mem.reverse(std.debug.Symbol, symbols.items);
} else if (last_inlinee) |inlinee| {
// If we aren't resolving inline callers, then all results will have the
// same inline site, and we resolve its name once at the end.
const name = pdb.findInlineeName(inlinee);
for (symbols.items) |*symbol| symbol.name = name;
}
}
// If there's room for another symbol, add the actual proc
if (resolve_inline_callers or symbols.items.len == 0) {
try symbols.append(symbol_allocator, .{
.name = if (maybe_proc) |proc| pdb.getSymbolName(proc) else null,
.compile_unit_name = compile_unit_name,
.source_location = pdb.getLineNumberInfo(text_arena, module, addr) catch null,
});
}
return;
}
dwarf: {
const dwarf = &(di.dwarf orelse break :dwarf);
const dwarf_address = vaddr + di.coff_image_base;
return dwarf.getSymbol(gpa, native_endian, dwarf_address) catch |err| switch (err) {
error.MissingDebugInfo => break :dwarf,
error.InvalidDebugInfo,
error.OutOfMemory,
=> |e| return e,
error.ReadFailed,
error.EndOfStream,
error.Overflow,
error.StreamTooLong,
=> return error.InvalidDebugInfo,
};
const addr = vaddr + di.coff_image_base;
return dwarf.getSymbols(
symbol_allocator,
text_arena,
native_endian,
addr,
resolve_inline_callers,
symbols,
);
}
return error.MissingDebugInfo;
}
};
@@ -505,6 +596,16 @@ const Module = struct {
error.ReadFailed,
=> |e| return e,
};
pdb.parseIpiStream() catch |err| switch (err) {
error.UnknownPDBVersion => return error.UnsupportedDebugInfo,
error.EndOfStream,
=> return error.InvalidDebugInfo,
error.OutOfMemory,
error.ReadFailed,
=> |e| return e,
};
if (!std.mem.eql(u8, &coff_obj.guid, &pdb.guid) or coff_obj.age != pdb.age)
return error.InvalidDebugInfo;
@@ -531,7 +632,7 @@ const Module = struct {
}
};
/// Assumes we already hold `si.mutex`.
/// Assumes we already hold `si.lock`.
fn findModule(si: *SelfInfo, gpa: Allocator, address: usize) error{ MissingDebugInfo, OutOfMemory, Unexpected }!*Module {
for (si.modules.items) |*mod| {
const base = @intFromPtr(mod.entry.DllBase);
@@ -601,8 +702,8 @@ fn dllNotification(
.LOADED => {},
.UNLOADED => {
const io = std.Options.debug_io;
si.mutex.lockUncancelable(io);
defer si.mutex.unlock(io);
si.lock.lockUncancelable(io);
defer si.lock.unlock(io);
for (si.modules.items, 0..) |*mod, mod_index| {
if (mod.entry.DllBase != data.Unloaded.DllBase) continue;
mod.deinit(std.debug.getDebugInfoAllocator(), io);
+7 -7
View File
@@ -81,7 +81,7 @@
//! Resizing and remapping are forwarded directly to the backing allocator,
//! except where such operations would change the category from large to small.
const builtin = @import("builtin");
const StackTrace = std.builtin.StackTrace;
const StackTrace = std.debug.StackTrace;
const std = @import("std");
const log = std.log.scoped(.DebugAllocator);
@@ -229,7 +229,7 @@ pub fn DebugAllocator(comptime config: Config) type {
std.debug.dumpStackTrace(self.getStackTrace(trace_kind));
}
fn getStackTrace(self: *LargeAlloc, trace_kind: TraceKind) std.builtin.StackTrace {
fn getStackTrace(self: *LargeAlloc, trace_kind: TraceKind) std.debug.StackTrace {
assert(@intFromEnum(trace_kind) < trace_n);
const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
var len: usize = 0;
@@ -237,8 +237,8 @@ pub fn DebugAllocator(comptime config: Config) type {
len += 1;
}
return .{
.instruction_addresses = stack_addresses,
.index = len,
.return_addresses = stack_addresses[0..len],
.skipped = if (len < stack_addresses.len) .none else .unknown,
};
}
@@ -339,8 +339,8 @@ pub fn DebugAllocator(comptime config: Config) type {
len += 1;
}
return .{
.instruction_addresses = stack_addresses,
.index = len,
.return_addresses = stack_addresses[0..len],
.skipped = if (len < stack_addresses.len) .none else .unknown,
};
}
@@ -508,7 +508,7 @@ pub fn DebugAllocator(comptime config: Config) type {
fn collectStackTrace(first_trace_addr: usize, addr_buf: *[stack_n]usize) void {
const st = std.debug.captureCurrentStackTrace(.{ .first_address = first_trace_addr }, addr_buf);
@memset(addr_buf[@min(st.index, addr_buf.len)..], 0);
@memset(addr_buf[@min(st.return_addresses.len, addr_buf.len)..], 0);
}
fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
+143 -4
View File
@@ -314,11 +314,9 @@ pub const SymbolKind = enum(u16) {
pub const TypeIndex = u32;
// TODO According to this header:
// https://github.com/microsoft/microsoft-pdb/blob/082c5290e5aff028ae84e43affa8be717aa7af73/include/cvinfo.h#L3722
// we should define RecordPrefix as part of the ProcSym structure.
// This might be important when we start generating PDB in self-hosted with our own PE linker.
pub const ProcSym = extern struct {
record_len: u16,
record_kind: SymbolKind,
parent: u32,
end: u32,
next: u32,
@@ -508,3 +506,144 @@ pub const SuperBlock = extern struct {
// implement it so we're kind of safe making this assumption for now.
block_map_addr: u32,
};
pub const IpiStreamVersion = enum(u32) {
v40 = 19950410,
v41 = 19951122,
v50 = 19961031,
v70 = 19990903,
v80 = 20040203,
_,
};
pub const IpiStreamHeader = extern struct {
version: IpiStreamVersion,
header_size: u32,
type_index_begin: u32,
type_index_end: u32,
type_record_bytes: u32,
hash_stream_index: u16,
hash_aux_stream_index: u16,
hash_key_size: u32,
num_hash_buckets: u32,
hash_value_buffer_offset: i32,
hash_value_buffer_length: u32,
index_offset_buffer_offset: i32,
index_offset_buffer_length: u32,
hash_adj_buffer_offset: i32,
hash_adj_buffer_length: u32,
};
pub const LfRecordPrefix = extern struct {
len: u16,
kind: LfRecordKind,
};
pub const LfRecordKind = enum(u16) {
pointer = 0x1002,
modifier = 0x1001,
procedure = 0x1008,
mfunction = 0x1009,
label = 0x000e,
arglist = 0x1201,
fieldlist = 0x1203,
array = 0x1503,
class = 0x1504,
structure = 0x1505,
interface = 0x1519,
@"union" = 0x1506,
@"enum" = 0x1507,
typeserver2 = 0x1515,
vftable = 0x151d,
vtshape = 0x000a,
bitfield = 0x1205,
func_id = 0x1601,
mfunc_id = 0x1602,
buildinfo = 0x1603,
substr_list = 0x1604,
string_id = 0x1605,
udt_src_line = 0x1606,
udt_mod_src_line = 0x1607,
methodlist = 0x1206,
precomp = 0x1509,
endprecomp = 0x0014,
bclass = 0x1400,
binterface = 0x151a,
vbclass = 0x1401,
ivbclass = 0x1402,
vfunctab = 0x1409,
stmember = 0x150e,
method = 0x150f,
member = 0x150d,
nesttype = 0x1510,
onemethod = 0x1511,
enumerate = 0x1502,
index = 0x1404,
pad0 = 0xf0,
_,
};
pub const LfFuncId = extern struct {
len: u16,
kind: LfRecordKind,
scope_id: u32,
type: u32,
name: [1]u8, // null-terminated
};
pub const LfMFuncId = extern struct {
len: u16,
kind: LfRecordKind,
parent_type: u32,
type: u32,
name: [1]u8, // null-terminated
};
pub const InlineSiteSym = extern struct {
record_len: u16,
record_kind: SymbolKind,
parent: u32,
end: u32,
inlinee: u32,
};
pub const InlineSiteSym2 = extern struct {
record_len: u16,
record_kind: SymbolKind,
parent: u32,
end: u32,
inlinee: u32,
invocations: u32,
};
pub const InlineeSourceLineSignature = enum(u32) { normal = 0, ex = 1, _ };
pub const InlineeSourceLine = extern struct {
inlinee: u32,
file_id: u32,
source_line_num: u32,
};
pub const InlineeSourceLineEx = extern struct {
inlinee: u32,
file_id: u32,
source_line_num: u32,
count_of_extra_files: u32,
};
pub const BinaryAnnotationOpcode = enum(u8) {
invalid = 0,
code_offset = 1,
change_code_offset_base = 2,
change_code_offset = 3,
change_code_length = 4,
change_file = 5,
change_line_offset = 6,
change_line_end_delta = 7,
change_range_kind = 8,
change_column_start = 9,
change_column_end_delta = 10,
change_code_offset_and_line_offset = 11,
change_code_length_and_code_offset = 12,
change_column_end = 13,
};
+1 -1
View File
@@ -761,7 +761,7 @@ inline fn wrapMain(result: anytype) u8 {
std.log.err("{t}", .{err});
switch (native_os) {
.freestanding, .other => {},
else => if (@errorReturnTrace()) |trace| std.debug.dumpStackTrace(trace),
else => if (@errorReturnTrace()) |trace| std.debug.dumpErrorReturnTrace(trace),
}
return 1;
};
+2
View File
@@ -165,6 +165,8 @@ pub const Options = struct {
/// * `debug.dumpCurrentStackTrace`
/// * `debug.writeStackTrace`
/// * `debug.dumpStackTrace`
/// * `debug.writeErrorReturnTrace`
/// * `debug.dumpErrorReturnTrace`
///
/// Stack traces can generally be collected and printed when debug info is stripped, but are
/// often less useful since they usually cannot be mapped to source locations and/or have bad
+4 -4
View File
@@ -65,7 +65,7 @@ fn alloc(
if (self.alloc_index == self.fail_index) {
if (!self.has_induced_failure) {
const st = std.debug.captureCurrentStackTrace(.{ .first_address = return_address }, &self.stack_addresses);
@memset(self.stack_addresses[@min(st.index, self.stack_addresses.len)..], 0);
@memset(self.stack_addresses[@min(st.return_addresses.len, self.stack_addresses.len)..], 0);
self.has_induced_failure = true;
}
return null;
@@ -131,15 +131,15 @@ fn free(
}
/// Only valid once `has_induced_failure == true`
pub fn getStackTrace(self: *FailingAllocator) std.builtin.StackTrace {
pub fn getStackTrace(self: *FailingAllocator) std.debug.StackTrace {
std.debug.assert(self.has_induced_failure);
var len: usize = 0;
while (len < self.stack_addresses.len and self.stack_addresses[len] != 0) {
len += 1;
}
return .{
.instruction_addresses = &self.stack_addresses,
.index = len,
.return_addresses = self.stack_addresses[0..len],
.skipped = if (len == self.stack_addresses.len) .unknown else .none,
};
}
+2 -2
View File
@@ -9,11 +9,11 @@ pub fn main() !void {
const captured_st = try foo(&stdout.interface, &st_buf);
try std.debug.writeStackTrace(&captured_st, .{ .writer = &stdout.interface, .mode = .no_color });
try stdout.interface.print("stack trace index: {d}\n", .{captured_st.index});
try stdout.interface.print("stack trace index: {d}\n", .{captured_st.return_addresses.len});
try stdout.interface.flush();
}
fn foo(w: *std.Io.Writer, st_buf: []usize) !std.builtin.StackTrace {
fn foo(w: *std.Io.Writer, st_buf: []usize) !std.debug.StackTrace {
try std.debug.writeCurrentStackTrace(.{}, .{ .writer = w, .mode = .no_color });
return std.debug.captureCurrentStackTrace(.{}, st_buf);
}
+30 -17
View File
@@ -1,4 +1,6 @@
pub fn addCases(cases: *@import("tests.zig").ErrorTracesContext) void {
const std = @import("std");
pub fn addCases(cases: *@import("tests.zig").ErrorTracesContext, os: std.Target.Os.Tag) void {
cases.addCase(.{
.name = "return",
.source =
@@ -464,17 +466,33 @@ pub fn addCases(cases: *@import("tests.zig").ErrorTracesContext) void {
\\}
,
.expect_error = "ThisIsSoSad",
.expect_trace =
\\source.zig:8:5: [address] in bar
\\ return error.ThisIsSoSad;
\\ ^
\\source.zig:5:5: [address] in foo
\\ try bar();
\\ ^
\\source.zig:2:5: [address] in main
\\ try foo();
\\ ^
,
.expect_trace = switch (os) {
// LLVM doesn't emit column info in the binary annotations for inlinee callees in PDBs,
// so our expected result is slightly different for Windows than on other operating
// systems.
.windows =>
\\source.zig:8:5: [address] in bar
\\ return error.ThisIsSoSad;
\\ ^
\\source.zig:5: [address] in foo
\\ try bar();
\\
\\source.zig:2:5: [address] in main
\\ try foo();
\\ ^
,
else =>
\\source.zig:8:5: [address] in bar
\\ return error.ThisIsSoSad;
\\ ^
\\source.zig:5:5: [address] in foo
\\ try bar();
\\ ^
\\source.zig:2:5: [address] in main
\\ try foo();
\\ ^
,
},
.disable_trace_optimized = &.{
.{ .x86_64, .freebsd },
.{ .x86_64, .netbsd },
@@ -493,10 +511,5 @@ pub fn addCases(cases: *@import("tests.zig").ErrorTracesContext) void {
.{ .x86_64, .macos },
.{ .aarch64, .macos },
},
// TODO: the standard library has a bug in PDB parsing where given an address corresponding
// to an inline call, the frame we see will be for the *caller*, not the *callee*. As a
// result this test gives bogus results on Windows right now.
// This is a part of https://codeberg.org/ziglang/zig/issues/30847.
.disable_trace_pdb = true,
});
}
-3
View File
@@ -17,8 +17,6 @@ pub const Case = struct {
/// LLVM ReleaseSmall builds always have the trace disabled regardless of this field, because it
/// seems that LLVM is particularly good at optimizing traces away in those.
disable_trace_optimized: []const DisableConfig = &.{},
/// If `true` then we will not test the error trace on Windows due to bugs in PDB handling.
disable_trace_pdb: bool = false,
pub const DisableConfig = struct { std.Target.Cpu.Arch, std.Target.Os.Tag };
pub const Backend = enum { llvm, selfhosted };
@@ -62,7 +60,6 @@ fn addCaseConfig(
const b = self.b;
const error_tracing: bool = tracing: {
if (target.result.os.tag == .windows and case.disable_trace_pdb) break :tracing false;
if (optimize == .Debug) break :tracing true;
if (backend != .llvm) break :tracing true;
if (optimize == .ReleaseSmall) break :tracing false;
+12 -12
View File
@@ -52,24 +52,24 @@ pub fn main(init: std.process.Init) !void {
continue;
}
const src_col_end = std.mem.indexOf(u8, in_line, ": 0x") orelse {
const src_pos_end = std.mem.indexOf(u8, in_line, ": 0x") orelse {
try w.writeAll(in_line);
continue;
};
const src_row_end = std.mem.lastIndexOfScalar(u8, in_line[0..src_col_end], ':') orelse {
try w.writeAll(in_line);
continue;
};
const src_path_end = std.mem.lastIndexOfScalar(u8, in_line[0..src_row_end], ':') orelse {
try w.writeAll(in_line);
continue;
const src_pos_start = b: {
const postfix = ".zig:";
const postfix_index = std.mem.lastIndexOf(u8, in_line[0..src_pos_end], postfix) orelse {
try w.writeAll(in_line);
continue;
};
break :b postfix_index + postfix.len;
};
const addr_end = std.mem.indexOfPos(u8, in_line, src_col_end, " in ") orelse {
const addr_end = std.mem.findPos(u8, in_line, src_pos_end, " in ") orelse {
try w.writeAll(in_line);
continue;
};
const symbol_end = std.mem.indexOfPos(u8, in_line, addr_end, " (") orelse {
const symbol_end = std.mem.findPos(u8, in_line, addr_end, " (") orelse {
try w.writeAll(in_line);
continue;
};
@@ -88,10 +88,10 @@ pub fn main(init: std.process.Init) !void {
//
// ...with that first '_' being replaced by its basename.
const src_path = in_line[0..src_path_end];
const src_path = in_line[0..src_pos_start];
const basename_start = if (std.mem.lastIndexOfAny(u8, src_path, "/\\")) |i| i + 1 else 0;
const symbol_start = addr_end + " in ".len;
try w.writeAll(in_line[basename_start..src_col_end]);
try w.writeAll(in_line[basename_start..src_pos_end]);
try w.writeAll(": [address] in ");
try w.writeAll(in_line[symbol_start..symbol_end]);
try w.writeByte('\n');
+124 -10
View File
@@ -1,4 +1,6 @@
pub fn addCases(cases: *@import("tests.zig").StackTracesContext) void {
const std = @import("std");
pub fn addCases(cases: *@import("tests.zig").StackTracesContext, os: std.Target.Os.Tag) void {
cases.addCase(.{
.name = "simple panic",
.source =
@@ -118,13 +120,13 @@ pub fn addCases(cases: *@import("tests.zig").StackTracesContext) void {
\\ var stack_trace_buf: [8]usize = undefined;
\\ dumpIt(&captureIt(&stack_trace_buf));
\\}
\\fn captureIt(buf: []usize) std.builtin.StackTrace {
\\fn captureIt(buf: []usize) std.debug.StackTrace {
\\ return captureItInner(buf);
\\}
\\fn dumpIt(st: *const std.builtin.StackTrace) void {
\\fn dumpIt(st: *const std.debug.StackTrace) void {
\\ std.debug.dumpStackTrace(st);
\\}
\\fn captureItInner(buf: []usize) std.builtin.StackTrace {
\\fn captureItInner(buf: []usize) std.debug.StackTrace {
\\ return std.debug.captureCurrentStackTrace(.{}, buf);
\\}
\\const std = @import("std");
@@ -159,13 +161,13 @@ pub fn addCases(cases: *@import("tests.zig").StackTracesContext) void {
\\ var stack_trace_buf: [8]usize = undefined;
\\ dumpIt(&captureIt(&stack_trace_buf));
\\}
\\fn captureIt(buf: []usize) std.builtin.StackTrace {
\\fn captureIt(buf: []usize) std.debug.StackTrace {
\\ return captureItInner(buf);
\\}
\\fn dumpIt(st: *const std.builtin.StackTrace) void {
\\fn dumpIt(st: *const std.debug.StackTrace) void {
\\ std.debug.dumpStackTrace(st);
\\}
\\fn captureItInner(buf: []usize) std.builtin.StackTrace {
\\fn captureItInner(buf: []usize) std.debug.StackTrace {
\\ return std.debug.captureCurrentStackTrace(.{}, buf);
\\}
\\const std = @import("std");
@@ -188,13 +190,13 @@ pub fn addCases(cases: *@import("tests.zig").StackTracesContext) void {
\\fn threadMain(stack_trace_buf: []usize) void {
\\ dumpIt(&captureIt(stack_trace_buf));
\\}
\\fn captureIt(buf: []usize) std.builtin.StackTrace {
\\fn captureIt(buf: []usize) std.debug.StackTrace {
\\ return captureItInner(buf);
\\}
\\fn dumpIt(st: *const std.builtin.StackTrace) void {
\\fn dumpIt(st: *const std.debug.StackTrace) void {
\\ std.debug.dumpStackTrace(st);
\\}
\\fn captureItInner(buf: []usize) std.builtin.StackTrace {
\\fn captureItInner(buf: []usize) std.debug.StackTrace {
\\ return std.debug.captureCurrentStackTrace(.{}, buf);
\\}
\\const std = @import("std");
@@ -221,4 +223,116 @@ pub fn addCases(cases: *@import("tests.zig").StackTracesContext) void {
\\
,
});
cases.addCase(.{
.name = "simple inline panic",
.source =
\\pub fn main() void {
\\ foo();
\\}
\\inline fn foo() void {
\\ @panic("oh no");
\\}
\\
,
.unwind = .any,
.expect_panic = true,
.expect = switch (os) {
// LLVM doesn't emit column info in the binary annotations for inlinee callees in PDBs,
// so the first location has only a row.
.windows =>
\\panic: oh no
\\source.zig:5: [address] in foo
\\ @panic("oh no");
\\
\\source.zig:2:8: [address] in main
\\ foo();
\\ ^
\\
,
// On all other platforms, we resolve the innermost inline callee but we don't yet
// resolve the inline callers.
else =>
\\panic: oh no
\\source.zig:5:5: [address] in foo
\\ @panic("oh no");
\\ ^
,
},
.expect_strip = switch (os) {
.windows =>
\\panic: oh no
\\???:?:?: [address] in source.foo
\\???:?:?: [address] in source.main
\\
,
else =>
\\panic: oh no
\\???:?:?: [address] in source.foo
\\
,
},
});
// Make sure all inline calls are resolved and in the right order!
cases.addCase(.{
.name = "nested inline panic",
.source =
\\pub fn main() void {
\\ foo();
\\}
\\inline fn foo() void {
\\ bar();
\\}
\\inline fn bar() void {
\\ baz();
\\}
\\inline fn baz() void {
\\ @panic("oh no");
\\}
\\
,
.unwind = .any,
.expect_panic = true,
// This switch serves a similar purpose as in "inline panic".
.expect = switch (os) {
.windows =>
\\panic: oh no
\\source.zig:11: [address] in baz
\\ @panic("oh no");
\\
\\source.zig:8: [address] in bar
\\ baz();
\\
\\source.zig:5: [address] in foo
\\ bar();
\\
\\source.zig:2:8: [address] in main
\\ foo();
\\ ^
\\
,
else =>
\\panic: oh no
\\source.zig:11:5: [address] in baz
\\ @panic("oh no");
\\ ^
,
},
.expect_strip = switch (os) {
.windows =>
\\panic: oh no
\\???:?:?: [address] in baz
\\???:?:?: [address] in bar
\\???:?:?: [address] in foo
\\???:?:?: [address] in main
\\
,
else =>
\\panic: oh no
\\???:?:?: [address] in baz
\\
,
},
});
}
+20 -2
View File
@@ -12,8 +12,26 @@ pub fn main(init: std.process.Init) void {
var add_addr: usize = undefined;
_ = add(1, 2, &add_addr);
const symbol = di.getSymbol(io, add_addr) catch |err| fatal("failed to get symbol: {t}", .{err});
defer if (symbol.source_location) |sl| std.debug.getDebugInfoAllocator().free(sl.file_name);
const debug_gpa = std.debug.getDebugInfoAllocator();
const symbol_allocator = debug_gpa;
var symbols: std.ArrayList(std.debug.Symbol) = .empty;
defer symbols.deinit(symbol_allocator);
var text_arena: std.heap.ArenaAllocator = .init(debug_gpa);
defer text_arena.deinit();
di.getSymbols(
io,
symbol_allocator,
text_arena.allocator(),
add_addr,
false,
&symbols,
) catch |err| fatal("failed to get symbol: {t}", .{err});
if (symbols.items.len != 1) fatal("expected 1 symbol, found {}", .{symbols.items.len});
const symbol = symbols.items[0];
if (symbol.name == null) fatal("failed to resolve symbol name", .{});
if (symbol.compile_unit_name == null) fatal("failed to resolve compile unit", .{});
+112 -22
View File
@@ -1989,44 +1989,85 @@ const c_abi_targets = blk: {
};
};
/// For stack trace tests, we only test native, because external executors are pretty unreliable at
/// stack tracing. However, if there's a 32-bit equivalent target which the host can trivially run,
/// we may as well at least test that!
fn nativeAndCompatible32bit(b: *std.Build, skip_non_native: bool) []const std.Build.ResolvedTarget {
fn compatible32bitArch(b: *std.Build) ?std.Target.Cpu.Arch {
const host = b.graph.host.result;
const only_native = (&b.graph.host)[0..1];
if (skip_non_native) return only_native;
const arch32: std.Target.Cpu.Arch = switch (host.os.tag) {
return switch (host.os.tag) {
.windows => switch (host.cpu.arch) {
.x86_64 => .x86,
.aarch64 => .thumb,
.aarch64_be => .thumbeb,
else => return only_native,
else => null,
},
.freebsd => switch (host.cpu.arch) {
.aarch64 => .arm,
.aarch64_be => .armeb,
else => return only_native,
else => null,
},
.linux, .netbsd => switch (host.cpu.arch) {
.x86_64 => .x86,
.aarch64 => .arm,
.aarch64_be => .armeb,
else => return only_native,
else => null,
},
else => return only_native,
else => null,
};
}
/// For stack trace tests, we only test native by default, because external executors are pretty
/// unreliable at stack tracing. However, if there's a 32-bit equivalent target which the host can
/// trivially run, we may as well at least test that!
fn nativeAndCompatible32bit(b: *std.Build, skip_non_native: bool) []const std.Build.ResolvedTarget {
const host = b.graph.host.result;
const only_native = (&b.graph.host)[0..1];
if (skip_non_native) return only_native;
const arch32 = compatible32bitArch(b) orelse return only_native;
return b.graph.arena.dupe(std.Build.ResolvedTarget, &.{
b.graph.host,
b.resolveTargetQuery(.{ .cpu_arch = arch32, .os_tag = host.os.tag }),
}) catch @panic("OOM");
}
fn wineAndCompatible32bit(b: *std.Build, skip_non_native: bool) []const std.Build.ResolvedTarget {
var targets: std.ArrayList(std.Build.ResolvedTarget) = .empty;
const host = b.graph.host.result;
targets.append(b.graph.arena, b.resolveTargetQuery(.{
.cpu_arch = host.cpu.arch,
.os_tag = .windows,
})) catch @panic("OOM");
if (!skip_non_native) {
if (compatible32bitArch(b)) |arch| {
targets.append(b.graph.arena, b.resolveTargetQuery(.{
.cpu_arch = arch,
.os_tag = .windows,
})) catch @panic("OOM");
}
}
return targets.toOwnedSlice(b.graph.arena) catch @panic("OOM");
}
fn darlingTargets(b: *std.Build) []const std.Build.ResolvedTarget {
var targets: std.ArrayList(std.Build.ResolvedTarget) = .empty;
const host = b.graph.host.result;
targets.append(b.graph.arena, b.resolveTargetQuery(.{
.cpu_arch = host.cpu.arch,
.os_tag = .macos,
})) catch @panic("OOM");
return targets.toOwnedSlice(b.graph.arena) catch @panic("OOM");
}
pub fn addStackTraceTests(
b: *std.Build,
test_filters: []const []const u8,
skip_non_native: bool,
) *Step {
const step = b.step("test-stack-traces", "Run the stack trace tests");
const convert_exe = b.addExecutable(.{
.name = "convert-stack-trace",
.root_module = b.createModule(.{
@@ -2036,19 +2077,41 @@ pub fn addStackTraceTests(
}),
});
const cases = b.allocator.create(StackTracesContext) catch @panic("OOM");
cases.* = .{
const host_cases = b.allocator.create(StackTracesContext) catch @panic("OOM");
host_cases.* = .{
.b = b,
.step = b.step("test-stack-traces", "Run the stack trace tests"),
.step = step,
.test_filters = test_filters,
.targets = nativeAndCompatible32bit(b, skip_non_native),
.convert_exe = convert_exe,
};
stack_traces.addCases(host_cases, b.graph.host.result.os.tag);
stack_traces.addCases(cases);
if (b.enable_wine) {
const wine_cases = b.allocator.create(StackTracesContext) catch @panic("OOM");
wine_cases.* = .{
.b = b,
.step = step,
.test_filters = test_filters,
.targets = wineAndCompatible32bit(b, skip_non_native),
.convert_exe = convert_exe,
};
stack_traces.addCases(wine_cases, .windows);
}
return cases.step;
if (b.enable_darling) {
const darling_cases = b.allocator.create(StackTracesContext) catch @panic("OOM");
darling_cases.* = .{
.b = b,
.step = step,
.test_filters = test_filters,
.targets = darlingTargets(b),
.convert_exe = convert_exe,
};
stack_traces.addCases(darling_cases, .macos);
}
return step;
}
pub fn addErrorTraceTests(
@@ -2057,6 +2120,8 @@ pub fn addErrorTraceTests(
optimize_modes: []const OptimizeMode,
skip_non_native: bool,
) *Step {
const step = b.step("test-error-traces", "Run the error trace tests");
const convert_exe = b.addExecutable(.{
.name = "convert-stack-trace",
.root_module = b.createModule(.{
@@ -2066,19 +2131,44 @@ pub fn addErrorTraceTests(
}),
});
const cases = b.allocator.create(ErrorTracesContext) catch @panic("OOM");
cases.* = .{
const host_cases = b.allocator.create(ErrorTracesContext) catch @panic("OOM");
host_cases.* = .{
.b = b,
.step = b.step("test-error-traces", "Run the error trace tests"),
.step = step,
.test_filters = test_filters,
.targets = nativeAndCompatible32bit(b, skip_non_native),
.optimize_modes = optimize_modes,
.convert_exe = convert_exe,
};
error_traces.addCases(host_cases, b.graph.host.result.os.tag);
error_traces.addCases(cases);
if (b.enable_wine) {
const wine_cases = b.allocator.create(ErrorTracesContext) catch @panic("OOM");
wine_cases.* = .{
.b = b,
.step = step,
.test_filters = test_filters,
.targets = wineAndCompatible32bit(b, skip_non_native),
.optimize_modes = optimize_modes,
.convert_exe = convert_exe,
};
error_traces.addCases(wine_cases, .windows);
}
return cases.step;
if (b.enable_darling) {
const darling_cases = b.allocator.create(ErrorTracesContext) catch @panic("OOM");
darling_cases.* = .{
.b = b,
.step = step,
.test_filters = test_filters,
.targets = darlingTargets(b),
.optimize_modes = optimize_modes,
.convert_exe = convert_exe,
};
error_traces.addCases(darling_cases, .macos);
}
return step;
}
fn compilerHasPackageManager(b: *std.Build) bool {