Sema: push to error trace when returning from inline function

There is no reason `inline fn`s should not be subject to error tracing:
they are still functions! So, push to the error trace when we return
from one, and add a test checking that inline functions do appear in
error traces.
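
For illustration, here is a program of the same shape as the test case added below; with this change, the `foo` frame shows up in the error return trace even though `foo` is `inline`:

    pub fn main() !void {
        try foo();
    }
    inline fn foo() !void {
        try bar();
    }
    fn bar() !void {
        return error.ThisIsSoSad;
    }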

This also changes how we emit error trace pushes: we no longer duplicate
the AIR `ret` instruction in the "error" and "non-error" code paths. I
suspect this will lead to slightly better unoptimized codegen, but I may
be wrong---I'll take some performance measurements before I merge this.
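
As a rough sketch of the intent in source-level terms (this is not the actual AIR; `returnError` stands in for the error-trace builtin the compiler calls):

    // Old shape: the return was duplicated into both branches of the error check.
    fn oldShape(is_non_err: bool, value: anyerror!u32) anyerror!u32 {
        if (is_non_err) {
            return value;
        } else {
            returnError(); // push a frame onto the error return trace
            return value;
        }
    }

    // New shape: conditionally push the trace frame, then emit a single return.
    fn newShape(is_non_err: bool, value: anyerror!u32) anyerror!u32 {
        if (!is_non_err) returnError();
        return value;
    }

    // Hypothetical stand-in for the `returnError` builtin.
    fn returnError() void {}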
Matthew Lugg
2026-03-20 22:30:38 +00:00
committed by mlugg
parent a38c6bbcc4
commit 240e5eab0e
4 changed files with 146 additions and 68 deletions
@@ -18034,28 +18034,24 @@ fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!voi
return sema.analyzeRet(block, operand, src, block.src(.{ .node_offset_return_operand = inst_data.src_node }));
}
if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) {
if (sema.wantErrorReturnTracing()) {
const is_non_err = try sema.analyzePtrIsNonErr(block, src, ret_ptr);
return sema.retWithErrTracing(block, src, is_non_err, .ret_load, ret_ptr);
try sema.maybePushErrorTrace(block, src, is_non_err);
}
_ = try block.addUnOp(.ret_load, ret_ptr);
}
fn retWithErrTracing(
fn maybePushErrorTrace(
sema: *Sema,
block: *Block,
parent_block: *Block,
src: LazySrcLoc,
is_non_err: Air.Inst.Ref,
ret_tag: Air.Inst.Tag,
operand: Air.Inst.Ref,
) CompileError!void {
const pt = sema.pt;
const need_check = switch (is_non_err) {
.bool_true => {
_ = try block.addUnOp(ret_tag, operand);
return;
},
.bool_true => return,
.bool_false => false,
else => true,
};
@@ -18068,27 +18064,44 @@ fn retWithErrTracing(
const return_err_fn = Air.internedToRef(try sema.getBuiltin(src, .returnError));
if (!need_check) {
try sema.callBuiltin(block, src, return_err_fn, .never_tail, &.{}, .@"error return");
_ = try block.addUnOp(ret_tag, operand);
try sema.callBuiltin(parent_block, src, return_err_fn, .never_tail, &.{}, .@"error return");
return;
}
var then_block = block.makeSubBlock();
defer then_block.instructions.deinit(gpa);
_ = try then_block.addUnOp(ret_tag, operand);
var err_block = parent_block.makeSubBlock();
defer err_block.instructions.deinit(gpa);
try sema.callBuiltin(&err_block, src, return_err_fn, .never_tail, &.{}, .@"error return");
var else_block = block.makeSubBlock();
defer else_block.instructions.deinit(gpa);
try sema.callBuiltin(&else_block, src, return_err_fn, .never_tail, &.{}, .@"error return");
_ = try else_block.addUnOp(ret_tag, operand);
try parent_block.instructions.ensureUnusedCapacity(gpa, 1);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).@"struct".fields.len +
then_block.instructions.items.len + else_block.instructions.items.len +
@typeInfo(Air.Block).@"struct".fields.len + 1);
try sema.air_instructions.ensureUnusedCapacity(gpa, 4);
try sema.air_extra.ensureUnusedCapacity(
gpa,
@typeInfo(Air.Block).@"struct".fields.len +
1 + // the main block contains only the `cond_br`
@typeInfo(Air.CondBr).@"struct".fields.len +
1 + // the non-error branch contains only a `br`
err_block.instructions.items.len + 1, // the error branch contains the `returnError` call and a `br`
);
const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
const cond_br_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len + 1);
const then_br_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len + 2);
const else_br_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len + 3);
const block_payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = 1 });
sema.air_extra.appendAssumeCapacity(@intFromEnum(cond_br_inst));
sema.air_instructions.appendAssumeCapacity(.{
.tag = .block,
.data = .{ .ty_pl = .{
.ty = .void_type,
.payload = block_payload,
} },
});
const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(then_block.instructions.items.len),
.else_body_len = @intCast(else_block.instructions.items.len),
.then_body_len = 1,
.else_body_len = @intCast(err_block.instructions.items.len + 1),
.branch_hints = .{
// Weight against error branch.
.true = .likely,
@@ -18098,19 +18111,33 @@ fn retWithErrTracing(
.else_cov = .none,
},
});
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(then_block.instructions.items));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_block.instructions.items));
_ = try block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
sema.air_extra.appendAssumeCapacity(@intFromEnum(then_br_inst));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(err_block.instructions.items));
sema.air_extra.appendAssumeCapacity(@intFromEnum(else_br_inst));
sema.air_instructions.appendAssumeCapacity(.{
.tag = .cond_br,
.data = .{ .pl_op = .{
.operand = is_non_err,
.payload = cond_br_payload,
} } });
} },
});
const br_inst_data: Air.Inst = .{
.tag = .br,
.data = .{ .br = .{
.block_inst = block_inst,
.operand = .void_value,
} },
};
sema.air_instructions.appendAssumeCapacity(br_inst_data); // then_br_inst
sema.air_instructions.appendAssumeCapacity(br_inst_data); // else_br_inst
parent_block.instructions.appendAssumeCapacity(block_inst);
}
fn wantErrorReturnTracing(sema: *Sema, fn_ret_ty: Type) bool {
const pt = sema.pt;
const zcu = pt.zcu;
return fn_ret_ty.isError(zcu) and zcu.comp.config.any_error_tracing;
fn wantErrorReturnTracing(sema: *Sema) bool {
const zcu = sema.pt.zcu;
return sema.fn_ret_ty.isError(zcu) and zcu.comp.config.any_error_tracing;
}
fn zirSaveErrRetIndex(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@@ -18251,9 +18278,11 @@ fn analyzeRet(
else => |e| return e,
};
if (block.inlining) |inlining| {
assert(!inlining.is_generic_instantiation); // can't `return` in a generic param/ret ty expr
if (block.isComptime()) {
const inlining = block.inlining orelse {
return sema.fail(block, src, "function called at runtime cannot return value at comptime", .{});
};
assert(!inlining.is_generic_instantiation); // can't `return` in a generic param/ret ty expr
const ret_val = try sema.resolveConstValue(block, operand_src, operand, null);
inlining.comptime_result = operand;
@@ -18262,36 +18291,31 @@ fn analyzeRet(
}
return error.ComptimeReturn;
}
// We are inlining a function call; rewrite the `ret` as a `break`.
const br_inst = try block.addBr(inlining.merges.block_inst, operand);
try inlining.merges.results.append(sema.gpa, operand);
try inlining.merges.br_list.append(sema.gpa, br_inst.toIndex().?);
try inlining.merges.src_locs.append(sema.gpa, operand_src);
return;
} else if (block.isComptime()) {
return sema.fail(block, src, "function called at runtime cannot return value at comptime", .{});
} else if (sema.func_is_naked) {
const msg = msg: {
if (block.inlining == null and sema.func_is_naked) return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "cannot return from naked function", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(src, msg, "can only return using assembly", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
});
try sema.validateRuntimeValue(block, operand_src, operand);
const air_tag: Air.Inst.Tag = if (block.wantSafety()) .ret_safe else .ret;
if (sema.wantErrorReturnTracing(sema.fn_ret_ty)) {
// Avoid adding a frame to the error return trace in case the value is comptime-known
// to be not an error.
if (sema.wantErrorReturnTracing()) {
const is_non_err = try sema.analyzeIsNonErr(block, operand_src, operand);
return sema.retWithErrTracing(block, src, is_non_err, air_tag, operand);
try sema.maybePushErrorTrace(block, src, is_non_err);
}
_ = try block.addUnOp(air_tag, operand);
if (block.inlining) |inlining| {
assert(!inlining.is_generic_instantiation); // can't `return` in a generic param/ret ty expr
const br_inst = try block.addBr(inlining.merges.block_inst, operand);
try inlining.merges.results.append(sema.gpa, operand);
try inlining.merges.br_list.append(sema.gpa, br_inst.toIndex().?);
try inlining.merges.src_locs.append(sema.gpa, operand_src);
} else {
try sema.validateRuntimeValue(block, operand_src, operand);
const ret_tag: Air.Inst.Tag = if (block.wantSafety()) .ret_safe else .ret;
_ = try block.addUnOp(ret_tag, operand);
}
}
fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
@@ -449,4 +449,54 @@ pub fn addCases(cases: *@import("tests.zig").ErrorTracesContext) void {
.{ .aarch64, .macos },
},
});
cases.addCase(.{
.name = "trace through inline call",
.source =
\\pub fn main() !void {
\\ try foo();
\\}
\\inline fn foo() !void {
\\ try bar();
\\}
\\fn bar() !void {
\\ return error.ThisIsSoSad;
\\}
,
.expect_error = "ThisIsSoSad",
.expect_trace =
\\source.zig:8:5: [address] in bar
\\ return error.ThisIsSoSad;
\\ ^
\\source.zig:5:5: [address] in foo
\\ try bar();
\\ ^
\\source.zig:2:5: [address] in main
\\ try foo();
\\ ^
,
.disable_trace_optimized = &.{
.{ .x86_64, .freebsd },
.{ .x86_64, .netbsd },
.{ .x86_64, .linux },
.{ .x86, .linux },
.{ .aarch64, .freebsd },
.{ .aarch64, .netbsd },
.{ .aarch64, .linux },
.{ .loongarch64, .linux },
.{ .powerpc64le, .linux },
.{ .riscv64, .linux },
.{ .s390x, .linux },
.{ .x86_64, .openbsd },
.{ .x86_64, .windows },
.{ .x86, .windows },
.{ .x86_64, .macos },
.{ .aarch64, .macos },
},
// TODO: the standard library has a bug in PDB parsing where given an address corresponding
// to an inline call, the frame we see will be for the *caller*, not the *callee*. As a
// result this test gives bogus results on Windows right now.
// This is a part of https://codeberg.org/ziglang/zig/issues/30847.
.disable_trace_pdb = true,
});
}
@@ -17,6 +17,8 @@ pub const Case = struct {
/// LLVM ReleaseSmall builds always have the trace disabled regardless of this field, because it
/// seems that LLVM is particularly good at optimizing traces away in those.
disable_trace_optimized: []const DisableConfig = &.{},
/// If `true` then we will not test the error trace on Windows due to bugs in PDB handling.
disable_trace_pdb: bool = false,
pub const DisableConfig = struct { std.Target.Cpu.Arch, std.Target.Os.Tag };
pub const Backend = enum { llvm, selfhosted };
@@ -60,6 +62,7 @@ fn addCaseConfig(
const b = self.b;
const error_tracing: bool = tracing: {
if (target.result.os.tag == .windows and case.disable_trace_pdb) break :tracing false;
if (optimize == .Debug) break :tracing true;
if (backend != .llvm) break :tracing true;
if (optimize == .ReleaseSmall) break :tracing false;
@@ -126,6 +126,7 @@ pub fn addCase(self: *LlvmIr, case: TestCase) void {
.exact => |e| .{ .expected_exact = e },
});
check.setName(name);
check.max_bytes = 64 * 1024 * 1024; // allow fairly big LLVM IR files
self.root_step.dependOn(&check.step);
}