mlugg
2026-01-11 19:44:24 +01:00
30 changed files with 5263 additions and 5076 deletions
+13 -4
View File
@@ -2461,7 +2461,8 @@ or
{#header_open|Tagged union#}
<p>Unions can be declared with an enum tag type.
This turns the union into a <em>tagged</em> union, which makes it eligible
to use with {#link|switch#} expressions.
to use with {#link|switch#} expressions. When switching on tagged unions,
the tag value can be obtained using an additional capture.
Tagged unions coerce to their tag type: {#link|Type Coercion: Unions and Enums#}.
</p>
{#code|test_tagged_union.zig#}
@@ -2594,6 +2595,13 @@ or
{#header_close#}
{#header_open|Switching on Errors#}
<p>
When switching on errors, some special cases are allowed to simplify generic programming patterns:
</p>
{#code|test_switch_on_errors.zig#}
{#header_close#}
{#header_open|Labeled switch#}
<p>
When a switch statement is labeled, it can be referenced from a
@@ -2659,12 +2667,13 @@ or
{#code|test_inline_else.zig#}
<p>
When using an inline prong switching on an union an additional
capture can be used to obtain the union's enum tag value.
When using an inline prong switching on an union an additional capture
can be used to obtain the union's enum tag value at comptime, even though
its payload might only be known at runtime.
</p>
{#code|test_inline_switch_union_tag.zig#}
{#see_also|inline while|inline for#}
{#see_also|inline while|inline for|Tagged union#}
{#header_close#}
{#header_close#}
+55
View File
@@ -0,0 +1,55 @@
const FileOpenError0 = error{
AccessDenied,
OutOfMemory,
FileNotFound,
};
fn openFile0() FileOpenError0 {
return error.OutOfMemory;
}
test "unreachable else prong" {
switch (openFile0()) {
error.AccessDenied, error.FileNotFound => |e| return e,
error.OutOfMemory => {},
// 'openFile0' cannot return any more errors, so an 'else' prong would be
// statically known to be unreachable. Nonetheless, in this case, adding
// one does not raise an "unreachable else prong" compile error:
else => unreachable,
}
// Allowed unreachable else prongs are:
// `else => unreachable,`
// `else => return,`
// `else => |e| return e,` (where `e` is any identifier)
}
const FileOpenError1 = error{
AccessDenied,
SystemResources,
FileNotFound,
};
fn openFile1() FileOpenError1 {
return error.SystemResources;
}
fn openFileGeneric(comptime kind: u1) switch (kind) {
0 => FileOpenError0,
1 => FileOpenError1,
} {
return switch (kind) {
0 => openFile0(),
1 => openFile1(),
};
}
test "comptime unreachable errors not in error set" {
switch (openFileGeneric(1)) {
error.AccessDenied, error.FileNotFound => |e| return e,
error.OutOfMemory => comptime unreachable, // not in `FileOpenError1`!
error.SystemResources => {},
}
}
// test
+8
View File
@@ -18,6 +18,14 @@ test "switch on tagged union" {
.ok => |value| try expect(value == 42),
.not_ok => unreachable,
}
switch (c) {
.ok => |_, tag| {
// Because we're in the '.ok' prong, 'tag' is compile-time known to be '.ok':
comptime std.debug.assert(tag == .ok);
},
.not_ok => unreachable,
}
}
test "get tag type" {
+2 -2
View File
@@ -114,7 +114,7 @@ fn writeHeader(
if (typeflag == .symbolic_link)
header.setLinkname(link_name) catch |err| switch (err) {
error.NameTooLong => try w.writeExtendedHeader(.gnu_long_link, &.{link_name}),
else => return err,
else => |e| return e,
};
try header.write(w.underlying_writer);
}
@@ -131,7 +131,7 @@ fn setPath(w: *Writer, header: *Header, sub_path: []const u8) Error!void {
&.{ w.prefix, "/", sub_path };
try w.writeExtendedHeader(.gnu_long_name, buffers);
},
else => return err,
else => |e| return e,
};
}
+2 -2
View File
@@ -813,7 +813,7 @@ fn expectEqualDeepInner(comptime T: type, expected: T, actual: T) error{TestExpe
}
},
.array => |_| {
.array => {
if (expected.len != actual.len) {
print("Array len not the same, expected {d}, found {d}\n", .{ expected.len, actual.len });
return error.TestExpectedEqual;
@@ -1187,7 +1187,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
return error.MemoryLeakDetected;
}
},
else => return err,
else => |e| return e,
}
}
}
+1109 -1128
View File
@@ -115,7 +115,6 @@ fn setExtra(astgen: *AstGen, index: usize, extra: anytype) void {
Zir.Inst.Call.Flags,
Zir.Inst.BuiltinCall.Flags,
Zir.Inst.SwitchBlock.Bits,
Zir.Inst.SwitchBlockErrUnion.Bits,
Zir.Inst.FuncFancy.Bits,
Zir.Inst.Param.Type,
Zir.Inst.Func.RetTy,
@@ -858,11 +857,11 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
no_switch_on_err: {
const error_token = if_full.error_token orelse break :no_switch_on_err;
const else_node = if_full.ast.else_expr.unwrap() orelse break :no_switch_on_err;
const full_switch = tree.fullSwitch(else_node) orelse break :no_switch_on_err;
if (full_switch.label_token != null) break :no_switch_on_err;
if (tree.nodeTag(full_switch.ast.condition) != .identifier) break :no_switch_on_err;
if (!mem.eql(u8, tree.tokenSlice(error_token), tree.tokenSlice(tree.nodeMainToken(full_switch.ast.condition)))) break :no_switch_on_err;
return switchExprErrUnion(gz, scope, ri.br(), node, .@"if");
const switch_full = tree.fullSwitch(else_node) orelse break :no_switch_on_err;
if (switch_full.label_token != null) break :no_switch_on_err; // handled in `ifExpr`
if (tree.nodeTag(switch_full.ast.condition) != .identifier) break :no_switch_on_err;
if (!try astgen.tokenIdentEql(error_token, tree.nodeMainToken(switch_full.ast.condition))) break :no_switch_on_err;
return switchExpr(gz, scope, ri.br(), node, switch_full, .{ .@"if" = if_full });
}
return ifExpr(gz, scope, ri.br(), node, if_full);
},
@@ -1024,11 +1023,11 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
null;
no_switch_on_err: {
const capture_token = payload_token orelse break :no_switch_on_err;
const full_switch = tree.fullSwitch(tree.nodeData(node).node_and_node[1]) orelse break :no_switch_on_err;
if (full_switch.label_token != null) break :no_switch_on_err;
if (tree.nodeTag(full_switch.ast.condition) != .identifier) break :no_switch_on_err;
if (!mem.eql(u8, tree.tokenSlice(capture_token), tree.tokenSlice(tree.nodeMainToken(full_switch.ast.condition)))) break :no_switch_on_err;
return switchExprErrUnion(gz, scope, ri.br(), node, .@"catch");
const switch_full = tree.fullSwitch(tree.nodeData(node).node_and_node[1]) orelse break :no_switch_on_err;
if (switch_full.label_token != null) break :no_switch_on_err; // handled in `orelseCatchExpr`
if (tree.nodeTag(switch_full.ast.condition) != .identifier) break :no_switch_on_err;
if (!try astgen.tokenIdentEql(capture_token, tree.nodeMainToken(switch_full.ast.condition))) break :no_switch_on_err;
return switchExpr(gz, scope, ri.br(), node, switch_full, .@"catch");
}
switch (ri.rl) {
.ref, .ref_coerced_ty => return orelseCatchExpr(
@@ -1108,7 +1107,7 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
.error_set_decl => return errorSetDecl(gz, ri, node),
.array_access => return arrayAccess(gz, scope, ri, node),
.@"comptime" => return comptimeExprAst(gz, scope, ri, node),
.@"switch", .switch_comma => return switchExpr(gz, scope, ri.br(), node, tree.fullSwitch(node).?),
.@"switch", .switch_comma => return switchExpr(gz, scope, ri.br(), node, tree.fullSwitch(node).?, .none),
.@"nosuspend" => return nosuspendExpr(gz, scope, ri, node),
.@"suspend" => return suspendExpr(gz, scope, node),
@@ -2161,93 +2160,90 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn
const opt_break_label, const opt_rhs = tree.nodeData(node).opt_token_and_opt_node;
// Look for the label in the scope.
var scope = parent_scope;
while (true) {
switch (scope.tag) {
.gen_zir => {
const block_gz = scope.cast(GenZir).?;
find_scope: switch (parent_scope.unwrap()) {
.gen_zir => |gen_zir| {
const scope = &gen_zir.base;
if (block_gz.cur_defer_node.unwrap()) |cur_defer_node| {
// We are breaking out of a `defer` block.
return astgen.failNodeNotes(node, "cannot break out of defer expression", .{}, &.{
try astgen.errNoteNode(
cur_defer_node,
"defer expression here",
.{},
),
});
}
if (gen_zir.cur_defer_node.unwrap()) |cur_defer_node| {
// We are breaking out of a `defer` block.
return astgen.failNodeNotes(node, "cannot break out of defer expression", .{}, &.{
try astgen.errNoteNode(
cur_defer_node,
"defer expression here",
.{},
),
});
}
const block_inst = blk: {
if (opt_break_label.unwrap()) |break_label| {
if (block_gz.label) |*label| {
if (try astgen.tokenIdentEql(label.token, break_label)) {
label.used = true;
break :blk label.block_inst;
}
}
} else if (block_gz.break_block.unwrap()) |i| {
break :blk i;
if (opt_break_label.unwrap()) |break_label| labeled: {
if (gen_zir.label) |*label| {
if (try astgen.tokenIdentEql(label.token, break_label)) {
label.used = true;
break :labeled;
}
// If not the target, start over with the parent
scope = block_gz.parent;
continue;
};
// If we made it here, this block is the target of the break expr
}
// gz without or with different label, continue to parent scopes.
continue :find_scope gen_zir.parent.unwrap();
} else if (!gen_zir.allow_unlabeled_control_flow) {
// This `break` is unlabeled and the gz we've found doesn't allow
// unlabeled control flow. Continue to parent scopes.
continue :find_scope gen_zir.parent.unwrap();
}
const break_tag: Zir.Inst.Tag = if (block_gz.is_inline)
.break_inline
else
.@"break";
const break_tag: Zir.Inst.Tag = if (gen_zir.is_inline)
.break_inline
else
.@"break";
const rhs = opt_rhs.unwrap() orelse {
_ = try rvalue(parent_gz, block_gz.break_result_info, .void_value, node);
try genDefers(parent_gz, scope, parent_scope, .normal_only);
// As our last action before the break, "pop" the error trace if needed
if (!block_gz.is_comptime)
_ = try parent_gz.addRestoreErrRetIndex(.{ .block = block_inst }, .always, node);
_ = try parent_gz.addBreak(break_tag, block_inst, .void_value);
return Zir.Inst.Ref.unreachable_value;
};
const operand = try reachableExpr(parent_gz, parent_scope, block_gz.break_result_info, rhs, node);
if (opt_rhs.unwrap()) |rhs| {
// We have a `break` operand.
const operand = try reachableExpr(parent_gz, parent_scope, gen_zir.break_result_info, rhs, node);
try genDefers(parent_gz, scope, parent_scope, .normal_only);
// As our last action before the break, "pop" the error trace if needed
if (!block_gz.is_comptime)
try restoreErrRetIndex(parent_gz, .{ .block = block_inst }, block_gz.break_result_info, rhs, operand);
switch (block_gz.break_result_info.rl) {
if (!gen_zir.is_comptime) {
try restoreErrRetIndex(parent_gz, .{ .block = gen_zir.break_target }, gen_zir.break_result_info, rhs, operand);
}
switch (gen_zir.break_result_info.rl) {
.ptr => {
// In this case we don't have any mechanism to intercept it;
// we assume the result location is written, and we break with void.
_ = try parent_gz.addBreak(break_tag, block_inst, .void_value);
_ = try parent_gz.addBreak(break_tag, gen_zir.break_target, .void_value);
},
.discard => {
_ = try parent_gz.addBreak(break_tag, block_inst, .void_value);
_ = try parent_gz.addBreak(break_tag, gen_zir.break_target, .void_value);
},
else => {
_ = try parent_gz.addBreakWithSrcNode(break_tag, block_inst, operand, rhs);
_ = try parent_gz.addBreakWithSrcNode(break_tag, gen_zir.break_target, operand, rhs);
},
}
return Zir.Inst.Ref.unreachable_value;
},
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
.namespace => break,
.defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
.top => unreachable,
}
}
if (opt_break_label.unwrap()) |break_label| {
const label_name = try astgen.identifierTokenString(break_label);
return astgen.failTok(break_label, "label not found: '{s}'", .{label_name});
} else {
return astgen.failNode(node, "break expression outside loop", .{});
return .unreachable_value;
} else {
_ = try rvalue(parent_gz, gen_zir.break_result_info, .void_value, node);
try genDefers(parent_gz, scope, parent_scope, .normal_only);
// As our last action before the break, "pop" the error trace if needed
if (!gen_zir.is_comptime)
_ = try parent_gz.addRestoreErrRetIndex(.{ .block = gen_zir.break_target }, .always, node);
_ = try parent_gz.addBreak(break_tag, gen_zir.break_target, .void_value);
return .unreachable_value;
}
},
.local_val => |local_val| continue :find_scope local_val.parent.unwrap(),
.local_ptr => |local_ptr| continue :find_scope local_ptr.parent.unwrap(),
.defer_normal, .defer_error => |defer_scope| continue :find_scope defer_scope.parent.unwrap(),
.namespace => {
if (opt_break_label.unwrap()) |break_label| {
const label_name = try astgen.identifierTokenString(break_label);
return astgen.failTok(break_label, "label not found: '{s}'", .{label_name});
} else {
return astgen.failNode(node, "break expression outside loop", .{});
}
},
.top => unreachable,
}
}
@@ -2261,101 +2257,104 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index)
}
// Look for the label in the scope.
var scope = parent_scope;
while (true) {
switch (scope.tag) {
.gen_zir => {
const gen_zir = scope.cast(GenZir).?;
find_scope: switch (parent_scope.unwrap()) {
.gen_zir => |gen_zir| {
const scope = &gen_zir.base;
if (gen_zir.cur_defer_node.unwrap()) |cur_defer_node| {
return astgen.failNodeNotes(node, "cannot continue out of defer expression", .{}, &.{
try astgen.errNoteNode(
cur_defer_node,
"defer expression here",
.{},
),
});
}
const continue_block = gen_zir.continue_block.unwrap() orelse {
scope = gen_zir.parent;
continue;
};
if (opt_break_label.unwrap()) |break_label| blk: {
if (gen_zir.label) |*label| {
if (try astgen.tokenIdentEql(label.token, break_label)) {
const maybe_switch_tag = astgen.instructions.items(.tag)[@intFromEnum(label.block_inst)];
if (opt_rhs != .none) switch (maybe_switch_tag) {
.switch_block, .switch_block_ref => {},
else => return astgen.failNode(node, "cannot continue loop with operand", .{}),
} else switch (maybe_switch_tag) {
.switch_block, .switch_block_ref => return astgen.failNode(node, "cannot continue switch without operand", .{}),
else => {},
}
if (gen_zir.cur_defer_node.unwrap()) |cur_defer_node| {
return astgen.failNodeNotes(node, "cannot continue out of defer expression", .{}, &.{
try astgen.errNoteNode(
cur_defer_node,
"defer expression here",
.{},
),
});
}
label.used = true;
label.used_for_continue = true;
break :blk;
if (opt_break_label.unwrap()) |break_label| labeled: {
if (gen_zir.label) |*label| {
if (try astgen.tokenIdentEql(label.token, break_label)) {
switch (gen_zir.continue_target) {
.none => {
return astgen.failNode(node, "continue outside of loop or labeled switch expression", .{});
},
.@"break" => if (opt_rhs != .none) {
return astgen.failNode(node, "cannot continue loop with operand", .{});
},
.switch_continue => if (opt_rhs == .none) {
return astgen.failNode(node, "cannot continue switch without operand", .{});
},
}
}
// found continue but either it has a different label, or no label
scope = gen_zir.parent;
continue;
} else if (gen_zir.label) |label| {
// This `continue` is unlabeled. If the gz we've found corresponds to a labeled
// `switch`, ignore it and continue to parent scopes.
switch (astgen.instructions.items(.tag)[@intFromEnum(label.block_inst)]) {
.switch_block, .switch_block_ref => {
scope = gen_zir.parent;
continue;
},
else => {},
label.used = true;
label.used_for_continue = true;
break :labeled;
}
}
// gz without or with different label, continue to parent scopes.
continue :find_scope gen_zir.parent.unwrap();
} else if (gen_zir.allow_unlabeled_control_flow) {
// This `continue` is unlabeled. If the gz we've found doesn't
// provide a `continue` target or corresponds to a labeled
// `switch`, ignore it and continue to parent scopes.
switch (gen_zir.continue_target) {
.none, .switch_continue => {
continue :find_scope gen_zir.parent.unwrap();
},
.@"break" => {},
}
} else {
// We don't have a break label and the gz we found doesn't allow
// unlabeled control flow, so we continue to its parent scopes.
continue :find_scope gen_zir.parent.unwrap();
}
if (opt_rhs.unwrap()) |rhs| {
// We need to figure out the result info to use.
// The type should match
switch (gen_zir.continue_target) {
.none => unreachable, // should have failed or continued to parent scopes by now
.@"break" => |block| {
try genDefers(parent_gz, scope, parent_scope, .normal_only);
const break_tag: Zir.Inst.Tag = if (gen_zir.is_inline)
.break_inline
else
.@"break";
if (break_tag == .break_inline) {
_ = try parent_gz.addUnNode(.check_comptime_control_flow, block.toRef(), node);
}
// As our last action before the continue, "pop" the error trace if needed
if (!gen_zir.is_comptime) {
_ = try parent_gz.addRestoreErrRetIndex(.{ .block = block }, .always, node);
}
_ = try parent_gz.addBreak(break_tag, block, .void_value);
return .unreachable_value;
},
.switch_continue => |switch_block| {
const rhs = opt_rhs.unwrap().?; // checked above
const operand = try reachableExpr(parent_gz, parent_scope, gen_zir.continue_result_info, rhs, node);
try genDefers(parent_gz, scope, parent_scope, .normal_only);
// As our last action before the continue, "pop" the error trace if needed
if (!gen_zir.is_comptime)
_ = try parent_gz.addRestoreErrRetIndex(.{ .block = continue_block }, .always, node);
_ = try parent_gz.addBreakWithSrcNode(.switch_continue, continue_block, operand, rhs);
return Zir.Inst.Ref.unreachable_value;
}
try genDefers(parent_gz, scope, parent_scope, .normal_only);
const break_tag: Zir.Inst.Tag = if (gen_zir.is_inline)
.break_inline
else
.@"break";
if (break_tag == .break_inline) {
_ = try parent_gz.addUnNode(.check_comptime_control_flow, continue_block.toRef(), node);
}
// As our last action before the continue, "pop" the error trace if needed
if (!gen_zir.is_comptime)
_ = try parent_gz.addRestoreErrRetIndex(.{ .block = continue_block }, .always, node);
_ = try parent_gz.addBreak(break_tag, continue_block, .void_value);
return Zir.Inst.Ref.unreachable_value;
},
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
.defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
.namespace => break,
.top => unreachable,
}
}
if (opt_break_label.unwrap()) |break_label| {
const label_name = try astgen.identifierTokenString(break_label);
return astgen.failTok(break_label, "label not found: '{s}'", .{label_name});
} else {
return astgen.failNode(node, "continue expression outside loop", .{});
if (!gen_zir.is_comptime) {
_ = try parent_gz.addRestoreErrRetIndex(.{ .block = switch_block }, .always, node);
}
_ = try parent_gz.addBreakWithSrcNode(.switch_continue, switch_block, operand, rhs);
return .unreachable_value;
},
}
},
.local_val => |local_val| continue :find_scope local_val.parent.unwrap(),
.local_ptr => |local_ptr| continue :find_scope local_ptr.parent.unwrap(),
.defer_normal, .defer_error => |defer_scope| continue :find_scope defer_scope.parent.unwrap(),
.namespace => {
if (opt_break_label.unwrap()) |break_label| {
const label_name = try astgen.identifierTokenString(break_label);
return astgen.failTok(break_label, "label not found: '{s}'", .{label_name});
} else {
return astgen.failNode(node, "continue expression outside loop", .{});
}
},
.top => unreachable,
}
}
@@ -2441,33 +2440,29 @@ fn blockExpr(
fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: Ast.TokenIndex) !void {
// Look for the label in the scope.
var scope = parent_scope;
while (true) {
switch (scope.tag) {
.gen_zir => {
const gen_zir = scope.cast(GenZir).?;
if (gen_zir.label) |prev_label| {
if (try astgen.tokenIdentEql(label, prev_label.token)) {
const label_name = try astgen.identifierTokenString(label);
return astgen.failTokNotes(label, "redefinition of label '{s}'", .{
label_name,
}, &[_]u32{
try astgen.errNoteTok(
prev_label.token,
"previous definition here",
.{},
),
});
}
find_scope: switch (parent_scope.unwrap()) {
.gen_zir => |gen_zir| {
if (gen_zir.label) |prev_label| {
if (try astgen.tokenIdentEql(label, prev_label.token)) {
const label_name = try astgen.identifierTokenString(label);
return astgen.failTokNotes(label, "redefinition of label '{s}'", .{
label_name,
}, &[_]u32{
try astgen.errNoteTok(
prev_label.token,
"previous definition here",
.{},
),
});
}
scope = gen_zir.parent;
},
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
.defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
.namespace => break,
.top => unreachable,
}
}
continue :find_scope gen_zir.parent.unwrap();
},
.local_val => |local_val| continue :find_scope local_val.parent.unwrap(),
.local_ptr => |local_ptr| continue :find_scope local_ptr.parent.unwrap(),
.defer_normal, .defer_error => |defer_scope| continue :find_scope defer_scope.parent.unwrap(),
.namespace => break :find_scope,
.top => unreachable,
}
}
@@ -2509,10 +2504,9 @@ fn labeledBlockExpr(
try gz.instructions.append(astgen.gpa, block_inst);
var block_scope = gz.makeSubBlock(parent_scope);
block_scope.is_inline = force_comptime;
block_scope.label = GenZir.Label{
.token = label_token,
.block_inst = block_inst,
};
block_scope.label = .{ .token = label_token };
block_scope.break_target = block_inst;
block_scope.continue_target = .none;
block_scope.setBreakResultInfo(block_ri);
if (force_comptime) block_scope.is_comptime = true;
defer block_scope.unstack();
@@ -2983,18 +2977,16 @@ fn countDefers(outer_scope: *Scope, inner_scope: *Scope) struct {
var need_err_code = false;
var scope = inner_scope;
while (scope != outer_scope) {
switch (scope.tag) {
.gen_zir => scope = scope.cast(GenZir).?.parent,
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
.defer_normal => {
const defer_scope = scope.cast(Scope.Defer).?;
switch (scope.unwrap()) {
.gen_zir => |gen_zir| scope = gen_zir.parent,
.local_val => |local_val| scope = local_val.parent,
.local_ptr => |local_ptr| scope = local_ptr.parent,
.defer_normal => |defer_scope| {
scope = defer_scope.parent;
have_normal = true;
},
.defer_error => {
const defer_scope = scope.cast(Scope.Defer).?;
.defer_error => |defer_scope| {
scope = defer_scope.parent;
have_err = true;
@@ -3030,17 +3022,15 @@ fn genDefers(
var scope = inner_scope;
while (scope != outer_scope) {
switch (scope.tag) {
.gen_zir => scope = scope.cast(GenZir).?.parent,
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
.defer_normal => {
const defer_scope = scope.cast(Scope.Defer).?;
switch (scope.unwrap()) {
.gen_zir => |gen_zir| scope = gen_zir.parent,
.local_val => |local_val| scope = local_val.parent,
.local_ptr => |local_ptr| scope = local_ptr.parent,
.defer_normal => |defer_scope| {
scope = defer_scope.parent;
try gz.addDefer(defer_scope.index, defer_scope.len);
},
.defer_error => {
const defer_scope = scope.cast(Scope.Defer).?;
.defer_error => |defer_scope| {
scope = defer_scope.parent;
switch (which_ones) {
.both_sans_err => {
@@ -3083,10 +3073,9 @@ fn checkUsed(gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope) InnerError!v
var scope = inner_scope;
while (scope != outer_scope) {
switch (scope.tag) {
.gen_zir => scope = scope.cast(GenZir).?.parent,
.local_val => {
const s = scope.cast(Scope.LocalVal).?;
switch (scope.unwrap()) {
.gen_zir => |gen_zir| scope = gen_zir.parent,
.local_val => |s| {
if (s.used == .none and s.discarded == .none) {
try astgen.appendErrorTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)});
} else if (s.used != .none and s.discarded != .none) {
@@ -3096,8 +3085,7 @@ fn checkUsed(gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope) InnerError!v
}
scope = s.parent;
},
.local_ptr => {
const s = scope.cast(Scope.LocalPtr).?;
.local_ptr => |s| {
if (s.used == .none and s.discarded == .none) {
try astgen.appendErrorTok(s.token_src, "unused {s}", .{@tagName(s.id_cat)});
} else {
@@ -3112,10 +3100,9 @@ fn checkUsed(gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope) InnerError!v
});
}
}
scope = s.parent;
},
.defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
.defer_normal, .defer_error => |defer_scope| scope = defer_scope.parent,
.namespace => unreachable,
.top => unreachable,
}
@@ -3146,14 +3133,7 @@ fn deferStmt(
}
const remapped_err_code: Zir.Inst.Index = @enumFromInt(gz.astgen.instructions.len);
opt_remapped_err_code = remapped_err_code.toOptional();
try gz.astgen.instructions.append(gz.astgen.gpa, .{
.tag = .extended,
.data = .{ .extended = .{
.opcode = .value_placeholder,
.small = undefined,
.operand = undefined,
} },
});
_ = try gz.astgen.appendPlaceholder();
const remapped_err_code_ref = remapped_err_code.toRef();
local_val_scope = .{
.parent = &defer_gen.base,
@@ -4781,13 +4761,11 @@ fn testDecl(
// Local variables, including function parameters.
const name_str_index = try astgen.identAsString(test_name_token);
var s = scope;
var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already
var num_namespaces_out: u32 = 0;
var capturing_namespace: ?*Scope.Namespace = null;
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
find_scope: switch (scope.unwrap()) {
.local_val => |local_val| {
if (local_val.name == name_str_index) {
local_val.used = .fromToken(test_name_token);
return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{
@@ -4798,10 +4776,9 @@ fn testDecl(
}),
});
}
s = local_val.parent;
continue :find_scope local_val.parent.unwrap();
},
.local_ptr => {
const local_ptr = s.cast(Scope.LocalPtr).?;
.local_ptr => |local_ptr| {
if (local_ptr.name == name_str_index) {
local_ptr.used = .fromToken(test_name_token);
return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{
@@ -4812,12 +4789,11 @@ fn testDecl(
}),
});
}
s = local_ptr.parent;
continue :find_scope local_ptr.parent.unwrap();
},
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace => {
const ns = s.cast(Scope.Namespace).?;
.gen_zir => |gen_zir| continue :find_scope gen_zir.parent.unwrap(),
.defer_normal, .defer_error => |defer_scope| continue :find_scope defer_scope.parent.unwrap(),
.namespace => |ns| {
if (ns.decls.get(name_str_index)) |i| {
if (found_already) |f| {
return astgen.failTokNotes(test_name_token, "ambiguous reference", .{}, &.{
@@ -4830,10 +4806,10 @@ fn testDecl(
}
num_namespaces_out += 1;
capturing_namespace = ns;
s = ns.parent;
continue :find_scope ns.parent.unwrap();
},
.top => break,
};
.top => break :find_scope,
}
if (found_already == null) {
const ident_name = try astgen.identifierTokenString(test_name_token);
return astgen.failTok(test_name_token, "use of undeclared identifier '{s}'", .{ident_name});
@@ -6131,7 +6107,30 @@ fn orelseCatchExpr(
break :blk &err_val_scope.base;
};
const else_result = try fullBodyExpr(&else_scope, else_sub_scope, block_scope.break_result_info, rhs, .allow_branch_hint);
const else_result = else_result: {
if (tree.fullSwitch(rhs)) |switch_full| no_switch_on_err: {
if (tree.nodeTag(node) != .@"catch") break :no_switch_on_err;
const catch_token = tree.nodeMainToken(node);
const capture_token = if (tree.tokenTag(catch_token + 1) == .pipe) token: {
break :token catch_token + 2;
} else break :no_switch_on_err;
if (switch_full.label_token == null) break :no_switch_on_err; // must use `switchExpr` with `non_err = .@"if"`
if (tree.nodeTag(switch_full.ast.condition) != .identifier) break :no_switch_on_err;
if (!try astgen.tokenIdentEql(capture_token, tree.nodeMainToken(switch_full.ast.condition))) break :no_switch_on_err;
break :else_result try switchExpr(
&else_scope,
else_sub_scope,
block_scope.break_result_info,
rhs,
switch_full,
.{ .peer_break_target = .{
.block_inst = block,
.block_ri = block_ri,
} },
);
}
break :else_result try fullBodyExpr(&else_scope, else_sub_scope, block_scope.break_result_info, rhs, .allow_branch_hint);
};
if (!else_scope.endsWithNoReturn()) {
// As our last action before the break, "pop" the error trace if needed
if (do_err_trace)
@@ -6484,7 +6483,26 @@ fn ifExpr(
break :s &else_scope.base;
}
};
const else_result = try fullBodyExpr(&else_scope, sub_scope, block_scope.break_result_info, else_node, .allow_branch_hint);
const else_result = else_result: {
if (tree.fullSwitch(else_node)) |switch_full| no_switch_on_err: {
const error_token = if_full.error_token orelse break :no_switch_on_err;
if (switch_full.label_token == null) break :no_switch_on_err; // must use `switchExpr` with `non_err = .@"if"`
if (tree.nodeTag(switch_full.ast.condition) != .identifier) break :no_switch_on_err;
if (!try astgen.tokenIdentEql(error_token, tree.nodeMainToken(switch_full.ast.condition))) break :no_switch_on_err;
break :else_result try switchExpr(
&else_scope,
sub_scope,
block_scope.break_result_info,
else_node,
switch_full,
.{ .peer_break_target = .{
.block_inst = block,
.block_ri = block_ri,
} },
);
}
break :else_result try fullBodyExpr(&else_scope, sub_scope, block_scope.break_result_info, else_node, .allow_branch_hint);
};
if (!else_scope.endsWithNoReturn()) {
// As our last action before the break, "pop" the error trace if needed
if (do_err_trace)
@@ -6574,7 +6592,6 @@ fn whileExpr(
var loop_scope = parent_gz.makeSubBlock(scope);
loop_scope.is_inline = is_inline;
loop_scope.setBreakResultInfo(block_ri);
defer loop_scope.unstack();
var cond_scope = parent_gz.makeSubBlock(&loop_scope.base);
@@ -6707,14 +6724,13 @@ fn whileExpr(
_ = try loop_scope.addNode(repeat_tag, node);
try loop_scope.setBlockBody(loop_block);
loop_scope.break_block = loop_block.toOptional();
loop_scope.continue_block = continue_block.toOptional();
if (while_full.label_token) |label_token| {
loop_scope.label = .{
.token = label_token,
.block_inst = loop_block,
};
loop_scope.label = .{ .token = label_token };
}
loop_scope.allow_unlabeled_control_flow = true;
loop_scope.break_target = loop_block;
loop_scope.continue_target = .{ .@"break" = continue_block };
loop_scope.setBreakResultInfo(block_ri);
// done adding instructions to loop_scope, can now stack then_scope
then_scope.instructions_top = then_scope.instructions.items.len;
@@ -6787,10 +6803,11 @@ fn whileExpr(
break :s &else_scope.base;
}
};
// Remove the continue block and break block so that `continue` and `break`
// control flow apply to outer loops; not this one.
loop_scope.continue_block = .none;
loop_scope.break_block = .none;
// Disallow unlabeled control flow to this scope so that bare `continue`
// and `break` control flow apply to outer loops; not this one.
// Also disallow `continue` targeting the loop label.
loop_scope.allow_unlabeled_control_flow = false;
loop_scope.continue_target = .none;
const else_result = try fullBodyExpr(&else_scope, sub_scope, loop_scope.break_result_info, else_node, .allow_branch_hint);
if (is_statement) {
_ = try addEnsureResult(&else_scope, else_result, else_node);
@@ -6979,14 +6996,12 @@ fn forExpr(
const cond_block = try loop_scope.makeBlockInst(block_tag, node);
try cond_scope.setBlockBody(cond_block);
loop_scope.break_block = loop_block.toOptional();
loop_scope.continue_block = cond_block.toOptional();
if (for_full.label_token) |label_token| {
loop_scope.label = .{
.token = label_token,
.block_inst = loop_block,
};
loop_scope.label = .{ .token = label_token };
}
loop_scope.allow_unlabeled_control_flow = true;
loop_scope.break_target = loop_block;
loop_scope.continue_target = .{ .@"break" = cond_block };
const then_node = for_full.ast.then_expr;
var then_scope = parent_gz.makeSubBlock(&cond_scope.base);
@@ -7077,10 +7092,11 @@ fn forExpr(
if (for_full.ast.else_expr.unwrap()) |else_node| {
const sub_scope = &else_scope.base;
// Remove the continue block and break block so that `continue` and `break`
// control flow apply to outer loops; not this one.
loop_scope.continue_block = .none;
loop_scope.break_block = .none;
// Disallow unlabeled control flow to this scope so that bare `continue`
// and `break` control flow apply to outer loops; not this one.
// Also disallow `continue` targeting the loop label.
loop_scope.allow_unlabeled_control_flow = false;
loop_scope.continue_target = .none;
const else_result = try fullBodyExpr(&else_scope, sub_scope, loop_scope.break_result_info, else_node, .allow_branch_hint);
if (is_statement) {
_ = try addEnsureResult(&else_scope, else_result, else_node);
@@ -7133,503 +7149,34 @@ fn forExpr(
return result;
}
/// Lowers an error-handling construct whose error branch is a `switch` on the
/// captured error into a single `switch_block_err_union` ZIR instruction:
///   - `eu catch |err| switch (err) { ... }` (`node_ty == .@"catch"`)
///   - `if (eu) |payload| ... else |err| switch (err) { ... }` (`node_ty == .@"if"`)
/// `catch_or_if_node` is the `catch`/`if` AST node; the `switch` node is
/// derived from it below. Returns the ref of the emitted switch block
/// (possibly wrapped via `rvalue` to satisfy the caller's result location).
fn switchExprErrUnion(
    parent_gz: *GenZir,
    scope: *Scope,
    ri: ResultInfo,
    catch_or_if_node: Ast.Node.Index,
    node_ty: enum { @"catch", @"if" },
) InnerError!Zir.Inst.Ref {
    const astgen = parent_gz.astgen;
    const gpa = astgen.gpa;
    const tree = astgen.tree;

    // Only meaningful for `.@"if"`; the `.@"catch"` arm never reads it.
    const if_full = switch (node_ty) {
        .@"catch" => undefined,
        .@"if" => tree.fullIf(catch_or_if_node).?,
    };

    // Locate the switch node, the error-union operand expression, and the
    // token of the `|err|` error capture for either syntactic form.
    const switch_node, const operand_node, const error_payload = switch (node_ty) {
        .@"catch" => .{
            tree.nodeData(catch_or_if_node).node_and_node[1],
            tree.nodeData(catch_or_if_node).node_and_node[0],
            // `catch` main token, then `|`, then the capture identifier.
            tree.nodeMainToken(catch_or_if_node) + 2,
        },
        .@"if" => .{
            if_full.ast.else_expr.unwrap().?,
            if_full.ast.cond_expr,
            if_full.error_token.?,
        },
    };
    const switch_full = tree.fullSwitch(switch_node).?;
    // Error return tracing only applies inside a function body.
    const do_err_trace = astgen.fn_block != null;

    const need_rl = astgen.nodes_need_rl.contains(catch_or_if_node);
    const block_ri: ResultInfo = if (need_rl) ri else .{
        .rl = switch (ri.rl) {
            .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, catch_or_if_node)).? },
            .inferred_ptr => .none,
            else => ri.rl,
        },
        .ctx = ri.ctx,
    };

    // Whether the non-error payload is accessed by reference: for `if`, a
    // `|*payload|` capture; for `catch`, a by-ref result location.
    const payload_is_ref = switch (node_ty) {
        .@"if" => if_full.payload_token != null and tree.tokenTag(if_full.payload_token.?) == .asterisk,
        .@"catch" => ri.rl == .ref or ri.rl == .ref_coerced_ty,
    };

    // We need to call `rvalue` to write through to the pointer only if we had a
    // result pointer and aren't forwarding it.
    const LocTag = @typeInfo(ResultInfo.Loc).@"union".tag_type.?;
    const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl);

    // First pass over the cases: count prong kinds, remember the `else` prong,
    // and reject prong forms that are invalid when switching on errors.
    var scalar_cases_len: u32 = 0;
    var multi_cases_len: u32 = 0;
    var inline_cases_len: u32 = 0;
    var has_else = false;
    var else_node: Ast.Node.OptionalIndex = .none;
    var else_src: ?Ast.TokenIndex = null;
    for (switch_full.ast.cases) |case_node| {
        const case = tree.fullSwitchCase(case_node).?;
        if (case.ast.values.len == 0) {
            const case_src = case.ast.arrow_token - 1;
            if (else_src) |src| {
                return astgen.failTokNotes(
                    case_src,
                    "multiple else prongs in switch expression",
                    .{},
                    &[_]u32{
                        try astgen.errNoteTok(
                            src,
                            "previous else prong here",
                            .{},
                        ),
                    },
                );
            }
            has_else = true;
            else_node = case_node.toOptional();
            else_src = case_src;
            continue;
        } else if (case.ast.values.len == 1 and
            tree.nodeTag(case.ast.values[0]) == .identifier and
            mem.eql(u8, tree.tokenSlice(tree.nodeMainToken(case.ast.values[0])), "_"))
        {
            // `_` prongs are reserved for non-exhaustive enums; error sets
            // use `else` instead.
            const case_src = case.ast.arrow_token - 1;
            return astgen.failTokNotes(
                case_src,
                "'_' prong is not allowed when switching on errors",
                .{},
                &[_]u32{
                    try astgen.errNoteTok(
                        case_src,
                        "consider using 'else'",
                        .{},
                    ),
                },
            );
        }

        for (case.ast.values) |val| {
            if (tree.nodeTag(val) == .string_literal)
                return astgen.failNode(val, "cannot switch on strings", .{});
        }

        if (case.ast.values.len == 1 and tree.nodeTag(case.ast.values[0]) != .switch_range) {
            scalar_cases_len += 1;
        } else {
            multi_cases_len += 1;
        }
        if (case.inline_token != null) {
            inline_cases_len += 1;
        }
    }

    const operand_ri: ResultInfo = .{
        .rl = if (payload_is_ref) .ref else .none,
        .ctx = .error_handling_expr,
    };

    astgen.advanceSourceCursorToNode(operand_node);
    const operand_lc: LineColumn = .{ astgen.source_line - parent_gz.decl_line, astgen.source_column };

    const raw_operand = try reachableExpr(parent_gz, scope, operand_ri, operand_node, switch_node);
    const item_ri: ResultInfo = .{ .rl = .none };

    // This contains the data that goes into the `extra` array for the SwitchBlockErrUnion, except
    // the first cases_nodes.len slots are a table that indexes payloads later in the array,
    // with the non-error and else case indices coming first, then scalar_cases_len indexes, then
    // multi_cases_len indexes
    const payloads = &astgen.scratch;
    const scratch_top = astgen.scratch.items.len;
    const case_table_start = scratch_top;
    // Slot 0 of the table is the non-error prong; slot 1 (if present) is `else`.
    const scalar_case_table = case_table_start + 1 + @intFromBool(has_else);
    const multi_case_table = scalar_case_table + scalar_cases_len;
    const case_table_end = multi_case_table + multi_cases_len;
    try astgen.scratch.resize(gpa, case_table_end);
    defer astgen.scratch.items.len = scratch_top;

    var block_scope = parent_gz.makeSubBlock(scope);
    // block_scope not used for collecting instructions
    block_scope.instructions_top = GenZir.unstacked_top;
    block_scope.setBreakResultInfo(block_ri);

    // Sema expects a dbg_stmt immediately before switch_block_err_union
    try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc);
    // This gets added to the parent block later, after the item expressions.
    const switch_block = try parent_gz.makeBlockInst(.switch_block_err_union, switch_node);

    // We re-use this same scope for all cases, including the special prong, if any.
    var case_scope = parent_gz.makeSubBlock(&block_scope.base);
    case_scope.instructions_top = GenZir.unstacked_top;

    // Generate the non-error prong body: for `catch`, the unwrapped payload is
    // the result; for `if`, the `then` branch runs with the payload in scope.
    {
        const body_len_index: u32 = @intCast(payloads.items.len);
        payloads.items[case_table_start] = body_len_index;
        try payloads.resize(gpa, body_len_index + 1); // body_len

        case_scope.instructions_top = parent_gz.instructions.items.len;
        defer case_scope.unstack();

        const unwrap_payload_tag: Zir.Inst.Tag = if (payload_is_ref)
            .err_union_payload_unsafe_ptr
        else
            .err_union_payload_unsafe;
        const unwrapped_payload = try case_scope.addUnNode(
            unwrap_payload_tag,
            raw_operand,
            catch_or_if_node,
        );
        switch (node_ty) {
            .@"catch" => {
                const case_result = switch (ri.rl) {
                    .ref, .ref_coerced_ty => unwrapped_payload,
                    else => try rvalue(
                        &case_scope,
                        block_scope.break_result_info,
                        unwrapped_payload,
                        catch_or_if_node,
                    ),
                };
                _ = try case_scope.addBreakWithSrcNode(
                    .@"break",
                    switch_block,
                    case_result,
                    catch_or_if_node,
                );
            },
            .@"if" => {
                var payload_val_scope: Scope.LocalVal = undefined;

                const then_node = if_full.ast.then_expr;
                const then_sub_scope = s: {
                    assert(if_full.error_token != null);
                    if (if_full.payload_token) |payload_token| {
                        // For `|*payload|`, the identifier follows the `*`.
                        const token_name_index = payload_token + @intFromBool(payload_is_ref);
                        const ident_name = try astgen.identAsString(token_name_index);
                        const token_name_str = tree.tokenSlice(token_name_index);
                        if (mem.eql(u8, "_", token_name_str))
                            break :s &case_scope.base;
                        try astgen.detectLocalShadowing(
                            &case_scope.base,
                            ident_name,
                            token_name_index,
                            token_name_str,
                            .capture,
                        );
                        payload_val_scope = .{
                            .parent = &case_scope.base,
                            .gen_zir = &case_scope,
                            .name = ident_name,
                            .inst = unwrapped_payload,
                            .token_src = token_name_index,
                            .id_cat = .capture,
                        };
                        try case_scope.addDbgVar(.dbg_var_val, ident_name, unwrapped_payload);
                        break :s &payload_val_scope.base;
                    } else {
                        // No payload capture: assert the payload type is void.
                        _ = try case_scope.addUnNode(
                            .ensure_err_union_payload_void,
                            raw_operand,
                            catch_or_if_node,
                        );
                        break :s &case_scope.base;
                    }
                };
                const then_result = try expr(
                    &case_scope,
                    then_sub_scope,
                    block_scope.break_result_info,
                    then_node,
                );
                try checkUsed(parent_gz, &case_scope.base, then_sub_scope);
                if (!case_scope.endsWithNoReturn()) {
                    _ = try case_scope.addBreakWithSrcNode(
                        .@"break",
                        switch_block,
                        then_result,
                        then_node,
                    );
                }
            },
        }

        const case_slice = case_scope.instructionsSlice();
        const body_len = astgen.countBodyLenAfterFixupsExtraRefs(case_slice, &.{switch_block});
        try payloads.ensureUnusedCapacity(gpa, body_len);
        const capture: Zir.Inst.SwitchBlock.ProngInfo.Capture = switch (node_ty) {
            .@"catch" => .none,
            .@"if" => if (if_full.payload_token == null)
                .none
            else if (payload_is_ref)
                .by_ref
            else
                .by_val,
        };
        payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{
            .body_len = @intCast(body_len),
            .capture = capture,
            .is_inline = false,
            .has_tag_capture = false,
        });
        appendBodyWithFixupsExtraRefsArrayList(astgen, payloads, case_slice, &.{switch_block});
    }

    // Intern the `|err|` capture name and reject a discarded capture.
    const err_name = blk: {
        const err_str = tree.tokenSlice(error_payload);
        if (mem.eql(u8, err_str, "_")) {
            // This is fatal because we already know we're switching on the captured error.
            return astgen.failTok(error_payload, "discard of error capture; omit it instead", .{});
        }
        const err_name = try astgen.identAsString(error_payload);
        try astgen.detectLocalShadowing(scope, err_name, error_payload, err_str, .capture);

        break :blk err_name;
    };

    // allocate a shared dummy instruction for the error capture
    const err_inst = err_inst: {
        const inst: Zir.Inst.Index = @enumFromInt(astgen.instructions.len);
        try astgen.instructions.append(astgen.gpa, .{
            .tag = .extended,
            .data = .{ .extended = .{
                .opcode = .value_placeholder,
                .small = undefined,
                .operand = undefined,
            } },
        });
        break :err_inst inst;
    };

    // In this pass we generate all the item and prong expressions for error cases.
    var multi_case_index: u32 = 0;
    var scalar_case_index: u32 = 0;
    var any_uses_err_capture = false;
    for (switch_full.ast.cases) |case_node| {
        const case = tree.fullSwitchCase(case_node).?;
        const is_multi_case = case.ast.values.len > 1 or
            (case.ast.values.len == 1 and tree.nodeTag(case.ast.values[0]) == .switch_range);

        var dbg_var_name: Zir.NullTerminatedString = .empty;
        var dbg_var_inst: Zir.Inst.Ref = undefined;
        var err_scope: Scope.LocalVal = undefined;
        var capture_scope: Scope.LocalVal = undefined;

        // Build the scope chain for the prong body: the outer `|err|` capture
        // always resolves, optionally through a per-prong payload capture.
        const sub_scope = blk: {
            err_scope = .{
                .parent = &case_scope.base,
                .gen_zir = &case_scope,
                .name = err_name,
                .inst = err_inst.toRef(),
                .token_src = error_payload,
                .id_cat = .capture,
            };

            const capture_token = case.payload_token orelse break :blk &err_scope.base;
            if (tree.tokenTag(capture_token) != .identifier) {
                return astgen.failTok(capture_token + 1, "error set cannot be captured by reference", .{});
            }

            const capture_slice = tree.tokenSlice(capture_token);
            if (mem.eql(u8, capture_slice, "_")) {
                // Non-fatal here; lowering can proceed without the capture.
                try astgen.appendErrorTok(capture_token, "discard of error capture; omit it instead", .{});
            }
            const tag_name = try astgen.identAsString(capture_token);
            try astgen.detectLocalShadowing(&case_scope.base, tag_name, capture_token, capture_slice, .capture);

            capture_scope = .{
                .parent = &case_scope.base,
                .gen_zir = &case_scope,
                .name = tag_name,
                // The switch block instruction itself stands in for the
                // per-prong capture value.
                .inst = switch_block.toRef(),
                .token_src = capture_token,
                .id_cat = .capture,
            };
            dbg_var_name = tag_name;
            dbg_var_inst = switch_block.toRef();

            err_scope.parent = &capture_scope.base;

            break :blk &err_scope.base;
        };

        // Reserve and fill this prong's header slots in `payloads`, recording
        // where its body length will be written.
        const header_index: u32 = @intCast(payloads.items.len);
        const body_len_index = if (is_multi_case) blk: {
            payloads.items[multi_case_table + multi_case_index] = header_index;
            multi_case_index += 1;
            try payloads.resize(gpa, header_index + 3); // items_len, ranges_len, body_len

            // items
            var items_len: u32 = 0;
            for (case.ast.values) |item_node| {
                if (tree.nodeTag(item_node) == .switch_range) continue;
                items_len += 1;

                const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node, .switch_item);
                try payloads.append(gpa, @intFromEnum(item_inst));
            }

            // ranges
            var ranges_len: u32 = 0;
            for (case.ast.values) |range| {
                if (tree.nodeTag(range) != .switch_range) continue;
                ranges_len += 1;

                const first_node, const last_node = tree.nodeData(range).node_and_node;
                const first = try comptimeExpr(parent_gz, scope, item_ri, first_node, .switch_item);
                const last = try comptimeExpr(parent_gz, scope, item_ri, last_node, .switch_item);
                try payloads.appendSlice(gpa, &[_]u32{
                    @intFromEnum(first), @intFromEnum(last),
                });
            }

            payloads.items[header_index] = items_len;
            payloads.items[header_index + 1] = ranges_len;
            break :blk header_index + 2;
        } else if (case_node.toOptional() == else_node) blk: {
            payloads.items[case_table_start + 1] = header_index;
            try payloads.resize(gpa, header_index + 1); // body_len
            break :blk header_index;
        } else blk: {
            payloads.items[scalar_case_table + scalar_case_index] = header_index;
            scalar_case_index += 1;
            try payloads.resize(gpa, header_index + 2); // item, body_len
            const item_node = case.ast.values[0];
            const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node, .switch_item);
            payloads.items[header_index] = @intFromEnum(item_inst);
            break :blk header_index + 1;
        };

        {
            // temporarily stack case_scope on parent_gz
            case_scope.instructions_top = parent_gz.instructions.items.len;
            defer case_scope.unstack();

            if (do_err_trace and nodeMayAppendToErrorTrace(tree, operand_node))
                _ = try case_scope.addSaveErrRetIndex(.always);

            if (dbg_var_name != .empty) {
                try case_scope.addDbgVar(.dbg_var_val, dbg_var_name, dbg_var_inst);
            }

            const target_expr_node = case.ast.target_expr;
            const case_result = try fullBodyExpr(&case_scope, sub_scope, block_scope.break_result_info, target_expr_node, .allow_branch_hint);
            // check capture_scope, not err_scope to avoid false positive unused error capture
            try checkUsed(parent_gz, &case_scope.base, err_scope.parent);
            const uses_err = err_scope.used != .none or err_scope.discarded != .none;
            if (uses_err) {
                try case_scope.addDbgVar(.dbg_var_val, err_name, err_inst.toRef());
                any_uses_err_capture = true;
            }

            if (!parent_gz.refIsNoReturn(case_result)) {
                if (do_err_trace)
                    try restoreErrRetIndex(
                        &case_scope,
                        .{ .block = switch_block },
                        block_scope.break_result_info,
                        target_expr_node,
                        case_result,
                    );

                _ = try case_scope.addBreakWithSrcNode(.@"break", switch_block, case_result, target_expr_node);
            }

            const case_slice = case_scope.instructionsSlice();
            // `err_inst` is only an extra ref if this prong actually used it.
            const extra_insts: []const Zir.Inst.Index = if (uses_err) &.{ switch_block, err_inst } else &.{switch_block};
            const body_len = astgen.countBodyLenAfterFixupsExtraRefs(case_slice, extra_insts);
            try payloads.ensureUnusedCapacity(gpa, body_len);
            payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{
                .body_len = @intCast(body_len),
                .capture = if (case.payload_token != null) .by_val else .none,
                .is_inline = case.inline_token != null,
                .has_tag_capture = false,
            });
            appendBodyWithFixupsExtraRefsArrayList(astgen, payloads, case_slice, extra_insts);
        }
    }
    // Now that the item expressions are generated we can add this.
    try parent_gz.instructions.append(gpa, switch_block);

    // Copy the collected case table and bodies from scratch into `extra`.
    try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlockErrUnion).@"struct".fields.len +
        @intFromBool(multi_cases_len != 0) +
        payloads.items.len - case_table_end +
        (case_table_end - case_table_start) * @typeInfo(Zir.Inst.As).@"struct".fields.len);

    const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlockErrUnion{
        .operand = raw_operand,
        .bits = Zir.Inst.SwitchBlockErrUnion.Bits{
            .has_multi_cases = multi_cases_len != 0,
            .has_else = has_else,
            .scalar_cases_len = @intCast(scalar_cases_len),
            .any_uses_err_capture = any_uses_err_capture,
            .payload_is_ref = payload_is_ref,
        },
        .main_src_node_offset = parent_gz.nodeIndexToRelative(catch_or_if_node),
    });

    if (multi_cases_len != 0) {
        astgen.extra.appendAssumeCapacity(multi_cases_len);
    }

    if (any_uses_err_capture) {
        astgen.extra.appendAssumeCapacity(@intFromEnum(err_inst));
    }

    const zir_datas = astgen.instructions.items(.data);
    zir_datas[@intFromEnum(switch_block)].pl_node.payload_index = payload_index;

    for (payloads.items[case_table_start..case_table_end], 0..) |start_index, i| {
        var body_len_index = start_index;
        var end_index = start_index;
        const table_index = case_table_start + i;
        if (table_index < scalar_case_table) {
            // Non-error or else prong: header is just body_len.
            end_index += 1;
        } else if (table_index < multi_case_table) {
            // Scalar prong: header is item, body_len.
            body_len_index += 1;
            end_index += 2;
        } else {
            // Multi prong: header is items_len, ranges_len, body_len,
            // then the item refs and range pairs.
            body_len_index += 2;
            const items_len = payloads.items[start_index];
            const ranges_len = payloads.items[start_index + 1];
            end_index += 3 + items_len + 2 * ranges_len;
        }

        const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[body_len_index]);
        end_index += prong_info.body_len;
        astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]);
    }

    if (need_result_rvalue) {
        return rvalue(parent_gz, ri, switch_block.toRef(), switch_node);
    } else {
        return switch_block.toRef();
    }
}
/// Describes how a `switch` expression relates to an enclosing error-handling
/// construct (`catch` / `if` with an error capture), which selects the ZIR
/// lowering strategy used for it.
const SwitchNonErr = union(enum) {
    /// A regular switch expression.
    /// Emits `switch_block[_ref]`.
    none,
    /// `eu catch |err| switch (err) { ... }`
    ///
    /// `switch` must not be labeled.
    /// Emits `switch_block_err_union`.
    @"catch",
    /// `if (eu) |payload| { ... } else |err| switch (err) { ... }`
    ///
    /// `switch` must not be labeled.
    /// Emits `switch_block_err_union`.
    @"if": Ast.full.If,
    /// `eu catch |err| label: switch (err) { ... }`
    /// `if (eu) |payload| { ... } else |err| label: switch (err) { ... }`
    ///
    /// `switch` must be labeled.
    /// Emits a `condbr` on the non-error body and a regular switch, though the
    /// non-error prong and all `break`s from switch prongs are peers.
    /// Exists to avoid a rather complex special case of `switch_block_err_union`.
    peer_break_target: struct {
        /// Refers to the enclosing block of the entire switch-on-err expression.
        block_inst: Zir.Inst.Index,
        /// Belongs to `block_inst`.
        block_ri: ResultInfo,
    },
};
fn switchExpr(
parent_gz: *GenZir,
@@ -7637,13 +7184,38 @@ fn switchExpr(
ri: ResultInfo,
node: Ast.Node.Index,
switch_full: Ast.full.Switch,
non_err: SwitchNonErr,
) InnerError!Zir.Inst.Ref {
const astgen = parent_gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
const operand_node = switch_full.ast.condition;
const switch_node, const operand_node, const err_token = switch (non_err) {
.none, .peer_break_target => .{
node,
switch_full.ast.condition,
undefined,
},
.@"catch" => .{
tree.nodeData(node).node_and_node[1],
tree.nodeData(node).node_and_node[0],
tree.nodeMainToken(node) + 2,
},
.@"if" => |if_full| .{
if_full.ast.else_expr.unwrap().?,
if_full.ast.cond_expr,
if_full.error_token.?,
},
};
const case_nodes = switch_full.ast.cases;
const is_err_switch = non_err != .none;
const needs_non_err_handling = switch (non_err) {
.none => false,
.peer_break_target => false, // handled by parent expression
.@"catch", .@"if" => true,
};
const need_rl = astgen.nodes_need_rl.contains(node);
const block_ri: ResultInfo = if (need_rl) ri else .{
.rl = switch (ri.rl) {
@@ -7653,226 +7225,562 @@ fn switchExpr(
},
.ctx = ri.ctx,
};
// We need to call `rvalue` to write through to the pointer only if we had a
// result pointer and aren't forwarding it.
const LocTag = @typeInfo(ResultInfo.Loc).@"union".tag_type.?;
const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl);
const catch_or_if_node = if (needs_non_err_handling) node else undefined;
const do_err_trace = needs_non_err_handling and astgen.fn_block != null;
const non_err_is_ref: bool = switch (non_err) {
.none, .peer_break_target => undefined,
.@"catch" => ri.rl == .ref or ri.rl == .ref_coerced_ty,
.@"if" => |if_full| if_full.payload_token != null and
tree.tokenTag(if_full.payload_token.?) == .asterisk,
};
if (switch_full.label_token) |label_token| {
try astgen.checkLabelRedefinition(scope, label_token);
}
const err_capture_name: Zir.NullTerminatedString = if (needs_non_err_handling) blk: {
const err_str = tree.tokenSlice(err_token);
if (mem.eql(u8, err_str, "_")) {
// This is fatal because we already know we're switching on the captured error.
return astgen.failTok(err_token, "discard of error capture; omit it instead", .{});
}
const err_name = try astgen.identAsString(err_token);
try astgen.detectLocalShadowing(scope, err_name, err_token, err_str, .capture);
break :blk err_name;
} else undefined;
// We perform two passes over the AST. This first pass is to collect information
// for the following variables, make note of the special prong AST node index,
// and bail out with a compile error if there are multiple special prongs present.
// for the following variables, make note of the special prong AST node indices,
// and bail out with a compile error if there are incompatible special prongs present.
var any_payload_is_ref = false;
var any_has_payload_capture = false;
var any_has_tag_capture = false;
var any_non_inline_capture = false;
var any_maybe_runtime_capture = false;
var scalar_cases_len: u32 = 0;
var multi_cases_len: u32 = 0;
var inline_cases_len: u32 = 0;
var total_items_len: usize = 0;
var total_ranges_len: usize = 0;
var else_case_node: Ast.Node.OptionalIndex = .none;
var else_src: ?Ast.TokenIndex = null;
var underscore_case_node: Ast.Node.OptionalIndex = .none;
var underscore_node: Ast.Node.OptionalIndex = .none;
var underscore_src: ?Ast.TokenIndex = null;
var underscore_additional_items: Zir.SpecialProngs.AdditionalItems = .none;
for (case_nodes) |case_node| {
const case = tree.fullSwitchCase(case_node).?;
if (case.payload_token) |payload_token| {
const ident = if (tree.tokenTag(payload_token) == .asterisk) blk: {
// Capturing errors by reference is never allowed, but as we will
// check for this again later we will fail as late as possible.
any_payload_is_ref = true;
break :blk payload_token + 1;
} else payload_token;
if (!mem.eql(u8, tree.tokenSlice(ident), "_")) {
any_has_payload_capture = true;
// If we're capturing a union, its payload value cannot always be
// comptime-known, even if its prong is inlined as inlining only
// affects its enum tag.
// This check isn't perfect, because for things like enums, the
// entire capture *is* comptime-known for inline prongs! But such
// knowledge requires semantic analysis.
any_maybe_runtime_capture = true;
}
if (tree.tokenTag(ident + 1) == .comma) {
any_has_tag_capture = true;
}
// If the first capture is ignored, then there is no runtime-known
// capture, as the tag capture must be for an inline prong.
// This check isn't perfect, because for things like enums, the
// first prong *is* comptime-known for inline prongs! But such
// knowledge requires semantic analysis.
if (!mem.eql(u8, tree.tokenSlice(ident), "_")) {
any_non_inline_capture = true;
if (case.inline_token == null) {
any_maybe_runtime_capture = true;
}
}
}
// Check for else prong.
if (case.ast.values.len == 0) {
const case_src = case.ast.arrow_token - 1;
if (else_src) |src| {
if (else_case_node.unwrap()) |prev_case_node| {
const prev_else_tok = tree.fullSwitchCase(prev_case_node).?.ast.arrow_token - 1;
const else_tok = case.ast.arrow_token - 1;
return astgen.failTokNotes(
case_src,
else_tok,
"multiple else prongs in switch expression",
.{},
&[_]u32{
try astgen.errNoteTok(
src,
"previous else prong here",
.{},
),
},
&.{try astgen.errNoteTok(prev_else_tok, "previous else prong here", .{})},
);
}
else_case_node = case_node.toOptional();
else_src = case_src;
continue;
}
// Check for '_' prong.
var case_has_underscore = false;
// Check for '_' prong and ranges.
var case_has_ranges = false;
for (case.ast.values) |val| {
switch (tree.nodeTag(val)) {
.identifier => if (mem.eql(u8, tree.tokenSlice(tree.nodeMainToken(val)), "_")) {
const val_src = tree.nodeMainToken(val);
if (underscore_src) |src| {
return astgen.failTokNotes(
val_src,
"multiple '_' prongs in switch expression",
.{},
&[_]u32{
try astgen.errNoteTok(
src,
"previous '_' prong here",
.{},
),
},
);
}
if (case.inline_token != null) {
return astgen.failTok(val_src, "cannot inline '_' prong", .{});
}
underscore_case_node = case_node.toOptional();
underscore_src = val_src;
underscore_node = val.toOptional();
underscore_additional_items = switch (case.ast.values.len) {
0 => unreachable,
1 => .none,
2 => .one,
else => .many,
};
case_has_underscore = true;
.switch_range => {
total_ranges_len += 1;
case_has_ranges = true;
},
.string_literal => return astgen.failNode(val, "cannot switch on strings", .{}),
else => {},
else => |tag| {
total_items_len += 1;
if (tag == .identifier and
mem.eql(u8, tree.tokenSlice(tree.nodeMainToken(val)), "_"))
{
if (is_err_switch) {
const case_src = case.ast.arrow_token - 1;
return astgen.failTokNotes(
case_src,
"'_' prong is not allowed when switching on errors",
.{},
&.{
try astgen.errNoteTok(
case_src,
"consider using 'else'",
.{},
),
},
);
}
if (underscore_node.unwrap()) |prev_src| {
return astgen.failNodeNotes(
val,
"multiple '_' prongs in switch expression",
.{},
&.{try astgen.errNoteNode(prev_src, "previous '_' prong here", .{})},
);
}
if (case.inline_token != null) {
return astgen.failNode(val, "cannot inline '_' prong", .{});
}
underscore_node = val.toOptional();
}
},
}
}
if (case_has_underscore) continue;
if (case.ast.values.len == 1 and tree.nodeTag(case.ast.values[0]) != .switch_range) {
const case_len = case.ast.values.len;
if (case_len == 1 and !case_has_ranges) {
scalar_cases_len += 1;
} else {
} else if (case_len >= 1) {
multi_cases_len += 1;
}
if (case.inline_token != null) {
inline_cases_len += 1;
}
}
const special_prongs: Zir.SpecialProngs = .init(
else_src != null,
underscore_src != null,
underscore_additional_items,
);
const has_else = special_prongs.hasElse();
const has_under = special_prongs.hasUnder();
const has_else = else_case_node != .none;
const has_under = underscore_node != .none;
if (is_err_switch) assert(!has_under); // should have failed by now
const any_ranges = total_ranges_len > 0;
const operand_ri: ResultInfo = .{ .rl = if (any_payload_is_ref) .ref else .none };
astgen.advanceSourceCursorToNode(operand_node);
const operand_lc: LineColumn = .{ astgen.source_line - parent_gz.decl_line, astgen.source_column };
const raw_operand = try expr(parent_gz, scope, operand_ri, operand_node);
const item_ri: ResultInfo = .{ .rl = .none };
// If this switch is labeled, it may have `continue`s targeting it, and thus we need the operand type
// to provide a result type.
const raw_operand_ty_ref = if (switch_full.label_token != null) t: {
break :t try parent_gz.addUnNode(.typeof, raw_operand, operand_node);
} else undefined;
// This contains the data that goes into the `extra` array for the SwitchBlock/SwitchBlockMulti,
// except the first cases_nodes.len slots are a table that indexes payloads later in the array, with
// the special case index coming first, then scalar_case_len indexes, then multi_cases_len indexes
// This contains all of the body lengths (already in the correct order) and
// the bodies they belong to that go into the `extra` array later, except the
// first item_table_end slots are a table that indexes the item bodies (and
// also indirectly the prong bodies, as they are always trailing after their
// item bodies).
const payloads = &astgen.scratch;
const scratch_top = astgen.scratch.items.len;
const case_table_start = scratch_top;
const else_case_index = if (has_else) case_table_start else undefined;
const under_case_index = if (has_under) case_table_start + @intFromBool(has_else) else undefined;
const scalar_case_table = case_table_start + @intFromBool(has_else) + @intFromBool(has_under);
const multi_case_table = scalar_case_table + scalar_cases_len;
const case_table_end = multi_case_table + multi_cases_len;
try astgen.scratch.resize(gpa, case_table_end);
var payloads_end = scratch_top;
// Since range item body pairs are always contiguous we don't technically
// have to keep track of the position of the second body. However handling
// all of the several indices and offsets is complicated enough as it is,
// so for the sake of keeping this function a little bit more simple we do
// it anyway.
const scalar_body_table = payloads_end;
payloads_end += scalar_cases_len;
const multi_item_body_table = payloads_end;
payloads_end += total_items_len + 2 * total_ranges_len - scalar_cases_len;
const multi_prong_body_table = payloads_end;
payloads_end += multi_cases_len;
const body_table_end = payloads_end;
const scalar_prong_infos_start = payloads_end;
payloads_end += scalar_cases_len;
const multi_prong_infos_start = payloads_end;
payloads_end += multi_cases_len;
const multi_case_items_lens_start = payloads_end;
payloads_end += multi_cases_len;
const multi_case_ranges_lens_start = if (any_ranges) blk: {
const multi_case_ranges_lens_start = payloads_end;
payloads_end += multi_cases_len;
break :blk multi_case_ranges_lens_start;
} else undefined;
const scalar_item_infos_start = payloads_end;
payloads_end += scalar_cases_len;
const multi_items_infos_start = payloads_end;
payloads_end += total_items_len - scalar_cases_len + 2 * total_ranges_len;
const bodies_start = payloads_end;
try payloads.resize(gpa, bodies_start);
defer astgen.scratch.items.len = scratch_top;
var non_err_prong_body_start: u32 = undefined;
var else_prong_body_start: u32 = undefined;
var non_err_info: Zir.Inst.SwitchBlock.ProngInfo.NonErr = undefined;
var else_info: Zir.Inst.SwitchBlock.ProngInfo.Else = undefined;
var block_scope = parent_gz.makeSubBlock(scope);
// block_scope not used for collecting instructions
block_scope.instructions_top = GenZir.unstacked_top;
block_scope.setBreakResultInfo(block_ri);
// Sema expects a dbg_stmt immediately before switch_block(_ref)
const operand_ri: ResultInfo = .{
.rl = if (any_payload_is_ref or
(needs_non_err_handling and non_err_is_ref)) .ref else .none,
.ctx = if (do_err_trace) .error_handling_expr else .none,
};
astgen.advanceSourceCursorToNode(operand_node);
const operand_lc: LineColumn = .{ astgen.source_line - parent_gz.decl_line, astgen.source_column };
const raw_operand: Zir.Inst.Ref = if (needs_non_err_handling)
try reachableExpr(parent_gz, scope, operand_ri, operand_node, switch_node)
else
try expr(parent_gz, scope, operand_ri, operand_node);
// Sema expects a dbg_stmt immediately before any kind of switch_block inst.
try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc);
// This gets added to the parent block later, after the item expressions.
const switch_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_block_ref else .switch_block;
const switch_block = try parent_gz.makeBlockInst(switch_tag, node);
const switch_tag: Zir.Inst.Tag = switch (non_err) {
.none, .peer_break_target => if (any_payload_is_ref) .switch_block_ref else .switch_block,
.@"if", .@"catch" => .switch_block_err_union,
};
const switch_block = try parent_gz.makeBlockInst(switch_tag, switch_node);
if (switch_full.label_token) |label_token| {
block_scope.continue_block = switch_block.toOptional();
block_scope.continue_result_info = .{
.rl = if (any_payload_is_ref)
.{ .ref_coerced_ty = raw_operand_ty_ref }
else
.{ .coerced_ty = raw_operand_ty_ref },
};
// Set `break` target if applicable; `continue` target may differ!
switch (non_err) {
.none => {
if (switch_full.label_token != null) {
block_scope.break_target = switch_block;
}
block_scope.setBreakResultInfo(block_ri);
},
.@"catch", .@"if" => {
assert(switch_full.label_token == null); // use `peer_break_target` code path instead!
block_scope.setBreakResultInfo(block_ri);
},
.peer_break_target => |peer_break_target| {
block_scope.label = .{
.token = label_token,
.block_inst = switch_block,
};
// `break` can target this via `label.block_inst`
// `break_result_info` already set by `setBreakResultInfo`
// Special case; we have an error switch + label situation and we
// want to generate this:
// ```
// %1 = block({
// %2 = is_non_err(%operand)
// %3 = condbr(%2, {
// %4 = err_union_payload_unsafe(%operand)
// %5 = break(%1, result) // targets enclosing `block`
// }, {
// %6 = err_union_code(%operand)
// %7 = switch_block(%6,
// { ... } => {
// %8 = break(%1, result) // targets enclosing `block`
// },
// { ... } => {
// %9 = switch_continue(%7, result) // targets `switch_block`
// },
// )
// %10 = break(%1, @void_value)
// })
// })
// ```
// to ensure that the non-err case and the switch are only peers when
// breaking from either, but not when continuing the switch. We use
// this lowering to avoiding a rather complex special case in Sema.
assert(switch_full.label_token != null); // use `switch_block_err_union` code path instead!
assert(.block == astgen.instructions.items(.tag)[@intFromEnum(peer_break_target.block_inst)]);
block_scope.break_target = peer_break_target.block_inst;
block_scope.setBreakResultInfo(peer_break_target.block_ri);
},
}
// We re-use this same scope for all cases, including the special prong, if any.
var case_scope = parent_gz.makeSubBlock(&block_scope.base);
case_scope.instructions_top = GenZir.unstacked_top;
// We need a bunch of separate locations to store several capture values:
// `... |err| switch (err) { else => |e| { ... } }` // `err` and `e`
// `... => |payload, tag| { ... }` // `payload` and `tag`
// and result types:
// `foo => { ... }` // `foo` needs a result type
// `... => continue :sw val` // `val` needs a result type
// Some observations:
// - If we just use the switch inst itself we don't need a placeholder!
// - We can always tell for sure whether a capture exists. We also know
// that its existence implies that it has to be used.
// - We can't know whether there are any `continue`s before analyzing all
// prong bodies. At that point we already need a result location. We do
// know whether there even *could* be any though by looking for a label.
// - Sema wants a result location in `zirSwitchContinue`. If that's the
// switch inst itself, there's no need to look at the switch inst data.
// Some conclusions:
// - We should use the switch inst as the continue result location if needed.
// - If we need more insts for captures and our switch inst is already used
// for something else, we start creating placeholder insts.
// If any prong has an inline tag capture, allocate a shared dummy instruction for it
const tag_inst = if (any_has_tag_capture) tag_inst: {
const inst: Zir.Inst.Index = @enumFromInt(astgen.instructions.len);
try astgen.instructions.append(astgen.gpa, .{
.tag = .extended,
.data = .{ .extended = .{
.opcode = .value_placeholder,
.small = undefined,
.operand = undefined,
} },
});
break :tag_inst inst;
} else undefined;
// Prong items use the switch block instruction as their result type.
// No other components of the switch statement are in scope while they are
// being resolved, so this is never a problem.
const item_ri: ResultInfo = .{ .rl = .{ .coerced_ty = switch_block.toRef() } };
var switch_block_inst_is_occupied: bool = false;
if (switch_full.label_token) |label_token| {
block_scope.label = .{ .token = label_token };
block_scope.continue_target = .{ .switch_continue = switch_block };
block_scope.continue_result_info = .{
.rl = if (any_payload_is_ref)
.{ .ref_coerced_ty = switch_block.toRef() }
else
.{ .coerced_ty = switch_block.toRef() },
};
switch_block_inst_is_occupied = true;
// `break_target` and `break_result_info` already set above.
}
if (needs_non_err_handling) {
// `switch_block_err_union` uses the switch block inst as its err capture/
// switch operand. This is always ok as its switch can never have a label.
assert(!switch_block_inst_is_occupied);
switch_block_inst_is_occupied = true;
}
// `... => |payload| { ... }`
const payload_capture_inst, const payload_capture_inst_is_placeholder = inst: {
if (!any_has_payload_capture) break :inst .{ undefined, false };
if (!switch_block_inst_is_occupied) {
switch_block_inst_is_occupied = true;
break :inst .{ switch_block, false };
}
break :inst .{ try astgen.appendPlaceholder(), true };
};
// `... => |_, tag| { ... }`
const tag_capture_inst, const tag_capture_inst_is_placeholder = inst: {
if (!any_has_tag_capture) break :inst .{ undefined, false };
if (!switch_block_inst_is_occupied) {
switch_block_inst_is_occupied = true;
break :inst .{ switch_block, false };
}
break :inst .{ try astgen.appendPlaceholder(), true };
};
var prong_body_extra_insts_buf: [3]Zir.Inst.Index = undefined;
const prong_body_extra_insts: []const Zir.Inst.Index = extra_insts: {
var extra_insts: std.ArrayList(Zir.Inst.Index) = .initBuffer(&prong_body_extra_insts_buf);
if (switch_block_inst_is_occupied) extra_insts.appendAssumeCapacity(switch_block);
if (payload_capture_inst_is_placeholder) extra_insts.appendAssumeCapacity(payload_capture_inst);
if (tag_capture_inst_is_placeholder) extra_insts.appendAssumeCapacity(tag_capture_inst);
break :extra_insts extra_insts.items;
};
const switch_operand, const catch_or_if_operand = if (needs_non_err_handling)
.{ switch_block.toRef(), raw_operand }
else
.{ raw_operand, undefined };
// We re-use this same scope for all case items and contents.
var scratch_scope = parent_gz.makeSubBlock(&block_scope.base);
scratch_scope.instructions_top = GenZir.unstacked_top;
// We have to take care of the non-error body first if there is one.
non_err_body: {
if (!needs_non_err_handling) break :non_err_body;
scratch_scope.instructions_top = parent_gz.instructions.items.len;
defer scratch_scope.unstack();
// It's always ok to use the switch block inst to refer to the error union
// payload as the actual switch statement isn't even in scope yet.
const non_err_payload_inst = switch_block;
var non_err_capture: Zir.Inst.SwitchBlock.ProngInfo.Capture = .none;
switch (non_err) {
.none, .peer_break_target => unreachable,
.@"catch" => {
// We always effectively capture the error union payload; we use
// it to `break` from the entire `switch_block_err_union`.
non_err_capture = if (non_err_is_ref) .by_ref else .by_val;
const then_result = switch (ri.rl) {
.ref, .ref_coerced_ty => non_err_payload_inst.toRef(),
else => try rvalue(
&scratch_scope,
block_scope.break_result_info,
non_err_payload_inst.toRef(),
catch_or_if_node,
),
};
_ = try scratch_scope.addBreakWithSrcNode(
.@"break",
switch_block,
then_result,
catch_or_if_node,
);
},
.@"if" => |if_full| {
var payload_val_scope: Scope.LocalVal = undefined;
const then_node = if_full.ast.then_expr;
const then_sub_scope: *Scope = scope: {
if (if_full.payload_token) |payload_token| {
const ident_token = payload_token + @intFromBool(non_err_is_ref);
const ident_name = try astgen.identAsString(ident_token);
const ident_name_str = tree.tokenSlice(ident_token);
if (mem.eql(u8, "_", ident_name_str)) {
break :scope &scratch_scope.base;
}
non_err_capture = if (non_err_is_ref) .by_ref else .by_val;
try astgen.detectLocalShadowing(&scratch_scope.base, ident_name, ident_token, ident_name_str, .capture);
payload_val_scope = .{
.parent = &scratch_scope.base,
.gen_zir = &scratch_scope,
.name = ident_name,
.inst = non_err_payload_inst.toRef(),
.token_src = ident_token,
.id_cat = .capture,
};
try scratch_scope.addDbgVar(.dbg_var_val, ident_name, non_err_payload_inst.toRef());
break :scope &payload_val_scope.base;
} else {
_ = try scratch_scope.addUnNode(
.ensure_err_union_payload_void,
catch_or_if_operand,
catch_or_if_node,
);
break :scope &scratch_scope.base;
}
};
const then_result = try fullBodyExpr(&scratch_scope, then_sub_scope, block_scope.break_result_info, then_node, .allow_branch_hint);
try checkUsed(parent_gz, &scratch_scope.base, then_sub_scope);
if (!scratch_scope.endsWithNoReturn()) {
_ = try scratch_scope.addBreakWithSrcNode(.@"break", switch_block, then_result, then_node);
}
},
}
const body_slice = scratch_scope.instructionsSlice();
const body_start: u32 = @intCast(payloads.items.len);
const body_len = astgen.countBodyLenAfterFixupsExtraRefs(body_slice, &.{non_err_payload_inst});
try payloads.ensureUnusedCapacity(gpa, body_len);
astgen.appendBodyWithFixupsExtraRefsArrayList(payloads, body_slice, &.{non_err_payload_inst});
non_err_prong_body_start = body_start;
non_err_info = .{
.body_len = @intCast(body_len),
.capture = non_err_capture,
.operand_is_ref = non_err_is_ref,
};
}
// In this pass we generate all the item and prong expressions.
var multi_case_index: u32 = 0;
var scalar_case_index: u32 = 0;
var multi_item_offset: usize = 0;
for (case_nodes) |case_node| {
const case = tree.fullSwitchCase(case_node).?;
const is_multi_case = case.ast.values.len > 1 or
(case.ast.values.len == 1 and tree.nodeTag(case.ast.values[0]) == .switch_range);
const ranges_len: u32 = if (any_ranges) blk: {
var ranges_len: u32 = 0;
for (case.ast.values) |value| {
ranges_len += @intFromBool(tree.nodeTag(value) == .switch_range);
}
break :blk ranges_len;
} else 0;
const items_len: u32 = @intCast(case.ast.values.len - ranges_len);
const is_multi_case = items_len > 1 or ranges_len > 0;
var dbg_var_name: Zir.NullTerminatedString = .empty;
var dbg_var_inst: Zir.Inst.Ref = undefined;
// item/range bodies in order of occurence
var item_i: usize = 0;
var range_i: usize = 0;
for (case.ast.values) |value| {
const is_range = tree.nodeTag(value) == .switch_range;
const range: [2]Ast.Node.Index = if (is_range) tree.nodeData(value).node_and_node else undefined;
const nodes: []const Ast.Node.Index = if (is_range) &range else &.{value};
for (nodes) |item| {
// We lower enum literals, error values and number literals
// manually to save space since they are very commonly used as
// switch case items.
const body_start: u32 = @intCast(payloads.items.len);
const item_info: Zir.Inst.SwitchBlock.ItemInfo = blk: switch (tree.nodeTag(item)) {
.enum_literal => {
const str_index = try astgen.identAsString(tree.nodeMainToken(item));
break :blk .wrap(.{ .enum_literal = str_index });
},
.error_value => {
const ident_token = tree.nodeMainToken(item) + 2; // skip 'error', '.'
const str_index = try astgen.identAsString(ident_token);
break :blk .wrap(.{ .error_value = str_index });
},
else => if (value.toOptional() == underscore_node) {
break :blk .wrap(.under);
} else {
scratch_scope.instructions_top = parent_gz.instructions.items.len;
defer scratch_scope.unstack();
const item_result = try fullBodyExpr(&scratch_scope, scope, item_ri, item, .normal);
if (!scratch_scope.endsWithNoReturn()) {
_ = try scratch_scope.addBreakWithSrcNode(.break_inline, switch_block, item_result, item);
}
const item_slice = scratch_scope.instructionsSlice();
const body_len = astgen.countBodyLenAfterFixupsExtraRefs(item_slice, &.{switch_block});
try payloads.ensureUnusedCapacity(gpa, body_len);
astgen.appendBodyWithFixupsExtraRefsArrayList(payloads, item_slice, &.{switch_block});
break :blk .wrap(.{ .body_len = body_len });
},
};
if (is_multi_case) {
if (is_range) {
const offset = multi_item_offset + items_len + range_i;
payloads.items[multi_item_body_table + offset] = body_start;
payloads.items[multi_items_infos_start + offset] = @bitCast(item_info);
range_i += 1;
} else {
const offset = multi_item_offset + item_i;
payloads.items[multi_item_body_table + offset] = body_start;
payloads.items[multi_items_infos_start + offset] = @bitCast(item_info);
item_i += 1;
}
} else {
payloads.items[scalar_body_table + scalar_case_index] = body_start;
payloads.items[scalar_item_infos_start + scalar_case_index] = @bitCast(item_info);
}
}
}
if (is_multi_case) {
assert(item_i == items_len and range_i == 2 * ranges_len);
payloads.items[multi_case_items_lens_start + multi_case_index] = items_len;
if (any_ranges) {
payloads.items[multi_case_ranges_lens_start + multi_case_index] = ranges_len;
}
multi_item_offset += items_len + 2 * ranges_len;
}
// Capture and prong body
var dbg_var_payload_name: Zir.NullTerminatedString = .empty;
var dbg_var_payload_inst: Zir.Inst.Ref = undefined;
var dbg_var_tag_name: Zir.NullTerminatedString = .empty;
var dbg_var_tag_inst: Zir.Inst.Ref = undefined;
var has_tag_capture = false;
var capture_val_scope: Scope.LocalVal = undefined;
var tag_scope: Scope.LocalVal = undefined;
var err_capture_scope: Scope.LocalVal = undefined;
var payload_capture_scope: Scope.LocalVal = undefined;
var tag_capture_scope: Scope.LocalVal = undefined;
var capture: Zir.Inst.SwitchBlock.ProngInfo.Capture = .none;
const sub_scope = blk: {
const payload_token = case.payload_token orelse break :blk &case_scope.base;
// Check all captures and make them available to the prong body.
// Potential captures are:
// - for regular switch: payload and tag
// - for error switch: switch operand and payload
const prong_body_scope: *Scope = scope: {
const switch_scope: *Scope = if (needs_non_err_handling) blk: {
// We want to have the captured error we're switching on in scope!
err_capture_scope = .{
.parent = &scratch_scope.base,
.gen_zir = &scratch_scope,
.name = err_capture_name,
.inst = switch_operand,
.token_src = err_token,
.id_cat = .capture,
};
break :blk &err_capture_scope.base;
} else &scratch_scope.base;
const payload_token = case.payload_token orelse break :scope switch_scope;
const capture_is_ref = tree.tokenTag(payload_token) == .asterisk;
const ident = payload_token + @intFromBool(capture_is_ref);
@@ -7882,34 +7790,42 @@ fn switchExpr(
var payload_sub_scope: *Scope = undefined;
if (mem.eql(u8, ident_slice, "_")) {
if (capture_is_ref) {
// |*_, tag| is invalid, so we can fail early
return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{});
}
payload_sub_scope = &case_scope.base;
capture = .none;
payload_sub_scope = switch_scope;
} else {
const capture_name = try astgen.identAsString(ident);
try astgen.detectLocalShadowing(&case_scope.base, capture_name, ident, ident_slice, .capture);
capture_val_scope = .{
.parent = &case_scope.base,
.gen_zir = &case_scope,
try astgen.detectLocalShadowing(&scratch_scope.base, capture_name, ident, ident_slice, .capture);
payload_capture_scope = .{
.parent = switch_scope,
.gen_zir = &scratch_scope,
.name = capture_name,
.inst = switch_block.toRef(),
.inst = payload_capture_inst.toRef(),
.token_src = ident,
.id_cat = .capture,
};
dbg_var_name = capture_name;
dbg_var_inst = switch_block.toRef();
payload_sub_scope = &capture_val_scope.base;
dbg_var_payload_name = payload_capture_scope.name;
dbg_var_payload_inst = payload_capture_scope.inst;
payload_sub_scope = &payload_capture_scope.base;
}
const tag_token = if (tree.tokenTag(ident + 1) == .comma)
ident + 2
else
break :blk payload_sub_scope;
if (is_err_switch and capture == .by_ref) {
return astgen.failTok(ident, "error set cannot be captured by reference", .{});
}
const tag_token = if (tree.tokenTag(ident + 1) == .comma) blk: {
break :blk ident + 2;
} else if (capture == .none) {
// discarding the capture is only valid if the tag is captured
// whether the tag capture is discarded is handled below
return astgen.failTok(payload_token, "discard of capture; omit it instead", .{});
} else break :scope payload_sub_scope;
const tag_slice = tree.tokenSlice(tag_token);
if (mem.eql(u8, tag_slice, "_")) {
try astgen.appendErrorTok(tag_token, "discard of tag capture; omit it instead", .{});
} else if (case.inline_token == null) {
return astgen.failTok(tag_token, "tag capture on non-inline prong", .{});
return astgen.failTok(tag_token, "discard of tag capture; omit it instead", .{});
}
const tag_name = try astgen.identAsString(tag_token);
try astgen.detectLocalShadowing(payload_sub_scope, tag_name, tag_token, tag_slice, .@"switch tag capture");
@@ -7917,123 +7833,136 @@ fn switchExpr(
assert(any_has_tag_capture);
has_tag_capture = true;
tag_scope = .{
if (is_err_switch) {
return astgen.failTok(tag_token, "cannot capture tag of error union", .{});
}
tag_capture_scope = .{
.parent = payload_sub_scope,
.gen_zir = &case_scope,
.gen_zir = &scratch_scope,
.name = tag_name,
.inst = tag_inst.toRef(),
.inst = tag_capture_inst.toRef(),
.token_src = tag_token,
.id_cat = .@"switch tag capture",
};
dbg_var_tag_name = tag_name;
dbg_var_tag_inst = tag_inst.toRef();
break :blk &tag_scope.base;
dbg_var_tag_name = tag_capture_scope.name;
dbg_var_tag_inst = tag_capture_scope.inst;
break :scope &tag_capture_scope.base;
};
const header_index: u32 = @intCast(payloads.items.len);
const body_len_index = if (is_multi_case) blk: {
if (case_node.toOptional() == underscore_case_node) {
payloads.items[under_case_index] = header_index;
if (special_prongs.hasOneAdditionalItem()) {
try payloads.resize(gpa, header_index + 2); // item, body_len
const maybe_item_node = case.ast.values[0];
const item_node = if (maybe_item_node.toOptional() == underscore_node)
case.ast.values[1]
else
maybe_item_node;
const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node, .switch_item);
payloads.items[header_index] = @intFromEnum(item_inst);
break :blk header_index + 1;
}
} else {
payloads.items[multi_case_table + multi_case_index] = header_index;
multi_case_index += 1;
}
try payloads.resize(gpa, header_index + 3); // items_len, ranges_len, body_len
if (capture != .none) assert(any_has_payload_capture);
if (is_err_switch) {
assert(!any_payload_is_ref); // should have failed by now
assert(!any_has_tag_capture); // should have failed by now
}
// items
var items_len: u32 = 0;
for (case.ast.values) |item_node| {
if (item_node.toOptional() == underscore_node or
tree.nodeTag(item_node) == .switch_range)
{
continue;
}
items_len += 1;
prong_body: {
scratch_scope.instructions_top = parent_gz.instructions.items.len;
defer scratch_scope.unstack();
const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node, .switch_item);
try payloads.append(gpa, @intFromEnum(item_inst));
}
// ranges
var ranges_len: u32 = 0;
for (case.ast.values) |range| {
if (tree.nodeTag(range) != .switch_range) {
continue;
}
ranges_len += 1;
const first_node, const last_node = tree.nodeData(range).node_and_node;
const first = try comptimeExpr(parent_gz, scope, item_ri, first_node, .switch_item);
const last = try comptimeExpr(parent_gz, scope, item_ri, last_node, .switch_item);
try payloads.appendSlice(gpa, &[_]u32{
@intFromEnum(first), @intFromEnum(last),
});
}
payloads.items[header_index] = items_len;
payloads.items[header_index + 1] = ranges_len;
break :blk header_index + 2;
} else if (case_node.toOptional() == else_case_node) blk: {
payloads.items[else_case_index] = header_index;
try payloads.resize(gpa, header_index + 1); // body_len
break :blk header_index;
} else if (case_node.toOptional() == underscore_case_node) blk: {
assert(!special_prongs.hasAdditionalItems());
payloads.items[under_case_index] = header_index;
try payloads.resize(gpa, header_index + 1); // body_len
break :blk header_index;
} else blk: {
payloads.items[scalar_case_table + scalar_case_index] = header_index;
scalar_case_index += 1;
try payloads.resize(gpa, header_index + 2); // item, body_len
const item_node = case.ast.values[0];
const item_inst = try comptimeExpr(parent_gz, scope, item_ri, item_node, .switch_item);
payloads.items[header_index] = @intFromEnum(item_inst);
break :blk header_index + 1;
};
{
// temporarily stack case_scope on parent_gz
case_scope.instructions_top = parent_gz.instructions.items.len;
defer case_scope.unstack();
if (dbg_var_name != .empty) {
try case_scope.addDbgVar(.dbg_var_val, dbg_var_name, dbg_var_inst);
if (dbg_var_payload_name != .empty) {
try scratch_scope.addDbgVar(.dbg_var_val, dbg_var_payload_name, dbg_var_payload_inst);
}
if (dbg_var_tag_name != .empty) {
try case_scope.addDbgVar(.dbg_var_val, dbg_var_tag_name, dbg_var_tag_inst);
try scratch_scope.addDbgVar(.dbg_var_val, dbg_var_tag_name, dbg_var_tag_inst);
}
if (do_err_trace and nodeMayAppendToErrorTrace(tree, operand_node)) {
_ = try scratch_scope.addSaveErrRetIndex(.always);
}
const target_expr_node = case.ast.target_expr;
const case_result = try fullBodyExpr(&case_scope, sub_scope, block_scope.break_result_info, target_expr_node, .allow_branch_hint);
try checkUsed(parent_gz, &case_scope.base, sub_scope);
if (!parent_gz.refIsNoReturn(case_result)) {
_ = try case_scope.addBreakWithSrcNode(.@"break", switch_block, case_result, target_expr_node);
const case_result = try fullBodyExpr(&scratch_scope, prong_body_scope, block_scope.break_result_info, target_expr_node, .allow_branch_hint);
if (needs_non_err_handling) {
// If we would check `scratch_scope` here, we would get a false
// positive, that being the switch operand itself!
try checkUsed(parent_gz, &err_capture_scope.base, prong_body_scope);
} else {
try checkUsed(parent_gz, &scratch_scope.base, prong_body_scope);
}
if (!scratch_scope.endsWithNoReturn()) {
// As our last action before the break, "pop" the error trace if needed
if (do_err_trace) {
try restoreErrRetIndex(
&scratch_scope,
.{ .block = switch_block },
block_scope.break_result_info,
target_expr_node,
case_result,
);
}
_ = try scratch_scope.addBreakWithSrcNode(.@"break", switch_block, case_result, target_expr_node);
}
const case_slice = case_scope.instructionsSlice();
const extra_insts: []const Zir.Inst.Index = if (has_tag_capture) &.{ switch_block, tag_inst } else &.{switch_block};
const body_len = astgen.countBodyLenAfterFixupsExtraRefs(case_slice, extra_insts);
const body_slice = scratch_scope.instructionsSlice();
const body_start: u32 = @intCast(payloads.items.len);
const body_len = astgen.countBodyLenAfterFixupsExtraRefs(body_slice, prong_body_extra_insts);
try payloads.ensureUnusedCapacity(gpa, body_len);
payloads.items[body_len_index] = @bitCast(Zir.Inst.SwitchBlock.ProngInfo{
astgen.appendBodyWithFixupsExtraRefsArrayList(payloads, body_slice, prong_body_extra_insts);
if (case_node.toOptional() == else_case_node) {
assert(case.ast.values.len == 0);
// Specific `else` bodies can cause Sema to omit the
// "unreachable else prong" error so that certain generic code
// patterns don't trigger it. We do that for these bodies:
// `else => unreachable,`
// `else => return,`
// `else => |e| return e,` (where `e` is any identifier)
const is_simple_noreturn = switch (tree.nodeTag(target_expr_node)) {
.unreachable_literal => true, // `=> unreachable,`
.@"return" => simple_noreturn: {
const retval_node = tree.nodeData(target_expr_node).opt_node.unwrap() orelse {
break :simple_noreturn true; // `=> return,`
};
// Check for `=> |e| return e,`
if (capture != .by_val) break :simple_noreturn false;
if (tree.nodeTag(retval_node) != .identifier) break :simple_noreturn false;
const payload_name = try astgen.identAsString(case.payload_token.?);
const retval_name = try astgen.identAsString(tree.nodeMainToken(retval_node));
break :simple_noreturn payload_name == retval_name;
},
else => false,
};
else_info = .{
.body_len = @intCast(body_len),
.capture = capture,
.is_inline = case.inline_token != null,
.has_tag_capture = has_tag_capture,
.is_simple_noreturn = is_simple_noreturn,
};
else_prong_body_start = body_start;
break :prong_body;
}
// We allow prongs with error items which are not inside the error set
// being switched on if their body is `=> comptime unreachable,`.
const is_comptime_unreach = comptime_unreach: {
if (tree.nodeTag(target_expr_node) != .@"comptime") break :comptime_unreach false;
const comptime_node = tree.nodeData(target_expr_node).node;
break :comptime_unreach tree.nodeTag(comptime_node) == .unreachable_literal;
};
const prong_info: Zir.Inst.SwitchBlock.ProngInfo = .{
.body_len = @intCast(body_len),
.capture = capture,
.is_inline = case.inline_token != null,
.has_tag_capture = has_tag_capture,
});
appendBodyWithFixupsExtraRefsArrayList(astgen, payloads, case_slice, extra_insts);
.is_comptime_unreach = is_comptime_unreach,
};
if (is_multi_case) {
payloads.items[multi_prong_body_table + multi_case_index] = body_start;
payloads.items[multi_prong_infos_start + multi_case_index] = @bitCast(prong_info);
multi_case_index += 1;
} else {
// prong body start is implicit, it's right behind our only item.
payloads.items[scalar_prong_infos_start + scalar_case_index] = @bitCast(prong_info);
scalar_case_index += 1;
}
}
}
assert(scalar_case_index + multi_case_index + @intFromBool(has_else) == case_nodes.len);
assert(multi_items_infos_start + multi_item_offset == bodies_start);
if (switch_full.label_token) |label_token| if (!block_scope.label.?.used) {
try astgen.appendErrorTok(label_token, "unused switch label", .{});
@@ -8042,84 +7971,100 @@ fn switchExpr(
// Now that the item expressions are generated we can add this.
try parent_gz.instructions.append(gpa, switch_block);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlock).@"struct".fields.len +
@intFromBool(multi_cases_len != 0) +
@intFromBool(any_has_tag_capture) +
payloads.items.len - scratch_top);
// We've collected all of the data we need! Now we just have to finalize it
// by copying our bodies from `payloads` to `extra`, this time in the order
// expected by ZIR consumers.
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlock{
.operand = raw_operand,
.bits = Zir.Inst.SwitchBlock.Bits{
.has_multi_cases = multi_cases_len != 0,
.special_prongs = special_prongs,
.any_has_tag_capture = any_has_tag_capture,
.any_non_inline_capture = any_non_inline_capture,
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlock).@"struct".fields.len +
@intFromBool(multi_cases_len > 0) + // multi_cases_len
@intFromBool(payload_capture_inst_is_placeholder) + // payload_capture_placeholder
@intFromBool(tag_capture_inst_is_placeholder) + // tag_capture_placeholder
@intFromBool(needs_non_err_handling) + // catch_or_if_src_node_offset
@intFromBool(needs_non_err_handling) + // non_err_info
@intFromBool(has_else) + // else_info
payloads.items.len - body_table_end); // item infos and bodies
// singular pieces of data
const zir_payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlock{
.raw_operand = raw_operand,
.bits = .{
.has_multi_cases = multi_cases_len > 0,
.any_ranges = any_ranges,
.has_else = has_else,
.has_under = has_under,
.has_continue = switch_full.label_token != null and block_scope.label.?.used_for_continue,
.any_maybe_runtime_capture = any_maybe_runtime_capture,
.payload_capture_inst_is_placeholder = payload_capture_inst_is_placeholder,
.tag_capture_inst_is_placeholder = tag_capture_inst_is_placeholder,
.scalar_cases_len = @intCast(scalar_cases_len),
},
});
astgen.instructions.items(.data)[@intFromEnum(switch_block)].pl_node.payload_index = zir_payload_index;
if (multi_cases_len != 0) {
astgen.extra.appendAssumeCapacity(multi_cases_len);
if (multi_cases_len > 0) astgen.extra.appendAssumeCapacity(multi_cases_len);
if (payload_capture_inst_is_placeholder) astgen.extra.appendAssumeCapacity(@intFromEnum(payload_capture_inst));
if (tag_capture_inst_is_placeholder) astgen.extra.appendAssumeCapacity(@intFromEnum(tag_capture_inst));
if (needs_non_err_handling) {
const catch_or_if_src_node_offset = parent_gz.nodeIndexToRelative(catch_or_if_node);
astgen.extra.appendAssumeCapacity(@bitCast(@intFromEnum(catch_or_if_src_node_offset)));
astgen.extra.appendAssumeCapacity(@bitCast(non_err_info));
}
if (has_else) astgen.extra.appendAssumeCapacity(@bitCast(else_info));
if (any_has_tag_capture) {
astgen.extra.appendAssumeCapacity(@intFromEnum(tag_inst));
const extra_payloads_start = astgen.extra.items.len;
// body lens
astgen.extra.appendSliceAssumeCapacity(payloads.items[body_table_end..bodies_start]);
// bodies
if (needs_non_err_handling) {
const body = payloads.items[non_err_prong_body_start..][0..non_err_info.body_len];
astgen.extra.appendSliceAssumeCapacity(body);
}
const zir_datas = astgen.instructions.items(.data);
zir_datas[@intFromEnum(switch_block)].pl_node.payload_index = payload_index;
if (has_else) {
const start_index = payloads.items[else_case_index];
var end_index = start_index + 1;
const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[start_index]);
end_index += prong_info.body_len;
astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]);
const body = payloads.items[else_prong_body_start..][0..else_info.body_len];
astgen.extra.appendSliceAssumeCapacity(body);
}
if (has_under) {
const start_index = payloads.items[under_case_index];
var body_len_index = start_index;
var end_index = start_index;
switch (underscore_additional_items) {
.none => {
end_index += 1;
},
.one => {
body_len_index += 1;
end_index += 2;
},
.many => {
body_len_index += 2;
const items_len = payloads.items[start_index];
const ranges_len = payloads.items[start_index + 1];
end_index += 3 + items_len + 2 * ranges_len;
},
for (0..scalar_cases_len) |scalar_i| {
const item_info: Zir.Inst.SwitchBlock.ItemInfo = @bitCast(payloads.items[scalar_item_infos_start + scalar_i]);
const item_body_start = payloads.items[scalar_body_table + scalar_i];
const item_body = payloads.items[item_body_start..][0 .. item_info.bodyLen() orelse 0];
const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[scalar_prong_infos_start + scalar_i]);
const prong_body_start = item_body_start + item_body.len;
const prong_body = payloads.items[prong_body_start..][0..prong_info.body_len];
astgen.extra.appendSliceAssumeCapacity(prong_body);
astgen.extra.appendSliceAssumeCapacity(item_body);
}
var multi_item_i: usize = 0;
for (0..multi_cases_len) |multi_i| {
const prong_body_start = payloads.items[multi_prong_body_table + multi_i];
const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[multi_prong_infos_start + multi_i]);
const prong_body = payloads.items[prong_body_start..][0..prong_info.body_len];
astgen.extra.appendSliceAssumeCapacity(prong_body);
const items_len = payloads.items[multi_case_items_lens_start + multi_i];
const ranges_len = if (any_ranges) ranges_len: {
break :ranges_len payloads.items[multi_case_ranges_lens_start + multi_i];
} else 0;
// The table entries and body lens are already in the correct order so we
// don't have to differentiate between items and ranges here.
for (0..items_len + 2 * ranges_len) |_| {
const item_info: Zir.Inst.SwitchBlock.ItemInfo = @bitCast(payloads.items[multi_items_infos_start + multi_item_i]);
if (item_info.bodyLen()) |body_len| {
const body_start = payloads.items[multi_item_body_table + multi_item_i];
const body = payloads.items[body_start..][0..body_len];
astgen.extra.appendSliceAssumeCapacity(body);
}
multi_item_i += 1;
}
const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[body_len_index]);
end_index += prong_info.body_len;
astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]);
}
for (payloads.items[scalar_case_table..case_table_end], 0..) |start_index, i| {
var body_len_index = start_index;
var end_index = start_index;
const table_index = scalar_case_table + i;
if (table_index < multi_case_table) {
body_len_index += 1;
end_index += 2;
} else {
body_len_index += 2;
const items_len = payloads.items[start_index];
const ranges_len = payloads.items[start_index + 1];
end_index += 3 + items_len + 2 * ranges_len;
}
const prong_info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(payloads.items[body_len_index]);
end_index += prong_info.body_len;
astgen.extra.appendSliceAssumeCapacity(payloads.items[start_index..end_index]);
}
// Make sure we didn't forget anything...
assert(multi_item_i == total_items_len + 2 * total_ranges_len - scalar_cases_len);
assert(astgen.extra.items.len - extra_payloads_start == payloads.items.len - body_table_end);
if (need_result_rvalue) {
return rvalue(parent_gz, ri, switch_block.toRef(), node);
return rvalue(parent_gz, ri, switch_block.toRef(), switch_node);
} else {
return switch_block.toRef();
}
@@ -8382,7 +8327,6 @@ fn localVarRef(
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const name_str_index = try astgen.identAsString(ident_token);
var s = scope;
var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already
var found_needs_tunnel: bool = undefined; // defined when `found_already != null`
var found_namespaces_out: u32 = undefined; // defined when `found_already != null`
@@ -8392,10 +8336,8 @@ fn localVarRef(
// defined by `num_namespaces_out != 0`
var capturing_namespace: *Scope.Namespace = undefined;
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
find_scope: switch (scope.unwrap()) {
.local_val => |local_val| {
if (local_val.name == name_str_index) {
// Locals cannot shadow anything, so we do not need to look for ambiguous
// references in this case.
@@ -8418,10 +8360,9 @@ fn localVarRef(
return rvalueNoCoercePreRef(gz, ri, value_inst, ident);
}
s = local_val.parent;
continue :find_scope local_val.parent.unwrap();
},
.local_ptr => {
const local_ptr = s.cast(Scope.LocalPtr).?;
.local_ptr => |local_ptr| {
if (local_ptr.name == name_str_index) {
if (ri.rl == .discard and ri.ctx == .assignment) {
local_ptr.discarded = .fromToken(ident_token);
@@ -8470,12 +8411,11 @@ fn localVarRef(
},
}
}
s = local_ptr.parent;
continue :find_scope local_ptr.parent.unwrap();
},
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace => {
const ns = s.cast(Scope.Namespace).?;
.gen_zir => |gen_zir| continue :find_scope gen_zir.parent.unwrap(),
.defer_normal, .defer_error => |defer_scope| continue :find_scope defer_scope.parent.unwrap(),
.namespace => |ns| {
if (ns.decls.get(name_str_index)) |i| {
if (found_already) |f| {
return astgen.failNodeNotes(ident, "ambiguous reference", .{}, &.{
@@ -8490,10 +8430,10 @@ fn localVarRef(
}
num_namespaces_out += 1;
capturing_namespace = ns;
s = ns.parent;
continue :find_scope ns.parent.unwrap();
},
.top => break,
};
.top => break :find_scope,
}
if (found_already == null) {
const ident_name = try astgen.identifierTokenString(ident_token);
return astgen.failNode(ident, "use of undeclared identifier '{s}'", .{ident_name});
@@ -11785,6 +11725,26 @@ const Scope = struct {
};
}
fn unwrap(base: *Scope) Unwrapped {
return switch (base.tag) {
inline else => |tag| @unionInit(
Unwrapped,
@tagName(tag),
@alignCast(@fieldParentPtr("base", base)),
),
};
}
const Unwrapped = union(Tag) {
gen_zir: *GenZir,
local_val: *LocalVal,
local_ptr: *LocalPtr,
defer_normal: *Defer,
defer_error: *Defer,
namespace: *Namespace,
top: *Top,
};
const Tag = enum {
gen_zir,
local_val,
@@ -11910,8 +11870,8 @@ const GenZir = struct {
/// whenever we know Sema will analyze the current block with `is_comptime`,
/// for instance when we're within a `struct_decl` or a `block_comptime`.
is_comptime: bool,
/// Whether we're in an expression within a `@TypeOf` operand. In this case, closure of runtime
/// variables is permitted where it is usually not.
/// Whether we're in an expression within a `@TypeOf` operand. In this case,
/// closure of runtime variables is permitted where it is usually not.
is_typeof: bool = false,
/// This is set to true for a `GenZir` of a `block_inline`, indicating that
/// exits from this block should use `break_inline` rather than `break`.
@@ -11932,10 +11892,27 @@ const GenZir = struct {
/// if use is strictly nested. This saves prior size of list for unstacking.
instructions_top: usize,
label: ?Label = null,
break_block: Zir.Inst.OptionalIndex = .none,
continue_block: Zir.Inst.OptionalIndex = .none,
/// If `true`, unlabeled `break` and `continue` exprs can target this `GenZir`.
allow_unlabeled_control_flow: bool = false,
/// If `label` is `null` and `unlabeled_control_flow_target` is `false`,
/// this is unused and may be `undefined`.
/// Otherwise, this is the target for a `break` instruction when a `break`
/// targets this `GenZir`.
break_target: Zir.Inst.Index = undefined,
/// If `label` is `null` and `unlabeled_control_flow_target` is `false`,
/// this is unused and may be `undefined`.
continue_target: union(enum) {
/// A `continue` cannot target this `GenZir`; emit an error.
none,
/// Emit a `break` instruction targeting this block.
@"break": Zir.Inst.Index,
/// Emit a `switch_continue` instruction targeting this `switch_block`.
switch_continue: Zir.Inst.Index,
} = undefined,
/// Only valid when setBreakResultInfo is called.
break_result_info: AstGen.ResultInfo = undefined,
/// If `continue_target` is *not* `switch_continue`, this is unused and may
/// be `undefined`.
continue_result_info: AstGen.ResultInfo = undefined,
suspend_node: Ast.Node.OptionalIndex = .none,
@@ -12002,7 +11979,6 @@ const GenZir = struct {
const Label = struct {
token: Ast.TokenIndex,
block_inst: Zir.Inst.Index,
used: bool = false,
used_for_continue: bool = false,
};
@@ -13365,11 +13341,9 @@ fn detectLocalShadowing(
});
}
var s = scope;
var outer_scope = false;
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
find_scope: switch (scope.unwrap()) {
.local_val => |local_val| {
if (local_val.name == ident_name) {
const name_slice = mem.span(astgen.nullTerminatedString(ident_name));
const name = try gpa.dupe(u8, name_slice);
@@ -13395,10 +13369,9 @@ fn detectLocalShadowing(
),
});
}
s = local_val.parent;
continue :find_scope local_val.parent.unwrap();
},
.local_ptr => {
const local_ptr = s.cast(Scope.LocalPtr).?;
.local_ptr => |local_ptr| {
if (local_ptr.name == ident_name) {
const name_slice = mem.span(astgen.nullTerminatedString(ident_name));
const name = try gpa.dupe(u8, name_slice);
@@ -13424,14 +13397,12 @@ fn detectLocalShadowing(
),
});
}
s = local_ptr.parent;
continue :find_scope local_ptr.parent.unwrap();
},
.namespace => {
.namespace => |ns| {
outer_scope = true;
const ns = s.cast(Scope.Namespace).?;
const decl_node = ns.decls.get(ident_name) orelse {
s = ns.parent;
continue;
continue :find_scope ns.parent.unwrap();
};
const name_slice = mem.span(astgen.nullTerminatedString(ident_name));
const name = try gpa.dupe(u8, name_slice);
@@ -13442,13 +13413,13 @@ fn detectLocalShadowing(
try astgen.errNoteNode(decl_node, "declared here", .{}),
});
},
.gen_zir => {
s = s.cast(GenZir).?.parent;
.gen_zir => |gen_zir| {
outer_scope = true;
continue :find_scope gen_zir.parent.unwrap();
},
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.top => break,
};
.defer_normal, .defer_error => |defer_scope| continue :find_scope defer_scope.parent.unwrap(),
.top => break :find_scope,
}
}
const LineColumn = struct { u32, u32 };
@@ -13685,10 +13656,8 @@ fn scanContainer(
continue;
}
var s = namespace.parent;
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
find_scope: switch (namespace.parent.unwrap()) {
.local_val => |local_val| {
if (local_val.name == name_str_index) {
try astgen.appendErrorTokNotes(name_token, "declaration '{s}' shadows {s} from outer scope", .{
token_bytes, @tagName(local_val.id_cat),
@@ -13700,12 +13669,11 @@ fn scanContainer(
),
});
any_invalid_declarations = true;
break;
break :find_scope;
}
s = local_val.parent;
continue :find_scope local_val.parent.unwrap();
},
.local_ptr => {
const local_ptr = s.cast(Scope.LocalPtr).?;
.local_ptr => |local_ptr| {
if (local_ptr.name == name_str_index) {
try astgen.appendErrorTokNotes(name_token, "declaration '{s}' shadows {s} from outer scope", .{
token_bytes, @tagName(local_ptr.id_cat),
@@ -13717,15 +13685,15 @@ fn scanContainer(
),
});
any_invalid_declarations = true;
break;
break :find_scope;
}
s = local_ptr.parent;
continue :find_scope local_ptr.parent.unwrap();
},
.namespace => s = s.cast(Scope.Namespace).?.parent,
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.top => break,
};
.namespace => |ns| continue :find_scope ns.parent.unwrap(),
.gen_zir => |gen_zir| continue :find_scope gen_zir.parent.unwrap(),
.defer_normal, .defer_error => |defer_scope| continue :find_scope defer_scope.parent.unwrap(),
.top => break :find_scope,
}
}
if (!any_duplicates) {
@@ -13776,6 +13744,19 @@ fn scanContainer(
return error.AnalysisFail;
}
fn appendPlaceholder(astgen: *AstGen) Allocator.Error!Zir.Inst.Index {
const inst: Zir.Inst.Index = @enumFromInt(astgen.instructions.len);
try astgen.instructions.append(astgen.gpa, .{
.tag = .extended,
.data = .{ .extended = .{
.opcode = .value_placeholder,
.small = undefined,
.operand = undefined,
} },
});
return inst;
}
/// Assumes capacity for body has already been added. Needed capacity taking into
/// account fixups can be found with `countBodyLenAfterFixups`.
fn appendBodyWithFixups(astgen: *AstGen, body: []const Zir.Inst.Index) void {
+398 -292
View File
@@ -95,7 +95,6 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) ExtraData(T) {
Inst.Call.Flags,
Inst.BuiltinCall.Flags,
Inst.SwitchBlock.Bits,
Inst.SwitchBlockErrUnion.Bits,
Inst.FuncFancy.Bits,
Inst.Declaration.Flags,
Inst.Param.Type,
@@ -350,7 +349,8 @@ pub const Inst = struct {
/// Uses the `break` union field.
break_inline,
/// Branch from within a switch case to the case specified by the operand.
/// Uses the `break` union field. `block_inst` refers to a `switch_block` or `switch_block_ref`.
/// Uses the `break` union field. `block_inst` refers to a `switch_block`/
/// `switch_block_ref`/`switch_block_err_union`.
switch_continue,
/// Checks that comptime control flow does not happen inside a runtime block.
/// Uses the `un_node` union field.
@@ -722,8 +722,10 @@ pub const Inst = struct {
/// A switch expression. Uses the `pl_node` union field.
/// AST node is the switch, payload is `SwitchBlock`. Operand is a pointer.
switch_block_ref,
/// A switch on an error union `a catch |err| switch (err) {...}`.
/// Uses the `pl_node` union field. AST node is the `catch`, payload is `SwitchBlockErrUnion`.
/// A switch on an error union:
/// - `eu catch |err| switch (err) {...}`, AST node is the `catch`.
/// - `if (eu) |payload| {...} else |err| {...}`, AST node is the `if`.
/// Uses the `pl_node` union field. Payload is `SwitchBlock`.
switch_block_err_union,
/// Check that operand type supports the dereference operand (.*).
/// Uses the `un_node` field.
@@ -3293,143 +3295,151 @@ pub const Inst = struct {
};
/// Trailing:
/// 0. multi_cases_len: u32 // if `has_multi_cases`
/// 1. err_capture_inst: u32 // if `any_uses_err_capture`
/// 2. non_err_body {
/// info: ProngInfo,
/// inst: Index // for every `info.body_len`
/// }
/// 3. else_body { // if `has_else`
/// info: ProngInfo,
/// inst: Index // for every `info.body_len`
/// }
/// 4. scalar_cases: { // for every `scalar_cases_len`
/// item: Ref,
/// info: ProngInfo,
/// inst: Index // for every `info.body_len`
/// }
/// 5. multi_cases: { // for every `multi_cases_len`
/// items_len: u32,
/// ranges_len: u32,
/// info: ProngInfo,
/// item: Ref // for every `items_len`
/// ranges: { // for every `ranges_len`
/// item_first: Ref,
/// item_last: Ref,
/// 0. multi_cases_len: u32, // If has_multi_cases is set.
/// 1. payload_capture_placeholder: Inst.Index, // If payload_capture_inst_is_placeholder is set.
/// // Index of instruction prongs use to refer to their payload capture.
/// 2. tag_capture_placeholder: Inst.Index, // If tag_capture_inst_is_placeholder is set.
/// // Index of instruction prongs use to refer to their tag capture.
/// 3. catch_or_if_src_node_offset: Ast.Node.Offset, // If inst is switch_block_err_union.
/// 4. non_err_info: ProngInfo.NonErr, // If inst is switch_block_err_union.
/// 5. else_info: ProngInfo.Else, // If has_else is set.
/// 6. scalar_prong_info: ProngInfo, // for every scalar_cases_len
/// 7. multi_prong_info: ProngInfo, // for every multi_cases_len
/// 8. multi_case_items_len: u32, // for every multi_cases_len
/// 9. multi_case_ranges_len: u32, // If has_ranges is set: for every multi_cases_len
/// 10. scalar_item_info: ItemInfo, // for every scalar_cases_len
/// 11. multi_items_info: { // for every multi_cases_len
/// item_info: ItemInfo, // for each multi_case_items_len
/// range_items_info: { // for each multi_case_ranges_len
/// first_info: ItemInfo,
/// last_info: ItemInfo,
/// }
/// inst: Index // for every `info.body_len`
/// }
///
/// When analyzing a case body, the switch instruction itself refers to the
/// captured error, or to the success value in `non_err_body`. Whether this
/// is captured by reference or by value depends on whether the `byref` bit
/// is set for the corresponding body. `err_capture_inst` refers to the error
/// capture outside of the `switch`, i.e. `err` in
/// `x catch |err| switch (err) { ... }`.
pub const SwitchBlockErrUnion = struct {
operand: Ref,
/// 12. non_err_body {
/// body_inst: Index // for every non_err_info.body_len
/// }
/// 13. else_body: { // If has_else is set.
/// body_inst: Inst.Index, // for every else_info.body_len
/// }
/// 14. scalar_bodies: { // for every scalar_cases_len
/// prong_body: { // for each body_len in scalar_prong_info
/// body_inst: Inst.Index, // for every body_len
/// }
/// item_body: { // for each body_len in scalar_item_info
/// body_inst: Inst.Index, // for every body_len
/// }
/// }
/// 15. multi_bodies: { // for each multi_items_info
/// prong_body: {
/// body_inst: Inst.Index, // for each multi_prong_info.body_len
/// }
/// item_body: { // for each item_info
/// body_inst: Inst.Index, // for every item_info.body_len
/// }
/// range_bodies: { // for each .{first_info, last_info} in range_items_info
/// first_body_inst: Inst.Index, // for every first_info.body_len
/// last_body_inst: Inst.Index, // for every last_info.body_len
/// }
/// }
pub const SwitchBlock = struct {
/// Either `catch`/`if` or `switch` operand.
raw_operand: Ref,
bits: Bits,
main_src_node_offset: Ast.Node.Offset,
pub const Bits = packed struct(u32) {
/// If true, one or more prongs have multiple items.
has_multi_cases: bool,
/// If true, there is an else prong. This is mutually exclusive with `has_under`.
/// If true, one or more prongs have ranges.
/// Only valid if `has_multi_cases` is also set.
any_ranges: bool,
has_else: bool,
any_uses_err_capture: bool,
payload_is_ref: bool,
has_under: bool,
/// If true, at least one prong contains a `continue`.
/// Only valid if `has_label` is set.
has_continue: bool,
// If true, at least one prong has a non-inline payload/tag capture.
any_maybe_runtime_capture: bool,
payload_capture_inst_is_placeholder: bool,
tag_capture_inst_is_placeholder: bool,
scalar_cases_len: ScalarCasesLen,
pub const ScalarCasesLen = u28;
// NOTE maybe don't steal any more bits from poor `scalar_cases_len`
// and split `Bits` into two parts instead, `raw_operand` surely
// wouldn't mind donating a couple of bits for that purpose...
pub const ScalarCasesLen = u24;
};
pub const MultiProng = struct {
items: []const Ref,
body: []const Index,
};
};
/// 0. multi_cases_len: u32 // If has_multi_cases is set.
/// 1. tag_capture_inst: u32 // If any_has_tag_capture is set. Index of instruction prongs use to refer to the inline tag capture.
/// 2. else_body { // If special_prong.hasElse() is set.
/// info: ProngInfo,
/// body member Index for every info.body_len
/// }
/// 3. under_body { // If special_prong.hasUnder() is set.
/// item: Ref, // If special_prong.hasOneAdditionalItem() is set.
/// items_len: u32, // If special_prong.hasManyAdditionalItems() is set.
/// ranges_len: u32, // If special_prong.hasManyAdditionalItems() is set.
/// info: ProngInfo,
/// item: Ref, // for every items_len
/// ranges: { // for every ranges_len
/// item_first: Ref,
/// item_last: Ref,
/// }
/// body member Index for every info.body_len
/// }
/// 4. scalar_cases: { // for every scalar_cases_len
/// item: Ref,
/// info: ProngInfo,
/// body member Index for every info.body_len
/// }
/// 5. multi_cases: { // for every multi_cases_len
/// items_len: u32,
/// ranges_len: u32,
/// info: ProngInfo,
/// item: Ref, // for every items_len
/// ranges: { // for every ranges_len
/// item_first: Ref,
/// item_last: Ref,
/// }
/// body member Index for every info.body_len
/// }
///
/// When analyzing a case body, the switch instruction itself refers to the
/// captured payload. Whether this is captured by reference or by value
/// depends on whether the `byref` bit is set for the corresponding body.
pub const SwitchBlock = struct {
/// The operand passed to the `switch` expression. If this is a
/// `switch_block`, this is the operand value; if `switch_block_ref` it
/// is a pointer to the operand. `switch_block_ref` is always used if
/// any prong has a byref capture.
operand: Ref,
bits: Bits,
/// These are stored in trailing data in `extra` for each prong.
pub const ProngInfo = packed struct(u32) {
body_len: u28,
body_len: u27,
capture: ProngInfo.Capture,
is_inline: bool,
has_tag_capture: bool,
is_comptime_unreach: bool,
pub const Capture = enum(u2) {
none,
by_val,
by_ref,
};
pub const NonErr = packed struct(u32) {
body_len: u29,
capture: ProngInfo.Capture,
operand_is_ref: bool,
};
pub const Else = packed struct(u32) {
body_len: u27,
capture: ProngInfo.Capture,
is_inline: bool,
has_tag_capture: bool,
is_simple_noreturn: bool,
};
};
pub const Bits = packed struct(u32) {
/// If true, one or more prongs have multiple items.
has_multi_cases: bool,
/// Information about the special prong.
special_prongs: SpecialProngs,
/// If true, at least one prong has an inline tag capture.
any_has_tag_capture: bool,
/// If true, at least one prong has a capture which may not
/// be comptime-known via `inline`.
any_non_inline_capture: bool,
/// If true, at least one prong contains a `continue`.
has_continue: bool,
scalar_cases_len: ScalarCasesLen,
pub const ItemInfo = packed struct(u32) {
kind: ItemInfo.Kind,
data: u30,
pub const ScalarCasesLen = u25;
pub const Kind = enum(u2) {
enum_literal,
error_value,
body_len,
under,
};
pub const Unwrapped = union(ItemInfo.Kind) {
enum_literal: Zir.NullTerminatedString,
error_value: Zir.NullTerminatedString,
body_len: u32,
under,
};
pub fn wrap(unwrapped: ItemInfo.Unwrapped) ItemInfo {
const data_uncasted: u32 = switch (unwrapped) {
.enum_literal => |str_index| @intFromEnum(str_index),
.error_value => |str_index| @intFromEnum(str_index),
.body_len => |body_len| body_len,
.under => 0,
};
return .{ .kind = unwrapped, .data = @intCast(data_uncasted) };
}
pub fn unwrap(item_info: ItemInfo) ItemInfo.Unwrapped {
return switch (item_info.kind) {
.enum_literal => .{ .enum_literal = @enumFromInt(item_info.data) },
.error_value => .{ .error_value = @enumFromInt(item_info.data) },
.body_len => .{ .body_len = item_info.data },
.under => .under,
};
}
pub fn bodyLen(item_info: ItemInfo) ?u32 {
return if (item_info.kind == .body_len) item_info.data else null;
}
};
pub const MultiProng = struct {
items: []const Ref,
body: []const Index,
};
pub const Kind = enum { default, ref, err_union };
};
pub const ArrayInitRefTy = struct {
@@ -4004,69 +4014,6 @@ pub const Inst = struct {
};
};
pub const SpecialProngs = enum(u3) {
none = 0b000,
/// Simple `else` prong.
/// `else => {},`
@"else" = 0b001,
/// Simple `_` prong.
/// `_ => {},`
under = 0b010,
/// Both an `else` and a `_` prong.
/// `else => {},`
/// `_ => {},`
under_and_else = 0b011,
/// `_` prong with 1 additional item.
/// `a, _ => {},`
under_one_item = 0b100,
/// Both an `else` and a `_` prong with 1 additional item.
/// `else => {},`
/// `a, _ => {},`
under_one_item_and_else = 0b101,
/// `_` prong with >1 additional items.
/// `a, _, b => {},`
under_many_items = 0b110,
/// Both an `else` and a `_` prong with >1 additional items.
/// `else => {},`
/// `a, _, b => {},`
under_many_items_and_else = 0b111,
pub const AdditionalItems = enum(u3) {
none = @intFromEnum(SpecialProngs.under),
one = @intFromEnum(SpecialProngs.under_one_item),
many = @intFromEnum(SpecialProngs.under_many_items),
};
pub fn init(has_else: bool, has_under: bool, additional_items: AdditionalItems) SpecialProngs {
const else_bit: u3 = @intFromBool(has_else);
const under_bits: u3 = if (has_under)
@intFromEnum(additional_items)
else
@intFromEnum(SpecialProngs.none);
return @enumFromInt(else_bit | under_bits);
}
pub fn hasElse(special_prongs: SpecialProngs) bool {
return (@intFromEnum(special_prongs) & 0b001) != 0;
}
pub fn hasUnder(special_prongs: SpecialProngs) bool {
return (@intFromEnum(special_prongs) & 0b110) != 0;
}
pub fn hasAdditionalItems(special_prongs: SpecialProngs) bool {
return (@intFromEnum(special_prongs) & 0b100) != 0;
}
pub fn hasOneAdditionalItem(special_prongs: SpecialProngs) bool {
return (@intFromEnum(special_prongs) & 0b110) == @intFromEnum(SpecialProngs.under_one_item);
}
pub fn hasManyAdditionalItems(special_prongs: SpecialProngs) bool {
return (@intFromEnum(special_prongs) & 0b110) == @intFromEnum(SpecialProngs.under_many_items);
}
};
pub const DeclIterator = struct {
extra_index: u32,
decls_remaining: u32,
@@ -4842,8 +4789,45 @@ fn findTrackableInner(
const body = zir.bodySlice(extra.end, extra.data.body_len);
try zir.findTrackableBody(gpa, contents, defers, body);
},
.switch_block, .switch_block_ref => return zir.findTrackableSwitch(gpa, contents, defers, inst, .normal),
.switch_block_err_union => return zir.findTrackableSwitch(gpa, contents, defers, inst, .err_union),
.switch_block,
.switch_block_ref,
.switch_block_err_union,
=> {
const zir_switch = zir.getSwitchBlock(inst);
if (zir_switch.non_err_case) |non_err_case| {
try zir.findTrackableBody(gpa, contents, defers, non_err_case.body);
}
if (zir_switch.else_case) |else_case| {
try zir.findTrackableBody(gpa, contents, defers, else_case.body);
}
var extra_index = zir_switch.end;
var case_it = zir_switch.iterateCases();
while (case_it.next()) |case| {
const prong_body = zir.bodySlice(extra_index, case.prong_info.body_len);
extra_index += prong_body.len;
try zir.findTrackableBody(gpa, contents, defers, prong_body);
for (case.item_infos) |item_info| {
if (item_info.bodyLen()) |body_len| {
const item_body = zir.bodySlice(extra_index, body_len);
extra_index += item_body.len;
try zir.findTrackableBody(gpa, contents, defers, item_body);
}
}
for (case.range_infos) |range_info| {
if (range_info[0].bodyLen()) |body_len| {
const first_body = zir.bodySlice(extra_index, body_len);
extra_index += first_body.len;
try zir.findTrackableBody(gpa, contents, defers, first_body);
}
if (range_info[1].bodyLen()) |body_len| {
const last_body = zir.bodySlice(extra_index, body_len);
extra_index += last_body.len;
try zir.findTrackableBody(gpa, contents, defers, last_body);
}
}
}
},
.suspend_block => @panic("TODO iterate suspend block"),
@@ -4890,119 +4874,6 @@ fn findTrackableInner(
}
}
fn findTrackableSwitch(
zir: Zir,
gpa: Allocator,
contents: *DeclContents,
defers: *std.AutoHashMapUnmanaged(u32, void),
inst: Inst.Index,
/// Distinguishes between `switch_block[_ref]` and `switch_block_err_union`.
comptime kind: enum { normal, err_union },
) Allocator.Error!void {
const inst_data = zir.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = zir.extraData(switch (kind) {
.normal => Inst.SwitchBlock,
.err_union => Inst.SwitchBlockErrUnion,
}, inst_data.payload_index);
var extra_index: usize = extra.end;
const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
const multi_cases_len = zir.extra[extra_index];
extra_index += 1;
break :blk multi_cases_len;
} else 0;
if (switch (kind) {
.normal => extra.data.bits.any_has_tag_capture,
.err_union => extra.data.bits.any_uses_err_capture,
}) {
extra_index += 1;
}
const has_special = switch (kind) {
.normal => extra.data.bits.special_prongs != .none,
.err_union => has_special: {
// Handle `non_err_body` first.
const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]);
extra_index += 1;
const body = zir.bodySlice(extra_index, prong_info.body_len);
extra_index += body.len;
try zir.findTrackableBody(gpa, contents, defers, body);
break :has_special extra.data.bits.has_else;
},
};
if (has_special) {
const has_else = if (kind == .normal)
extra.data.bits.special_prongs.hasElse()
else
true;
if (has_else) {
const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]);
extra_index += 1;
const body = zir.bodySlice(extra_index, prong_info.body_len);
extra_index += body.len;
try zir.findTrackableBody(gpa, contents, defers, body);
}
if (kind == .normal) {
const special_prongs = extra.data.bits.special_prongs;
if (special_prongs.hasUnder()) {
var trailing_items_len: u32 = 0;
if (special_prongs.hasOneAdditionalItem()) {
extra_index += 1;
} else if (special_prongs.hasManyAdditionalItems()) {
const items_len = zir.extra[extra_index];
extra_index += 1;
const ranges_len = zir.extra[extra_index];
extra_index += 1;
trailing_items_len = items_len + ranges_len * 2;
}
const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]);
extra_index += 1 + trailing_items_len;
const body = zir.bodySlice(extra_index, prong_info.body_len);
extra_index += body.len;
try zir.findTrackableBody(gpa, contents, defers, body);
}
}
}
{
const scalar_cases_len = extra.data.bits.scalar_cases_len;
for (0..scalar_cases_len) |_| {
extra_index += 1;
const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]);
extra_index += 1;
const body = zir.bodySlice(extra_index, prong_info.body_len);
extra_index += body.len;
try zir.findTrackableBody(gpa, contents, defers, body);
}
}
{
for (0..multi_cases_len) |_| {
const items_len = zir.extra[extra_index];
extra_index += 1;
const ranges_len = zir.extra[extra_index];
extra_index += 1;
const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]);
extra_index += 1;
extra_index += items_len + ranges_len * 2;
const body = zir.bodySlice(extra_index, prong_info.body_len);
extra_index += body.len;
try zir.findTrackableBody(gpa, contents, defers, body);
}
}
}
fn findTrackableBody(
zir: Zir,
gpa: Allocator,
@@ -5337,6 +5208,241 @@ pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash {
}
}
pub fn getSwitchBlock(zir: *const Zir, switch_inst: Inst.Index) UnwrappedSwitchBlock {
const has_non_err = switch (zir.instructions.items(.tag)[@intFromEnum(switch_inst)]) {
.switch_block, .switch_block_ref => false,
.switch_block_err_union => true,
else => unreachable,
};
const inst_data = zir.instructions.items(.data)[@intFromEnum(switch_inst)].pl_node;
const extra = zir.extraData(Inst.SwitchBlock, inst_data.payload_index);
const bits = extra.data.bits;
var extra_index = extra.end;
const multi_cases_len = if (bits.has_multi_cases) len: {
const multi_cases_len = zir.extra[extra_index];
extra_index += 1;
break :len multi_cases_len;
} else 0;
const payload_capture_placeholder: Inst.OptionalIndex = if (bits.payload_capture_inst_is_placeholder) inst: {
const inst: Inst.Index = @enumFromInt(zir.extra[extra_index]);
extra_index += 1;
break :inst inst.toOptional();
} else .none;
const tag_capture_placeholder: Inst.OptionalIndex = if (bits.tag_capture_inst_is_placeholder) inst: {
const inst: Inst.Index = @enumFromInt(zir.extra[extra_index]);
extra_index += 1;
break :inst inst.toOptional();
} else .none;
const catch_or_if_src_node_offset: Ast.Node.OptionalOffset = if (has_non_err) node_offset: {
const node_offset: Ast.Node.Offset = @enumFromInt(@as(i32, @bitCast(zir.extra[extra_index])));
extra_index += 1;
break :node_offset node_offset.toOptional();
} else .none;
const non_err_info: Inst.SwitchBlock.ProngInfo.NonErr = if (has_non_err) non_err_info: {
const non_err_info: Inst.SwitchBlock.ProngInfo.NonErr = @bitCast(zir.extra[extra_index]);
extra_index += 1;
break :non_err_info non_err_info;
} else undefined;
const else_info: Inst.SwitchBlock.ProngInfo.Else = if (bits.has_else) else_info: {
const else_info: Inst.SwitchBlock.ProngInfo.Else = @bitCast(zir.extra[extra_index]);
extra_index += 1;
break :else_info else_info;
} else undefined;
const scalar_cases_len: u32 = bits.scalar_cases_len;
const prong_infos: []const Inst.SwitchBlock.ProngInfo =
@ptrCast(zir.extra[extra_index..][0 .. scalar_cases_len + multi_cases_len]);
extra_index += prong_infos.len;
const multi_case_items_lens = zir.extra[extra_index..][0..multi_cases_len];
extra_index += multi_case_items_lens.len;
const multi_case_ranges_lens: ?[]const u32 = if (bits.any_ranges) lens: {
const multi_case_ranges_lens = zir.extra[extra_index..][0..multi_cases_len];
extra_index += multi_case_ranges_lens.len;
break :lens multi_case_ranges_lens;
} else null;
var total_items_len: usize = scalar_cases_len;
for (multi_case_items_lens) |items_len| {
total_items_len += items_len;
}
if (multi_case_ranges_lens) |ranges_lens| for (ranges_lens) |ranges_len| {
total_items_len += 2 * ranges_len;
};
const item_infos: []const Inst.SwitchBlock.ItemInfo =
@ptrCast(zir.extra[extra_index..][0..total_items_len]);
extra_index += item_infos.len;
const non_err_case: ?UnwrappedSwitchBlock.Case.NonErr = if (has_non_err) non_err_case: {
const body = zir.bodySlice(extra_index, non_err_info.body_len);
extra_index += body.len;
break :non_err_case .{
.body = body,
.capture = non_err_info.capture,
.operand_is_ref = non_err_info.operand_is_ref,
};
} else null;
const else_case: ?UnwrappedSwitchBlock.Case.Else = if (bits.has_else) else_case: {
const body = zir.bodySlice(extra_index, else_info.body_len);
extra_index += body.len;
break :else_case .{
.index = .@"else",
.body = body,
.capture = else_info.capture,
.is_inline = else_info.is_inline,
.has_tag_capture = else_info.has_tag_capture,
.is_simple_noreturn = else_info.is_simple_noreturn,
};
} else null;
return .{
.main_operand = extra.data.raw_operand,
.switch_src_node_offset = inst_data.src_node,
.catch_or_if_src_node_offset = catch_or_if_src_node_offset,
.payload_capture_placeholder = payload_capture_placeholder,
.tag_capture_placeholder = tag_capture_placeholder,
.has_continue = bits.has_continue,
.any_maybe_runtime_capture = bits.any_maybe_runtime_capture,
.non_err_case = non_err_case,
.else_case = else_case,
.has_under = bits.has_under,
.prong_infos = prong_infos,
.multi_case_items_lens = multi_case_items_lens,
.multi_case_ranges_lens = multi_case_ranges_lens,
.item_infos = item_infos,
.end = extra_index,
};
}
/// Trailing (starting at `end`):
/// 0. case_bodies: { // for each case in Case.Iterator.next()
/// prong_body: {
/// body_inst: Inst.Index, // for every case.prong_info.body_len,
/// }
/// item_body: { // for each body_len in case.item_infos
/// body_inst: Inst.Index, // for every body_len
/// }
/// range_bodies: { // for each .{first_info, last_info} in case.range_infos
/// first_body_inst: Inst.Index, // for every first_info.body_len
/// last_body_inst: Inst.Index, // for every last_info.body_len
/// }
/// }
pub const UnwrappedSwitchBlock = struct {
/// Either `catch`/`if` or `switch` operand.
main_operand: Inst.Ref,
switch_src_node_offset: Ast.Node.Offset,
catch_or_if_src_node_offset: Ast.Node.OptionalOffset,
payload_capture_placeholder: Inst.OptionalIndex,
tag_capture_placeholder: Inst.OptionalIndex,
has_continue: bool,
any_maybe_runtime_capture: bool,
non_err_case: ?Case.NonErr,
else_case: ?Case.Else,
has_under: bool,
// Refer to doc comment and `iterateCases` to access everything below correctly.
prong_infos: []const Inst.SwitchBlock.ProngInfo,
multi_case_items_lens: []const u32,
multi_case_ranges_lens: ?[]const u32,
item_infos: []const Inst.SwitchBlock.ItemInfo,
end: usize,
pub fn anyRanges(unwrapped: *const UnwrappedSwitchBlock) bool {
return unwrapped.multi_case_ranges_lens != null;
}
pub fn scalarCasesLen(unwrapped: *const UnwrappedSwitchBlock) u32 {
return @intCast(unwrapped.prong_infos.len - unwrapped.multi_case_items_lens.len);
}
pub fn multiCasesLen(unwrapped: *const UnwrappedSwitchBlock) u32 {
return @intCast(unwrapped.multi_case_items_lens.len);
}
pub fn totalItemsLen(unwrapped: *const UnwrappedSwitchBlock) u32 {
var total_items_len: u32 = @intCast(unwrapped.item_infos.len);
if (unwrapped.multi_case_ranges_lens) |ranges_lens| {
for (ranges_lens) |len| total_items_len -= len;
}
return total_items_len;
}
pub const Case = struct {
index: Case.Index,
prong_info: Inst.SwitchBlock.ProngInfo,
item_infos: []const Inst.SwitchBlock.ItemInfo,
range_infos: []const [2]Inst.SwitchBlock.ItemInfo,
pub const Index = packed struct(u32) {
kind: enum(u1) { scalar, multi },
value: u31,
pub const @"else": Case.Index = .{
.kind = .scalar,
.value = std.math.maxInt(u31),
};
};
pub const NonErr = struct {
body: []const Inst.Index,
capture: Inst.SwitchBlock.ProngInfo.Capture,
operand_is_ref: bool,
};
pub const Else = struct {
index: Case.Index,
body: []const Inst.Index,
capture: Inst.SwitchBlock.ProngInfo.Capture,
is_inline: bool,
has_tag_capture: bool,
is_simple_noreturn: bool,
};
pub const Iterator = struct {
next_idx: u32,
prong_infos: []const Inst.SwitchBlock.ProngInfo,
multi_case_items_lens: []const u32,
multi_case_ranges_lens: ?[]const u32,
item_infos: []const Inst.SwitchBlock.ItemInfo,
pub fn next(it: *Iterator) ?Case {
const idx = it.next_idx;
if (idx == it.prong_infos.len) return null;
it.next_idx += 1;
const scalar_cases_len = it.prong_infos.len - it.multi_case_items_lens.len;
return if (idx < scalar_cases_len) .{
.index = .{
.kind = .scalar,
.value = @intCast(idx),
},
.prong_info = it.prong_infos[idx],
.item_infos = it.itemInfos(1),
.range_infos = &.{},
} else .{
.index = .{
.kind = .multi,
.value = @intCast(idx - scalar_cases_len),
},
.prong_info = it.prong_infos[idx],
.item_infos = it.itemInfos(it.multi_case_items_lens[idx - scalar_cases_len]),
.range_infos = if (it.multi_case_ranges_lens) |ranges_lens| b: {
break :b @ptrCast(it.itemInfos(2 * ranges_lens[idx - scalar_cases_len]));
} else &.{},
};
}
fn itemInfos(it: *Iterator, count: u32) []const Inst.SwitchBlock.ItemInfo {
const lens = it.item_infos[0..count];
it.item_infos = it.item_infos[count..];
return lens;
}
};
};
pub fn iterateCases(unwrapped: UnwrappedSwitchBlock) Case.Iterator {
return .{
.next_idx = 0,
.prong_infos = unwrapped.prong_infos,
.multi_case_items_lens = unwrapped.multi_case_items_lens,
.multi_case_ranges_lens = unwrapped.multi_case_ranges_lens,
.item_infos = unwrapped.item_infos,
};
}
};
/// When the ZIR update tracking logic must be modified to consider new instructions,
/// change this constant to trigger compile errors at all relevant locations.
/// (Sites that must be kept in sync reference this constant, so bumping it
/// surfaces every location that needs review as a compile error.)
pub const inst_tracking_version = 0;
+23 -23
View File
@@ -825,10 +825,10 @@ fn analyzeOperands(
// This logic must synchronize with `will_die_immediately` in `AnalyzeBigOperands.init`.
const immediate_death = if (data.live_set.remove(inst)) blk: {
log.debug("[{}] %{d}: removed from live set", .{ pass, @intFromEnum(inst) });
log.debug("[{t}] {f}: removed from live set", .{ pass, inst });
break :blk false;
} else blk: {
log.debug("[{}] %{d}: immediate death", .{ pass, @intFromEnum(inst) });
log.debug("[{t}] {f}: immediate death", .{ pass, inst });
break :blk true;
};
@@ -849,7 +849,7 @@ fn analyzeOperands(
const mask = @as(Bpi, 1) << @as(OperandInt, @intCast(i));
if ((try data.live_set.fetchPut(gpa, operand, {})) == null) {
log.debug("[{}] %{d}: added %{d} to live set (operand dies here)", .{ pass, @intFromEnum(inst), operand });
log.debug("[{t}] {f}: added {f} to live set (operand dies here)", .{ pass, inst, operand });
tomb_bits |= mask;
}
}
@@ -988,19 +988,19 @@ fn analyzeInstBlock(
},
.main_analysis => {
log.debug("[{}] %{f}: block live set is {f}", .{ pass, inst, fmtInstSet(&data.live_set) });
log.debug("[{t}] {f}: block live set is {f}", .{ pass, inst, fmtInstSet(&data.live_set) });
// We can move the live set because the body should have a noreturn
// instruction which overrides the set.
try data.block_scopes.put(gpa, inst, .{
.live_set = data.live_set.move(),
});
defer {
log.debug("[{}] %{f}: popped block scope", .{ pass, inst });
log.debug("[{t}] {f}: popped block scope", .{ pass, inst });
var scope = data.block_scopes.fetchRemove(inst).?.value;
scope.live_set.deinit(gpa);
}
log.debug("[{}] %{f}: pushed new block scope", .{ pass, inst });
log.debug("[{t}] {f}: pushed new block scope", .{ pass, inst });
try analyzeBody(a, pass, data, body);
// If the block is noreturn, block deaths not only aren't useful, they're impossible to
@@ -1027,7 +1027,7 @@ fn analyzeInstBlock(
}
assert(measured_num == num_deaths); // post-live-set should be a subset of pre-live-set
try a.special.put(gpa, inst, extra_index);
log.debug("[{}] %{f}: block deaths are {f}", .{
log.debug("[{t}] {f}: block deaths are {f}", .{
pass,
inst,
fmtInstList(@ptrCast(a.extra.items[extra_index + 1 ..][0..num_deaths])),
@@ -1064,7 +1064,7 @@ fn writeLoopInfo(
const block_inst = key.*;
a.extra.appendAssumeCapacity(@intFromEnum(block_inst));
}
log.debug("[{}] %{f}: includes breaks to {f}", .{ LivenessPass.loop_analysis, inst, fmtInstSet(&data.breaks) });
log.debug("[{t}] {f}: includes breaks to {f}", .{ LivenessPass.loop_analysis, inst, fmtInstSet(&data.breaks) });
// Now we put the live operands from the loop body in too
const num_live = data.live_set.count();
@@ -1076,7 +1076,7 @@ fn writeLoopInfo(
const alive = key.*;
a.extra.appendAssumeCapacity(@intFromEnum(alive));
}
log.debug("[{}] %{f}: maintain liveness of {f}", .{ LivenessPass.loop_analysis, inst, fmtInstSet(&data.live_set) });
log.debug("[{t}] {f}: maintain liveness of {f}", .{ LivenessPass.loop_analysis, inst, fmtInstSet(&data.live_set) });
try a.special.put(gpa, inst, extra_index);
@@ -1117,7 +1117,7 @@ fn resolveLoopLiveSet(
try data.live_set.ensureUnusedCapacity(gpa, @intCast(loop_live.len));
for (loop_live) |alive| data.live_set.putAssumeCapacity(alive, {});
log.debug("[{}] %{f}: block live set is {f}", .{ LivenessPass.main_analysis, inst, fmtInstSet(&data.live_set) });
log.debug("[{t}] {f}: block live set is {f}", .{ LivenessPass.main_analysis, inst, fmtInstSet(&data.live_set) });
for (breaks) |block_inst| {
// We might break to this block, so include every operand that the block needs alive
@@ -1130,7 +1130,7 @@ fn resolveLoopLiveSet(
}
}
log.debug("[{}] %{f}: loop live set is {f}", .{ LivenessPass.main_analysis, inst, fmtInstSet(&data.live_set) });
log.debug("[{t}] {f}: loop live set is {f}", .{ LivenessPass.main_analysis, inst, fmtInstSet(&data.live_set) });
}
fn analyzeInstLoop(
@@ -1168,7 +1168,7 @@ fn analyzeInstLoop(
.live_set = data.live_set.move(),
});
defer {
log.debug("[{}] %{f}: popped loop block scop", .{ pass, inst });
log.debug("[{t}] {f}: popped loop block scope", .{ pass, inst });
var scope = data.block_scopes.fetchRemove(inst).?.value;
scope.live_set.deinit(gpa);
}
@@ -1269,13 +1269,13 @@ fn analyzeInstCondBr(
}
}
log.debug("[{}] %{f}: 'then' branch mirrored deaths are {f}", .{ pass, inst, fmtInstList(then_mirrored_deaths.items) });
log.debug("[{}] %{f}: 'else' branch mirrored deaths are {f}", .{ pass, inst, fmtInstList(else_mirrored_deaths.items) });
log.debug("[{t}] {f}: 'then' branch mirrored deaths are {f}", .{ pass, inst, fmtInstList(then_mirrored_deaths.items) });
log.debug("[{t}] {f}: 'else' branch mirrored deaths are {f}", .{ pass, inst, fmtInstList(else_mirrored_deaths.items) });
data.live_set.deinit(gpa);
data.live_set = then_live.move(); // Really the union of both live sets
log.debug("[{}] %{f}: new live set is {f}", .{ pass, inst, fmtInstSet(&data.live_set) });
log.debug("[{t}] {f}: new live set is {f}", .{ pass, inst, fmtInstSet(&data.live_set) });
// Write the mirrored deaths to `extra`
const then_death_count = @as(u32, @intCast(then_mirrored_deaths.items.len));
@@ -1343,7 +1343,7 @@ fn analyzeInstSwitchBr(
});
}
defer if (is_dispatch_loop) {
log.debug("[{}] %{f}: popped loop block scop", .{ pass, inst });
log.debug("[{t}] {f}: popped loop block scope", .{ pass, inst });
var scope = data.block_scopes.fetchRemove(inst).?.value;
scope.live_set.deinit(gpa);
};
@@ -1401,13 +1401,13 @@ fn analyzeInstSwitchBr(
}
for (mirrored_deaths, 0..) |mirrored, i| {
log.debug("[{}] %{f}: case {} mirrored deaths are {f}", .{ pass, inst, i, fmtInstList(mirrored.items) });
log.debug("[{t}] {f}: case {} mirrored deaths are {f}", .{ pass, inst, i, fmtInstList(mirrored.items) });
}
data.live_set.deinit(gpa);
data.live_set = all_alive.move();
log.debug("[{}] %{f}: new live set is {f}", .{ pass, inst, fmtInstSet(&data.live_set) });
log.debug("[{t}] {f}: new live set is {f}", .{ pass, inst, fmtInstSet(&data.live_set) });
}
const else_death_count = @as(u32, @intCast(mirrored_deaths[ncases].items.len));
@@ -1506,7 +1506,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
.main_analysis => {
if ((try big.data.live_set.fetchPut(gpa, operand, {})) == null) {
log.debug("[{}] %{f}: added %{f} to live set (operand dies here)", .{ pass, big.inst, operand });
log.debug("[{t}] {f}: added {f} to live set (operand dies here)", .{ pass, big.inst, operand });
big.extra_tombs[extra_byte] |= @as(u32, 1) << extra_bit;
}
},
@@ -1568,9 +1568,9 @@ const FmtInstSet = struct {
return;
}
var it = val.set.keyIterator();
try w.print("%{f}", .{it.next().?.*});
try w.print("{f}", .{it.next().?.*});
while (it.next()) |key| {
try w.print(" %{f}", .{key.*});
try w.print(" {f}", .{key.*});
}
}
};
@@ -1587,9 +1587,9 @@ const FmtInstList = struct {
try w.writeAll("[no instructions]");
return;
}
try w.print("%{f}", .{val.list[0]});
try w.print("{f}", .{val.list[0]});
for (val.list[1..]) |inst| {
try w.print(" %{f}", .{inst});
try w.print(" {f}", .{inst});
}
}
};
+16 -11
View File
@@ -73,7 +73,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.trap, .unreach => {
try self.verifyInstOperands(inst, .{ .none, .none, .none });
// This instruction terminates the function, so everything should be dead
if (self.live.count() > 0) return invalid("%{f}: instructions still alive", .{inst});
if (self.live.count() > 0) return invalid("{f}: instructions still alive", .{inst});
},
// unary
@@ -166,7 +166,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
const un_op = data[@intFromEnum(inst)].un_op;
try self.verifyInstOperands(inst, .{ un_op, .none, .none });
// This instruction terminates the function, so everything should be dead
if (self.live.count() > 0) return invalid("%{f}: instructions still alive", .{inst});
if (self.live.count() > 0) return invalid("{f}: instructions still alive", .{inst});
},
.dbg_var_ptr,
.dbg_var_val,
@@ -441,7 +441,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.repeat => {
const repeat = data[@intFromEnum(inst)].repeat;
const expected_live = self.loops.get(repeat.loop_inst) orelse
return invalid("%{d}: loop %{d} not in scope", .{ @intFromEnum(inst), @intFromEnum(repeat.loop_inst) });
return invalid("{f}: loop {f} not in scope", .{ inst, repeat.loop_inst });
try self.verifyMatchingLiveness(repeat.loop_inst, expected_live);
},
@@ -451,7 +451,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
try self.verifyOperand(inst, br.operand, self.liveness.operandDies(inst, 0));
const expected_live = self.loops.get(br.block_inst) orelse
return invalid("%{d}: loop %{d} not in scope", .{ @intFromEnum(inst), @intFromEnum(br.block_inst) });
return invalid("{f}: loop {f} not in scope", .{ inst, br.block_inst });
try self.verifyMatchingLiveness(br.block_inst, expected_live);
},
@@ -487,7 +487,12 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
if (ip.isNoReturn(block_ty.toIntern())) {
assert(!self.blocks.contains(inst));
} else {
var live = self.blocks.fetchRemove(inst).?.value;
var live = if (self.blocks.fetchRemove(inst)) |kv| kv.value else {
return invalid(
"{f}: block of type '{f}' not terminated correctly",
.{ inst, block_ty.fmtDebug() },
);
};
defer live.deinit(self.gpa);
try self.verifyMatchingLiveness(inst, live);
@@ -502,7 +507,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
// The same stuff should be alive after the loop as before it.
const gop = try self.loops.getOrPut(self.gpa, inst);
if (gop.found_existing) return invalid("%{d}: loop already exists", .{@intFromEnum(inst)});
if (gop.found_existing) return invalid("{f}: loop already exists", .{inst});
defer {
var live = self.loops.fetchRemove(inst).?;
live.value.deinit(self.gpa);
@@ -551,7 +556,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
// after the loop as before it.
{
const gop = try self.loops.getOrPut(self.gpa, inst);
if (gop.found_existing) return invalid("%{d}: loop already exists", .{@intFromEnum(inst)});
if (gop.found_existing) return invalid("{f}: loop already exists", .{inst});
gop.value_ptr.* = self.live.move();
}
defer {
@@ -606,11 +611,11 @@ fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies
return;
};
if (dies) {
if (!self.live.remove(operand)) return invalid("%{f}: dead operand %{f} reused and killed again", .{
if (!self.live.remove(operand)) return invalid("{f}: dead operand {f} reused and killed again", .{
inst, operand,
});
} else {
if (!self.live.contains(operand)) return invalid("%{f}: dead operand %{f} reused", .{ inst, operand });
if (!self.live.contains(operand)) return invalid("{f}: dead operand {f} reused", .{ inst, operand });
}
}
@@ -635,9 +640,9 @@ fn verifyInst(self: *Verify, inst: Air.Inst.Index) Error!void {
}
fn verifyMatchingLiveness(self: *Verify, block: Air.Inst.Index, live: LiveMap) Error!void {
if (self.live.count() != live.count()) return invalid("%{f}: different deaths across branches", .{block});
if (self.live.count() != live.count()) return invalid("{f}: different deaths across branches", .{block});
var live_it = self.live.keyIterator();
while (live_it.next()) |live_inst| if (!live.contains(live_inst.*)) return invalid("%{f}: different deaths across branches", .{block});
while (live_it.next()) |live_inst| if (!live.contains(live_inst.*)) return invalid("{f}: different deaths across branches", .{block});
}
fn invalid(comptime fmt: []const u8, args: anytype) error{LivenessInvalid} {
+64 -64
View File
@@ -1,102 +1,92 @@
const std = @import("std");
const assert = std.debug.assert;
const Order = std.math.Order;
const InternPool = @import("InternPool.zig");
const Type = @import("Type.zig");
const Value = @import("Value.zig");
const Zcu = @import("Zcu.zig");
const RangeSet = @This();
const LazySrcLoc = Zcu.LazySrcLoc;
zcu: *Zcu,
ranges: std.array_list.Managed(Range),
ranges: std.ArrayList(Range),
pub const Range = struct {
first: InternPool.Index,
last: InternPool.Index,
first: Value,
last: Value,
src: LazySrcLoc,
};
pub fn init(allocator: std.mem.Allocator, zcu: *Zcu) RangeSet {
return .{
.zcu = zcu,
.ranges = std.array_list.Managed(Range).init(allocator),
};
pub const empty: RangeSet = .{ .ranges = .empty };
pub fn deinit(self: *RangeSet, allocator: Allocator) void {
self.ranges.deinit(allocator);
self.* = undefined;
}
pub fn deinit(self: *RangeSet) void {
self.ranges.deinit();
pub fn ensureUnusedCapacity(self: *RangeSet, allocator: Allocator, additional_count: usize) Allocator.Error!void {
return self.ranges.ensureUnusedCapacity(allocator, additional_count);
}
pub fn add(
self: *RangeSet,
first: InternPool.Index,
last: InternPool.Index,
src: LazySrcLoc,
) !?LazySrcLoc {
const zcu = self.zcu;
const ip = &zcu.intern_pool;
pub fn addAssumeCapacity(set: *RangeSet, new: Range, ty: Type, zcu: *Zcu) ?LazySrcLoc {
assert(new.first.typeOf(zcu).eql(ty, zcu));
assert(new.last.typeOf(zcu).eql(ty, zcu));
const ty = ip.typeOf(first);
assert(ty == ip.typeOf(last));
for (self.ranges.items) |range| {
assert(ty == ip.typeOf(range.first));
assert(ty == ip.typeOf(range.last));
if (Value.fromInterned(last).compareScalar(.gte, Value.fromInterned(range.first), Type.fromInterned(ty), zcu) and
Value.fromInterned(first).compareScalar(.lte, Value.fromInterned(range.last), Type.fromInterned(ty), zcu))
for (set.ranges.items) |range| {
if (new.last.compareScalar(.gte, range.first, ty, zcu) and
new.first.compareScalar(.lte, range.last, ty, zcu))
{
return range.src; // They overlap.
}
}
try self.ranges.append(.{
.first = first,
.last = last,
.src = src,
});
set.ranges.appendAssumeCapacity(new);
return null;
}
/// Assumes a and b do not overlap
fn lessThan(zcu: *Zcu, a: Range, b: Range) bool {
const ty = Type.fromInterned(zcu.intern_pool.typeOf(a.first));
return Value.fromInterned(a.first).compareScalar(.lt, Value.fromInterned(b.first), ty, zcu);
pub fn add(set: *RangeSet, allocator: Allocator, new: Range, ty: Type, zcu: *Zcu) Allocator.Error!?LazySrcLoc {
try set.ensureUnusedCapacity(allocator, 1);
return set.addAssumeCapacity(new, ty, zcu);
}
pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
const zcu = self.zcu;
const ip = &zcu.intern_pool;
assert(ip.typeOf(first) == ip.typeOf(last));
const SortCtx = struct {
ty: Type,
zcu: *Zcu,
};
/// Assumes a and b do not overlap
fn lessThan(ctx: SortCtx, a: Range, b: Range) bool {
return a.first.compareScalar(.lt, b.first, ctx.ty, ctx.zcu);
}
if (self.ranges.items.len == 0)
return false;
pub fn spans(
set: *RangeSet,
allocator: Allocator,
first: Value,
last: Value,
ty: Type,
zcu: *Zcu,
) Allocator.Error!bool {
assert(first.typeOf(zcu).eql(ty, zcu));
assert(last.typeOf(zcu).eql(ty, zcu));
if (set.ranges.items.len == 0) return false;
std.mem.sort(Range, self.ranges.items, zcu, lessThan);
std.mem.sort(Range, set.ranges.items, SortCtx{ .ty = ty, .zcu = zcu }, lessThan);
if (self.ranges.items[0].first != first or
self.ranges.items[self.ranges.items.len - 1].last != last)
if (!set.ranges.items[0].first.eql(first, ty, zcu) or
!set.ranges.items[set.ranges.items.len - 1].last.eql(last, ty, zcu))
{
return false;
}
const limbs = try allocator.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(ty.intInfo(zcu).bits),
);
defer allocator.free(limbs);
var counter: std.math.big.int.Mutable = .init(limbs, 0);
var space: InternPool.Key.Int.Storage.BigIntSpace = undefined;
var counter = try std.math.big.int.Managed.init(self.ranges.allocator);
defer counter.deinit();
// look for gaps
for (self.ranges.items[1..], 0..) |cur, i| {
for (set.ranges.items[1..], 0..) |cur, i| {
// i starts counting from the second item.
const prev = self.ranges.items[i];
const prev = set.ranges.items[i];
// prev.last + 1 == cur.first
try counter.copy(Value.fromInterned(prev.last).toBigInt(&space, zcu));
try counter.addScalar(&counter, 1);
counter.copy(prev.last.toBigInt(&space, zcu));
counter.addScalar(counter.toConst(), 1);
const cur_start_int = Value.fromInterned(cur.first).toBigInt(&space, zcu);
const cur_start_int = cur.first.toBigInt(&space, zcu);
if (!cur_start_int.eql(counter.toConst())) {
return false;
}
@@ -104,3 +94,13 @@ pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !
return true;
}
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const InternPool = @import("InternPool.zig");
const Type = @import("Type.zig");
const Value = @import("Value.zig");
const Zcu = @import("Zcu.zig");
const LazySrcLoc = Zcu.LazySrcLoc;
+2667 -3103
View File
@@ -509,7 +509,7 @@ pub const Block = struct {
.parent = parent,
.sema = parent.sema,
.namespace = parent.namespace,
.instructions = .{},
.instructions = .empty,
.label = null,
.inlining = parent.inlining,
.comptime_reason = parent.comptime_reason,
@@ -1927,9 +1927,8 @@ fn analyzeBodyInner(
break :msg msg;
});
}
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
assert(is_non_err != .none);
const is_non_err_val = try sema.resolveConstDefinedValue(block, operand_src, is_non_err, null);
const is_non_err_val = (try sema.resolveIsNonErrVal(block, operand_src, err_union)).?;
if (is_non_err_val.isUndef(zcu)) return sema.failWithUseOfUndef(block, operand_src, null);
if (is_non_err_val.toBool()) {
break :blk try sema.analyzeErrUnionPayload(block, src, err_union_ty, err_union, operand_src, false);
}
@@ -1945,9 +1944,8 @@ fn analyzeBodyInner(
const inline_body = sema.code.bodySlice(extra.end, extra.data.body_len);
const operand = try sema.resolveInst(extra.data.operand);
const err_union = try sema.analyzeLoad(block, src, operand, operand_src);
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(block, operand_src, err_union);
assert(is_non_err != .none);
const is_non_err_val = try sema.resolveConstDefinedValue(block, operand_src, is_non_err, null);
const is_non_err_val = (try sema.resolveIsNonErrVal(block, operand_src, err_union)).?;
if (is_non_err_val.isUndef(zcu)) return sema.failWithUseOfUndef(block, operand_src, null);
if (is_non_err_val.toBool()) {
break :blk try sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false);
}
@@ -6498,26 +6496,23 @@ fn zirSwitchContinue(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) Com
switch (sema.code.instructions.items(.tag)[@intFromEnum(switch_inst)]) {
.switch_block, .switch_block_ref => {},
.switch_block_err_union => unreachable, // wrong code path!
else => unreachable, // assertion failure
}
const switch_payload_index = sema.code.instructions.items(.data)[@intFromEnum(switch_inst)].pl_node.payload_index;
const switch_operand_ref = sema.code.extraData(Zir.Inst.SwitchBlock, switch_payload_index).data.operand;
const switch_operand_ty = sema.typeOf(try sema.resolveInst(switch_operand_ref));
const operand = try sema.coerce(start_block, switch_operand_ty, uncoerced_operand, operand_src);
const operand_ty = (try sema.resolveInst(switch_inst.toRef())).toType();
const operand = try sema.coerce(start_block, operand_ty, uncoerced_operand, operand_src);
try sema.validateRuntimeValue(start_block, operand_src, operand);
// We want to generate a `switch_dispatch` instruction with the switch condition,
// possibly preceded by a store to the stack alloc containing the raw operand.
// However, to avoid too much special-case state in Sema, this is handled by the
// `switch` lowering logic. As such, we will find the `Block` corresponding to the
// parent `switch_block[_ref]` instruction, create a dummy `br`, and add a merge
// to signal to the switch logic to rewrite this into an appropriate dispatch.
// `switch` lowering logic. As such, we will find the `Block` corresponding to
// the parent `switch_block[_ref]` instruction, create a dummy `br`, and add a
// merge to signal to the switch logic to rewrite this into an appropriate dispatch.
var block = start_block;
while (true) {
while (true) : (block = block.parent.?) {
if (block.label) |label| {
if (label.zir_block == switch_inst) {
const br_ref = try start_block.addBr(label.merges.block_inst, operand);
@@ -6531,7 +6526,6 @@ fn zirSwitchContinue(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) Com
return;
}
}
block = block.parent.?;
}
}
@@ -8485,8 +8479,20 @@ fn zirDeclLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index, do_coerce: b
sema.code.nullTerminatedString(extra.field_name_start),
.no_embedded_nulls,
);
const orig_ty: Type = try sema.resolveTypeOrPoison(block, src, extra.lhs) orelse .generic_poison;
return sema.analyzeDeclLiteral(block, src, name, orig_ty, do_coerce);
}
fn analyzeDeclLiteral(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
name: InternPool.NullTerminatedString,
orig_ty: Type,
do_coerce: bool,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const uncoerced_result = res: {
if (orig_ty.toIntern() == .generic_poison_type) {
@@ -8960,6 +8966,7 @@ fn analyzeErrUnionCode(sema: *Sema, block: *Block, src: LazySrcLoc, operand: Air
const result_ty = operand_ty.errorUnionSet(zcu);
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
if (val.getErrorName(zcu) == .none) return .unreachable_value;
return Air.internedToRef((try pt.intern(.{ .err = .{
.ty = result_ty.toIntern(),
.name = zcu.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
@@ -8997,7 +9004,7 @@ fn analyzeErrUnionCodePtr(sema: *Sema, block: *Block, src: LazySrcLoc, operand:
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
assert(val.getErrorName(zcu) != .none);
if (val.getErrorName(zcu) == .none) return .unreachable_value;
return Air.internedToRef((try pt.intern(.{ .err = .{
.ty = result_ty.toIntern(),
.name = zcu.intern_pool.indexToKey(val.toIntern()).error_union.val.err_name,
@@ -10519,668 +10526,6 @@ fn zirSliceSentinelTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
return Air.internedToRef(sentinel_ty.toIntern());
}
/// Holds common data used when analyzing or resolving switch prong bodies,
/// including setting up captures.
const SwitchProngAnalysis = struct {
sema: *Sema,
/// The block containing the `switch_block` itself.
parent_block: *Block,
operand: Operand,
/// If this switch is on an error set, this is the type to assign to the
/// `else` prong. If `null`, the prong should be unreachable.
else_error_ty: ?Type,
/// The index of the `switch_block` instruction itself.
switch_block_inst: Zir.Inst.Index,
/// The dummy index into which inline tag captures should be placed. May be
/// undefined if no prong has a tag capture.
tag_capture_inst: Zir.Inst.Index,
const Operand = union(enum) {
/// This switch will be dispatched only once, with the given operand.
simple: struct {
/// The raw switch operand value. Always defined.
by_val: Air.Inst.Ref,
/// The switch operand *pointer*. Defined only if there is a prong
/// with a by-ref capture.
by_ref: Air.Inst.Ref,
/// The switch condition value. For unions, `operand` is the union
/// and `cond` is its enum tag value.
cond: Air.Inst.Ref,
},
/// This switch may be dispatched multiple times with `continue` syntax.
/// As such, the operand is stored in an alloc if needed.
loop: struct {
/// The `alloc` containing the `switch` operand for the active dispatch.
/// Each prong must load from this `alloc` to get captures.
/// If there are no captures, this may be undefined.
operand_alloc: Air.Inst.Ref,
/// Whether `operand_alloc` contains a by-val operand or a by-ref
/// operand.
operand_is_ref: bool,
/// The switch condition value for the *initial* dispatch. For
/// unions, this is the enum tag value.
init_cond: Air.Inst.Ref,
},
};
/// Resolve a switch prong which is determined at comptime to have no peers.
/// Uses `resolveBlockBody`. Sets up captures as needed.
fn resolveProngComptime(
spa: SwitchProngAnalysis,
child_block: *Block,
prong_type: enum { normal, special },
prong_body: []const Zir.Inst.Index,
capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
/// Must use the `switch_capture` field in `offset`.
capture_src: LazySrcLoc,
/// The set of all values which can reach this prong. May be undefined
/// if the prong is special or contains ranges.
case_vals: []const Air.Inst.Ref,
/// The inline capture of this prong. If this is not an inline prong,
/// this is `.none`.
inline_case_capture: Air.Inst.Ref,
/// Whether this prong has an inline tag capture. If `true`, then
/// `inline_case_capture` cannot be `.none`.
has_tag_capture: bool,
merges: *Block.Merges,
) CompileError!Air.Inst.Ref {
const sema = spa.sema;
const src = spa.parent_block.nodeOffset(
sema.code.instructions.items(.data)[@intFromEnum(spa.switch_block_inst)].pl_node.src_node,
);
// We can propagate `.cold` hints from this branch since it's comptime-known
// to be taken from the parent branch.
const parent_hint = sema.branch_hint;
defer sema.branch_hint = parent_hint orelse if (sema.branch_hint == .cold) .cold else null;
if (has_tag_capture) {
const tag_ref = try spa.analyzeTagCapture(child_block, capture_src, inline_case_capture);
sema.inst_map.putAssumeCapacity(spa.tag_capture_inst, tag_ref);
}
defer if (has_tag_capture) assert(sema.inst_map.remove(spa.tag_capture_inst));
switch (capture) {
.none => {
return sema.resolveBlockBody(spa.parent_block, src, child_block, prong_body, spa.switch_block_inst, merges);
},
.by_val, .by_ref => {
const capture_ref = try spa.analyzeCapture(
child_block,
capture == .by_ref,
prong_type == .special,
capture_src,
case_vals,
inline_case_capture,
);
if (sema.typeOf(capture_ref).isNoReturn(sema.pt.zcu)) {
// This prong should be unreachable!
return .unreachable_value;
}
sema.inst_map.putAssumeCapacity(spa.switch_block_inst, capture_ref);
defer assert(sema.inst_map.remove(spa.switch_block_inst));
return sema.resolveBlockBody(spa.parent_block, src, child_block, prong_body, spa.switch_block_inst, merges);
},
}
}
/// Analyze a switch prong which may have peers at runtime.
/// Uses `analyzeBodyRuntimeBreak`. Sets up captures as needed.
/// Returns the `BranchHint` for the prong.
fn analyzeProngRuntime(
spa: SwitchProngAnalysis,
case_block: *Block,
prong_type: enum { normal, special },
prong_body: []const Zir.Inst.Index,
capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
/// Must use the `switch_capture` field in `offset`.
capture_src: LazySrcLoc,
/// The set of all values which can reach this prong. May be undefined
/// if the prong is special or contains ranges.
case_vals: []const Air.Inst.Ref,
/// The inline capture of this prong. If this is not an inline prong,
/// this is `.none`.
inline_case_capture: Air.Inst.Ref,
/// Whether this prong has an inline tag capture. If `true`, then
/// `inline_case_capture` cannot be `.none`.
has_tag_capture: bool,
) CompileError!std.builtin.BranchHint {
const sema = spa.sema;
if (has_tag_capture) {
const tag_ref = try spa.analyzeTagCapture(case_block, capture_src, inline_case_capture);
sema.inst_map.putAssumeCapacity(spa.tag_capture_inst, tag_ref);
}
defer if (has_tag_capture) assert(sema.inst_map.remove(spa.tag_capture_inst));
switch (capture) {
.none => {
return sema.analyzeBodyRuntimeBreak(case_block, prong_body);
},
.by_val, .by_ref => {
const capture_ref = try spa.analyzeCapture(
case_block,
capture == .by_ref,
prong_type == .special,
capture_src,
case_vals,
inline_case_capture,
);
if (sema.typeOf(capture_ref).isNoReturn(sema.pt.zcu)) {
// No need to analyze any further, the prong is unreachable
return .none;
}
sema.inst_map.putAssumeCapacity(spa.switch_block_inst, capture_ref);
defer assert(sema.inst_map.remove(spa.switch_block_inst));
return sema.analyzeBodyRuntimeBreak(case_block, prong_body);
},
}
}
fn analyzeTagCapture(
spa: SwitchProngAnalysis,
block: *Block,
capture_src: LazySrcLoc,
inline_case_capture: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const sema = spa.sema;
const pt = sema.pt;
const zcu = pt.zcu;
const operand_ty = switch (spa.operand) {
.simple => |s| sema.typeOf(s.by_val),
.loop => |l| ty: {
const alloc_ty = sema.typeOf(l.operand_alloc);
const alloc_child = alloc_ty.childType(zcu);
if (l.operand_is_ref) break :ty alloc_child.childType(zcu);
break :ty alloc_child;
},
};
if (operand_ty.zigTypeTag(zcu) != .@"union") {
const tag_capture_src: LazySrcLoc = .{
.base_node_inst = capture_src.base_node_inst,
.offset = .{ .switch_tag_capture = capture_src.offset.switch_capture },
};
return sema.fail(block, tag_capture_src, "cannot capture tag of non-union type '{f}'", .{
operand_ty.fmt(pt),
});
}
assert(inline_case_capture != .none);
return inline_case_capture;
}
fn analyzeCapture(
spa: SwitchProngAnalysis,
block: *Block,
capture_byref: bool,
is_special_prong: bool,
capture_src: LazySrcLoc,
case_vals: []const Air.Inst.Ref,
inline_case_capture: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const sema = spa.sema;
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const zir_datas = sema.code.instructions.items(.data);
const switch_node_offset = zir_datas[@intFromEnum(spa.switch_block_inst)].pl_node.src_node;
const operand_src = block.src(.{ .node_offset_switch_operand = switch_node_offset });
const operand_val, const operand_ptr = switch (spa.operand) {
.simple => |s| .{ s.by_val, s.by_ref },
.loop => |l| op: {
const loaded = try sema.analyzeLoad(block, operand_src, l.operand_alloc, operand_src);
if (l.operand_is_ref) {
const by_val = try sema.analyzeLoad(block, operand_src, loaded, operand_src);
break :op .{ by_val, loaded };
} else {
break :op .{ loaded, undefined };
}
},
};
const operand_ty = sema.typeOf(operand_val);
const operand_ptr_ty = if (capture_byref) sema.typeOf(operand_ptr) else undefined;
if (inline_case_capture != .none) {
const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inline_case_capture, undefined) catch unreachable;
if (operand_ty.zigTypeTag(zcu) == .@"union") {
const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, zcu).?);
const union_obj = zcu.typeToUnion(operand_ty).?;
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
if (capture_byref) {
const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = !operand_ptr_ty.ptrIsMutable(zcu),
.is_volatile = operand_ptr_ty.isVolatilePtr(zcu),
.address_space = operand_ptr_ty.ptrAddressSpace(zcu),
},
});
if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |union_ptr| {
return Air.internedToRef((try union_ptr.ptrField(field_index, pt)).toIntern());
}
return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty);
} else {
if (try sema.resolveDefinedValue(block, operand_src, operand_val)) |union_val| {
const tag_and_val = ip.indexToKey(union_val.toIntern()).un;
return Air.internedToRef(tag_and_val.val);
}
return block.addStructFieldVal(operand_val, field_index, field_ty);
}
} else if (capture_byref) {
return sema.uavRef(item_val.toIntern());
} else {
return inline_case_capture;
}
}
if (is_special_prong) {
if (capture_byref) {
return operand_ptr;
}
switch (operand_ty.zigTypeTag(zcu)) {
.error_set => if (spa.else_error_ty) |ty| {
return sema.bitCast(block, ty, operand_val, operand_src, null);
} else {
try sema.analyzeUnreachable(block, operand_src, false);
return .unreachable_value;
},
else => return operand_val,
}
}
switch (operand_ty.zigTypeTag(zcu)) {
.@"union" => {
const union_obj = zcu.typeToUnion(operand_ty).?;
const first_item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable;
const first_field_index: u32 = zcu.unionTagFieldIndex(union_obj, first_item_val).?;
const first_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[first_field_index]);
const field_indices = try sema.arena.alloc(u32, case_vals.len);
for (case_vals, field_indices) |item, *field_idx| {
const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
field_idx.* = zcu.unionTagFieldIndex(union_obj, item_val).?;
}
// Fast path: if all the operands are the same type already, we don't need to hit
// PTR! This will also allow us to emit simpler code.
const same_types = for (field_indices[1..]) |field_idx| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (!field_ty.eql(first_field_ty, zcu)) break false;
} else true;
const capture_ty = if (same_types) first_field_ty else capture_ty: {
// We need values to run PTR on, so make a bunch of undef constants.
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (dummy_captures, field_indices) |*dummy, field_idx| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
dummy.* = try pt.undefRef(field_ty);
}
const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
for (case_srcs, 0..) |*case_src, i| {
case_src.* = .{
.base_node_inst = capture_src.base_node_inst,
.offset = .{ .switch_case_item = .{
.switch_node_offset = switch_node_offset,
.case_idx = capture_src.offset.switch_capture.case_idx,
.item_idx = .{ .kind = .single, .index = @intCast(i) },
} },
};
}
break :capture_ty sema.resolvePeerTypes(block, capture_src, dummy_captures, .{ .override = case_srcs }) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return error.AnalysisFail;
try sema.reparentOwnedErrorMsg(capture_src, msg, "capture group with incompatible types", .{});
return error.AnalysisFail;
},
else => |e| return e,
};
};
// By-reference captures have some further restrictions which make them easier to emit
if (capture_byref) {
const operand_ptr_info = operand_ptr_ty.ptrInfo(zcu);
const capture_ptr_ty = resolve: {
// By-ref captures of hetereogeneous types are only allowed if all field
// pointer types are peer resolvable to each other.
// We need values to run PTR on, so make a bunch of undef constants.
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (field_indices, dummy_captures) |field_idx, *dummy| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
const field_ptr_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = operand_ptr_info.flags.is_const,
.is_volatile = operand_ptr_info.flags.is_volatile,
.address_space = operand_ptr_info.flags.address_space,
.alignment = union_obj.fieldAlign(ip, field_idx),
},
});
dummy.* = try pt.undefRef(field_ptr_ty);
}
const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
for (case_srcs, 0..) |*case_src, i| {
case_src.* = .{
.base_node_inst = capture_src.base_node_inst,
.offset = .{ .switch_case_item = .{
.switch_node_offset = switch_node_offset,
.case_idx = capture_src.offset.switch_capture.case_idx,
.item_idx = .{ .kind = .single, .index = @intCast(i) },
} },
};
}
break :resolve sema.resolvePeerTypes(block, capture_src, dummy_captures, .{ .override = case_srcs }) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return error.AnalysisFail;
try sema.errNote(capture_src, msg, "this coercion is only possible when capturing by value", .{});
try sema.reparentOwnedErrorMsg(capture_src, msg, "capture group with incompatible types", .{});
return error.AnalysisFail;
},
else => |e| return e,
};
};
if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| {
if (op_ptr_val.isUndef(zcu)) return pt.undefRef(capture_ptr_ty);
const field_ptr_val = try op_ptr_val.ptrField(first_field_index, pt);
return Air.internedToRef((try pt.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern());
}
try sema.requireRuntimeBlock(block, operand_src, null);
return block.addStructFieldPtr(operand_ptr, first_field_index, capture_ptr_ty);
}
if (try sema.resolveDefinedValue(block, operand_src, operand_val)) |operand_val_val| {
if (operand_val_val.isUndef(zcu)) return pt.undefRef(capture_ty);
const union_val = ip.indexToKey(operand_val_val.toIntern()).un;
if (Value.fromInterned(union_val.tag).isUndef(zcu)) return pt.undefRef(capture_ty);
const uncoerced = Air.internedToRef(union_val.val);
return sema.coerce(block, capture_ty, uncoerced, operand_src);
}
try sema.requireRuntimeBlock(block, operand_src, null);
if (same_types) {
return block.addStructFieldVal(operand_val, first_field_index, capture_ty);
}
// We may have to emit a switch block which coerces the operand to the capture type.
// If we can, try to avoid that using in-memory coercions.
const first_non_imc = in_mem: {
for (field_indices, 0..) |field_idx, i| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), LazySrcLoc.unneeded, LazySrcLoc.unneeded, null)) {
break :in_mem i;
}
}
// All fields are in-memory coercible to the resolved type!
// Just take the first field and bitcast the result.
const uncoerced = try block.addStructFieldVal(operand_val, first_field_index, first_field_ty);
return block.addBitCast(capture_ty, uncoerced);
};
// By-val capture with heterogeneous types which are not all in-memory coercible to
// the resolved capture type. We finally have to fall back to the ugly method.
// However, let's first track which operands are in-memory coercible. There may well
// be several, and we can squash all of these cases into the same switch prong using
// a simple bitcast. We'll make this the 'else' prong.
var in_mem_coercible = try std.DynamicBitSet.initFull(sema.arena, field_indices.len);
in_mem_coercible.unset(first_non_imc);
{
const next = first_non_imc + 1;
for (field_indices[next..], next..) |field_idx, i| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(block, capture_ty, field_ty, false, zcu.getTarget(), LazySrcLoc.unneeded, LazySrcLoc.unneeded, null)) {
in_mem_coercible.unset(i);
}
}
}
const capture_block_inst = try block.addInstAsIndex(.{
.tag = .block,
.data = .{
.ty_pl = .{
.ty = Air.internedToRef(capture_ty.toIntern()),
.payload = undefined, // updated below
},
},
});
const prong_count = field_indices.len - in_mem_coercible.count();
const estimated_extra = prong_count * 6 + (prong_count / 10); // 2 for Case, 1 item, probably 3 insts; plus hints
var cases_extra = try std.array_list.Managed(u32).initCapacity(sema.gpa, estimated_extra);
defer cases_extra.deinit();
{
// All branch hints are `.none`, so just add zero elems.
comptime assert(@intFromEnum(std.builtin.BranchHint.none) == 0);
const need_elems = std.math.divCeil(usize, prong_count + 1, 10) catch unreachable;
try cases_extra.appendNTimes(0, need_elems);
}
{
// Non-bitcast cases
var it = in_mem_coercible.iterator(.{ .kind = .unset });
while (it.next()) |idx| {
var coerce_block = block.makeSubBlock();
defer coerce_block.instructions.deinit(sema.gpa);
const case_src: LazySrcLoc = .{
.base_node_inst = capture_src.base_node_inst,
.offset = .{ .switch_case_item = .{
.switch_node_offset = switch_node_offset,
.case_idx = capture_src.offset.switch_capture.case_idx,
.item_idx = .{ .kind = .single, .index = @intCast(idx) },
} },
};
const field_idx = field_indices[idx];
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
const uncoerced = try coerce_block.addStructFieldVal(operand_val, field_idx, field_ty);
const coerced = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src);
_ = try coerce_block.addBr(capture_block_inst, coerced);
try cases_extra.ensureUnusedCapacity(@typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
1 + // `item`, no ranges
coerce_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = 1,
.ranges_len = 0,
.body_len = @intCast(coerce_block.instructions.items.len),
}));
cases_extra.appendAssumeCapacity(@intFromEnum(case_vals[idx])); // item
cases_extra.appendSliceAssumeCapacity(@ptrCast(coerce_block.instructions.items)); // body
}
}
const else_body_len = len: {
// 'else' prong uses a bitcast
var coerce_block = block.makeSubBlock();
defer coerce_block.instructions.deinit(sema.gpa);
const first_imc_item_idx = in_mem_coercible.findFirstSet().?;
const first_imc_field_idx = field_indices[first_imc_item_idx];
const first_imc_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]);
const uncoerced = try coerce_block.addStructFieldVal(operand_val, first_imc_field_idx, first_imc_field_ty);
const coerced = try coerce_block.addBitCast(capture_ty, uncoerced);
_ = try coerce_block.addBr(capture_block_inst, coerced);
try cases_extra.appendSlice(@ptrCast(coerce_block.instructions.items));
break :len coerce_block.instructions.items.len;
};
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.SwitchBr).@"struct".fields.len +
cases_extra.items.len +
@typeInfo(Air.Block).@"struct".fields.len +
1);
const switch_br_inst: u32 = @intCast(sema.air_instructions.len);
try sema.air_instructions.append(sema.gpa, .{
.tag = .switch_br,
.data = .{
.pl_op = .{
.operand = undefined, // set by switch below
.payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
.cases_len = @intCast(prong_count),
.else_body_len = @intCast(else_body_len),
}),
},
},
});
sema.air_extra.appendSliceAssumeCapacity(cases_extra.items);
// Set up block body
switch (spa.operand) {
.simple => |s| {
const air_datas = sema.air_instructions.items(.data);
air_datas[switch_br_inst].pl_op.operand = s.cond;
air_datas[@intFromEnum(capture_block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{
.body_len = 1,
});
sema.air_extra.appendAssumeCapacity(switch_br_inst);
},
.loop => {
// The block must first extract the tag from the loaded union.
const tag_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
try sema.air_instructions.append(sema.gpa, .{
.tag = .get_union_tag,
.data = .{ .ty_op = .{
.ty = Air.internedToRef(union_obj.enum_tag_ty),
.operand = operand_val,
} },
});
const air_datas = sema.air_instructions.items(.data);
air_datas[switch_br_inst].pl_op.operand = tag_inst.toRef();
air_datas[@intFromEnum(capture_block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{
.body_len = 2,
});
sema.air_extra.appendAssumeCapacity(@intFromEnum(tag_inst));
sema.air_extra.appendAssumeCapacity(switch_br_inst);
},
}
return capture_block_inst.toRef();
},
.error_set => {
if (capture_byref) {
return sema.fail(
block,
capture_src,
"error set cannot be captured by reference",
.{},
);
}
if (case_vals.len == 1) {
const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable;
const item_ty = try pt.singleErrorSetType(item_val.getErrorName(zcu).unwrap().?);
return sema.bitCast(block, item_ty, operand_val, operand_src, null);
}
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, case_vals.len);
for (case_vals) |err| {
const err_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, err, undefined) catch unreachable;
names.putAssumeCapacityNoClobber(err_val.getErrorName(zcu).unwrap().?, {});
}
const error_ty = try pt.errorSetFromUnsortedNames(names.keys());
return sema.bitCast(block, error_ty, operand_val, operand_src, null);
},
else => {
// In this case the capture value is just the passed-through value
// of the switch condition.
if (capture_byref) {
return operand_ptr;
} else {
return operand_val;
}
},
}
}
};
/// Compute the value a `switch` actually dispatches on: scalar-like types pass
/// through unchanged (or fold to their single possible value when one exists),
/// tagged unions are lowered to their enum tag, and unswitchable types produce
/// a compile error.
fn switchCond(
    sema: *Sema,
    block: *Block,
    src: LazySrcLoc,
    operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const cond_ty = sema.typeOf(operand);
    switch (cond_ty.zigTypeTag(zcu)) {
        .type,
        .void,
        .bool,
        .int,
        .float,
        .comptime_float,
        .comptime_int,
        .enum_literal,
        .pointer,
        .@"fn",
        .error_set,
        .@"enum",
        => {
            // Slices share the `.pointer` type tag but cannot be switched on.
            if (cond_ty.isSlice(zcu)) {
                return sema.fail(block, src, "switch on type '{f}'", .{cond_ty.fmt(pt)});
            }
            // When the type has exactly one possible value, the dispatch is
            // unconditional: fold the condition to that comptime-known value.
            const opv = (try sema.typeHasOnePossibleValue(cond_ty)) orelse return operand;
            return Air.internedToRef(opv.toIntern());
        },
        .@"union" => {
            try cond_ty.resolveFields(pt);
            // Only tagged unions are switchable; dispatch happens on the tag.
            if (cond_ty.unionTagType(zcu)) |enum_ty| {
                return sema.unionToTag(block, enum_ty, operand, src);
            }
            const msg = msg: {
                const msg = try sema.errMsg(src, "switch on union with no attached enum", .{});
                errdefer msg.destroy(sema.gpa);
                if (cond_ty.srcLocOrNull(zcu)) |union_src| {
                    try sema.errNote(union_src, msg, "consider 'union(enum)' here", .{});
                }
                break :msg msg;
            };
            return sema.failWithOwnedErrorMsg(block, msg);
        },
        .error_union,
        .noreturn,
        .array,
        .@"struct",
        .undefined,
        .null,
        .optional,
        .@"opaque",
        .vector,
        .frame,
        .@"anyframe",
        => return sema.fail(block, src, "switch on type '{f}'", .{cond_ty.fmt(pt)}),
    }
}
/// Maps each error name encountered while analyzing a `switch` on an error set
/// to a source location (presumably the prong that matched it, for
/// duplicate-case diagnostics — confirm against `validateErrSetSwitch`; the
/// only visible use here is the `seen_errors` set in `zirSwitchBlockErrUnion`).
const SwitchErrorSet = std.AutoHashMap(InternPool.NullTerminatedString, LazySrcLoc);
fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -11188,100 +10533,13 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
const pt = sema.pt;
const zcu = pt.zcu;
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const switch_src = block.nodeOffset(inst_data.src_node);
const switch_src_node_offset = inst_data.src_node;
const switch_operand_src = block.src(.{ .node_offset_switch_operand = switch_src_node_offset });
const else_prong_src = block.src(.{ .node_offset_switch_else_prong = switch_src_node_offset });
const extra = sema.code.extraData(Zir.Inst.SwitchBlockErrUnion, inst_data.payload_index);
const main_operand_src = block.src(.{ .node_offset_if_cond = extra.data.main_src_node_offset });
const main_src = block.src(.{ .node_offset_main_token = extra.data.main_src_node_offset });
const raw_operand_val = try sema.resolveInst(extra.data.operand);
const zir_switch = sema.code.getSwitchBlock(inst);
const src_node_offset = zir_switch.catch_or_if_src_node_offset.unwrap().?;
const src = block.src(.{ .node_offset_main_token = src_node_offset });
const operand_src = block.src(.{ .node_offset_if_cond = src_node_offset });
// AstGen guarantees that the instruction immediately preceding
// switch_block_err_union is a dbg_stmt
const cond_dbg_node_index: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1);
var header_extra_index: usize = extra.end;
const scalar_cases_len = extra.data.bits.scalar_cases_len;
const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
const multi_cases_len = sema.code.extra[header_extra_index];
header_extra_index += 1;
break :blk multi_cases_len;
} else 0;
const err_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_uses_err_capture) blk: {
const err_capture_inst: Zir.Inst.Index = @enumFromInt(sema.code.extra[header_extra_index]);
header_extra_index += 1;
// SwitchProngAnalysis wants inst_map to have space for the tag capture.
// Note that the normal capture is referred to via the switch block
// index, which there is already necessarily space for.
try sema.inst_map.ensureSpaceForInstructions(gpa, &.{err_capture_inst});
break :blk err_capture_inst;
} else undefined;
var case_vals = try std.ArrayList(Air.Inst.Ref).initCapacity(gpa, scalar_cases_len + 2 * multi_cases_len);
defer case_vals.deinit(gpa);
const NonError = struct {
body: []const Zir.Inst.Index,
end: usize,
capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
};
const non_error_case: NonError = non_error: {
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[header_extra_index]);
const extra_body_start = header_extra_index + 1;
break :non_error .{
.body = sema.code.bodySlice(extra_body_start, info.body_len),
.end = extra_body_start + info.body_len,
.capture = info.capture,
};
};
const Else = struct {
body: []const Zir.Inst.Index,
end: usize,
is_inline: bool,
has_capture: bool,
};
const else_case: Else = if (!extra.data.bits.has_else) .{
.body = &.{},
.end = non_error_case.end,
.is_inline = false,
.has_capture = false,
} else special: {
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[non_error_case.end]);
const extra_body_start = non_error_case.end + 1;
assert(info.capture != .by_ref);
assert(!info.has_tag_capture);
break :special .{
.body = sema.code.bodySlice(extra_body_start, info.body_len),
.end = extra_body_start + info.body_len,
.is_inline = info.is_inline,
.has_capture = info.capture != .none,
};
};
var seen_errors = SwitchErrorSet.init(gpa);
defer seen_errors.deinit();
const operand_ty = sema.typeOf(raw_operand_val);
const operand_err_set = if (extra.data.bits.payload_is_ref)
operand_ty.childType(zcu)
else
operand_ty;
if (operand_err_set.zigTypeTag(zcu) != .error_union) {
return sema.fail(block, switch_src, "expected error union type, found '{f}'", .{
operand_ty.fmt(pt),
});
}
const operand_err_set_ty = operand_err_set.errorUnionSet(zcu);
assert(!zir_switch.has_continue); // wrong codepath!
const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
try sema.air_instructions.append(gpa, .{
@@ -11297,504 +10555,1550 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
.block_inst = block_inst,
},
};
var child_block: Block = .{
.parent = block,
.sema = sema,
.namespace = block.namespace,
.instructions = .{},
.label = &label,
.inlining = block.inlining,
.comptime_reason = block.comptime_reason,
.is_typeof = block.is_typeof,
.c_import_buf = block.c_import_buf,
.runtime_cond = block.runtime_cond,
.runtime_loop = block.runtime_loop,
.runtime_index = block.runtime_index,
.error_return_trace_index = block.error_return_trace_index,
.want_safety = block.want_safety,
.src_base_inst = block.src_base_inst,
.type_name_ctx = block.type_name_ctx,
};
var child_block = block.makeSubBlock();
child_block.label = &label;
const merges = &child_block.label.?.merges;
defer child_block.instructions.deinit(gpa);
defer merges.deinit(gpa);
const resolved_err_set = try sema.resolveInferredErrorSetTy(block, main_src, operand_err_set_ty.toIntern());
if (Type.fromInterned(resolved_err_set).errorSetIsEmpty(zcu)) {
return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
}
const non_err_case = zir_switch.non_err_case.?;
const else_error_ty: ?Type = try validateErrSetSwitch(
sema,
block,
&seen_errors,
&case_vals,
operand_err_set_ty,
inst_data,
scalar_cases_len,
multi_cases_len,
.{ .body = else_case.body, .end = else_case.end, .src = else_prong_src },
extra.data.bits.has_else,
);
var non_err_block: Block = child_block.makeSubBlock();
non_err_block.runtime_loop = null;
non_err_block.runtime_cond = operand_src;
non_err_block.runtime_index.increment();
non_err_block.need_debug_scope = null;
defer non_err_block.instructions.deinit(gpa);
var spa: SwitchProngAnalysis = .{
.sema = sema,
.parent_block = block,
.operand = .{
.simple = .{
.by_val = undefined, // must be set to the unwrapped error code before use
.by_ref = undefined,
.cond = raw_operand_val,
},
},
.else_error_ty = else_error_ty,
.switch_block_inst = inst,
.tag_capture_inst = undefined,
var switch_block: Block = child_block.makeSubBlock();
switch_block.runtime_loop = null;
switch_block.runtime_cond = operand_src;
switch_block.runtime_index.increment();
switch_block.need_debug_scope = null;
defer switch_block.instructions.deinit(gpa);
// We begin with unwrapping the error union we're switching on as necessary.
// Then we analyze the non-error prong if it's not comptime-unreachable.
// Lastly, we analyze the error prong(s) as a regular switch.
const raw_switch_operand, const non_err_cond, const non_err_hint = non_err: {
const eu_maybe_ptr = try sema.resolveInst(zir_switch.main_operand);
const err_union_ty: Type = err_union_ty: {
const raw_operand_ty = sema.typeOf(eu_maybe_ptr);
if (!non_err_case.operand_is_ref) break :err_union_ty raw_operand_ty;
try sema.checkPtrOperand(block, operand_src, raw_operand_ty);
break :err_union_ty raw_operand_ty.childType(zcu);
};
if (err_union_ty.zigTypeTag(zcu) != .error_union) {
return sema.fail(block, operand_src, "expected error union type, found '{f}'", .{
err_union_ty.fmt(pt),
});
}
const non_err_cond = if (non_err_case.operand_is_ref)
try sema.analyzePtrIsNonErr(block, operand_src, eu_maybe_ptr)
else
try sema.analyzeIsNonErr(block, operand_src, eu_maybe_ptr);
const non_err_hint: std.builtin.BranchHint = hint: {
// don't analyze the non-error body if it's unreachable
if (non_err_cond == .bool_false) {
break :hint undefined;
}
const eu_payload: Air.Inst.Ref = switch (non_err_case.capture) {
.by_val => try sema.analyzeErrUnionPayload(&non_err_block, src, err_union_ty, eu_maybe_ptr, operand_src, false),
.by_ref => try sema.analyzeErrUnionPayloadPtr(&non_err_block, src, eu_maybe_ptr, false, false),
.none => undefined,
};
if (non_err_case.capture != .none) sema.inst_map.putAssumeCapacity(inst, eu_payload);
defer if (non_err_case.capture != .none) assert(sema.inst_map.remove(inst));
if (non_err_cond == .bool_true) {
// Early return; we don't analyze the switch as it's unreachable.
return sema.resolveBlockBody(block, src, &non_err_block, non_err_case.body, inst, merges);
}
break :hint try sema.analyzeBodyRuntimeBreak(&non_err_block, non_err_case.body);
};
// Emit this into the switch block as it's our error case!
const eu_code = if (non_err_case.operand_is_ref)
try sema.analyzeErrUnionCodePtr(&switch_block, operand_src, eu_maybe_ptr)
else
try sema.analyzeErrUnionCode(&switch_block, operand_src, eu_maybe_ptr);
break :non_err .{
eu_code,
non_err_cond,
non_err_hint,
};
};
if (try sema.resolveDefinedValue(&child_block, main_src, raw_operand_val)) |ov| {
const operand_val = if (extra.data.bits.payload_is_ref)
(try sema.pointerDeref(&child_block, main_src, ov, operand_ty)).?
else
ov;
const validated_switch = try sema.validateSwitchBlock(block, raw_switch_operand, false, inst, &zir_switch);
if (operand_val.errorUnionIsPayload(zcu)) {
return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
} else {
const err_val = Value.fromInterned(try pt.intern(.{
.err = .{
.ty = operand_err_set_ty.toIntern(),
.name = operand_val.getErrorName(zcu).unwrap().?,
},
}));
spa.operand.simple.by_val = if (extra.data.bits.payload_is_ref)
try sema.analyzeErrUnionCodePtr(block, switch_operand_src, raw_operand_val)
else
try sema.analyzeErrUnionCode(block, switch_operand_src, raw_operand_val);
const maybe_switch_ref: ?Air.Inst.Ref = ref: {
// make err capture (i.e. switch operand) available to switch prong bodies
sema.inst_map.putAssumeCapacityNoClobber(inst, raw_switch_operand);
defer assert(sema.inst_map.remove(inst));
break :ref try sema.analyzeSwitchBlock(block, &switch_block, raw_switch_operand, false, merges, inst, &zir_switch, &validated_switch);
};
if (extra.data.bits.any_uses_err_capture) {
sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand.simple.by_val);
}
defer if (extra.data.bits.any_uses_err_capture) assert(sema.inst_map.remove(err_capture_inst));
return resolveSwitchComptime(
sema,
spa,
&child_block,
try sema.switchCond(block, switch_operand_src, spa.operand.simple.by_val),
err_val,
operand_err_set_ty,
switch_src_node_offset,
null,
.{
.body = else_case.body,
.end = else_case.end,
.capture = if (else_case.has_capture) .by_val else .none,
.is_inline = else_case.is_inline,
.has_tag_capture = false,
},
false,
case_vals,
scalar_cases_len,
multi_cases_len,
true,
false,
);
}
}
if (scalar_cases_len + multi_cases_len == 0) {
if (else_error_ty) |ty| if (ty.errorSetIsEmpty(zcu)) {
return sema.resolveBlockBody(block, main_operand_src, &child_block, non_error_case.body, inst, merges);
if (non_err_cond == .bool_false) {
return maybe_switch_ref orelse {
const switch_src = block.nodeOffset(zir_switch.switch_src_node_offset);
return sema.resolveAnalyzedBlock(block, switch_src, &switch_block, merges, false);
};
}
if (maybe_switch_ref) |switch_ref| {
if (sema.typeOf(switch_ref).isNoReturn(zcu)) {
_ = try switch_block.addNoOp(.unreach);
} else {
const br_ref = try switch_block.addBr(merges.block_inst, switch_ref);
try merges.results.append(gpa, switch_ref);
try merges.br_list.append(gpa, br_ref.toIndex().?);
try merges.src_locs.append(gpa, null);
}
}
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).@"struct".fields.len +
non_err_block.instructions.items.len + switch_block.instructions.items.len);
const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(non_err_block.instructions.items.len),
.else_body_len = @intCast(switch_block.instructions.items.len),
.branch_hints = .{
.true = non_err_hint,
.false = .unlikely, // errors are unlikely
// Code coverage is desired for error handling.
.then_cov = .poi,
.else_cov = .poi,
},
});
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(non_err_block.instructions.items));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(switch_block.instructions.items));
_ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{
.operand = non_err_cond,
.payload = cond_br_payload,
} } });
return sema.resolveAnalyzedBlock(block, src, &child_block, merges, false);
}
/// Semantic analysis entry point for a ZIR `switch` block instruction.
/// Reserves an AIR `block` instruction for the result, validates the switch,
/// and delegates to `analyzeSwitchBlock`; when the switch does not resolve to
/// a single comptime value, the accumulated sub-block is finalized into the
/// reserved AIR block.
fn zirSwitchBlock(
    sema: *Sema,
    block: *Block,
    inst: Zir.Inst.Index,
    operand_is_ref: bool,
) CompileError!Air.Inst.Ref {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = sema.gpa;
    const zir_switch = sema.code.getSwitchBlock(inst);

    // Reserve the enclosing AIR block now; its payload is filled in when the
    // analyzed body is resolved below. The index must be captured before the
    // append so it refers to the new instruction.
    const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
    try sema.air_instructions.append(gpa, .{
        .tag = .block,
        .data = undefined,
    });

    var label: Block.Label = .{
        .zir_block = inst,
        .merges = .{
            .src_locs = .{},
            .results = .{},
            .br_list = .{},
            .block_inst = block_inst,
        },
    };
    var body_block = block.makeSubBlock();
    body_block.label = &label;
    const merges = &label.merges;
    defer body_block.instructions.deinit(gpa);
    defer merges.deinit(gpa);

    const raw_operand = try sema.resolveInst(zir_switch.main_operand);
    const validated_switch = try sema.validateSwitchBlock(block, raw_operand, operand_is_ref, inst, &zir_switch);
    const maybe_ref = try sema.analyzeSwitchBlock(block, &body_block, raw_operand, operand_is_ref, merges, inst, &zir_switch, &validated_switch);
    // A non-null result means the switch was fully resolved to a value;
    // otherwise the instructions emitted into `body_block` form the runtime
    // switch and must be merged into the reserved AIR block.
    if (maybe_ref) |ref| return ref;
    const switch_src = block.nodeOffset(zir_switch.switch_src_node_offset);
    return sema.resolveAnalyzedBlock(block, switch_src, &body_block, merges, false);
}
/// If the switch can be resolved to a value at comptime, this will return a `Ref`
/// that's never `.none`.
/// If not, this will return `null` and emit its instructions into `child_block`.
fn analyzeSwitchBlock(
sema: *Sema,
block: *Block,
child_block: *Block,
raw_operand: Air.Inst.Ref,
operand_is_ref: bool,
merges: *Block.Merges,
switch_inst: Zir.Inst.Index,
zir_switch: *const Zir.UnwrappedSwitchBlock,
validated_switch: *const ValidatedSwitchBlock,
) CompileError!?Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const gpa = sema.gpa;
const src_node_offset = zir_switch.switch_src_node_offset;
const src = block.nodeOffset(src_node_offset);
const operand_src = block.src(.{ .node_offset_switch_operand = src_node_offset });
const has_else = zir_switch.else_case != null;
const has_under = zir_switch.has_under;
const else_case = validated_switch.else_case;
const operand: SwitchOperand, const operand_ty: Type, const maybe_operand_opv: ?Value, const item_ty: Type = operand: {
const val, const ref = if (operand_is_ref)
.{ try sema.analyzeLoad(block, src, raw_operand, operand_src), raw_operand }
else
.{ raw_operand, undefined };
const operand_ty = sema.typeOf(val);
const maybe_operand_opv = try sema.typeHasOnePossibleValue(operand_ty);
const init_cond: Air.Inst.Ref, const item_ty: Type = switch (operand_ty.zigTypeTag(zcu)) {
.@"union" => tag: {
const tag_ty = operand_ty.unionTagType(zcu).?;
const tag_val = try sema.unionToTag(block, tag_ty, val, operand_src);
break :tag .{ tag_val, tag_ty };
},
else => .{
if (maybe_operand_opv) |operand_opv| .fromValue(operand_opv) else val,
operand_ty,
},
};
if (zir_switch.has_continue and !block.isComptime()) {
const operand_alloc: Air.Inst.Ref = if (zir_switch.any_maybe_runtime_capture and
maybe_operand_opv == null)
alloc: {
const operand_ptr_ty = try pt.singleMutPtrType(sema.typeOf(raw_operand));
const operand_alloc = try block.addTy(.alloc, operand_ptr_ty);
_ = try block.addBinOp(.store, operand_alloc, raw_operand);
break :alloc operand_alloc;
} else undefined;
break :operand .{ .{ .loop = .{
.operand_alloc = operand_alloc,
.operand_is_ref = operand_is_ref,
.init_cond = init_cond,
} }, operand_ty, maybe_operand_opv, item_ty };
} else {
// We always use `simple` in the comptime/OPV case, because as far as the
// dispatching logic is concerned, it really is dispatching a single prong.
break :operand .{ .{ .simple = .{
.by_val = val,
.by_ref = ref,
.cond = init_cond,
} }, operand_ty, maybe_operand_opv, item_ty };
}
};
const raw_operand_ty = sema.typeOf(raw_operand);
const union_originally = operand_ty.zigTypeTag(zcu) == .@"union";
const err_set = operand_ty.zigTypeTag(zcu) == .error_set;
if (item_ty.zigTypeTag(zcu) == .@"enum" and
validated_switch.seen_enum_fields.len == 0 and
!operand_ty.isNonexhaustiveEnum(zcu))
{
return .void_value; // switch on empty enum/union
}
const cond_ref = switch (operand) {
.simple => |s| s.cond,
.loop => |l| l.init_cond,
};
// We treat `else` and `_` the same, except if both are present.
const else_is_named_only = has_else and has_under;
const catch_all_case: CatchAllSwitchCase =
if (has_under) .under else if (has_else) .@"else" else .none;
resolve_at_comptime: {
// always runtime; evaluation in comptime scope uses `simple`
if (operand == .loop) break :resolve_at_comptime;
var cur_cond_val = try sema.resolveDefinedValue(child_block, src, cond_ref) orelse {
break :resolve_at_comptime;
};
var cur_operand = operand;
while (true) {
if (sema.resolveSwitchBlock(
block,
child_block,
cur_operand,
raw_operand_ty,
cur_cond_val,
catch_all_case,
else_is_named_only,
merges,
switch_inst,
zir_switch,
validated_switch,
)) |result| {
return result;
} else |err| switch (err) {
error.ComptimeBreak => {
const break_inst = sema.code.instructions.get(@intFromEnum(sema.comptime_break_inst));
if (break_inst.tag != .switch_continue) return error.ComptimeBreak;
const extra = sema.code.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data;
if (extra.block_inst != switch_inst) return error.ComptimeBreak;
// This is a `switch_continue` targeting this block. Change the operand and start over.
const new_operand_src = child_block.nodeOffset(extra.operand_src_node.unwrap().?);
const new_operand_uncoerced = try sema.resolveInst(break_inst.data.@"break".operand);
const new_operand = try sema.coerce(child_block, raw_operand_ty, new_operand_uncoerced, new_operand_src);
try sema.emitBackwardBranch(child_block, src);
const new_val, const new_ref = if (operand_is_ref)
.{ try sema.analyzeLoad(child_block, src, new_operand, new_operand_src), new_operand }
else
.{ new_operand, undefined };
const new_cond_ref = if (union_originally)
try sema.unionToTag(child_block, item_ty, new_val, src)
else
new_val;
cur_cond_val = try sema.resolveConstDefinedValue(child_block, src, new_cond_ref, null);
cur_operand = .{ .simple = .{
.by_val = new_val,
.by_ref = new_ref,
.cond = new_cond_ref,
} };
},
else => |e| return e,
}
}
}
if (child_block.isComptime()) {
_ = try sema.resolveConstDefinedValue(&child_block, main_operand_src, raw_operand_val, null);
_ = try sema.resolveConstDefinedValue(child_block, operand_src, operand.simple.cond, null);
unreachable;
}
const cond = if (extra.data.bits.payload_is_ref) blk: {
try sema.checkErrorType(block, main_src, sema.typeOf(raw_operand_val).elemType2(zcu));
const loaded = try sema.analyzeLoad(block, main_src, raw_operand_val, main_src);
break :blk try sema.analyzeIsNonErr(block, main_src, loaded);
} else blk: {
try sema.checkErrorType(block, main_src, sema.typeOf(raw_operand_val));
break :blk try sema.analyzeIsNonErr(block, main_src, raw_operand_val);
};
if (try sema.typeHasOnePossibleValue(item_ty)) |item_opv| {
// We simplify conditions with OPV to either a `loop` or a `block` since
// we cannot switch on a value which doesn't exist at runtime.
assert(operand == .loop); // `simple` should have already been comptime-resolved above!
var sub_block = child_block.makeSubBlock();
sub_block.runtime_loop = null;
sub_block.runtime_cond = main_operand_src;
sub_block.runtime_index.increment();
sub_block.need_debug_scope = null; // this body is emitted regardless
defer sub_block.instructions.deinit(gpa);
var case_block = child_block.makeSubBlock();
case_block.runtime_loop = null;
case_block.runtime_cond = operand_src;
case_block.runtime_index.increment();
case_block.need_debug_scope = null; // this body is emitted regardless
defer case_block.instructions.deinit(gpa);
const non_error_hint = try sema.analyzeBodyRuntimeBreak(&sub_block, non_error_case.body);
const true_instructions = try sub_block.instructions.toOwnedSlice(gpa);
defer gpa.free(true_instructions);
const case_vals = validated_switch.case_vals;
spa.operand.simple.by_val = if (extra.data.bits.payload_is_ref)
try sema.analyzeErrUnionCodePtr(&sub_block, switch_operand_src, raw_operand_val)
else
try sema.analyzeErrUnionCode(&sub_block, switch_operand_src, raw_operand_val);
const index, const body, const capture, const has_tag_capture, const is_inline, const is_special = find_prong: {
var case_val_idx: usize = 0;
var case_it = zir_switch.iterateCases();
var extra_index = zir_switch.end;
while (case_it.next()) |case| {
const prong_info = case.prong_info;
const prong_body = sema.code.bodySlice(extra_index, prong_info.body_len);
extra_index += prong_body.len;
skip_case: {
if (!err_set) break :skip_case;
// This case might consist of errors which are not in the set
// we're switching on. If so we have to skip it!
const item_refs = case_vals[case_val_idx..][0..case.item_infos.len];
case_val_idx += item_refs.len;
assert(case.range_infos.len == 0);
for (case.item_infos, item_refs) |item_info, item_ref| {
if (item_info.bodyLen()) |body_len| extra_index += body_len;
if (sema.wantSwitchProngBodyAnalysis(block, item_ref, operand_ty, false, true, prong_info.is_comptime_unreach)) {
break :skip_case;
}
}
continue;
}
break :find_prong .{ case.index, prong_body, prong_info.capture, prong_info.has_tag_capture, prong_info.is_inline, false };
}
if (has_else) {
// This *has* to be checked after iterating all regular cases because
// we allow simple noreturn else prongs when switching on error sets!
break :find_prong .{ else_case.index, else_case.body, else_case.capture, else_case.has_tag_capture, else_case.is_inline, true };
}
unreachable; // malformed validated switch
};
if (extra.data.bits.any_uses_err_capture) {
sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand.simple.by_val);
const analyze_body = sema.wantSwitchProngBodyAnalysis(block, .fromValue(item_opv), operand_ty, union_originally, err_set, false);
if (!analyze_body) return .unreachable_value;
if (!(err_set and
try sema.maybeErrorUnwrap(&case_block, body, cond_ref, operand_src, true)))
{
// Set up captures manually to avoid special cases in the main logic.
const payload_inst: Zir.Inst.Index = if (capture != .none) inst: {
const payload_inst = zir_switch.payload_capture_placeholder.unwrap() orelse switch_inst;
const payload_ref: Air.Inst.Ref = payload_ref: {
const item_val: InternPool.Index = switch (operand_ty.zigTypeTag(zcu)) {
.@"union" => item_val: {
if (maybe_operand_opv) |operand_opv| {
break :item_val zcu.intern_pool.indexToKey(operand_opv.toIntern()).un.val;
}
assert(union_originally); // operand type must be union, otherwise it would be an OPV type here
assert(zir_switch.any_maybe_runtime_capture); // there's a payload capture
const operand_val, const operand_ref = switch (operand) {
.simple => unreachable,
.loop => |l| load_operand: {
const loaded = try sema.analyzeLoad(block, src, l.operand_alloc, src);
if (l.operand_is_ref) {
const by_val = try sema.analyzeLoad(block, src, loaded, src);
break :load_operand .{ by_val, loaded };
} else {
break :load_operand .{ loaded, undefined };
}
},
};
break :payload_ref try sema.analyzeSwitchPayloadCapture(
&case_block,
operand,
operand_val,
operand_ref,
operand_ty,
operand_src,
block.src(.{ .switch_capture = .{
.switch_node_offset = src_node_offset,
.case_idx = index,
} }),
capture == .by_ref,
is_special,
if (!is_special) case_vals else undefined,
if (is_inline) .fromValue(item_opv) else .none,
validated_switch.else_err_ty,
);
},
else => item_opv.toIntern(),
};
break :payload_ref switch (capture) {
.by_val => .fromIntern(item_val),
.by_ref => try sema.uavRef(item_val),
.none => unreachable,
};
};
assert(!sema.typeOf(payload_ref).isNoReturn(sema.pt.zcu));
sema.inst_map.putAssumeCapacity(payload_inst, payload_ref);
break :inst payload_inst;
} else undefined;
defer if (capture != .none) assert(sema.inst_map.remove(payload_inst));
const tag_inst: Zir.Inst.Index = if (has_tag_capture) inst: {
const tag_inst = zir_switch.tag_capture_placeholder.unwrap() orelse switch_inst;
sema.inst_map.putAssumeCapacity(tag_inst, .fromValue(item_opv));
break :inst tag_inst;
} else undefined;
defer if (has_tag_capture) assert(sema.inst_map.remove(tag_inst));
if (zir_switch.has_continue) sema.inst_map.putAssumeCapacity(switch_inst, .fromType(raw_operand_ty));
defer if (zir_switch.has_continue) assert(sema.inst_map.remove(switch_inst));
_ = try sema.analyzeBodyRuntimeBreak(&case_block, body);
}
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len +
case_block.instructions.items.len);
const payload_index = sema.addExtraAssumeCapacity(Air.Block{
.body_len = @intCast(case_block.instructions.items.len),
});
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
const air_tag: Air.Inst.Tag = if (merges.extra_insts.items.len > 0)
.loop
else
.block;
const air_loop_ref = try child_block.addInst(.{
.tag = air_tag,
.data = .{ .ty_pl = .{
.ty = .noreturn_type,
.payload = payload_index,
} },
});
try sema.fixupSwitchContinues(
block,
src,
air_loop_ref,
operand,
operand_is_ref,
item_ty,
.opv,
zir_switch.any_maybe_runtime_capture,
merges,
);
return null;
}
defer if (extra.data.bits.any_uses_err_capture) assert(sema.inst_map.remove(err_capture_inst));
_ = try sema.analyzeSwitchRuntimeBlock(
spa,
&sub_block,
switch_src,
try sema.switchCond(block, switch_operand_src, spa.operand.simple.by_val),
operand_err_set_ty,
switch_operand_src,
case_vals,
.{
.body = else_case.body,
.end = else_case.end,
.capture = if (else_case.has_capture) .by_val else .none,
.is_inline = else_case.is_inline,
.has_tag_capture = false,
},
scalar_cases_len,
multi_cases_len,
false,
undefined,
true,
switch_src_node_offset,
else_prong_src,
false,
undefined,
seen_errors,
undefined,
undefined,
undefined,
cond_dbg_node_index,
true,
null,
undefined,
&.{},
&.{},
assert(maybe_operand_opv == null); // `operand_ty` can only be an OPV type if `item_ty` is one too!
try sema.finishSwitchBr(
block,
child_block,
operand,
raw_operand_ty,
operand_is_ref,
merges,
switch_inst,
zir_switch,
validated_switch,
);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).@"struct".fields.len +
true_instructions.len + sub_block.instructions.items.len);
_ = try child_block.addInst(.{
.tag = .cond_br,
.data = .{
.pl_op = .{
.operand = cond,
.payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(true_instructions.len),
.else_body_len = @intCast(sub_block.instructions.items.len),
.branch_hints = .{
.true = non_error_hint,
.false = .none,
// Code coverage is desired for error handling.
.then_cov = .poi,
.else_cov = .poi,
},
}),
},
},
});
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(true_instructions));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items));
return sema.resolveAnalyzedBlock(block, main_src, &child_block, merges, false);
return null;
}
fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_ref: bool) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
/// Emits the runtime AIR dispatch for a switch whose condition is not
/// comptime-known. Prong items must already have been resolved into
/// `validated_switch` (see `validateSwitchBlock`). This analyzes every prong
/// body into AIR cases — unrolling `inline` prongs and integer/enum ranges
/// into one case per item — packs per-case branch hints, emits the
/// catch-all (`else`/`_`) prong including any required safety check, then
/// appends the final `switch_br`/`loop_switch_br` instruction and fixes up
/// pending `continue :sw` dispatches recorded in `merges`.
///
/// Fix: removed three stale lines left over from the `zirSwitchBlock`
/// refactor which read `sema.code.instructions...[@intFromEnum(inst)]` —
/// `inst` is not a parameter of this function, and the lines also shadowed
/// the `src`/`src_node_offset` locals declared immediately below from
/// `zir_switch.switch_src_node_offset`.
fn finishSwitchBr(
    sema: *Sema,
    block: *Block,
    child_block: *Block,
    operand: SwitchOperand,
    raw_operand_ty: Type,
    operand_is_ref: bool,
    merges: *Block.Merges,
    switch_inst: Zir.Inst.Index,
    zir_switch: *const Zir.UnwrappedSwitchBlock,
    validated_switch: *const ValidatedSwitchBlock,
) CompileError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    const gpa = sema.gpa;
    const src_node_offset = zir_switch.switch_src_node_offset;
    const src = block.nodeOffset(src_node_offset);
    const operand_src = block.src(.{ .node_offset_switch_operand = src_node_offset });
    const has_else = zir_switch.else_case != null;
    const has_under = zir_switch.has_under;
    const else_case = validated_switch.else_case;
    const scalar_cases_len = zir_switch.scalarCasesLen();
    const multi_cases_len = zir_switch.multiCasesLen();
    // When switching by reference, the item type is the pointer's child.
    const operand_ty = if (operand_is_ref)
        raw_operand_ty.childType(zcu)
    else
        raw_operand_ty;
    const cond_ref = switch (operand) {
        .simple => |s| s.cond,
        .loop => |l| l.init_cond,
    };
    // AstGen guarantees that the instruction immediately preceding
    // switch_block[_ref]/switch_block_err_union is a dbg_stmt.
    const cond_dbg_node_index: Zir.Inst.Index = @enumFromInt(@intFromEnum(switch_inst) - 1);
    const else_is_named_only = has_else and has_under;
    const catch_all_case: CatchAllSwitchCase =
        if (has_under) .under else if (has_else) .@"else" else .none;
    // For a tagged union operand we dispatch on its tag type.
    const item_ty = switch (operand_ty.zigTypeTag(zcu)) {
        .@"union" => operand_ty.unionTagType(zcu).?,
        else => operand_ty,
    };
    const union_originally = operand_ty.zigTypeTag(zcu) == .@"union";
    const err_set = operand_ty.zigTypeTag(zcu) == .error_set;
    const estimated_cases_len: u32 = scalar_cases_len + multi_cases_len +
        @intFromBool(has_else or has_under);
    // Branch hints are packed 10 per u32 "bag" (3 bits per hint) for the AIR
    // payload; this helper manages capacity/packing bookkeeping.
    const BranchHints = struct {
        bags: std.ArrayList(u32),
        count: u32,
        const hints_per_bag = 10;
        fn ensureUnusedCapacity(hints: *@This(), gpa_inner: Allocator, additional_count: u32) Allocator.Error!void {
            const unused_hints = hints.bags.capacity * hints_per_bag - hints.count;
            if (unused_hints >= additional_count) return;
            const bags_required = std.math.divCeil(u32, hints.count + additional_count, hints_per_bag) catch unreachable;
            return hints.bags.ensureUnusedCapacity(gpa_inner, bags_required);
        }
        fn appendAssumeCapacity(hints: *@This(), hint: std.builtin.BranchHint) void {
            const idx_in_bag = hints.count % hints_per_bag;
            // Re-pop the partially-filled bag (if any) so the new hint can be
            // OR'd into its slot.
            var bag: u32 = if (idx_in_bag > 0) hints.bags.pop().? else 0;
            bag |= @as(u32, @intFromEnum(hint)) << @intCast(@bitSizeOf(std.builtin.BranchHint) * idx_in_bag);
            hints.count += 1;
            return hints.bags.appendAssumeCapacity(bag);
        }
        fn append(hints: *@This(), gpa_inner: Allocator, hint: std.builtin.BranchHint) Allocator.Error!void {
            try hints.ensureUnusedCapacity(gpa_inner, 1);
            return hints.appendAssumeCapacity(hint);
        }
    };
    var branch_hints: BranchHints = hints: {
        const num_bags = std.math.divCeil(u32, estimated_cases_len, BranchHints.hints_per_bag) catch unreachable;
        break :hints .{ .bags = try .initCapacity(gpa, num_bags), .count = 0 };
    };
    defer branch_hints.bags.deinit(gpa);
    var cases_extra: std.ArrayList(u32) = try .initCapacity(gpa, estimated_cases_len *
        @typeInfo(Air.SwitchBr.Case).@"struct".fields.len);
    defer cases_extra.deinit(gpa);
    // We will reuse this block for each case.
    var case_block = child_block.makeSubBlock();
    case_block.runtime_loop = null;
    case_block.runtime_cond = operand_src;
    case_block.runtime_index.increment();
    case_block.need_debug_scope = null; // this body is emitted regardless
    defer case_block.instructions.deinit(gpa);
    const case_vals = validated_switch.case_vals;
    var case_val_idx: usize = 0;
    var case_it = zir_switch.iterateCases();
    var extra_index = zir_switch.end;
    // A `_` prong is deferred: it becomes the catch-all prong below.
    var under_prong: ?struct {
        index: Zir.UnwrappedSwitchBlock.Case.Index,
        body: []const Zir.Inst.Index,
        capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
        has_tag_capture: bool,
    } = null;
    var cases_len: u32 = 0;
    // Lower each explicit prong. `inline` prongs and ranges are unrolled into
    // one AIR case per concrete item value.
    while (case_it.next()) |case| {
        const item_refs = case_vals[case_val_idx..][0..case.item_infos.len];
        case_val_idx += item_refs.len;
        const range_refs: []const [2]Air.Inst.Ref =
            @ptrCast(case_vals[case_val_idx..][0 .. 2 * case.range_infos.len]);
        case_val_idx += 2 * range_refs.len;
        const prong_info = case.prong_info;
        const prong_body = sema.code.bodySlice(extra_index, prong_info.body_len);
        extra_index += prong_body.len;
        // Enough capacity for inlining regular items, we can't really predict
        // how many range items we will end up with (at least not in a safe and
        // cheap manner) so we allocate on demand for those.
        if (prong_info.is_inline) {
            try branch_hints.ensureUnusedCapacity(gpa, @intCast(case.item_infos.len));
        }
        var emit_bb = false;
        var any_analyze_body = false;
        var is_under_prong = false;
        for (case.item_infos, item_refs, 0..) |item_info, item_ref, item_i| {
            // A `.none` item marks the `_` prong.
            if (item_ref == .none) is_under_prong = true;
            if (item_info.bodyLen()) |body_len| extra_index += body_len;
            const analyze_body = sema.wantSwitchProngBodyAnalysis(block, item_ref, operand_ty, union_originally, err_set, prong_info.is_comptime_unreach);
            if (analyze_body) any_analyze_body = true;
            if (prong_info.is_inline) {
                // Unrolled: one AIR case per inline item.
                cases_len += 1;
                case_block.instructions.clearRetainingCapacity();
                case_block.error_return_trace_index = child_block.error_return_trace_index;
                if (emit_bb) {
                    const bb_src = block.src(.{ .switch_case_item = .{
                        .switch_node_offset = src_node_offset,
                        .case_idx = case.index,
                        .item_idx = .{ .kind = .single, .value = @intCast(item_i) },
                    } });
                    try sema.emitBackwardBranch(block, bb_src);
                }
                emit_bb = true;
                const prong_hint: std.builtin.BranchHint = hint: {
                    if (analyze_body) break :hint try sema.analyzeSwitchProng(
                        &case_block,
                        operand,
                        operand_ty,
                        raw_operand_ty,
                        prong_body,
                        block.src(.{ .switch_capture = .{
                            .switch_node_offset = src_node_offset,
                            .case_idx = case.index,
                        } }),
                        prong_info.capture,
                        prong_info.has_tag_capture,
                        item_ref,
                        .{ .item_refs = &.{item_ref} },
                        validated_switch.else_err_ty,
                        switch_inst,
                        zir_switch,
                    );
                    _ = try case_block.addNoOp(.unreach);
                    break :hint .cold; // unreachable branches are cold
                };
                branch_hints.appendAssumeCapacity(prong_hint);
                try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
                    1 + // `item`, no ranges
                    case_block.instructions.items.len);
                cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
                    .items_len = 1,
                    .ranges_len = 0,
                    .body_len = @intCast(case_block.instructions.items.len),
                }));
                cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
            }
        }
        for (case.range_infos, range_refs, 0..) |range_info, range_ref, range_i| {
            if (range_info[0].bodyLen()) |body_len| extra_index += body_len;
            if (range_info[1].bodyLen()) |body_len| extra_index += body_len;
            any_analyze_body = true; // always an integer range, always needs analysis
            if (prong_info.is_inline) {
                // Unroll the inclusive range into one case per value.
                var item = sema.resolveConstDefinedValue(block, .unneeded, range_ref[0], undefined) catch unreachable;
                const item_last = sema.resolveConstDefinedValue(block, .unneeded, range_ref[1], undefined) catch unreachable;
                if (try item.getUnsignedIntSema(pt)) |first_int| {
                    if (try item_last.getUnsignedIntSema(pt)) |last_int| {
                        if (std.math.cast(u32, last_int - first_int)) |range_len| {
                            try branch_hints.ensureUnusedCapacity(gpa, range_len);
                        }
                    }
                }
                var prev_result_overflowed = false;
                while (item.compareScalar(.lte, item_last, operand_ty, zcu)) : ({
                    // Previous validation has resolved any possible lazy values.
                    const int_val: Value, const int_ty: Type = switch (operand_ty.zigTypeTag(zcu)) {
                        .int => .{ item, operand_ty },
                        .@"enum" => b: {
                            const int_val: Value = .fromInterned(ip.indexToKey(item.toIntern()).enum_tag.int);
                            break :b .{ int_val, int_val.typeOf(zcu) };
                        },
                        else => unreachable,
                    };
                    assert(!prev_result_overflowed);
                    const result = try arith.incrementDefinedInt(sema, int_ty, int_val);
                    prev_result_overflowed = result.overflow;
                    item = switch (operand_ty.zigTypeTag(zcu)) {
                        .int => result.val,
                        .@"enum" => .fromInterned(try pt.intern(.{ .enum_tag = .{
                            .ty = operand_ty.toIntern(),
                            .int = result.val.toIntern(),
                        } })),
                        else => unreachable,
                    };
                }) {
                    cases_len += 1;
                    case_block.instructions.clearRetainingCapacity();
                    case_block.error_return_trace_index = child_block.error_return_trace_index;
                    const item_ref: Air.Inst.Ref = .fromValue(item);
                    if (emit_bb) {
                        const bb_src = block.src(.{ .switch_case_item = .{
                            .switch_node_offset = src_node_offset,
                            .case_idx = case.index,
                            .item_idx = .{ .kind = .range, .value = @intCast(range_i) },
                        } });
                        try sema.emitBackwardBranch(block, bb_src);
                    }
                    emit_bb = true;
                    const prong_hint = try sema.analyzeSwitchProng(
                        &case_block,
                        operand,
                        operand_ty,
                        raw_operand_ty,
                        prong_body,
                        block.src(.{ .switch_capture = .{
                            .switch_node_offset = src_node_offset,
                            .case_idx = case.index,
                        } }),
                        prong_info.capture,
                        prong_info.has_tag_capture,
                        item_ref,
                        .has_ranges,
                        validated_switch.else_err_ty,
                        switch_inst,
                        zir_switch,
                    );
                    try branch_hints.append(gpa, prong_hint);
                    try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
                        1 + // `item`, no ranges
                        case_block.instructions.items.len);
                    cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
                        .items_len = 1,
                        .ranges_len = 0,
                        .body_len = @intCast(case_block.instructions.items.len),
                    }));
                    cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                    cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
                }
            }
        }
        if (prong_info.is_inline) continue; // handled above
        if (is_under_prong) {
            // Defer: the `_` prong is emitted as the catch-all below.
            under_prong = .{
                .index = case.index,
                .body = prong_body,
                .capture = case.prong_info.capture,
                .has_tag_capture = case.prong_info.has_tag_capture,
            };
            continue;
        }
        // Regular (non-inline) prong: one AIR case with all its items/ranges.
        cases_len += 1;
        case_block.instructions.clearRetainingCapacity();
        case_block.error_return_trace_index = child_block.error_return_trace_index;
        const prong_hint: std.builtin.BranchHint = hint: {
            if (any_analyze_body) break :hint try sema.analyzeSwitchProng(
                &case_block,
                operand,
                operand_ty,
                raw_operand_ty,
                prong_body,
                block.src(.{ .switch_capture = .{
                    .switch_node_offset = src_node_offset,
                    .case_idx = case.index,
                } }),
                prong_info.capture,
                prong_info.has_tag_capture,
                .none,
                .{ .item_refs = item_refs },
                validated_switch.else_err_ty,
                switch_inst,
                zir_switch,
            );
            _ = try case_block.addNoOp(.unreach);
            break :hint .cold; // unreachable branches are cold
        };
        try branch_hints.append(gpa, prong_hint);
        try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
            item_refs.len +
            2 * range_refs.len +
            case_block.instructions.items.len);
        cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
            .items_len = @intCast(item_refs.len),
            .ranges_len = @intCast(range_refs.len),
            .body_len = @intCast(case_block.instructions.items.len),
        }));
        cases_extra.appendSliceAssumeCapacity(@ptrCast(item_refs));
        cases_extra.appendSliceAssumeCapacity(@ptrCast(range_refs));
        cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
    }
    // Catch-all body: `else`/`_` prong, `inline else` enumeration, and/or a
    // `corrupt_switch` safety check when there is no catch-all prong.
    const catch_all_extra: []const u32 = catch_all_extra: {
        if (catch_all_case == .none and !case_block.wantSafety()) {
            try branch_hints.append(gpa, .none);
            break :catch_all_extra &.{};
        }
        var emit_bb = false;
        if (has_else and else_case.is_inline) {
            // `inline else`: enumerate every item not handled by another
            // prong, emitting one case per value.
            const else_prong_src = block.src(.{ .node_offset_switch_else_prong = src_node_offset });
            var error_names: InternPool.NullTerminatedString.Slice = undefined;
            var min_int: Value = undefined;
            check_enumerable: {
                switch (item_ty.zigTypeTag(zcu)) {
                    .@"union" => unreachable,
                    .@"enum" => if (else_is_named_only or
                        !item_ty.isNonexhaustiveEnum(zcu) or union_originally)
                    {
                        try branch_hints.ensureUnusedCapacity(gpa, @intCast(validated_switch.seen_enum_fields.len));
                        break :check_enumerable;
                    },
                    .error_set => if (!operand_ty.isAnyError(zcu)) {
                        error_names = item_ty.errorSetNames(zcu);
                        try branch_hints.ensureUnusedCapacity(gpa, error_names.len);
                        break :check_enumerable;
                    },
                    .int => {
                        min_int = try item_ty.minInt(pt, item_ty);
                        break :check_enumerable;
                    },
                    .bool, .void => break :check_enumerable,
                    else => {},
                }
                return sema.fail(block, else_prong_src, "cannot enumerate values of type '{f}' for 'inline else'", .{
                    item_ty.fmt(pt),
                });
            }
            var unhandled_it = validated_switch.iterateUnhandledItems(error_names, min_int);
            while (try unhandled_it.next(sema, item_ty)) |item_val| {
                cases_len += 1;
                case_block.instructions.clearRetainingCapacity();
                case_block.error_return_trace_index = child_block.error_return_trace_index;
                const item_ref: Air.Inst.Ref = .fromValue(item_val);
                const analyze_body = sema.wantSwitchProngBodyAnalysis(block, item_ref, operand_ty, union_originally, err_set, false);
                if (emit_bb) try sema.emitBackwardBranch(block, else_prong_src);
                emit_bb = true;
                const prong_hint: std.builtin.BranchHint = hint: {
                    if (analyze_body) break :hint try sema.analyzeSwitchProng(
                        &case_block,
                        operand,
                        operand_ty,
                        raw_operand_ty,
                        else_case.body,
                        block.src(.{ .switch_capture = .{
                            .switch_node_offset = src_node_offset,
                            .case_idx = else_case.index,
                        } }),
                        else_case.capture,
                        else_case.has_tag_capture,
                        item_ref,
                        .special,
                        validated_switch.else_err_ty,
                        switch_inst,
                        zir_switch,
                    );
                    _ = try case_block.addNoOp(.unreach);
                    break :hint .cold; // unreachable branches are cold
                };
                try branch_hints.append(gpa, prong_hint);
                try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
                    1 + // `item`, no ranges
                    case_block.instructions.items.len);
                cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
                    .items_len = 1,
                    .ranges_len = 0,
                    .body_len = @intCast(case_block.instructions.items.len),
                }));
                cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
            }
        }
        case_block.instructions.clearRetainingCapacity();
        case_block.error_return_trace_index = child_block.error_return_trace_index;
        if (zcu.backendSupportsFeature(.is_named_enum_value) and
            catch_all_case != .none and block.wantSafety() and
            item_ty.zigTypeTag(zcu) == .@"enum" and
            (!operand_ty.isNonexhaustiveEnum(zcu) or union_originally))
        {
            try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
            const ok = try case_block.addUnOp(.is_named_enum_value, cond_ref);
            if (else_is_named_only) {} else {
                try sema.addSafetyCheck(&case_block, src, ok, .corrupt_switch);
            }
        }
        if (else_is_named_only and !else_case.is_inline) {
            // If we have both an `else` and an `_` prong, all named values go
            // into the `else` prong and all unnamed ones go into the `_` prong.
            // We will manually enumerate all named values which haven't been
            // encountered yet and create an extra prong for them, which will
            // evaulate to the `else` body.
            assert(operand_ty.isNonexhaustiveEnum(zcu));
            cases_len += 1;
            const prong_hint: std.builtin.BranchHint = hint: {
                if (!else_case.is_inline) break :hint try sema.analyzeSwitchProng(
                    &case_block,
                    operand,
                    operand_ty,
                    raw_operand_ty,
                    else_case.body,
                    block.src(.{ .switch_capture = .{
                        .switch_node_offset = src_node_offset,
                        .case_idx = else_case.index,
                    } }),
                    else_case.capture,
                    else_case.has_tag_capture,
                    .none,
                    .special,
                    validated_switch.else_err_ty,
                    switch_inst,
                    zir_switch,
                );
                _ = try case_block.addNoOp(.unreach);
                break :hint .cold; // unreachable branches are cold
            };
            try branch_hints.append(gpa, prong_hint);
            try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
                (validated_switch.seen_enum_fields.len + 1 - zir_switch.totalItemsLen()) + // +1 because totalItemsLen includes the _
                case_block.instructions.items.len);
            const extra_case = cases_extra.addManyAsArrayAssumeCapacity(
                @typeInfo(Air.SwitchBr.Case).@"struct".fields.len,
            );
            var items_len: u32 = 0;
            for (validated_switch.seen_enum_fields, 0..) |seen_field, field_i| {
                if (seen_field != null) continue;
                const item_val = try pt.enumValueFieldIndex(item_ty, @intCast(field_i));
                const item_ref: Air.Inst.Ref = .fromValue(item_val);
                cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
                items_len += 1;
            }
            assert(items_len > 0); // `else` must be reachable at this point
            extra_case.* = payloadToExtraItems(Air.SwitchBr.Case{
                .items_len = items_len,
                .ranges_len = 0,
                .body_len = @intCast(case_block.instructions.items.len),
            });
            cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
            // We fall through to the regular catch-all prong generation.
            case_block.instructions.clearRetainingCapacity();
            case_block.error_return_trace_index = child_block.error_return_trace_index;
        }
        // Decide whether the catch-all body is actually reachable and thus
        // worth analyzing (e.g. all remaining union fields may be noreturn,
        // or the residual error set may be empty).
        const analyze_catch_all_body = analyze_body: {
            switch (catch_all_case) {
                .none => break :analyze_body false, // we still may want a safety check!
                .under => break :analyze_body true, // can't be a union anyway
                .@"else" => if (else_case.is_inline) break :analyze_body false,
            }
            if (union_originally) {
                const union_obj = zcu.typeToUnion(operand_ty).?;
                for (validated_switch.seen_enum_fields, 0..) |seen_field, field_i| {
                    if (seen_field != null) continue;
                    const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_i]);
                    if (!field_ty.isNoReturn(zcu)) break :analyze_body true;
                }
                break :analyze_body false;
            }
            if (err_set) {
                const else_err_ty = validated_switch.else_err_ty orelse {
                    assert(else_case.is_simple_noreturn);
                    break :analyze_body false;
                };
                if (else_err_ty.errorSetIsEmpty(zcu)) break :analyze_body false;
            }
            break :analyze_body true;
        };
        const catch_all_hint = hint: {
            if (analyze_catch_all_body) {
                const index, const body, const capture, const has_tag_capture = switch (catch_all_case) {
                    .@"else" => .{ else_case.index, else_case.body, else_case.capture, else_case.has_tag_capture },
                    .under => .{ under_prong.?.index, under_prong.?.body, under_prong.?.capture, under_prong.?.has_tag_capture },
                    .none => unreachable,
                };
                break :hint try sema.analyzeSwitchProng(
                    &case_block,
                    operand,
                    operand_ty,
                    raw_operand_ty,
                    body,
                    block.src(.{ .switch_capture = .{
                        .switch_node_offset = src_node_offset,
                        .case_idx = index,
                    } }),
                    capture,
                    has_tag_capture,
                    .none,
                    .special,
                    validated_switch.else_err_ty,
                    switch_inst,
                    zir_switch,
                );
            }
            // We still need a terminator in this block, but we have proven
            // that it is unreachable.
            if (case_block.wantSafety()) {
                try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
                try sema.safetyPanic(&case_block, src, .corrupt_switch);
            } else {
                _ = try case_block.addNoOp(.unreach);
            }
            break :hint .cold; // Safety check / unreachable branches are cold.
        };
        try branch_hints.append(gpa, catch_all_hint);
        break :catch_all_extra @ptrCast(case_block.instructions.items);
    };
    assert(branch_hints.count == cases_len + 1); // +1 for catch-all hint
    // Assemble the final AIR payload: header, packed hints, cases, catch-all.
    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).@"struct".fields.len +
        branch_hints.bags.items.len +
        cases_extra.items.len +
        catch_all_extra.len);
    const payload_index = sema.addExtraAssumeCapacity(Air.SwitchBr{
        .cases_len = @intCast(cases_len),
        .else_body_len = @intCast(catch_all_extra.len),
    });
    sema.air_extra.appendSliceAssumeCapacity(branch_hints.bags.items);
    sema.air_extra.appendSliceAssumeCapacity(cases_extra.items);
    sema.air_extra.appendSliceAssumeCapacity(catch_all_extra);
    // A labeled switch with pending `continue` dispatches becomes a
    // re-dispatchable `loop_switch_br`; otherwise a plain `switch_br`.
    const air_tag: Air.Inst.Tag = if (operand == .loop and merges.extra_insts.items.len > 0)
        .loop_switch_br
    else
        .switch_br;
    const air_switch_ref = try child_block.addInst(.{
        .tag = air_tag,
        .data = .{ .pl_op = .{
            .operand = cond_ref,
            .payload = payload_index,
        } },
    });
    try sema.fixupSwitchContinues(
        block,
        src,
        air_switch_ref,
        operand,
        operand_is_ref,
        item_ty,
        .normal,
        zir_switch.any_maybe_runtime_capture,
        merges,
    );
}
/// This is the counterpart to `zirSwitchContinue`; replaces placeholder `br` insts
/// with their respective finalized inst pointing back at `switch_ref`.
///
/// `merges.extra_insts` holds one placeholder per `continue :sw` dispatch,
/// paired index-wise with `merges.extra_src_locs`. Each placeholder is
/// rewritten in place to either a single `switch_dispatch`/`repeat`
/// instruction, or — when extra work (store / load / coercion / safety
/// check) is required — a `noreturn` block containing that work.
fn fixupSwitchContinues(
    sema: *Sema,
    block: *Block,
    switch_src: LazySrcLoc,
    switch_ref: Air.Inst.Ref,
    operand: SwitchOperand,
    operand_is_ref: bool,
    item_ty: Type,
    /// `.opv` means the switch was lowered to a plain `loop`/`block` because
    /// the item type has only one possible value; `.normal` means a real
    /// `switch_br`/`loop_switch_br` was emitted.
    mode: enum { normal, opv },
    any_non_inline_capture: bool,
    merges: *const Block.Merges,
) CompileError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const gpa = sema.gpa;
    const air_tag = sema.air_instructions.items(.tag)[@intFromEnum(switch_ref.toIndex().?)];
    // Sanity: the emitted AIR tag must agree with `mode`...
    switch (air_tag) {
        .loop_switch_br, .switch_br => assert(mode == .normal),
        .loop, .block => assert(mode == .opv),
        else => unreachable,
    }
    // ...and the loop forms are used exactly when dispatches exist.
    switch (air_tag) {
        .loop_switch_br, .loop => assert(merges.extra_insts.items.len > 0),
        .switch_br, .block => assert(merges.extra_insts.items.len == 0),
        else => unreachable,
    }
    for (merges.extra_insts.items, merges.extra_src_locs.items) |placeholder_inst, dispatch_src| {
        var replacement_block = block.makeSubBlock();
        defer replacement_block.instructions.deinit(gpa);
        assert(sema.air_instructions.items(.tag)[@intFromEnum(placeholder_inst)] == .br);
        // The placeholder `br` carries the new operand of the dispatch.
        const new_operand_maybe_ref = sema.air_instructions.items(.data)[@intFromEnum(placeholder_inst)].br.operand;
        if (any_non_inline_capture and mode != .opv) {
            // Runtime captures read the operand through `operand_alloc`, so
            // the new operand must be stored there before re-dispatching.
            _ = try replacement_block.addBinOp(.store, operand.loop.operand_alloc, new_operand_maybe_ref);
        }
        const new_operand_val = if (operand_is_ref)
            try sema.analyzeLoad(&replacement_block, dispatch_src, new_operand_maybe_ref, dispatch_src)
        else
            new_operand_maybe_ref;
        const new_cond = try sema.coerce(&replacement_block, item_ty, new_operand_val, dispatch_src);
        // Runtime-known enum dispatch values get the same `corrupt_switch`
        // safety check as the initial dispatch.
        if (zcu.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and
            item_ty.zigTypeTag(zcu) == .@"enum" and !item_ty.isNonexhaustiveEnum(zcu) and
            mode == .normal and !try sema.isComptimeKnown(new_cond))
        {
            const ok = try replacement_block.addUnOp(.is_named_enum_value, new_cond);
            try sema.addSafetyCheck(&replacement_block, switch_src, ok, .corrupt_switch);
        }
        switch (mode) {
            .normal => {
                // Dispatch back into the `loop_switch_br` with the new condition.
                _ = try replacement_block.addInst(.{
                    .tag = .switch_dispatch,
                    .data = .{ .br = .{
                        .block_inst = switch_ref.toIndex().?,
                        .operand = new_cond,
                    } },
                });
            },
            .opv => {
                // Single-possible-value switch: just repeat the loop.
                _ = try replacement_block.addInst(.{
                    .tag = .repeat,
                    .data = .{ .repeat = .{
                        .loop_inst = switch_ref.toIndex().?,
                    } },
                });
            },
        }
        if (replacement_block.instructions.items.len == 1) {
            // Optimization: we don't need a block!
            sema.air_instructions.set(
                @intFromEnum(placeholder_inst),
                sema.air_instructions.get(@intFromEnum(replacement_block.instructions.items[0])),
            );
            continue;
        }
        // Replace placeholder with a block.
        // No `br` is needed as the block is a switch dispatch so necessarily `noreturn`.
        try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len +
            replacement_block.instructions.items.len);
        sema.air_instructions.set(@intFromEnum(placeholder_inst), .{
            .tag = .block,
            .data = .{ .ty_pl = .{
                .ty = .noreturn_type,
                .payload = sema.addExtraAssumeCapacity(Air.Block{
                    .body_len = @intCast(replacement_block.instructions.items.len),
                }),
            } },
        });
        sema.air_extra.appendSliceAssumeCapacity(@ptrCast(replacement_block.instructions.items));
    }
}
/// The result of `validateSwitchBlock`: which items the switch's prongs
/// handle, with source locations for duplicate diagnostics, plus the resolved
/// prong item values. Which `seen_*` fields are meaningful depends on the
/// item type being switched on (enum, error set, int, bool, or void).
const ValidatedSwitchBlock = struct {
    // Per enum field: source location of the prong handling it, or null if unhandled.
    seen_enum_fields: []const ?LazySrcLoc,
    // Error names handled by some prong, mapped to that prong's source location.
    seen_errors: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, LazySrcLoc),
    // Handled integer ranges; `iterateUnhandledItems` relies on the order in
    // which `next` encounters them — presumably sorted by `first`; TODO confirm
    // against `validateSwitchBlock`.
    seen_ranges: []const RangeSet.Range,
    // For bool/void operands: location of the prong handling the value, or null.
    true_src: ?LazySrcLoc,
    false_src: ?LazySrcLoc,
    void_src: ?LazySrcLoc,
    // Resolved AIR refs for all prong items (and range bounds, two per range).
    case_vals: []const Air.Inst.Ref,
    else_case: Zir.UnwrappedSwitchBlock.Case.Else,
    // NOTE(review): appears to be the residual error set for the `else` prong
    // of an error-set switch, or null when no errors remain — confirm at the
    // site that populates it.
    else_err_ty: ?Type,
    /// Returns an iterator over all item values of the switched-on type that
    /// no prong handles (used to unroll `inline else`).
    fn iterateUnhandledItems(
        validated_switch: *const ValidatedSwitchBlock,
        /// May be `undefined` if `item_ty` isn't an `error_set`.
        error_names: InternPool.NullTerminatedString.Slice,
        /// May be `undefined` if `item_ty` isn't an `int`.
        min_int: Value,
    ) UnhandledIterator {
        return .{
            .next_idx = 0,
            .next_val = min_int,
            .error_names = error_names,
            .seen_enum_fields = validated_switch.seen_enum_fields,
            .seen_errors = &validated_switch.seen_errors,
            .seen_ranges = validated_switch.seen_ranges,
            .seen_true = validated_switch.true_src != null,
            .seen_false = validated_switch.false_src != null,
            .seen_void = validated_switch.void_src != null,
        };
    }
    const UnhandledIterator = struct {
        // For enums/errors: next field/name index to examine.
        // For ints: next entry of `seen_ranges` to skip over.
        next_idx: u32,
        // For ints only: next candidate value; null once the int type's range
        // is exhausted (increment overflowed).
        next_val: ?Value,
        error_names: InternPool.NullTerminatedString.Slice,
        seen_enum_fields: []const ?LazySrcLoc,
        seen_errors: *const std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, LazySrcLoc),
        seen_ranges: []const RangeSet.Range,
        seen_true: bool,
        seen_false: bool,
        seen_void: bool,
        /// Returns the next unhandled value of `item_ty`, or null when all
        /// values have been yielded.
        fn next(it: *UnhandledIterator, sema: *Sema, item_ty: Type) CompileError!?Value {
            const pt = sema.pt;
            const zcu = pt.zcu;
            const ip = &zcu.intern_pool;
            switch (item_ty.zigTypeTag(zcu)) {
                .@"union" => unreachable,
                .@"enum" => {
                    // Scan forward for the next field without a handling prong.
                    for (it.seen_enum_fields[it.next_idx..], it.next_idx..) |seen_field, field_i| {
                        if (seen_field != null) continue;
                        it.next_idx = @intCast(field_i + 1);
                        return try pt.enumValueFieldIndex(item_ty, @intCast(field_i));
                    }
                    return null;
                },
                .error_set => {
                    // Scan forward for the next error name not in `seen_errors`.
                    for (it.error_names.get(ip)[it.next_idx..], it.next_idx..) |err_name, name_i| {
                        if (it.seen_errors.contains(err_name)) continue;
                        it.next_idx = @intCast(name_i + 1);
                        return .fromInterned(try pt.intern(.{ .err = .{
                            .ty = item_ty.toIntern(),
                            .name = err_name,
                        } }));
                    }
                    return null;
                },
                .int => {
                    var cur = it.next_val orelse return null;
                    // While `cur` is the start of a handled range, jump past
                    // that range (to `last + 1`). Overflow means the range
                    // reaches the type's max value, so nothing remains.
                    while (it.next_idx < it.seen_ranges.len and
                        cur.eql(it.seen_ranges[it.next_idx].first, item_ty, zcu))
                    {
                        defer it.next_idx += 1;
                        const incr = try arith.incrementDefinedInt(
                            sema,
                            item_ty,
                            it.seen_ranges[it.next_idx].last,
                        );
                        if (incr.overflow) {
                            it.next_val = null;
                            return null;
                        }
                        cur = incr.val;
                    }
                    // `cur` is unhandled; pre-compute the following candidate.
                    const incr = try arith.incrementDefinedInt(sema, item_ty, cur);
                    it.next_val = if (incr.overflow) null else incr.val;
                    return cur;
                },
                .bool => {
                    if (!it.seen_true) {
                        it.seen_true = true;
                        return .true;
                    }
                    if (!it.seen_false) {
                        it.seen_false = true;
                        return .false;
                    }
                    return null;
                },
                .void => {
                    if (!it.seen_void) {
                        it.seen_void = true;
                        return .void;
                    }
                    return null;
                },
                else => unreachable, // item type is not enumerable
            }
        }
    };
};
/// Validates operand type and `else`/`_` prong usage, resolves all prong items
/// and checks them for duplicates/invalid ranges. Does not emit into `block`.
/// Reserves inst map space for all placeholders associated with `zir_switch`.
/// Contents of returned `ValidatedSwitchBlock` belong to `sema.arena`.
fn validateSwitchBlock(
sema: *Sema,
block: *Block,
raw_operand: Air.Inst.Ref,
operand_is_ref: bool,
switch_inst: Zir.Inst.Index,
zir_switch: *const Zir.UnwrappedSwitchBlock,
) CompileError!ValidatedSwitchBlock {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = sema.gpa;
const arena = sema.arena;
const src_node_offset = zir_switch.switch_src_node_offset;
const src = block.nodeOffset(src_node_offset);
const operand_src = block.src(.{ .node_offset_switch_operand = src_node_offset });
const else_prong_src = block.src(.{ .node_offset_switch_else_prong = src_node_offset });
const under_prong_src = block.src(.{ .node_offset_switch_under_prong = src_node_offset });
const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
var extra_index = zir_switch.end;
const operand: SwitchProngAnalysis.Operand, const raw_operand_ty: Type = op: {
const maybe_ptr = try sema.resolveInst(extra.data.operand);
const val, const ref = if (operand_is_ref)
.{ try sema.analyzeLoad(block, src, maybe_ptr, operand_src), maybe_ptr }
else
.{ maybe_ptr, undefined };
// We want to map values to our placeholders later on.
if (zir_switch.payload_capture_placeholder.unwrap()) |payload_capture_inst| {
assert(payload_capture_inst != switch_inst); // malformed zir
try sema.inst_map.ensureSpaceForInstructions(gpa, &.{payload_capture_inst});
}
if (zir_switch.tag_capture_placeholder.unwrap()) |tag_capture_inst| {
assert(tag_capture_inst != switch_inst); // malformed zir
try sema.inst_map.ensureSpaceForInstructions(gpa, &.{tag_capture_inst});
}
const init_cond = try sema.switchCond(block, operand_src, val);
const operand_ty: Type, const item_ty: Type = check_operand: {
const operand_ty = operand_ty: {
const raw_operand_ty = sema.typeOf(raw_operand);
if (operand_is_ref) {
try sema.checkPtrType(block, operand_src, raw_operand_ty, false);
break :operand_ty raw_operand_ty.childType(zcu);
}
break :operand_ty raw_operand_ty;
};
const operand_ty = sema.typeOf(val);
const item_ty: Type = item_ty: {
switch (operand_ty.zigTypeTag(zcu)) {
.@"enum",
.error_set,
.int,
.comptime_int,
.type,
.enum_literal,
.@"fn",
.bool,
.void,
=> break :item_ty operand_ty,
if (extra.data.bits.has_continue and !block.isComptime()) {
// Even if the operand is comptime-known, this `switch` is runtime.
.@"union" => {
try operand_ty.resolveFields(pt);
const enum_ty = operand_ty.unionTagType(zcu) orelse {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(operand_src, "switch on union with no attached enum", .{});
errdefer msg.destroy(sema.gpa);
if (operand_ty.srcLocOrNull(zcu)) |union_src| {
try sema.errNote(union_src, msg, "consider 'union(enum)' here", .{});
}
break :msg msg;
});
};
break :item_ty enum_ty;
},
.pointer => {
if (!operand_ty.isSlice(zcu)) {
break :item_ty operand_ty;
}
},
else => {},
}
return sema.fail(block, operand_src, "switch on type '{f}'", .{operand_ty.fmt(pt)});
};
if (zir_switch.has_continue and !block.isComptime()) {
if (try operand_ty.comptimeOnlySema(pt)) {
// Even if the operand is comptime-known, this `switch` is runtime.
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(operand_src, "operand of switch loop has comptime-only type '{f}'", .{operand_ty.fmt(pt)});
errdefer msg.destroy(gpa);
try sema.errNote(operand_src, msg, "switch loops are evaluated at runtime outside of comptime scopes", .{});
try sema.explainWhyTypeIsComptime(msg, operand_src, operand_ty);
break :msg msg;
});
}
try sema.validateRuntimeValue(block, operand_src, maybe_ptr);
const operand_alloc = if (extra.data.bits.any_non_inline_capture) a: {
const operand_ptr_ty = try pt.singleMutPtrType(sema.typeOf(maybe_ptr));
const operand_alloc = try block.addTy(.alloc, operand_ptr_ty);
_ = try block.addBinOp(.store, operand_alloc, maybe_ptr);
break :a operand_alloc;
} else undefined;
break :op .{
.{ .loop = .{
.operand_alloc = operand_alloc,
.operand_is_ref = operand_is_ref,
.init_cond = init_cond,
} },
operand_ty,
};
try sema.validateRuntimeValue(block, operand_src, raw_operand);
}
// We always use `simple` in the comptime case, because as far as the dispatching logic
// is concerned, it really is dispatching a single prong. `resolveSwitchComptime` will
// be resposible for recursively resolving different prongs as needed.
break :op .{
.{ .simple = .{
.by_val = val,
.by_ref = ref,
.cond = init_cond,
} },
operand_ty,
};
break :check_operand .{ operand_ty, item_ty };
};
const union_originally = raw_operand_ty.zigTypeTag(zcu) == .@"union";
const err_set = raw_operand_ty.zigTypeTag(zcu) == .error_set;
const cond_ty = switch (raw_operand_ty.zigTypeTag(zcu)) {
.@"union" => raw_operand_ty.unionTagType(zcu).?, // validated by `switchCond` above
else => raw_operand_ty,
};
const has_else = zir_switch.else_case != null;
const has_under = zir_switch.has_under;
// AstGen guarantees that the instruction immediately preceding
// switch_block(_ref) is a dbg_stmt
const cond_dbg_node_index: Zir.Inst.Index = @enumFromInt(@intFromEnum(inst) - 1);
var header_extra_index: usize = extra.end;
const scalar_cases_len = extra.data.bits.scalar_cases_len;
const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
const multi_cases_len = sema.code.extra[header_extra_index];
header_extra_index += 1;
break :blk multi_cases_len;
} else 0;
const tag_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_has_tag_capture) blk: {
const tag_capture_inst: Zir.Inst.Index = @enumFromInt(sema.code.extra[header_extra_index]);
header_extra_index += 1;
// SwitchProngAnalysis wants inst_map to have space for the tag capture.
// Note that the normal capture is referred to via the switch block
// index, which there is already necessarily space for.
try sema.inst_map.ensureSpaceForInstructions(gpa, &.{tag_capture_inst});
break :blk tag_capture_inst;
} else undefined;
var case_vals = try std.ArrayList(Air.Inst.Ref).initCapacity(gpa, scalar_cases_len + 2 * multi_cases_len);
defer case_vals.deinit(gpa);
var single_absorbed_item: Zir.Inst.Ref = .none;
var absorbed_items: []const Zir.Inst.Ref = &.{};
var absorbed_ranges: []const Zir.Inst.Ref = &.{};
const special_prongs = extra.data.bits.special_prongs;
const has_else = special_prongs.hasElse();
const has_under = special_prongs.hasUnder();
const special_else: SpecialProng = if (has_else) blk: {
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[header_extra_index]);
const extra_body_start = header_extra_index + 1;
break :blk .{
.body = sema.code.bodySlice(extra_body_start, info.body_len),
.end = extra_body_start + info.body_len,
.capture = info.capture,
.is_inline = info.is_inline,
.has_tag_capture = info.has_tag_capture,
};
} else .{
.body = &.{},
.end = header_extra_index,
.capture = .none,
.is_inline = false,
.has_tag_capture = false,
};
const special_under: SpecialProng = if (has_under) blk: {
var extra_index = special_else.end;
var trailing_items_len: usize = 0;
if (special_prongs.hasOneAdditionalItem()) {
single_absorbed_item = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
absorbed_items = @ptrCast(&single_absorbed_item);
} else if (special_prongs.hasManyAdditionalItems()) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
absorbed_items = sema.code.refSlice(extra_index + 1, items_len);
absorbed_ranges = sema.code.refSlice(extra_index + 1 + items_len, ranges_len * 2);
trailing_items_len = items_len + ranges_len * 2;
}
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1 + trailing_items_len;
break :blk .{
.body = sema.code.bodySlice(extra_index, info.body_len),
.end = extra_index + info.body_len,
.capture = info.capture,
.is_inline = info.is_inline,
.has_tag_capture = info.has_tag_capture,
};
} else .{
.body = &.{},
.end = special_else.end,
.capture = .none,
.is_inline = false,
.has_tag_capture = false,
};
const special_end = special_under.end;
var case_vals: std.ArrayList(Air.Inst.Ref) = .empty;
try case_vals.ensureUnusedCapacity(arena, zir_switch.item_infos.len);
// Duplicate checking variables later also used for `inline else`.
var seen_enum_fields: []?LazySrcLoc = &.{};
var seen_errors = SwitchErrorSet.init(gpa);
var range_set = RangeSet.init(gpa, zcu);
var true_count: u8 = 0;
var false_count: u8 = 0;
var seen_errors: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, LazySrcLoc) = .empty;
var seen_sparse_values: std.AutoHashMapUnmanaged(InternPool.Index, LazySrcLoc) = .empty;
var range_set: RangeSet = .empty;
var true_src: ?LazySrcLoc = null;
var false_src: ?LazySrcLoc = null;
var void_src: ?LazySrcLoc = null;
defer {
range_set.deinit();
gpa.free(seen_enum_fields);
seen_errors.deinit();
}
var else_err_ty: ?Type = null;
var empty_enum = false;
const else_case = zir_switch.else_case orelse undefined;
var else_error_ty: ?Type = null;
// Validate usage of '_' prongs.
if (has_under and !raw_operand_ty.isNonexhaustiveEnum(zcu)) {
const msg = msg: {
const msg = try sema.errMsg(
src,
"'_' prong only allowed when switching on non-exhaustive enums",
.{},
);
errdefer msg.destroy(gpa);
try sema.errNote(
under_prong_src,
msg,
"'_' prong here",
.{},
);
try sema.errNote(
src,
msg,
"consider using 'else'",
.{},
);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
// Validate for duplicate items, missing else prong, and invalid range.
switch (cond_ty.zigTypeTag(zcu)) {
.@"union" => unreachable, // handled in `switchCond`
switch (item_ty.zigTypeTag(zcu)) {
.@"union" => unreachable,
.@"enum" => {
seen_enum_fields = try gpa.alloc(?LazySrcLoc, cond_ty.enumFieldCount(zcu));
empty_enum = seen_enum_fields.len == 0 and !cond_ty.isNonexhaustiveEnum(zcu);
seen_enum_fields = try arena.alloc(?LazySrcLoc, item_ty.enumFieldCount(zcu));
@memset(seen_enum_fields, null);
// `range_set` is used for non-exhaustive enum values that do not correspond to any tags.
// `range_set` is used for non-exhaustive enum values that do not
// correspond to any tags. Since this is rare, we only allocate on
// demand in `validateSwitchItem`.
},
.error_set => {
try seen_errors.ensureUnusedCapacity(arena, zir_switch.totalItemsLen());
},
.int, .comptime_int => {
try range_set.ensureUnusedCapacity(arena, zir_switch.totalItemsLen());
},
.enum_literal, .@"fn", .pointer, .type => {
try seen_sparse_values.ensureUnusedCapacity(arena, zir_switch.totalItemsLen());
},
.bool, .void => {},
for (absorbed_items, 0..) |item_ref, item_i| {
_ = try sema.validateSwitchItemEnum(
block,
seen_enum_fields,
&range_set,
item_ref,
cond_ty,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .special_under,
.item_idx = .{ .kind = .single, .index = @intCast(item_i) },
} }),
else => unreachable,
}
// Validate for duplicate items and invalid ranges.
var case_it = zir_switch.iterateCases();
while (case_it.next()) |case| {
const prong_info = case.prong_info;
extra_index += prong_info.body_len;
for (case.item_infos, 0..) |item_info, item_i| {
const item_src = block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = case.index,
.item_idx = .{ .kind = .single, .value = @intCast(item_i) },
} });
if (item_info.unwrap() == .under) {
if (!operand_ty.isNonexhaustiveEnum(zcu)) return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(
src,
"'_' prong only allowed when switching on non-exhaustive enums",
.{},
);
errdefer msg.destroy(gpa);
try sema.errNote(
item_src,
msg,
"'_' prong here",
.{},
);
try sema.errNote(
src,
msg,
"consider using 'else'",
.{},
);
break :msg msg;
});
case_vals.appendAssumeCapacity(.none);
} else {
const item, extra_index = try sema.resolveSwitchItem(block, item_src, item_ty, item_info, extra_index, switch_inst, prong_info.is_comptime_unreach);
try sema.validateSwitchItemOrRange(block, item_src, item.val, null, item_ty, seen_enum_fields, &seen_errors, &seen_sparse_values, &range_set, &true_src, &false_src, &void_src);
case_vals.appendAssumeCapacity(item.ref);
}
}
for (case.range_infos, 0..) |range_info, range_i| {
const range_offset: LazySrcLoc.Offset.SwitchItem = .{
.switch_node_offset = src_node_offset,
.case_idx = case.index,
.item_idx = .{ .kind = .range, .value = @intCast(range_i) },
};
const range_src = block.src(.{ .switch_case_item = range_offset });
const first_src = block.src(.{ .switch_case_item_range_first = range_offset });
const last_src = block.src(.{ .switch_case_item_range_last = range_offset });
const first_item, extra_index = try sema.resolveSwitchItem(block, first_src, item_ty, range_info[0], extra_index, switch_inst, prong_info.is_comptime_unreach);
const last_item, extra_index = try sema.resolveSwitchItem(block, last_src, item_ty, range_info[1], extra_index, switch_inst, prong_info.is_comptime_unreach);
try sema.validateSwitchItemOrRange(block, range_src, first_item.val, last_item.val, item_ty, seen_enum_fields, &seen_errors, &seen_sparse_values, &range_set, &true_src, &false_src, &void_src);
case_vals.appendSliceAssumeCapacity(&.{ first_item.ref, last_item.ref });
}
}
switch (item_ty.zigTypeTag(zcu)) {
.@"union" => unreachable,
.int, .comptime_int => {},
else => if (zir_switch.anyRanges()) {
const range_src = block.src(.{ .node_offset_switch_range = src_node_offset });
const msg = msg: {
const msg = try sema.errMsg(
operand_src,
"ranges not allowed when switching on type '{f}'",
.{operand_ty.fmt(sema.pt)},
);
}
try sema.validateSwitchNoRange(block, @intCast(absorbed_ranges.len), cond_ty, src_node_offset);
errdefer msg.destroy(sema.gpa);
try sema.errNote(
range_src,
msg,
"range here",
.{},
);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
},
}
var extra_index: usize = special_end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1 + info.body_len;
case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum(
block,
seen_enum_fields,
&range_set,
item_ref,
cond_ty,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) },
.item_idx = .{ .kind = .single, .index = 0 },
} }),
));
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + info.body_len;
try case_vals.ensureUnusedCapacity(gpa, items.len);
for (items, 0..) |item_ref, item_i| {
case_vals.appendAssumeCapacity(try sema.validateSwitchItemEnum(
block,
seen_enum_fields,
&range_set,
item_ref,
cond_ty,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
.item_idx = .{ .kind = .single, .index = @intCast(item_i) },
} }),
));
}
try sema.validateSwitchNoRange(block, ranges_len, cond_ty, src_node_offset);
}
}
// Validate for missing special prongs.
switch (item_ty.zigTypeTag(zcu)) {
.@"union" => unreachable,
.@"enum" => {
const all_tags_handled = for (seen_enum_fields) |seen_src| {
if (seen_src == null) break false;
} else true;
if (has_else) {
if (all_tags_handled) {
if (cond_ty.isNonexhaustiveEnum(zcu)) {
if (item_ty.isNonexhaustiveEnum(zcu)) {
if (has_under) return sema.fail(
block,
else_prong_src,
@@ -11819,9 +12123,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
for (seen_enum_fields, 0..) |seen_src, i| {
if (seen_src != null) continue;
const field_name = cond_ty.enumFieldName(i, zcu);
const field_name = item_ty.enumFieldName(i, zcu);
try sema.addFieldErrNote(
cond_ty,
item_ty,
i,
msg,
"unhandled enumeration value: '{f}'",
@@ -11829,15 +12133,17 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
);
}
try sema.errNote(
cond_ty.srcLoc(zcu),
item_ty.srcLoc(zcu),
msg,
"enum '{f}' declared here",
.{cond_ty.fmt(pt)},
.{item_ty.fmt(pt)},
);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
} else if (special_prongs == .none and cond_ty.isNonexhaustiveEnum(zcu) and !union_originally) {
} else if (!has_else and !has_under and
item_ty.isNonexhaustiveEnum(zcu) and operand_ty.zigTypeTag(zcu) != .@"union")
{
return sema.fail(
block,
src,
@@ -11846,101 +12152,83 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
);
}
},
.error_set => else_error_ty = try validateErrSetSwitch(
sema,
block,
&seen_errors,
&case_vals,
cond_ty,
inst_data,
scalar_cases_len,
multi_cases_len,
.{ .body = special_else.body, .end = special_else.end, .src = else_prong_src },
has_else,
),
.int, .comptime_int => {
var extra_index: usize = special_end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1 + info.body_len;
case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt(
block,
&range_set,
item_ref,
cond_ty,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) },
.item_idx = .{ .kind = .single, .index = 0 },
} }),
));
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len;
try case_vals.ensureUnusedCapacity(gpa, items.len);
for (items, 0..) |item_ref, item_i| {
case_vals.appendAssumeCapacity(try sema.validateSwitchItemInt(
.error_set => {
else_err_ty = ty: switch (try sema.resolveInferredErrorSetTy(block, src, item_ty.toIntern())) {
.anyerror_type => {
if (!has_else) {
return sema.fail(
block,
&range_set,
item_ref,
cond_ty,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
.item_idx = .{ .kind = .single, .index = @intCast(item_i) },
} }),
));
}
try case_vals.ensureUnusedCapacity(gpa, 2 * ranges_len);
var range_i: u32 = 0;
while (range_i < ranges_len) : (range_i += 1) {
const item_first: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const item_last: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const vals = try sema.validateSwitchRange(
block,
&range_set,
item_first,
item_last,
cond_ty,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
.item_idx = .{ .kind = .range, .index = @intCast(range_i) },
} }),
src,
"else prong required when switching on type 'anyerror'",
.{},
);
case_vals.appendAssumeCapacity(vals[0]);
case_vals.appendAssumeCapacity(vals[1]);
}
break :ty .anyerror;
},
else => |err_set_ty_index| {
const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
var maybe_msg: ?*Zcu.ErrorMsg = null;
errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
var seen_errors_from_set: u32 = 0;
for (error_names.get(ip)) |error_name| {
if (seen_errors.contains(error_name)) {
seen_errors_from_set += 1;
} else if (!has_else) {
const msg = maybe_msg orelse blk: {
maybe_msg = try sema.errMsg(
src,
"switch must handle all possibilities",
.{},
);
break :blk maybe_msg.?;
};
try sema.errNote(
src,
msg,
"unhandled error value: 'error.{f}'",
.{error_name.fmt(ip)},
);
}
}
extra_index += info.body_len;
}
}
if (maybe_msg) |msg| {
maybe_msg = null;
try sema.addDeclaredHereNote(msg, operand_ty);
return sema.failWithOwnedErrorMsg(block, msg);
}
if (has_else and seen_errors_from_set == error_names.len) {
// This prong is unreachable anyway so we don't need its
// error set type, but we still allow it to exist.
if (else_case.is_simple_noreturn) break :ty null;
return sema.fail(
block,
else_prong_src,
"unreachable else prong; all cases already handled",
.{},
);
}
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, error_names.len);
for (error_names.get(ip)) |error_name| {
if (seen_errors.contains(error_name)) continue;
names.putAssumeCapacityNoClobber(error_name, {});
}
// No need to keep the hash map metadata correct; here we
// extract the (sorted) keys only.
break :ty try pt.errorSetFromUnsortedNames(names.keys());
},
};
},
.int, .comptime_int => |type_tag| {
check_range: {
if (cond_ty.zigTypeTag(zcu) == .int) {
const min_int = try cond_ty.minInt(pt, cond_ty);
const max_int = try cond_ty.maxInt(pt, cond_ty);
if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) {
if (type_tag == .int) {
const min_int = try item_ty.minInt(pt, item_ty);
const max_int = try item_ty.maxInt(pt, item_ty);
if (try range_set.spans(arena, min_int, max_int, item_ty, zcu)) {
if (has_else) {
return sema.fail(
block,
@@ -11952,7 +12240,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
break :check_range;
}
}
if (special_prongs == .none) {
if (!has_else) {
return sema.fail(
block,
src,
@@ -11962,61 +12250,24 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
}
}
},
.bool => {
var extra_index: usize = special_end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1 + info.body_len;
case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool(
block,
&true_count,
&false_count,
item_ref,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) },
.item_idx = .{ .kind = .single, .index = 0 },
} }),
));
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + info.body_len;
try case_vals.ensureUnusedCapacity(gpa, items.len);
for (items, 0..) |item_ref, item_i| {
case_vals.appendAssumeCapacity(try sema.validateSwitchItemBool(
block,
&true_count,
&false_count,
item_ref,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
.item_idx = .{ .kind = .single, .index = @intCast(item_i) },
} }),
));
}
try sema.validateSwitchNoRange(block, ranges_len, cond_ty, src_node_offset);
}
.enum_literal, .@"fn", .pointer, .type => {
if (!has_else) {
return sema.fail(
block,
src,
"else prong required when switching on type '{f}'",
.{item_ty.fmt(pt)},
);
}
},
.bool, .void => |type_tag| {
const all_values_handled = switch (type_tag) {
.bool => true_src != null and false_src != null,
.void => void_src != null,
else => unreachable,
};
if (has_else) {
if (true_count + false_count == 2) {
if (all_values_handled) {
return sema.fail(
block,
else_prong_src,
@@ -12025,7 +12276,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
);
}
} else {
if (true_count + false_count < 2) {
if (!all_values_handled) {
return sema.fail(
block,
src,
@@ -12035,1775 +12286,1086 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
}
}
},
.enum_literal, .void, .@"fn", .pointer, .type => {
if (!has_else) {
return sema.fail(
block,
src,
"else prong required when switching on type '{f}'",
.{cond_ty.fmt(pt)},
);
}
var seen_values = ValueSrcMap{};
defer seen_values.deinit(gpa);
var extra_index: usize = special_end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1;
extra_index += info.body_len;
case_vals.appendAssumeCapacity(try sema.validateSwitchItemSparse(
block,
&seen_values,
item_ref,
cond_ty,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) },
.item_idx = .{ .kind = .single, .index = 0 },
} }),
));
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + info.body_len;
try case_vals.ensureUnusedCapacity(gpa, items.len);
for (items, 0..) |item_ref, item_i| {
case_vals.appendAssumeCapacity(try sema.validateSwitchItemSparse(
block,
&seen_values,
item_ref,
cond_ty,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
.item_idx = .{ .kind = .single, .index = @intCast(item_i) },
} }),
));
}
try sema.validateSwitchNoRange(block, ranges_len, cond_ty, src_node_offset);
}
}
},
.error_union,
.noreturn,
.array,
.@"struct",
.undefined,
.null,
.optional,
.@"opaque",
.vector,
.frame,
.@"anyframe",
.comptime_float,
.float,
=> return sema.fail(block, operand_src, "invalid switch operand type '{f}'", .{
raw_operand_ty.fmt(pt),
}),
else => unreachable,
}
var special_members_only: ?SpecialProng = null;
var special_members_only_src: LazySrcLoc = undefined;
const special_generic, const special_generic_src = if (has_under) b: {
if (has_else) {
special_members_only = special_else;
special_members_only_src = else_prong_src;
}
break :b .{ special_under, under_prong_src };
} else .{ special_else, else_prong_src };
return .{
.seen_enum_fields = seen_enum_fields,
.seen_errors = seen_errors,
.seen_ranges = range_set.ranges.items,
.true_src = true_src,
.false_src = false_src,
.void_src = void_src,
const spa: SwitchProngAnalysis = .{
.sema = sema,
.parent_block = block,
.operand = operand,
.else_error_ty = else_error_ty,
.switch_block_inst = inst,
.tag_capture_inst = tag_capture_inst,
.case_vals = case_vals.items,
.else_case = else_case,
.else_err_ty = else_err_ty,
};
}
const block_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
try sema.air_instructions.append(gpa, .{
.tag = .block,
.data = undefined,
});
var label: Block.Label = .{
.zir_block = inst,
.merges = .{
.src_locs = .{},
.results = .{},
.br_list = .{},
.block_inst = block_inst,
},
fn resolveSwitchBlock(
sema: *Sema,
block: *Block,
child_block: *Block,
operand: SwitchOperand,
raw_operand_ty: Type,
maybe_lazy_cond_val: Value,
catch_all_case: CatchAllSwitchCase,
else_is_named_only: bool,
merges: *Block.Merges,
switch_inst: Zir.Inst.Index,
zir_switch: *const Zir.UnwrappedSwitchBlock,
validated_switch: *const ValidatedSwitchBlock,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const switch_node_offset = zir_switch.switch_src_node_offset;
const operand_ty = sema.typeOf(operand.simple.by_val);
const item_ty = switch (operand_ty.zigTypeTag(zcu)) {
.@"union" => operand_ty.unionTagType(zcu).?,
else => operand_ty,
};
const union_originally = operand_ty.zigTypeTag(zcu) == .@"union";
const err_set = item_ty.zigTypeTag(zcu) == .error_set;
var child_block: Block = .{
.parent = block,
.sema = sema,
.namespace = block.namespace,
.instructions = .{},
.label = &label,
.inlining = block.inlining,
.comptime_reason = block.comptime_reason,
.is_typeof = block.is_typeof,
.c_import_buf = block.c_import_buf,
.runtime_cond = block.runtime_cond,
.runtime_loop = block.runtime_loop,
.runtime_index = block.runtime_index,
.want_safety = block.want_safety,
.error_return_trace_index = block.error_return_trace_index,
.src_base_inst = block.src_base_inst,
.type_name_ctx = block.type_name_ctx,
const cond_ref = operand.simple.cond;
// We have to resolve lazy values to ensure that comparisons with switch
// prong items don't produce false negatives.
const cond_val = try sema.resolveLazyValue(maybe_lazy_cond_val);
const case_vals = validated_switch.case_vals;
var case_val_idx: usize = 0;
var extra_index = zir_switch.end;
var case_it = zir_switch.iterateCases();
var under_prong: ?struct {
index: Zir.UnwrappedSwitchBlock.Case.Index,
body: []const Zir.Inst.Index,
capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
has_tag_capture: bool,
} = null;
while (case_it.next()) |case| {
const prong_info = case.prong_info;
const prong_body = sema.code.bodySlice(extra_index, prong_info.body_len);
extra_index += prong_body.len;
for (case.item_infos) |item_info| {
if (item_info.bodyLen()) |body_len| extra_index += body_len;
}
for (case.range_infos) |range_info| {
if (range_info[0].bodyLen()) |body_len| extra_index += body_len;
if (range_info[1].bodyLen()) |body_len| extra_index += body_len;
}
const item_refs = case_vals[case_val_idx..][0..case.item_infos.len];
case_val_idx += item_refs.len;
const range_refs: []const [2]Air.Inst.Ref = @ptrCast(case_vals[case_val_idx..][0 .. 2 * case.range_infos.len]);
case_val_idx += 2 * range_refs.len;
for (item_refs) |item_ref| {
if (item_ref == .none) {
under_prong = .{
.index = case.index,
.body = prong_body,
.capture = case.prong_info.capture,
.has_tag_capture = case.prong_info.has_tag_capture,
};
continue;
}
const item_val = sema.resolveConstDefinedValue(child_block, .unneeded, item_ref, undefined) catch unreachable;
if (cond_val.eql(item_val, item_ty, zcu)) {
if (err_set) try sema.maybeErrorUnwrapComptime(child_block, prong_body, cond_ref);
if (union_originally and operand_ty.unionFieldType(item_val, zcu).?.isNoReturn(zcu)) {
// This prong should be unreachable!
return .unreachable_value;
}
return sema.resolveSwitchProng(
block,
child_block,
operand,
raw_operand_ty,
prong_body,
block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = case.index,
} }),
prong_info.capture,
prong_info.has_tag_capture,
if (prong_info.is_inline) cond_ref else .none,
.{ .item_refs = item_refs },
validated_switch.else_err_ty,
merges,
switch_inst,
zir_switch,
);
}
}
for (range_refs) |range_ref| {
const first_val = sema.resolveConstDefinedValue(child_block, .unneeded, range_ref[0], undefined) catch unreachable;
const last_val = sema.resolveConstDefinedValue(child_block, .unneeded, range_ref[1], undefined) catch unreachable;
if ((try sema.compareAll(cond_val, .gte, first_val, item_ty)) and
(try sema.compareAll(cond_val, .lte, last_val, item_ty)))
{
return sema.resolveSwitchProng(
block,
child_block,
operand,
raw_operand_ty,
prong_body,
block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = case.index,
} }),
prong_info.capture,
prong_info.has_tag_capture,
if (prong_info.is_inline) cond_ref else .none,
.has_ranges,
validated_switch.else_err_ty,
merges,
switch_inst,
zir_switch,
);
}
}
}
const else_case = validated_switch.else_case;
// named-only prong
if (else_is_named_only and item_ty.enumTagFieldIndex(cond_val, zcu) != null) {
assert(item_ty.isNonexhaustiveEnum(zcu));
return sema.resolveSwitchProng(
block,
child_block,
operand,
raw_operand_ty,
else_case.body,
block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = else_case.index,
} }),
else_case.capture,
else_case.has_tag_capture,
if (else_case.is_inline) cond_ref else .none,
.special,
validated_switch.else_err_ty,
merges,
switch_inst,
zir_switch,
);
}
// catch-all prong
const index, const body, const capture, const has_tag_capture, const is_inline = switch (catch_all_case) {
.@"else" => .{ else_case.index, else_case.body, else_case.capture, else_case.has_tag_capture, else_case.is_inline },
.under => .{ under_prong.?.index, under_prong.?.body, under_prong.?.capture, under_prong.?.has_tag_capture, false },
.none => unreachable,
};
const merges = &child_block.label.?.merges;
defer child_block.instructions.deinit(gpa);
defer merges.deinit(gpa);
if (scalar_cases_len + multi_cases_len == 0 and
special_members_only == null and
!special_generic.is_inline)
{
if (empty_enum) {
return .void_value;
}
if (special_prongs == .none) {
return sema.fail(block, src, "switch must handle all possibilities", .{});
}
const init_cond = switch (operand) {
.simple => |s| s.cond,
.loop => |l| l.init_cond,
};
if (zcu.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and
raw_operand_ty.zigTypeTag(zcu) == .@"enum" and !raw_operand_ty.isNonexhaustiveEnum(zcu))
{
try sema.zirDbgStmt(block, cond_dbg_node_index);
const ok = try block.addUnOp(.is_named_enum_value, init_cond);
try sema.addSafetyCheck(block, src, ok, .corrupt_switch);
}
if (err_set and try sema.maybeErrorUnwrap(block, special_generic.body, init_cond, operand_src, false)) {
if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_ref);
if (union_originally) {
for (validated_switch.seen_enum_fields, 0..) |maybe_seen, field_i| {
if (maybe_seen != null) continue;
if (!operand_ty.unionFieldTypeByIndex(field_i, zcu).isNoReturn(zcu)) break;
} else {
// This prong should be unreachable!
return .unreachable_value;
}
}
return sema.resolveSwitchProng(
block,
child_block,
operand,
raw_operand_ty,
body,
block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = index,
} }),
capture,
has_tag_capture,
if (is_inline) cond_ref else .none,
.special,
validated_switch.else_err_ty,
merges,
switch_inst,
zir_switch,
);
}
switch (operand) {
.loop => {}, // always runtime; evaluation in comptime scope uses `simple`
.simple => |s| {
if (try sema.resolveDefinedValue(&child_block, src, s.cond)) |cond_val| {
return resolveSwitchComptimeLoop(
sema,
spa,
&child_block,
if (operand_is_ref)
sema.typeOf(s.by_ref)
else
raw_operand_ty,
cond_ty,
cond_val,
src_node_offset,
special_members_only,
special_generic,
has_under,
case_vals,
scalar_cases_len,
multi_cases_len,
err_set,
empty_enum,
operand_is_ref,
);
}
/// Describes how the operand of a `switch` is supplied to prong analysis:
/// either a single fixed dispatch, or a re-dispatchable (`continue`) loop
/// operand stored in an alloc.
const SwitchOperand = union(enum) {
    /// This switch will be dispatched only once, with the given operand.
    simple: struct {
        /// The raw switch operand value. Always defined.
        by_val: Air.Inst.Ref,
        /// The switch operand *pointer*. Defined only if there is a prong
        /// with a by-ref capture.
        by_ref: Air.Inst.Ref,
        /// The switch condition value. For unions, `operand` is the union
        /// and `cond` is its enum tag value.
        cond: Air.Inst.Ref,
    },
    /// This switch may be dispatched multiple times with `continue` syntax.
    /// As such, the operand is stored in an alloc if needed.
    loop: struct {
        /// The `alloc` containing the `switch` operand for the active dispatch.
        /// Each prong must load from this `alloc` to get captures.
        /// If there are no captures, this may be undefined.
        operand_alloc: Air.Inst.Ref,
        /// Whether `operand_alloc` contains a by-val operand or a by-ref
        /// operand.
        operand_is_ref: bool,
        /// The switch condition value for the *initial* dispatch. For
        /// unions, this is the enum tag value.
        init_cond: Air.Inst.Ref,
    },
};
if (scalar_cases_len + multi_cases_len == 0 and
special_members_only == null and
!special_generic.is_inline and
!extra.data.bits.has_continue)
{
return spa.resolveProngComptime(
&child_block,
.special,
special_generic.body,
special_generic.capture,
block.src(.{ .switch_capture = .{
.switch_node_offset = src_node_offset,
.case_idx = if (has_under) .special_under else .special_else,
} }),
undefined, // case_vals may be undefined for special prongs
.none,
false,
merges,
);
}
},
/// Which kind of catch-all prong, if any, a `switch` has: none, a literal
/// `else` prong, or an `_` ("under") prong.
const CatchAllSwitchCase = enum { none, @"else", under };
/// Describes how a resolved switch prong matched the operand; used to decide
/// how captures are computed.
const SwitchProngKind = union(enum) {
    /// The prong matched one of these explicit scalar items.
    item_refs: []const Air.Inst.Ref,
    /// The prong contains at least one range item (`a...b`).
    has_ranges,
    /// The prong is the special (`else`/`_`) prong.
    special,
};
/// Resolve a switch prong which is determined at comptime to have no peers.
/// Sets up captures as needed. Uses `analyzeBodyRuntimeBreak`.
fn resolveSwitchProng(
    sema: *Sema,
    block: *Block,
    child_block: *Block,
    operand: SwitchOperand,
    raw_operand_ty: Type,
    prong_body: []const Zir.Inst.Index,
    /// Must use the `switch_capture` field in `offset`.
    capture_src: LazySrcLoc,
    capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
    has_tag_capture: bool,
    inline_case_capture: Air.Inst.Ref,
    kind: SwitchProngKind,
    else_err_ty: ?Type,
    merges: *Block.Merges,
    switch_inst: Zir.Inst.Index,
    zir_switch: *const Zir.UnwrappedSwitchBlock,
) CompileError!Air.Inst.Ref {
    const src_node_offset = zir_switch.switch_src_node_offset;
    const src = block.nodeOffset(src_node_offset);
    const operand_src = block.src(.{ .node_offset_switch_operand = src_node_offset });
    // We can propagate `.cold` hints from this branch since it's comptime-known
    // to be taken from the parent branch.
    const parent_hint = sema.branch_hint;
    defer sema.branch_hint = parent_hint orelse if (sema.branch_hint == .cold) .cold else null;
    // If the prong declares a payload capture, analyze it up front and map the
    // capture's ZIR placeholder instruction to the resulting AIR ref so that
    // references to the capture inside `prong_body` resolve to it.
    // NOTE(review): `operand.simple` is accessed unconditionally here and
    // below — assumes a `.loop` operand never reaches comptime prong
    // resolution (the `.loop` prong elsewhere is documented "always runtime");
    // confirm this invariant.
    const payload_inst: Zir.Inst.Index = if (capture != .none) inst: {
        const payload_inst = zir_switch.payload_capture_placeholder.unwrap() orelse switch_inst;
        const payload_ref = try sema.analyzeSwitchPayloadCapture(
            child_block,
            operand,
            operand.simple.by_val,
            operand.simple.by_ref,
            sema.typeOf(operand.simple.by_val),
            operand_src,
            capture_src,
            capture == .by_ref,
            kind == .special,
            switch (kind) {
                .item_refs => |item_refs| item_refs,
                .has_ranges, .special => undefined,
            },
            inline_case_capture,
            else_err_ty,
        );
        assert(!sema.typeOf(payload_ref).isNoReturn(sema.pt.zcu));
        sema.inst_map.putAssumeCapacity(payload_inst, payload_ref);
        break :inst payload_inst;
    } else undefined;
    // Undo the payload-capture mapping once the prong body has been resolved.
    defer if (capture != .none) assert(sema.inst_map.remove(payload_inst));
    // Same pattern for the additional tag capture (`inline .foo => |payload, tag|`).
    const tag_inst: Zir.Inst.Index = if (has_tag_capture) inst: {
        const tag_inst = zir_switch.tag_capture_placeholder.unwrap() orelse switch_inst;
        const tag_ref = try sema.analyzeSwitchTagCapture(
            child_block,
            operand.simple.by_val,
            sema.typeOf(operand.simple.by_val),
            capture_src,
            inline_case_capture,
            kind,
        );
        sema.inst_map.putAssumeCapacity(tag_inst, tag_ref);
        break :inst tag_inst;
    } else undefined;
    defer if (has_tag_capture) assert(sema.inst_map.remove(tag_inst));
    // For `continue`-able switches, map the switch instruction itself to the
    // raw operand type — presumably so dispatch operands can be coerced to it;
    // verify against the ZIR lowering of labeled switch.
    if (zir_switch.has_continue) sema.inst_map.putAssumeCapacity(switch_inst, .fromType(raw_operand_ty));
    defer if (zir_switch.has_continue) assert(sema.inst_map.remove(switch_inst));
    return sema.resolveBlockBody(block, src, child_block, prong_body, switch_inst, merges);
}
/// Returns whether a switch prong's body should be semantically analyzed,
/// as opposed to being replaced with `unreach` by the caller.
/// Returns `false` when the prong can never be taken:
/// - the switch operand was originally a union and `item_ref` selects a
///   `noreturn` union field, or
/// - the operand is an error set, the prong was already determined
///   comptime-unreachable, and the item's error name is not a member of
///   the operand's error set.
/// `item_ref` must be comptime-known (callers guarantee this), so value
/// resolution below cannot fail.
fn wantSwitchProngBodyAnalysis(
    sema: *Sema,
    block: *Block,
    item_ref: Air.Inst.Ref,
    operand_ty: Type,
    union_originally: bool,
    err_set: bool,
    prong_is_comptime_unreach: bool,
) bool {
    const zcu = sema.pt.zcu;
    if (union_originally) {
        // `item_ref` is comptime-known; resolve it (and any lazy value) to
        // find which union field this prong item selects.
        const unresolved_item_val = sema.resolveConstDefinedValue(block, .unneeded, item_ref, undefined) catch unreachable;
        const item_val = sema.resolveLazyValue(unresolved_item_val) catch unreachable;
        const field_ty = operand_ty.unionFieldType(item_val, zcu).?;
        // A `noreturn` payload can never be the active field, so this prong's
        // body would be unreachable.
        if (field_ty.isNoReturn(zcu)) return false;
    }
    if (err_set and prong_is_comptime_unreach) {
        const unresolved_item_val = sema.resolveConstDefinedValue(block, .unneeded, item_ref, undefined) catch unreachable;
        const item_val = sema.resolveLazyValue(unresolved_item_val) catch unreachable;
        const err_name = item_val.getErrorName(zcu).unwrap().?;
        // Skip analysis for error values that cannot actually occur in this
        // operand's error set.
        if (!Type.errorSetHasFieldIp(&zcu.intern_pool, operand_ty.toIntern(), err_name)) return false;
    }
    return true;
}
var extra_case_vals: struct {
items: std.ArrayList(Air.Inst.Ref),
ranges: std.ArrayList([2]Air.Inst.Ref),
} = .{ .items = .empty, .ranges = .empty };
defer {
extra_case_vals.items.deinit(gpa);
extra_case_vals.ranges.deinit(gpa);
}
/// Assumes that `operand_ty` has more than one possible value.
/// Sets up captures as needed. Uses `analyzeBodyRuntimeBreak`.
fn analyzeSwitchProng(
    sema: *Sema,
    case_block: *Block,
    operand: SwitchOperand,
    operand_ty: Type,
    raw_operand_ty: Type,
    prong_body: []const Zir.Inst.Index,
    /// Must use the `switch_capture` field in `offset`.
    capture_src: LazySrcLoc,
    capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
    has_tag_capture: bool,
    inline_case_capture: Air.Inst.Ref,
    kind: SwitchProngKind,
    else_err_ty: ?Type,
    switch_inst: Zir.Inst.Index,
    zir_switch: *const Zir.UnwrappedSwitchBlock,
) CompileError!std.builtin.BranchHint {
    const pt = sema.pt;
    const zcu = pt.zcu;
    // NOTE(review): the body below references many identifiers that are not
    // parameters or locals of this function (`special_members_only`,
    // `cond_ty`, `seen_enum_fields`, `gpa`, `ip`, `extra_case_vals`, `spa`,
    // `child_block`, `src`, `merges`, `case_vals`, ...). It appears to be
    // code spliced in from an enclosing switch-analysis function; confirm
    // against upstream history before modifying.
    // Runtime switch, if we have a special_members_only prong we need to unroll
    // it to a prong with explicit items.
    // Although this is potentially the same as `inline else` it does not count
    // towards the backward branch quota because it's an implementation detail.
    if (special_members_only != null) gen: {
        assert(cond_ty.isNonexhaustiveEnum(zcu));
        const operand_src = case_block.src(.{ .node_offset_switch_operand = zir_switch.switch_src_node_offset });
        // Find the index range [min_i, max_i] of enum fields not yet covered
        // by an explicit prong, and count how many fields *are* covered.
        var min_i: usize = math.maxInt(usize);
        var max_i: usize = 0;
        var seen_field_count: usize = 0;
        for (seen_enum_fields, 0..) |seen, enum_i| {
            if (seen != null) {
                seen_field_count += 1;
            } else {
                min_i = @min(min_i, enum_i);
                max_i = @max(max_i, enum_i);
            }
        }
        if (min_i == max_i) {
            // Exactly one field is missing: emit it as a single scalar item.
            seen_enum_fields[min_i] = special_members_only_src;
            const item_val = try pt.enumValueFieldIndex(cond_ty, @intCast(min_i));
            const item_ref = Air.internedToRef(item_val.toIntern());
            try extra_case_vals.items.append(gpa, item_ref);
            break :gen;
        }
        const missing_field_count = seen_enum_fields.len - seen_field_count;
        extra_case_vals.items = try .initCapacity(gpa, missing_field_count / 2);
        extra_case_vals.ranges = try .initCapacity(gpa, missing_field_count / 4);
        const int_ty = cond_ty.intTagType(zcu);
        // Walk the unseen fields in declaration order, coalescing runs of
        // consecutive tag integer values into ranges; lone values become items.
        var last_val = try pt.enumValueFieldIndex(cond_ty, @intCast(min_i));
        var first_ref = Air.internedToRef(last_val.toIntern());
        seen_enum_fields[min_i] = special_members_only_src;
        for (seen_enum_fields[(min_i + 1)..(max_i + 1)], (min_i + 1)..) |seen, enum_i| {
            if (seen != null) continue;
            seen_enum_fields[enum_i] = special_members_only_src;
            const item_val = try pt.enumValueFieldIndex(cond_ty, @intCast(enum_i));
            const item_ref = Air.internedToRef(item_val.toIntern());
            // Is this tag's integer value exactly one greater than the last
            // value in the current run?
            const is_next = is_next: {
                const prev_int = ip.indexToKey(last_val.toIntern()).enum_tag.int;
                const result = try arith.incrementDefinedInt(sema, int_ty, .fromInterned(prev_int));
                if (result.overflow) break :is_next false;
                const item_int = ip.indexToKey(item_val.toIntern()).enum_tag.int;
                break :is_next try sema.valuesEqual(.fromInterned(item_int), result.val, int_ty);
            };
            if (is_next) {
                last_val = item_val;
            } else {
                // Flush the current run as a single item or a range.
                const last_ref = Air.internedToRef(last_val.toIntern());
                if (first_ref == last_ref) {
                    try extra_case_vals.items.append(gpa, first_ref);
                } else {
                    try extra_case_vals.ranges.append(gpa, .{ first_ref, last_ref });
                }
                first_ref = item_ref;
                last_val = item_val;
            }
        }
        // Flush the final run.
        const last_ref = Air.internedToRef(last_val.toIntern());
        if (first_ref == last_ref) {
            try extra_case_vals.items.append(gpa, first_ref);
        } else {
            try extra_case_vals.ranges.append(gpa, .{ first_ref, last_ref });
        }
    }
    // NOTE(review): this call's argument list is interrupted by an `if`
    // statement and an unterminated `switch` expression — the region is
    // garbled and does not form a valid expression list as written.
    const air_switch_ref = try sema.analyzeSwitchRuntimeBlock(
        spa,
        &child_block,
        src,
        switch (operand) {
        if (operand_ty.zigTypeTag(zcu) == .error_set) {
            const cond_ref = switch (operand) {
                .simple => |s| s.cond,
                .loop => |l| l.init_cond,
        },
        cond_ty,
        operand_src,
        case_vals,
        special_generic,
        scalar_cases_len,
        multi_cases_len,
        union_originally,
        raw_operand_ty,
        err_set,
        src_node_offset,
        special_generic_src,
        has_under,
        seen_enum_fields,
        seen_errors,
        range_set,
        true_count,
        false_count,
        cond_dbg_node_index,
        false,
        special_members_only,
        special_members_only_src,
        extra_case_vals.items.items,
        extra_case_vals.ranges.items,
    );
    // Back-patch each `continue` dispatch placeholder: store the new operand
    // (when some prong has a non-inline capture), then replace the
    // placeholder `br` with a `switch_dispatch` back to the switch block.
    for (merges.extra_insts.items, merges.extra_src_locs.items) |placeholder_inst, dispatch_src| {
        var replacement_block = block.makeSubBlock();
        defer replacement_block.instructions.deinit(gpa);
        assert(sema.air_instructions.items(.tag)[@intFromEnum(placeholder_inst)] == .br);
        const new_operand_maybe_ref = sema.air_instructions.items(.data)[@intFromEnum(placeholder_inst)].br.operand;
        if (extra.data.bits.any_non_inline_capture) {
            _ = try replacement_block.addBinOp(.store, operand.loop.operand_alloc, new_operand_maybe_ref);
        };
        if (try sema.maybeErrorUnwrap(case_block, prong_body, cond_ref, operand_src, true)) {
            // nothing to do here. weight against error branch
            return .unlikely;
        }
    }
    // NOTE(review): from here on, loop-locals (`replacement_block`,
    // `new_operand_maybe_ref`, `dispatch_src`, `placeholder_inst`) are used
    // outside the loop that declared them, and a `continue` appears outside
    // any loop — further evidence of garbled splicing in this region.
    const new_operand_val = if (operand_is_ref)
        try sema.analyzeLoad(&replacement_block, dispatch_src, new_operand_maybe_ref, dispatch_src)
    else
        new_operand_maybe_ref;
    const new_cond = try sema.switchCond(&replacement_block, dispatch_src, new_operand_val);
    if (zcu.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and
        cond_ty.zigTypeTag(zcu) == .@"enum" and !cond_ty.isNonexhaustiveEnum(zcu) and
        !try sema.isComptimeKnown(new_cond))
    const operand_val, const operand_ptr = load_operand: {
        if (capture == .none and !has_tag_capture) {
            // No need to load the operand for this prong!
            break :load_operand .{ undefined, undefined };
        }
        if (inline_case_capture != .none and
            !(capture != .none and operand_ty.zigTypeTag(zcu) == .@"union"))
        {
            const ok = try replacement_block.addUnOp(.is_named_enum_value, new_cond);
            try sema.addSafetyCheck(&replacement_block, src, ok, .corrupt_switch);
            // We only need to load the operand if there's a union payload capture
            // since it's always runtime-known; only the tag is comptime-known here.
            break :load_operand .{ undefined, undefined };
        }
        _ = try replacement_block.addInst(.{
            .tag = .switch_dispatch,
            .data = .{ .br = .{
                .block_inst = air_switch_ref.toIndex().?,
                .operand = new_cond,
            } },
        });
        if (replacement_block.instructions.items.len == 1) {
            // Optimization: we don't need a block!
            sema.air_instructions.set(
                @intFromEnum(placeholder_inst),
                sema.air_instructions.get(@intFromEnum(replacement_block.instructions.items[0])),
            );
            continue;
        }
        // Replace placeholder with a block.
        // No `br` is needed as the block is a switch dispatch so necessarily `noreturn`.
        try sema.air_extra.ensureUnusedCapacity(
            gpa,
            @typeInfo(Air.Block).@"struct".fields.len + replacement_block.instructions.items.len,
        );
        sema.air_instructions.set(@intFromEnum(placeholder_inst), .{
            .tag = .block,
            .data = .{ .ty_pl = .{
                .ty = .noreturn_type,
                .payload = sema.addExtraAssumeCapacity(Air.Block{
                    .body_len = @intCast(replacement_block.instructions.items.len),
                }),
            } },
        });
        sema.air_extra.appendSliceAssumeCapacity(@ptrCast(replacement_block.instructions.items));
    }
    return sema.resolveAnalyzedBlock(block, src, &child_block, merges, false);
}
/// Information about a special (`else`/`_`) switch prong.
const SpecialProng = struct {
    /// The ZIR body of the prong.
    body: []const Zir.Inst.Index,
    /// Offset into the ZIR extra data just past this prong; used as the
    /// starting index when iterating the cases that follow it.
    end: usize,
    /// The kind of payload capture this prong declares, if any.
    capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
    /// Whether this is an `inline` prong.
    is_inline: bool,
    /// Whether the prong declares an additional tag capture.
    has_tag_capture: bool,
};
fn analyzeSwitchRuntimeBlock(
sema: *Sema,
spa: SwitchProngAnalysis,
child_block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
operand_ty: Type,
operand_src: LazySrcLoc,
case_vals: std.ArrayList(Air.Inst.Ref),
else_prong: SpecialProng,
scalar_cases_len: usize,
multi_cases_len: usize,
union_originally: bool,
maybe_union_ty: Type,
err_set: bool,
switch_node_offset: std.zig.Ast.Node.Offset,
else_prong_src: LazySrcLoc,
else_prong_is_underscore: bool,
seen_enum_fields: []?LazySrcLoc,
seen_errors: SwitchErrorSet,
range_set: RangeSet,
true_count: u8,
false_count: u8,
cond_dbg_node_index: Zir.Inst.Index,
allow_err_code_unwrap: bool,
extra_prong: ?SpecialProng,
/// May be `undefined` if `extra_prong` is `null`
extra_prong_src: LazySrcLoc,
extra_prong_items: []const Air.Inst.Ref,
extra_prong_ranges: []const [2]Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const gpa = sema.gpa;
const ip = &zcu.intern_pool;
const block = child_block.parent.?;
const estimated_cases_extra = (scalar_cases_len + multi_cases_len) *
@typeInfo(Air.SwitchBr.Case).@"struct".fields.len + 2;
var cases_extra = try std.ArrayList(u32).initCapacity(gpa, estimated_cases_extra);
defer cases_extra.deinit(gpa);
var branch_hints = try std.ArrayList(std.builtin.BranchHint).initCapacity(gpa, scalar_cases_len);
defer branch_hints.deinit(gpa);
var case_block = child_block.makeSubBlock();
case_block.runtime_loop = null;
case_block.runtime_cond = operand_src;
case_block.runtime_index.increment();
case_block.need_debug_scope = null; // this body is emitted regardless
defer case_block.instructions.deinit(gpa);
var extra_index: usize = else_prong.end;
var scalar_i: usize = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1;
const body = sema.code.bodySlice(extra_index, info.body_len);
extra_index += info.body_len;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.error_return_trace_index = child_block.error_return_trace_index;
const item = case_vals.items[scalar_i];
// `item` is already guaranteed to be constant known.
const analyze_body = if (union_originally) blk: {
const unresolved_item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
const item_val = sema.resolveLazyValue(unresolved_item_val) catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
break :blk field_ty.zigTypeTag(zcu) != .noreturn;
} else true;
const prong_hint: std.builtin.BranchHint = if (err_set and
try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap))
h: {
// nothing to do here. weight against error branch
break :h .unlikely;
} else if (analyze_body) h: {
break :h try spa.analyzeProngRuntime(
&case_block,
.normal,
body,
info.capture,
child_block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) },
} }),
&.{item},
if (info.is_inline) item else .none,
info.has_tag_capture,
);
} else h: {
_ = try case_block.addNoOp(.unreach);
break :h .none;
};
try branch_hints.append(gpa, prong_hint);
try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
1 + // `item`, no ranges
case_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = 1,
.ranges_len = 0,
.body_len = @intCast(case_block.instructions.items.len),
}));
cases_extra.appendAssumeCapacity(@intFromEnum(item));
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
}
var cases_len = scalar_cases_len;
var case_val_idx: usize = scalar_cases_len;
const multi_cases_len_with_extra_prong = multi_cases_len + @intFromBool(extra_prong != null);
var multi_i: u32 = 0;
while (multi_i < multi_cases_len_with_extra_prong) : (multi_i += 1) {
const is_extra_prong = multi_i == multi_cases_len;
var items: []const Air.Inst.Ref = undefined;
var info: Zir.Inst.SwitchBlock.ProngInfo = undefined;
var ranges: []const [2]Air.Inst.Ref = undefined;
var body: []const Zir.Inst.Index = undefined;
if (is_extra_prong) {
const prong = extra_prong.?;
items = extra_prong_items;
ranges = extra_prong_ranges;
body = prong.body;
info = .{
.body_len = undefined,
.capture = prong.capture,
.is_inline = prong.is_inline,
.has_tag_capture = prong.has_tag_capture,
};
} else {
@branchHint(.likely);
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
info = @bitCast(sema.code.extra[extra_index]);
extra_index += 1 + items_len + ranges_len * 2;
items = case_vals.items[case_val_idx..][0..items_len];
case_val_idx += items_len;
ranges = @ptrCast(case_vals.items[case_val_idx..][0 .. ranges_len * 2]);
case_val_idx += ranges_len * 2;
body = sema.code.bodySlice(extra_index, info.body_len);
extra_index += info.body_len;
}
case_block.instructions.shrinkRetainingCapacity(0);
case_block.error_return_trace_index = child_block.error_return_trace_index;
// Generate all possible cases as scalar prongs.
if (info.is_inline) {
var emit_bb = false;
for (ranges, 0..) |range_items, range_i| {
var item = sema.resolveConstDefinedValue(block, .unneeded, range_items[0], undefined) catch unreachable;
const item_last = sema.resolveConstDefinedValue(block, .unneeded, range_items[1], undefined) catch unreachable;
while (item.compareScalar(.lte, item_last, operand_ty, zcu)) : ({
// Previous validation has resolved any possible lazy values.
const int_val: Value, const int_ty: Type = switch (operand_ty.zigTypeTag(zcu)) {
.int => .{ item, operand_ty },
.@"enum" => b: {
const int_val = Value.fromInterned(ip.indexToKey(item.toIntern()).enum_tag.int);
break :b .{ int_val, int_val.typeOf(zcu) };
},
else => unreachable,
};
const result = try arith.incrementDefinedInt(sema, int_ty, int_val);
assert(!result.overflow);
item = switch (operand_ty.zigTypeTag(zcu)) {
.int => result.val,
.@"enum" => .fromInterned(try pt.intern(.{ .enum_tag = .{
.ty = operand_ty.toIntern(),
.int = result.val.toIntern(),
} })),
else => unreachable,
};
}) {
cases_len += 1;
const item_ref = Air.internedToRef(item.toIntern());
case_block.instructions.shrinkRetainingCapacity(0);
case_block.error_return_trace_index = child_block.error_return_trace_index;
if (emit_bb) {
const bb_src = if (is_extra_prong) extra_prong_src else block.src(.{ .switch_case_item = .{
.switch_node_offset = switch_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
.item_idx = .{ .kind = .range, .index = @intCast(range_i) },
} });
try sema.emitBackwardBranch(block, bb_src);
}
emit_bb = true;
const prong_hint = try spa.analyzeProngRuntime(
&case_block,
.normal,
body,
info.capture,
child_block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
} }),
undefined, // case_vals may be undefined for ranges
item_ref,
info.has_tag_capture,
);
try branch_hints.append(gpa, prong_hint);
try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
1 + // `item`, no ranges
case_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = 1,
.ranges_len = 0,
.body_len = @intCast(case_block.instructions.items.len),
}));
cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
if (item.compareScalar(.eq, item_last, operand_ty, zcu)) break;
}
}
for (items, 0..) |item, item_i| {
cases_len += 1;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.error_return_trace_index = child_block.error_return_trace_index;
const analyze_body = if (union_originally) blk: {
const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
break :blk field_ty.zigTypeTag(zcu) != .noreturn;
} else true;
if (emit_bb) {
const bb_src = if (is_extra_prong) extra_prong_src else block.src(.{ .switch_case_item = .{
.switch_node_offset = switch_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
.item_idx = .{ .kind = .single, .index = @intCast(item_i) },
} });
try sema.emitBackwardBranch(block, bb_src);
}
emit_bb = true;
const prong_hint: std.builtin.BranchHint = if (analyze_body) h: {
break :h try spa.analyzeProngRuntime(
&case_block,
.normal,
body,
info.capture,
child_block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
} }),
&.{item},
item,
info.has_tag_capture,
);
} else h: {
_ = try case_block.addNoOp(.unreach);
break :h .none;
};
try branch_hints.append(gpa, prong_hint);
try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
1 + // `item`, no ranges
case_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = 1,
.ranges_len = 0,
.body_len = @intCast(case_block.instructions.items.len),
}));
cases_extra.appendAssumeCapacity(@intFromEnum(item));
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
}
continue;
}
cases_len += 1;
const analyze_body = if (union_originally)
for (items) |item| {
const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
if (field_ty.zigTypeTag(zcu) != .noreturn) break true;
} else false
else
true;
const prong_hint: std.builtin.BranchHint = if (err_set and
try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap))
h: {
// nothing to do here. weight against error branch
break :h .unlikely;
} else if (analyze_body) h: {
break :h try spa.analyzeProngRuntime(
&case_block,
.normal,
body,
info.capture,
child_block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
} }),
items,
.none,
false,
);
} else h: {
_ = try case_block.addNoOp(.unreach);
break :h .none;
};
try branch_hints.append(gpa, prong_hint);
try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
items.len + ranges.len * 2 +
case_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = @intCast(items.len),
.ranges_len = @intCast(ranges.len),
.body_len = @intCast(case_block.instructions.items.len),
}));
for (items) |item| {
cases_extra.appendAssumeCapacity(@intFromEnum(item));
}
for (ranges) |range| {
cases_extra.appendSliceAssumeCapacity(&.{
@intFromEnum(range[0]),
@intFromEnum(range[1]),
});
}
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
}
const else_body: []const Air.Inst.Index = if (else_prong.body.len != 0 or case_block.wantSafety()) else_body: {
var emit_bb = false;
// If this is true we must have a 'true' else prong and not an underscore because
// underscore prongs can never be inlined. We've already checked for this.
if (else_prong.is_inline) switch (operand_ty.zigTypeTag(zcu)) {
.@"enum" => {
if (operand_ty.isNonexhaustiveEnum(zcu) and !union_originally) {
return sema.fail(block, else_prong_src, "cannot enumerate values of type '{f}' for 'inline else'", .{
operand_ty.fmt(pt),
});
}
for (seen_enum_fields, 0..) |f, i| {
if (f != null) continue;
cases_len += 1;
const item_val = try pt.enumValueFieldIndex(operand_ty, @intCast(i));
const item_ref = Air.internedToRef(item_val.toIntern());
case_block.instructions.shrinkRetainingCapacity(0);
case_block.error_return_trace_index = child_block.error_return_trace_index;
const analyze_body = if (union_originally) blk: {
const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?;
break :blk field_ty.zigTypeTag(zcu) != .noreturn;
} else true;
if (emit_bb) try sema.emitBackwardBranch(block, else_prong_src);
emit_bb = true;
const prong_hint: std.builtin.BranchHint = if (analyze_body) h: {
break :h try spa.analyzeProngRuntime(
&case_block,
.special,
else_prong.body,
else_prong.capture,
child_block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = .special_else,
} }),
&.{item_ref},
item_ref,
else_prong.has_tag_capture,
);
} else h: {
_ = try case_block.addNoOp(.unreach);
break :h .none;
};
try branch_hints.append(gpa, prong_hint);
try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
1 + // `item`, no ranges
case_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = 1,
.ranges_len = 0,
.body_len = @intCast(case_block.instructions.items.len),
}));
cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
assert(zir_switch.any_maybe_runtime_capture); // should have caught everything else by now
switch (operand) {
.simple => |s| break :load_operand .{ s.by_val, s.by_ref },
.loop => |l| {
const loaded = try sema.analyzeLoad(case_block, operand_src, l.operand_alloc, operand_src);
if (l.operand_is_ref) {
const by_val = try sema.analyzeLoad(case_block, operand_src, loaded, operand_src);
break :load_operand .{ by_val, loaded };
} else {
break :load_operand .{ loaded, undefined };
}
},
.error_set => {
if (operand_ty.isAnyError(zcu)) {
return sema.fail(block, else_prong_src, "cannot enumerate values of type '{f}' for 'inline else'", .{
operand_ty.fmt(pt),
});
}
const error_names = operand_ty.errorSetNames(zcu);
for (0..error_names.len) |name_index| {
const error_name = error_names.get(ip)[name_index];
if (seen_errors.contains(error_name)) continue;
cases_len += 1;
const item_val = try pt.intern(.{ .err = .{
.ty = operand_ty.toIntern(),
.name = error_name,
} });
const item_ref = Air.internedToRef(item_val);
case_block.instructions.shrinkRetainingCapacity(0);
case_block.error_return_trace_index = child_block.error_return_trace_index;
if (emit_bb) try sema.emitBackwardBranch(block, else_prong_src);
emit_bb = true;
const prong_hint = try spa.analyzeProngRuntime(
&case_block,
.special,
else_prong.body,
else_prong.capture,
child_block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = .special_else,
} }),
&.{item_ref},
item_ref,
else_prong.has_tag_capture,
);
try branch_hints.append(gpa, prong_hint);
try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
1 + // `item`, no ranges
case_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = 1,
.ranges_len = 0,
.body_len = @intCast(case_block.instructions.items.len),
}));
cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
}
},
.int => {
var it = try RangeSetUnhandledIterator.init(sema, operand_ty, range_set);
while (try it.next()) |cur| {
cases_len += 1;
const item_ref = Air.internedToRef(cur);
case_block.instructions.shrinkRetainingCapacity(0);
case_block.error_return_trace_index = child_block.error_return_trace_index;
if (emit_bb) try sema.emitBackwardBranch(block, else_prong_src);
emit_bb = true;
const prong_hint = try spa.analyzeProngRuntime(
&case_block,
.special,
else_prong.body,
else_prong.capture,
child_block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = .special_else,
} }),
&.{item_ref},
item_ref,
else_prong.has_tag_capture,
);
try branch_hints.append(gpa, prong_hint);
try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
1 + // `item`, no ranges
case_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = 1,
.ranges_len = 0,
.body_len = @intCast(case_block.instructions.items.len),
}));
cases_extra.appendAssumeCapacity(@intFromEnum(item_ref));
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
}
},
.bool => {
if (true_count == 0) {
cases_len += 1;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.error_return_trace_index = child_block.error_return_trace_index;
if (emit_bb) try sema.emitBackwardBranch(block, else_prong_src);
emit_bb = true;
const prong_hint = try spa.analyzeProngRuntime(
&case_block,
.special,
else_prong.body,
else_prong.capture,
child_block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = .special_else,
} }),
&.{.bool_true},
.bool_true,
else_prong.has_tag_capture,
);
try branch_hints.append(gpa, prong_hint);
try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
1 + // `item`, no ranges
case_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = 1,
.ranges_len = 0,
.body_len = @intCast(case_block.instructions.items.len),
}));
cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_true));
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
}
if (false_count == 0) {
cases_len += 1;
case_block.instructions.shrinkRetainingCapacity(0);
case_block.error_return_trace_index = child_block.error_return_trace_index;
if (emit_bb) try sema.emitBackwardBranch(block, else_prong_src);
emit_bb = true;
const prong_hint = try spa.analyzeProngRuntime(
&case_block,
.special,
else_prong.body,
else_prong.capture,
child_block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = .special_else,
} }),
&.{.bool_false},
.bool_false,
else_prong.has_tag_capture,
);
try branch_hints.append(gpa, prong_hint);
try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
1 + // `item`, no ranges
case_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = 1,
.ranges_len = 0,
.body_len = @intCast(case_block.instructions.items.len),
}));
cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_false));
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
}
},
else => return sema.fail(block, else_prong_src, "cannot enumerate values of type '{f}' for 'inline else'", .{
operand_ty.fmt(pt),
}),
};
case_block.instructions.shrinkRetainingCapacity(0);
case_block.error_return_trace_index = child_block.error_return_trace_index;
if (zcu.backendSupportsFeature(.is_named_enum_value) and
else_prong.body.len != 0 and block.wantSafety() and
operand_ty.zigTypeTag(zcu) == .@"enum" and
(!operand_ty.isNonexhaustiveEnum(zcu) or union_originally))
{
try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
const ok = try case_block.addUnOp(.is_named_enum_value, operand);
try sema.addSafetyCheck(&case_block, src, ok, .corrupt_switch);
}
const else_src_idx: LazySrcLoc.Offset.SwitchCaseIndex = if (else_prong_is_underscore)
.special_under
else
.special_else;
const analyze_body = if (union_originally and !else_prong.is_inline)
for (seen_enum_fields, 0..) |seen_field, index| {
if (seen_field != null) continue;
const union_obj = zcu.typeToUnion(maybe_union_ty).?;
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[index]);
if (field_ty.zigTypeTag(zcu) != .noreturn) break true;
} else false
else
true;
const else_hint: std.builtin.BranchHint = if (else_prong.body.len != 0 and err_set and
try sema.maybeErrorUnwrap(&case_block, else_prong.body, operand, operand_src, allow_err_code_unwrap))
h: {
// nothing to do here. weight against error branch
break :h .unlikely;
} else if (else_prong.body.len != 0 and analyze_body and !else_prong.is_inline) h: {
break :h try spa.analyzeProngRuntime(
&case_block,
.special,
else_prong.body,
else_prong.capture,
child_block.src(.{ .switch_capture = .{
.switch_node_offset = switch_node_offset,
.case_idx = else_src_idx,
} }),
undefined, // case_vals may be undefined for special prongs
.none,
false,
);
} else h: {
// We still need a terminator in this block, but we have proven
// that it is unreachable.
if (case_block.wantSafety()) {
try sema.zirDbgStmt(&case_block, cond_dbg_node_index);
try sema.safetyPanic(&case_block, src, .corrupt_switch);
} else {
_ = try case_block.addNoOp(.unreach);
}
// Safety check / unreachable branches are cold.
break :h .cold;
};
try branch_hints.append(gpa, else_hint);
break :else_body case_block.instructions.items;
} else else_body: {
try branch_hints.append(gpa, .none);
break :else_body &.{};
};
assert(branch_hints.items.len == cases_len + 1);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).@"struct".fields.len +
cases_extra.items.len + else_body.len +
(std.math.divCeil(usize, branch_hints.items.len, 10) catch unreachable)); // branch hints
const payload_index = sema.addExtraAssumeCapacity(Air.SwitchBr{
.cases_len = @intCast(cases_len),
.else_body_len = @intCast(else_body.len),
});
{
// Add branch hints.
var cur_bag: u32 = 0;
for (branch_hints.items, 0..) |hint, idx| {
const idx_in_bag = idx % 10;
cur_bag |= @as(u32, @intFromEnum(hint)) << @intCast(idx_in_bag * 3);
if (idx_in_bag == 9) {
sema.air_extra.appendAssumeCapacity(cur_bag);
cur_bag = 0;
}
}
if (branch_hints.items.len % 10 != 0) {
sema.air_extra.appendAssumeCapacity(cur_bag);
}
}
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(cases_extra.items));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_body));
const has_any_continues = spa.operand == .loop and child_block.label.?.merges.extra_insts.items.len > 0;
return try child_block.addInst(.{
.tag = if (has_any_continues) .loop_switch_br else .switch_br,
.data = .{ .pl_op = .{
.operand = operand,
.payload = payload_index,
} },
});
}
/// Repeatedly evaluates a comptime-known `switch` until it terminates normally.
/// Each iteration calls `resolveSwitchComptime`; if the prong body executes a
/// `continue :sw val` (ZIR `switch_continue`) targeting this switch, the new
/// operand is coerced, re-resolved to a comptime value, and the loop restarts
/// with the updated `SwitchProngAnalysis` operand. Any other comptime break
/// (or error) is propagated to the caller.
fn resolveSwitchComptimeLoop(
    sema: *Sema,
    init_spa: SwitchProngAnalysis,
    child_block: *Block,
    /// Type of the operand as written, possibly a pointer when switching by-ref.
    maybe_ptr_operand_ty: Type,
    cond_ty: Type,
    init_cond_val: Value,
    switch_node_offset: std.zig.Ast.Node.Offset,
    special_members_only: ?SpecialProng,
    special_generic: SpecialProng,
    special_generic_is_under: bool,
    case_vals: std.ArrayList(Air.Inst.Ref),
    scalar_cases_len: u32,
    multi_cases_len: u32,
    err_set: bool,
    empty_enum: bool,
    operand_is_ref: bool,
) CompileError!Air.Inst.Ref {
    var spa = init_spa;
    var cond_val = init_cond_val;
    while (true) {
        if (resolveSwitchComptime(
            sema,
            spa,
            child_block,
            spa.operand.simple.cond,
            cond_val,
            cond_ty,
            switch_node_offset,
            special_members_only,
            special_generic,
            special_generic_is_under,
            case_vals,
            scalar_cases_len,
            multi_cases_len,
            err_set,
            empty_enum,
        )) |result| {
            return result;
        } else |err| switch (err) {
            error.ComptimeBreak => {
                const break_inst = sema.code.instructions.get(@intFromEnum(sema.comptime_break_inst));
                // A break that is not a `switch_continue` belongs to an enclosing block.
                if (break_inst.tag != .switch_continue) return error.ComptimeBreak;
                const extra = sema.code.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data;
                // A `switch_continue` for a *different* switch also propagates outward.
                if (extra.block_inst != spa.switch_block_inst) return error.ComptimeBreak;
                // This is a `switch_continue` targeting this block. Change the operand and start over.
                const src = child_block.nodeOffset(extra.operand_src_node.unwrap().?);
                const new_operand_uncoerced = try sema.resolveInst(break_inst.data.@"break".operand);
                const new_operand = try sema.coerce(child_block, maybe_ptr_operand_ty, new_operand_uncoerced, src);
                // Guard against infinite comptime dispatch loops.
                try sema.emitBackwardBranch(child_block, src);
                const val, const ref = if (operand_is_ref)
                    .{ try sema.analyzeLoad(child_block, src, new_operand, src), new_operand }
                else
                    .{ new_operand, undefined };
                const cond_ref = try sema.switchCond(child_block, src, val);
                cond_val = try sema.resolveConstDefinedValue(child_block, src, cond_ref, null);
                spa.operand = .{ .simple = .{
                    .by_val = val,
                    .by_ref = ref,
                    .cond = cond_ref,
                } };
            },
            else => |e| return e,
        }
    }
}
/// Evaluates a `switch` whose operand is comptime-known by walking the prongs
/// in ZIR-extra order and dispatching to the single prong that matches
/// `operand_val`. Falls back to the special (`else`/`_`) prong when no
/// explicit item or range matches. May return `error.ComptimeBreak` if the
/// chosen prong body breaks out of the switch (including `switch_continue`,
/// which the caller's loop handles).
fn resolveSwitchComptime(
    sema: *Sema,
    spa: SwitchProngAnalysis,
    child_block: *Block,
    cond_operand: Air.Inst.Ref,
    operand_val: Value,
    operand_ty: Type,
    switch_node_offset: std.zig.Ast.Node.Offset,
    special_members_only: ?SpecialProng,
    special_generic: SpecialProng,
    special_generic_is_under: bool,
    case_vals: std.ArrayList(Air.Inst.Ref),
    scalar_cases_len: u32,
    multi_cases_len: u32,
    err_set: bool,
    empty_enum: bool,
) CompileError!Air.Inst.Ref {
    const zcu = sema.pt.zcu;
    const merges = &child_block.label.?.merges;
    // Range comparisons below need a fully-resolved (non-lazy) value.
    const resolved_operand_val = try sema.resolveLazyValue(operand_val);
    // Walk the ZIR extra array; prong layout starts right after the special prong.
    var extra_index: usize = special_generic.end;
    {
        // Scalar prongs: exactly one item each, pre-resolved into `case_vals`.
        var scalar_i: usize = 0;
        while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
            extra_index += 1;
            const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
            extra_index += 1;
            const body = sema.code.bodySlice(extra_index, info.body_len);
            extra_index += info.body_len;
            const item = case_vals.items[scalar_i];
            // Validation already proved the item is a defined comptime value.
            const item_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
            if (operand_val.eql(item_val, operand_ty, sema.pt.zcu)) {
                if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
                return spa.resolveProngComptime(
                    child_block,
                    .normal,
                    body,
                    info.capture,
                    child_block.src(.{ .switch_capture = .{
                        .switch_node_offset = switch_node_offset,
                        .case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) },
                    } }),
                    &.{item},
                    if (info.is_inline) cond_operand else .none,
                    info.has_tag_capture,
                    merges,
                );
            }
        }
    }
    {
        // Multi prongs: several items and/or ranges sharing one body.
        var multi_i: usize = 0;
        var case_val_idx: usize = scalar_cases_len;
        while (multi_i < multi_cases_len) : (multi_i += 1) {
            const items_len = sema.code.extra[extra_index];
            extra_index += 1;
            const ranges_len = sema.code.extra[extra_index];
            extra_index += 1;
            const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
            extra_index += 1 + items_len;
            // The body follows the range bounds (2 extra words per range).
            const body = sema.code.bodySlice(extra_index + 2 * ranges_len, info.body_len);
            const items = case_vals.items[case_val_idx..][0..items_len];
            case_val_idx += items_len;
            for (items) |item| {
                // Validation above ensured these will succeed.
                const item_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, item, undefined) catch unreachable;
                if (operand_val.eql(item_val, operand_ty, sema.pt.zcu)) {
                    if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
                    return spa.resolveProngComptime(
                        child_block,
                        .normal,
                        body,
                        info.capture,
                        child_block.src(.{ .switch_capture = .{
                            .switch_node_offset = switch_node_offset,
                            .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
                        } }),
                        items,
                        if (info.is_inline) cond_operand else .none,
                        info.has_tag_capture,
                        merges,
                    );
                }
            }
            // Range matches: operand must satisfy first <= operand <= last.
            var range_i: usize = 0;
            while (range_i < ranges_len) : (range_i += 1) {
                const range_items = case_vals.items[case_val_idx..][0..2];
                extra_index += 2;
                case_val_idx += 2;
                // Validation above ensured these will succeed.
                const first_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, range_items[0], undefined) catch unreachable;
                const last_val = sema.resolveConstDefinedValue(child_block, LazySrcLoc.unneeded, range_items[1], undefined) catch unreachable;
                if ((try sema.compareAll(resolved_operand_val, .gte, first_val, operand_ty)) and
                    (try sema.compareAll(resolved_operand_val, .lte, last_val, operand_ty)))
                {
                    if (err_set) try sema.maybeErrorUnwrapComptime(child_block, body, cond_operand);
                    return spa.resolveProngComptime(
                        child_block,
                        .normal,
                        body,
                        info.capture,
                        child_block.src(.{ .switch_capture = .{
                            .switch_node_offset = switch_node_offset,
                            .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
                        } }),
                        undefined, // case_vals may be undefined for ranges
                        if (info.is_inline) cond_operand else .none,
                        info.has_tag_capture,
                        merges,
                    );
                }
            }
            extra_index += info.body_len;
        }
    }
    // No explicit prong matched: fall through to the special prong(s).
    if (err_set) try sema.maybeErrorUnwrapComptime(child_block, special_generic.body, cond_operand);
    if (empty_enum) {
        return .void_value;
    }
    // For a non-exhaustive enum with both `else` and `_`, the `else` prong
    // (`special_members_only`) handles only *named* tag values.
    if (special_members_only) |special| {
        assert(operand_ty.isNonexhaustiveEnum(zcu));
        if (operand_ty.enumTagFieldIndex(operand_val, zcu)) |_| {
            return spa.resolveProngComptime(
                child_block,
                .special,
                special.body,
                special.capture,
                child_block.src(.{ .switch_capture = .{
                    .switch_node_offset = switch_node_offset,
                    .case_idx = .special_else,
                } }),
                undefined, // case_vals may be undefined for special prongs
                if (special.is_inline) cond_operand else .none,
                special.has_tag_capture,
                merges,
            );
        }
    }
    // Generic special prong: `else`, or `_` for unnamed non-exhaustive values.
    return spa.resolveProngComptime(
        child_block,
        .special,
        special_generic.body,
        special_generic.capture,
        child_block.src(.{ .switch_capture = .{
            .switch_node_offset = switch_node_offset,
            .case_idx = if (special_generic_is_under)
                .special_under
            else
                .special_else,
        } }),
        undefined, // case_vals may be undefined for special prongs
        if (special_generic.is_inline) cond_operand else .none,
        special_generic.has_tag_capture,
        merges,
    );
}
/// Iterates, in ascending order, over the integer values of a type that are
/// NOT covered by a `RangeSet`. Used by `inline else` lowering over integer
/// operands to enumerate every value the `else` prong must materialize.
/// Values are produced as interned constants; iteration relies on the range
/// set being sorted and non-overlapping.
const RangeSetUnhandledIterator = struct {
    pt: Zcu.PerThread,
    /// Next candidate value, or `null` once the type's maximum was passed.
    cur: ?InternPool.Index,
    max: InternPool.Index,
    range_i: usize,
    ranges: []const RangeSet.Range,
    /// Arena-allocated scratch limbs for wide integers; empty when the
    /// preallocated stack buffer suffices.
    limbs: []math.big.Limb,

    const preallocated_limbs = math.big.int.calcTwosCompLimbCount(128);

    fn init(sema: *Sema, ty: Type, range_set: RangeSet) !RangeSetUnhandledIterator {
        const pt = sema.pt;
        const int_type = pt.zcu.intern_pool.indexToKey(ty.toIntern()).int_type;
        const needed_limbs = math.big.int.calcTwosCompLimbCount(int_type.bits);
        return .{
            .pt = pt,
            .cur = (try ty.minInt(pt, ty)).toIntern(),
            .max = (try ty.maxInt(pt, ty)).toIntern(),
            .range_i = 0,
            .ranges = range_set.ranges.items,
            // Only allocate when the type is wider than 128 bits.
            .limbs = if (needed_limbs > preallocated_limbs)
                try sema.arena.alloc(math.big.Limb, needed_limbs)
            else
                &.{},
        };
    }

    /// Returns `val + 1` as an interned value, or `null` when `val` is the
    /// type's maximum (iteration is complete).
    fn addOne(it: *const RangeSetUnhandledIterator, val: InternPool.Index) !?InternPool.Index {
        if (val == it.max) return null;
        const int = it.pt.zcu.intern_pool.indexToKey(val).int;

        // Fast path: the value fits in a machine word and does not overflow.
        switch (int.storage) {
            inline .u64, .i64 => |val_int| {
                const next_int = @addWithOverflow(val_int, 1);
                if (next_int[1] == 0)
                    return (try it.pt.intValue(.fromInterned(int.ty), next_int[0])).toIntern();
            },
            .big_int => {},
            .lazy_align, .lazy_size => unreachable,
        }

        // Slow path: perform the increment with big-int arithmetic.
        var val_space: InternPool.Key.Int.Storage.BigIntSpace = undefined;
        const val_bigint = int.storage.toBigInt(&val_space);
        var result_limbs: [preallocated_limbs]math.big.Limb = undefined;
        var result_bigint = math.big.int.Mutable.init(
            if (it.limbs.len > 0) it.limbs else &result_limbs,
            0,
        );
        result_bigint.addScalar(val_bigint, 1);
        return (try it.pt.intValue_big(.fromInterned(int.ty), result_bigint.toConst())).toIntern();
    }

    /// Returns the next unhandled value, skipping over any range whose start
    /// we have reached, or `null` when the whole integer domain is exhausted.
    fn next(it: *RangeSetUnhandledIterator) !?InternPool.Index {
        var cur = it.cur orelse return null;
        while (it.range_i < it.ranges.len and cur == it.ranges[it.range_i].first) {
            defer it.range_i += 1;
            cur = (try it.addOne(it.ranges[it.range_i].last)) orelse {
                it.cur = null;
                return null;
            };
        }
        it.cur = try it.addOne(cur);
        return cur;
    }
};
/// Result of resolving one switch case item: the (possibly re-interned) AIR
/// reference together with its fully-resolved comptime value.
const ResolvedSwitchItem = struct {
    // AIR ref for the item; re-interned if lazy resolution changed the value.
    ref: Air.Inst.Ref,
    // The resolved, non-lazy interned value of the item.
    val: InternPool.Index,
};
/// Resolves a single switch case item: coerces it to `coerce_ty`, requires it
/// to be a defined comptime value, and resolves any lazy value (e.g.
/// `@sizeOf`) so prong items can be compared directly. Returns both the
/// (possibly re-interned) AIR ref and the interned value.
fn resolveSwitchItemVal(
    sema: *Sema,
    block: *Block,
    item_ref: Zir.Inst.Ref,
    /// Coerce `item_ref` to this type.
    coerce_ty: Type,
    item_src: LazySrcLoc,
) CompileError!ResolvedSwitchItem {
    const uncoerced_item = try sema.resolveInst(item_ref);
    // Constructing a LazySrcLoc is costly because we only have the switch AST node.
    // Only if we know for sure we need to report a compile error do we resolve the
    // full source locations.
    const item = try sema.coerce(block, coerce_ty, uncoerced_item, item_src);
    const maybe_lazy = try sema.resolveConstDefinedValue(block, item_src, item, .{ .simple = .switch_item });
    const val = try sema.resolveLazyValue(maybe_lazy);
    // If lazy resolution changed the value, re-intern so `ref` and `val` agree.
    const new_item = if (val.toIntern() != maybe_lazy.toIntern()) blk: {
        break :blk Air.internedToRef(val.toIntern());
    } else item;
    return .{ .ref = new_item, .val = val.toIntern() };
}
fn validateErrSetSwitch(
/// Analyzes the tag capture (`|payload, tag|`) of an `inline` switch prong.
/// Only valid when switching on a tagged union; emits a compile error
/// otherwise. Returns the tag value, statically when possible (inline
/// capture, or a single-item prong) and via `unionToTag` otherwise.
fn analyzeSwitchTagCapture(
    sema: *Sema,
    case_block: *Block,
    /// May be `undefined` if `inline_case_capture` is not `.none`.
    operand_val: Air.Inst.Ref,
    operand_ty: Type,
    capture_src: LazySrcLoc,
    inline_case_capture: Air.Inst.Ref,
    kind: SwitchProngKind,
) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const tag_capture_src: LazySrcLoc = .{
        .base_node_inst = capture_src.base_node_inst,
        .offset = .{ .switch_tag_capture = capture_src.offset.switch_capture },
    };
    if (operand_ty.zigTypeTag(zcu) != .@"union") {
        return sema.fail(case_block, tag_capture_src, "cannot capture tag of non-union type '{f}'", .{
            operand_ty.fmt(pt),
        });
    }
    if (inline_case_capture != .none) {
        return inline_case_capture; // this already is the tag, it's what we're switching on!
    }
    // A single-item prong statically determines the tag: it is the item itself.
    switch (kind) {
        .has_ranges, .special => {},
        .item_refs => |refs| if (refs.len == 1) return refs[0],
    }
    const tag_ty = operand_ty.unionTagType(zcu).?;
    return sema.unionToTag(case_block, tag_ty, operand_val, tag_capture_src);
}
fn analyzeSwitchPayloadCapture(
sema: *Sema,
case_block: *Block,
operand: SwitchOperand,
/// May be `undefined` if this is an inline capture and operand is not a union.
operand_val: Air.Inst.Ref,
/// May be `undefined` if `capture_by_ref` is `false` or if `operand_val` is also `undefined`.
operand_ptr: Air.Inst.Ref,
operand_ty: Type,
operand_src: LazySrcLoc,
capture_src: LazySrcLoc,
capture_by_ref: bool,
is_special_prong: bool,
/// May be `undefined` if `is_special_prong` is `true`.
case_vals: []const Air.Inst.Ref,
/// If this is not `.none`, this is an inline capture.
inline_case_capture: Air.Inst.Ref,
else_err_ty: ?Type,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const src_node_offset = inst_data.src_node;
const src = block.nodeOffset(src_node_offset);
const switch_node_offset = operand_src.offset.node_offset_switch_operand;
var extra_index: usize = else_case.end;
{
var scalar_i: u32 = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1 + info.body_len;
case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
block,
seen_errors,
item_ref,
operand_ty,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) },
.item_idx = .{ .kind = .single, .index = 0 },
} }),
));
}
}
{
var multi_i: u32 = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = sema.code.extra[extra_index];
extra_index += 1;
const ranges_len = sema.code.extra[extra_index];
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]);
extra_index += 1;
const items = sema.code.refSlice(extra_index, items_len);
extra_index += items_len + info.body_len;
try case_vals.ensureUnusedCapacity(gpa, items.len);
for (items, 0..) |item_ref, item_i| {
case_vals.appendAssumeCapacity(try sema.validateSwitchItemError(
block,
seen_errors,
item_ref,
operand_ty,
block.src(.{ .switch_case_item = .{
.switch_node_offset = src_node_offset,
.case_idx = .{ .kind = .multi, .index = @intCast(multi_i) },
.item_idx = .{ .kind = .single, .index = @intCast(item_i) },
} }),
));
if (inline_case_capture != .none) {
const item_val = sema.resolveConstDefinedValue(case_block, .unneeded, inline_case_capture, undefined) catch unreachable;
if (operand_ty.zigTypeTag(zcu) == .@"union") {
const field_index: u32 = @intCast(operand_ty.unionTagFieldIndex(item_val, zcu).?);
const union_obj = zcu.typeToUnion(operand_ty).?;
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_index]);
if (capture_by_ref) {
const operand_ptr_info = sema.typeOf(operand_ptr).ptrInfo(zcu);
const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = operand_ptr_info.flags.is_const,
.is_volatile = operand_ptr_info.flags.is_volatile,
.address_space = operand_ptr_info.flags.address_space,
},
});
return case_block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty);
} else {
if (try sema.resolveDefinedValue(case_block, operand_src, operand_val)) |union_val| {
const tag_and_val = ip.indexToKey(union_val.toIntern()).un;
return .fromIntern(tag_and_val.val);
}
return case_block.addStructFieldVal(operand_val, field_index, field_ty);
}
try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset);
} else if (capture_by_ref) {
return sema.uavRef(item_val.toIntern());
} else {
return inline_case_capture;
}
}
switch (try sema.resolveInferredErrorSetTy(block, src, operand_ty.toIntern())) {
.anyerror_type => {
if (!has_else) {
return sema.fail(
block,
src,
"else prong required when switching on type 'anyerror'",
.{},
);
}
return .anyerror;
},
else => |err_set_ty_index| else_validation: {
const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
var maybe_msg: ?*Zcu.ErrorMsg = null;
errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
const operand_ptr_ty = if (capture_by_ref) sema.typeOf(operand_ptr) else undefined;
for (error_names.get(ip)) |error_name| {
if (!seen_errors.contains(error_name) and !has_else) {
const msg = maybe_msg orelse blk: {
maybe_msg = try sema.errMsg(
src,
"switch must handle all possibilities",
.{},
);
break :blk maybe_msg.?;
if (is_special_prong) {
if (capture_by_ref) return operand_ptr;
return switch (operand_ty.zigTypeTag(zcu)) {
.error_set => e: {
if (else_err_ty) |err_ty| {
break :e sema.bitCast(case_block, err_ty, operand_val, operand_src, null);
} else {
try sema.analyzeUnreachable(case_block, operand_src, false);
break :e .unreachable_value;
}
},
else => operand_val,
};
}
switch (operand_ty.zigTypeTag(zcu)) {
.@"union" => {
const union_obj = zcu.typeToUnion(operand_ty).?;
const first_item_val = sema.resolveConstDefinedValue(case_block, .unneeded, case_vals[0], undefined) catch unreachable;
const first_field_index: u32 = zcu.unionTagFieldIndex(union_obj, first_item_val).?;
const first_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[first_field_index]);
const field_indices = try sema.arena.alloc(u32, case_vals.len);
for (case_vals, field_indices) |item, *field_idx| {
const item_val = sema.resolveConstDefinedValue(case_block, .unneeded, item, undefined) catch unreachable;
field_idx.* = zcu.unionTagFieldIndex(union_obj, item_val).?;
}
// Fast path: if all the operands are the same type already, we don't need to hit
// PTR! This will also allow us to emit simpler code.
const same_types = for (field_indices[1..]) |field_idx| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (!field_ty.eql(first_field_ty, zcu)) break false;
} else true;
const capture_ty: Type = capture_ty: {
if (same_types) break :capture_ty first_field_ty;
// We need values to run PTR on, so make a bunch of undef constants.
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (dummy_captures, field_indices) |*dummy, field_idx| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
dummy.* = try pt.undefRef(field_ty);
}
const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
for (case_srcs, 0..) |*case_src, item_i| {
case_src.* = .{
.base_node_inst = capture_src.base_node_inst,
.offset = .{ .switch_case_item = .{
.switch_node_offset = switch_node_offset,
.case_idx = capture_src.offset.switch_capture.case_idx,
.item_idx = .{ .kind = .single, .value = @intCast(item_i) },
} },
};
}
try sema.errNote(
src,
msg,
"unhandled error value: 'error.{f}'",
.{error_name.fmt(ip)},
);
break :capture_ty sema.resolvePeerTypes(
case_block,
capture_src,
dummy_captures,
.{ .override = case_srcs },
) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return error.AnalysisFail;
try sema.reparentOwnedErrorMsg(capture_src, msg, "capture group with incompatible types", .{});
return error.AnalysisFail;
},
else => |e| return e,
};
};
// By-reference captures have some further restrictions which make them easier to emit
if (capture_by_ref) {
const operand_ptr_info = operand_ptr_ty.ptrInfo(zcu);
const capture_ptr_ty = resolve: {
// By-ref captures of hetereogeneous types are only allowed if all field
// pointer types are peer resolvable to each other.
// We need values to run PTR on, so make a bunch of undef constants.
const dummy_captures = try sema.arena.alloc(Air.Inst.Ref, case_vals.len);
for (field_indices, dummy_captures) |field_idx, *dummy| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
const field_ptr_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
.is_const = operand_ptr_info.flags.is_const,
.is_volatile = operand_ptr_info.flags.is_volatile,
.address_space = operand_ptr_info.flags.address_space,
.alignment = union_obj.fieldAlign(ip, field_idx),
},
});
dummy.* = try pt.undefRef(field_ptr_ty);
}
const case_srcs = try sema.arena.alloc(?LazySrcLoc, case_vals.len);
for (case_srcs, 0..) |*case_src, item_i| {
case_src.* = .{
.base_node_inst = capture_src.base_node_inst,
.offset = .{ .switch_case_item = .{
.switch_node_offset = switch_node_offset,
.case_idx = capture_src.offset.switch_capture.case_idx,
.item_idx = .{ .kind = .single, .value = @intCast(item_i) },
} },
};
}
break :resolve sema.resolvePeerTypes(
case_block,
capture_src,
dummy_captures,
.{ .override = case_srcs },
) catch |err| switch (err) {
error.AnalysisFail => {
const msg = sema.err orelse return error.AnalysisFail;
try sema.errNote(capture_src, msg, "this coercion is only possible when capturing by value", .{});
try sema.reparentOwnedErrorMsg(capture_src, msg, "capture group with incompatible types", .{});
return error.AnalysisFail;
},
else => |e| return e,
};
};
if (try sema.resolveDefinedValue(case_block, operand_src, operand_ptr)) |op_ptr_val| {
if (op_ptr_val.isUndef(zcu)) return pt.undefRef(capture_ptr_ty);
const field_ptr_val = try op_ptr_val.ptrField(first_field_index, pt);
return .fromValue(try pt.getCoerced(field_ptr_val, capture_ptr_ty));
}
try sema.requireRuntimeBlock(case_block, operand_src, null);
return case_block.addStructFieldPtr(operand_ptr, first_field_index, capture_ptr_ty);
}
if (try sema.resolveDefinedValue(case_block, operand_src, operand_val)) |operand_val_val| {
if (operand_val_val.isUndef(zcu)) return pt.undefRef(capture_ty);
const union_val = ip.indexToKey(operand_val_val.toIntern()).un;
if (Value.fromInterned(union_val.tag).isUndef(zcu)) return pt.undefRef(capture_ty);
const uncoerced: Air.Inst.Ref = .fromIntern(union_val.val);
return sema.coerce(case_block, capture_ty, uncoerced, operand_src);
}
try sema.requireRuntimeBlock(case_block, operand_src, null);
if (same_types) {
return case_block.addStructFieldVal(operand_val, first_field_index, capture_ty);
}
// We may have to emit a switch block which coerces the operand to the capture type.
// If we can, try to avoid that using in-memory coercions.
const first_non_imc = in_mem: {
for (field_indices, 0..) |field_idx, i| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(case_block, capture_ty, field_ty, false, zcu.getTarget(), .unneeded, .unneeded, null)) {
break :in_mem i;
}
}
// All fields are in-memory coercible to the resolved type!
// Just take the first field and bitcast the result.
const uncoerced = try case_block.addStructFieldVal(operand_val, first_field_index, first_field_ty);
return case_block.addBitCast(capture_ty, uncoerced);
};
// By-val capture with heterogeneous types which are not all in-memory coercible to
// the resolved capture type. We finally have to fall back to the ugly method.
// However, let's first track which operands are in-memory coercible. There may well
// be several, and we can squash all of these cases into the same switch prong using
// a simple bitcast. We'll make this the 'else' prong.
var in_mem_coercible: std.DynamicBitSet = try .initFull(sema.arena, field_indices.len);
in_mem_coercible.unset(first_non_imc);
{
const next = first_non_imc + 1;
for (field_indices[next..], next..) |field_idx, i| {
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
if (.ok != try sema.coerceInMemoryAllowed(case_block, capture_ty, field_ty, false, zcu.getTarget(), .unneeded, .unneeded, null)) {
in_mem_coercible.unset(i);
}
}
}
if (maybe_msg) |msg| {
maybe_msg = null;
try sema.addDeclaredHereNote(msg, operand_ty);
return sema.failWithOwnedErrorMsg(block, msg);
const capture_block_inst = try case_block.addInstAsIndex(.{
.tag = .block,
.data = .{
.ty_pl = .{
.ty = .fromType(capture_ty),
.payload = undefined, // updated below
},
},
});
const prong_count = field_indices.len - in_mem_coercible.count();
const estimated_extra = prong_count * 6 + (prong_count / 10); // 2 for Case, 1 item, probably 3 insts; plus hints
var cases_extra = try std.array_list.Managed(u32).initCapacity(sema.gpa, estimated_extra);
defer cases_extra.deinit();
{
// All branch hints are `.none`, so just add zero elems.
comptime assert(@intFromEnum(std.builtin.BranchHint.none) == 0);
const need_elems = std.math.divCeil(usize, prong_count + 1, 10) catch unreachable;
try cases_extra.appendNTimes(0, need_elems);
}
if (has_else and seen_errors.count() == error_names.len) {
// In order to enable common patterns for generic code allow simple else bodies
// else => unreachable,
// else => return,
// else => |e| return e,
// even if all the possible errors were already handled.
const tags = sema.code.instructions.items(.tag);
const datas = sema.code.instructions.items(.data);
for (else_case.body) |else_inst| switch (tags[@intFromEnum(else_inst)]) {
.dbg_stmt,
.dbg_var_val,
.ret_type,
.as_node,
.ret_node,
.@"unreachable",
.@"defer",
.defer_err_code,
.err_union_code,
.ret_err_value_code,
.save_err_ret_index,
.restore_err_ret_index_unconditional,
.restore_err_ret_index_fn_entry,
.is_non_err,
.ret_is_non_err,
.condbr,
=> {},
.extended => switch (datas[@intFromEnum(else_inst)].extended.opcode) {
.restore_err_ret_index => {},
else => break,
},
else => break,
} else break :else_validation;
{
// Non-bitcast cases
var it = in_mem_coercible.iterator(.{ .kind = .unset });
while (it.next()) |idx| {
var coerce_block = case_block.makeSubBlock();
defer coerce_block.instructions.deinit(sema.gpa);
const case_src: LazySrcLoc = .{
.base_node_inst = capture_src.base_node_inst,
.offset = .{ .switch_case_item = .{
.switch_node_offset = switch_node_offset,
.case_idx = capture_src.offset.switch_capture.case_idx,
.item_idx = .{ .kind = .single, .value = @intCast(idx) },
} },
};
const field_idx = field_indices[idx];
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
const uncoerced = try coerce_block.addStructFieldVal(operand_val, field_idx, field_ty);
const coerced = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src);
_ = try coerce_block.addBr(capture_block_inst, coerced);
try cases_extra.ensureUnusedCapacity(@typeInfo(Air.SwitchBr.Case).@"struct".fields.len +
1 + // `item`, no ranges
coerce_block.instructions.items.len);
cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{
.items_len = 1,
.ranges_len = 0,
.body_len = @intCast(coerce_block.instructions.items.len),
}));
cases_extra.appendAssumeCapacity(@intFromEnum(case_vals[idx])); // item
cases_extra.appendSliceAssumeCapacity(@ptrCast(coerce_block.instructions.items)); // body
}
}
const else_body_len = len: {
// 'else' prong uses a bitcast
var coerce_block = case_block.makeSubBlock();
defer coerce_block.instructions.deinit(sema.gpa);
const first_imc_item_idx = in_mem_coercible.findFirstSet().?;
const first_imc_field_idx = field_indices[first_imc_item_idx];
const first_imc_field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]);
const uncoerced = try coerce_block.addStructFieldVal(operand_val, first_imc_field_idx, first_imc_field_ty);
const coerced = try coerce_block.addBitCast(capture_ty, uncoerced);
_ = try coerce_block.addBr(capture_block_inst, coerced);
try cases_extra.appendSlice(@ptrCast(coerce_block.instructions.items));
break :len coerce_block.instructions.items.len;
};
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.SwitchBr).@"struct".fields.len +
cases_extra.items.len +
@typeInfo(Air.Block).@"struct".fields.len +
1);
const switch_br_inst: u32 = @intCast(sema.air_instructions.len);
try sema.air_instructions.append(sema.gpa, .{
.tag = .switch_br,
.data = .{
.pl_op = .{
.operand = undefined, // set by switch below
.payload = sema.addExtraAssumeCapacity(Air.SwitchBr{
.cases_len = @intCast(prong_count),
.else_body_len = @intCast(else_body_len),
}),
},
},
});
sema.air_extra.appendSliceAssumeCapacity(cases_extra.items);
// Set up block body
switch (operand) {
.simple => |s| {
const air_datas = sema.air_instructions.items(.data);
air_datas[switch_br_inst].pl_op.operand = s.cond;
air_datas[@intFromEnum(capture_block_inst)].ty_pl.payload =
sema.addExtraAssumeCapacity(Air.Block{ .body_len = 1 });
sema.air_extra.appendAssumeCapacity(switch_br_inst);
},
.loop => {
// The block must first extract the tag from the loaded union.
const tag_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len);
try sema.air_instructions.append(sema.gpa, .{
.tag = .get_union_tag,
.data = .{ .ty_op = .{
.ty = .fromIntern(union_obj.enum_tag_ty),
.operand = operand_val,
} },
});
const air_datas = sema.air_instructions.items(.data);
air_datas[switch_br_inst].pl_op.operand = tag_inst.toRef();
air_datas[@intFromEnum(capture_block_inst)].ty_pl.payload =
sema.addExtraAssumeCapacity(Air.Block{ .body_len = 2 });
sema.air_extra.appendAssumeCapacity(@intFromEnum(tag_inst));
sema.air_extra.appendAssumeCapacity(switch_br_inst);
},
}
return capture_block_inst.toRef();
},
.error_set => {
if (capture_by_ref) {
return sema.fail(
block,
else_case.src,
"unreachable else prong; all cases already handled",
case_block,
capture_src,
"error set cannot be captured by reference",
.{},
);
}
if (case_vals.len == 1) {
const item_val = sema.resolveConstDefinedValue(case_block, .unneeded, case_vals[0], undefined) catch unreachable;
const item_ty = try pt.singleErrorSetType(item_val.getErrorName(zcu).unwrap().?);
return sema.bitCast(case_block, item_ty, operand_val, operand_src, null);
}
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, error_names.len);
for (error_names.get(ip)) |error_name| {
if (seen_errors.contains(error_name)) continue;
names.putAssumeCapacityNoClobber(error_name, {});
try names.ensureUnusedCapacity(sema.arena, case_vals.len);
for (case_vals) |err| {
const err_val = sema.resolveConstDefinedValue(case_block, .unneeded, err, undefined) catch unreachable;
names.putAssumeCapacityNoClobber(err_val.getErrorName(zcu).unwrap().?, {});
}
const error_ty = try pt.errorSetFromUnsortedNames(names.keys());
return sema.bitCast(case_block, error_ty, operand_val, operand_src, null);
},
else => {
// In this case the capture value is just the passed-through value of the
// switch condition. It is comptime-known if there is only one item.
if (capture_by_ref) {
return operand_ptr;
} else if (case_vals.len == 1) {
return case_vals[0];
} else {
return operand_val;
}
// No need to keep the hash map metadata correct; here we
// extract the (sorted) keys only.
return try pt.errorSetFromUnsortedNames(names.keys());
},
}
return null;
}
fn validateSwitchRange(
/// A switch prong item resolved to a comptime-known value, together with the
/// AIR reference used to refer to that value.
const ResolvedSwitchItem = struct {
    ref: Air.Inst.Ref,
    val: Value,
};
/// A resolved item paired with a ZIR `extra` index — presumably the index just
/// past the item (and any trailing body) that was consumed while resolving it;
/// see the `end` value returned by the item-resolution helper. TODO confirm.
const ResolvedSwitchItemAndExtraIndex = struct { ResolvedSwitchItem, usize };
fn resolveSwitchItem(
sema: *Sema,
block: *Block,
range_set: *RangeSet,
first_ref: Zir.Inst.Ref,
last_ref: Zir.Inst.Ref,
operand_ty: Type,
item_src: LazySrcLoc,
) CompileError![2]Air.Inst.Ref {
const first_src: LazySrcLoc = .{
.base_node_inst = item_src.base_node_inst,
.offset = .{ .switch_case_item_range_first = item_src.offset.switch_case_item },
item_ty: Type,
item_info: Zir.Inst.SwitchBlock.ItemInfo,
extra_index: usize,
switch_inst: Zir.Inst.Index,
prong_is_comptime_unreach: bool,
) CompileError!ResolvedSwitchItemAndExtraIndex {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const comp = zcu.comp;
const gpa = comp.gpa;
const io = comp.io;
var end = extra_index;
const uncoerced: Air.Inst.Ref, const uncoerced_ty: Type = uncoerced: switch (item_info.unwrap()) {
.under => unreachable, // caller must check this before calling us
.enum_literal => |str_index| {
const zir_str = sema.code.nullTerminatedString(str_index);
const name = try ip.getOrPutString(gpa, io, pt.tid, zir_str, .no_embedded_nulls);
const uncoerced = try sema.analyzeDeclLiteral(block, item_src, name, item_ty, false);
break :uncoerced .{ uncoerced, .enum_literal };
},
.error_value => |str_index| {
const zir_str = sema.code.nullTerminatedString(str_index);
const name = try ip.getOrPutString(gpa, io, pt.tid, zir_str, .no_embedded_nulls);
// Make sure there's an error integer value associated with `name`.
_ = try pt.getErrorValue(name);
const err_set_ty = try pt.singleErrorSetType(name);
const uncoerced = Air.internedToRef(try pt.intern(.{ .err = .{
.ty = err_set_ty.toIntern(),
.name = name,
} }));
break :uncoerced .{ uncoerced, err_set_ty };
},
.body_len => |body_len| {
const body = sema.code.bodySlice(extra_index, body_len);
end += body.len;
const uncoerced = ref: {
// The result location of item bodies is `.{ .coerce_ty = switch_inst }`.
sema.inst_map.putAssumeCapacity(switch_inst, .fromType(item_ty));
defer assert(sema.inst_map.remove(switch_inst));
const old_comptime_reason = block.comptime_reason;
defer block.comptime_reason = old_comptime_reason;
block.comptime_reason = .{ .reason = .{
.src = item_src,
.r = .{ .simple = .switch_item },
} };
break :ref try sema.resolveInlineBody(block, body, switch_inst);
};
break :uncoerced .{ uncoerced, sema.typeOf(uncoerced) };
},
};
const last_src: LazySrcLoc = .{
.base_node_inst = item_src.base_node_inst,
.offset = .{ .switch_case_item_range_last = item_src.offset.switch_case_item },
const item_ref: Air.Inst.Ref = item_ref: {
if (item_ty.zigTypeTag(zcu) == .error_set and
uncoerced_ty.zigTypeTag(zcu) == .error_set)
{
// We allow prongs with errors which are not part of the error set
// being switched on if their prong body is `=> comptime unreachable,`.
switch (try sema.coerceInMemoryAllowedErrorSets(block, item_ty, uncoerced_ty, item_src, item_src)) {
.ok => if (try sema.resolveValue(uncoerced)) |uncoerced_val| {
break :item_ref try sema.coerceInMemory(uncoerced_val, item_ty);
},
.missing_error => if (prong_is_comptime_unreach) {
break :item_ref uncoerced;
},
.from_anyerror => {},
else => unreachable,
}
}
break :item_ref try sema.coerce(block, item_ty, uncoerced, item_src);
};
const first = try sema.resolveSwitchItemVal(block, first_ref, operand_ty, first_src);
const last = try sema.resolveSwitchItemVal(block, last_ref, operand_ty, last_src);
if (try Value.fromInterned(first.val).compareAll(.gt, Value.fromInterned(last.val), operand_ty, sema.pt)) {
return sema.fail(block, item_src, "range start value is greater than the end value", .{});
}
const maybe_prev_src = try range_set.add(first.val, last.val, item_src);
try sema.validateSwitchDupe(block, maybe_prev_src, item_src);
return .{ first.ref, last.ref };
}
const maybe_lazy = try sema.resolveConstDefinedValue(block, item_src, item_ref, .{ .simple = .switch_item });
/// Validate a single scalar integer item of a `switch` prong: resolve it to a
/// comptime value, record it in `range_set`, and emit a "duplicate switch
/// value" error if the value was already covered by an earlier item or range.
/// Returns the AIR reference of the resolved item.
fn validateSwitchItemInt(
    sema: *Sema,
    block: *Block,
    range_set: *RangeSet,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    item_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const resolved = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src);
    // A scalar item is recorded as a degenerate range covering exactly one value.
    const prev_src = try range_set.add(resolved.val, resolved.val, item_src);
    try sema.validateSwitchDupe(block, prev_src, item_src);
    return resolved.ref;
}
// We have to resolve lazy values here to avoid false negatives when detecting
// duplicate items and comparing items to a comptime-known switch operand.
/// Validate a single enum item of a `switch` prong. Named enum fields are
/// tracked per field index in `seen_fields`; values with no corresponding
/// field (possible for non-exhaustive enums) fall back to the integer
/// `range_set`. Emits a "duplicate switch value" error on repeats and returns
/// the AIR reference of the resolved item.
fn validateSwitchItemEnum(
    sema: *Sema,
    block: *Block,
    seen_fields: []?LazySrcLoc,
    range_set: *RangeSet,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    item_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const ip = &sema.pt.zcu.intern_pool;
    const resolved = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src);
    const tag_int = ip.indexToKey(resolved.val).enum_tag.int;
    if (ip.loadEnumType(ip.typeOf(resolved.val)).tagValueIndex(ip, tag_int)) |field_index| {
        // The tag names a declared field: dedupe via the per-field source table.
        const prev_src = seen_fields[field_index];
        seen_fields[field_index] = item_src;
        try sema.validateSwitchDupe(block, prev_src, item_src);
        return resolved.ref;
    }
    // No declared field for this tag value; dedupe via the integer range set.
    const prev_src = try range_set.add(tag_int, tag_int, item_src);
    try sema.validateSwitchDupe(block, prev_src, item_src);
    return resolved.ref;
}
fn validateSwitchItemError(
sema: *Sema,
block: *Block,
seen_errors: *SwitchErrorSet,
item_ref: Zir.Inst.Ref,
operand_ty: Type,
item_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const item = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src);
const error_name = sema.pt.zcu.intern_pool.indexToKey(item.val).err.name;
const maybe_prev_src = if (try seen_errors.fetchPut(error_name, item_src)) |prev|
prev.value
const val = try sema.resolveLazyValue(maybe_lazy);
const ref: Air.Inst.Ref = if (val.toIntern() == maybe_lazy.toIntern())
item_ref
else
null;
try sema.validateSwitchDupe(block, maybe_prev_src, item_src);
return item.ref;
.fromValue(val);
return .{ .{ .ref = ref, .val = val }, end };
}
fn validateSwitchDupe(
fn validateSwitchItemOrRange(
sema: *Sema,
block: *Block,
maybe_prev_src: ?LazySrcLoc,
item_src: LazySrcLoc,
/// If `opt_last_val` is not `null`, this refers to the first val of a range.
item_val: Value,
opt_last_val: ?Value,
item_ty: Type,
seen_enum_fields: []?LazySrcLoc,
seen_errors: *std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, LazySrcLoc),
seen_sparse_values: *std.AutoHashMapUnmanaged(InternPool.Index, LazySrcLoc),
range_set: *RangeSet,
true_src: *?LazySrcLoc,
false_src: *?LazySrcLoc,
void_src: *?LazySrcLoc,
) CompileError!void {
const prev_item_src = maybe_prev_src orelse return;
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(
item_src,
"duplicate switch value",
.{},
);
errdefer msg.destroy(sema.gpa);
try sema.errNote(
prev_item_src,
msg,
"previous value here",
.{},
);
break :msg msg;
});
}
/// Validate a single `bool` item of a `switch` prong. Bumps the counter for
/// whichever of `true`/`false` the item resolves to; seeing either value more
/// than once is a "duplicate switch value" error. Returns the AIR reference
/// of the resolved item.
fn validateSwitchItemBool(
    sema: *Sema,
    block: *Block,
    true_count: *u8,
    false_count: *u8,
    item_ref: Zir.Inst.Ref,
    item_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const resolved = try sema.resolveSwitchItemVal(block, item_ref, .bool, item_src);
    // Select which counter this item contributes to, then bump it.
    const counter = if (Value.fromInterned(resolved.val).toBool()) true_count else false_count;
    counter.* += 1;
    if (true_count.* > 1 or false_count.* > 1) {
        return sema.fail(block, item_src, "duplicate switch value", .{});
    }
    return resolved.ref;
}
/// Maps an interned switch item value to the source location of the prong item
/// where it was first seen; used to report "duplicate switch value" errors for
/// sparse value types (see `validateSwitchItemSparse`).
const ValueSrcMap = std.AutoHashMapUnmanaged(InternPool.Index, LazySrcLoc);
/// Validate a single item of a `switch` over a sparse value type (one whose
/// values cannot be tracked as integer ranges). Records the item's value in
/// `seen_values`; if it was already present, reports a "duplicate switch
/// value" error. Returns the AIR reference of the resolved item.
fn validateSwitchItemSparse(
    sema: *Sema,
    block: *Block,
    seen_values: *ValueSrcMap,
    item_ref: Zir.Inst.Ref,
    operand_ty: Type,
    item_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
    const resolved = try sema.resolveSwitchItemVal(block, item_ref, operand_ty, item_src);
    if (try seen_values.fetchPut(sema.gpa, resolved.val, item_src)) |prev| {
        // A previous prong already covered this value; this always errors.
        try sema.validateSwitchDupe(block, prev.value, item_src);
        unreachable;
    }
    return resolved.ref;
}
fn validateSwitchNoRange(
sema: *Sema,
block: *Block,
ranges_len: u32,
operand_ty: Type,
src_node_offset: std.zig.Ast.Node.Offset,
) CompileError!void {
if (ranges_len == 0)
return;
const operand_src = block.src(.{ .node_offset_switch_operand = src_node_offset });
const range_src = block.src(.{ .node_offset_switch_range = src_node_offset });
const msg = msg: {
const msg = try sema.errMsg(
operand_src,
"ranges not allowed when switching on type '{f}'",
.{operand_ty.fmt(sema.pt)},
);
errdefer msg.destroy(sema.gpa);
try sema.errNote(
range_src,
msg,
"range here",
.{},
);
break :msg msg;
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const maybe_prev_src: ?LazySrcLoc = maybe_prev_src: switch (item_ty.zigTypeTag(zcu)) {
.@"union" => unreachable,
.@"enum" => {
const int = ip.indexToKey(item_val.toIntern()).enum_tag.int;
if (ip.loadEnumType(item_ty.toIntern()).tagValueIndex(ip, int)) |field_index| {
const maybe_prev_src = seen_enum_fields[field_index];
seen_enum_fields[field_index] = item_src;
break :maybe_prev_src maybe_prev_src;
} else {
break :maybe_prev_src try range_set.add(sema.arena, .{
.first = .fromInterned(int),
.last = .fromInterned(int),
.src = item_src,
}, .fromInterned(ip.typeOf(int)), zcu);
}
},
.error_set => {
const error_name = ip.indexToKey(item_val.toIntern()).err.name;
break :maybe_prev_src if (seen_errors.fetchPutAssumeCapacity(error_name, item_src)) |prev|
prev.value
else
null;
},
.int, .comptime_int => {
if (opt_last_val) |last_val| {
const first_val = item_val;
if (try first_val.compareAll(.gt, last_val, item_ty, pt)) {
return sema.fail(block, item_src, "range start value is greater than the end value", .{});
}
break :maybe_prev_src range_set.addAssumeCapacity(.{
.first = first_val,
.last = last_val,
.src = item_src,
}, item_ty, zcu);
} else {
break :maybe_prev_src range_set.addAssumeCapacity(.{
.first = item_val,
.last = item_val,
.src = item_src,
}, item_ty, zcu);
}
},
.enum_literal, .@"fn", .pointer, .type => {
break :maybe_prev_src if (seen_sparse_values.fetchPutAssumeCapacity(item_val.toIntern(), item_src)) |prev|
prev.value
else
null;
},
.bool => {
if (item_val.toBool()) {
if (true_src.*) |prev_src| break :maybe_prev_src prev_src;
true_src.* = item_src;
} else {
if (false_src.*) |prev_src| break :maybe_prev_src prev_src;
false_src.* = item_src;
}
break :maybe_prev_src null;
},
.void => {
if (void_src.*) |prev_src| break :maybe_prev_src prev_src;
void_src.* = item_src;
break :maybe_prev_src null;
},
else => unreachable, // should have already checked for invalid types
};
return sema.failWithOwnedErrorMsg(block, msg);
if (maybe_prev_src) |prev_src| {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(
item_src,
"duplicate switch value",
.{},
);
errdefer msg.destroy(sema.gpa);
try sema.errNote(
prev_src,
msg,
"previous value here",
.{},
);
break :msg msg;
});
}
}
fn maybeErrorUnwrap(
@@ -18687,14 +18249,13 @@ fn zirTry(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError!
break :msg msg;
});
}
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
if (is_non_err != .none) {
if (try sema.resolveIsNonErrVal(parent_block, operand_src, err_union)) |is_non_err_val| {
// We can propagate `.cold` hints from this branch since it's comptime-known
// to be taken from the parent branch.
const parent_hint = sema.branch_hint;
defer sema.branch_hint = parent_hint orelse if (sema.branch_hint == .cold) .cold else null;
const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?;
if (is_non_err_val.isUndef(zcu)) return sema.failWithUseOfUndef(parent_block, operand_src, null);
if (is_non_err_val.toBool()) {
return sema.analyzeErrUnionPayload(parent_block, src, err_union_ty, err_union, operand_src, false);
}
@@ -18751,14 +18312,13 @@ fn zirTryPtr(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileErr
break :msg msg;
});
}
const is_non_err = try sema.analyzeIsNonErrComptimeOnly(parent_block, operand_src, err_union);
if (is_non_err != .none) {
if (try sema.resolveIsNonErrVal(parent_block, operand_src, err_union)) |is_non_err_val| {
// We can propagate `.cold` hints from this branch since it's comptime-known
// to be taken from the parent branch.
const parent_hint = sema.branch_hint;
defer sema.branch_hint = parent_hint orelse if (sema.branch_hint == .cold) .cold else null;
const is_non_err_val = (try sema.resolveDefinedValue(parent_block, operand_src, is_non_err)).?;
if (is_non_err_val.isUndef(zcu)) return sema.failWithUseOfUndef(parent_block, operand_src, null);
if (is_non_err_val.toBool()) {
return sema.analyzeErrUnionPayloadPtr(parent_block, src, operand, false, false);
}
@@ -31798,58 +31358,73 @@ fn analyzeIsNull(
return block.addUnOp(air_tag, operand);
}
fn analyzePtrIsNonErrComptimeOnly(
fn resolvePtrIsNonErrVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
) CompileError!?Value {
const pt = sema.pt;
const zcu = pt.zcu;
const ptr_ty = sema.typeOf(operand);
assert(ptr_ty.zigTypeTag(zcu) == .pointer);
const child_ty = ptr_ty.childType(zcu);
const child_tag = child_ty.zigTypeTag(zcu);
if (child_tag != .error_set and child_tag != .error_union) return .bool_true;
if (child_tag == .error_set) return .bool_false;
assert(child_tag == .error_union);
if (try sema.resolveIsNonErrFromType(block, src, child_ty)) |res| {
return res;
}
assert(child_ty.zigTypeTag(zcu) == .error_union);
_ = block;
_ = src;
if (try sema.resolveValue(operand)) |eu_ptr_val| {
if (eu_ptr_val.isUndef(zcu)) return .undef_bool;
if (try sema.pointerDeref(block, src, eu_ptr_val, ptr_ty)) |err_union| {
if (err_union.isUndef(zcu)) return .undef_bool;
return .makeBool(err_union.getErrorName(zcu) == .none);
}
}
return .none;
return null;
}
fn analyzeIsNonErrComptimeOnly(
fn resolveIsNonErrVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
) CompileError!?Value {
const zcu = sema.pt.zcu;
if (try sema.resolveIsNonErrFromType(block, src, sema.typeOf(operand))) |res| {
return res;
}
assert(sema.typeOf(operand).zigTypeTag(zcu) == .error_union);
if (try sema.resolveValue(operand)) |err_union| {
if (err_union.isUndef(zcu)) return .undef_bool;
return .makeBool(err_union.getErrorName(zcu) == .none);
}
return null;
}
fn resolveIsNonErrFromType(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
operand_ty: Type,
) CompileError!?Value {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const operand_ty = sema.typeOf(operand);
const ot = operand_ty.zigTypeTag(zcu);
if (ot != .error_set and ot != .error_union) return .bool_true;
if (ot == .error_set) return .bool_false;
if (ot != .error_set and ot != .error_union) return .true;
if (ot == .error_set) return .false;
assert(ot == .error_union);
const payload_ty = operand_ty.errorUnionPayload(zcu);
if (payload_ty.zigTypeTag(zcu) == .noreturn) {
return .bool_false;
return .false;
}
if (operand == .undef) {
return .undef_bool;
} else if (@intFromEnum(operand) < InternPool.static_len) {
// None of the ref tags can be errors.
return .bool_true;
}
const maybe_operand_val = try sema.resolveValue(operand);
// exception if the error union error set is known to be empty,
// we allow the comparison but always make it comptime-known.
const set_ty = ip.errorUnionSet(operand_ty.toIntern());
@@ -31865,26 +31440,23 @@ fn analyzeIsNonErrComptimeOnly(
else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
}
if (maybe_operand_val != null) break :blk;
// Try to avoid resolving inferred error set if possible.
if (ies.errors.count() != 0) return .none;
if (ies.errors.count() != 0) return null;
switch (ies.resolved) {
.anyerror_type => return .none,
.anyerror_type => return null,
.none => {},
else => switch (ip.indexToKey(ies.resolved).error_set_type.names.len) {
0 => return .bool_true,
else => return .none,
0 => return .true,
else => return null,
},
}
// We do not have a comptime answer because this inferred error
// set is not resolved, and an instruction later in this function
// body may or may not cause an error to be added to this set.
return .none;
return null;
},
else => switch (ip.indexToKey(set_ty)) {
.error_set_type => |error_set_type| {
if (error_set_type.names.len == 0) return .bool_true;
if (error_set_type.names.len == 0) return .true;
},
.inferred_error_set_type => |func_index| blk: {
// If the error set is empty, we must return a comptime true or false.
@@ -31896,39 +31468,35 @@ fn analyzeIsNonErrComptimeOnly(
.none => {},
else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
}
if (maybe_operand_val != null) break :blk;
if (sema.fn_ret_ty_ies) |ies| {
if (ies.func == func_index) {
// Try to avoid resolving inferred error set if possible.
if (ies.errors.count() != 0) return .none;
if (ies.errors.count() != 0) return null;
switch (ies.resolved) {
.anyerror_type => return .none,
.anyerror_type => return null,
.none => {},
else => switch (ip.indexToKey(ies.resolved).error_set_type.names.len) {
0 => return .bool_true,
else => return .none,
0 => return .true,
else => return null,
},
}
// We do not have a comptime answer because this inferred error
// set is not resolved, and an instruction later in this function
// body may or may not cause an error to be added to this set.
return .none;
return null;
}
}
const resolved_ty = try sema.resolveInferredErrorSet(block, src, set_ty);
if (resolved_ty == .anyerror_type)
break :blk;
if (ip.indexToKey(resolved_ty).error_set_type.names.len == 0)
return .bool_true;
return .true;
},
else => unreachable,
},
}
if (maybe_operand_val) |err_union| {
return if (err_union.isUndef(zcu)) .undef_bool else if (err_union.getErrorName(zcu) == .none) .bool_true else .bool_false;
}
return .none;
return null;
}
fn analyzeIsNonErr(
@@ -31937,12 +31505,10 @@ fn analyzeIsNonErr(
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const result = try sema.analyzeIsNonErrComptimeOnly(block, src, operand);
if (result == .none) {
try sema.requireRuntimeBlock(block, src, null);
return block.addUnOp(.is_non_err, operand);
if (try sema.resolveIsNonErrVal(block, src, operand)) |val| {
return .fromValue(val);
} else {
return result;
return block.addUnOp(.is_non_err, operand);
}
}
@@ -31952,12 +31518,10 @@ fn analyzePtrIsNonErr(
src: LazySrcLoc,
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const result = try sema.analyzePtrIsNonErrComptimeOnly(block, src, operand);
if (result == .none) {
try sema.requireRuntimeBlock(block, src, null);
return block.addUnOp(.is_non_err_ptr, operand);
if (try sema.resolvePtrIsNonErrVal(block, src, operand)) |val| {
return .fromValue(val);
} else {
return result;
return block.addUnOp(.is_non_err_ptr, operand);
}
}
+15 -15
View File
@@ -1933,12 +1933,12 @@ pub fn isPtrLikeOptional(ty: Type, zcu: *const Zcu) bool {
};
}
/// For *[N]T, returns [N]T.
/// For *T, returns T.
/// For [*]T, returns T.
/// For @Vector(N, T), returns T.
/// For [N]T, returns T.
/// For ?T, returns T.
/// For `*[N]T`, returns `[N]T`.
/// For `*T`, returns `T`.
/// For `[*]T`, returns `T`.
/// For `@Vector(N, T)`, returns `T`.
/// For `[N]T`, returns `T`.
/// For `?T`, returns `T`.
pub fn childType(ty: Type, zcu: *const Zcu) Type {
return childTypeIp(ty, &zcu.intern_pool);
}
@@ -1947,15 +1947,15 @@ pub fn childTypeIp(ty: Type, ip: *const InternPool) Type {
return Type.fromInterned(ip.childType(ty.toIntern()));
}
/// For *[N]T, returns T.
/// For ?*T, returns T.
/// For ?*[N]T, returns T.
/// For ?[*]T, returns T.
/// For *T, returns T.
/// For [*]T, returns T.
/// For [N]T, returns T.
/// For []T, returns T.
/// For anyframe->T, returns T.
/// For `*[N]T`, returns `T`.
/// For `?*T`, returns `T`.
/// For `?*[N]T`, returns `T`.
/// For `?[*]T`, returns `T`.
/// For `*T`, returns `T`.
/// For `[*]T`, returns `T`.
/// For `[N]T`, returns `T`.
/// For `[]T`, returns `T`.
/// For `anyframe->T`, returns `T`.
pub fn elemType2(ty: Type, zcu: *const Zcu) Type {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+21 -68
View File
@@ -878,7 +878,7 @@ pub const Namespace = struct {
ns: Namespace,
zcu: *Zcu,
name: InternPool.NullTerminatedString,
writer: anytype,
writer: *Writer,
) @TypeOf(writer).Error!void {
const sep: u8 = if (ns.parent.unwrap()) |parent| sep: {
try zcu.namespacePtr(parent).renderFullyQualifiedDebugName(
@@ -1125,7 +1125,7 @@ pub const File = struct {
return file.sub_file_path.len - ext.len;
}
pub fn renderFullyQualifiedName(file: File, writer: anytype) !void {
pub fn renderFullyQualifiedName(file: File, writer: *Writer) !void {
// Convert all the slashes into dots and truncate the extension.
const ext = std.fs.path.extension(file.sub_file_path);
const noext = file.sub_file_path[0 .. file.sub_file_path.len - ext.len];
@@ -1135,7 +1135,7 @@ pub const File = struct {
};
}
pub fn renderFullyQualifiedDebugName(file: File, writer: anytype) !void {
pub fn renderFullyQualifiedDebugName(file: File, writer: *Writer) !void {
for (file.sub_file_path) |byte| switch (byte) {
'/', '\\' => try writer.writeByte('/'),
else => try writer.writeByte(byte),
@@ -1742,27 +1742,6 @@ pub const SrcLoc = struct {
} else unreachable;
},
.node_offset_switch_under_prong => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const switch_node = node_off.toAbsolute(src_loc.base_node);
_, const extra_index = tree.nodeData(switch_node).node_and_extra;
const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);
for (case_nodes) |case_node| {
const case = tree.fullSwitchCase(case_node).?;
for (case.ast.values) |val| {
if (tree.nodeTag(val) == .identifier and
mem.eql(u8, tree.tokenSlice(tree.nodeMainToken(val)), "_"))
{
return tree.tokensToSpan(
tree.firstToken(case_node),
tree.lastToken(case_node),
tree.nodeMainToken(val),
);
}
}
} else unreachable;
},
.node_offset_switch_range => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const switch_node = node_off.toAbsolute(src_loc.base_node);
@@ -2176,34 +2155,22 @@ pub const SrcLoc = struct {
var multi_i: u32 = 0;
var scalar_i: u32 = 0;
var underscore_node: Ast.Node.OptionalIndex = .none;
const case = case: for (case_nodes) |case_node| {
const case: Ast.full.SwitchCase = case: for (case_nodes) |case_node| {
const case = tree.fullSwitchCase(case_node).?;
if (case.ast.values.len == 0) {
if (want_case_idx == LazySrcLoc.Offset.SwitchCaseIndex.special_else) {
if (want_case_idx == Zir.UnwrappedSwitchBlock.Case.Index.@"else") {
break :case case;
}
continue :case;
}
if (underscore_node == .none) for (case.ast.values) |val_node| {
if (tree.nodeTag(val_node) == .identifier and
mem.eql(u8, tree.tokenSlice(tree.nodeMainToken(val_node)), "_"))
{
underscore_node = val_node.toOptional();
if (want_case_idx == LazySrcLoc.Offset.SwitchCaseIndex.special_under) {
break :case case;
}
continue :case;
}
};
const is_multi = case.ast.values.len != 1 or
tree.nodeTag(case.ast.values[0]) == .switch_range;
switch (want_case_idx.kind) {
.scalar => if (!is_multi and want_case_idx.index == scalar_i)
.scalar => if (!is_multi and want_case_idx.value == scalar_i)
break :case case,
.multi => if (is_multi and want_case_idx.index == multi_i)
.multi => if (is_multi and want_case_idx.value == multi_i)
break :case case,
}
@@ -2214,12 +2181,12 @@ pub const SrcLoc = struct {
}
} else unreachable;
const want_item = switch (src_loc.lazy) {
const want_item_idx = switch (src_loc.lazy) {
.switch_case_item,
.switch_case_item_range_first,
.switch_case_item_range_last,
=> |x| item_idx: {
assert(want_case_idx != LazySrcLoc.Offset.SwitchCaseIndex.special_else);
assert(want_case_idx != Zir.UnwrappedSwitchBlock.Case.Index.@"else");
break :item_idx x.item_idx;
},
.switch_capture, .switch_tag_capture => {
@@ -2242,16 +2209,14 @@ pub const SrcLoc = struct {
else => unreachable,
};
switch (want_item.kind) {
switch (want_item_idx.kind) {
.single => {
var item_i: u32 = 0;
for (case.ast.values) |item_node| {
if (item_node.toOptional() == underscore_node or
tree.nodeTag(item_node) == .switch_range)
{
if (tree.nodeTag(item_node) == .switch_range) {
continue;
}
if (item_i != want_item.index) {
if (item_i != want_item_idx.value) {
item_i += 1;
continue;
}
@@ -2264,7 +2229,7 @@ pub const SrcLoc = struct {
if (tree.nodeTag(item_node) != .switch_range) {
continue;
}
if (range_i != want_item.index) {
if (range_i != want_item_idx.value) {
range_i += 1;
continue;
}
@@ -2446,10 +2411,6 @@ pub const LazySrcLoc = struct {
/// by taking this AST node index offset from the containing base node,
/// which points to a switch expression AST node. Next, navigate to the else prong.
node_offset_switch_else_prong: Ast.Node.Offset,
/// The source location points to the `_` prong of a switch expression, found
/// by taking this AST node index offset from the containing base node,
/// which points to a switch expression AST node. Next, navigate to the `_` prong.
node_offset_switch_under_prong: Ast.Node.Offset,
/// The source location points to all the ranges of a switch expression, found
/// by taking this AST node index offset from the containing base node,
/// which points to a switch expression AST node. Next, navigate to any of the
@@ -2642,29 +2603,21 @@ pub const LazySrcLoc = struct {
/// The offset of the switch AST node.
switch_node_offset: Ast.Node.Offset,
/// The index of the case to point to within this switch.
case_idx: SwitchCaseIndex,
case_idx: Zir.UnwrappedSwitchBlock.Case.Index,
/// The index of the item to point to within this case.
item_idx: SwitchItemIndex,
item_idx: SwitchItem.Index,
pub const Index = packed struct(u32) {
kind: enum(u1) { single, range },
value: u31,
};
};
pub const SwitchCapture = struct {
/// The offset of the switch AST node.
switch_node_offset: Ast.Node.Offset,
/// The index of the case whose capture to point to.
case_idx: SwitchCaseIndex,
};
pub const SwitchCaseIndex = packed struct(u32) {
kind: enum(u1) { scalar, multi },
index: u31,
pub const special_else: SwitchCaseIndex = @bitCast(@as(u32, std.math.maxInt(u32)));
pub const special_under: SwitchCaseIndex = @bitCast(@as(u32, std.math.maxInt(u32) - 1));
};
pub const SwitchItemIndex = packed struct(u32) {
kind: enum(u1) { single, range },
index: u31,
case_idx: Zir.UnwrappedSwitchBlock.Case.Index,
};
pub const ArrayCat = struct {
+6 -2
View File
@@ -6432,7 +6432,7 @@ pub const FuncGen = struct {
// Don't worry about the size of the type -- it's irrelevant, because the prong values could be fairly dense.
// If they are, then we will construct a jump table.
const min, const max = self.switchCaseItemRange(switch_br);
const min, const max = self.switchCaseItemRange(switch_br) orelse break :jmp_table null;
const min_int = min.getUnsignedInt(zcu) orelse break :jmp_table null;
const max_int = max.getUnsignedInt(zcu) orelse break :jmp_table null;
const table_len = max_int - min_int + 1;
@@ -6595,7 +6595,7 @@ pub const FuncGen = struct {
}
}
fn switchCaseItemRange(self: *FuncGen, switch_br: Air.UnwrappedSwitch) [2]Value {
fn switchCaseItemRange(self: *FuncGen, switch_br: Air.UnwrappedSwitch) ?[2]Value {
const zcu = self.ng.pt.zcu;
var it = switch_br.iterateCases();
var min: ?Value = null;
@@ -6619,6 +6619,10 @@ pub const FuncGen = struct {
if (high) max = vals[1];
}
}
if (min == null) {
assert(max == null);
return null;
}
return .{ min.?, max.? };
}
+4 -1
View File
@@ -4114,7 +4114,10 @@ fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode, op_kind
const zcu = cg.pt.zcu;
const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try cg.resolveInst(un_op);
const err_union_ty = cg.typeOf(un_op);
const err_union_ty = switch (op_kind) {
.value => cg.typeOf(un_op),
.ptr => cg.typeOf(un_op).childType(zcu),
};
const pl_ty = err_union_ty.errorUnionPayload(zcu);
const result: WValue = result: {
+107 -290
View File
@@ -447,10 +447,9 @@ const Writer = struct {
.switch_block,
.switch_block_ref,
.switch_block_err_union,
=> try self.writeSwitchBlock(stream, inst),
.switch_block_err_union => try self.writeSwitchBlockErrUnion(stream, inst),
.field_ptr_load,
.field_ptr,
.decl_literal,
@@ -1987,322 +1986,140 @@ const Writer = struct {
try self.writeSrcNode(stream, inst_data.src_node);
}
fn writeSwitchBlockErrUnion(self: *Writer, stream: *std.Io.Writer, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = self.code.extraData(Zir.Inst.SwitchBlockErrUnion, inst_data.payload_index);
fn writeSwitchBlock(
self: *Writer,
stream: *std.Io.Writer,
inst: Zir.Inst.Index,
) !void {
const zir_switch = self.code.getSwitchBlock(inst);
var extra_index = zir_switch.end;
var extra_index: usize = extra.end;
const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
const multi_cases_len = self.code.extra[extra_index];
extra_index += 1;
break :blk multi_cases_len;
} else 0;
const err_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_uses_err_capture) blk: {
const tag_capture_inst = self.code.extra[extra_index];
extra_index += 1;
break :blk @enumFromInt(tag_capture_inst);
} else undefined;
try self.writeInstRef(stream, extra.data.operand);
if (extra.data.bits.any_uses_err_capture) {
try stream.writeAll(", err_capture=");
try self.writeInstIndex(stream, err_capture_inst);
}
try self.writeInstRef(stream, zir_switch.main_operand);
self.indent += 2;
{
const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
assert(!info.is_inline);
const body = self.code.bodySlice(extra_index, info.body_len);
extra_index += body.len;
if (zir_switch.non_err_case) |non_err_case| {
if (non_err_case.operand_is_ref) try stream.writeAll(" ref");
try stream.writeAll(",\n");
try stream.splatByteAll(' ', self.indent);
try self.writeSwitchCaptures(stream, non_err_case.capture, false, inst, &zir_switch);
try stream.writeAll("non_err => ");
try self.writeBracedBody(stream, body);
try self.writeBracedBody(stream, non_err_case.body);
try stream.writeAll(" ");
try self.writeSrcNode(stream, zir_switch.catch_or_if_src_node_offset.unwrap().?);
}
if (extra.data.bits.has_else) {
const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
const capture_text = switch (info.capture) {
.none => "",
.by_val => "by_val ",
.by_ref => "by_ref ",
};
const inline_text = if (info.is_inline) "inline " else "";
const body = self.code.bodySlice(extra_index, info.body_len);
extra_index += body.len;
if (zir_switch.else_case) |else_case| {
try stream.writeAll(",\n");
try stream.splatByteAll(' ', self.indent);
try stream.print("{s}{s}else => ", .{ capture_text, inline_text });
try self.writeBracedBody(stream, body);
try self.writeSwitchCaptures(stream, else_case.capture, else_case.has_tag_capture, inst, &zir_switch);
if (else_case.is_inline) try stream.writeAll("inline ");
try stream.writeAll("else => ");
try self.writeBracedBody(stream, else_case.body);
}
{
const scalar_cases_len = extra.data.bits.scalar_cases_len;
var scalar_i: usize = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
extra_index += 1;
const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
const body = self.code.bodySlice(extra_index, info.body_len);
extra_index += info.body_len;
var case_it = zir_switch.iterateCases();
while (case_it.next()) |case| {
try stream.writeAll(",\n");
try stream.splatByteAll(' ', self.indent);
try stream.writeAll(",\n");
try stream.splatByteAll(' ', self.indent);
switch (info.capture) {
.none => {},
.by_val => try stream.writeAll("by_val "),
.by_ref => try stream.writeAll("by_ref "),
const prong_info = case.prong_info;
try self.writeSwitchCaptures(stream, prong_info.capture, prong_info.has_tag_capture, inst, &zir_switch);
if (prong_info.is_inline) try stream.writeAll("inline ");
const prong_body = self.code.bodySlice(extra_index, prong_info.body_len);
extra_index += prong_body.len;
for (case.item_infos, 0..) |item_info, i| {
if (i > 0) try stream.writeAll(", ");
switch (item_info.unwrap()) {
.enum_literal => |str_index| {
const str = self.code.nullTerminatedString(str_index);
try stream.print("\".{f}\"", .{std.zig.fmtString(str)});
},
.error_value => |str_index| {
const str = self.code.nullTerminatedString(str_index);
try stream.print("\"error.{f}\"", .{std.zig.fmtString(str)});
},
.under => try stream.writeByte('_'),
.body_len => |body_len| {
const item_body = self.code.bodySlice(extra_index, body_len);
extra_index += item_body.len;
try self.writeBracedDecl(stream, item_body);
},
}
if (info.is_inline) try stream.writeAll("inline ");
try self.writeInstRef(stream, item_ref);
try stream.writeAll(" => ");
try self.writeBracedBody(stream, body);
}
}
{
var multi_i: usize = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = self.code.extra[extra_index];
extra_index += 1;
const ranges_len = self.code.extra[extra_index];
extra_index += 1;
const info = @as(Zir.Inst.SwitchBlock.ProngInfo, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
const items = self.code.refSlice(extra_index, items_len);
extra_index += items_len;
try stream.writeAll(",\n");
try stream.splatByteAll(' ', self.indent);
switch (info.capture) {
.none => {},
.by_val => try stream.writeAll("by_val "),
.by_ref => try stream.writeAll("by_ref "),
for (case.range_infos, 0..) |range_info, i| {
if (i > 0 and case.item_infos.len == 0) try stream.writeAll(", ");
switch (range_info[0].unwrap()) {
.enum_literal => |str_index| {
const str = self.code.nullTerminatedString(str_index);
try stream.print("\".{f}\"", .{std.zig.fmtString(str)});
},
.error_value => |str_index| {
const str = self.code.nullTerminatedString(str_index);
try stream.print("\"error.{f}\"", .{std.zig.fmtString(str)});
},
.under => unreachable, // '_..._' is not allowed
.body_len => |body_len| {
const item_body = self.code.bodySlice(extra_index, body_len);
extra_index += item_body.len;
try self.writeBracedDecl(stream, item_body);
},
}
if (info.is_inline) try stream.writeAll("inline ");
for (items, 0..) |item_ref, item_i| {
if (item_i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, item_ref);
try stream.writeAll("...");
switch (range_info[1].unwrap()) {
.enum_literal => |str_index| {
const str = self.code.nullTerminatedString(str_index);
try stream.print("\".{f}\"", .{std.zig.fmtString(str)});
},
.error_value => |str_index| {
const str = self.code.nullTerminatedString(str_index);
try stream.print("\"error.{f}\"", .{std.zig.fmtString(str)});
},
.under => unreachable, // '_..._' is not allowed
.body_len => |body_len| {
const item_body = self.code.bodySlice(extra_index, body_len);
extra_index += item_body.len;
try self.writeBracedDecl(stream, item_body);
},
}
var range_i: usize = 0;
while (range_i < ranges_len) : (range_i += 1) {
const item_first = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
extra_index += 1;
const item_last = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
extra_index += 1;
if (range_i != 0 or items.len != 0) {
try stream.writeAll(", ");
}
try self.writeInstRef(stream, item_first);
try stream.writeAll("...");
try self.writeInstRef(stream, item_last);
}
const body = self.code.bodySlice(extra_index, info.body_len);
extra_index += info.body_len;
try stream.writeAll(" => ");
try self.writeBracedBody(stream, body);
}
try stream.writeAll(" => ");
try self.writeBracedBody(stream, prong_body);
}
self.indent -= 2;
try stream.writeAll(") ");
try self.writeSrcNode(stream, inst_data.src_node);
try self.writeSrcNode(stream, zir_switch.switch_src_node_offset);
}
fn writeSwitchBlock(self: *Writer, stream: *std.Io.Writer, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = self.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
var extra_index: usize = extra.end;
const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
const multi_cases_len = self.code.extra[extra_index];
extra_index += 1;
break :blk multi_cases_len;
} else 0;
const tag_capture_inst: Zir.Inst.Index = if (extra.data.bits.any_has_tag_capture) blk: {
const tag_capture_inst = self.code.extra[extra_index];
extra_index += 1;
break :blk @enumFromInt(tag_capture_inst);
} else undefined;
try self.writeInstRef(stream, extra.data.operand);
if (extra.data.bits.any_has_tag_capture) {
try stream.writeAll(", tag_capture=");
try self.writeInstIndex(stream, tag_capture_inst);
fn writeSwitchCaptures(
self: *Writer,
stream: *std.Io.Writer,
capture: Zir.Inst.SwitchBlock.ProngInfo.Capture,
has_tag_capture: bool,
switch_inst: Zir.Inst.Index,
zir_switch: *const Zir.UnwrappedSwitchBlock,
) !void {
if (capture != .none) {
try stream.print("{t}=", .{capture});
const capture_inst = zir_switch.payload_capture_placeholder.unwrap() orelse switch_inst;
try self.writeInstIndex(stream, capture_inst);
try stream.writeAll(" ");
}
self.indent += 2;
const special_prongs = extra.data.bits.special_prongs;
if (special_prongs.hasElse()) {
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(self.code.extra[extra_index]);
const capture_text = switch (info.capture) {
.none => "",
.by_val => "by_val ",
.by_ref => "by_ref ",
};
const inline_text = if (info.is_inline) "inline " else "";
extra_index += 1;
const body = self.code.bodySlice(extra_index, info.body_len);
extra_index += body.len;
try stream.writeAll(",\n");
try stream.splatByteAll(' ', self.indent);
try stream.print("{s}{s}else => ", .{ capture_text, inline_text });
try self.writeBracedBody(stream, body);
if (has_tag_capture) {
try stream.writeAll("tag=");
const capture_inst = zir_switch.tag_capture_placeholder.unwrap() orelse switch_inst;
try self.writeInstIndex(stream, capture_inst);
try stream.writeAll(" ");
}
if (special_prongs.hasUnder()) {
var single_item_ref: Zir.Inst.Ref = .none;
var items_len: u32 = 0;
var ranges_len: u32 = 0;
if (special_prongs.hasOneAdditionalItem()) {
single_item_ref = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;
} else if (special_prongs.hasManyAdditionalItems()) {
items_len = self.code.extra[extra_index];
extra_index += 1;
ranges_len = self.code.extra[extra_index];
extra_index += 1;
}
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(self.code.extra[extra_index]);
extra_index += 1;
const items = self.code.refSlice(extra_index, items_len);
extra_index += items_len;
try stream.writeAll(",\n");
try stream.splatByteAll(' ', self.indent);
switch (info.capture) {
.none => {},
.by_val => try stream.writeAll("by_val "),
.by_ref => try stream.writeAll("by_ref "),
}
if (info.is_inline) try stream.writeAll("inline ");
try stream.writeAll("_");
if (single_item_ref != .none) {
try stream.writeAll(", ");
try self.writeInstRef(stream, single_item_ref);
}
for (items) |item_ref| {
try stream.writeAll(", ");
try self.writeInstRef(stream, item_ref);
}
var range_i: usize = 0;
while (range_i < ranges_len) : (range_i += 1) {
const item_first: Zir.Inst.Ref = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;
const item_last: Zir.Inst.Ref = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;
try stream.writeAll(", ");
try self.writeInstRef(stream, item_first);
try stream.writeAll("...");
try self.writeInstRef(stream, item_last);
}
const body = self.code.bodySlice(extra_index, info.body_len);
extra_index += info.body_len;
try stream.writeAll(" => ");
try self.writeBracedBody(stream, body);
}
{
const scalar_cases_len = extra.data.bits.scalar_cases_len;
var scalar_i: usize = 0;
while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref: Zir.Inst.Ref = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(self.code.extra[extra_index]);
extra_index += 1;
const body = self.code.bodySlice(extra_index, info.body_len);
extra_index += info.body_len;
try stream.writeAll(",\n");
try stream.splatByteAll(' ', self.indent);
switch (info.capture) {
.none => {},
.by_val => try stream.writeAll("by_val "),
.by_ref => try stream.writeAll("by_ref "),
}
if (info.is_inline) try stream.writeAll("inline ");
try self.writeInstRef(stream, item_ref);
try stream.writeAll(" => ");
try self.writeBracedBody(stream, body);
}
}
{
var multi_i: usize = 0;
while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = self.code.extra[extra_index];
extra_index += 1;
const ranges_len = self.code.extra[extra_index];
extra_index += 1;
const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(self.code.extra[extra_index]);
extra_index += 1;
const items = self.code.refSlice(extra_index, items_len);
extra_index += items_len;
try stream.writeAll(",\n");
try stream.splatByteAll(' ', self.indent);
switch (info.capture) {
.none => {},
.by_val => try stream.writeAll("by_val "),
.by_ref => try stream.writeAll("by_ref "),
}
if (info.is_inline) try stream.writeAll("inline ");
for (items, 0..) |item_ref, item_i| {
if (item_i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, item_ref);
}
var range_i: usize = 0;
while (range_i < ranges_len) : (range_i += 1) {
const item_first: Zir.Inst.Ref = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;
const item_last: Zir.Inst.Ref = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;
if (range_i != 0 or items.len != 0) {
try stream.writeAll(", ");
}
try self.writeInstRef(stream, item_first);
try stream.writeAll("...");
try self.writeInstRef(stream, item_last);
}
const body = self.code.bodySlice(extra_index, info.body_len);
extra_index += info.body_len;
try stream.writeAll(" => ");
try self.writeBracedBody(stream, body);
}
}
self.indent -= 2;
try stream.writeAll(") ");
try self.writeSrcNode(stream, inst_data.src_node);
}
fn writePlNodeField(self: *Writer, stream: *std.Io.Writer, inst: Zir.Inst.Index) !void {
+17
View File
@@ -524,3 +524,20 @@ test "for loop 0 length range" {
comptime unreachable;
}
}
test "labeled break from else" {
const S = struct {
fn doTheTest(x: u32) !void {
var y: u32 = 0;
const ok = label: while (y < x) : (y += 1) {
if (y == 10) break :label false;
} else {
break :label true;
};
try expect(ok);
}
};
try S.doTheTest(5);
try comptime S.doTheTest(5);
}
+14 -28
View File
@@ -513,35 +513,21 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
};
};
switch (comptime @alignOf(S2)) {
4 => {
comptime assert(@TypeOf(&S2.s.base) == *align(4) u8);
comptime assert(@TypeOf(&S2.s.p0.a) == *align(1:0:2) u4);
comptime assert(@TypeOf(&S2.s.p0.b) == *align(1:4:2) u4);
comptime assert(@TypeOf(&S2.s.p0.c) == *u8);
comptime assert(@TypeOf(&S2.s.bit0) == *align(4:24:8) u1);
comptime assert(@TypeOf(&S2.s.p1.a) == *align(4:25:8) u8);
comptime assert(@TypeOf(&S2.s.p2.a) == *align(4:33:8) u7);
comptime assert(@TypeOf(&S2.s.p2.b) == *u8);
comptime assert(@TypeOf(&S2.s.p3.a) == *align(2:0:2) u4);
comptime assert(@TypeOf(&S2.s.p3.b) == *align(2:4:2) u4);
comptime assert(@TypeOf(&S2.s.p3.c) == *u8);
},
8 => {
comptime assert(@TypeOf(&S2.s.base) == *align(8) u8);
comptime assert(@TypeOf(&S2.s.p0.a) == *align(1:0:2) u4);
comptime assert(@TypeOf(&S2.s.p0.b) == *align(1:4:2) u4);
comptime assert(@TypeOf(&S2.s.p0.c) == *u8);
comptime assert(@TypeOf(&S2.s.bit0) == *align(8:24:8) u1);
comptime assert(@TypeOf(&S2.s.p1.a) == *align(8:25:8) u8);
comptime assert(@TypeOf(&S2.s.p2.a) == *align(8:33:8) u7);
comptime assert(@TypeOf(&S2.s.p2.b) == *u8);
comptime assert(@TypeOf(&S2.s.p3.a) == *align(2:0:2) u4);
comptime assert(@TypeOf(&S2.s.p3.b) == *align(2:4:2) u4);
comptime assert(@TypeOf(&S2.s.p3.c) == *u8);
},
else => {},
{
const a = @alignOf(S2);
comptime assert(@TypeOf(&S2.s.base) == *align(a:0:8) u8);
comptime assert(@TypeOf(&S2.s.p0.a) == *align(a:8:8) u4);
comptime assert(@TypeOf(&S2.s.p0.b) == *align(a:12:8) u4);
comptime assert(@TypeOf(&S2.s.p0.c) == *align(a:16:8) u8);
comptime assert(@TypeOf(&S2.s.bit0) == *align(a:24:8) u1);
comptime assert(@TypeOf(&S2.s.p1.a) == *align(a:25:8) u8);
comptime assert(@TypeOf(&S2.s.p2.a) == *align(a:33:8) u7);
comptime assert(@TypeOf(&S2.s.p2.b) == *align(a:40:8) u8);
comptime assert(@TypeOf(&S2.s.p3.a) == *align(a:48:8) u4);
comptime assert(@TypeOf(&S2.s.p3.b) == *align(a:52:8) u4);
comptime assert(@TypeOf(&S2.s.p3.c) == *align(a:56:8) u8);
}
try expect(@intFromPtr(&S2.s.base) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p0.a) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p0.b) - @intFromPtr(&S2.s) == 0);
+187
View File
@@ -1120,3 +1120,190 @@ test "switch on non-exhaustive enum" {
try E.doTheTest(.a);
try comptime E.doTheTest(.a);
}
test "decl literals as switch cases" {
const E = enum(u8) {
bar = 3,
_,
const foo: @This() = @enumFromInt(0xa);
fn doTheTest(e: @This()) !void {
switch (e) {
.bar => return error.TestFailed,
.foo => {},
else => return error.TestFailed,
}
}
};
try E.doTheTest(.foo);
try comptime E.doTheTest(.foo);
}
// TODO audit after #15909 and/or #19855 are decided/implemented
test "switch with uninstantiable union fields" {
const U = union(enum) {
ok: void,
a: noreturn,
b: noreturn,
c: error{},
fn doTheTest(u: @This()) void {
switch (u) {
.ok => {},
.a => comptime unreachable,
.b => comptime unreachable,
.c => comptime unreachable,
}
switch (u) {
.ok => {},
.a, .b, .c => comptime unreachable,
}
switch (u) {
.ok => {},
else => comptime unreachable,
}
switch (u) {
.a => comptime unreachable,
.ok, .b, .c => {},
}
}
};
U.doTheTest(.ok);
comptime U.doTheTest(.ok);
}
test "switch with tag capture" {
const U = union(enum) {
a,
b: i32,
c: u8,
d: i32,
e: noreturn,
fn doTheTest() !void {
try doTheSwitch(.a);
try doTheSwitch(.{ .b = 123 });
try doTheSwitch(.{ .c = 0xFF });
}
fn doTheSwitch(u: @This()) !void {
switch (u) {
.a => |nothing, tag| {
comptime assert(nothing == {});
comptime assert(tag == .a);
try expect(@intFromEnum(tag) == @intFromEnum(@This().a));
},
.b, .d => |_, tag| {
try expect(tag == .b or tag == .d);
},
.e => |payload, tag| {
_ = &payload;
_ = &tag;
comptime unreachable;
},
else => |un, tag| {
try expect(tag == .c);
try expect(un == .c);
try expect(un.c == 0xFF);
},
}
switch (u) {
inline .a, .b, .c => |payload, tag| {
if (@TypeOf(payload) == void) comptime assert(tag == .a);
if (@TypeOf(payload) == i32) comptime assert(tag == .b);
if (@TypeOf(payload) == u8) comptime assert(tag == .c);
},
inline else => |payload, tag| {
if (@TypeOf(payload) == i32) comptime assert(tag == .d);
comptime assert(tag != .e);
},
}
}
};
try U.doTheTest();
try comptime U.doTheTest();
}
test "switch with complex item expressions" {
const S = struct {
fn doTheTest() !void {
try doTheSwitch(2000, 20);
try doTheSwitch(2000, 10);
try doTheSwitch(2000, 5);
try doTheOtherSwitch(@enumFromInt(123));
try doTheOtherSwitch(@enumFromInt(456));
}
fn doTheSwitch(x: u32, comptime factor: u32) !void {
const ok = switch (x) {
num(factor) => true,
typedNum(u32, factor) => true,
blk: {
var val = 400;
val *= factor;
break :blk val;
} => true,
else => false,
};
try expect(ok);
}
fn num(factor: u32) u32 {
return 100 * factor;
}
fn typedNum(comptime T: type, factor: T) T {
return 200 * factor;
}
const E = enum(u32) { _ };
fn doTheOtherSwitch(e: E) !void {
const ok = switch (e) {
@enumFromInt(123) => true,
@enumFromInt(456) => true,
else => false,
};
try expect(ok);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "switch evaluation order" {
const eu: anyerror!u32 = 0;
_ = eu catch |err| switch (err) {
if (true) @compileError("unreachable") => unreachable,
else => unreachable,
};
}
test "switch resolves lazy values correctly" {
const S = extern struct {
a: u16,
b: i16,
};
switch (@sizeOf(S)) {
4 => {},
else => comptime unreachable,
}
}
test "single-item prong in switch on enum has comptime-known capture" {
const E = enum {
a,
b,
c,
fn doTheTest(e: @This()) !void {
switch (e) {
.a => |tag| comptime assert(tag == .a),
.b => return error.TestFailed,
.c => return error.TestFailed,
}
}
};
try E.doTheTest(.a);
try comptime E.doTheTest(.a);
}
+239
View File
@@ -1,5 +1,6 @@
const builtin = @import("builtin");
const std = @import("std");
const assert = std.debug.assert;
const expect = std.testing.expect;
test "simple switch loop" {
@@ -270,3 +271,241 @@ test "switch loop on non-exhaustive enum" {
try S.doTheTest();
try comptime S.doTheTest();
}
test "switch loop with discarded tag capture" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
const U = union(enum) {
a: u32,
b: u32,
c: u32,
};
fn doTheTest() void {
const a: U = .{ .a = 10 };
blk: switch (a) {
inline .b => |_, tag| {
_ = tag;
continue :blk .{ .c = 20 };
},
else => {},
}
}
};
S.doTheTest();
comptime S.doTheTest();
}
test "switch loop with single catch-all prong" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
const E = enum { a, b, c };
const U = union(E) { a: u32, b: u16, c: u8 };
fn doTheTest() !void {
var x: usize = 0;
label: switch (E.a) {
else => {
x += 1;
if (x == 10) break :label;
if (x >= 5) continue :label .b;
continue :label .c;
},
}
try expect(x == 10);
label: switch (E.a) {
.a, .b, .c => {
x += 1;
if (x == 20) break :label;
if (x >= 15) continue :label .b;
continue :label .c;
},
}
try expect(x == 20);
label: switch (E.a) {
else => if (false) continue :label true,
}
const ok = label: switch (U{ .a = 123 }) {
else => |u| {
const y: u32 = switch (u) {
inline else => |y| y,
};
if (y == 456) break :label true;
continue :label .{ .b = 456 };
},
};
comptime assert(ok);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "switch loop on type with opv" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
const E = enum { opv };
const U = union(E) { opv: u0 };
fn doTheTest() !void {
var x: usize = 0;
label: switch (E.opv) {
.opv => {
x += 1;
if (x == 10) break :label;
if (x >= 5) continue :label .opv;
continue :label .opv;
},
}
try expect(x == 10);
label: switch (E.opv) {
else => {
x += 1;
if (x == 20) break :label;
if (x >= 15) continue :label .opv;
continue :label .opv;
},
}
try expect(x == 20);
label: switch (E.opv) {
.opv => if (false) continue :label true,
}
label: switch (U{ .opv = 0 }) {
.opv => |val| {
x += 1;
if (x == 30) break :label;
if (x >= 25) continue :label .{ .opv = val };
continue :label .{ .opv = 0 };
},
}
try expect(x == 30);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "switch loop with tag capture" {
const U = union(enum) {
a,
b: i32,
c: u8,
d: i32,
e: noreturn,
fn doTheTest() !void {
try doTheSwitch(.a);
try doTheSwitch(.{ .b = 123 });
try doTheSwitch(.{ .c = 0xFF });
}
fn doTheSwitch(u: @This()) !void {
const ok1 = label: switch (u) {
.a => |nothing, tag| {
comptime assert(nothing == {});
comptime assert(tag == .a);
try expect(@intFromEnum(tag) == @intFromEnum(@This().a));
continue :label .{ .d = 456 };
},
.b, .d => |_, tag| {
try expect(tag == .b or tag == .d);
continue :label .{ .c = 0x0F };
},
.e => |payload, tag| {
_ = &payload;
_ = &tag;
return error.AnalyzedNoreturnProng;
},
else => |un, tag| {
try expect(tag == .c);
try expect(un == .c);
if (un.c == 0xFF) continue :label .a;
if (un.c == 0x00) break :label false;
break :label true;
},
};
try expect(ok1);
const ok2 = label: switch (u) {
inline .a, .b, .c => |payload, tag| {
if (@TypeOf(payload) == void) {
comptime assert(tag == .a);
continue :label .{ .b = 456 };
}
if (@TypeOf(payload) == i32) {
comptime assert(tag == .b);
continue :label .{ .d = payload };
}
if (@TypeOf(payload) == u8) {
comptime assert(tag == .c);
continue :label .{ .d = payload };
}
},
inline else => |payload, tag| {
if (@TypeOf(payload) == i32) comptime assert(tag == .d);
comptime assert(tag != .e);
if (payload == 0) break :label false;
break :label true;
},
};
try expect(ok2);
}
};
try U.doTheTest();
try comptime U.doTheTest();
}
test "switch loop for error handling" {
const Error = error{ MyError, MyOtherError };
const S = struct {
fn doTheTest() !void {
try doThePayloadSwitch(123);
try doTheErrSwitch(error.MyError);
try doTheErrSwitch(error.MyOtherError);
}
fn doThePayloadSwitch(eu: Error!u32) !void {
const x = eu catch |err| label: switch (err) {
error.MyError => continue :label error.MyOtherError,
error.MyOtherError => break :label 0,
};
try expect(x == 123);
const y = if (eu) |payload| label: {
break :label payload * 2;
} else |err| label: switch (err) {
error.MyError => continue :label error.MyOtherError,
error.MyOtherError => break :label 0,
};
try expect(y == 246);
}
fn doTheErrSwitch(eu: Error!u32) !void {
const x = eu catch |err| label: switch (err) {
error.MyError => continue :label error.MyOtherError,
error.MyOtherError => break :label 123,
};
try expect(x == 123);
const y = if (eu) |payload| label: {
break :label payload * 2;
} else |err| label: switch (err) {
error.MyError => continue :label error.MyOtherError,
error.MyOtherError => break :label 123,
};
try expect(y == 123);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
+206 -18
View File
@@ -18,6 +18,8 @@ test "switch on error union catch capture" {
try testCapture();
try testInline();
try testEmptyErrSet();
try testUnreachableElseProng();
try testErrNotInSet();
try testAddressOf();
}
@@ -240,20 +242,88 @@ test "switch on error union catch capture" {
{
var a: error{}!u64 = 0;
_ = &a;
const b: u64 = a catch |err| switch (err) {
else => |e| return e,
const b = a catch |err| switch (err) {
undefined => @compileError("unreachable"),
};
try expectEqual(@as(u64, 0), b);
}
}
fn testUnreachableElseProng() !void {
{
var a: error{}!u64 = 0;
_ = &a;
const b = a catch |err| switch (err) {
else => unreachable,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{}!u64 = 0;
_ = &a;
const b: u64 = a catch |err| switch (err) {
error.UnknownError => return error.Fail,
const b = a catch |err| switch (err) {
else => return,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{}!u64 = 0;
_ = &a;
const b = a catch |err| switch (err) {
else => |e| return e,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{MyError}!u64 = error.MyError;
_ = &a;
const b = a catch |err| switch (err) {
error.MyError => 0,
else => unreachable,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{MyError}!u64 = error.MyError;
_ = &a;
const b = a catch |err| switch (err) {
error.MyError => 0,
else => return,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{MyError}!u64 = error.MyError;
_ = &a;
const b = a catch |err| switch (err) {
error.MyError => 0,
else => |e| return e,
};
try expectEqual(@as(u64, 0), b);
}
}
fn testErrNotInSet() !void {
{
var a: error{MyError}!u64 = 0;
_ = &a;
const b = a catch |err| switch (err) {
error.MyError => 1,
error.MyOtherError => comptime unreachable,
error.YetAnotherError, error.ThereIsAnother => comptime unreachable,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{MyError}!u64 = error.MyError;
_ = &a;
const b = a catch |err| switch (err) {
error.MyError => 0,
error.MyOtherError => comptime unreachable,
error.YetAnotherError, error.ThereIsAnother => comptime unreachable,
};
try expectEqual(@as(u64, 0), b);
}
}
fn testAddressOf() !void {
@@ -318,6 +388,9 @@ test "switch on error union if else capture" {
try testInlinePtr();
try testEmptyErrSet();
try testEmptyErrSetPtr();
try testUnreachableElseProng();
try testUnreachableElseProngPtr();
try testErrNotInSet();
try testAddressOf();
}
@@ -755,17 +828,8 @@ test "switch on error union if else capture" {
{
var a: error{}!u64 = 0;
_ = &a;
const b: u64 = if (a) |x| x else |err| switch (err) {
else => |e| return e,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{}!u64 = 0;
_ = &a;
const b: u64 = if (a) |x| x else |err| switch (err) {
error.UnknownError => return error.Fail,
else => |e| return e,
const b = if (a) |x| x else |err| switch (err) {
undefined => @compileError("unreachable"),
};
try expectEqual(@as(u64, 0), b);
}
@@ -775,20 +839,144 @@ test "switch on error union if else capture" {
{
var a: error{}!u64 = 0;
_ = &a;
const b: u64 = if (a) |*x| x.* else |err| switch (err) {
else => |e| return e,
const b = if (a) |*x| x.* else |err| switch (err) {
undefined => @compileError("unreachable"),
};
try expectEqual(@as(u64, 0), b);
}
}
fn testUnreachableElseProng() !void {
{
var a: error{}!u64 = 0;
_ = &a;
const b = if (a) |x| x else |err| switch (err) {
else => unreachable,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{}!u64 = 0;
_ = &a;
const b: u64 = if (a) |*x| x.* else |err| switch (err) {
const b = if (a) |x| x else |err| switch (err) {
error.UnknownError => return error.Fail,
else => return,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{}!u64 = 0;
_ = &a;
const b = if (a) |x| x else |err| switch (err) {
error.UnknownError => return error.Fail,
else => |e| return e,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{MyError}!u64 = error.MyError;
_ = &a;
const b = if (a) |x| x else |err| switch (err) {
error.MyError => 0,
else => unreachable,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{MyError}!u64 = error.MyError;
_ = &a;
const b = if (a) |x| x else |err| switch (err) {
error.MyError => 0,
else => return,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{MyError}!u64 = error.MyError;
_ = &a;
const b = if (a) |x| x else |err| switch (err) {
error.MyError => 0,
else => |e| return e,
};
try expectEqual(@as(u64, 0), b);
}
}
fn testUnreachableElseProngPtr() !void {
{
var a: error{}!u64 = 0;
_ = &a;
const b = if (a) |*x| x.* else |err| switch (err) {
else => unreachable,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{}!u64 = 0;
_ = &a;
const b = if (a) |*x| x.* else |err| switch (err) {
else => return,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{}!u64 = 0;
_ = &a;
const b = if (a) |*x| x.* else |err| switch (err) {
else => |e| return e,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{MyError}!u64 = error.MyError;
_ = &a;
const b = if (a) |*x| x.* else |err| switch (err) {
error.MyError => 0,
else => unreachable,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{MyError}!u64 = error.MyError;
_ = &a;
const b = if (a) |*x| x.* else |err| switch (err) {
error.MyError => 0,
else => return,
};
try expectEqual(@as(u64, 0), b);
}
{
var a: error{MyError}!u64 = error.MyError;
_ = &a;
const b = if (a) |*x| x.* else |err| switch (err) {
error.MyError => 0,
else => |e| return e,
};
try expectEqual(@as(u64, 0), b);
}
}
// Checks that a switch on a captured error may name errors that are *not* in
// the operand's error set (`error{MyError}` here): the prongs for
// `MyOtherError`, `YetAnotherError`, and `ThereIsAnother` are statically dead
// and are discharged with `comptime unreachable`.
fn testErrNotInSet() !void {
    {
        // Success payload: the error switch is never reached at runtime.
        var a: error{MyError}!u64 = 0;
        _ = &a; // taking the address avoids the "never mutated" error on `var`
        const b = if (a) |x| x else |err| switch (err) {
            error.MyError => 1,
            error.MyOtherError => comptime unreachable,
            error.YetAnotherError, error.ThereIsAnother => comptime unreachable,
        };
        try expectEqual(@as(u64, 0), b);
    }
    {
        // Error payload: only the `error.MyError` prong can actually run.
        var a: error{MyError}!u64 = error.MyError;
        _ = &a;
        const b = if (a) |x| x else |err| switch (err) {
            error.MyError => 0,
            error.MyOtherError => comptime unreachable,
            error.YetAnotherError, error.ThereIsAnother => comptime unreachable,
        };
        try expectEqual(@as(u64, 0), b);
    }
}
fn testAddressOf() !void {
+17
View File
@@ -399,3 +399,20 @@ test "breaking from a loop in an if statement" {
} else 2;
_ = opt;
}
test "labeled break from else" {
    const S = struct {
        fn doTheTest(x: u32) !void {
            const arr: []const u32 = &.{ 1, 3, 10 };
            // A for loop's `else` clause runs only when the loop finishes
            // without breaking. This verifies that a labeled break targeting
            // the loop's own label is legal from inside that `else` block.
            const ok = label: for (arr) |y| {
                if (y == x) break :label false;
            } else {
                break :label true;
            };
            // x == 5 is not in `arr`, so the `else` branch yielded `true`.
            try expect(ok);
        }
    };
    // Exercised both at runtime and at comptime.
    try S.doTheTest(5);
    try comptime S.doTheTest(5);
}
@@ -0,0 +1,17 @@
export fn entry1() void {
    var x: u32 = 0;
    result: while (x < 5) : (x += 1) {} else {
        continue :result; // invalid: the loop's `else` runs after the loop has finished
    }
}
export fn entry2() void {
    result: for (0..5) |_| {} else {
        continue :result; // invalid: cannot re-enter a completed loop from its `else`
    }
}
// error
//
// :4:9: error: continue outside of loop or labeled switch expression
// :10:9: error: continue outside of loop or labeled switch expression
@@ -18,4 +18,6 @@ comptime {
// error
//
// :5:9: error: duplicate switch value
// :3:9: note: previous value here
// :13:9: error: duplicate switch value
// :11:9: note: previous value here
@@ -36,11 +36,11 @@ export fn f3() void {
// error
//
// :8:10: error: no field named 'x' in enum 'tmp.E'
// :8:10: error: enum 'tmp.E' has no member named 'x'
// :1:11: note: enum declared here
// :16:10: error: no field named 'x' in enum 'tmp.E'
// :16:10: error: enum 'tmp.E' has no member named 'x'
// :1:11: note: enum declared here
// :24:10: error: no field named 'x' in enum 'tmp.E'
// :24:10: error: enum 'tmp.E' has no member named 'x'
// :1:11: note: enum declared here
// :32:10: error: no field named 'x' in enum 'tmp.E'
// :32:10: error: enum 'tmp.E' has no member named 'x'
// :1:11: note: enum declared here
@@ -0,0 +1,10 @@
export fn foo() void {
    const result: u32 = b: {
        continue :b 123; // invalid: `continue` cannot target a plain labeled block
    };
    _ = result;
}
// error
//
// :3:9: error: continue outside of loop or labeled switch expression
@@ -0,0 +1,16 @@
export fn foo() void {
    const S = struct {
        fn doTheTest() void {
            blk: switch (@as(u8, 'a')) {
                '1' => |_| continue :blk '1', // invalid: `|_|` discard capture must be omitted
                else => {},
            }
        }
    };
    S.doTheTest();
    comptime S.doTheTest();
}
// error
//
// :5:25: error: discard of capture; omit it instead
@@ -1,18 +1,34 @@
const Error = error{M};
export fn entry() void {
const f: Error!void = void{};
export fn entry1() void {
var f: Error!void = {};
_ = &f;
if (f) {} else |e| switch (e) {}
}
export fn entry2() void {
const f: Error!void = void{};
var f: Error!void = {};
_ = &f;
f catch |e| switch (e) {};
}
export fn entry3() void {
const f: Error!void = error.M;
if (f) {} else |e| switch (e) {}
}
export fn entry4() void {
const f: Error!void = error.M;
f catch |e| switch (e) {};
}
// error
//
// :5:24: error: switch must handle all possibilities
// :5:24: note: unhandled error value: 'error.M'
// :10:17: error: switch must handle all possibilities
// :10:17: note: unhandled error value: 'error.M'
// :6:24: error: switch must handle all possibilities
// :6:24: note: unhandled error value: 'error.M'
// :12:17: error: switch must handle all possibilities
// :12:17: note: unhandled error value: 'error.M'
// :17:24: error: switch must handle all possibilities
// :17:24: note: unhandled error value: 'error.M'
// :22:17: error: switch must handle all possibilities
// :22:17: note: unhandled error value: 'error.M'
@@ -5,6 +5,5 @@ pub fn main() void {
}
// error
// target=x86_64-linux
//
// :2:23: error: expected error union type, found 'bool'
// :2:11: error: expected error union type, found 'bool'
@@ -1,12 +0,0 @@
const E = enum { a, b, c, d };
pub export fn entry() void {
var x: E = .a;
switch (x) {
.a, .b => |aorb, d| @compileLog(aorb, d),
inline .c, .d => |*cord| @compileLog(cord),
}
}
// error
//
// :5:26: error: tag capture on non-inline prong