mirror of
https://codeberg.org/ziglang/zig.git
synced 2026-04-27 19:09:47 +03:00
b79bd31356
This commit aims to simplify and de-duplicate the logic required for semantically analyzing `switch` expressions. The core logic has been moved to `analyzeSwitchBlock`, `resolveSwitchBlock` and `finishSwitchBlock` and has been rewritten around the new iterator-based API exposed by `Zir.UnwrappedSwitchBlock`. All validation logic and switch prong item resolution have been moved to `validateSwitchBlock`, which produces a `ValidatedSwitchBlock` containing all the necessary information for further analysis. `Zir.UnwrappedSwitchBlock`, `ValidatedSwitchBlock` and `SwitchOperand` replace `SwitchProngAnalysis` while adding more flexibility, mainly for better integration with `switch_block_err_union`. `analyzeSwitchBlock` has an explicit code path for OPV types which lowers them to either a `block`-`br` or a `loop`-`repeat` construct instead of a switch. Backends expect `switch` to actually have an operand that exists at runtime, so this is a bug fix and avoids further special cases in the rest of the switch logic. `resolveSwitchBlock` and `finishSwitchBr` exclusively deal with operands which can have more than one value, at comptime and at runtime respectively. This commit also reworks `RangeSet` to be an unmanaged container and adds `Air.SwitchBr.BranchHints` to offload some complexity from Sema to there and save a few bytes of memory in the process.

Additionally, some new features have been implemented:
- decl literals and everything else requiring a result type (`@enumFromInt`!) may now be used as switch prong items
- union tag captures are now allowed for all prongs, not just `inline` ones
- switch prongs may contain errors which are not in the error set being switched on, if these prongs contain `=> comptime unreachable`

And some bugs have been fixed:
- lots of issues with switching on OPV types are now fixed
- the rules around unreachable `else` prongs when switching on errors now apply to *any* switch on an error, not just to `switch_block_err_union`, and are applied properly based on the AST
- switching on `void` no longer requires an `else` prong unconditionally
- lazy values are properly resolved before any comparisons with prong items
- evaluation order between all kinds of switch statements is now the same, with or without label
107 lines
3.0 KiB
Zig
107 lines
3.0 KiB
Zig
/// A set of value ranges, used to detect overlapping ranges and to check
/// whether a collection of ranges exactly covers an interval (see `spans`).
/// Unmanaged: every allocating/freeing method takes the `Allocator`.
const RangeSet = @This();

/// The ranges currently in the set. Order is unspecified until `spans`
/// sorts them in place.
ranges: std.ArrayList(Range),
|
|
|
|
/// An inclusive range of values `[first, last]`, together with the source
/// location it came from (reported to the user when two ranges overlap).
pub const Range = struct {
    /// Inclusive lower bound. Must have the same type as `last`.
    first: Value,
    /// Inclusive upper bound.
    last: Value,
    /// Where this range was written; returned by `addAssumeCapacity` when
    /// a later range overlaps this one.
    src: LazySrcLoc,
};
|
|
|
|
/// An empty `RangeSet` with no allocated capacity.
pub const empty: RangeSet = .{ .ranges = .empty };
|
|
|
|
/// Frees the backing storage of `ranges` and sets `self` to `undefined`
/// so accidental reuse is caught in safe builds.
pub fn deinit(self: *RangeSet, allocator: Allocator) void {
    self.ranges.deinit(allocator);
    self.* = undefined;
}
|
|
|
|
/// Reserves room for `additional_count` more ranges, so that many calls to
/// `addAssumeCapacity` cannot fail.
pub fn ensureUnusedCapacity(self: *RangeSet, allocator: Allocator, additional_count: usize) Allocator.Error!void {
    return self.ranges.ensureUnusedCapacity(allocator, additional_count);
}
|
|
|
|
/// Inserts `new` into the set without allocating; capacity must have been
/// reserved beforehand (see `ensureUnusedCapacity` or `add`).
/// If `new` overlaps an existing range, nothing is inserted and the source
/// location of that existing range is returned; otherwise returns `null`.
/// Both bounds of `new` must have type `ty`.
pub fn addAssumeCapacity(set: *RangeSet, new: Range, ty: Type, zcu: *Zcu) ?LazySrcLoc {
    assert(new.first.typeOf(zcu).eql(ty, zcu));
    assert(new.last.typeOf(zcu).eql(ty, zcu));

    for (set.ranges.items) |existing| {
        // Disjoint when `new` lies entirely below or entirely above `existing`.
        if (new.last.compareScalar(.lt, existing.first, ty, zcu)) continue;
        if (new.first.compareScalar(.gt, existing.last, ty, zcu)) continue;
        // Neither holds, so the two ranges overlap.
        return existing.src;
    }

    set.ranges.appendAssumeCapacity(new);
    return null;
}
|
|
|
|
/// Allocating variant of `addAssumeCapacity`: reserves room for one more
/// range, then inserts `new`. Returns the source location of an overlapping
/// existing range, or `null` if `new` was inserted.
pub fn add(set: *RangeSet, allocator: Allocator, new: Range, ty: Type, zcu: *Zcu) Allocator.Error!?LazySrcLoc {
    try set.ranges.ensureUnusedCapacity(allocator, 1);
    return set.addAssumeCapacity(new, ty, zcu);
}
|
|
|
|
/// Comparison context threaded through `std.mem.sort` when ordering ranges
/// in `spans`.
const SortCtx = struct {
    ty: Type,
    zcu: *Zcu,
};

/// Orders ranges by their `first` value, ascending.
/// Assumes a and b do not overlap
fn lessThan(ctx: SortCtx, a: Range, b: Range) bool {
    return a.first.compareScalar(.lt, b.first, ctx.ty, ctx.zcu);
}
|
|
|
|
/// Returns whether the ranges in `set` exactly cover the inclusive interval
/// `[first, last]` with no gaps between consecutive ranges.
/// Sorts `set.ranges` in place as a side effect.
/// `ty` must be an integer type (its bit width sizes the big-int scratch
/// buffer); both `first` and `last` must have type `ty`.
pub fn spans(
    set: *RangeSet,
    allocator: Allocator,
    first: Value,
    last: Value,
    ty: Type,
    zcu: *Zcu,
) Allocator.Error!bool {
    assert(first.typeOf(zcu).eql(ty, zcu));
    assert(last.typeOf(zcu).eql(ty, zcu));
    if (set.ranges.items.len == 0) return false;

    std.mem.sort(Range, set.ranges.items, SortCtx{ .ty = ty, .zcu = zcu }, lessThan);

    // After sorting, coverage requires the lowest range to start exactly at
    // `first` and the highest range to end exactly at `last`.
    if (!set.ranges.items[0].first.eql(first, ty, zcu) or
        !set.ranges.items[set.ranges.items.len - 1].last.eql(last, ty, zcu))
    {
        return false;
    }

    // Scratch limbs sized to hold any value of `ty`, used by `counter` below.
    const limbs = try allocator.alloc(
        std.math.big.Limb,
        std.math.big.int.calcTwosCompLimbCount(ty.intInfo(zcu).bits),
    );
    defer allocator.free(limbs);
    var counter: std.math.big.int.Mutable = .init(limbs, 0);

    // Reused for each `toBigInt` call; safe because each result is consumed
    // (copied into `counter` or compared) before `space` is reused.
    var space: InternPool.Key.Int.Storage.BigIntSpace = undefined;

    // look for gaps
    for (set.ranges.items[1..], 0..) |cur, i| {
        // i starts counting from the second item.
        const prev = set.ranges.items[i];

        // prev.last + 1 == cur.first
        counter.copy(prev.last.toBigInt(&space, zcu));
        counter.addScalar(counter.toConst(), 1);

        const cur_start_int = cur.first.toBigInt(&space, zcu);
        if (!cur_start_int.eql(counter.toConst())) {
            // Gap between `prev` and `cur`, so the interval is not covered.
            return false;
        }
    }

    return true;
}
|
|
|
|
const std = @import("std");
|
|
const assert = std.debug.assert;
|
|
const Allocator = std.mem.Allocator;
|
|
|
|
const InternPool = @import("InternPool.zig");
|
|
const Type = @import("Type.zig");
|
|
const Value = @import("Value.zig");
|
|
const Zcu = @import("Zcu.zig");
|
|
const LazySrcLoc = Zcu.LazySrcLoc;
|