compiler: rework inferred error sets

* move inferred error sets into InternPool.
   - they are now represented by pointing directly at the corresponding
     function body value.
* inferred error set working memory is now in Sema and expires after
  the Sema for the function corresponding to the inferred error set is
  finished having its body analyzed.
* error sets use an InternPool.Index.Slice rather than an actual slice
   to avoid lifetime issues.
This commit is contained in:
Andrew Kelley
2023-07-08 23:39:37 -07:00
parent 55e89255e1
commit f3dc53f6b5
7 changed files with 1038 additions and 740 deletions
+6 -4
View File
@@ -1669,8 +1669,9 @@ pub fn ArrayHashMapUnmanaged(
inline fn checkedHash(ctx: anytype, key: anytype) u32 {
comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(key), K, u32, true);
// If you get a compile error on the next line, it means that
const hash = ctx.hash(key); // your generic hash function doesn't accept your key
// If you get a compile error on the next line, it means that your
// generic hash function doesn't accept your key.
const hash = ctx.hash(key);
if (@TypeOf(hash) != u32) {
@compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic hash function that returns the wrong type!\n" ++
@typeName(u32) ++ " was expected, but found " ++ @typeName(@TypeOf(hash)));
@@ -1679,8 +1680,9 @@ pub fn ArrayHashMapUnmanaged(
}
inline fn checkedEql(ctx: anytype, a: anytype, b: K, b_index: usize) bool {
comptime std.hash_map.verifyContext(@TypeOf(ctx), @TypeOf(a), K, u32, true);
// If you get a compile error on the next line, it means that
const eql = ctx.eql(a, b, b_index); // your generic eql function doesn't accept (self, adapt key, K, index)
// If you get a compile error on the next line, it means that your
// generic eql function doesn't accept (self, adapt key, K, index).
const eql = ctx.eql(a, b, b_index);
if (@TypeOf(eql) != bool) {
@compileError("Context " ++ @typeName(@TypeOf(ctx)) ++ " has a generic eql function that returns the wrong type!\n" ++
@typeName(bool) ++ " was expected, but found " ++ @typeName(@TypeOf(eql)));
+510 -204
View File
@@ -53,14 +53,6 @@ allocated_unions: std.SegmentedList(Module.Union, 0) = .{},
/// When a Union object is freed from `allocated_unions`, it is pushed into this stack.
unions_free_list: std.ArrayListUnmanaged(Module.Union.Index) = .{},
/// InferredErrorSet objects are stored in this data structure because:
/// * They contain pointers such as the errors map and the set of other inferred error sets.
/// * They need to be mutated after creation.
allocated_inferred_error_sets: std.SegmentedList(Module.InferredErrorSet, 0) = .{},
/// When a Struct object is freed from `allocated_inferred_error_sets`, it is
/// pushed into this stack.
inferred_error_sets_free_list: std.ArrayListUnmanaged(Module.InferredErrorSet.Index) = .{},
/// Some types such as enums, structs, and unions need to store mappings from field names
/// to field index, or value to field index. In such cases, they will store the underlying
/// field names and values directly, relying on one of these maps, stored separately,
@@ -143,12 +135,24 @@ pub const NullTerminatedString = enum(u32) {
empty = 0,
_,
/// An array of `NullTerminatedString` existing within the `extra` array.
/// This type exists to provide a struct with lifetime that is
/// not invalidated when items are added to the `InternPool`.
pub const Slice = struct {
    /// Index into `ip.extra` where the strings begin.
    start: u32,
    /// Number of `NullTerminatedString` elements in the slice.
    len: u32,

    /// Materializes the slice. The result points into `ip.extra` and, like
    /// the other `extra`-pointer accessors in this file, becomes invalid
    /// after any additions to the `InternPool`.
    pub fn get(slice: Slice, ip: *const InternPool) []NullTerminatedString {
        return @ptrCast(ip.extra.items[slice.start..][0..slice.len]);
    }
};
pub fn toString(self: NullTerminatedString) String {
return @as(String, @enumFromInt(@intFromEnum(self)));
return @enumFromInt(@intFromEnum(self));
}
pub fn toOptional(self: NullTerminatedString) OptionalNullTerminatedString {
return @as(OptionalNullTerminatedString, @enumFromInt(@intFromEnum(self)));
return @enumFromInt(@intFromEnum(self));
}
const Adapter = struct {
@@ -238,7 +242,8 @@ pub const Key = union(enum) {
enum_type: EnumType,
func_type: FuncType,
error_set_type: ErrorSetType,
inferred_error_set_type: Module.InferredErrorSet.Index,
/// The payload is the function body, either a `func_decl` or `func_instance`.
inferred_error_set_type: Index,
/// Typed `undefined`. This will never be `none`; untyped `undefined` is represented
/// via `simple_value` and has a named `Index` tag for it.
@@ -287,14 +292,14 @@ pub const Key = union(enum) {
pub const ErrorSetType = struct {
/// Set of error names, sorted by null terminated string index.
names: []const NullTerminatedString,
names: NullTerminatedString.Slice,
/// This is ignored by `get` but will always be provided by `indexToKey`.
names_map: OptionalMapIndex = .none,
/// Look up field index based on field name.
pub fn nameIndex(self: ErrorSetType, ip: *const InternPool, name: NullTerminatedString) ?u32 {
const map = &ip.maps.items[@intFromEnum(self.names_map.unwrap().?)];
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names };
const adapter: NullTerminatedString.Adapter = .{ .strings = self.names.get(ip) };
const field_index = map.getIndexAdapted(name, adapter) orelse return null;
return @as(u32, @intCast(field_index));
}
@@ -565,6 +570,9 @@ pub const Key = union(enum) {
/// Index into extra array of the `zir_body_inst` corresponding to this function.
/// Used for mutating that data.
zir_body_inst_extra_index: u32,
/// Index into extra array of the resolved inferred error set for this function.
/// Used for mutating that data.
resolved_error_set_extra_index: u32,
/// When a generic function is instantiated, branch_quota is inherited from the
/// active Sema context. Importantly, this value is also updated when an existing
/// generic function instantiation is found and called.
@@ -603,13 +611,21 @@ pub const Key = union(enum) {
return @ptrCast(&ip.extra.items[func.analysis_extra_index]);
}
/// Returns a pointer that becomes invalid after any additions to the `InternPool`.
pub fn zirBodyInst(func: *const Func, ip: *const InternPool) *Zir.Inst.Index {
return @ptrCast(&ip.extra.items[func.zir_body_inst_extra_index]);
}
/// Returns a pointer that becomes invalid after any additions to the `InternPool`.
pub fn branchQuota(func: *const Func, ip: *const InternPool) *u32 {
    // Bug fix: this previously indexed with `zir_body_inst_extra_index`
    // (copy-pasted from `zirBodyInst` above), which aliased the ZIR body
    // instruction slot instead of the branch quota slot.
    return &ip.extra.items[func.branch_quota_extra_index];
}
/// Returns a pointer that becomes invalid after any additions to the `InternPool`.
/// Only valid for functions with an inferred error set (asserted below).
/// The pointee is the `Index` of the resolved regular `error_set` type, or
/// `.none` while the inferred error set has not been resolved yet.
pub fn resolvedErrorSet(func: *const Func, ip: *const InternPool) *Index {
    assert(func.analysis(ip).inferred_error_set);
    return @ptrCast(&ip.extra.items[func.resolved_error_set_extra_index]);
}
};
pub const Int = struct {
@@ -750,7 +766,7 @@ pub const Key = union(enum) {
};
pub fn hash32(key: Key, ip: *const InternPool) u32 {
return @as(u32, @truncate(key.hash64(ip)));
return @truncate(key.hash64(ip));
}
pub fn hash64(key: Key, ip: *const InternPool) u64 {
@@ -914,11 +930,7 @@ pub const Key = union(enum) {
return hasher.final();
},
.error_set_type => |error_set_type| {
var hasher = Hash.init(seed);
for (error_set_type.names) |elem| std.hash.autoHash(&hasher, elem);
return hasher.final();
},
.error_set_type => |x| Hash.hash(seed, std.mem.sliceAsBytes(x.names.get(ip))),
.anon_struct_type => |anon_struct_type| {
var hasher = Hash.init(seed);
@@ -1225,7 +1237,7 @@ pub const Key = union(enum) {
},
.error_set_type => |a_info| {
const b_info = b.error_set_type;
return std.mem.eql(NullTerminatedString, a_info.names, b_info.names);
return std.mem.eql(NullTerminatedString, a_info.names.get(ip), b_info.names.get(ip));
},
.inferred_error_set_type => |a_info| {
const b_info = b.inferred_error_set_type;
@@ -1518,13 +1530,14 @@ pub const Index = enum(u32) {
type_optional: DataIsIndex,
type_anyframe: DataIsIndex,
type_error_union: struct { data: *Key.ErrorUnionType },
type_anyerror_union: DataIsIndex,
type_error_set: struct {
const @"data.names_len" = opaque {};
data: *Tag.ErrorSet,
@"trailing.names.len": *@"data.names_len",
trailing: struct { names: []NullTerminatedString },
},
type_inferred_error_set: struct { data: Module.InferredErrorSet.Index },
type_inferred_error_set: DataIsIndex,
type_enum_auto: struct {
const @"data.fields_len" = opaque {};
data: *EnumAuto,
@@ -1916,11 +1929,14 @@ pub const Tag = enum(u8) {
/// An error union type.
/// data is payload to `Key.ErrorUnionType`.
type_error_union,
/// An error union type of the form `anyerror!T`.
/// data is `Index` of payload type.
type_anyerror_union,
/// An error set type.
/// data is payload to `ErrorSet`.
type_error_set,
/// The inferred error set type of a function.
/// data is `Module.InferredErrorSet.Index`.
/// data is `Index` of a `func_decl` or `func_instance`.
type_inferred_error_set,
/// An enum type with auto-numbered tag values.
/// The enum is exhaustive.
@@ -2156,6 +2172,7 @@ pub const Tag = enum(u8) {
.type_optional => unreachable,
.type_anyframe => unreachable,
.type_error_union => ErrorUnionType,
.type_anyerror_union => unreachable,
.type_error_set => ErrorSet,
.type_inferred_error_set => unreachable,
.type_enum_auto => EnumAuto,
@@ -2251,6 +2268,10 @@ pub const Tag = enum(u8) {
ty: Index,
};
/// Trailing:
/// 0. If `analysis.inferred_error_set` is `true`, `Index` of an `error_set` which
/// is a regular error set corresponding to the finished inferred error set.
/// A `none` value marks that the inferred error set is not resolved yet.
pub const FuncDecl = struct {
analysis: FuncAnalysis,
owner_decl: Module.Decl.Index,
@@ -2263,10 +2284,10 @@ pub const Tag = enum(u8) {
};
/// Trailing:
/// 0. For each parameter of generic_owner: Index
/// - comptime parameter: the comptime-known value
/// - anytype parameter: the type of the runtime-known value
/// - otherwise: `none`
/// 0. If `analysis.inferred_error_set` is `true`, `Index` of an `error_set` which
/// is a regular error set corresponding to the finished inferred error set.
/// A `none` value marks that the inferred error set is not resolved yet.
/// 1. For each parameter of generic_owner: `Index` if comptime, otherwise `none`
pub const FuncInstance = struct {
analysis: FuncAnalysis,
// Needed by the linker for codegen. Not part of hashing or equality.
@@ -2312,14 +2333,19 @@ pub const Tag = enum(u8) {
};
/// State that is mutable during semantic analysis. This data is not used for
/// equality or hashing.
/// equality or hashing, except for `inferred_error_set` which is considered
/// to be part of the type of the function.
pub const FuncAnalysis = packed struct(u32) {
state: State,
is_cold: bool,
is_noinline: bool,
calls_or_awaits_errorable_fn: bool,
stack_alignment: Alignment,
_: u15 = 0,
/// True if this function has an inferred error set.
inferred_error_set: bool,
_: u14 = 0,
pub const State = enum(u8) {
/// This function has not yet undergone analysis, because we have not
@@ -2710,9 +2736,6 @@ pub fn deinit(ip: *InternPool, gpa: Allocator) void {
ip.unions_free_list.deinit(gpa);
ip.allocated_unions.deinit(gpa);
ip.inferred_error_sets_free_list.deinit(gpa);
ip.allocated_inferred_error_sets.deinit(gpa);
ip.decls_free_list.deinit(gpa);
ip.allocated_decls.deinit(gpa);
@@ -2780,19 +2803,15 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
return .{ .ptr_type = ptr_info };
},
.type_optional => .{ .opt_type = @as(Index, @enumFromInt(data)) },
.type_anyframe => .{ .anyframe_type = @as(Index, @enumFromInt(data)) },
.type_optional => .{ .opt_type = @enumFromInt(data) },
.type_anyframe => .{ .anyframe_type = @enumFromInt(data) },
.type_error_union => .{ .error_union_type = ip.extraData(Key.ErrorUnionType, data) },
.type_error_set => {
const error_set = ip.extraDataTrail(Tag.ErrorSet, data);
const names_len = error_set.data.names_len;
const names = ip.extra.items[error_set.end..][0..names_len];
return .{ .error_set_type = .{
.names = @ptrCast(names),
.names_map = error_set.data.names_map.toOptional(),
} };
},
.type_anyerror_union => .{ .error_union_type = .{
.error_set_type = .anyerror_type,
.payload_type = @enumFromInt(data),
} },
.type_error_set => ip.indexToKeyErrorSetType(data),
.type_inferred_error_set => .{
.inferred_error_set_type = @enumFromInt(data),
},
@@ -2870,7 +2889,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.type_enum_explicit => ip.indexToKeyEnum(data, .explicit),
.type_enum_nonexhaustive => ip.indexToKeyEnum(data, .nonexhaustive),
.type_function => .{ .func_type = ip.indexToKeyFuncType(data) },
.type_function => .{ .func_type = ip.extraFuncType(data) },
.undef => .{ .undef = @as(Index, @enumFromInt(data)) },
.runtime_value => .{ .runtime_value = ip.extraData(Tag.TypeValue, data) },
@@ -3117,12 +3136,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
} };
},
.extern_func => .{ .extern_func = ip.extraData(Tag.ExternFunc, data) },
.func_instance => {
@panic("TODO");
},
.func_decl => {
@panic("TODO");
},
.func_instance => .{ .func = ip.indexToKeyFuncInstance(data) },
.func_decl => .{ .func = ip.indexToKeyFuncDecl(data) },
.only_possible_value => {
const ty = @as(Index, @enumFromInt(data));
const ty_item = ip.items.get(@intFromEnum(ty));
@@ -3227,8 +3242,19 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
};
}
fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType {
const type_function = ip.extraDataTrail(Tag.TypeFunction, data);
/// Reconstructs a `Key.ErrorSetType` from its `Tag.ErrorSet` payload.
/// `data` is an index into `extra` (the `data` field of a `type_error_set`
/// item), NOT an index into `items`.
fn indexToKeyErrorSetType(ip: *const InternPool, data: u32) Key {
    const error_set = ip.extraDataTrail(Tag.ErrorSet, data);
    return .{ .error_set_type = .{
        .names = .{
            // The names trail the fixed-size `Tag.ErrorSet` struct in `extra`.
            .start = @intCast(error_set.end),
            .len = error_set.data.names_len,
        },
        .names_map = error_set.data.names_map.toOptional(),
    } };
}
fn extraFuncType(ip: *const InternPool, extra_index: u32) Key.FuncType {
const type_function = ip.extraDataTrail(Tag.TypeFunction, extra_index);
var index: usize = type_function.end;
const comptime_bits: u32 = if (!type_function.data.flags.has_comptime_bits) 0 else b: {
const x = ip.extra.items[index];
@@ -3256,14 +3282,22 @@ fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType {
.cc_is_generic = type_function.data.flags.cc_is_generic,
.section_is_generic = type_function.data.flags.section_is_generic,
.addrspace_is_generic = type_function.data.flags.addrspace_is_generic,
.is_generic = comptime_bits != 0 or
type_function.data.flags.align_is_generic or
type_function.data.flags.cc_is_generic or
type_function.data.flags.section_is_generic or
type_function.data.flags.addrspace_is_generic,
.is_generic = type_function.data.flags.is_generic,
};
}
/// TODO: decode the `Tag.FuncDecl` at extra index `data` into a `Key.Func`.
/// Not yet implemented in this commit; any `func_decl` reaching `indexToKey`
/// panics at runtime.
fn indexToKeyFuncDecl(ip: *const InternPool, data: u32) Key.Func {
    _ = ip;
    _ = data;
    @panic("TODO");
}
/// TODO: decode the `Tag.FuncInstance` at extra index `data` into a
/// `Key.Func`. Not yet implemented in this commit; any `func_instance`
/// reaching `indexToKey` panics at runtime.
fn indexToKeyFuncInstance(ip: *const InternPool, data: u32) Key.Func {
    _ = ip;
    _ = data;
    @panic("TODO");
}
fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key {
const enum_explicit = ip.extraDataTrail(EnumExplicit, data);
const names = @as(
@@ -3301,7 +3335,7 @@ fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key
pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
if (gop.found_existing) return @as(Index, @enumFromInt(gop.index));
if (gop.found_existing) return @enumFromInt(gop.index);
try ip.items.ensureUnusedCapacity(gpa, 1);
switch (key) {
.int_type => |int_type| {
@@ -3392,17 +3426,20 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
});
},
.error_union_type => |error_union_type| {
ip.items.appendAssumeCapacity(.{
ip.items.appendAssumeCapacity(if (error_union_type.error_set_type == .anyerror_type) .{
.tag = .type_anyerror_union,
.data = @intFromEnum(error_union_type.payload_type),
} else .{
.tag = .type_error_union,
.data = try ip.addExtra(gpa, error_union_type),
});
},
.error_set_type => |error_set_type| {
assert(error_set_type.names_map == .none);
assert(std.sort.isSorted(NullTerminatedString, error_set_type.names, {}, NullTerminatedString.indexLessThan));
assert(std.sort.isSorted(NullTerminatedString, error_set_type.names.get(ip), {}, NullTerminatedString.indexLessThan));
const names_map = try ip.addMap(gpa);
try addStringsToMap(ip, gpa, names_map, error_set_type.names);
const names_len = @as(u32, @intCast(error_set_type.names.len));
try addStringsToMap(ip, gpa, names_map, error_set_type.names.get(ip));
const names_len = error_set_type.names.len;
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names_len);
ip.items.appendAssumeCapacity(.{
.tag = .type_error_set,
@@ -3411,7 +3448,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.names_map = names_map,
}),
});
ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(error_set_type.names)));
ip.extra.appendSliceAssumeCapacity(@ptrCast(error_set_type.names.get(ip)));
},
.inferred_error_set_type => |ies_index| {
ip.items.appendAssumeCapacity(.{
@@ -4207,7 +4244,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(memoized_call.arg_values)));
},
}
return @as(Index, @enumFromInt(ip.items.len - 1));
return @enumFromInt(ip.items.len - 1);
}
/// This is equivalent to `Key.FuncType` but adjusted to have a slice for `param_types`.
@@ -4216,13 +4253,13 @@ pub const GetFuncTypeKey = struct {
return_type: Index,
comptime_bits: u32,
noalias_bits: u32,
alignment: Alignment,
cc: std.builtin.CallingConvention,
/// `null` means generic.
alignment: ?Alignment,
/// `null` means generic.
cc: ?std.builtin.CallingConvention,
is_var_args: bool,
is_generic: bool,
is_noinline: bool,
align_is_generic: bool,
cc_is_generic: bool,
section_is_generic: bool,
addrspace_is_generic: bool,
};
@@ -4244,40 +4281,42 @@ pub fn getFuncType(ip: *InternPool, gpa: Allocator, key: GetFuncTypeKey) Allocat
params_len);
try ip.items.ensureUnusedCapacity(gpa, 1);
ip.items.appendAssumeCapacity(.{
.tag = .type_function,
.data = ip.addExtraAssumeCapacity(Tag.TypeFunction{
.params_len = params_len,
.return_type = key.return_type,
.flags = .{
.alignment = key.alignment,
.cc = key.cc,
.is_var_args = key.is_var_args,
.has_comptime_bits = key.comptime_bits != 0,
.has_noalias_bits = key.noalias_bits != 0,
.is_generic = key.is_generic,
.is_noinline = key.is_noinline,
.align_is_generic = key.align_is_generic,
.cc_is_generic = key.cc_is_generic,
.section_is_generic = key.section_is_generic,
.addrspace_is_generic = key.addrspace_is_generic,
},
}),
const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{
.params_len = params_len,
.return_type = key.return_type,
.flags = .{
.alignment = key.alignment orelse .none,
.cc = key.cc orelse .Unspecified,
.is_var_args = key.is_var_args,
.has_comptime_bits = key.comptime_bits != 0,
.has_noalias_bits = key.noalias_bits != 0,
.is_generic = key.is_generic,
.is_noinline = key.is_noinline,
.align_is_generic = key.alignment == null,
.cc_is_generic = key.cc == null,
.section_is_generic = key.section_is_generic,
.addrspace_is_generic = key.addrspace_is_generic,
},
});
if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits);
if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits);
ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types));
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{
.func_type = indexToKeyFuncType(ip, @intCast(ip.items.len - 1)),
.func_type = extraFuncType(ip, func_type_extra_index),
}, adapter);
if (!gop.found_existing) return @enumFromInt(ip.items.len - 1);
if (gop.found_existing) {
ip.extra.items.len = prev_extra_len;
return @enumFromInt(gop.index);
}
// An existing function type was found; undo the additions to our two arrays.
ip.items.len -= 1;
ip.extra.items.len = prev_extra_len;
return @enumFromInt(gop.index);
ip.items.appendAssumeCapacity(.{
.tag = .type_function,
.data = func_type_extra_index,
});
return @enumFromInt(ip.items.len - 1);
}
pub const GetExternFuncKey = struct {
@@ -4299,19 +4338,71 @@ pub fn getExternFunc(ip: *InternPool, gpa: Allocator, key: GetExternFuncKey) All
}
pub const GetFuncDeclKey = struct {
fn_owner_decl: Module.Decl.Index,
param_types: []const Index,
owner_decl: Module.Decl.Index,
ty: Index,
zir_body_inst: Zir.Inst.Index,
lbrace_line: u32,
rbrace_line: u32,
lbrace_column: u32,
rbrace_column: u32,
cc: ?std.builtin.CallingConvention,
is_noinline: bool,
};
pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index {
// The strategy here is to add the function type unconditionally, then to
// ask if it already exists, and if so, revert the lengths of the mutated
// arrays. This is similar to what `getOrPutTrailingString` does.
const prev_extra_len = ip.extra.items.len;
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len);
try ip.items.ensureUnusedCapacity(gpa, 1);
ip.items.appendAssumeCapacity(.{
.tag = .func_decl,
.data = ip.addExtraAssumeCapacity(Tag.FuncDecl{
.analysis = .{
.state = if (key.cc == .Inline) .inline_only else .none,
.is_cold = false,
.is_noinline = key.is_noinline,
.calls_or_awaits_errorable_fn = false,
.stack_alignment = .none,
.inferred_error_set = false,
},
.owner_decl = key.owner_decl,
.ty = key.ty,
.zir_body_inst = key.zir_body_inst,
.lbrace_line = key.lbrace_line,
.rbrace_line = key.rbrace_line,
.lbrace_column = key.lbrace_column,
.rbrace_column = key.rbrace_column,
}),
});
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{
.func = indexToKeyFuncDecl(ip, @intCast(ip.items.len - 1)),
}, adapter);
if (!gop.found_existing) return @enumFromInt(ip.items.len - 1);
// An existing function type was found; undo the additions to our two arrays.
ip.items.len -= 1;
ip.extra.items.len = prev_extra_len;
return @enumFromInt(gop.index);
}
pub const GetFuncDeclIesKey = struct {
owner_decl: Module.Decl.Index,
param_types: []Index,
noalias_bits: u32,
comptime_bits: u32,
return_type: Index,
inferred_error_set: bool,
bare_return_type: Index,
/// null means generic.
cc: ?std.builtin.CallingConvention,
/// null means generic.
alignment: ?Alignment,
section: Section,
/// null means generic
address_space: ?std.builtin.AddressSpace,
section_is_generic: bool,
addrspace_is_generic: bool,
is_var_args: bool,
is_generic: bool,
is_noinline: bool,
@@ -4320,63 +4411,258 @@ pub const GetFuncDeclKey = struct {
rbrace_line: u32,
lbrace_column: u32,
rbrace_column: u32,
pub const Section = union(enum) {
generic,
default,
explicit: InternPool.NullTerminatedString,
};
};
pub fn getFuncDecl(ip: *InternPool, gpa: Allocator, key: GetFuncDeclKey) Allocator.Error!Index {
const fn_owner_decl = ip.declPtr(key.fn_owner_decl);
const decl_index = try ip.createDecl(gpa, .{
.name = undefined,
.src_namespace = fn_owner_decl.src_namespace,
.src_node = fn_owner_decl.src_node,
.src_line = fn_owner_decl.src_line,
.has_tv = true,
.owns_tv = true,
.ty = @panic("TODO"),
.val = @panic("TODO"),
.alignment = .none,
.@"linksection" = fn_owner_decl.@"linksection",
.@"addrspace" = fn_owner_decl.@"addrspace",
.analysis = .complete,
.deletion_flag = false,
.zir_decl_index = fn_owner_decl.zir_decl_index,
.src_scope = fn_owner_decl.src_scope,
.generation = 0,
.is_pub = fn_owner_decl.is_pub,
.is_exported = fn_owner_decl.is_exported,
.has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace,
.has_align = fn_owner_decl.has_align,
.alive = true,
.kind = .anon,
pub fn getFuncDeclIes(ip: *InternPool, gpa: Allocator, key: GetFuncDeclIesKey) Allocator.Error!Index {
// Validate input parameters.
assert(key.bare_return_type != .none);
for (key.param_types) |param_type| assert(param_type != .none);
// The strategy here is to add the function decl unconditionally, then to
// ask if it already exists, and if so, revert the lengths of the mutated
// arrays. This is similar to what `getOrPutTrailingString` does.
const prev_extra_len = ip.extra.items.len;
const params_len: u32 = @intCast(key.param_types.len);
try ip.map.ensureUnusedCapacity(gpa, 4);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.FuncDecl).Struct.fields.len +
1 + // inferred_error_set
@typeInfo(Tag.ErrorUnionType).Struct.fields.len +
@typeInfo(Tag.TypeFunction).Struct.fields.len +
@intFromBool(key.comptime_bits != 0) +
@intFromBool(key.noalias_bits != 0) +
params_len);
try ip.items.ensureUnusedCapacity(gpa, 4);
ip.items.appendAssumeCapacity(.{
.tag = .func_decl,
.data = ip.addExtraAssumeCapacity(Tag.FuncDecl{
.analysis = .{
.state = if (key.cc == .Inline) .inline_only else .none,
.is_cold = false,
.is_noinline = key.is_noinline,
.calls_or_awaits_errorable_fn = false,
.stack_alignment = .none,
.inferred_error_set = true,
},
.owner_decl = key.owner_decl,
.ty = @enumFromInt(ip.items.len + 1),
.zir_body_inst = key.zir_body_inst,
.lbrace_line = key.lbrace_line,
.rbrace_line = key.rbrace_line,
.lbrace_column = key.lbrace_column,
.rbrace_column = key.rbrace_column,
}),
});
// TODO better names for generic function instantiations
const decl_name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{
fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index),
ip.extra.appendAssumeCapacity(@intFromEnum(Index.none));
ip.items.appendAssumeCapacity(.{
.tag = .type_error_union,
.data = ip.addExtraAssumeCapacity(Tag.ErrorUnionType{
.error_set_type = @enumFromInt(ip.items.len + 1),
.payload_type = key.bare_return_type,
}),
});
ip.declPtr(decl_index).name = decl_name;
@panic("TODO");
ip.items.appendAssumeCapacity(.{
.tag = .type_inferred_error_set,
.data = @intCast(ip.items.len - 2),
});
const func_type_extra_index = ip.addExtraAssumeCapacity(Tag.TypeFunction{
.params_len = params_len,
.return_type = @enumFromInt(ip.items.len - 2),
.flags = .{
.alignment = key.alignment orelse .none,
.cc = key.cc orelse .Unspecified,
.is_var_args = key.is_var_args,
.has_comptime_bits = key.comptime_bits != 0,
.has_noalias_bits = key.noalias_bits != 0,
.is_generic = key.is_generic,
.is_noinline = key.is_noinline,
.align_is_generic = key.alignment == null,
.cc_is_generic = key.cc == null,
.section_is_generic = key.section_is_generic,
.addrspace_is_generic = key.addrspace_is_generic,
},
});
if (key.comptime_bits != 0) ip.extra.appendAssumeCapacity(key.comptime_bits);
if (key.noalias_bits != 0) ip.extra.appendAssumeCapacity(key.noalias_bits);
ip.extra.appendSliceAssumeCapacity(@ptrCast(key.param_types));
ip.items.appendAssumeCapacity(.{
.tag = .type_function,
.data = func_type_extra_index,
});
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = ip.map.getOrPutAssumeCapacityAdapted(Key{
.func = indexToKeyFuncDecl(ip, @intCast(ip.items.len - 4)),
}, adapter);
if (!gop.found_existing) {
assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{ .error_union_type = .{
.error_set_type = @enumFromInt(ip.items.len - 2),
.payload_type = key.bare_return_type,
} }, adapter).found_existing);
assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{
.inferred_error_set_type = @enumFromInt(ip.items.len - 4),
}, adapter).found_existing);
assert(!ip.map.getOrPutAssumeCapacityAdapted(Key{
.func_type = extraFuncType(ip, func_type_extra_index),
}, adapter).found_existing);
return @enumFromInt(ip.items.len - 4);
}
// An existing function type was found; undo the additions to our two arrays.
ip.items.len -= 4;
ip.extra.items.len = prev_extra_len;
return @enumFromInt(gop.index);
}
/// Gets or creates an interned error set type for `names`, which must already
/// be sorted by string index (see `NullTerminatedString.indexLessThan`).
pub fn getErrorSetType(
    ip: *InternPool,
    gpa: Allocator,
    names: []const NullTerminatedString,
) Allocator.Error!Index {
    assert(std.sort.isSorted(NullTerminatedString, names, {}, NullTerminatedString.indexLessThan));

    // The strategy here is to add the type unconditionally, then to ask if it
    // already exists, and if so, revert the lengths of the mutated arrays.
    // This is similar to what `getOrPutTrailingString` does.
    const prev_extra_len = ip.extra.items.len;
    try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.ErrorSet).Struct.fields.len + names.len);
    try ip.items.ensureUnusedCapacity(gpa, 1);
    // Bug fix: reserve the map slot up front so that no allocation failure is
    // possible after the new key has been inserted into `ip.map` below. The
    // previous version called `addMap` after the insertion, swallowed its
    // error with an empty `catch`, and then returned an index into the
    // just-reverted arrays on OOM.
    try ip.maps.ensureUnusedCapacity(gpa, 1);

    const error_set_extra_index = ip.addExtraAssumeCapacity(Tag.ErrorSet{
        .names_len = @intCast(names.len),
        .names_map = @enumFromInt(ip.maps.items.len),
    });
    ip.items.appendAssumeCapacity(.{
        .tag = .type_error_set,
        .data = error_set_extra_index,
    });
    ip.extra.appendSliceAssumeCapacity(@ptrCast(names));

    const adapter: KeyAdapter = .{ .intern_pool = ip };
    // Bug fix: `indexToKeyErrorSetType` expects an index into `extra`, not
    // into `items`; the previous version passed `ip.items.len - 1`.
    const key = indexToKeyErrorSetType(ip, error_set_extra_index);
    const gop = try ip.map.getOrPutAdapted(gpa, key, adapter);
    if (!gop.found_existing) {
        // Cannot fail thanks to the `ensureUnusedCapacity` above.
        ip.maps.addOneAssumeCapacity().* = .{};
        // NOTE(review): unlike the `.error_set_type` branch of `get`, the
        // names are not inserted into the new map here, so
        // `ErrorSetType.nameIndex` would find nothing for types created via
        // this function — confirm whether a call to `addStringsToMap` is
        // needed.
        return @enumFromInt(ip.items.len - 1);
    }
    // An existing error set type was found; undo the additions to our arrays.
    ip.items.len -= 1;
    ip.extra.items.len = prev_extra_len;
    return @enumFromInt(gop.index);
}
pub const GetFuncInstanceKey = struct {
param_types: []const Index,
param_types: []Index,
noalias_bits: u32,
return_type: Index,
bare_return_type: Index,
cc: std.builtin.CallingConvention,
alignment: Alignment,
is_noinline: bool,
generic_owner: Index,
inferred_error_set: bool,
};
pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, key: GetFuncInstanceKey) Allocator.Error!Index {
pub fn getFuncInstance(ip: *InternPool, gpa: Allocator, arg: GetFuncInstanceKey) Allocator.Error!Index {
_ = ip;
_ = gpa;
_ = key;
_ = arg;
@panic("TODO");
//const func_ty = try ip.getFuncType(gpa, .{
// .param_types = arg.param_types,
// .bare_return_type = arg.bare_return_type,
// .comptime_bits = arg.comptime_bits,
// .noalias_bits = arg.noalias_bits,
// .alignment = arg.alignment,
// .cc = arg.cc,
// .is_var_args = arg.is_var_args,
// .is_generic = arg.is_generic,
// .is_noinline = arg.is_noinline,
// .section_is_generic = arg.section_is_generic,
// .addrspace_is_generic = arg.addrspace_is_generic,
// .inferred_error_set = arg.inferred_error_set,
//});
//const fn_owner_decl = ip.declPtr(arg.fn_owner_decl);
//const decl_index = try ip.createDecl(gpa, .{
// .name = undefined,
// .src_namespace = fn_owner_decl.src_namespace,
// .src_node = fn_owner_decl.src_node,
// .src_line = fn_owner_decl.src_line,
// .has_tv = true,
// .owns_tv = true,
// .ty = func_ty,
// .val = undefined,
// .alignment = .none,
// .@"linksection" = fn_owner_decl.@"linksection",
// .@"addrspace" = fn_owner_decl.@"addrspace",
// .analysis = .complete,
// .deletion_flag = false,
// .zir_decl_index = fn_owner_decl.zir_decl_index,
// .src_scope = fn_owner_decl.src_scope,
// .generation = arg.generation,
// .is_pub = fn_owner_decl.is_pub,
// .is_exported = fn_owner_decl.is_exported,
// .has_linksection_or_addrspace = fn_owner_decl.has_linksection_or_addrspace,
// .has_align = fn_owner_decl.has_align,
// .alive = true,
// .kind = .anon,
//});
//// TODO: improve this name
//const decl = ip.declPtr(decl_index);
//decl.name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{
// fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index),
//});
//const gop = try ip.map.getOrPutAdapted(gpa, Key{
// .func = .{
// .ty = func_ty,
// .generic_owner = .none,
// .owner_decl = decl_index,
// // Only the above fields will be read for hashing/equality.
// .analysis_extra_index = undefined,
// .zir_body_inst_extra_index = undefined,
// .branch_quota_extra_index = undefined,
// .resolved_error_set_extra_index = undefined,
// .zir_body_inst = undefined,
// .lbrace_line = undefined,
// .rbrace_line = undefined,
// .lbrace_column = undefined,
// .rbrace_column = undefined,
// .comptime_args = undefined,
// },
//}, KeyAdapter{ .intern_pool = ip });
//if (gop.found_existing) return @enumFromInt(gop.index);
//try ip.items.append(gpa, .{
// .tag = .func_decl,
// .data = try ip.addExtra(gpa, .{
// .analysis = .{
// .state = if (arg.cc == .Inline) .inline_only else .none,
// .is_cold = false,
// .is_noinline = arg.is_noinline,
// .calls_or_awaits_errorable_fn = false,
// .stack_alignment = .none,
// },
// .owner_decl = arg.owner_decl,
// .ty = func_ty,
// .zir_body_inst = arg.zir_body_inst,
// .lbrace_line = arg.lbrace_line,
// .rbrace_line = arg.rbrace_line,
// .lbrace_column = arg.lbrace_column,
// .rbrace_column = arg.rbrace_column,
// }),
//});
//const func_index: InternPool.Index = @enumFromInt(ip.items.len - 1);
//decl.val = func_index.toValue();
//return func_index;
}
/// Provides API for completing an enum type after calling `getIncompleteEnum`.
@@ -4576,15 +4862,15 @@ pub fn finishGetEnum(
.values_map = values_map,
}),
});
ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.names)));
ip.extra.appendSliceAssumeCapacity(@as([]const u32, @ptrCast(enum_type.values)));
return @as(Index, @enumFromInt(ip.items.len - 1));
ip.extra.appendSliceAssumeCapacity(@ptrCast(enum_type.names));
ip.extra.appendSliceAssumeCapacity(@ptrCast(enum_type.values));
return @enumFromInt(ip.items.len - 1);
}
/// Looks up `key` in the intern pool without inserting it.
/// Returns the existing `Index`, or null when `key` has not been interned.
pub fn getIfExists(ip: *const InternPool, key: Key) ?Index {
    const adapter: KeyAdapter = .{ .intern_pool = ip };
    // Adapted lookup: hashes/compares `key` directly against stored items.
    const index = ip.map.getIndexAdapted(key, adapter) orelse return null;
    return @enumFromInt(index);
}
pub fn getAssumeExists(ip: *const InternPool, key: Key) Index {
@@ -4622,7 +4908,7 @@ fn addIndexesToMap(
/// Appends a fresh, empty map to `ip.maps` and returns its index.
fn addMap(ip: *InternPool, gpa: Allocator) Allocator.Error!MapIndex {
    const ptr = try ip.maps.addOne(gpa);
    ptr.* = .{};
    // The newly added map is the last element, so its index is len - 1.
    return @enumFromInt(ip.maps.items.len - 1);
}
/// This operation only happens under compile error conditions.
@@ -4653,23 +4939,28 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
const result = @as(u32, @intCast(ip.extra.items.len));
inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
ip.extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
Index => @intFromEnum(@field(extra, field.name)),
Module.Decl.Index => @intFromEnum(@field(extra, field.name)),
Module.Namespace.Index => @intFromEnum(@field(extra, field.name)),
Module.Namespace.OptionalIndex => @intFromEnum(@field(extra, field.name)),
MapIndex => @intFromEnum(@field(extra, field.name)),
OptionalMapIndex => @intFromEnum(@field(extra, field.name)),
RuntimeIndex => @intFromEnum(@field(extra, field.name)),
String => @intFromEnum(@field(extra, field.name)),
NullTerminatedString => @intFromEnum(@field(extra, field.name)),
OptionalNullTerminatedString => @intFromEnum(@field(extra, field.name)),
i32 => @as(u32, @bitCast(@field(extra, field.name))),
Tag.TypePointer.Flags => @as(u32, @bitCast(@field(extra, field.name))),
Tag.TypeFunction.Flags => @as(u32, @bitCast(@field(extra, field.name))),
Tag.TypePointer.PackedOffset => @as(u32, @bitCast(@field(extra, field.name))),
Tag.TypePointer.VectorIndex => @intFromEnum(@field(extra, field.name)),
Tag.Variable.Flags => @as(u32, @bitCast(@field(extra, field.name))),
Index,
Module.Decl.Index,
Module.Namespace.Index,
Module.Namespace.OptionalIndex,
MapIndex,
OptionalMapIndex,
RuntimeIndex,
String,
NullTerminatedString,
OptionalNullTerminatedString,
Tag.TypePointer.VectorIndex,
=> @intFromEnum(@field(extra, field.name)),
u32,
i32,
FuncAnalysis,
Tag.TypePointer.Flags,
Tag.TypeFunction.Flags,
Tag.TypePointer.PackedOffset,
Tag.Variable.Flags,
=> @bitCast(@field(extra, field.name)),
else => @compileError("bad field type: " ++ @typeName(field.type)),
});
}
@@ -4720,8 +5011,6 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct
inline for (fields, 0..) |field, i| {
const int32 = ip.extra.items[i + index];
@field(result, field.name) = switch (field.type) {
u32 => int32,
Index,
Module.Decl.Index,
Module.Namespace.Index,
@@ -4735,6 +5024,7 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct
Tag.TypePointer.VectorIndex,
=> @enumFromInt(int32),
u32,
i32,
Tag.TypePointer.Flags,
Tag.TypeFunction.Flags,
@@ -5200,19 +5490,11 @@ pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType {
const tags = ip.items.items(.tag);
const datas = ip.items.items(.data);
switch (tags[@intFromEnum(val)]) {
.type_function => return indexToKeyFuncType(ip, datas[@intFromEnum(val)]),
.type_function => return extraFuncType(ip, datas[@intFromEnum(val)]),
else => return null,
}
}
/// If `val` refers to an inferred error set type, returns its
/// `Module.InferredErrorSet.Index`; otherwise returns `.none`.
/// Asserts that `val` is not `.none`.
pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.InferredErrorSet.OptionalIndex {
    assert(val != .none);
    const item_index = @intFromEnum(val);
    const tags = ip.items.items(.tag);
    if (tags[item_index] != .type_inferred_error_set) return .none;
    const datas = ip.items.items(.data);
    const ies_index: Module.InferredErrorSet.Index = @enumFromInt(datas[item_index]);
    return ies_index.toOptional();
}
/// includes .comptime_int_type
pub fn isIntegerType(ip: *const InternPool, ty: Index) bool {
return switch (ty) {
@@ -5284,6 +5566,10 @@ pub fn isAggregateType(ip: *const InternPool, ty: Index) bool {
};
}
/// Asserts `ty` is an error union type; returns its error set type index.
pub fn errorUnionSet(ip: *const InternPool, ty: Index) Index {
    const key = ip.indexToKey(ty);
    return key.error_union_type.error_set_type;
}
/// This is only legal because the initializer is not part of the hash.
pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void {
const item = ip.items.get(@intFromEnum(index));
@@ -5354,11 +5640,12 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
.type_optional => 0,
.type_anyframe => 0,
.type_error_union => @sizeOf(Key.ErrorUnionType),
.type_anyerror_union => 0,
.type_error_set => b: {
const info = ip.extraData(Tag.ErrorSet, data);
break :b @sizeOf(Tag.ErrorSet) + (@sizeOf(u32) * info.names_len);
},
.type_inferred_error_set => @sizeOf(Module.InferredErrorSet),
.type_inferred_error_set => 0,
.type_enum_explicit, .type_enum_nonexhaustive => @sizeOf(EnumExplicit),
.type_enum_auto => @sizeOf(EnumAuto),
.type_opaque => @sizeOf(Key.OpaqueType),
@@ -5506,6 +5793,7 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.type_optional,
.type_anyframe,
.type_error_union,
.type_anyerror_union,
.type_error_set,
.type_inferred_error_set,
.type_enum_explicit,
@@ -5598,14 +5886,6 @@ pub fn unionPtrConst(ip: *const InternPool, index: Module.Union.Index) *const Mo
return ip.allocated_unions.at(@intFromEnum(index));
}
/// Returns a mutable pointer to the `Module.InferredErrorSet` stored at `index`.
pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.InferredErrorSet.Index) *Module.InferredErrorSet {
    const i = @intFromEnum(index);
    return ip.allocated_inferred_error_sets.at(i);
}
/// Const variant of `inferredErrorSetPtr`.
pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.InferredErrorSet.Index) *const Module.InferredErrorSet {
    const i = @intFromEnum(index);
    return ip.allocated_inferred_error_sets.at(i);
}
pub fn declPtr(ip: *InternPool, index: Module.Decl.Index) *Module.Decl {
return ip.allocated_decls.at(@intFromEnum(index));
}
@@ -5658,28 +5938,6 @@ pub fn destroyUnion(ip: *InternPool, gpa: Allocator, index: Module.Union.Index)
};
}
/// Allocates storage for a new `Module.InferredErrorSet`, reusing a slot
/// released by `destroyInferredErrorSet` when one is available.
pub fn createInferredErrorSet(
    ip: *InternPool,
    gpa: Allocator,
    initialization: Module.InferredErrorSet,
) Allocator.Error!Module.InferredErrorSet.Index {
    // Prefer recycling a previously destroyed slot over growing the list.
    if (ip.inferred_error_sets_free_list.popOrNull()) |index| {
        ip.allocated_inferred_error_sets.at(@intFromEnum(index)).* = initialization;
        return index;
    }
    const ptr = try ip.allocated_inferred_error_sets.addOne(gpa);
    ptr.* = initialization;
    // Rely on result-type inference for the enum conversion, consistent with
    // `addMap` and `getIfExists` in this file.
    return @enumFromInt(ip.allocated_inferred_error_sets.len - 1);
}
/// Releases the inferred error set at `index` so its slot can be reused by
/// `createInferredErrorSet`. Never fails; see the comment inside.
pub fn destroyInferredErrorSet(ip: *InternPool, gpa: Allocator, index: Module.InferredErrorSet.Index) void {
    // Poison the slot so accidental use after destruction is detectable.
    ip.inferredErrorSetPtr(index).* = undefined;
    ip.inferred_error_sets_free_list.append(gpa, index) catch {
        // In order to keep `destroyInferredErrorSet` a non-fallible function, we ignore memory
        // allocation failures here, instead leaking the InferredErrorSet until garbage collection.
    };
}
pub fn createDecl(
ip: *InternPool,
gpa: Allocator,
@@ -5912,6 +6170,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.type_optional,
.type_anyframe,
.type_error_union,
.type_anyerror_union,
.type_error_set,
.type_inferred_error_set,
.type_enum_auto,
@@ -6236,7 +6495,10 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.type_optional => .Optional,
.type_anyframe => .AnyFrame,
.type_error_union => .ErrorUnion,
.type_error_union,
.type_anyerror_union,
=> .ErrorUnion,
.type_error_set,
.type_inferred_error_set,
@@ -6340,6 +6602,10 @@ pub fn funcAnalysis(ip: *const InternPool, i: Index) *FuncAnalysis {
return @ptrCast(&ip.extra.items[extra_index]);
}
/// Whether the function at `i` has an inferred error set, per its
/// `FuncAnalysis` state.
pub fn funcHasInferredErrorSet(ip: *const InternPool, i: Index) bool {
    const analysis = funcAnalysis(ip, i);
    return analysis.inferred_error_set;
}
pub fn funcZirBodyInst(ip: *const InternPool, i: Index) Zir.Inst.Index {
assert(i != .none);
const item = ip.items.get(@intFromEnum(i));
@@ -6356,3 +6622,43 @@ pub fn funcZirBodyInst(ip: *const InternPool, i: Index) Zir.Inst.Index {
};
return ip.extra.items[extra_index];
}
/// Given the index of an inferred error set type, returns the index of the
/// function body it originates from. Asserts that `ies_index` is an
/// inferred error set whose data refers to a `func_decl` or `func_instance`.
pub fn iesFuncIndex(ip: *const InternPool, ies_index: InternPool.Index) InternPool.Index {
    assert(ies_index != .none);
    const tags = ip.items.items(.tag);
    assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set);
    // The inferred error set's data field is the function item's index.
    const func_index = ip.items.items(.data)[@intFromEnum(ies_index)];
    const func_tag = tags[func_index];
    assert(func_tag == .func_decl or func_tag == .func_instance);
    return @enumFromInt(func_index);
}
/// Returns a mutable pointer to the resolved error set type of an inferred
/// error set function. The returned pointer is invalidated when anything is
/// added to `ip`.
pub fn iesResolved(ip: *const InternPool, ies_index: InternPool.Index) *InternPool.Index {
    assert(ies_index != .none);
    const tags = ip.items.items(.tag);
    const datas = ip.items.items(.data);
    assert(tags[@intFromEnum(ies_index)] == .type_inferred_error_set);
    // The data field is a raw u32; `funcIesResolved` takes an
    // `InternPool.Index` enum, so an explicit conversion is required here
    // (Zig does not implicitly coerce integers to enums).
    const func_index: InternPool.Index = @enumFromInt(datas[@intFromEnum(ies_index)]);
    return funcIesResolved(ip, func_index);
}
/// Returns a mutable pointer to the resolved error set type of an inferred
/// error set function. The returned pointer is invalidated when anything is
/// added to `ip`.
pub fn funcIesResolved(ip: *const InternPool, func_index: InternPool.Index) *InternPool.Index {
    const tags = ip.items.items(.tag);
    const datas = ip.items.items(.data);
    // Only functions flagged with an inferred error set carry the trailing
    // resolved-error-set slot computed below.
    assert(funcHasInferredErrorSet(ip, func_index));
    const func_start = datas[@intFromEnum(func_index)];
    // The slot lives in `extra` immediately after the fixed-size fields of
    // the function's payload (FuncDecl or FuncInstance), so offset by the
    // corresponding field count.
    const extra_index = switch (tags[@intFromEnum(func_index)]) {
        .func_decl => func_start + @typeInfo(Tag.FuncDecl).Struct.fields.len,
        .func_instance => func_start + @typeInfo(Tag.FuncInstance).Struct.fields.len,
        else => unreachable,
    };
    return @ptrCast(&ip.extra.items[extra_index]);
}
+18 -126
View File
@@ -1297,98 +1297,6 @@ pub const Union = struct {
}
};
/// Some extern function struct memory is owned by the Decl's TypedValue.Managed
/// arena allocator.
pub const ExternFn = struct {
    /// The Decl that corresponds to the function itself.
    owner_decl: Decl.Index,
    /// Library name if specified.
    /// For example `extern "c" fn write(...) usize` would have 'c' as library name.
    /// Allocated with Module's allocator; outlives the ZIR code.
    lib_name: ?[*:0]const u8,

    /// Frees the library name, if any. `gpa` must be the allocator it was
    /// allocated with.
    pub fn deinit(extern_fn: *ExternFn, gpa: Allocator) void {
        // Nothing to free when no library name was specified.
        const lib_name = extern_fn.lib_name orelse return;
        gpa.free(mem.sliceTo(lib_name, 0));
    }
};
/// This struct is used to keep track of any dependencies related to function
/// instances that return inferred error sets. Note that a function may be
/// associated to multiple different error sets, for example an inferred error
/// set which this function returns, but also any inferred error sets of called
/// inline or comptime functions.
pub const InferredErrorSet = struct {
    /// The function from which this error set originates.
    func: InternPool.Index,
    /// All currently known errors that this error set contains. This includes
    /// direct additions via `return error.Foo;`, and possibly also errors that
    /// are returned from any dependent functions. When the inferred error set is
    /// fully resolved, this map contains all the errors that the function might return.
    errors: NameMap = .{},
    /// Other inferred error sets which this inferred error set should include.
    inferred_error_sets: std.AutoArrayHashMapUnmanaged(InferredErrorSet.Index, void) = .{},
    /// Whether the function returned anyerror. This is true if either of
    /// the dependent functions returns anyerror.
    is_anyerror: bool = false,
    /// Whether this error set is already fully resolved. If true, resolving
    /// can skip resolving any dependents of this inferred error set.
    is_resolved: bool = false,
    /// Set of error names; only the keys are meaningful.
    pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
    /// Index into the pool of allocated inferred error sets.
    pub const Index = enum(u32) {
        _,
        /// Widens an `Index` to an `OptionalIndex`; never produces `.none`.
        pub fn toOptional(i: InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
            return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(i)));
        }
    };
    /// Like `Index`, but reserves maxInt(u32) to represent "no index".
    pub const OptionalIndex = enum(u32) {
        none = std.math.maxInt(u32),
        _,
        /// Converts an optional `Index` into an `OptionalIndex`.
        pub fn init(oi: ?InferredErrorSet.Index) InferredErrorSet.OptionalIndex {
            return @as(InferredErrorSet.OptionalIndex, @enumFromInt(@intFromEnum(oi orelse return .none)));
        }
        /// Converts back to an optional `Index`, mapping `.none` to null.
        pub fn unwrap(oi: InferredErrorSet.OptionalIndex) ?InferredErrorSet.Index {
            if (oi == .none) return null;
            return @as(InferredErrorSet.Index, @enumFromInt(@intFromEnum(oi)));
        }
    };
    /// Folds the errors of `err_set_ty` into this inferred error set:
    /// anyerror sets `is_anyerror`; explicit error sets add their names to
    /// `errors`; other inferred error sets are recorded as dependencies to be
    /// resolved later. Asserts `err_set_ty` is an error set type.
    pub fn addErrorSet(
        self: *InferredErrorSet,
        err_set_ty: Type,
        ip: *InternPool,
        gpa: Allocator,
    ) !void {
        switch (err_set_ty.toIntern()) {
            .anyerror_type => {
                self.is_anyerror = true;
            },
            else => switch (ip.indexToKey(err_set_ty.toIntern())) {
                .error_set_type => |error_set_type| {
                    for (error_set_type.names) |name| {
                        try self.errors.put(gpa, name, {});
                    }
                },
                .inferred_error_set_type => |ies_index| {
                    // Defer inclusion; resolution walks this dependency set.
                    try self.inferred_error_sets.put(gpa, ies_index, {});
                },
                else => unreachable,
            },
        }
    }
};
pub const DeclAdapter = struct {
mod: *Module,
@@ -3220,10 +3128,6 @@ pub fn structPtr(mod: *Module, index: Struct.Index) *Struct {
return mod.intern_pool.structPtr(index);
}
/// Convenience accessor forwarding to `InternPool.inferredErrorSetPtr`.
pub fn inferredErrorSetPtr(mod: *Module, index: InferredErrorSet.Index) *InferredErrorSet {
    const ip = &mod.intern_pool;
    return ip.inferredErrorSetPtr(index);
}
/// Like `namespacePtr`, but accepts an optional index and returns null for `.none`.
pub fn namespacePtrUnwrap(mod: *Module, index: Namespace.OptionalIndex) ?*Namespace {
    const namespace_index = index.unwrap() orelse return null;
    return mod.namespacePtr(namespace_index);
}
@@ -4261,6 +4165,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.owner_decl_index = new_decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@@ -4342,6 +4247,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
.owner_decl_index = decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@@ -5289,12 +5195,19 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
.owner_decl_index = decl_index,
.func_index = func_index,
.fn_ret_ty = fn_ty_info.return_type.toType(),
.fn_ret_ty_ies = null,
.owner_func_index = func_index,
.branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota),
.comptime_mutable_decls = &comptime_mutable_decls,
};
defer sema.deinit();
if (func.analysis(ip).inferred_error_set) {
const ies = try arena.create(Sema.InferredErrorSet);
ies.* = .{ .func = func_index };
sema.fn_ret_ty_ies = ies;
}
// reset in case calls to errorable functions are removed.
func.analysis(ip).calls_or_awaits_errorable_fn = false;
@@ -5433,7 +5346,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
inner_block.instructions.items.len);
const main_block_index = sema.addExtraAssumeCapacity(Air.Block{
.body_len = @as(u32, @intCast(inner_block.instructions.items.len)),
.body_len = @intCast(inner_block.instructions.items.len),
});
sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items);
sema.air_extra.items[@intFromEnum(Air.ExtraIndex.main_block)] = main_block_index;
@@ -5445,7 +5358,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
// Crucially, this happens *after* we set the function state to success above,
// so that dependencies on the function body will now be satisfied rather than
// result in circular dependency errors.
sema.resolveFnTypes(fn_ty) catch |err| switch (err) {
sema.resolveFnTypes(&inner_block, LazySrcLoc.nodeOffset(0), fn_ty) catch |err| switch (err) {
error.NeededSourceLocation => unreachable,
error.GenericPoison => unreachable,
error.ComptimeReturn => unreachable,
@@ -6595,7 +6508,8 @@ pub fn errorUnionType(mod: *Module, error_set_ty: Type, payload_ty: Type) Alloca
/// Returns the error set type containing exactly the single error `name`.
pub fn singleErrorSetType(mod: *Module, name: InternPool.NullTerminatedString) Allocator.Error!Type {
    // View the single name as a one-element array so the general error-set
    // constructor can be used; pointer-to-array coerces to a slice.
    const names: *const [1]InternPool.NullTerminatedString = &name;
    const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
    return new_ty.toType();
}
/// Sorts `names` in place.
@@ -6609,7 +6523,7 @@ pub fn errorSetFromUnsortedNames(
{},
InternPool.NullTerminatedString.indexLessThan,
);
const new_ty = try mod.intern(.{ .error_set_type = .{ .names = names } });
const new_ty = try mod.intern_pool.getErrorSetType(mod.gpa, names);
return new_ty.toType();
}
@@ -6956,16 +6870,6 @@ pub fn typeToFunc(mod: *Module, ty: Type) ?InternPool.Key.FuncType {
return mod.intern_pool.indexToFuncType(ty.toIntern());
}
/// Returns a pointer to the `InferredErrorSet` behind `ty`, or null when
/// `ty` is not an inferred error set type.
pub fn typeToInferredErrorSet(mod: *Module, ty: Type) ?*InferredErrorSet {
    const opt_index = typeToInferredErrorSetIndex(mod, ty);
    const index = opt_index.unwrap() orelse return null;
    return mod.inferredErrorSetPtr(index);
}
/// Returns the inferred error set index of `ty`, or `.none` when `ty` has no
/// InternPool index or is not an inferred error set type.
pub fn typeToInferredErrorSetIndex(mod: *Module, ty: Type) InferredErrorSet.OptionalIndex {
    return if (ty.ip_index == .none)
        .none
    else
        mod.intern_pool.indexToInferredErrorSetType(ty.toIntern());
}
/// Returns a pointer to the Decl that owns the function at `func_index`.
pub fn funcOwnerDeclPtr(mod: *Module, func_index: InternPool.Index) *Decl {
    const decl_index = mod.funcOwnerDeclIndex(func_index);
    return mod.declPtr(decl_index);
}
@@ -6974,6 +6878,10 @@ pub fn funcOwnerDeclIndex(mod: *Module, func_index: InternPool.Index) Decl.Index
return mod.funcInfo(func_index).owner_decl;
}
/// Forwards to `InternPool.iesFuncIndex`: maps an inferred error set type to
/// the function it originates from.
pub fn iesFuncIndex(mod: *const Module, ies_index: InternPool.Index) InternPool.Index {
    const ip = &mod.intern_pool;
    return ip.iesFuncIndex(ies_index);
}
/// Asserts `func_index` is a function; returns its `Key.Func` info.
pub fn funcInfo(mod: *Module, func_index: InternPool.Index) InternPool.Key.Func {
    const key = mod.intern_pool.indexToKey(func_index);
    return key.func;
}
@@ -7040,19 +6948,3 @@ pub fn getParamName(mod: *Module, func_index: InternPool.Index, index: u32) [:0]
else => unreachable,
};
}
/// Determines from the function's ZIR instruction whether it was declared
/// with an inferred error set.
pub fn hasInferredErrorSet(mod: *Module, func: InternPool.Key.Func) bool {
    const owner_decl = mod.declPtr(func.owner_decl);
    const zir = owner_decl.getFileScope(mod).zir;
    const zir_tags = zir.instructions.items(.tag);
    return switch (zir_tags[func.zir_body_inst]) {
        .func => false,
        .func_inferred => true,
        // `func_fancy` carries an explicit flag in its extra data.
        .func_fancy => blk: {
            const inst_data = zir.instructions.items(.data)[func.zir_body_inst].pl_node;
            const extra = zir.extraData(Zir.Inst.FuncFancy, inst_data.payload_index);
            break :blk extra.data.bits.is_inferred_error;
        },
        else => unreachable,
    };
}
+436 -325
View File
@@ -38,6 +38,10 @@ error_return_trace_index_on_fn_entry: Air.Inst.Ref = .none,
/// generic function which uses a type expression for the return type.
/// The type will be `void` in the case that `func` is `null`.
fn_ret_ty: Type,
/// In case of the return type being an error union with an inferred error
/// set, this is the inferred error set. `null` otherwise. Allocated with
/// `Sema.arena`.
fn_ret_ty_ies: ?*InferredErrorSet,
branch_quota: u32 = default_branch_quota,
branch_count: u32 = 0,
/// Populated when returning `error.ComptimeBreak`. Used to communicate the
@@ -128,6 +132,46 @@ const Alignment = InternPool.Alignment;
pub const default_branch_quota = 1000;
pub const default_reference_trace_len = 2;
/// Working memory for an inferred error set while the corresponding function
/// body is being analyzed. Allocated from `Sema.arena`, so it expires when
/// the Sema for that function finishes.
pub const InferredErrorSet = struct {
    /// The function body from which this error set originates.
    func: InternPool.Index,
    /// All currently known errors that this error set contains. This includes
    /// direct additions via `return error.Foo;`, and possibly also errors that
    /// are returned from any dependent functions. When the inferred error set is
    /// fully resolved, this map contains all the errors that the function might return.
    errors: NameMap = .{},
    /// Other inferred error sets which this inferred error set should include.
    inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
    /// Set of error names; only the keys are meaningful.
    pub const NameMap = std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void);
    /// Folds the errors of `err_set_ty` into this inferred error set.
    /// anyerror is recorded directly in the InternPool via `funcIesResolved`;
    /// explicit error set names are added to `errors`; other inferred error
    /// sets are recorded as dependencies. Asserts `err_set_ty` is an error
    /// set type.
    pub fn addErrorSet(
        self: *InferredErrorSet,
        err_set_ty: Type,
        ip: *InternPool,
        arena: Allocator,
    ) !void {
        switch (err_set_ty.toIntern()) {
            .anyerror_type => {
                // Once anyerror is reached, the resolved error set is final.
                ip.funcIesResolved(self.func).* = .anyerror_type;
            },
            else => switch (ip.indexToKey(err_set_ty.toIntern())) {
                .error_set_type => |error_set_type| {
                    for (error_set_type.names.get(ip)) |name| {
                        try self.errors.put(arena, name, {});
                    }
                },
                .inferred_error_set_type => {
                    // Defer inclusion; resolution walks this dependency set.
                    try self.inferred_error_sets.put(arena, err_set_ty.toIntern(), {});
                },
                else => unreachable,
            },
        }
    }
};
/// Stores the mapping from `Zir.Inst.Index -> Air.Inst.Ref`, which is used by sema to resolve
/// instructions during analysis.
/// Instead of a hash table approach, InstMap is simply a slice that is indexed into using the
@@ -1120,7 +1164,7 @@ fn analyzeBodyInner(
.shl_sat => try sema.zirShl(block, inst, .shl_sat),
.ret_ptr => try sema.zirRetPtr(block),
.ret_type => try sema.addType(sema.fn_ret_ty),
.ret_type => Air.internedToRef(sema.fn_ret_ty.toIntern()),
// Instructions that we know to *always* be noreturn based solely on their tag.
// These functions match the return type of analyzeBody so that we can
@@ -3392,7 +3436,7 @@ fn zirErrorSetDecl(
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.ErrorSetDecl, inst_data.payload_index);
var names: Module.InferredErrorSet.NameMap = .{};
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, extra.data.fields_len);
var extra_index = @as(u32, @intCast(extra.end));
@@ -6933,12 +6977,10 @@ fn analyzeCall(
.return_type = owner_info.return_type,
.comptime_bits = 0,
.noalias_bits = owner_info.noalias_bits,
.alignment = owner_info.alignment,
.cc = owner_info.cc,
.alignment = if (owner_info.align_is_generic) null else owner_info.alignment,
.cc = if (owner_info.cc_is_generic) null else owner_info.cc,
.is_var_args = owner_info.is_var_args,
.is_noinline = owner_info.is_noinline,
.align_is_generic = owner_info.align_is_generic,
.cc_is_generic = owner_info.cc_is_generic,
.section_is_generic = owner_info.section_is_generic,
.addrspace_is_generic = owner_info.addrspace_is_generic,
.is_generic = owner_info.is_generic,
@@ -7001,21 +7043,25 @@ fn analyzeCall(
try sema.resolveInst(fn_info.ret_ty_ref);
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
// Create a fresh inferred error set type for inline/comptime calls.
const fn_ret_ty = blk: {
if (mod.hasInferredErrorSet(module_fn)) {
const ies_index = try mod.intern_pool.createInferredErrorSet(gpa, .{
.func = module_fn_index,
});
const error_set_ty = try mod.intern(.{ .inferred_error_set_type = ies_index });
break :blk try mod.errorUnionType(error_set_ty.toType(), bare_return_type);
}
break :blk bare_return_type;
};
new_fn_info.return_type = fn_ret_ty.toIntern();
const parent_fn_ret_ty = sema.fn_ret_ty;
sema.fn_ret_ty = fn_ret_ty;
const parent_fn_ret_ty_ies = sema.fn_ret_ty_ies;
sema.fn_ret_ty = bare_return_type;
sema.fn_ret_ty_ies = null;
defer sema.fn_ret_ty = parent_fn_ret_ty;
defer sema.fn_ret_ty_ies = parent_fn_ret_ty_ies;
if (module_fn.analysis(ip).inferred_error_set) {
// Create a fresh inferred error set type for inline/comptime calls.
const error_set_ty = try mod.intern(.{ .inferred_error_set_type = module_fn_index });
const ies = try sema.arena.create(InferredErrorSet);
ies.* = .{ .func = module_fn_index };
sema.fn_ret_ty_ies = ies;
sema.fn_ret_ty = (try ip.get(gpa, .{ .error_union_type = .{
.error_set_type = error_set_ty,
.payload_type = bare_return_type.toIntern(),
} })).toType();
ip.funcIesResolved(module_fn_index).* = .none;
}
// This `res2` is here instead of directly breaking from `res` due to a stage1
// bug generating invalid LLVM IR.
@@ -7059,7 +7105,7 @@ fn analyzeCall(
}
if (is_comptime_call and ensure_result_used) {
try sema.ensureResultUsed(block, fn_ret_ty, call_src);
try sema.ensureResultUsed(block, sema.fn_ret_ty, call_src);
}
const result = result: {
@@ -7089,7 +7135,7 @@ fn analyzeCall(
if (should_memoize and is_comptime_call) {
const result_val = try sema.resolveConstMaybeUndefVal(block, .unneeded, result, "");
const result_interned = try result_val.intern(fn_ret_ty, mod);
const result_interned = try result_val.intern(sema.fn_ret_ty, mod);
// TODO: check whether any external comptime memory was mutated by the
// comptime function call. If so, then do not memoize the call here.
@@ -7114,7 +7160,7 @@ fn analyzeCall(
if (i < fn_params_len) {
const opts: CoerceOpts = .{ .param_src = .{
.func_inst = func,
.param_i = @as(u32, @intCast(i)),
.param_i = @intCast(i),
} };
const param_ty = func_ty_info.param_types.get(ip)[i].toType();
args[i] = sema.analyzeCallArg(
@@ -7433,6 +7479,7 @@ fn instantiateGenericCall(
.owner_decl_index = sema.owner_decl_index,
.func_index = sema.owner_func_index,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_args = comptime_args,
.generic_owner = generic_owner,
@@ -7769,6 +7816,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
defer tracy.end();
const mod = sema.mod;
const ip = &mod.intern_pool;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = extra.node };
@@ -7779,7 +7827,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
if (val.isUndef(mod)) {
return sema.addConstUndef(Type.err_int);
}
const err_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
const err_name = ip.indexToKey(val.toIntern()).err.name;
return sema.addConstant(try mod.intValue(
Type.err_int,
try mod.getErrorValue(err_name),
@@ -7787,17 +7835,19 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
}
const op_ty = sema.typeOf(uncasted_operand);
try sema.resolveInferredErrorSetTy(block, src, op_ty);
if (!op_ty.isAnyError(mod)) {
const names = op_ty.errorSetNames(mod);
switch (names.len) {
0 => return sema.addConstant(try mod.intValue(Type.err_int, 0)),
1 => {
const int = @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(names[0]).?));
return sema.addIntUnsigned(Type.err_int, int);
},
else => {},
}
switch (try sema.resolveInferredErrorSetTy(block, src, op_ty.toIntern())) {
.anyerror_type => {},
else => |err_set_ty_index| {
const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
switch (names.len) {
0 => return sema.addConstant(try mod.intValue(Type.err_int, 0)),
1 => {
const int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[0]).?);
return sema.addIntUnsigned(Type.err_int, int);
},
else => {},
}
},
}
try sema.requireRuntimeBlock(block, src, operand_src);
@@ -7846,6 +7896,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
defer tracy.end();
const mod = sema.mod;
const ip = &mod.intern_pool;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node };
@@ -7874,23 +7925,25 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
return Air.Inst.Ref.anyerror_type;
}
if (mod.typeToInferredErrorSetIndex(lhs_ty).unwrap()) |ies_index| {
try sema.resolveInferredErrorSet(block, src, ies_index);
// isAnyError might have changed from a false negative to a true positive after resolution.
if (lhs_ty.isAnyError(mod)) {
return Air.Inst.Ref.anyerror_type;
if (ip.isInferredErrorSetType(lhs_ty.toIntern())) {
switch (try sema.resolveInferredErrorSet(block, src, lhs_ty.toIntern())) {
// isAnyError might have changed from a false negative to a true
// positive after resolution.
.anyerror_type => return .anyerror_type,
else => {},
}
}
if (mod.typeToInferredErrorSetIndex(rhs_ty).unwrap()) |ies_index| {
try sema.resolveInferredErrorSet(block, src, ies_index);
// isAnyError might have changed from a false negative to a true positive after resolution.
if (rhs_ty.isAnyError(mod)) {
return Air.Inst.Ref.anyerror_type;
if (ip.isInferredErrorSetType(rhs_ty.toIntern())) {
switch (try sema.resolveInferredErrorSet(block, src, rhs_ty.toIntern())) {
// isAnyError might have changed from a false negative to a true
// positive after resolution.
.anyerror_type => return .anyerror_type,
else => {},
}
}
const err_set_ty = try sema.errorSetMerge(lhs_ty, rhs_ty);
return sema.addType(err_set_ty);
return Air.internedToRef(err_set_ty.toIntern());
}
fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -8569,6 +8622,12 @@ fn checkCallConvSupportsVarArgs(sema: *Sema, block: *Block, src: LazySrcLoc, cc:
}
}
/// Link section of a function under analysis, as passed to `funcCommon`.
const Section = union(enum) {
    // NOTE(review): presumably the section depends on comptime arguments in
    // this case (`funcCommon` asserts `section != .generic` for instances) —
    // confirm against callers.
    generic,
    // No explicit linksection was provided.
    default,
    // Explicit `linksection(...)` name.
    explicit: InternPool.NullTerminatedString,
};
fn funcCommon(
sema: *Sema,
block: *Block,
@@ -8578,7 +8637,7 @@ fn funcCommon(
alignment: ?Alignment,
/// null means generic poison
address_space: ?std.builtin.AddressSpace,
section: InternPool.GetFuncDeclKey.Section,
section: Section,
/// null means generic poison
cc: ?std.builtin.CallingConvention,
/// this might be Type.generic_poison
@@ -8709,6 +8768,36 @@ fn funcCommon(
const param_types = block.params.items(.ty);
const opt_func_index: InternPool.Index = i: {
if (!is_source_decl) {
assert(has_body);
assert(!is_generic);
assert(comptime_bits == 0);
assert(cc != null);
assert(section != .generic);
assert(address_space != null);
assert(!var_args);
break :i try ip.getFuncInstance(gpa, .{
.param_types = param_types,
.noalias_bits = noalias_bits,
.bare_return_type = bare_return_type.toIntern(),
.cc = cc_resolved,
.alignment = alignment.?,
.is_noinline = is_noinline,
.inferred_error_set = inferred_error_set,
.generic_owner = sema.generic_owner,
});
}
// extern_func and func_decl functions take ownership of `sema.owner_decl`.
sema.owner_decl.@"linksection" = switch (section) {
.generic => .none,
.default => .none,
.explicit => |section_name| section_name.toOptional(),
};
sema.owner_decl.alignment = alignment orelse .none;
sema.owner_decl.@"addrspace" = address_space orelse .generic;
if (is_extern) {
assert(comptime_bits == 0);
assert(cc != null);
@@ -8734,26 +8823,19 @@ fn funcCommon(
if (!has_body) break :i .none;
if (is_source_decl) {
if (inferred_error_set)
try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
if (inferred_error_set) {
try sema.validateErrorUnionPayloadType(block, bare_return_type, ret_ty_src);
break :i try ip.getFuncDeclIes(gpa, .{
.owner_decl = sema.owner_decl_index,
const fn_owner_decl = if (sema.generic_owner != .none)
mod.funcOwnerDeclIndex(sema.generic_owner)
else
sema.owner_decl_index;
break :i try ip.getFuncDecl(gpa, .{
.fn_owner_decl = fn_owner_decl,
.param_types = param_types,
.noalias_bits = noalias_bits,
.comptime_bits = comptime_bits,
.return_type = bare_return_type.toIntern(),
.inferred_error_set = inferred_error_set,
.bare_return_type = bare_return_type.toIntern(),
.cc = cc,
.alignment = alignment,
.section = section,
.address_space = address_space,
.section_is_generic = section == .generic,
.addrspace_is_generic = address_space == null,
.is_var_args = var_args,
.is_generic = final_is_generic,
.is_noinline = is_noinline,
@@ -8766,22 +8848,30 @@ fn funcCommon(
});
}
assert(!is_generic);
assert(comptime_bits == 0);
assert(cc != null);
assert(section != .generic);
assert(address_space != null);
assert(!var_args);
break :i try ip.getFuncInstance(gpa, .{
const func_ty = try ip.getFuncType(gpa, .{
.param_types = param_types,
.noalias_bits = noalias_bits,
.comptime_bits = comptime_bits,
.return_type = bare_return_type.toIntern(),
.cc = cc_resolved,
.alignment = alignment.?,
.cc = cc,
.alignment = alignment,
.section_is_generic = section == .generic,
.addrspace_is_generic = address_space == null,
.is_var_args = var_args,
.is_generic = final_is_generic,
.is_noinline = is_noinline,
});
.generic_owner = sema.generic_owner,
break :i try ip.getFuncDecl(gpa, .{
.owner_decl = sema.owner_decl_index,
.ty = func_ty,
.cc = cc,
.is_noinline = is_noinline,
.zir_body_inst = func_inst,
.lbrace_line = src_locs.lbrace_line,
.rbrace_line = src_locs.rbrace_line,
.lbrace_column = @as(u16, @truncate(src_locs.columns)),
.rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)),
});
};
@@ -8913,10 +9003,8 @@ fn funcCommon(
.noalias_bits = noalias_bits,
.comptime_bits = comptime_bits,
.return_type = return_type.toIntern(),
.cc = cc_resolved,
.cc_is_generic = cc == null,
.alignment = alignment orelse .none,
.align_is_generic = alignment == null,
.cc = cc,
.alignment = alignment,
.section_is_generic = section == .generic,
.addrspace_is_generic = address_space == null,
.is_var_args = var_args,
@@ -10254,7 +10342,7 @@ const SwitchProngAnalysis = struct {
return sema.bitCast(block, item_ty, spa.operand, operand_src, null);
}
var names: Module.InferredErrorSet.NameMap = .{};
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, case_vals.len);
for (case_vals) |err| {
const err_val = sema.resolveConstValue(block, .unneeded, err, "") catch unreachable;
@@ -10622,97 +10710,100 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r
}
}
try sema.resolveInferredErrorSetTy(block, src, operand_ty);
if (operand_ty.isAnyError(mod)) {
if (special_prong != .@"else") {
return sema.fail(
block,
src,
"else prong required when switching on type 'anyerror'",
.{},
);
}
else_error_ty = Type.anyerror;
} else else_validation: {
var maybe_msg: ?*Module.ErrorMsg = null;
errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
for (operand_ty.errorSetNames(mod)) |error_name| {
if (!seen_errors.contains(error_name) and special_prong != .@"else") {
const msg = maybe_msg orelse blk: {
maybe_msg = try sema.errMsg(
block,
src,
"switch must handle all possibilities",
.{},
);
break :blk maybe_msg.?;
};
try sema.errNote(
switch (try sema.resolveInferredErrorSetTy(block, src, operand_ty.toIntern())) {
.anyerror_type => {
if (special_prong != .@"else") {
return sema.fail(
block,
src,
msg,
"unhandled error value: 'error.{}'",
.{error_name.fmt(ip)},
"else prong required when switching on type 'anyerror'",
.{},
);
}
}
else_error_ty = Type.anyerror;
},
else => |err_set_ty_index| else_validation: {
const error_names = ip.indexToKey(err_set_ty_index).error_set_type.names;
var maybe_msg: ?*Module.ErrorMsg = null;
errdefer if (maybe_msg) |msg| msg.destroy(sema.gpa);
if (maybe_msg) |msg| {
maybe_msg = null;
try sema.addDeclaredHereNote(msg, operand_ty);
return sema.failWithOwnedErrorMsg(msg);
}
for (error_names.get(ip)) |error_name| {
if (!seen_errors.contains(error_name) and special_prong != .@"else") {
const msg = maybe_msg orelse blk: {
maybe_msg = try sema.errMsg(
block,
src,
"switch must handle all possibilities",
.{},
);
break :blk maybe_msg.?;
};
if (special_prong == .@"else" and seen_errors.count() == operand_ty.errorSetNames(mod).len) {
// In order to enable common patterns for generic code allow simple else bodies
// else => unreachable,
// else => return,
// else => |e| return e,
// even if all the possible errors were already handled.
const tags = sema.code.instructions.items(.tag);
for (special.body) |else_inst| switch (tags[else_inst]) {
.dbg_block_begin,
.dbg_block_end,
.dbg_stmt,
.dbg_var_val,
.ret_type,
.as_node,
.ret_node,
.@"unreachable",
.@"defer",
.defer_err_code,
.err_union_code,
.ret_err_value_code,
.restore_err_ret_index,
.is_non_err,
.ret_is_non_err,
.condbr,
=> {},
else => break,
} else break :else_validation;
try sema.errNote(
block,
src,
msg,
"unhandled error value: 'error.{}'",
.{error_name.fmt(ip)},
);
}
}
return sema.fail(
block,
special_prong_src,
"unreachable else prong; all cases already handled",
.{},
);
}
if (maybe_msg) |msg| {
maybe_msg = null;
try sema.addDeclaredHereNote(msg, operand_ty);
return sema.failWithOwnedErrorMsg(msg);
}
const error_names = operand_ty.errorSetNames(mod);
var names: Module.InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, error_names.len);
for (error_names) |error_name| {
if (seen_errors.contains(error_name)) continue;
if (special_prong == .@"else" and
seen_errors.count() == error_names.len)
{
// In order to enable common patterns for generic code allow simple else bodies
// else => unreachable,
// else => return,
// else => |e| return e,
// even if all the possible errors were already handled.
const tags = sema.code.instructions.items(.tag);
for (special.body) |else_inst| switch (tags[else_inst]) {
.dbg_block_begin,
.dbg_block_end,
.dbg_stmt,
.dbg_var_val,
.ret_type,
.as_node,
.ret_node,
.@"unreachable",
.@"defer",
.defer_err_code,
.err_union_code,
.ret_err_value_code,
.restore_err_ret_index,
.is_non_err,
.ret_is_non_err,
.condbr,
=> {},
else => break,
} else break :else_validation;
names.putAssumeCapacityNoClobber(error_name, {});
}
// No need to keep the hash map metadata correct; here we
// extract the (sorted) keys only.
else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
return sema.fail(
block,
special_prong_src,
"unreachable else prong; all cases already handled",
.{},
);
}
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, error_names.len);
for (error_names.get(ip)) |error_name| {
if (seen_errors.contains(error_name)) continue;
names.putAssumeCapacityNoClobber(error_name, {});
}
// No need to keep the hash map metadata correct; here we
// extract the (sorted) keys only.
else_error_ty = try mod.errorSetFromUnsortedNames(names.keys());
},
}
},
.Int, .ComptimeInt => {
@@ -16444,50 +16535,51 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.queueFullTypeResolution(error_field_ty);
// If the error set is inferred it must be resolved at this point
try sema.resolveInferredErrorSetTy(block, src, ty);
// Build our list of Error values
// Optional value is only null if anyerror
// Value can be zero-length slice otherwise
const error_field_vals = if (ty.isAnyError(mod)) null else blk: {
const vals = try sema.arena.alloc(InternPool.Index, ty.errorSetNames(mod).len);
for (vals, 0..) |*field_val, i| {
// TODO: write something like getCoercedInts to avoid needing to dupe
const name = try sema.arena.dupe(u8, ip.stringToSlice(ty.errorSetNames(mod)[i]));
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const new_decl_ty = try mod.arrayType(.{
.len = name.len,
.child = .u8_type,
});
const new_decl = try anon_decl.finish(
new_decl_ty,
(try mod.intern(.{ .aggregate = .{
.ty = new_decl_ty.toIntern(),
.storage = .{ .bytes = name },
} })).toValue(),
.none, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
.ty = .slice_const_u8_type,
.addr = .{ .decl = new_decl },
.len = (try mod.intValue(Type.usize, name.len)).toIntern(),
const error_field_vals = switch (try sema.resolveInferredErrorSetTy(block, src, ty.toIntern())) {
.anyerror_type => null,
else => |err_set_ty_index| blk: {
const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
const vals = try sema.arena.alloc(InternPool.Index, names.len);
for (vals, 0..) |*field_val, i| {
// TODO: write something like getCoercedInts to avoid needing to dupe
const name = try sema.arena.dupe(u8, ip.stringToSlice(names.get(ip)[i]));
const name_val = v: {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const new_decl_ty = try mod.arrayType(.{
.len = name.len,
.child = .u8_type,
});
const new_decl = try anon_decl.finish(
new_decl_ty,
(try mod.intern(.{ .aggregate = .{
.ty = new_decl_ty.toIntern(),
.storage = .{ .bytes = name },
} })).toValue(),
.none, // default alignment
);
break :v try mod.intern(.{ .ptr = .{
.ty = .slice_const_u8_type,
.addr = .{ .decl = new_decl },
.len = (try mod.intValue(Type.usize, name.len)).toIntern(),
} });
};
const error_field_fields = .{
// name: []const u8,
name_val,
};
field_val.* = try mod.intern(.{ .aggregate = .{
.ty = error_field_ty.toIntern(),
.storage = .{ .elems = &error_field_fields },
} });
};
}
const error_field_fields = .{
// name: []const u8,
name_val,
};
field_val.* = try mod.intern(.{ .aggregate = .{
.ty = error_field_ty.toIntern(),
.storage = .{ .elems = &error_field_fields },
} });
}
break :blk vals;
break :blk vals;
},
};
// Build our ?[]const Error value
@@ -18055,7 +18147,9 @@ fn addToInferredErrorSet(sema: *Sema, uncasted_operand: Air.Inst.Ref) !void {
const ip = &mod.intern_pool;
assert(sema.fn_ret_ty.zigTypeTag(mod) == .ErrorUnion);
if (mod.typeToInferredErrorSet(sema.fn_ret_ty.errorUnionSet(mod))) |ies| {
if (ip.isInferredErrorSetType(sema.fn_ret_ty.errorUnionSet(mod).toIntern())) {
const ies = sema.fn_ret_ty_ies.?;
assert(ies.func == sema.func_index);
const op_ty = sema.typeOf(uncasted_operand);
switch (op_ty.zigTypeTag(mod)) {
.ErrorSet => try ies.addErrorSet(op_ty, ip, gpa),
@@ -19508,7 +19602,7 @@ fn zirReify(
return sema.addType(Type.anyerror);
const len = try sema.usizeCast(block, src, payload_val.sliceLen(mod));
var names: Module.InferredErrorSet.NameMap = .{};
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(sema.arena, len);
for (0..len) |i| {
const elem_val = try payload_val.elemValue(mod, i);
@@ -20019,8 +20113,6 @@ fn zirReify(
.is_var_args = is_var_args,
.is_generic = false,
.is_noinline = false,
.align_is_generic = false,
.cc_is_generic = false,
.section_is_generic = false,
.addrspace_is_generic = false,
});
@@ -20524,8 +20616,8 @@ fn zirErrSetCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
break :disjoint true;
}
try sema.resolveInferredErrorSetTy(block, src, dest_ty);
try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty);
_ = try sema.resolveInferredErrorSetTy(block, src, dest_ty.toIntern());
_ = try sema.resolveInferredErrorSetTy(block, operand_src, operand_ty.toIntern());
for (dest_ty.errorSetNames(mod)) |dest_err_name| {
if (Type.errorSetHasFieldIp(ip, operand_ty.toIntern(), dest_err_name))
break :disjoint false;
@@ -23505,7 +23597,7 @@ fn zirFuncFancy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
break :blk mod.toEnum(std.builtin.AddressSpace, addrspace_tv.val);
} else target_util.defaultAddressSpace(target, .function);
const section: InternPool.GetFuncDeclKey.Section = if (extra.data.bits.has_section_body) blk: {
const section: Section = if (extra.data.bits.has_section_body) blk: {
const body_len = sema.code.extra[extra_index];
extra_index += 1;
const body = sema.code.extra[extra_index..][0..body_len];
@@ -27750,42 +27842,22 @@ fn coerceInMemoryAllowedErrorSets(
return .ok;
}
if (mod.typeToInferredErrorSetIndex(dest_ty).unwrap()) |dst_ies_index| {
const dst_ies = mod.inferredErrorSetPtr(dst_ies_index);
// We will make an effort to return `ok` without resolving either error set, to
// avoid unnecessary "unable to resolve error set" dependency loop errors.
switch (src_ty.toIntern()) {
.anyerror_type => {},
else => switch (ip.indexToKey(src_ty.toIntern())) {
.inferred_error_set_type => |src_index| {
// If both are inferred error sets of functions, and
// the dest includes the source function, the coercion is OK.
// This check is important because it works without forcing a full resolution
// of inferred error sets.
if (dst_ies.inferred_error_sets.contains(src_index)) {
return .ok;
}
},
.error_set_type => |error_set_type| {
for (error_set_type.names) |name| {
if (!dst_ies.errors.contains(name)) break;
} else return .ok;
},
else => unreachable,
},
if (ip.isInferredErrorSetType(dest_ty.toIntern())) {
const dst_ies_func_index = ip.iesFuncIndex(dest_ty.toIntern());
if (sema.fn_ret_ty_ies) |dst_ies| {
if (dst_ies.func == dst_ies_func_index) {
// We are trying to coerce an error set to the current function's
// inferred error set.
try dst_ies.addErrorSet(src_ty, ip, gpa);
return .ok;
}
}
if (dst_ies.func == sema.owner_func_index) {
// We are trying to coerce an error set to the current function's
// inferred error set.
try dst_ies.addErrorSet(src_ty, ip, gpa);
return .ok;
}
try sema.resolveInferredErrorSet(block, dest_src, dst_ies_index);
// isAnyError might have changed from a false negative to a true positive after resolution.
if (dest_ty.isAnyError(mod)) {
return .ok;
switch (try sema.resolveInferredErrorSet(block, dest_src, dest_ty.toIntern())) {
// isAnyError might have changed from a false negative to a true
// positive after resolution.
.anyerror_type => return .ok,
else => {},
}
}
@@ -27800,17 +27872,15 @@ fn coerceInMemoryAllowedErrorSets(
},
else => switch (ip.indexToKey(src_ty.toIntern())) {
.inferred_error_set_type => |src_index| {
const src_data = mod.inferredErrorSetPtr(src_index);
try sema.resolveInferredErrorSet(block, src_src, src_index);
.inferred_error_set_type => {
const resolved_src_ty = try sema.resolveInferredErrorSet(block, src_src, src_ty.toIntern());
// src anyerror status might have changed after the resolution.
if (src_ty.isAnyError(mod)) {
if (resolved_src_ty == .anyerror_type) {
// dest_ty.isAnyError(mod) == true is already checked for at this point.
return .from_anyerror;
}
for (src_data.errors.keys()) |key| {
for (ip.indexToKey(resolved_src_ty).error_set_type.names.get(ip)) |key| {
if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), key)) {
try missing_error_buf.append(key);
}
@@ -27825,7 +27895,7 @@ fn coerceInMemoryAllowedErrorSets(
return .ok;
},
.error_set_type => |error_set_type| {
for (error_set_type.names) |name| {
for (error_set_type.names.get(ip)) |name| {
if (!Type.errorSetHasFieldIp(ip, dest_ty.toIntern(), name)) {
try missing_error_buf.append(name);
}
@@ -30341,73 +30411,72 @@ fn analyzeIsNonErrComptimeOnly(
operand: Air.Inst.Ref,
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const ip = &mod.intern_pool;
const operand_ty = sema.typeOf(operand);
const ot = operand_ty.zigTypeTag(mod);
if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true;
if (ot == .ErrorSet) return Air.Inst.Ref.bool_false;
if (ot != .ErrorSet and ot != .ErrorUnion) return .bool_true;
if (ot == .ErrorSet) return .bool_false;
assert(ot == .ErrorUnion);
const payload_ty = operand_ty.errorUnionPayload(mod);
if (payload_ty.zigTypeTag(mod) == .NoReturn) {
return Air.Inst.Ref.bool_false;
return .bool_false;
}
if (Air.refToIndex(operand)) |operand_inst| {
switch (sema.air_instructions.items(.tag)[operand_inst]) {
.wrap_errunion_payload => return Air.Inst.Ref.bool_true,
.wrap_errunion_err => return Air.Inst.Ref.bool_false,
.wrap_errunion_payload => return .bool_true,
.wrap_errunion_err => return .bool_false,
else => {},
}
} else if (operand == .undef) {
return sema.addConstUndef(Type.bool);
} else if (@intFromEnum(operand) < InternPool.static_len) {
// None of the ref tags can be errors.
return Air.Inst.Ref.bool_true;
return .bool_true;
}
const maybe_operand_val = try sema.resolveMaybeUndefVal(operand);
// exception if the error union error set is known to be empty,
// we allow the comparison but always make it comptime-known.
const set_ty = operand_ty.errorUnionSet(mod);
switch (set_ty.toIntern()) {
const set_ty = ip.errorUnionSet(operand_ty.toIntern());
switch (set_ty) {
.anyerror_type => {},
else => switch (mod.intern_pool.indexToKey(set_ty.toIntern())) {
else => switch (ip.indexToKey(set_ty)) {
.error_set_type => |error_set_type| {
if (error_set_type.names.len == 0) return Air.Inst.Ref.bool_true;
if (error_set_type.names.len == 0) return .bool_true;
},
.inferred_error_set_type => |ies_index| blk: {
.inferred_error_set_type => |func_index| blk: {
// If the error set is empty, we must return a comptime true or false.
// However we want to avoid unnecessarily resolving an inferred error set
// in case it is already non-empty.
const ies = mod.inferredErrorSetPtr(ies_index);
if (ies.is_anyerror) break :blk;
if (ies.errors.count() != 0) break :blk;
switch (ip.funcIesResolved(func_index).*) {
.anyerror_type => break :blk,
.none => {},
else => |i| if (ip.indexToKey(i).error_set_type.names.len != 0) break :blk,
}
if (maybe_operand_val == null) {
// Try to avoid resolving inferred error set if possible.
if (ies.errors.count() != 0) break :blk;
if (ies.is_anyerror) break :blk;
for (ies.inferred_error_sets.keys()) |other_ies_index| {
if (ies_index == other_ies_index) continue;
try sema.resolveInferredErrorSet(block, src, other_ies_index);
const other_ies = mod.inferredErrorSetPtr(other_ies_index);
if (other_ies.is_anyerror) {
ies.is_anyerror = true;
ies.is_resolved = true;
break :blk;
if (sema.fn_ret_ty_ies) |ies| if (ies.func == func_index) {
// Try to avoid resolving inferred error set if possible.
for (ies.inferred_error_sets.keys()) |other_ies_index| {
if (set_ty == other_ies_index) continue;
const other_resolved =
try sema.resolveInferredErrorSet(block, src, other_ies_index);
if (other_resolved == .anyerror_type) {
ip.funcIesResolved(func_index).* = .anyerror_type;
break :blk;
}
if (ip.indexToKey(other_resolved).error_set_type.names.len != 0)
break :blk;
}
if (other_ies.errors.count() != 0) break :blk;
}
if (ies.func == sema.owner_func_index) {
// We're checking the inferred errorset of the current function and none of
// its child inferred error sets contained any errors meaning that any value
// so far with this type can't contain errors either.
return Air.Inst.Ref.bool_true;
}
try sema.resolveInferredErrorSet(block, src, ies_index);
if (ies.is_anyerror) break :blk;
if (ies.errors.count() == 0) return Air.Inst.Ref.bool_true;
return .bool_true;
};
const resolved_ty = try sema.resolveInferredErrorSet(block, src, set_ty);
if (resolved_ty == .anyerror_type)
break :blk;
if (ip.indexToKey(resolved_ty).error_set_type.names.len == 0)
return .bool_true;
}
},
else => unreachable,
@@ -30419,12 +30488,12 @@ fn analyzeIsNonErrComptimeOnly(
return sema.addConstUndef(Type.bool);
}
if (err_union.getErrorName(mod) == .none) {
return Air.Inst.Ref.bool_true;
return .bool_true;
} else {
return Air.Inst.Ref.bool_false;
return .bool_false;
}
}
return Air.Inst.Ref.none;
return .none;
}
fn analyzeIsNonErr(
@@ -31365,16 +31434,19 @@ fn wrapErrorUnionSet(
if (error_set_type.nameIndex(ip, expected_name) != null) break :ok;
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
.inferred_error_set_type => |ies_index| ok: {
const ies = mod.inferredErrorSetPtr(ies_index);
const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
.inferred_error_set_type => |func_index| ok: {
// We carefully do this in an order that avoids unnecessarily
// resolving the destination error set type.
if (ies.is_anyerror) break :ok;
if (ies.errors.contains(expected_name)) break :ok;
if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) break :ok;
const expected_name = mod.intern_pool.indexToKey(val.toIntern()).err.name;
switch (ip.funcIesResolved(func_index).*) {
.anyerror_type => break :ok,
.none => if (.ok == try sema.coerceInMemoryAllowedErrorSets(block, dest_err_set_ty, inst_ty, inst_src, inst_src)) {
break :ok;
},
else => |i| if (ip.indexToKey(i).error_set_type.nameIndex(ip, expected_name) != null) {
break :ok;
},
}
return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
@@ -32862,10 +32934,13 @@ fn typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike {
};
}
pub fn resolveFnTypes(sema: *Sema, fn_ty: Type) CompileError!void {
pub fn resolveFnTypes(sema: *Sema, block: *Block, src: LazySrcLoc, fn_ty: Type) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
const fn_ty_info = mod.typeToFunc(fn_ty).?;
if (sema.fn_ret_ty_ies) |ies| try sema.resolveInferredErrorSetPtr(block, src, ies);
try sema.resolveTypeFully(fn_ty_info.return_type.toType());
if (mod.comp.bin_file.options.error_return_tracing and fn_ty_info.return_type.toType().isError(mod)) {
@@ -33173,6 +33248,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
.owner_decl_index = decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@@ -33223,6 +33299,7 @@ fn semaBackingIntType(mod: *Module, struct_obj: *Module.Struct) CompileError!voi
.owner_decl_index = decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = undefined,
};
@@ -33797,30 +33874,31 @@ fn resolveTypeFieldsUnion(sema: *Sema, ty: Type, union_obj: *Module.Union) Compi
union_obj.status = .have_field_types;
}
/// Returns a normal error set corresponding to the fully populated inferred
/// error set.
fn resolveInferredErrorSet(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ies_index: Module.InferredErrorSet.Index,
) CompileError!void {
ies_index: InternPool.Index,
) CompileError!InternPool.Index {
const mod = sema.mod;
const ip = &mod.intern_pool;
const ies = mod.inferredErrorSetPtr(ies_index);
if (ies.is_resolved) return;
const func = mod.funcInfo(ies.func);
if (func.analysis(ip).state == .in_progress) {
const func_index = ip.iesFuncIndex(ies_index);
const func = mod.funcInfo(func_index);
const resolved_ty = func.resolvedErrorSet(ip).*;
if (resolved_ty != .none) return resolved_ty;
if (func.analysis(ip).state == .in_progress)
return sema.fail(block, src, "unable to resolve inferred error set", .{});
}
// In order to ensure that all dependencies are properly added to the set, we
// need to ensure the function body is analyzed of the inferred error set.
// However, in the case of comptime/inline function calls with inferred error sets,
// each call gets a new InferredErrorSet object, which contains the same
// `InternPool.Index`. Not only is the function not relevant to the inferred error set
// in this case, it may be a generic function which would cause an assertion failure
// if we called `ensureFuncBodyAnalyzed` on it here.
// In order to ensure that all dependencies are properly added to the set,
// we need to ensure the function body is analyzed of the inferred error
// set. However, in the case of comptime/inline function calls with
// inferred error sets, each call gets a new InferredErrorSet object, which
// contains the `InternPool.Index` of the callee. Not only is the function
// not relevant to the inferred error set in this case, it may be a generic
// function which would cause an assertion failure if we called
// `ensureFuncBodyAnalyzed` on it here.
const ies_func_owner_decl = mod.declPtr(func.owner_decl);
const ies_func_info = mod.typeToFunc(ies_func_owner_decl.ty).?;
// if ies declared by a inline function with generic return type, the return_type should be generic_poison,
@@ -33828,7 +33906,7 @@ fn resolveInferredErrorSet(
// so here we can simply skip this case.
if (ies_func_info.return_type == .generic_poison_type) {
assert(ies_func_info.cc == .Inline);
} else if (mod.typeToInferredErrorSet(ies_func_info.return_type.toType().errorUnionSet(mod)).? == ies) {
} else if (ip.errorUnionSet(ies_func_info.return_type) == ies_index) {
if (ies_func_info.is_generic) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "unable to resolve inferred error set of generic function", .{});
@@ -33841,33 +33919,62 @@ fn resolveInferredErrorSet(
}
// In this case we are dealing with the actual InferredErrorSet object that
// corresponds to the function, not one created to track an inline/comptime call.
try sema.ensureFuncBodyAnalyzed(ies.func);
try sema.ensureFuncBodyAnalyzed(func_index);
}
ies.is_resolved = true;
// This will now have been resolved by the logic at the end of `Module.analyzeFnBody`
// which calls `resolveInferredErrorSetPtr`.
const final_resolved_ty = func.resolvedErrorSet(ip).*;
assert(final_resolved_ty != .none);
return final_resolved_ty;
}
fn resolveInferredErrorSetPtr(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ies: *InferredErrorSet,
) CompileError!void {
const mod = sema.mod;
const ip = &mod.intern_pool;
const func = mod.funcInfo(ies.func);
if (func.resolvedErrorSet(ip).* != .none) return;
const ies_index = ip.errorUnionSet(sema.fn_ret_ty.toIntern());
for (ies.inferred_error_sets.keys()) |other_ies_index| {
if (ies_index == other_ies_index) continue;
try sema.resolveInferredErrorSet(block, src, other_ies_index);
const other_ies = mod.inferredErrorSetPtr(other_ies_index);
for (other_ies.errors.keys()) |key| {
try ies.errors.put(sema.gpa, key, {});
switch (try sema.resolveInferredErrorSet(block, src, other_ies_index)) {
.anyerror_type => {
func.resolvedErrorSet(ip).* = .anyerror_type;
return;
},
else => |error_set_ty_index| {
const names = ip.indexToKey(error_set_ty_index).error_set_type.names;
for (names.get(ip)) |name| {
try ies.errors.put(sema.arena, name, {});
}
},
}
if (other_ies.is_anyerror)
ies.is_anyerror = true;
}
const resolved_error_set_ty = try mod.errorSetFromUnsortedNames(ies.errors.keys());
func.resolvedErrorSet(ip).* = resolved_error_set_ty.toIntern();
}
fn resolveInferredErrorSetTy(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
ty: Type,
) CompileError!void {
ty: InternPool.Index,
) CompileError!InternPool.Index {
const mod = sema.mod;
if (mod.typeToInferredErrorSetIndex(ty).unwrap()) |ies_index| {
try sema.resolveInferredErrorSet(block, src, ies_index);
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty)) {
.error_set_type => return ty,
.inferred_error_set_type => return sema.resolveInferredErrorSet(block, src, ty),
else => unreachable,
}
}
@@ -33937,6 +34044,7 @@ fn semaStructFields(mod: *Module, struct_obj: *Module.Struct) CompileError!void
.owner_decl_index = decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@@ -34282,6 +34390,7 @@ fn semaUnionFields(mod: *Module, union_obj: *Module.Union) CompileError!void {
.owner_decl_index = decl_index,
.func_index = .none,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
};
@@ -34893,6 +35002,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.var_args_param_type,
.none,
=> unreachable,
_ => switch (mod.intern_pool.items.items(.tag)[@intFromEnum(ty.toIntern())]) {
.type_int_signed, // i0 handled above
.type_int_unsigned, // u0 handled above
@@ -34901,6 +35011,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
.type_optional, // ?noreturn handled above
.type_anyframe,
.type_error_union,
.type_anyerror_union,
.type_error_set,
.type_inferred_error_set,
.type_opaque,
@@ -36354,7 +36465,7 @@ fn errorSetMerge(sema: *Sema, lhs: Type, rhs: Type) !Type {
const arena = sema.arena;
const lhs_names = lhs.errorSetNames(mod);
const rhs_names = rhs.errorSetNames(mod);
var names: Module.InferredErrorSet.NameMap = .{};
var names: InferredErrorSet.NameMap = .{};
try names.ensureUnusedCapacity(arena, lhs_names.len);
for (lhs_names) |name| {
+12 -13
View File
@@ -6061,8 +6061,6 @@ pub const FuncGen = struct {
.is_var_args = false,
.is_generic = false,
.is_noinline = false,
.align_is_generic = false,
.cc_is_generic = false,
.section_is_generic = false,
.addrspace_is_generic = false,
});
@@ -10657,30 +10655,31 @@ fn llvmField(ty: Type, field_index: usize, mod: *Module) ?LlvmField {
}
fn firstParamSRet(fn_info: InternPool.Key.FuncType, mod: *Module) bool {
if (!fn_info.return_type.toType().hasRuntimeBitsIgnoreComptime(mod)) return false;
const return_type = fn_info.return_type.toType();
if (!return_type.hasRuntimeBitsIgnoreComptime(mod)) return false;
const target = mod.getTarget();
switch (fn_info.cc) {
.Unspecified, .Inline => return isByRef(fn_info.return_type.toType(), mod),
.Unspecified, .Inline => return isByRef(return_type, mod),
.C => switch (target.cpu.arch) {
.mips, .mipsel => return false,
.x86_64 => switch (target.os.tag) {
.windows => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
else => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
.windows => return x86_64_abi.classifyWindows(return_type, mod) == .memory,
else => return firstParamSRetSystemV(return_type, mod),
},
.wasm32 => return wasm_c_abi.classifyType(fn_info.return_type.toType(), mod)[0] == .indirect,
.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
.arm, .armeb => switch (arm_c_abi.classifyType(fn_info.return_type.toType(), mod, .ret)) {
.wasm32 => return wasm_c_abi.classifyType(return_type, mod)[0] == .indirect,
.aarch64, .aarch64_be => return aarch64_c_abi.classifyType(return_type, mod) == .memory,
.arm, .armeb => switch (arm_c_abi.classifyType(return_type, mod, .ret)) {
.memory, .i64_array => return true,
.i32_array => |size| return size != 1,
.byval => return false,
},
.riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type.toType(), mod) == .memory,
.riscv32, .riscv64 => return riscv_c_abi.classifyType(return_type, mod) == .memory,
else => return false, // TODO investigate C ABI for other architectures
},
.SysV => return firstParamSRetSystemV(fn_info.return_type.toType(), mod),
.Win64 => return x86_64_abi.classifyWindows(fn_info.return_type.toType(), mod) == .memory,
.Stdcall => return !isScalar(mod, fn_info.return_type.toType()),
.SysV => return firstParamSRetSystemV(return_type, mod),
.Win64 => return x86_64_abi.classifyWindows(return_type, mod) == .memory,
.Stdcall => return !isScalar(mod, return_type),
else => return false,
}
}
+23 -34
View File
@@ -1043,6 +1043,7 @@ pub fn commitDeclState(
var dbg_line_buffer = &decl_state.dbg_line;
var dbg_info_buffer = &decl_state.dbg_info;
const decl = mod.declPtr(decl_index);
const ip = &mod.intern_pool;
const target_endian = self.target.cpu.arch.endian();
@@ -1241,20 +1242,9 @@ pub fn commitDeclState(
while (sym_index < decl_state.abbrev_table.items.len) : (sym_index += 1) {
const symbol = &decl_state.abbrev_table.items[sym_index];
const ty = symbol.type;
const deferred: bool = blk: {
if (ty.isAnyError(mod)) break :blk true;
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.inferred_error_set_type => |ies_index| {
const ies = mod.inferredErrorSetPtr(ies_index);
if (!ies.is_resolved) break :blk true;
},
else => {},
}
break :blk false;
};
if (deferred) continue;
if (ip.isErrorSetType(ty.toIntern())) continue;
symbol.offset = @as(u32, @intCast(dbg_info_buffer.items.len));
symbol.offset = @intCast(dbg_info_buffer.items.len);
try decl_state.addDbgInfoType(mod, di_atom_index, ty);
}
}
@@ -1265,18 +1255,7 @@ pub fn commitDeclState(
if (reloc.target) |target| {
const symbol = decl_state.abbrev_table.items[target];
const ty = symbol.type;
const deferred: bool = blk: {
if (ty.isAnyError(mod)) break :blk true;
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.inferred_error_set_type => |ies_index| {
const ies = mod.inferredErrorSetPtr(ies_index);
if (!ies.is_resolved) break :blk true;
},
else => {},
}
break :blk false;
};
if (deferred) {
if (ip.isErrorSetType(ty.toIntern())) {
log.debug("resolving %{d} deferred until flush", .{target});
try self.global_abbrev_relocs.append(gpa, .{
.target = null,
@@ -2505,18 +2484,18 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
defer arena_alloc.deinit();
const arena = arena_alloc.allocator();
// TODO: don't create a zig type for this, just make the dwarf info
// without touching the zig type system.
const names = try arena.dupe(InternPool.NullTerminatedString, module.global_error_set.keys());
std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan);
const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } });
var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSet(module, error_ty.toType(), self.target, &dbg_info_buffer);
try addDbgInfoErrorSetNames(
module,
Type.anyerror,
module.global_error_set.keys(),
self.target,
&dbg_info_buffer,
);
const di_atom_index = try self.createAtom(.di_atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
try self.updateDeclDebugInfoAllocation(di_atom_index, @as(u32, @intCast(dbg_info_buffer.items.len)));
try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(dbg_info_buffer.items.len));
log.debug("writeDeclDebugInfo in flushModule", .{});
try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
@@ -2633,6 +2612,17 @@ fn addDbgInfoErrorSet(
ty: Type,
target: std.Target,
dbg_info_buffer: *std.ArrayList(u8),
) !void {
return addDbgInfoErrorSetNames(mod, ty, ty.errorSetNames(mod), target, dbg_info_buffer);
}
fn addDbgInfoErrorSetNames(
mod: *Module,
/// Used for printing the type name only.
ty: Type,
error_names: []const InternPool.NullTerminatedString,
target: std.Target,
dbg_info_buffer: *std.ArrayList(u8),
) !void {
const target_endian = target.cpu.arch.endian();
@@ -2655,7 +2645,6 @@ fn addDbgInfoErrorSet(
// DW.AT.const_value, DW.FORM.data8
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian);
const error_names = ty.errorSetNames(mod);
for (error_names) |error_name_ip| {
const int = try mod.getErrorValue(error_name_ip);
const error_name = mod.intern_pool.stringToSlice(error_name_ip);
+33 -34
View File
@@ -251,20 +251,19 @@ pub const Type = struct {
return;
},
.inferred_error_set_type => |index| {
const ies = mod.inferredErrorSetPtr(index);
const func = ies.func;
const func = mod.iesFuncIndex(index);
try writer.writeAll("@typeInfo(@typeInfo(@TypeOf(");
const owner_decl = mod.funcOwnerDeclPtr(func);
try owner_decl.renderFullyQualifiedName(mod, writer);
try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set");
},
.error_set_type => |error_set_type| {
const ip = &mod.intern_pool;
const names = error_set_type.names;
try writer.writeAll("error{");
for (names, 0..) |name, i| {
for (names.get(ip), 0..) |name, i| {
if (i != 0) try writer.writeByte(',');
try writer.print("{}", .{name.fmt(&mod.intern_pool)});
try writer.print("{}", .{name.fmt(ip)});
}
try writer.writeAll("}");
},
@@ -2051,21 +2050,19 @@ pub const Type = struct {
/// Asserts that the type is an error union.
pub fn errorUnionSet(ty: Type, mod: *Module) Type {
return mod.intern_pool.indexToKey(ty.toIntern()).error_union_type.error_set_type.toType();
return mod.intern_pool.errorUnionSet(ty.toIntern()).toType();
}
/// Returns false for unresolved inferred error sets.
pub fn errorSetIsEmpty(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ty.toIntern()) {
.anyerror_type => false,
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
else => switch (ip.indexToKey(ty.toIntern())) {
.error_set_type => |error_set_type| error_set_type.names.len == 0,
.inferred_error_set_type => |index| {
const inferred_error_set = mod.inferredErrorSetPtr(index);
// Can't know for sure.
if (!inferred_error_set.is_resolved) return false;
if (inferred_error_set.is_anyerror) return false;
return inferred_error_set.errors.count() == 0;
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.none, .anyerror_type => false,
else => |t| ip.indexToKey(t).error_set_type.names.len == 0,
},
else => unreachable,
},
@@ -2076,10 +2073,11 @@ pub const Type = struct {
/// Note that the result may be a false negative if the type did not get error set
/// resolution prior to this call.
pub fn isAnyError(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ty.toIntern()) {
.anyerror_type => true,
else => switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.inferred_error_set_type => |i| mod.inferredErrorSetPtr(i).is_anyerror,
.inferred_error_set_type => |i| ip.funcIesResolved(i).* == .anyerror_type,
else => false,
},
};
@@ -2103,13 +2101,11 @@ pub const Type = struct {
return switch (ty) {
.anyerror_type => true,
else => switch (ip.indexToKey(ty)) {
.error_set_type => |error_set_type| {
return error_set_type.nameIndex(ip, name) != null;
},
.inferred_error_set_type => |index| {
const ies = ip.inferredErrorSetPtrConst(index);
if (ies.is_anyerror) return true;
return ies.errors.contains(name);
.error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null,
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.anyerror_type => true,
.none => false,
else => |t| ip.indexToKey(t).error_set_type.nameIndex(ip, name) != null,
},
else => unreachable,
},
@@ -2129,12 +2125,14 @@ pub const Type = struct {
const field_name_interned = ip.getString(name).unwrap() orelse return false;
return error_set_type.nameIndex(ip, field_name_interned) != null;
},
.inferred_error_set_type => |index| {
const ies = ip.inferredErrorSetPtr(index);
if (ies.is_anyerror) return true;
// If the string is not interned, then the field certainly is not present.
const field_name_interned = ip.getString(name).unwrap() orelse return false;
return ies.errors.contains(field_name_interned);
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.anyerror_type => true,
.none => false,
else => |t| {
// If the string is not interned, then the field certainly is not present.
const field_name_interned = ip.getString(name).unwrap() orelse return false;
return ip.indexToKey(t).error_set_type.nameIndex(ip, field_name_interned) != null;
},
},
else => unreachable,
},
@@ -2943,14 +2941,15 @@ pub const Type = struct {
}
// Asserts that `ty` is an error set and not `anyerror`.
// Asserts that `ty` is resolved if it is an inferred error set.
pub fn errorSetNames(ty: Type, mod: *Module) []const InternPool.NullTerminatedString {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.error_set_type => |x| x.names,
.inferred_error_set_type => |index| {
const inferred_error_set = mod.inferredErrorSetPtr(index);
assert(inferred_error_set.is_resolved);
assert(!inferred_error_set.is_anyerror);
return inferred_error_set.errors.keys();
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.error_set_type => |x| x.names.get(ip),
.inferred_error_set_type => |i| switch (ip.funcIesResolved(i).*) {
.none => unreachable, // unresolved inferred error set
.anyerror_type => unreachable,
else => |t| ip.indexToKey(t).error_set_type.names.get(ip),
},
else => unreachable,
};