remove pointer restriction from restricted types

This commit is contained in:
Jacob Young
2026-04-11 17:27:00 -04:00
parent da85c089b8
commit 24bf438708
29 changed files with 1281 additions and 821 deletions
+2 -2
View File
@@ -5771,9 +5771,9 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
{#header_open|@Restricted#}
<pre>{#syntax#}@Restricted(
comptime Pointer: type,
comptime Underlying: type,
) type{#endsyntax#}</pre>
<p>Returns a restricted pointer type based on the specified pointer type.</p>
<p>Returns a restricted type based on the specified underlying type.</p>
{#header_close#}
{#header_open|@Fn#}
+2 -2
View File
@@ -201,9 +201,9 @@ pub fn FullPanic(comptime panicFn: fn ([]const u8, ?usize) noreturn) type {
@branchHint(.cold);
call("'noreturn' function returned", @returnAddress());
}
pub fn corruptRestrictedPointer() noreturn {
pub fn corruptRestrictedValue() noreturn {
@branchHint(.cold);
call("corrupt restricted pointer value", @returnAddress());
call("corrupt restricted value", @returnAddress());
}
};
}
+1 -1
View File
@@ -135,7 +135,7 @@ pub fn noreturnReturned() noreturn {
@trap();
}
pub fn corruptRestrictedPointer() noreturn {
pub fn corruptRestrictedValue() noreturn {
@branchHint(.cold);
@trap();
}
+2 -2
View File
@@ -127,6 +127,6 @@ pub fn noreturnReturned() noreturn {
call("'noreturn' function returned", null);
}
pub fn corruptRestrictedPointer() noreturn {
call("corrupt restricted pointer value", null);
pub fn corruptRestrictedValue() noreturn {
call("corrupt restricted value", null);
}
+9 -9
View File
@@ -9320,15 +9320,6 @@ fn builtinCall(
});
return rvalue(gz, ri, result, node);
},
.Restricted => {
const unrestricted_ptr_ty = try typeExpr(gz, scope, params[0]);
const result = try gz.addExtendedPayloadSmall(
.reify_restricted,
@intFromEnum(reify_name_strat),
Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = unrestricted_ptr_ty },
);
return rvalue(gz, ri, result, node);
},
.Fn => {
const fn_attrs_ty = try gz.addBuiltinValue(node, .fn_attributes);
const param_types = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_type_type } }, params[0], .fn_param_types);
@@ -9349,6 +9340,15 @@ fn builtinCall(
});
return rvalue(gz, ri, result, node);
},
.Restricted => {
const unrestricted_ty = try typeExpr(gz, scope, params[0]);
const result = try gz.addExtendedPayloadSmall(
.reify_restricted,
@intFromEnum(reify_name_strat),
Zir.Inst.ReifyRestricted{ .node = node, .unrestricted_ty = unrestricted_ty },
);
return rvalue(gz, ri, result, node);
},
.Struct => {
const container_layout_ty = try gz.addBuiltinValue(node, .container_layout);
const layout = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = container_layout_ty } }, params[0], .struct_layout);
+11 -4
View File
@@ -2062,13 +2062,13 @@ pub const Inst = struct {
/// Implements builtin `@Pointer`.
/// `operand` is payload index to `ReifyPointer`.
reify_pointer,
/// Implements builtin `@Restricted`.
/// `operand` is payload index to `UnNode`.
/// `small` contains `NameStrategy`.
reify_restricted,
/// Implements builtin `@Fn`.
/// `operand` is payload index to `ReifyFn`.
reify_fn,
/// Implements builtin `@Restricted`.
/// `operand` is payload index to `ReifyRestricted`.
/// `small` contains `NameStrategy`.
reify_restricted,
/// Implements builtin `@Struct`.
/// `operand` is payload index to `ReifyStruct`.
/// `small` contains `NameStrategy`.
@@ -3266,6 +3266,13 @@ pub const Inst = struct {
fn_attrs: Ref,
};
pub const ReifyRestricted = struct {
/// This node is absolute, because `reify` instructions are tracked across updates, and
/// this simplifies the logic for getting source locations for types.
node: Ast.Node.Index,
unrestricted_ty: Ref,
};
pub const ReifyStruct = struct {
src_line: u32,
/// This node is absolute, because `reify` instructions are tracked across updates, and
+1 -1
View File
@@ -501,7 +501,7 @@ pub fn resolve(options: Options) ResolveError!Config {
};
};
const backend_supports_error_tracing = target_util.backendSupportsFeature(backend, .error_return_trace);
const backend_supports_error_tracing = target_util.backendSupportsFeature(backend, options.incremental, .error_return_trace);
const root_error_tracing = b: {
if (options.root_error_tracing) |x| break :b x;
+191 -109
View File
@@ -1969,7 +1969,6 @@ pub const CaptureValue = packed struct(u32) {
pub const Key = union(enum) {
int_type: IntType,
ptr_type: PtrType,
restricted_ptr_type: RestrictedPtrType,
array_type: ArrayType,
vector_type: VectorType,
opt_type: Index,
@@ -1978,6 +1977,7 @@ pub const Key = union(enum) {
anyframe_type: Index,
error_union_type: ErrorUnionType,
simple_type: SimpleType,
restricted_type: RestrictedType,
/// This represents a struct that has been explicitly declared in source code,
/// or was created with `@Struct`. It is unique and based on a declaration.
struct_type: ContainerType,
@@ -2021,6 +2021,8 @@ pub const Key = union(enum) {
un: Union,
/// An instance of a `packed struct` or `packed union`.
bitpack: Bitpack,
/// An instance of a restricted type.
restricted_value: RestrictedValue,
/// A comptime function call with a memoized result.
memoized_call: Key.MemoizedCall,
@@ -2095,14 +2097,6 @@ pub const Key = union(enum) {
pub const AddressSpace = std.builtin.AddressSpace;
};
/// Extern layout so it can be hashed with `std.mem.asBytes`.
pub const RestrictedPtrType = extern struct {
/// A `reify_restricted` instruction.
zir_index: TrackedInst.Index,
/// The underlying pointer type.
unrestricted_ptr_type: Index,
};
/// Extern so that hashing can be done via memory reinterpreting.
pub const ArrayType = extern struct {
len: u64,
@@ -2206,6 +2200,13 @@ pub const Key = union(enum) {
}
};
pub const RestrictedType = extern struct {
/// A `reify_restricted` instruction.
zir_index: TrackedInst.Index,
/// The underlying unrestricted type.
unrestricted_type: Index,
};
pub const Extern = struct {
/// The name of the extern symbol.
name: NullTerminatedString,
@@ -2581,6 +2582,12 @@ pub const Key = union(enum) {
backing_int_val: Index,
};
pub const RestrictedValue = extern struct {
/// The restricted type.
ty: Index,
unrestricted_value: Index,
};
pub const MemoizedCall = struct {
func: Index,
arg_values: []const Index,
@@ -2599,13 +2606,13 @@ pub const Key = union(enum) {
return switch (key) {
// TODO: assert no padding in these types
inline .ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.restricted_type,
.simple_value,
.opt,
.undef,
@@ -2614,6 +2621,7 @@ pub const Key = union(enum) {
.enum_tag,
.inferred_error_set_type,
.un,
.restricted_value,
=> |x| Hash.hash(seed, asBytes(&x)),
.int_type => |x| Hash.hash(seed | @shlExact(@as(u64, @intFromEnum(x.signedness)), 63), asBytes(&x.bits)),
@@ -2893,6 +2901,10 @@ pub const Key = union(enum) {
const b_info = b.bitpack;
return a_info.ty == b_info.ty and a_info.backing_int_val == b_info.backing_int_val;
},
.restricted_value => |a_info| {
const b_info = b.restricted_value;
return a_info.ty == b_info.ty and a_info.unrestricted_value == b_info.unrestricted_value;
},
.@"extern" => |a_info| {
const b_info = b.@"extern";
@@ -3027,7 +3039,7 @@ pub const Key = union(enum) {
}
},
.restricted_ptr_type => |a_r| return std.meta.eql(a_r, b.restricted_ptr_type),
.restricted_type => |a_r| return std.meta.eql(a_r, b.restricted_type),
inline .opaque_type, .enum_type, .union_type, .struct_type => |a_info, a_tag_ct| {
const b_info = @field(b, @tagName(a_tag_ct));
@@ -3130,7 +3142,6 @@ pub const Key = union(enum) {
return switch (key) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
@@ -3139,6 +3150,7 @@ pub const Key = union(enum) {
.error_set_type,
.inferred_error_set_type,
.simple_type,
.restricted_type,
.struct_type,
.union_type,
.opaque_type,
@@ -3160,6 +3172,7 @@ pub const Key = union(enum) {
.aggregate,
.un,
.bitpack,
.restricted_value,
=> |x| x.ty,
.enum_literal => .enum_literal_type,
@@ -4187,7 +4200,6 @@ pub const Index = enum(u32) {
type_array_small: struct { data: *Vector },
type_vector: struct { data: *Vector },
type_pointer: struct { data: *Tag.TypePointer },
type_restricted: struct { data: *Tag.TypeRestricted },
type_slice: DataIsIndex,
type_optional: DataIsIndex,
type_anyframe: DataIsIndex,
@@ -4222,6 +4234,7 @@ pub const Index = enum(u32) {
},
},
type_restricted: struct { data: *Tag.TypeRestricted },
type_struct: struct { data: *Tag.TypeStruct },
type_struct_packed_auto: struct { data: *Tag.TypeStructPacked },
type_struct_packed_explicit: struct { data: *Tag.TypeStructPacked },
@@ -4302,6 +4315,7 @@ pub const Index = enum(u32) {
},
repeated: struct { data: *Repeated },
bitpack: struct { data: *Key.Bitpack },
restricted_value: struct { data: *Key.RestrictedValue },
memoized_call: struct {
const @"data.args_len" = opaque {};
@@ -4786,9 +4800,6 @@ pub const Tag = enum(u8) {
/// A slice type.
/// data is Index of underlying pointer type.
type_slice,
/// A restricted pointer type.
/// data is payload to `TypeRestricted`.
type_restricted,
/// An optional type.
/// data is the child type.
type_optional,
@@ -4815,6 +4826,9 @@ pub const Tag = enum(u8) {
/// data is extra index of `TypeTuple`.
type_tuple,
/// A restricted pointer type.
/// data is payload to `TypeRestricted`.
type_restricted,
/// A non-packed struct type.
/// data is extra index of `TypeStruct`.
type_struct,
@@ -5037,6 +5051,9 @@ pub const Tag = enum(u8) {
/// An instance of a `packed struct` or `packed union`.
/// data is extra index to `Key.Bitpack`.
bitpack,
/// An instance of a restricted type.
/// data is extra index to `Key.RestrictedValue`.
restricted_value,
/// A memoized comptime function call result.
/// data is extra index to `MemoizedCall`
@@ -5127,7 +5144,6 @@ pub const Tag = enum(u8) {
.type_array_small = .{ .summary = .@"[{.payload.len%value}]{.payload.child%summary}", .payload = Vector },
.type_vector = .{ .summary = .@"@Vector({.payload.len%value}, {.payload.child%summary})", .payload = Vector },
.type_pointer = .{ .summary = .@"*... {.payload.child%summary}", .payload = TypePointer },
.type_restricted = .{ .summary = .@"@Restricted({.payload.ptr_type%summary})", .payload = TypeRestricted },
.type_slice = .{ .summary = .@"[]... {.data.unwrapped.payload.child%summary}", .data = Index },
.type_optional = .{ .summary = .@"?{.data%summary}", .data = Index },
.type_anyframe = .{ .summary = .@"anyframe->{.data%summary}", .data = Index },
@@ -5171,6 +5187,7 @@ pub const Tag = enum(u8) {
},
},
.type_restricted = .{ .summary = .@"@Restricted({.payload.ptr_type%summary})", .payload = TypeRestricted },
.type_struct = .{
.summary = .@"{.payload.name%summary#\"}",
.payload = TypeStruct,
@@ -5363,6 +5380,7 @@ pub const Tag = enum(u8) {
},
.repeated = .{ .summary = .@"@as({.payload.ty%summary}, @splat({.payload.elem_val%summary}))", .payload = Repeated },
.bitpack = .{ .summary = .@"@as({.payload.ty%summary}, {})", .payload = Key.Bitpack },
.restricted_value = .{ .summary = .@"@as({.payload.unrestricted_value%summary}, {})", .payload = Key.RestrictedValue },
.memoized_call = .{
.summary = .@"@memoize({.payload.func%summary})",
@@ -5390,8 +5408,8 @@ pub const Tag = enum(u8) {
/// The name of this restricted type.
name: NullTerminatedString,
/// The pointer type this restricted type is based on.
unrestricted_ptr_type: Index,
/// The underlying unrestricted type.
unrestricted_type: Index,
};
pub const Extern = struct {
@@ -6486,14 +6504,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
return .{ .ptr_type = ptr_info };
},
.type_restricted => {
const restricted_ptr_info = extraData(unwrapped_index.getExtra(ip), Tag.TypeRestricted, data);
return .{ .restricted_ptr_type = .{
.zir_index = restricted_ptr_info.zir_index,
.unrestricted_ptr_type = restricted_ptr_info.unrestricted_ptr_type,
} };
},
.type_optional => .{ .opt_type = @enumFromInt(data) },
.type_anyframe => .{ .anyframe_type = @enumFromInt(data) },
@@ -6509,6 +6519,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.type_function => .{ .func_type = extraFuncType(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) },
.type_tuple => .{ .tuple_type = extraTypeTuple(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) },
.type_restricted => {
const restricted_info = extraData(unwrapped_index.getExtra(ip), Tag.TypeRestricted, data);
return .{ .restricted_type = .{
.zir_index = restricted_info.zir_index,
.unrestricted_type = restricted_info.unrestricted_type,
} };
},
.type_struct => .{ .struct_type = ns: {
const extra_list = unwrapped_index.getExtra(ip);
const extra = extraDataTrail(extra_list, Tag.TypeStruct, data);
@@ -6903,6 +6920,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.enum_literal => .{ .enum_literal = @enumFromInt(data) },
.enum_tag => .{ .enum_tag = extraData(unwrapped_index.getExtra(ip), Tag.EnumTag, data) },
.bitpack => .{ .bitpack = extraData(unwrapped_index.getExtra(ip), Key.Bitpack, data) },
.restricted_value => .{ .restricted_value = extraData(unwrapped_index.getExtra(ip), Key.RestrictedValue, data) },
.memoized_call => {
const extra_list = unwrapped_index.getExtra(ip);
@@ -7276,7 +7294,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key:
.data = try addExtra(extra, ptr_type_adjusted),
});
},
.restricted_ptr_type => unreachable, // instead getReifiedRestrictedType
.array_type => |array_type| {
assert(array_type.child != .none);
assert(array_type.sentinel == .none or ip.typeOf(array_type.sentinel) == array_type.child);
@@ -7382,6 +7399,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key:
});
},
.restricted_type => unreachable, // instead use: getReifiedRestrictedType
.struct_type => unreachable, // instead use: getDeclaredStructType, getReifiedStructType
.union_type => unreachable, // instead use: getDeclaredUnionType, getReifiedUnionType
.enum_type => unreachable, // instead use: getDeclaredEnumType, getReifiedEnumType, getGeneratedEnumTagType
@@ -7407,11 +7425,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key:
},
.ptr => |ptr| {
const ptr_type = switch (ip.indexToKey(ptr.ty)) {
.ptr_type => |ptr_type| ptr_type,
.restricted_ptr_type => |restricted_ptr_type| ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
else => unreachable,
};
const ptr_type = ip.indexToKey(ptr.ty).ptr_type;
assert(ptr_type.flags.size != .slice);
items.appendAssumeCapacity(switch (ptr.base_addr) {
.nav => |nav| .{
@@ -7983,6 +7997,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key:
.data = try addExtra(extra, bitpack),
});
},
.restricted_value => |restricted_value| {
assert(restricted_value.ty.unwrap(ip).getTag(ip) == .type_restricted);
assert(!ip.isUndef(restricted_value.unrestricted_value));
items.appendAssumeCapacity(.{
.tag = .restricted_value,
.data = try addExtra(extra, restricted_value),
});
},
.memoized_call => |memoized_call| {
for (memoized_call.arg_values) |arg| assert(arg != .none);
@@ -8009,11 +8031,11 @@ pub fn getReifiedRestrictedType(
io: Io,
tid: Zcu.PerThread.Id,
zir_index: TrackedInst.Index,
unrestricted_ptr_type: Index,
unrestricted_type: Index,
) Allocator.Error!WipRestrictedType.Result {
var gop = try ip.getOrPutKey(gpa, io, tid, .{ .restricted_ptr_type = .{
var gop = try ip.getOrPutKey(gpa, io, tid, .{ .restricted_type = .{
.zir_index = zir_index,
.unrestricted_ptr_type = unrestricted_ptr_type,
.unrestricted_type = unrestricted_type,
} });
defer gop.deinit();
if (gop == .existing) return .{ .existing = gop.existing };
@@ -8028,7 +8050,7 @@ pub fn getReifiedRestrictedType(
const extra_index = addExtraAssumeCapacity(extra, Tag.TypeRestricted{
.zir_index = zir_index,
.name = undefined,
.unrestricted_ptr_type = unrestricted_ptr_type,
.unrestricted_type = unrestricted_type,
});
items.appendAssumeCapacity(.{
.tag = .type_restricted,
@@ -10028,7 +10050,6 @@ test "basic usage" {
pub fn childType(ip: *const InternPool, i: Index) Index {
return switch (ip.indexToKey(i)) {
.ptr_type => |ptr_type| ptr_type.child,
.restricted_ptr_type => |restricted_ptr_type| ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type.child,
.vector_type => |vector_type| vector_type.child,
.array_type => |array_type| array_type.child,
.opt_type, .anyframe_type => |child| child,
@@ -10111,28 +10132,22 @@ pub fn getCoerced(
.val = .none,
} });
new_ty: switch (ip.indexToKey(new_ty)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.one, .many, .c => return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
if (ip.isPointerType(new_ty)) switch (ip.indexToKey(new_ty).ptr_type.flags.size) {
.one, .many, .c => return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = .int,
.byte_offset = 0,
} }),
.slice => return ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
.ptr = try ip.get(gpa, io, tid, .{ .ptr = .{
.ty = ip.slicePtrType(new_ty),
.base_addr = .int,
.byte_offset = 0,
} }),
.slice => return ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
.ptr = try ip.get(gpa, io, tid, .{ .ptr = .{
.ty = ip.slicePtrType(new_ty),
.base_addr = .int,
.byte_offset = 0,
} }),
.len = .undef_usize,
} }),
},
.restricted_ptr_type => |restricted_ptr_type| continue :new_ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => {},
}
.len = .undef_usize,
} }),
};
},
else => {
const unwrapped_val = val.unwrap(ip);
@@ -10211,40 +10226,28 @@ pub fn getCoerced(
},
else => {},
},
.slice => |slice| new_ty: switch (ip.indexToKey(new_ty)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.one, .many, .c => {},
.slice => return ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
.ptr = try ip.getCoerced(gpa, io, tid, slice.ptr, ip.slicePtrType(new_ty)),
.len = slice.len,
} }),
},
.restricted_ptr_type => |restricted_ptr_type| continue :new_ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => if (ip.isIntegerType(new_ty)) return ip.getCoerced(gpa, io, tid, slice.ptr, new_ty),
},
.ptr => |ptr| new_ty: switch (ip.indexToKey(new_ty)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.one, .many, .c => return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = ptr.base_addr,
.byte_offset = ptr.byte_offset,
} }),
.slice => {},
},
.restricted_ptr_type => |restricted_ptr_type| continue :new_ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => if (ip.isIntegerType(new_ty)) switch (ptr.base_addr) {
.slice => |slice| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size == .slice)
return ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
.ptr = try ip.getCoerced(gpa, io, tid, slice.ptr, ip.slicePtrType(new_ty)),
.len = slice.len,
} })
else if (ip.isIntegerType(new_ty))
return ip.getCoerced(gpa, io, tid, slice.ptr, new_ty),
.ptr => |ptr| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size != .slice)
return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = ptr.base_addr,
.byte_offset = ptr.byte_offset,
} })
else if (ip.isIntegerType(new_ty))
switch (ptr.base_addr) {
.int => return ip.get(gpa, io, tid, .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = @intCast(ptr.byte_offset) },
} }),
else => {},
},
},
.opt => |opt| switch (ip.indexToKey(new_ty)) {
.ptr_type => |ptr_type| return switch (opt.val) {
.none => switch (ptr_type.flags.size) {
@@ -10484,6 +10487,22 @@ pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType {
/// includes .comptime_int_type
pub fn isIntegerType(ip: *const InternPool, ty: Index) bool {
return switch (ty) {
.u0_type,
.i0_type,
.u1_type,
.u8_type,
.i8_type,
.u16_type,
.i16_type,
.u29_type,
.u32_type,
.i32_type,
.u64_type,
.i64_type,
.u80_type,
.u128_type,
.i128_type,
.u256_type,
.usize_type,
.isize_type,
.c_char_type,
@@ -10497,7 +10516,8 @@ pub fn isIntegerType(ip: *const InternPool, ty: Index) bool {
.c_ulonglong_type,
.comptime_int_type,
=> true,
else => switch (ty.unwrap(ip).getTag(ip)) {
else => false,
_ => switch (ty.unwrap(ip).getTag(ip)) {
.type_int_signed,
.type_int_unsigned,
=> true,
@@ -10508,53 +10528,114 @@ pub fn isIntegerType(ip: *const InternPool, ty: Index) bool {
/// does not include .enum_literal_type
pub fn isEnumType(ip: *const InternPool, ty: Index) bool {
return ip.indexToKey(ty) == .enum_type;
return switch (ty.unwrap(ip).getTag(ip)) {
.type_enum_auto, .type_enum_explicit, .type_enum_nonexhaustive => true,
else => false,
};
}
/// Returns whether `ty` is a `@Vector` type.
pub fn isVectorType(ip: *const InternPool, ty: Index) bool {
    // Only one item tag encodes vector types, so a direct comparison suffices.
    return ty.unwrap(ip).getTag(ip) == .type_vector;
}
pub fn isUnion(ip: *const InternPool, ty: Index) bool {
return ip.indexToKey(ty) == .union_type;
return switch (ty.unwrap(ip).getTag(ip)) {
.type_union, .type_union_packed_auto, .type_union_packed_explicit => true,
else => false,
};
}
pub fn isFunctionType(ip: *const InternPool, ty: Index) bool {
return ip.indexToKey(ty) == .func_type;
return ty.unwrap(ip).getTag(ip) == .type_function;
}
/// Returns whether `ty` is a pointer type, including slice types.
pub fn isPointerType(ip: *const InternPool, ty: Index) bool {
    // Pointer types are encoded under two item tags: plain pointers and slices.
    const tag = ty.unwrap(ip).getTag(ip);
    return tag == .type_pointer or tag == .type_slice;
}
pub fn isOptionalType(ip: *const InternPool, ty: Index) bool {
return ip.indexToKey(ty) == .opt_type;
return ty.unwrap(ip).getTag(ip) == .type_optional;
}
/// includes .inferred_error_set_type
pub fn isErrorSetType(ip: *const InternPool, ty: Index) bool {
return switch (ty) {
.anyerror_type, .adhoc_inferred_error_set_type => true,
else => switch (ip.indexToKey(ty)) {
.error_set_type, .inferred_error_set_type => true,
else => false,
_ => switch (ty.unwrap(ip).getTag(ip)) {
.type_error_set, .type_inferred_error_set => true,
else => false,
},
};
}
pub fn isInferredErrorSetType(ip: *const InternPool, ty: Index) bool {
return ty == .adhoc_inferred_error_set_type or ip.indexToKey(ty) == .inferred_error_set_type;
return ty == .adhoc_inferred_error_set_type or ty.unwrap(ip).getTag(ip) == .type_inferred_error_set;
}
pub fn isErrorUnionType(ip: *const InternPool, ty: Index) bool {
return ip.indexToKey(ty) == .error_union_type;
return switch (ty) {
.anyerror_void_error_union_type => true,
else => false,
_ => switch (ty.unwrap(ip).getTag(ip)) {
.type_error_union, .type_anyerror_union => true,
else => false,
},
};
}
pub fn isAggregateType(ip: *const InternPool, ty: Index) bool {
return switch (ip.indexToKey(ty)) {
.array_type, .vector_type, .tuple_type, .struct_type => true,
return switch (ty.unwrap(ip).getTag(ip)) {
.type_array_big,
.type_array_small,
.type_vector,
.type_tuple,
.type_struct,
.type_struct_packed_auto,
.type_struct_packed_explicit,
.type_struct_packed_auto_defaults,
.type_struct_packed_explicit_defaults,
=> true,
else => false,
};
}
/// Returns whether `ty` is an opaque type (including `anyopaque`).
pub fn isOpaqueType(ip: *const InternPool, ty: Index) bool {
    // `anyopaque` is a statically-known index; all other opaques carry the
    // `type_opaque` item tag.
    if (ty == .anyopaque_type) return true;
    return ty.unwrap(ip).getTag(ip) == .type_opaque;
}
/// Returns whether `ty` is a restricted type (created via `@Restricted`).
pub fn isRestrictedType(ip: *const InternPool, ty: Index) bool {
    return switch (ty.unwrap(ip).getTag(ip)) {
        .type_restricted => true,
        else => false,
    };
}
pub fn errorUnionSet(ip: *const InternPool, ty: Index) Index {
return ip.indexToKey(ty).error_union_type.error_set_type;
const unwrapped_ty = ty.unwrap(ip);
const item = unwrapped_ty.getItem(ip);
return switch (item.tag) {
.type_error_union => @enumFromInt(unwrapped_ty.getExtra(ip).view().items(.@"0")[
item.data + std.meta.fieldIndex(Key.ErrorUnionType, "error_set_type").?
]),
.type_anyerror_union => .anyerror_type,
else => unreachable,
};
}
pub fn errorUnionPayload(ip: *const InternPool, ty: Index) Index {
return ip.indexToKey(ty).error_union_type.payload_type;
const unwrapped_ty = ty.unwrap(ip);
const item = unwrapped_ty.getItem(ip);
return @enumFromInt(switch (item.tag) {
.type_error_union => unwrapped_ty.getExtra(ip).view().items(.@"0")[
item.data + std.meta.fieldIndex(Key.ErrorUnionType, "payload_type").?
],
.type_anyerror_union => item.data,
else => unreachable,
});
}
pub fn dump(ip: *const InternPool) void {
@@ -10695,7 +10776,6 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo
.type_array_big => @sizeOf(Array),
.type_vector => @sizeOf(Vector),
.type_pointer => @sizeOf(Tag.TypePointer),
.type_restricted => @sizeOf(Tag.TypeRestricted),
.type_slice => 0,
.type_optional => 0,
.type_anyframe => 0,
@@ -10718,6 +10798,7 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo
(@as(u32, 4) * @intFromBool(info.flags.has_noalias_bits));
},
.type_restricted => @sizeOf(Tag.TypeRestricted),
.type_struct => b: {
var n: usize = @typeInfo(Tag.TypeStruct).@"struct".fields.len;
const extra = extraDataTrail(extra_list, Tag.TypeStruct, data);
@@ -10908,7 +10989,8 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo
.func_coerced => @sizeOf(Tag.FuncCoerced),
.only_possible_value => 0,
.union_value => @sizeOf(Key.Union),
.bitpack => 2 * @sizeOf(u32),
.bitpack => @sizeOf(Key.Bitpack),
.restricted_value => @sizeOf(Key.RestrictedValue),
.memoized_call => b: {
const info = extraData(extra_list, MemoizedCall, data);
@@ -10957,7 +11039,6 @@ fn dumpAllFallible(ip: *const InternPool, w: *Io.Writer) anyerror!void {
.type_array_big,
.type_vector,
.type_pointer,
.type_restricted,
.type_optional,
.type_anyframe,
.type_error_union,
@@ -10966,6 +11047,7 @@ fn dumpAllFallible(ip: *const InternPool, w: *Io.Writer) anyerror!void {
.type_inferred_error_set,
.type_tuple,
.type_function,
.type_restricted,
.type_struct,
.type_struct_packed_auto,
.type_struct_packed_explicit,
@@ -11023,6 +11105,7 @@ fn dumpAllFallible(ip: *const InternPool, w: *Io.Writer) anyerror!void {
.func_coerced,
.union_value,
.bitpack,
.restricted_value,
.memoized_call,
=> try w.print("{d}", .{data}),
@@ -11696,7 +11779,6 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.type_array_small,
.type_vector,
.type_pointer,
.type_restricted,
.type_slice,
.type_optional,
.type_anyframe,
@@ -11706,6 +11788,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.type_inferred_error_set,
.type_tuple,
.type_function,
.type_restricted,
.type_struct,
.type_struct_packed_auto,
.type_struct_packed_explicit,
@@ -11753,6 +11836,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.aggregate,
.repeated,
.bitpack,
.restricted_value,
=> |t| {
const extra_list = unwrapped_index.getExtra(ip);
return @enumFromInt(extra_list.view().items(.@"0")[item.data + std.meta.fieldIndex(t.Payload(), "ty").?]);
@@ -12027,7 +12111,7 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.bool_false => unreachable,
.empty_tuple => unreachable,
_ => switch (index.unwrap(ip).getTag(ip)) {
_ => return switch (index.unwrap(ip).getTag(ip)) {
.removed => unreachable,
.type_int_signed,
@@ -12042,7 +12126,6 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.type_pointer,
.type_slice,
.type_restricted,
=> .pointer,
.type_optional => .optional,
@@ -12079,6 +12162,8 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.type_function => .@"fn",
.type_restricted => unreachable,
// values, not types
.undef,
.simple_value,
@@ -12128,6 +12213,7 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.aggregate,
.repeated,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
@@ -12358,11 +12444,7 @@ pub fn addFieldTagValue(
/// encoding instead of `Tag.ptr_uav_aligned` when possible.
fn ptrsHaveSameAlignment(ip: *InternPool, a_ty: Index, a_info: Key.PtrType, b_ty: Index) bool {
if (a_ty == b_ty) return true;
const b_info = switch (ip.indexToKey(b_ty)) {
else => unreachable,
.ptr_type => |ptr_type| ptr_type,
.restricted_ptr_type => |restricted_ptr_type| ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
};
const b_info = ip.indexToKey(b_ty).ptr_type;
return a_info.flags.alignment == b_info.flags.alignment and
(a_info.child == b_info.child or a_info.flags.alignment != .none);
}
+191 -177
View File
@@ -1435,8 +1435,8 @@ fn analyzeBodyInner(
.reify_pointer_sentinel_ty => try sema.zirReifyPointerSentinelTy(block, extended),
.reify_tuple => try sema.zirReifyTuple( block, extended),
.reify_pointer => try sema.zirReifyPointer( block, extended),
.reify_restricted => try sema.zirReifyRestricted( block, extended, inst),
.reify_fn => try sema.zirReifyFn( block, extended),
.reify_restricted => try sema.zirReifyRestricted( block, extended, inst),
.reify_struct => try sema.zirReifyStruct( block, extended, inst),
.reify_union => try sema.zirReifyUnion( block, extended, inst),
.reify_enum => try sema.zirReifyEnum( block, extended, inst),
@@ -4641,10 +4641,11 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
const src = block.nodeOffset(inst_data.src_node);
const operand = sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const operand_unrestricted_ty = operand_ty.unrestrictedType(zcu) orelse operand_ty;
if (operand_ty.zigTypeTag(zcu) != .pointer) {
if (operand_unrestricted_ty.zigTypeTag(zcu) != .pointer) {
return sema.fail(block, src, "cannot dereference non-pointer type '{f}'", .{operand_ty.fmt(pt)});
} else switch (operand_ty.ptrSize(zcu)) {
} else switch (operand_unrestricted_ty.ptrSize(zcu)) {
.one, .c => {},
.many => return sema.fail(block, src, "index syntax required for unknown-length pointer type '{f}'", .{operand_ty.fmt(pt)}),
.slice => return sema.fail(block, src, "index syntax required for slice type '{f}'", .{operand_ty.fmt(pt)}),
@@ -4652,7 +4653,7 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
if (sema.resolveValue(operand)) |val| {
// Error for deref of undef pointer, unless the pointee is OPV in which case it's legal.
if (val.isUndef(zcu) and operand_ty.childType(zcu).classify(zcu) != .one_possible_value) {
if (val.isUndef(zcu) and operand_unrestricted_ty.childType(zcu).classify(zcu) != .one_possible_value) {
return sema.fail(block, src, "cannot dereference undefined value", .{});
}
}
@@ -7815,23 +7816,22 @@ fn analyzeDeclLiteral(
block: *Block,
src: LazySrcLoc,
name: InternPool.NullTerminatedString,
orig_ty: Type,
ty: Type,
do_coerce: bool,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const uncoerced_result = res: {
if (orig_ty.toIntern() == .generic_poison_type) {
var unrestricted_ty = ty.unrestrictedType(zcu) orelse ty;
if (unrestricted_ty.toIntern() == .generic_poison_type) {
// Treat this as a normal enum literal.
break :res Air.internedToRef(try pt.intern(.{ .enum_literal = name }));
}
var ty = orig_ty;
while (true) switch (ty.zigTypeTag(zcu)) {
.error_union => ty = ty.errorUnionPayload(zcu),
.optional => ty = ty.optionalChild(zcu),
.pointer => ty = if (ty.isSinglePointer(zcu)) ty.childType(zcu) else break,
while (true) switch (unrestricted_ty.zigTypeTag(zcu)) {
.error_union => unrestricted_ty = unrestricted_ty.errorUnionPayload(zcu),
.optional => unrestricted_ty = unrestricted_ty.optionalChild(zcu),
.pointer => unrestricted_ty = if (unrestricted_ty.isSinglePointer(zcu)) unrestricted_ty.childType(zcu) else break,
.enum_literal, .error_set => {
// Treat this as a normal enum literal.
break :res Air.internedToRef(try pt.intern(.{ .enum_literal = name }));
@@ -7839,7 +7839,7 @@ fn analyzeDeclLiteral(
else => break,
};
break :res try sema.fieldVal(block, src, Air.internedToRef(ty.toIntern()), name, src);
break :res try sema.fieldVal(block, src, Air.internedToRef(unrestricted_ty.toIntern()), name, src);
};
// Decl literals cannot lookup runtime `var`s.
@@ -7848,7 +7848,7 @@ fn analyzeDeclLiteral(
}
if (do_coerce) {
return sema.coerce(block, orig_ty, uncoerced_result, src);
return sema.coerce(block, ty, uncoerced_result, src);
} else {
return uncoerced_result;
}
@@ -16170,16 +16170,17 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const src = block.nodeOffset(inst_data.src_node);
const ty = try sema.resolveType(block, src, inst_data.operand);
const unrestricted_ty = ty.unrestrictedType(zcu) orelse ty;
const type_info_ty = try sema.getBuiltinType(src, .Type);
const type_info_tag_ty = type_info_ty.unionTagType(zcu).?;
try sema.ensureLayoutResolved(ty, src, .type_info);
try sema.ensureLayoutResolved(unrestricted_ty, src, .type_info);
if (ty.typeDeclInst(zcu)) |type_decl_inst| {
if (unrestricted_ty.typeDeclInst(zcu)) |type_decl_inst| {
try sema.declareDependency(.{ .namespace = type_decl_inst });
}
switch (ty.zigTypeTag(zcu)) {
switch (unrestricted_ty.zigTypeTag(zcu)) {
.type,
.void,
.bool,
@@ -16202,7 +16203,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const fn_info_ty = try sema.getBuiltinType(src, .@"Type.Fn");
const param_info_ty = try sema.getBuiltinType(src, .@"Type.Fn.Param");
const func_ty_info = zcu.typeToFunc(ty).?;
const func_ty_info = zcu.typeToFunc(unrestricted_ty).?;
const param_vals = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len);
var func_is_generic = false;
@@ -16308,7 +16309,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.int => {
const int_info_ty = try sema.getBuiltinType(src, .@"Type.Int");
const signedness_ty = try sema.getBuiltinType(src, .Signedness);
const info = ty.intInfo(zcu);
const info = unrestricted_ty.intInfo(zcu);
const field_values = .{
// signedness: Signedness,
(try pt.enumValueFieldIndex(signedness_ty, @intFromEnum(info.signedness))).toIntern(),
@@ -16326,7 +16327,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_vals = .{
// bits: u16,
(try pt.intValue(.u16, ty.floatBits(zcu.getTarget()))).toIntern(),
(try pt.intValue(.u16, unrestricted_ty.floatBits(zcu.getTarget()))).toIntern(),
};
return Air.internedToRef((try pt.internUnion(.{
.ty = type_info_ty.toIntern(),
@@ -16335,7 +16336,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
})));
},
.pointer => {
const info = ty.ptrInfo(zcu);
const info = unrestricted_ty.ptrInfo(zcu);
const alignment_ty = try pt.optionalType(.usize_type);
const alignment_val: Value = val: {
const bytes = info.flags.alignment.toByteUnits() orelse {
@@ -16382,7 +16383,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.array => {
const array_field_ty = try sema.getBuiltinType(src, .@"Type.Array");
const info = ty.arrayInfo(zcu);
const info = unrestricted_ty.arrayInfo(zcu);
const field_values = .{
// len: comptime_int,
(try pt.intValue(.comptime_int, info.len)).toIntern(),
@@ -16400,7 +16401,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.vector => {
const vector_field_ty = try sema.getBuiltinType(src, .@"Type.Vector");
const info = ty.arrayInfo(zcu);
const info = unrestricted_ty.arrayInfo(zcu);
const field_values = .{
// len: comptime_int,
(try pt.intValue(.comptime_int, info.len)).toIntern(),
@@ -16418,7 +16419,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_values = .{
// child: type,
ty.optionalChild(zcu).toIntern(),
unrestricted_ty.optionalChild(zcu).toIntern(),
};
return Air.internedToRef((try pt.internUnion(.{
.ty = type_info_ty.toIntern(),
@@ -16433,7 +16434,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// Build our list of Error values
// Optional value is only null if anyerror
// Value can be zero-length slice otherwise
const error_field_vals = switch (try sema.resolveInferredErrorSetTy(block, src, ty.toIntern())) {
const error_field_vals = switch (try sema.resolveInferredErrorSetTy(block, src, unrestricted_ty.toIntern())) {
.anyerror_type => null,
else => |err_set_ty_index| blk: {
const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
@@ -16522,9 +16523,9 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field_values = .{
// error_set: type,
ty.errorUnionSet(zcu).toIntern(),
unrestricted_ty.errorUnionSet(zcu).toIntern(),
// payload: type,
ty.errorUnionPayload(zcu).toIntern(),
unrestricted_ty.errorUnionPayload(zcu).toIntern(),
};
return Air.internedToRef((try pt.internUnion(.{
.ty = type_info_ty.toIntern(),
@@ -16533,7 +16534,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
})));
},
.@"enum" => {
const enum_obj = ip.loadEnumType(ty.toIntern());
const enum_obj = ip.loadEnumType(unrestricted_ty.toIntern());
const is_exhaustive: Value = .makeBool(!enum_obj.nonexhaustive);
const enum_field_ty = try sema.getBuiltinType(src, .@"Type.EnumField");
@@ -16615,13 +16616,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });
};
const decls_val = try sema.typeInfoDecls(src, ip.loadEnumType(ty.toIntern()).namespace.toOptional());
const decls_val = try sema.typeInfoDecls(src, ip.loadEnumType(unrestricted_ty.toIntern()).namespace.toOptional());
const type_enum_ty = try sema.getBuiltinType(src, .@"Type.Enum");
const field_values = .{
// tag_type: type,
ip.loadEnumType(ty.toIntern()).int_tag_type,
ip.loadEnumType(unrestricted_ty.toIntern()).int_tag_type,
// fields: []const EnumField,
fields_val,
// decls: []const Declaration,
@@ -16639,7 +16640,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const type_union_ty = try sema.getBuiltinType(src, .@"Type.Union");
const union_field_ty = try sema.getBuiltinType(src, .@"Type.UnionField");
const union_obj = ip.loadUnionType(ty.toIntern());
const union_obj = ip.loadUnionType(unrestricted_ty.toIntern());
const enum_obj = ip.loadEnumType(union_obj.enum_tag_type);
const layout = union_obj.layout;
@@ -16678,7 +16679,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const alignment_ty = try pt.optionalType(.usize_type);
const alignment_val: Value = val: {
const a: Alignment = switch (layout) {
.auto, .@"extern" => ty.explicitFieldAlignment(field_index, zcu),
.auto, .@"extern" => unrestricted_ty.explicitFieldAlignment(field_index, zcu),
.@"packed" => .none,
};
const bytes = a.toByteUnits() orelse {
@@ -16730,11 +16731,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });
};
const decls_val = try sema.typeInfoDecls(src, ty.getNamespaceIndex(zcu).toOptional());
const decls_val = try sema.typeInfoDecls(src, unrestricted_ty.getNamespaceIndex(zcu).toOptional());
const enum_tag_ty_val = try pt.intern(.{ .opt = .{
.ty = (try pt.optionalType(.type_type)).toIntern(),
.val = if (ty.unionTagType(zcu)) |tag_ty| tag_ty.toIntern() else .none,
.val = if (unrestricted_ty.unionTagType(zcu)) |tag_ty| tag_ty.toIntern() else .none,
} });
const container_layout_ty = try sema.getBuiltinType(src, .@"Type.ContainerLayout");
@@ -16763,7 +16764,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
var struct_field_vals: []InternPool.Index = &.{};
defer gpa.free(struct_field_vals);
fv: {
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
const struct_type = switch (ip.indexToKey(unrestricted_ty.toIntern())) {
.tuple_type => |tuple_type| {
struct_field_vals = try gpa.alloc(InternPool.Index, tuple_type.types.len);
for (struct_field_vals, 0..) |*struct_field_val, field_index| {
@@ -16815,10 +16816,10 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
break :fv;
},
.struct_type => ip.loadStructType(ty.toIntern()),
.struct_type => ip.loadStructType(unrestricted_ty.toIntern()),
else => unreachable,
};
try sema.ensureStructDefaultsResolved(ty, src); // can't do this sooner, since it's not allowed on tuples
try sema.ensureStructDefaultsResolved(unrestricted_ty, src); // can't do this sooner, since it's not allowed on tuples
struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len);
for (struct_field_vals, 0..) |*field_val, field_index| {
@@ -16859,7 +16860,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const alignment_ty = try pt.optionalType(.usize_type);
const alignment_val: Value = val: {
const a: Alignment = switch (struct_type.layout) {
.auto, .@"extern" => ty.explicitFieldAlignment(field_index, zcu),
.auto, .@"extern" => unrestricted_ty.explicitFieldAlignment(field_index, zcu),
.@"packed" => .none,
};
const bytes = a.toByteUnits() orelse {
@@ -16916,11 +16917,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
} });
};
const decls_val = try sema.typeInfoDecls(src, ty.getNamespace(zcu));
const decls_val = try sema.typeInfoDecls(src, unrestricted_ty.getNamespace(zcu));
const backing_integer_val = try pt.intern(.{ .opt = .{
.ty = (try pt.optionalType(.type_type)).toIntern(),
.val = if (zcu.typeToPackedStruct(ty)) |struct_obj| val: {
.val = if (zcu.typeToPackedStruct(unrestricted_ty)) |struct_obj| val: {
assert(Type.fromInterned(struct_obj.packed_backing_int_type).isInt(zcu));
break :val struct_obj.packed_backing_int_type;
} else .none,
@@ -16928,7 +16929,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const container_layout_ty = try sema.getBuiltinType(src, .@"Type.ContainerLayout");
const layout = ty.containerLayout(zcu);
const layout = unrestricted_ty.containerLayout(zcu);
const field_values = [_]InternPool.Index{
// layout: ContainerLayout,
@@ -16940,7 +16941,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// decls: []const Declaration,
decls_val,
// is_tuple: bool,
Value.makeBool(ty.isTuple(zcu)).toIntern(),
Value.makeBool(unrestricted_ty.isTuple(zcu)).toIntern(),
};
return Air.internedToRef((try pt.internUnion(.{
.ty = type_info_ty.toIntern(),
@@ -16951,7 +16952,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.@"opaque" => {
const type_opaque_ty = try sema.getBuiltinType(src, .@"Type.Opaque");
const decls_val = try sema.typeInfoDecls(src, ty.getNamespace(zcu));
const decls_val = try sema.typeInfoDecls(src, unrestricted_ty.getNamespace(zcu));
const field_values = .{
// decls: []const Declaration,
@@ -19212,7 +19213,7 @@ fn arrayInitAnon(
} });
const elem = sema.resolveInst(operand);
types[i] = sema.typeOf(elem).toIntern();
if (Type.fromInterned(types[i]).zigTypeTag(zcu) == .@"opaque") {
if (ip.isOpaqueType(types[i])) {
const msg = msg: {
const msg = try sema.errMsg(operand_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{});
errdefer msg.destroy(gpa);
@@ -19599,22 +19600,24 @@ fn zirUnaryMath(
}
fn zirTagName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const operand_src = block.builtinCallArgSrc(inst_data.src_node, 0);
const src = block.nodeOffset(inst_data.src_node);
const operand = sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const enum_ty = switch (operand_ty.zigTypeTag(zcu)) {
const unrestricted_operand_ty = operand_ty.unrestrictedType(zcu) orelse operand_ty;
const enum_ty = switch (unrestricted_operand_ty.zigTypeTag(zcu)) {
.enum_literal => {
const val = (try sema.resolveDefinedValue(block, operand_src, operand)).?;
const tag_name = ip.indexToKey(val.toIntern()).enum_literal;
return sema.addNullTerminatedStrLit(tag_name);
},
.@"enum" => operand_ty,
.@"union" => operand_ty.unionTagType(zcu) orelse
.@"enum" => unrestricted_operand_ty,
.@"union" => unrestricted_operand_ty.unionTagType(zcu) orelse
return sema.fail(block, src, "union '{f}' is untagged", .{operand_ty.fmt(pt)}),
else => return sema.fail(block, operand_src, "expected enum or union; found '{f}'", .{
operand_ty.fmt(pt),
@@ -19881,62 +19884,6 @@ fn zirReifyPointer(
}));
}
fn zirReifyRestricted(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = comp.gpa;
const io = comp.io;
const ip = &zcu.intern_pool;
const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const tracked_inst = try block.trackZir(inst);
const src: LazySrcLoc = .{
.base_node_inst = tracked_inst,
.offset = .nodeOffset(.zero),
};
const ptr_type_src: LazySrcLoc = .{
.base_node_inst = tracked_inst,
.offset = .{ .node_offset_builtin_call_arg = .{
.builtin_call_node = extra.node,
.arg_index = 0,
} },
};
const operand = try sema.resolveType(block, src, extra.operand);
const unrestricted_ptr_type: Type = switch (ip.indexToKey(operand.toIntern())) {
else => return sema.fail(block, ptr_type_src, "expected pointer type, found '{f}'", .{operand.fmt(pt)}),
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.one, .many, .c => operand,
.slice => return sema.fail(block, ptr_type_src, "slice types cannot be restricted", .{}),
},
.restricted_ptr_type => |restricted_ptr_type| .fromInterned(restricted_ptr_type.unrestricted_ptr_type),
};
switch (try ip.getReifiedRestrictedType(gpa, io, pt.tid, tracked_inst, unrestricted_ptr_type.toIntern())) {
.existing => |ty| {
try sema.addTypeReferenceEntry(src, .fromInterned(ty));
// No need for `ensureNamespaceUpToDate` because this type doesn't have a namespace.
return .fromIntern(ty);
},
.wip => |wip| {
errdefer wip.cancel(ip, pt.tid);
const type_name, _ = try sema.computeTypeName(block, wip.index, name_strategy, "restricted", inst);
wip.setName(ip, type_name);
if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index);
try sema.addTypeReferenceEntry(src, .fromInterned(wip.index));
return .fromIntern(wip.index);
},
}
}
fn zirReifyFn(
sema: *Sema,
block: *Block,
@@ -20035,6 +19982,58 @@ fn zirReifyFn(
}));
}
fn zirReifyRestricted(
sema: *Sema,
block: *Block,
extended: Zir.Inst.Extended.InstData,
inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = comp.gpa;
const io = comp.io;
const ip = &zcu.intern_pool;
const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
const extra = sema.code.extraData(Zir.Inst.ReifyRestricted, extended.operand).data;
const tracked_inst = try block.trackZir(inst);
const src: LazySrcLoc = .{
.base_node_inst = tracked_inst,
.offset = .nodeOffset(.zero),
};
const operand_src: LazySrcLoc = .{
.base_node_inst = tracked_inst,
.offset = .{ .node_offset_builtin_call_arg = .{
.builtin_call_node = .zero,
.arg_index = 0,
} },
};
const operand = try sema.resolveType(block, src, extra.unrestricted_ty);
const unrestricted_type = switch (ip.indexToKey(operand.toIntern())) {
else => operand.toIntern(),
.restricted_type => return sema.fail(block, operand_src, "cannot restrict restricted type '{f}'", .{operand.fmt(pt)}),
};
switch (try ip.getReifiedRestrictedType(gpa, io, pt.tid, tracked_inst, unrestricted_type)) {
.existing => |ty| {
try sema.addTypeReferenceEntry(src, .fromInterned(ty));
// No need for `ensureNamespaceUpToDate` because this type doesn't have a namespace.
return .fromIntern(ty);
},
.wip => |wip| {
errdefer wip.cancel(ip, pt.tid);
const type_name, _ = try sema.computeTypeName(block, wip.index, name_strategy, "restricted", inst);
wip.setName(ip, type_name);
if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index);
try sema.addTypeReferenceEntry(src, .fromInterned(wip.index));
return .fromIntern(wip.index);
},
}
}
fn zirReifyStruct(
sema: *Sema,
block: *Block,
@@ -25149,7 +25148,7 @@ pub fn validateVarType(
return sema.failWithOwnedErrorMsg(block, msg);
}
} else {
if (var_ty.zigTypeTag(zcu) == .@"opaque") {
if (zcu.intern_pool.isOpaqueType(var_ty.toIntern())) {
return sema.fail(
block,
src,
@@ -25166,8 +25165,11 @@ pub fn validateVarType(
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsComptime(msg, src, var_ty);
if (var_ty.zigTypeTag(zcu) == .comptime_int or var_ty.zigTypeTag(zcu) == .comptime_float) {
try sema.errNote(src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{});
switch (var_ty.toIntern()) {
else => {},
.comptime_int_type,
.comptime_float_type,
=> try sema.errNote(src, msg, "to modify this variable at runtime, it must be given an explicit fixed-size number type", .{}),
}
break :msg msg;
@@ -25756,23 +25758,24 @@ fn fieldVal(
const ip = &zcu.intern_pool;
const object_src = src; // TODO better source location
const object_ty = sema.typeOf(object);
const unrestricted_object_ty = object_ty.unrestrictedType(zcu) orelse object_ty;
// Zig allows dereferencing a single pointer during field lookup. Note that
// we don't actually need to generate the dereference some field lookups, like the
// length of arrays and other comptime operations.
const is_pointer_to = object_ty.isSinglePointer(zcu);
const is_pointer_to = unrestricted_object_ty.isSinglePointer(zcu);
const inner_ty = if (is_pointer_to)
object_ty.childType(zcu)
unrestricted_object_ty.childType(zcu)
else
object_ty;
unrestricted_object_ty;
switch (inner_ty.zigTypeTag(zcu)) {
.array => {
if (field_name.eqlSlice("len", ip)) {
return Air.internedToRef((try pt.intValue(.usize, inner_ty.arrayLen(zcu))).toIntern());
} else if (field_name.eqlSlice("ptr", ip) and is_pointer_to) {
const ptr_info = object_ty.ptrInfo(zcu);
const ptr_info = unrestricted_object_ty.ptrInfo(zcu);
const result_ty = try pt.ptrType(.{
.child = Type.fromInterned(ptr_info.child).childType(zcu).toIntern(),
.sentinel = if (inner_ty.sentinel(zcu)) |s| s.toIntern() else .none,
@@ -25829,11 +25832,12 @@ fn fieldVal(
object;
const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
const child_type = val.toType();
const child_ty = val.toType();
const unrestricted_child_ty = child_ty.unrestrictedType(zcu) orelse child_ty;
switch (child_type.zigTypeTag(zcu)) {
switch (unrestricted_child_ty.zigTypeTag(zcu)) {
.error_set => {
const err_set_ty: Type = err_set: switch (ip.indexToKey(child_type.toIntern())) {
const err_set_ty: Type = err_set: switch (ip.indexToKey(unrestricted_child_ty.toIntern())) {
.inferred_error_set_type => |func_index| {
try sema.ensureFuncIesResolved(block, src, func_index);
const resolved_ies = ip.funcIesResolvedUnordered(func_index);
@@ -25841,9 +25845,9 @@ fn fieldVal(
},
.error_set_type => |err_set| if (err_set.nameIndex(ip, field_name) == null) {
return sema.fail(block, src, "no error named '{f}' in '{f}'", .{
field_name.fmt(ip), child_type.fmt(pt),
field_name.fmt(ip), child_ty.fmt(pt),
});
} else child_type,
} else unrestricted_child_ty,
.simple_type => |t| {
assert(t == .anyerror);
_ = try pt.getErrorValue(field_name);
@@ -25857,42 +25861,42 @@ fn fieldVal(
} }));
},
.@"union" => {
if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
if (try sema.namespaceLookupVal(block, src, unrestricted_child_ty.getNamespaceIndex(zcu), field_name)) |inst| {
return inst;
}
try sema.ensureLayoutResolved(child_type, src, .field_used);
if (child_type.unionTagType(zcu)) |enum_ty| {
try sema.ensureLayoutResolved(unrestricted_child_ty, src, .field_used);
if (unrestricted_child_ty.unionTagType(zcu)) |enum_ty| {
if (enum_ty.enumFieldIndex(field_name, zcu)) |field_index_usize| {
const field_index: u32 = @intCast(field_index_usize);
return Air.internedToRef((try pt.enumValueFieldIndex(enum_ty, field_index)).toIntern());
}
}
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
return sema.failWithBadMemberAccess(block, child_ty, field_name_src, field_name);
},
.@"enum" => {
if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
if (try sema.namespaceLookupVal(block, src, unrestricted_child_ty.getNamespaceIndex(zcu), field_name)) |inst| {
return inst;
}
try sema.ensureLayoutResolved(child_type, src, .field_used);
const field_index_usize = child_type.enumFieldIndex(field_name, zcu) orelse
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
try sema.ensureLayoutResolved(unrestricted_child_ty, src, .field_used);
const field_index_usize = unrestricted_child_ty.enumFieldIndex(field_name, zcu) orelse
return sema.failWithBadMemberAccess(block, child_ty, field_name_src, field_name);
const field_index: u32 = @intCast(field_index_usize);
const enum_val = try pt.enumValueFieldIndex(child_type, field_index);
const enum_val = try pt.enumValueFieldIndex(unrestricted_child_ty, field_index);
return Air.internedToRef(enum_val.toIntern());
},
.@"struct", .@"opaque" => {
if (!child_type.isTuple(zcu) and child_type.toIntern() != .anyopaque_type) {
if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| {
if (!unrestricted_child_ty.isTuple(zcu) and unrestricted_child_ty.toIntern() != .anyopaque_type) {
if (try sema.namespaceLookupVal(block, src, unrestricted_child_ty.getNamespaceIndex(zcu), field_name)) |inst| {
return inst;
}
}
return sema.failWithBadMemberAccess(block, child_type, src, field_name);
return sema.failWithBadMemberAccess(block, child_ty, src, field_name);
},
else => return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "type '{f}' has no members", .{child_type.fmt(pt)});
const msg = try sema.errMsg(src, "type '{f}' has no members", .{child_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
if (child_type.isSlice(zcu)) try sema.errNote(src, msg, "slice values have 'len' and 'ptr' members", .{});
if (child_type.zigTypeTag(zcu) == .array) try sema.errNote(src, msg, "array values have 'len' member", .{});
if (unrestricted_child_ty.isSlice(zcu)) try sema.errNote(src, msg, "slice values have 'len' and 'ptr' members", .{});
if (unrestricted_child_ty.zigTypeTag(zcu) == .array) try sema.errNote(src, msg, "array values have 'len' member", .{});
break :msg msg;
}),
}
@@ -27471,6 +27475,29 @@ fn coerceExtra(
const maybe_inst_val = sema.resolveValue(inst);
// Restricted coercions
if (maybe_inst_val != null) {
if (dest_ty.unrestrictedType(zcu)) |dest_unrestricted_ty| {
if (sema.resolveValue(try sema.coerceExtra(block, dest_unrestricted_ty, inst, inst_src, opts))) |dest_unrestricted_val| {
return .fromIntern(try pt.intern(if (dest_unrestricted_val.isUndef(zcu)) .{
.undef = dest_ty.toIntern(),
} else .{ .restricted_value = .{
.ty = dest_ty.toIntern(),
.unrestricted_value = dest_unrestricted_val.toIntern(),
} }));
}
}
}
if (inst_ty.unrestrictedType(zcu)) |inst_unrestricted_ty| {
const unrestricted_inst: Air.Inst.Ref = if (maybe_inst_val) |inst_val|
.fromIntern(ip.indexToKey(inst_val.toIntern()).restricted_value.unrestricted_value)
else unrestricted_inst: {
try sema.requireRuntimeBlock(block, inst_src, null);
break :unrestricted_inst try sema.unwrapRestricted(block, inst_unrestricted_ty, inst, inst_src);
};
return sema.coerceExtra(block, dest_ty, unrestricted_inst, inst_src, opts);
}
var in_memory_result = try sema.coerceInMemoryAllowed(block, dest_ty, inst_ty, false, target, dest_ty_src, inst_src, maybe_inst_val);
if (in_memory_result == .ok) {
if (maybe_inst_val) |val| {
@@ -27796,22 +27823,6 @@ fn coerceExtra(
return sema.coerceCompatiblePtrs(block, dest_ty, slice_ptr, inst_src);
},
}
// Restricted coercions
if (maybe_inst_val != null) {
if (dest_ty.unrestrictedType(zcu)) |dest_unrestricted_ty| {
if (sema.resolveValue(try sema.coerceExtra(block, dest_unrestricted_ty, inst, inst_src, opts))) |inst_val| {
return .fromIntern(try ip.getCoerced(gpa, io, pt.tid, inst_val.toIntern(), dest_ty.toIntern()));
}
}
}
if (inst_ty.unrestrictedType(zcu)) |inst_unrestricted_ty| {
const inst_unrestricted: Air.Inst.Ref = if (maybe_inst_val) |inst_val|
.fromIntern(try ip.getCoerced(gpa, io, pt.tid, inst_val.toIntern(), inst_unrestricted_ty.toIntern()))
else
try sema.unwrapRestrictedPtr(block, inst_unrestricted_ty, inst, inst_src);
return sema.coerceExtra(block, dest_ty, inst_unrestricted, inst_src, opts);
}
},
.int, .comptime_int => switch (inst_ty.zigTypeTag(zcu)) {
.float, .comptime_float => float: {
@@ -28126,6 +28137,7 @@ fn coerceInMemory(
const InMemoryCoercionResult = union(enum) {
ok,
restricted: Pair,
no_match: Pair,
int_not_coercible: Int,
comptime_int_not_coercible: TypeValuePair,
@@ -28160,7 +28172,6 @@ const InMemoryCoercionResult = union(enum) {
ptr_alignment: AlignPair,
double_ptr_to_anyopaque: Pair,
slice_to_anyopaque: Pair,
ptr_restricted: Pair,
const Pair = struct {
actual: Type,
@@ -28247,6 +28258,16 @@ const InMemoryCoercionResult = union(enum) {
var cur = res;
while (true) switch (cur.*) {
.ok => unreachable,
.restricted => |pair| {
for ([_]Type{ pair.actual, pair.wanted }) |restricted_type| {
try sema.addDeclaredHereNote(msg, restricted_type);
const unrestricted_type = restricted_type.unrestrictedType(pt.zcu) orelse continue;
try sema.errNote(src, msg, "restricted type '{f}' is not guaranteed to have the same representation as its unrestricted type '{f}'", .{
restricted_type.fmt(pt), unrestricted_type.fmt(pt),
});
}
break;
},
.no_match => |types| {
try sema.addDeclaredHereNote(msg, types.wanted);
try sema.addDeclaredHereNote(msg, types.actual);
@@ -28492,15 +28513,6 @@ const InMemoryCoercionResult = union(enum) {
try sema.errNote(src, msg, "consider using '.ptr'", .{});
break;
},
.ptr_restricted => |pair| {
for ([_]Type{ pair.actual, pair.wanted }) |restricted_ptr_type| {
const unrestricted_ptr_type = restricted_ptr_type.unrestrictedType(pt.zcu) orelse continue;
try sema.errNote(src, msg, "restricted type '{f}' is not guaranteed to have the same representation as its unrestricted type '{f}'", .{
restricted_ptr_type.fmt(pt), unrestricted_ptr_type.fmt(pt),
});
}
break;
},
};
}
};
@@ -28538,6 +28550,7 @@ pub fn coerceInMemoryAllowed(
) CompileError!InMemoryCoercionResult {
const pt = sema.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
if (src_val) |val| {
assert(val.typeOf(zcu).toIntern() == src_ty.toIntern());
@@ -28546,6 +28559,12 @@ pub fn coerceInMemoryAllowed(
if (dest_ty.eql(src_ty, zcu))
return .ok;
// Restricted types have a different in-memory representation depending on the safety mode of the module that created them.
if (ip.isRestrictedType(dest_ty.toIntern()) or ip.isRestrictedType(src_ty.toIntern())) return .{ .restricted = .{
.actual = src_ty,
.wanted = dest_ty,
} };
const dest_tag = dest_ty.zigTypeTag(zcu);
const src_tag = src_ty.zigTypeTag(zcu);
@@ -29142,12 +29161,6 @@ fn coerceInMemoryAllowedPtrs(
}
}
// Restricted pointers have a different in-memory representation depending on the safety mode of the module that created it.
if (dest_ty.unrestrictedType(zcu) != null or src_ty.unrestrictedType(zcu) != null) return .{ .ptr_restricted = .{
.actual = src_ty,
.wanted = dest_ty,
} };
return .ok;
}
@@ -29295,7 +29308,7 @@ fn storePtr2(
try sema.requireRuntimeBlock(block, src, runtime_src);
const unrestricted_ptr = if (ptr_ty.unrestrictedType(zcu)) |unrestricted_ptr_ty|
try sema.unwrapRestrictedPtr(block, unrestricted_ptr_ty, ptr, ptr_src)
try sema.unwrapRestricted(block, unrestricted_ptr_ty, ptr, ptr_src)
else
ptr;
@@ -30185,7 +30198,7 @@ fn analyzeNavRefInner(sema: *Sema, block: *Block, src: LazySrcLoc, orig_nav_inde
const nav_index = nav: {
const orig_nav = ip.getNav(orig_nav_index);
if (orig_nav.resolved.?.is_extern_decl or ip.zigTypeTag(orig_nav.resolved.?.type) == .@"fn") {
if (orig_nav.resolved.?.is_extern_decl or ip.isFunctionType(orig_nav.resolved.?.type)) {
// A pointer to this `Nav` might actually be encoded as a pointer to a different `Nav`
// because this is either an `extern` definition or an `extern` alias. (The latter case
// is unsolved language weirdness; see https://github.com/ziglang/zig/issues/21027.) To
@@ -30266,7 +30279,7 @@ fn maybeQueueFuncBodyAnalysis(sema: *Sema, block: *Block, src: LazySrcLoc, nav_i
try sema.ensureNavResolved(block, src, nav_index, .type);
const nav_ty: Type = .fromInterned(ip.getNav(nav_index).resolved.?.type);
if (nav_ty.zigTypeTag(zcu) != .@"fn") return;
if (!ip.isFunctionType(nav_ty.toIntern())) return;
if (!nav_ty.fnHasRuntimeBits(zcu)) return;
try sema.ensureNavResolved(block, src, nav_index, .fully);
@@ -30349,8 +30362,9 @@ fn analyzeLoad(
const pt = sema.pt;
const zcu = pt.zcu;
const ptr_ty = sema.typeOf(ptr);
const elem_ty = switch (ptr_ty.zigTypeTag(zcu)) {
.pointer => ptr_ty.childType(zcu),
const unrestricted_ptr_ty = ptr_ty.unrestrictedType(zcu) orelse ptr_ty;
const elem_ty = switch (unrestricted_ptr_ty.zigTypeTag(zcu)) {
.pointer => unrestricted_ptr_ty.childType(zcu),
else => return sema.fail(block, ptr_src, "expected pointer, found '{f}'", .{ptr_ty.fmt(pt)}),
};
@@ -30379,8 +30393,8 @@ fn analyzeLoad(
break :msg msg;
});
const unrestricted_ptr = if (ptr_ty.unrestrictedType(zcu)) |unrestricted_ptr_ty|
try sema.unwrapRestrictedPtr(block, unrestricted_ptr_ty, ptr, ptr_src)
const unrestricted_ptr = if (unrestricted_ptr_ty.toIntern() != ptr_ty.toIntern())
try sema.unwrapRestricted(block, unrestricted_ptr_ty, ptr, ptr_src)
else
ptr;
@@ -31596,17 +31610,17 @@ fn wrapErrorUnionSet(
}
}
fn unwrapRestrictedPtr(
fn unwrapRestricted(
sema: *Sema,
block: *Block,
unrestricted_ptr_ty: Type,
unrestricted_ty: Type,
ptr: Air.Inst.Ref,
ptr_src: LazySrcLoc,
) !Air.Inst.Ref {
return block.addTyOp(if (block.wantSafety()) tag: {
try sema.preparePanicId(ptr_src, .corrupt_restricted_pointer);
try sema.preparePanicId(ptr_src, .corrupt_restricted_value);
break :tag .unwrap_restricted_safe;
} else .unwrap_restricted, unrestricted_ptr_ty, ptr);
} else .unwrap_restricted, unrestricted_ty, ptr);
}
/// Returns the enum tag value for the active tag of a tagged union value.
@@ -34431,7 +34445,7 @@ fn getExpectedBuiltinFnType(sema: *Sema, decl: Zcu.BuiltinDecl) CompileError!Typ
.@"panic.copyLenMismatch",
.@"panic.memcpyAlias",
.@"panic.noreturnReturned",
.@"panic.corruptRestrictedPointer",
.@"panic.corruptRestrictedValue",
=> try pt.funcType(.{
.param_types = &.{},
.return_type = .noreturn_type,
+3 -1
View File
@@ -239,13 +239,13 @@ const UnpackValueBits = struct {
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.restricted_type,
.struct_type,
.tuple_type,
.union_type,
@@ -274,6 +274,8 @@ const UnpackValueBits = struct {
.bitpack => |bitpack| try unpack.primitive(.fromInterned(bitpack.backing_int_val)),
.restricted_value => |restricted_value| try unpack.add(.fromInterned(restricted_value.unrestricted_value)),
.aggregate => switch (ty.zigTypeTag(zcu)) {
.vector => {
const len: usize = @intCast(ty.arrayLen(zcu));
+2 -1
View File
@@ -84,7 +84,6 @@ fn ensureLayoutResolvedInner(sema: *Sema, ty: Type, orig_ty: Type, reason: *cons
switch (ip.indexToKey(ty.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.anyframe_type,
.simple_type,
.opaque_type,
@@ -106,6 +105,7 @@ fn ensureLayoutResolvedInner(sema: *Sema, ty: Type, orig_ty: Type, reason: *cons
.tuple_type => |tuple| for (tuple.types.get(ip)) |field_ty| {
try ensureLayoutResolvedInner(sema, .fromInterned(field_ty), orig_ty, reason);
},
.restricted_type => |restricted_type| return ensureLayoutResolvedInner(sema, .fromInterned(restricted_type.unrestricted_type), orig_ty, reason),
.struct_type, .union_type, .enum_type => {
try sema.declareDependency(.{ .type_layout = ty.toIntern() });
try sema.addReferenceEntry(null, reason.src, .wrap(.{ .type_layout = ty.toIntern() }));
@@ -132,6 +132,7 @@ fn ensureLayoutResolvedInner(sema: *Sema, ty: Type, orig_ty: Type, reason: *cons
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
+167 -122
View File
@@ -165,7 +165,6 @@ pub fn classify(start_ty: Type, zcu: *const Zcu) Class {
.error_set_type,
.inferred_error_set_type,
.ptr_type,
.restricted_ptr_type,
.anyframe_type,
=> .runtime,
@@ -201,6 +200,10 @@ pub fn classify(start_ty: Type, zcu: *const Zcu) Class {
cur_ty = .fromInterned(child_ty_ip);
continue;
},
.restricted_type => |restricted_type| {
cur_ty = .fromInterned(restricted_type.unrestricted_type);
continue;
},
.tuple_type => |tuple| {
@branchHint(.unlikely);
break classifyTuple(tuple.types.get(ip), tuple.values.get(ip), zcu);
@@ -254,6 +257,7 @@ pub fn classify(start_ty: Type, zcu: *const Zcu) Class {
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
@@ -374,28 +378,13 @@ pub fn arrayInfo(self: Type, zcu: *const Zcu) ArrayInfo {
}
pub fn ptrInfo(ty: Type, zcu: *const Zcu) InternPool.Key.PtrType {
return ty.ptrInfoOrNull(&zcu.intern_pool, .{}).?;
}
pub fn ptrInfoOrNull(ty: Type, ip: *const InternPool, comptime opts: struct {
allow_optional: bool = true,
allow_restricted: bool = true,
}) ?InternPool.Key.PtrType {
return switch (ip.indexToKey(ty.toIntern())) {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |p| p,
.restricted_ptr_type => |rp| if (opts.allow_restricted)
ip.indexToKey(rp.unrestricted_ptr_type).ptr_type
else
null,
.opt_type => |child| if (opts.allow_optional) switch (ip.indexToKey(child)) {
.opt_type => |child| switch (zcu.intern_pool.indexToKey(child)) {
.ptr_type => |p| p,
.restricted_ptr_type => |rp| if (opts.allow_restricted)
ip.indexToKey(rp.unrestricted_ptr_type).ptr_type
else
null,
else => null, // not a pointer type
} else null,
else => null, // not a pointer type
else => unreachable,
},
else => unreachable,
};
}
@@ -504,10 +493,6 @@ pub fn print(ty: Type, writer: *std.Io.Writer, pt: Zcu.PerThread, ctx: ?*Compari
try print(Type.fromInterned(info.child), writer, pt, ctx);
return;
},
.restricted_ptr_type => {
const name = ip.loadRestrictedType(ty.toIntern()).name;
try writer.print("{f}", .{name.fmt(ip)});
},
.array_type => |array_type| {
if (array_type.sentinel == .none) {
try writer.print("[{d}]", .{array_type.len});
@@ -607,6 +592,10 @@ pub fn print(ty: Type, writer: *std.Io.Writer, pt: Zcu.PerThread, ctx: ?*Compari
.generic_poison => unreachable,
},
.restricted_type => {
const name = ip.loadRestrictedType(ty.toIntern()).name;
try writer.print("{f}", .{name.fmt(ip)});
},
.struct_type => {
const name = ip.loadStructType(ty.toIntern()).name;
try writer.print("{f}", .{name.fmt(ip)});
@@ -708,6 +697,7 @@ pub fn print(ty: Type, writer: *std.Io.Writer, pt: Zcu.PerThread, ctx: ?*Compari
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
@@ -767,13 +757,13 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool {
.vector_type,
=> true,
.restricted_ptr_type,
.error_union_type,
.error_set_type,
.inferred_error_set_type,
.tuple_type,
.opaque_type,
.anyframe_type,
.restricted_type,
// These are function bodies, not function pointers.
.func_type,
=> false,
@@ -847,6 +837,7 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool {
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
@@ -898,10 +889,7 @@ pub fn fnHasRuntimeBits(fn_ty: Type, zcu: *const Zcu) bool {
/// Like `hasRuntimeBits`, but also returns `true` for runtime functions.
pub fn isRuntimeFnOrHasRuntimeBits(ty: Type, zcu: *const Zcu) bool {
switch (ty.zigTypeTag(zcu)) {
.@"fn" => return ty.fnHasRuntimeBits(zcu),
else => return ty.hasRuntimeBits(zcu),
}
return if (zcu.intern_pool.isFunctionType(ty.toIntern())) ty.fnHasRuntimeBits(zcu) else ty.hasRuntimeBits(zcu);
}
/// Returns whether `ty` is NPV, meaning it is "like `noreturn`" in a sense. See doc comments on
@@ -914,13 +902,22 @@ pub fn isNoReturn(ty: Type, zcu: *const Zcu) bool {
/// Never returns `none`. Asserts that all necessary type resolution is already done.
pub fn ptrAlignment(ptr_ty: Type, zcu: *Zcu) Alignment {
const ptr_key = ptr_ty.ptrInfo(zcu);
const ip = &zcu.intern_pool;
const ptr_key: InternPool.Key.PtrType = switch (ip.indexToKey(ptr_ty.toIntern())) {
.ptr_type => |key| key,
.opt_type => |child| ip.indexToKey(child).ptr_type,
else => unreachable,
};
if (ptr_key.flags.alignment != .none) return ptr_key.flags.alignment;
return Type.fromInterned(ptr_key.child).abiAlignment(zcu);
}
pub fn ptrAddressSpace(ty: Type, zcu: *const Zcu) std.builtin.AddressSpace {
return ty.ptrInfo(zcu).flags.address_space;
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.address_space,
.opt_type => |child| zcu.intern_pool.indexToKey(child).ptr_type.flags.address_space,
else => unreachable,
};
}
/// Never returns `.none`. Asserts that the layout of `ty` is resolved.
@@ -936,7 +933,7 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
if (int_type.bits == 0) return .@"1";
return .fromByteUnits(std.zig.target.intAlignment(target, int_type.bits));
},
.ptr_type, .restricted_ptr_type, .anyframe_type => ptrAbiAlignment(target),
.ptr_type, .anyframe_type => ptrAbiAlignment(target),
.array_type => |array_type| Type.fromInterned(array_type.child).abiAlignment(zcu),
.vector_type => |vector_type| {
if (vector_type.len == 0) return .@"1";
@@ -1020,6 +1017,10 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
.generic_poison => unreachable,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => ptrAbiAlignment(target),
.direct => return abiAlignment(.fromInterned(restricted_type.unrestricted_type), zcu),
},
.tuple_type => |tuple| {
var big_align: Alignment = .@"1";
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
@@ -1069,6 +1070,7 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
@@ -1090,7 +1092,7 @@ pub fn abiSize(ty: Type, zcu: *const Zcu) u64 {
.slice => ptrAbiSize(target) * 2,
.one, .many, .c => ptrAbiSize(target),
},
.restricted_ptr_type, .anyframe_type => ptrAbiSize(target),
.anyframe_type => ptrAbiSize(target),
.array_type => |arr| arr.lenIncludingSentinel() * Type.fromInterned(arr.child).abiSize(zcu),
.vector_type => |vec| {
const elem_ty: Type = .fromInterned(vec.child);
@@ -1169,6 +1171,10 @@ pub fn abiSize(ty: Type, zcu: *const Zcu) u64 {
.anyopaque => unreachable,
.generic_poison => unreachable,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => ptrAbiSize(target),
.direct => return abiSize(.fromInterned(restricted_type.unrestricted_type), zcu),
},
.tuple_type => |tuple| switch (ty.classify(zcu)) {
// `structFieldOffset` is bogus on NPV tuples, because there may be some fields with
// non-zero size.
@@ -1209,6 +1215,7 @@ pub fn abiSize(ty: Type, zcu: *const Zcu) u64 {
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
@@ -1243,7 +1250,7 @@ pub fn bitSize(ty: Type, zcu: *const Zcu) u64 {
.slice => target.ptrBitWidth() * 2,
else => target.ptrBitWidth(),
},
.restricted_ptr_type, .anyframe_type => target.ptrBitWidth(),
.anyframe_type => target.ptrBitWidth(),
.array_type => |array_type| {
const elem_ty: Type = .fromInterned(array_type.child);
const len = array_type.lenIncludingSentinel();
@@ -1294,6 +1301,10 @@ pub fn bitSize(ty: Type, zcu: *const Zcu) u64 {
.generic_poison => unreachable,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => target.ptrBitWidth(),
.direct => return bitSize(.fromInterned(restricted_type.unrestricted_type), zcu),
},
.struct_type => {
const struct_obj = ip.loadStructType(ty.toIntern());
switch (struct_obj.layout) {
@@ -1335,29 +1346,27 @@ pub fn bitSize(ty: Type, zcu: *const Zcu) u64 {
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
};
}
/// Returns `null` if `ty` is not a restricted pointer.
/// Returns `null` if `ty` is not a restricted type.
pub fn unrestrictedType(ty: Type, zcu: *const Zcu) ?Type {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.restricted_ptr_type => |restricted_ptr_type| return .fromInterned(restricted_ptr_type.unrestricted_ptr_type),
.restricted_type => |restricted_type| return .fromInterned(restricted_type.unrestricted_type),
else => null,
};
}
const RestrictedRepr = enum { indirect, direct };
pub fn restrictedRepr(ty: Type, zcu: *const Zcu) RestrictedRepr {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.restricted_ptr_type => |restricted_ptr_type| restrictedReprByZirIndex(restricted_ptr_type.zir_index, zcu),
else => .direct,
};
return restrictedReprByTrackedInst(zcu.intern_pool.indexToKey(ty.toIntern()).restricted_type.zir_index, zcu);
}
pub fn restrictedReprByZirIndex(zir_index: InternPool.TrackedInst.Index, zcu: *const Zcu) RestrictedRepr {
pub fn restrictedReprByTrackedInst(zir_index: InternPool.TrackedInst.Index, zcu: *const Zcu) RestrictedRepr {
return switch (zcu.fileByIndex(zir_index.resolveFile(&zcu.intern_pool)).mod.?.optimize_mode) {
.Debug, .ReleaseSafe => if (zcu.backendSupportsFeature(.restricted_types)) .indirect else .direct,
.ReleaseFast, .ReleaseSmall => .direct,
@@ -1365,8 +1374,10 @@ pub fn restrictedReprByZirIndex(zir_index: InternPool.TrackedInst.Index, zcu: *c
}
pub fn isSinglePointer(ty: Type, zcu: *const Zcu) bool {
const ptr_info = ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }) orelse return false;
return ptr_info.flags.size == .one;
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_info| ptr_info.flags.size == .one,
else => false,
};
}
/// Asserts `ty` is a pointer.
@@ -1376,27 +1387,24 @@ pub fn ptrSize(ty: Type, zcu: *const Zcu) std.builtin.Type.Pointer.Size {
/// Returns `null` if `ty` is not a pointer.
pub fn ptrSizeOrNull(ty: Type, zcu: *const Zcu) ?std.builtin.Type.Pointer.Size {
const ptr_info = ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }) orelse return null;
return ptr_info.flags.size;
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_info| ptr_info.flags.size,
else => null,
};
}
pub fn isSlice(ty: Type, zcu: *const Zcu) bool {
const ptr_info = ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }) orelse return false;
return ptr_info.flags.size == .slice;
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .slice,
else => false,
};
}
pub fn isSliceAtRuntime(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .slice,
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => |child| opt_child: switch (zcu.intern_pool.indexToKey(child)) {
.opt_type => |child| switch (zcu.intern_pool.indexToKey(child)) {
.ptr_type => |ptr_type| !ptr_type.flags.is_allowzero and ptr_type.flags.size == .slice,
.restricted_ptr_type => |restricted_ptr_type| continue :opt_child .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => false,
},
else => false,
@@ -1408,8 +1416,10 @@ pub fn slicePtrFieldType(ty: Type, zcu: *const Zcu) Type {
}
pub fn isConstPtr(ty: Type, zcu: *const Zcu) bool {
const ptr_info = ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }) orelse return false;
return ptr_info.flags.is_const;
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.is_const,
else => false,
};
}
pub fn isVolatilePtr(ty: Type, zcu: *const Zcu) bool {
@@ -1417,25 +1427,25 @@ pub fn isVolatilePtr(ty: Type, zcu: *const Zcu) bool {
}
pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool {
const ptr_info = ty.ptrInfoOrNull(ip, .{ .allow_optional = false }) orelse return false;
return ptr_info.flags.is_volatile;
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.is_volatile,
else => false,
};
}
pub fn isAllowzeroPtr(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.is_allowzero,
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => true,
else => false,
};
}
pub fn isCPtr(ty: Type, zcu: *const Zcu) bool {
const ptr_info = ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }) orelse return false;
return ptr_info.flags.size == .c;
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .c,
else => false,
};
}
pub fn isPtrAtRuntime(ty: Type, zcu: *const Zcu) bool {
@@ -1445,16 +1455,18 @@ pub fn isPtrAtRuntime(ty: Type, zcu: *const Zcu) bool {
.slice => false,
.one, .many, .c => true,
},
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => true,
.direct => continue :ty ip.indexToKey(restricted_type.unrestricted_type),
},
.opt_type => |child| opt_child: switch (ip.indexToKey(child)) {
.ptr_type => |p| switch (p.flags.size) {
.slice, .c => false,
.many, .one => !p.flags.is_allowzero,
},
.restricted_ptr_type => |restricted_ptr_type| continue :opt_child .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => true,
.direct => continue :opt_child ip.indexToKey(restricted_type.unrestricted_type),
},
else => false,
},
@@ -1473,17 +1485,19 @@ pub fn optionalReprIsPayload(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .c,
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => |child_type| child_type == .anyerror_type or opt_child: switch (ip.indexToKey(child_type)) {
.opt_type => |opt_child_type| opt_child_type == .anyerror_type or opt_child: switch (ip.indexToKey(opt_child_type)) {
.ptr_type => |ptr_type| ptr_type.flags.size != .c and !ptr_type.flags.is_allowzero,
.restricted_ptr_type => |restricted_ptr_type| continue :opt_child .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.error_set_type, .inferred_error_set_type => true,
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => true,
.direct => continue :opt_child ip.indexToKey(restricted_type.unrestricted_type),
},
else => false,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => false,
.direct => continue :ty ip.indexToKey(restricted_type.unrestricted_type),
},
else => false,
};
}
@@ -1494,19 +1508,21 @@ pub fn isPtrLikeOptional(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .c,
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => |child| opt_child: switch (ip.indexToKey(child)) {
.opt_type => |opt_child_type| opt_child: switch (ip.indexToKey(opt_child_type)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.slice, .c => false,
.many, .one => !ptr_type.flags.is_allowzero,
},
.restricted_ptr_type => |restricted_ptr_type| continue :opt_child .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => true,
.direct => continue :opt_child ip.indexToKey(restricted_type.unrestricted_type),
},
else => false,
},
.restricted_type => |restricted_type| switch (restrictedReprByTrackedInst(restricted_type.zir_index, zcu)) {
.indirect => false,
.direct => continue :ty ip.indexToKey(restricted_type.unrestricted_type),
},
else => false,
};
}
@@ -1541,7 +1557,7 @@ pub fn nullablePtrElem(ty: Type, zcu: *const Zcu) Type {
.pointer => return ty.childType(zcu),
.optional => {
const ptr_ty = ty.childType(zcu);
const ptr_info = ptr_ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }).?;
const ptr_info = zcu.intern_pool.indexToKey(ptr_ty.toIntern()).ptr_type;
assert(ptr_info.flags.size != .c);
assert(!ptr_info.flags.is_allowzero);
return .fromInterned(ptr_info.child);
@@ -1563,7 +1579,7 @@ pub fn nullablePtrElem(ty: Type, zcu: *const Zcu) Type {
/// * `[*c]T`
pub fn indexableElem(ty: Type, zcu: *const Zcu) Type {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
return switch (ip.indexToKey(ty.toIntern())) {
inline .array_type, .vector_type => |arr| .fromInterned(arr.child),
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.many, .slice, .c => .fromInterned(ptr_type.child),
@@ -1572,9 +1588,6 @@ pub fn indexableElem(ty: Type, zcu: *const Zcu) Type {
else => unreachable,
},
},
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => unreachable,
};
}
@@ -1590,16 +1603,12 @@ pub fn scalarType(ty: Type, zcu: *const Zcu) Type {
/// Asserts that the type is an optional, or a C pointer.
/// For C pointers this returns the type unmodified.
pub fn optionalChild(ty: Type, zcu: *const Zcu) Type {
const ip = &zcu.intern_pool;
ty: switch (ip.indexToKey(ty.toIntern())) {
switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.opt_type => |child| return .fromInterned(child),
.ptr_type => |ptr_type| {
assert(ptr_type.flags.size == .c);
return ty;
},
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => unreachable,
}
}
@@ -1699,6 +1708,15 @@ pub fn containerLayout(ty: Type, zcu: *const Zcu) std.builtin.Type.ContainerLayo
};
}
pub fn isBitpack(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).layout == .@"packed",
.union_type => ip.loadUnionType(ty.toIntern()).layout == .@"packed",
else => false,
};
}
pub fn bitpackBackingInt(ty: Type, zcu: *const Zcu) Type {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
@@ -1728,7 +1746,7 @@ pub fn errorSetIsEmpty(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return switch (ty.toIntern()) {
.anyerror_type, .adhoc_inferred_error_set_type => false,
else => switch (ip.indexToKey(ty.toIntern())) {
else => |index| switch (ip.indexToKey(index)) {
.error_set_type => |error_set_type| error_set_type.names.len == 0,
.inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
.none, .anyerror_type => false,
@@ -1752,7 +1770,7 @@ pub fn isAnyError(ty: Type, zcu: *const Zcu) bool {
return switch (ty.toIntern()) {
.anyerror_type => true,
.adhoc_inferred_error_set_type => false,
else => switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
else => |index| switch (zcu.intern_pool.indexToKey(index)) {
.inferred_error_set_type => |i| ip.funcIesResolvedUnordered(i) == .anyerror_type,
else => false,
},
@@ -1760,9 +1778,13 @@ pub fn isAnyError(ty: Type, zcu: *const Zcu) bool {
}
pub fn isError(ty: Type, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
.error_union, .error_set => true,
return switch (ty.toIntern()) {
.anyerror_type, .adhoc_inferred_error_set_type, .anyerror_void_error_union_type => true,
else => false,
_ => |index| switch (zcu.intern_pool.indexToKey(index)) {
.error_union_type, .error_set_type => true,
else => false,
},
};
}
@@ -1782,7 +1804,7 @@ pub fn errorSetHasField(
const ip = &zcu.intern_pool;
return switch (ty.toIntern()) {
.anyerror_type => true,
else => switch (ip.indexToKey(ty.toIntern())) {
else => |index| switch (ip.indexToKey(index)) {
.error_set_type => |error_set_type| error_set_type.nameIndex(ip, name) != null,
.inferred_error_set_type => |i| switch (ip.funcIesResolvedUnordered(i)) {
.anyerror_type => true,
@@ -1817,8 +1839,7 @@ pub fn vectorLen(ty: Type, zcu: *const Zcu) u32 {
/// Asserts the type is an array, pointer or vector.
pub fn sentinel(ty: Type, zcu: *const Zcu) ?Value {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.vector_type,
.struct_type,
.tuple_type,
@@ -1826,9 +1847,6 @@ pub fn sentinel(ty: Type, zcu: *const Zcu) ?Value {
.array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null,
.ptr_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null,
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => unreachable,
};
@@ -1867,10 +1885,27 @@ pub fn isUnsignedInt(ty: Type, zcu: *const Zcu) bool {
/// Returns true for integers, enums, error sets, and packed structs/unions.
/// If this function returns true, then intInfo() can be called on the type.
pub fn isAbiInt(ty: Type, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
.int, .@"enum", .error_set => true,
.@"struct", .@"union" => ty.containerLayout(zcu) == .@"packed",
else => false,
const ip = &zcu.intern_pool;
return switch (ty.toIntern()) {
.usize_type,
.isize_type,
.c_char_type,
.c_short_type,
.c_ushort_type,
.c_int_type,
.c_uint_type,
.c_long_type,
.c_ulong_type,
.c_longlong_type,
.c_ulonglong_type,
.anyerror_type,
=> true,
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type, .enum_type, .error_set_type => true,
.struct_type => ip.loadStructType(ty.toIntern()).layout == .@"packed",
.union_type => ip.loadUnionType(ty.toIntern()).layout == .@"packed",
else => false,
},
};
}
@@ -1914,10 +1949,10 @@ pub fn intInfo(starting_ty: Type, zcu: *const Zcu) InternPool.Key.IntType {
return .{ .signedness = .unsigned, .bits = zcu.errorSetBits() };
},
.restricted_type => unreachable,
.tuple_type => unreachable,
.ptr_type => unreachable,
.restricted_ptr_type => unreachable,
.anyframe_type => unreachable,
.array_type => unreachable,
@@ -1945,6 +1980,7 @@ pub fn intInfo(starting_ty: Type, zcu: *const Zcu) InternPool.Key.IntType {
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
@@ -2070,7 +2106,6 @@ pub fn isNumeric(ty: Type, zcu: *const Zcu) bool {
.c_longlong_type,
.c_ulonglong_type,
=> true,
else => switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.int_type => true,
else => false,
@@ -2088,7 +2123,6 @@ pub fn onePossibleValue(ty: Type, pt: Zcu.PerThread) !?Value {
assertHasLayout(ty, zcu);
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type,
.restricted_ptr_type, // number of possible values is not known until the end of compilation, so never treated as NPV/OPV
.error_union_type,
.func_type,
.anyframe_type,
@@ -2152,6 +2186,13 @@ pub fn onePossibleValue(ty: Type, pt: Zcu.PerThread) !?Value {
.no_possible_value => try pt.nullValue(ty),
else => null,
},
.restricted_type => |restricted_type| if (try onePossibleValue(
.fromInterned(restricted_type.unrestricted_type),
pt,
)) |unrestricted_opv| .fromInterned(try pt.intern(.{ .restricted_value = .{
.ty = ty.toIntern(),
.unrestricted_value = unrestricted_opv.toIntern(),
} })) else null,
.tuple_type => |tuple| {
// Check *whether* the OPV exists first, because constructing it is a little more expensive.
if (ty.classify(zcu) != .one_possible_value) return null;
@@ -2240,6 +2281,7 @@ pub fn onePossibleValue(ty: Type, pt: Zcu.PerThread) !?Value {
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
@@ -2249,7 +2291,8 @@ pub fn onePossibleValue(ty: Type, pt: Zcu.PerThread) !?Value {
/// Asserts that `ty` has its layout resolved. `generic_poison` will return `false`.
pub fn comptimeOnly(ty: Type, zcu: *const Zcu) bool {
if (ty.toIntern() == .generic_poison_type) return false;
if (ty.zigTypeTag(zcu) == .error_union and ty.errorUnionPayload(zcu).toIntern() == .generic_poison_type) return false;
const ip = &zcu.intern_pool;
if (ip.isErrorUnionType(ty.toIntern()) and ip.errorUnionPayload(ty.toIntern()) == .generic_poison_type) return false;
return switch (ty.classify(zcu)) {
.no_possible_value, .one_possible_value, .runtime => false,
.partially_comptime, .fully_comptime => true,
@@ -2257,7 +2300,7 @@ pub fn comptimeOnly(ty: Type, zcu: *const Zcu) bool {
}
pub fn isVector(ty: Type, zcu: *const Zcu) bool {
return ty.zigTypeTag(zcu) == .vector;
return zcu.intern_pool.isVectorType(ty.toIntern());
}
/// Returns 0 if not a vector, otherwise returns @bitSizeOf(Element) * vector_len.
@@ -2673,7 +2716,7 @@ pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Zcu.LazySrcLoc {
const ip = &zcu.intern_pool;
return .{
.base_node_inst = switch (ip.indexToKey(ty.toIntern())) {
.restricted_ptr_type => |restricted_ptr_type| restricted_ptr_type.zir_index,
.restricted_type => |restricted_type| restricted_type.zir_index,
.struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
.declared => |d| d.zir_index,
.reified => |r| r.zir_index,
@@ -2898,7 +2941,7 @@ pub fn getUnionLayout(loaded_union: InternPool.LoadedUnionType, zcu: *const Zcu)
pub fn elemPtrType(ptr_ty: Type, index: ?u64, pt: Zcu.PerThread) Allocator.Error!Type {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ptr_info = ptr_ty.ptrInfoOrNull(ip, .{ .allow_optional = false }).?;
const ptr_info = ip.indexToKey(ptr_ty.toIntern()).ptr_type;
const elem_ty: Type = switch (ptr_info.flags.size) {
.slice, .many, .c => .fromInterned(ptr_info.child),
.one => switch (ip.indexToKey(ptr_info.child)) {
@@ -2952,7 +2995,7 @@ pub fn elemPtrType(ptr_ty: Type, index: ?u64, pt: Zcu.PerThread) Allocator.Error
pub fn fieldPtrType(ptr_ty: Type, field_index: u32, pt: Zcu.PerThread) Allocator.Error!Type {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ptr_info = ptr_ty.ptrInfoOrNull(ip, .{ .allow_optional = false }).?;
const ptr_info = ip.indexToKey(ptr_ty.toIntern()).ptr_type;
assert(ptr_info.flags.size == .one or ptr_info.flags.size == .c);
const aggregate_ty: Type = .fromInterned(ptr_info.child);
aggregate_ty.assertHasLayout(zcu);
@@ -3080,7 +3123,7 @@ pub fn fieldPtrType(ptr_ty: Type, field_index: u32, pt: Zcu.PerThread) Allocator
.none => switch (ip.indexToKey(aggregate_ty.toIntern())) {
.tuple_type, .union_type => field_ty.abiAlignment(zcu),
.struct_type => field_ty.defaultStructFieldAlignment(.auto, zcu),
.ptr_type, .restricted_ptr_type => ptrAbiAlignment(zcu.getTarget()),
.ptr_type => ptrAbiAlignment(zcu.getTarget()),
else => unreachable,
},
else => |a| a,
@@ -3109,7 +3152,7 @@ pub fn fieldPtrType(ptr_ty: Type, field_index: u32, pt: Zcu.PerThread) Allocator
pub fn containerTypeName(ty: Type, ip: *const InternPool) InternPool.NullTerminatedString {
return switch (ip.indexToKey(ty.toIntern())) {
.restricted_ptr_type => ip.loadRestrictedType(ty.toIntern()).name,
.restricted_type => ip.loadRestrictedType(ty.toIntern()).name,
.struct_type => ip.loadStructType(ty.toIntern()).name,
.union_type => ip.loadUnionType(ty.toIntern()).name,
.enum_type => ip.loadEnumType(ty.toIntern()).name,
@@ -3317,7 +3360,6 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void {
switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.anyframe_type,
.simple_type,
.opaque_type,
@@ -3337,6 +3379,7 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void {
.tuple_type => |tuple| for (tuple.types.get(&zcu.intern_pool)) |field_ty| {
assertHasLayout(.fromInterned(field_ty), zcu);
},
.restricted_type => |restricted_type| assertHasLayout(.fromInterned(restricted_type.unrestricted_type), zcu),
.struct_type => {
assert(zcu.intern_pool.loadStructType(ty.toIntern()).want_layout);
zcu.assertUpToDate(.wrap(.{ .type_layout = ty.toIntern() }));
@@ -3351,6 +3394,7 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void {
},
// values, not types
.undef,
.simple_value,
.@"extern",
.func,
@@ -3366,7 +3410,7 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void {
.aggregate,
.un,
.bitpack,
.undef,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
@@ -3419,13 +3463,13 @@ fn collectSubtypes(ty: Type, pt: Zcu.PerThread, visited: *std.AutoArrayHashMapUn
.undef,
.inferred_error_set_type,
.error_set_type,
.restricted_type,
.struct_type,
.union_type,
.opaque_type,
.enum_type,
.simple_type,
.int_type,
.restricted_ptr_type,
=> {},
// values, not types
@@ -3444,6 +3488,7 @@ fn collectSubtypes(ty: Type, pt: Zcu.PerThread, visited: *std.AutoArrayHashMapUn
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
+8 -7
View File
@@ -501,7 +501,7 @@ pub const BuiltinDecl = enum {
@"panic.copyLenMismatch",
@"panic.memcpyAlias",
@"panic.noreturnReturned",
@"panic.corruptRestrictedPointer",
@"panic.corruptRestrictedValue",
VaList,
@@ -589,7 +589,7 @@ pub const BuiltinDecl = enum {
.@"panic.copyLenMismatch",
.@"panic.memcpyAlias",
.@"panic.noreturnReturned",
.@"panic.corruptRestrictedPointer",
.@"panic.corruptRestrictedValue",
=> .func,
};
}
@@ -663,7 +663,7 @@ pub const SimplePanicId = enum {
copy_len_mismatch,
memcpy_alias,
noreturn_returned,
corrupt_restricted_pointer,
corrupt_restricted_value,
pub fn toBuiltin(id: SimplePanicId) BuiltinDecl {
return switch (id) {
@@ -687,7 +687,7 @@ pub const SimplePanicId = enum {
.copy_len_mismatch => .@"panic.copyLenMismatch",
.memcpy_alias => .@"panic.memcpyAlias",
.noreturn_returned => .@"panic.noreturnReturned",
.corrupt_restricted_pointer => .@"panic.corruptRestrictedPointer",
.corrupt_restricted_value => .@"panic.corruptRestrictedValue",
// zig fmt: on
};
}
@@ -2748,12 +2748,13 @@ pub const LazySrcLoc = struct {
.struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_node,
.extended => switch (inst.data.extended.opcode) {
.struct_decl => zir.getStructDecl(zir_inst).src_node,
.union_decl => zir.getUnionDecl(zir_inst).src_node,
.enum_decl => zir.getEnumDecl(zir_inst).src_node,
.union_decl => zir.getUnionDecl(zir_inst).src_node,
.opaque_decl => zir.getOpaqueDecl(zir_inst).src_node,
.reify_enum => zir.extraData(Zir.Inst.ReifyEnum, inst.data.extended.operand).data.node,
.reify_restricted => zir.extraData(Zir.Inst.ReifyRestricted, inst.data.extended.operand).data.node,
.reify_struct => zir.extraData(Zir.Inst.ReifyStruct, inst.data.extended.operand).data.node,
.reify_union => zir.extraData(Zir.Inst.ReifyUnion, inst.data.extended.operand).data.node,
.reify_enum => zir.extraData(Zir.Inst.ReifyEnum, inst.data.extended.operand).data.node,
else => unreachable,
},
else => unreachable,
@@ -4000,7 +4001,7 @@ pub const Feature = enum {
pub fn backendSupportsFeature(zcu: *const Zcu, comptime feature: Feature) bool {
const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
return target_util.backendSupportsFeature(backend, feature);
return target_util.backendSupportsFeature(backend, zcu.comp.config.incremental, feature);
}
pub const AtomicPtrAlignmentError = error{
+88 -88
View File
@@ -232,15 +232,26 @@ const LazySymbolStructure = struct {
operation: Operation,
pub const Operation = enum {
ptr_inc,
end_ptr_inc,
pub fn apply(operation: Operation, slice: []u8, endian: std.builtin.Endian) void {
pub fn apply(operation: Operation, slice: []u8, target_opts: struct {
ptr_bit_width: u16,
endian: std.builtin.Endian,
}) void {
switch (operation) {
.ptr_inc => switch (slice.len) {
.end_ptr_inc => switch (target_opts.ptr_bit_width) {
else => unreachable,
2 => std.mem.writeInt(u16, slice[0..2], std.mem.readInt(u16, slice[0..2], endian) + 1, endian),
4 => std.mem.writeInt(u32, slice[0..4], std.mem.readInt(u32, slice[0..4], endian) + 1, endian),
8 => std.mem.writeInt(u64, slice[0..8], std.mem.readInt(u64, slice[0..8], endian) + 1, endian),
inline 16, 32, 64 => |ptr_bit_width| {
const ptr_size = @divExact(ptr_bit_width, 8);
const TargetPtrInt = @Int(.unsigned, ptr_bit_width);
const end_slice = slice[slice.len - ptr_size ..][0..ptr_size];
std.mem.writeInt(
TargetPtrInt,
end_slice,
std.mem.readInt(TargetPtrInt, end_slice, target_opts.endian) + 1,
target_opts.endian,
);
},
},
}
}
@@ -279,14 +290,14 @@ pub fn getLazySymbolInfo(
.structure => .{},
.attributes => .{ .required_alignment = .@"1" },
},
.restricted_ptr_type => |restricted_ptr_type| switch (kind) {
.restricted_type => |restricted_type| switch (kind) {
.structure => .{},
.attributes => {
const restricted_ptr_ty: Type = .fromInterned(lazy_sym.key);
const unrestricted_ptr_ty: Type =
.fromInterned(restricted_ptr_type.unrestricted_ptr_type);
return .{ .required_alignment = restricted_ptr_ty.abiAlignment(zcu)
.maxStrict(unrestricted_ptr_ty.abiAlignment(zcu)) };
const restricted_ty: Type = .fromInterned(lazy_sym.key);
const unrestricted_ty: Type =
.fromInterned(restricted_type.unrestricted_type);
return .{ .required_alignment = restricted_ty.abiAlignment(zcu)
.maxStrict(unrestricted_ty.abiAlignment(zcu)) };
},
},
},
@@ -298,31 +309,32 @@ pub fn getLazySymbolInfo(
},
_ => switch (ip.indexToKey(lazy_sym.key)) {
else => unreachable,
.ptr => |ptr| switch (ip.indexToKey(ptr.ty)) {
else => unreachable,
.restricted_ptr_type => |restricted_ptr_type| switch (kind) {
.structure => .{ .parent = .{ .kind = .const_data, .key = ptr.ty }, .modify = .{
.lazy_sym = .{ .kind = .deferred_const_data, .key = ptr.ty },
.operation = .ptr_inc,
} },
.attributes => {
const unrestricted_ptr_ty: Type =
.fromInterned(restricted_ptr_type.unrestricted_ptr_type);
return .{
.required_alignment = unrestricted_ptr_ty.abiAlignment(zcu),
.size = unrestricted_ptr_ty.abiSize(zcu),
};
},
.restricted_value => |restricted_value| switch (kind) {
.structure => .{ .parent = .{ .kind = .const_data, .key = restricted_value.ty }, .modify = .{
.lazy_sym = .{ .kind = .deferred_const_data, .key = restricted_value.ty },
.operation = .end_ptr_inc,
} },
.attributes => {
const unrestricted_ty: Type = .fromInterned(
ip.indexToKey(restricted_value.ty).restricted_type.unrestricted_type,
);
return .{
.required_alignment = unrestricted_ty.abiAlignment(zcu),
.size = unrestricted_ty.abiSize(zcu),
};
},
},
.restricted_ptr_type => switch (kind) {
.restricted_type => switch (kind) {
.structure => .{ .parent = .{ .kind = .const_data, .key = lazy_sym.key } },
.attributes => {
const restricted_ptr_ty: Type = .fromInterned(lazy_sym.key);
const restricted_ty: Type = .fromInterned(lazy_sym.key);
const unrestricted_ty: Type = .fromInterned(
ip.indexToKey(lazy_sym.key).restricted_type.unrestricted_type,
);
return .{
.header = true,
.required_alignment = restricted_ptr_ty.abiAlignment(zcu),
.size = restricted_ptr_ty.abiSize(zcu),
.required_alignment = restricted_ty.abiAlignment(zcu),
.size = unrestricted_ty.abiAlignment(zcu).forward(restricted_ty.abiSize(zcu)),
};
},
},
@@ -367,7 +379,7 @@ pub fn generateLazySymbol(
}
return;
},
.restricted_ptr_type => return,
.restricted_type => return,
else => {},
},
.deferred_const_data => switch (lazy_sym.key) {
@@ -392,25 +404,10 @@ pub fn generateLazySymbol(
return;
},
_ => switch (ip.indexToKey(lazy_sym.key)) {
.ptr => |ptr| switch (ip.indexToKey(ptr.ty)) {
.restricted_ptr_type => |restricted_ptr_type| return lowerPtr(
bin_file,
pt,
src_loc,
try ip.getCoerced(
comp.gpa,
comp.io,
pt.tid,
lazy_sym.key,
restricted_ptr_type.unrestricted_ptr_type,
),
w,
reloc_parent,
0,
),
else => {},
},
.restricted_ptr_type => return w.splatByteAll(0, @divExact(zcu.getTarget().ptrBitWidth(), 8)),
.restricted_value => |restricted_value| return generateSymbol(bin_file, pt, src_loc, .fromInterned(
restricted_value.unrestricted_value,
), w, reloc_parent),
.restricted_type => return w.splatByteAll(0, @divExact(zcu.getTarget().ptrBitWidth(), 8)),
else => {},
},
else => {},
@@ -463,13 +460,13 @@ pub fn generateSymbol(
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.restricted_type,
.struct_type,
.tuple_type,
.union_type,
@@ -576,10 +573,7 @@ pub fn generateSymbol(
128 => try w.writeInt(u128, @bitCast(f128_val), endian),
},
},
.ptr => switch (ty.restrictedRepr(zcu)) {
.indirect => try lowerLazySymbolRef(bin_file, pt, .{ .kind = .deferred_const_data, .key = val.toIntern() }, w, reloc_parent, 0),
.direct => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), w, reloc_parent, 0),
},
.ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), w, reloc_parent, 0),
.slice => |slice| {
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), w, reloc_parent);
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), w, reloc_parent);
@@ -788,6 +782,10 @@ pub fn generateSymbol(
}
},
.bitpack => |bitpack| try generateSymbol(bin_file, pt, src_loc, .fromInterned(bitpack.backing_int_val), w, reloc_parent),
.restricted_value => |restricted_value| switch (ty.restrictedRepr(zcu)) {
.indirect => try lowerLazySymbolRef(bin_file, pt, .{ .kind = .deferred_const_data, .key = val.toIntern() }, w, reloc_parent, 0),
.direct => try generateSymbol(bin_file, pt, src_loc, .fromInterned(restricted_value.unrestricted_value), w, reloc_parent),
},
.memoized_call => unreachable,
}
}
@@ -1185,55 +1183,57 @@ const LowerResult = union(enum) {
lea_lazy_sym: link.File.LazySymbol,
};
pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allocator.Error!LowerResult {
pub fn lowerValue(pt: Zcu.PerThread, start_val: Value, target: *const std.Target) Allocator.Error!LowerResult {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
const start_ty = start_val.typeOf(zcu);
log.debug("lowerValue(@as({f}, {f}))", .{ ty.fmt(pt), val.fmtValue(pt) });
log.debug("lowerValue(@as({f}, {f}))", .{ start_ty.fmt(pt), start_val.fmtValue(pt) });
if (val.isUndef(zcu)) return .undef;
if (start_val.isUndef(zcu)) return .undef;
const ty, const val: Value = if (start_ty.unrestrictedType(zcu)) |unrestricted_ty| switch (start_ty.restrictedRepr(zcu)) {
.indirect => return .{ .lea_lazy_sym = .{ .kind = .deferred_const_data, .key = start_val.toIntern() } },
.direct => .{ unrestricted_ty, .fromInterned(ip.indexToKey(start_val.toIntern()).restricted_value.unrestricted_value) },
} else .{ start_ty, start_val };
switch (ty.zigTypeTag(zcu)) {
.void => return .none,
.bool => return .{ .immediate = @intFromBool(val.toBool()) },
.pointer => switch (ty.ptrSize(zcu)) {
.slice => {},
.one, .many, .c => switch (ty.restrictedRepr(zcu)) {
.indirect => return .{ .lea_lazy_sym = .{ .kind = .deferred_const_data, .key = val.toIntern() } },
.direct => {
const ptr = ip.indexToKey(val.toIntern()).ptr;
if (ptr.base_addr == .int) return .{ .immediate = ptr.byte_offset };
if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.int => unreachable, // handled above
.one, .many, .c => {
const ptr = ip.indexToKey(val.toIntern()).ptr;
if (ptr.base_addr == .int) return .{ .immediate = ptr.byte_offset };
if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.int => unreachable, // handled above
.nav => |nav_index| {
const nav = ip.getNav(nav_index);
const nav_ty: Type = .fromInterned(nav.resolved.?.type);
if (nav_ty.isRuntimeFnOrHasRuntimeBits(zcu) or nav.getExtern(ip) != null) {
return .{ .lea_nav = nav_index };
} else {
// Create the 0xaa bit pattern...
const undef_ptr_bits: u64 = @intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() + 1)) / 3);
// ...but align the pointer
const alignment = zcu.navAlignment(nav_index);
return .{ .immediate = alignment.forward(undef_ptr_bits) };
}
},
.uav => |uav| if (Value.fromInterned(uav.val).typeOf(zcu).isRuntimeFnOrHasRuntimeBits(zcu)) {
return .{ .lea_uav = uav };
.nav => |nav_index| {
const nav = ip.getNav(nav_index);
const nav_ty: Type = .fromInterned(nav.resolved.?.type);
if (nav_ty.isRuntimeFnOrHasRuntimeBits(zcu) or nav.getExtern(ip) != null) {
return .{ .lea_nav = nav_index };
} else {
// Create the 0xaa bit pattern...
const undef_ptr_bits: u64 = @intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() + 1)) / 3);
// ...but align the pointer
const alignment = Type.fromInterned(uav.orig_ty).ptrAlignment(zcu);
const alignment = zcu.navAlignment(nav_index);
return .{ .immediate = alignment.forward(undef_ptr_bits) };
},
}
},
else => {},
};
},
.uav => |uav| if (Value.fromInterned(uav.val).typeOf(zcu).isRuntimeFnOrHasRuntimeBits(zcu)) {
return .{ .lea_uav = uav };
} else {
// Create the 0xaa bit pattern...
const undef_ptr_bits: u64 = @intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() + 1)) / 3);
// ...but align the pointer
const alignment = Type.fromInterned(uav.orig_ty).ptrAlignment(zcu);
return .{ .immediate = alignment.forward(undef_ptr_bits) };
},
else => {},
};
},
},
.int => {
+99 -77
View File
@@ -767,7 +767,7 @@ pub const DeclGen = struct {
// somewhere and we should let the C compiler tell us about it.
const elem_ty = ptr_ty.childType(zcu);
const need_cast = elem_ty.toIntern() != nav_ty.toIntern() and
elem_ty.zigTypeTag(zcu) != .@"fn" or nav_ty.zigTypeTag(zcu) != .@"fn";
!ip.isFunctionType(elem_ty.toIntern()) or !ip.isFunctionType(nav_ty.toIntern());
if (need_cast) {
try w.writeAll("((");
try dg.renderType(w, ptr_ty);
@@ -919,13 +919,13 @@ pub const DeclGen = struct {
// types, not values
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.restricted_type,
.struct_type,
.tuple_type,
.union_type,
@@ -1078,24 +1078,11 @@ pub const DeclGen = struct {
try dg.renderValue(w, .fromInterned(slice.len), initializer_type);
try w.writeByte('}');
},
.ptr => switch (ty.restrictedRepr(zcu)) {
.indirect => {
try dg.need_restricted.ensureUnusedCapacity(zcu.gpa, 2);
dg.need_restricted.putAssumeCapacity(ty.toIntern(), {});
dg.need_restricted.putAssumeCapacity(val.toIntern(), {});
const restricted_ty_name = ty.containerTypeName(ip).toSlice(ip);
try w.print("&zig_restricted_{f}__{d}[zig_restricted_index_{f}__{d}]", .{
fmtIdentUnsolo(restricted_ty_name), ty.toIntern(),
fmtIdentUnsolo(restricted_ty_name), val.toIntern(),
});
},
.direct => {
const derivation = try val.pointerDerivation(dg.arena, pt, null);
try w.writeByte('(');
try dg.renderPointer(w, derivation, location);
try w.writeByte(')');
},
.ptr => {
const derivation = try val.pointerDerivation(dg.arena, pt, null);
try w.writeByte('(');
try dg.renderPointer(w, derivation, location);
try w.writeByte(')');
},
.opt => |opt| switch (CType.classifyOptional(ty, zcu)) {
.npv_payload => unreachable, // opv optional
@@ -1319,13 +1306,28 @@ pub const DeclGen = struct {
if (loaded_union.layout == .auto) try w.writeByte('}');
}
},
.restricted_value => |restricted_value| switch (ty.restrictedRepr(zcu)) {
.indirect => {
const loaded_restricted = ip.loadRestrictedType(ty.toIntern());
// Explicitly add the restricted decl dependency on the unrestricted type
_ = try CType.lower(.fromInterned(loaded_restricted.unrestricted_type), &dg.ctype_deps, dg.arena, zcu);
try dg.need_restricted.put(zcu.gpa, val.toIntern(), {});
const restricted_ty_name = loaded_restricted.name.toSlice(ip);
try w.print("&zig_restricted_{f}__{d}[zig_restricted_index_{f}__{d}]", .{
fmtIdentUnsolo(restricted_ty_name), ty.toIntern(),
fmtIdentUnsolo(restricted_ty_name), val.toIntern(),
});
},
.direct => try dg.renderValue(w, .fromInterned(restricted_value.unrestricted_value), initializer_type),
},
}
}
fn renderUndefValue(
dg: *DeclGen,
w: *Writer,
ty: Type,
start_ty: Type,
location: ValueRenderLocation,
) Error!void {
const pt = dg.pt;
@@ -1343,7 +1345,8 @@ pub const DeclGen = struct {
.ReleaseFast, .ReleaseSmall => false,
};
switch (ty.toIntern()) {
var ty = start_ty;
ty: switch (start_ty.toIntern()) {
.c_longdouble_type,
.f16_type,
.f32_type,
@@ -1371,7 +1374,7 @@ pub const DeclGen = struct {
return w.writeByte(')');
},
.bool_type => try w.writeAll(if (safety_on) "0xaa" else "false"),
else => ty: switch (ip.indexToKey(ty.toIntern())) {
else => ty_key: switch (ip.indexToKey(ty.toIntern())) {
.simple_type, // anyerror, c_char (etc), usize, isize
.int_type,
.enum_type,
@@ -1442,9 +1445,6 @@ pub const DeclGen = struct {
try w.writeByte('}');
},
},
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => |child_type| switch (CType.classifyOptional(ty, zcu)) {
.npv_payload => unreachable, // opv optional
@@ -1475,6 +1475,16 @@ pub const DeclGen = struct {
try w.writeAll(" }");
},
},
.restricted_type => |restricted_type| switch (ty.restrictedRepr(zcu)) {
.indirect => continue :ty_key .{ .ptr_type = .{
.child = restricted_type.unrestricted_type,
.flags = .{ .is_const = true },
} },
.direct => {
ty = .fromInterned(restricted_type.unrestricted_type);
continue :ty restricted_type.unrestricted_type;
},
},
.struct_type => {
const loaded_struct = ip.loadStructType(ty.toIntern());
switch (loaded_struct.layout) {
@@ -1628,6 +1638,7 @@ pub const DeclGen = struct {
.aggregate,
.un,
.bitpack,
.restricted_value,
.memoized_call,
=> unreachable, // values, not types
},
@@ -2106,9 +2117,9 @@ pub fn genRestricted(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
for (need_restricted.keys(), need_restricted.values()) |restricted_ty, *restricted_vals| {
const unrestricted_ptr_type = ip.indexToKey(restricted_ty).restricted_ptr_type.unrestricted_ptr_type;
const unrestricted_cty: CType = try .lower(.fromInterned(unrestricted_ptr_type), &dg.ctype_deps, dg.arena, zcu);
const restricted_ty_name = Type.fromInterned(restricted_ty).containerTypeName(ip).toSlice(ip);
const unrestricted_type = ip.indexToKey(restricted_ty).restricted_type.unrestricted_type;
const unrestricted_cty: CType = try .lower(.fromInterned(unrestricted_type), &dg.ctype_deps, dg.arena, zcu);
const restricted_ty_name = ip.loadRestrictedType(restricted_ty).name.toSlice(ip);
try w.print(
\\#define zig_restricted_len_{f}__{d} {d}u
\\static {f}const zig_restricted_{f}__{d}[zig_restricted_len_{f}__{d}]{f} = {{
@@ -2138,7 +2149,7 @@ pub fn genRestricted(
restricted_val,
});
try dg.renderValue(w, .fromInterned(
try ip.getCoerced(zcu.gpa, zcu.comp.io, pt.tid, restricted_val, unrestricted_ptr_type),
ip.indexToKey(restricted_val).restricted_value.unrestricted_value,
), .static_initializer);
try w.writeAll(",\n");
}
@@ -3194,21 +3205,15 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
try f.allocs.put(zcu.gpa, local.new_local, true);
switch (elem_ty.zigTypeTag(zcu)) {
.@"struct", .@"union" => switch (elem_ty.containerLayout(zcu)) {
.@"packed" => {
// For packed aggregates, we zero-initialize to try and work around a design flaw
// related to how `packed`, `undefined`, and RLS interact. See comment in `airStore`
// for details.
const w = &f.code.writer;
try w.print("memset(&t{d}, 0x00, sizeof(", .{local.new_local});
try f.renderType(w, elem_ty);
try w.writeAll("));");
try f.newline();
},
.auto, .@"extern" => {},
},
else => {},
if (elem_ty.isBitpack(zcu)) {
// For packed aggregates, we zero-initialize to try and work around a design flaw
// related to how `packed`, `undefined`, and RLS interact. See comment in `airStore`
// for details.
const w = &f.code.writer;
try w.print("memset(&t{d}, 0x00, sizeof(", .{local.new_local});
try f.renderType(w, elem_ty);
try w.writeAll("));");
try f.newline();
}
return .{ .local_ref = local.new_local };
@@ -3228,21 +3233,15 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local });
try f.allocs.put(zcu.gpa, local.new_local, true);
switch (elem_ty.zigTypeTag(zcu)) {
.@"struct", .@"union" => switch (elem_ty.containerLayout(zcu)) {
.@"packed" => {
// For packed aggregates, we zero-initialize to try and work around a design flaw
// related to how `packed`, `undefined`, and RLS interact. See comment in `airStore`
// for details.
const w = &f.code.writer;
try w.print("memset(&t{d}, 0x00, sizeof(", .{local.new_local});
try f.renderType(w, elem_ty);
try w.writeAll("));");
try f.newline();
},
.auto, .@"extern" => {},
},
else => {},
if (elem_ty.isBitpack(zcu)) {
// For packed aggregates, we zero-initialize to try and work around a design flaw
// related to how `packed`, `undefined`, and RLS interact. See comment in `airStore`
// for details.
const w = &f.code.writer;
try w.print("memset(&t{d}, 0x00, sizeof(", .{local.new_local});
try f.renderType(w, elem_ty);
try w.writeAll("));");
try f.newline();
}
return .{ .local_ref = local.new_local };
@@ -5639,6 +5638,7 @@ fn airUnwrapRestricted(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue
try reap(f, inst, &.{ty_op.operand});
const w = &f.code.writer;
// Implicitly adds the restricted decl dependency on the unrestricted type
const local = try f.allocLocal(inst, unrestricted_ty);
switch (restricted_ty.restrictedRepr(zcu)) {
@@ -5647,8 +5647,13 @@ fn airUnwrapRestricted(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue
const target = &f.dg.mod.resolved_target.result;
const ptr_bits = target.ptrBitWidth();
const int_from_ptr = try f.allocLocal(inst, .usize);
try f.writeCValue(w, int_from_ptr, .other);
try f.dg.need_restricted.put(zcu.gpa, restricted_ty.toIntern(), {});
const unrestricted_size = unrestricted_ty.abiSize(zcu);
assert(unrestricted_size > 0);
const restricted_ty_name = ip.loadRestrictedType(restricted_ty.toIntern()).name.toSlice(ip);
const ptr_diff = try f.allocLocal(inst, .usize);
try f.writeCValue(w, ptr_diff, .other);
try w.print(" = zig_subw_u{d}(({f})", .{
ptr_bits,
CType.fmtTypeName(.{ .int = .uintptr_t }, zcu),
@@ -5656,29 +5661,46 @@ fn airUnwrapRestricted(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue
try f.writeCValue(w, operand, .other);
try w.print(", ({f})zig_restricted_{f}__{d}, {f});", .{
CType.fmtTypeName(.{ .int = .uintptr_t }, zcu),
fmtIdentUnsolo(restricted_ty.containerTypeName(ip).toSlice(ip)),
fmtIdentUnsolo(restricted_ty_name),
restricted_ty.toIntern(),
fmtUnsignedIntLiteralSmall(target, .uint8_t, ptr_bits, false, 10, .lower),
});
try f.newline();
const rotate_amount = std.math.log2_int(u16, @divExact(ptr_bits, 8));
try w.print("if ((zig_shr_u{d}(", .{ptr_bits});
try f.writeCValue(w, int_from_ptr, .other);
try w.print(", {f}) | zig_shlw_u{d}(", .{
fmtUnsignedIntLiteralSmall(target, .uint8_t, rotate_amount, false, 10, .lower),
ptr_bits,
});
try f.writeCValue(w, int_from_ptr, .other);
try w.print(", {f}, {f})) >= zig_restricted_len_{f}__{d}) {{", .{
fmtUnsignedIntLiteralSmall(target, .uint8_t, ptr_bits - rotate_amount, false, 10, .lower),
fmtUnsignedIntLiteralSmall(target, .uint8_t, ptr_bits, false, 10, .lower),
fmtIdentUnsolo(restricted_ty.containerTypeName(ip).toSlice(ip)),
try w.writeAll("if (");
if (unrestricted_size == 1) {
try f.writeCValue(w, ptr_diff, .other);
} else if (std.math.isPowerOfTwo(unrestricted_size)) {
const rotate_amount = std.math.log2_int(u64, unrestricted_size);
try w.print("(zig_shr_u{d}(", .{ptr_bits});
try f.writeCValue(w, ptr_diff, .other);
try w.print(", {f}) | zig_shlw_u{d}(", .{
fmtUnsignedIntLiteralSmall(target, .uint8_t, rotate_amount, false, 10, .lower),
ptr_bits,
});
try f.writeCValue(w, ptr_diff, .other);
try w.print(", {f}, {f}))", .{
fmtUnsignedIntLiteralSmall(target, .uint8_t, ptr_bits - rotate_amount, false, 10, .lower),
fmtUnsignedIntLiteralSmall(target, .uint8_t, ptr_bits, false, 10, .lower),
});
} else {
try f.writeCValue(w, ptr_diff, .other);
try w.print(" % {f} != {f} || ", .{
fmtUnsignedIntLiteralSmall(target, .uintptr_t, unrestricted_size, false, 10, .lower),
fmtUnsignedIntLiteralSmall(target, .uintptr_t, 0, false, 10, .lower),
});
try f.writeCValue(w, ptr_diff, .other);
try w.print(" / {f}", .{
fmtUnsignedIntLiteralSmall(target, .uintptr_t, unrestricted_size, false, 10, .lower),
});
}
try w.print(" >= zig_restricted_len_{f}__{d}) {{", .{
fmtIdentUnsolo(restricted_ty_name),
restricted_ty.toIntern(),
});
f.indent();
try f.newline();
try f.writePanic(.corrupt_restricted_pointer, w);
try f.writePanic(.corrupt_restricted_value, w);
try f.outdent();
try w.writeByte('}');
try f.newline();
@@ -6484,7 +6506,7 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(w, local, .other);
try f.need_tag_name_funcs.put(gpa, enum_ty.toIntern(), {});
try w.print(" = zig_tagName_{f}__{d}(", .{
fmtIdentUnsolo(enum_ty.containerTypeName(ip).toSlice(ip)),
fmtIdentUnsolo(ip.loadEnumType(enum_ty.toIntern()).name.toSlice(ip)),
@intFromEnum(enum_ty.toIntern()),
});
try f.writeCValue(w, operand, .other);
+30 -25
View File
@@ -239,7 +239,20 @@ pub const CType = union(enum) {
) Allocator.Error!CType {
const gpa = zcu.comp.gpa;
const ip = &zcu.intern_pool;
var cur_ty = start_ty;
var cur_ty: Type = if (start_ty.unrestrictedType(zcu)) |unrestricted_ty| switch (start_ty.restrictedRepr(zcu)) {
.indirect => {
const unrestricted_cty = try lowerInner(unrestricted_ty, true, deps, arena, zcu);
const unrestricted_cty_buf = try arena.create(CType);
unrestricted_cty_buf.* = unrestricted_cty;
return .{ .pointer = .{
.@"const" = true,
.@"volatile" = false,
.elem_ty = unrestricted_cty_buf,
.nonstring = unrestricted_cty.isStringElem(),
} };
},
.direct => unrestricted_ty,
} else start_ty;
while (true) {
switch (cur_ty.zigTypeTag(zcu)) {
.type,
@@ -284,20 +297,6 @@ pub const CType = union(enum) {
.pointer => {
const ptr = cur_ty.ptrInfo(zcu);
if (cur_ty.unrestrictedType(zcu)) |unrestricted_ty| switch (cur_ty.restrictedRepr(zcu)) {
.indirect => {
const unrestricted_cty = try lowerInner(unrestricted_ty, true, deps, arena, zcu);
const unrestricted_cty_buf = try arena.create(CType);
unrestricted_cty_buf.* = unrestricted_cty;
return .{ .pointer = .{
.@"const" = true,
.@"volatile" = false,
.elem_ty = unrestricted_cty_buf,
.nonstring = false,
} };
},
.direct => {},
};
switch (ptr.flags.size) {
.slice => {
try deps.addType(gpa, cur_ty, allow_incomplete);
@@ -305,7 +304,7 @@ pub const CType = union(enum) {
},
.one, .many, .c => {
const elem_ty: Type = .fromInterned(ptr.child);
const is_fn_ptr = elem_ty.zigTypeTag(zcu) == .@"fn";
const is_fn_ptr = ip.isFunctionType(elem_ty.toIntern());
const elem_cty: CType = elem_cty: {
if (ptr.packed_offset.host_size > 0 and ptr.flags.vector_index == .none) {
switch (classifyBitInt(.unsigned, ptr.packed_offset.host_size * 8, zcu)) {
@@ -876,6 +875,10 @@ pub const CType = union(enum) {
const ty = ctx.ty;
const zcu = ctx.zcu;
const ip = &zcu.intern_pool;
if (ip.isRestrictedType(ty.toIntern())) {
const name = ip.loadRestrictedType(ty.toIntern()).name.toSlice(ip);
return w.print("{f}", .{@import("../c.zig").fmtIdentUnsolo(name)});
}
switch (ty.zigTypeTag(zcu)) {
.frame => unreachable,
.@"anyframe" => unreachable,
@@ -926,10 +929,7 @@ pub const CType = union(enum) {
.optional => try w.print("opt_{f}", .{fmtZigType(ty.optionalChild(zcu), zcu)}),
.error_union => try w.print("errunion_{f}", .{fmtZigType(ty.errorUnionPayload(zcu), zcu)}),
.pointer => if (ty.unrestrictedType(zcu)) |_| {
const name = ty.containerTypeName(ip).toSlice(ip);
try w.print("{f}", .{@import("../c.zig").fmtIdentUnsolo(name)});
} else switch (ty.ptrSize(zcu)) {
.pointer => switch (ty.ptrSize(zcu)) {
.one, .many, .c => try w.print("ptr_{f}", .{fmtZigType(ty.childType(zcu), zcu)}),
.slice => try w.print("slice_{f}", .{fmtZigType(ty.childType(zcu), zcu)}),
},
@@ -971,6 +971,10 @@ pub const CType = union(enum) {
fmtZigType(ty.childType(zcu), zcu),
}),
.@"enum" => {
const name = ip.loadEnumType(ty.toIntern()).name.toSlice(ip);
try w.print("{f}", .{@import("../c.zig").fmtIdentUnsolo(name)});
},
.@"struct" => if (ty.isTuple(zcu)) {
const len = ty.structFieldCount(zcu);
try w.print("tuple_{d}", .{len});
@@ -979,17 +983,17 @@ pub const CType = union(enum) {
try w.print("_{f}", .{fmtZigType(field_ty, zcu)});
}
} else {
const name = ty.containerTypeName(ip).toSlice(ip);
const name = ip.loadStructType(ty.toIntern()).name.toSlice(ip);
try w.print("{f}", .{@import("../c.zig").fmtIdentUnsolo(name)});
},
.@"opaque" => if (ty.toIntern() == .anyopaque_type) {
try w.writeAll("anyopaque");
} else {
const name = ty.containerTypeName(ip).toSlice(ip);
const name = ip.loadOpaqueType(ty.toIntern()).name.toSlice(ip);
try w.print("{f}", .{@import("../c.zig").fmtIdentUnsolo(name)});
},
.@"union", .@"enum" => {
const name = ty.containerTypeName(ip).toSlice(ip);
.@"union" => {
const name = ip.loadUnionType(ty.toIntern()).name.toSlice(ip);
try w.print("{f}", .{@import("../c.zig").fmtIdentUnsolo(name)});
},
}
@@ -1002,7 +1006,6 @@ pub const CType = union(enum) {
return switch (ip.indexToKey(ty.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.anyframe_type,
.simple_type,
.opaque_type,
@@ -1010,6 +1013,7 @@ pub const CType = union(enum) {
.inferred_error_set_type,
=> true,
.restricted_type,
.struct_type,
.union_type,
.enum_type,
@@ -1045,6 +1049,7 @@ pub const CType = union(enum) {
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
+100 -94
View File
@@ -708,16 +708,17 @@ pub const Object = struct {
rd.* = undefined;
}
};
pub fn getRestrictedDecls(o: *Object, ty: Type) Allocator.Error!*RestrictedDecls {
const gop = try o.restricted_map.getOrPut(o.gpa, ty.toIntern());
pub fn getRestrictedDecls(o: *Object, restricted_ty: Type) Allocator.Error!*RestrictedDecls {
const gop = try o.restricted_map.getOrPut(o.gpa, restricted_ty.toIntern());
if (gop.found_existing) return gop.value_ptr;
errdefer _ = o.restricted_map.pop().?;
const target = o.zcu.getTarget();
const ip = &o.zcu.intern_pool;
const ptr_align = Type.ptrAbiAlignment(target).toLlvm();
const zcu = o.zcu;
const target = zcu.getTarget();
const ip = &zcu.intern_pool;
const unrestricted_ty = restricted_ty.unrestrictedType(zcu).?;
const ty_name = ty.containerTypeName(ip).toSlice(ip);
const ty_name = ip.loadRestrictedType(restricted_ty.toIntern()).name.toSlice(ip);
gop.value_ptr.* = .{
.len = try o.builder.addVariable(
try o.builder.strtabStringFmt("{s}.len", .{ty_name}),
@@ -733,11 +734,11 @@ pub const Object = struct {
};
gop.value_ptr.len.setLinkage(.private, &o.builder);
gop.value_ptr.len.setMutability(.constant, &o.builder);
gop.value_ptr.len.setAlignment(ptr_align, &o.builder);
gop.value_ptr.len.setAlignment(Type.ptrAbiAlignment(target).toLlvm(), &o.builder);
gop.value_ptr.len.setUnnamedAddr(.unnamed_addr, &o.builder);
gop.value_ptr.array.setLinkage(.private, &o.builder);
gop.value_ptr.array.setMutability(.constant, &o.builder);
gop.value_ptr.array.setAlignment(ptr_align, &o.builder);
gop.value_ptr.array.setAlignment(unrestricted_ty.abiAlignment(zcu).toLlvm(), &o.builder);
// Setting unnamed_addr here would reduce safety, and the module emitting the safety checks may not be the same module
// that defined the restricted type. In any case, llvm will add unnamed_addr itself if no safety checks end up being emitted.
gop.value_ptr.array.setUnnamedAddr(.default, &o.builder);
@@ -750,10 +751,13 @@ pub const Object = struct {
try o.builder.intConst(restricted_decls.len.typeOf(&o.builder), len),
&o.builder,
);
try restricted_decls.array.setInitializer(try o.builder.arrayConst(
try o.builder.arrayType(len, .ptr),
restricted_decls.values.values(),
), &o.builder);
try restricted_decls.array.setInitializer(switch (len) {
0 => try o.builder.structConst(try o.builder.structType(.normal, &.{}), &.{}),
else => try o.builder.arrayConst(
try o.builder.arrayType(len, restricted_decls.values.values()[0].typeOf(&o.builder)),
restricted_decls.values.values(),
),
}, &o.builder);
}
}
@@ -2023,7 +2027,7 @@ pub const Object = struct {
fn lowerDebugType(
o: *Object,
pt: Zcu.PerThread,
ty: Type,
start_ty: Type,
ty_fwd_ref: Builder.Metadata,
) Allocator.Error!Builder.Metadata {
assert(!o.builder.strip);
@@ -2033,7 +2037,7 @@ pub const Object = struct {
const target = zcu.getTarget();
const ip = &zcu.intern_pool;
const name = try o.builder.metadataStringFmt("{f}", .{ty.fmt(pt)});
const name = try o.builder.metadataStringFmt("{f}", .{start_ty.fmt(pt)});
// lldb cannot handle non-byte-sized types, so in the logic below, bit sizes are padded up.
// For instance, `bool` is considered to be 8 bits, and `u60` is considered to be 64 bits.
@@ -2044,6 +2048,24 @@ pub const Object = struct {
// handling for variants at all, and will never print fields in them, so I opted not to use
// them for now.
const ty = if (start_ty.unrestrictedType(zcu)) |unrestricted_ty| switch (start_ty.restrictedRepr(zcu)) {
.indirect => {
const ptr_size = Type.ptrAbiSize(zcu.getTarget());
const ptr_align = Type.ptrAbiAlignment(zcu.getTarget());
return o.builder.debugPointerType(
name,
null, // file
o.debug_compile_unit.unwrap().?, // scope
0, // line
try o.getDebugType(pt, unrestricted_ty),
ptr_size * 8,
ptr_align.toByteUnits().? * 8,
0, // offset
);
},
.direct => unrestricted_ty,
} else start_ty;
switch (ty.zigTypeTag(zcu)) {
.void,
.noreturn,
@@ -2071,64 +2093,52 @@ pub const Object = struct {
.pointer => {
const ptr_size = Type.ptrAbiSize(zcu.getTarget());
const ptr_align = Type.ptrAbiAlignment(zcu.getTarget());
switch (ty.restrictedRepr(zcu)) {
.indirect => return o.builder.debugPointerType(
if (ty.isSlice(zcu)) {
const debug_ptr_type = try o.builder.debugMemberType(
try o.builder.metadataString("ptr"),
null, // file
ty_fwd_ref,
0, // line
try o.getDebugType(pt, ty.slicePtrFieldType(zcu)),
ptr_size * 8,
ptr_align.toByteUnits().? * 8,
0, // offset
);
const debug_len_type = try o.builder.debugMemberType(
try o.builder.metadataString("len"),
null, // file
ty_fwd_ref,
0, // line
try o.getDebugType(pt, .usize),
ptr_size * 8,
ptr_align.toByteUnits().? * 8,
ptr_size * 8,
);
return o.builder.debugStructType(
name,
null, // file
o.debug_compile_unit.unwrap().?, // scope
0, // line
try o.getDebugType(pt, ty.unrestrictedType(zcu).?),
ptr_size * 8,
null, // underlying type
ptr_size * 2 * 8,
ptr_align.toByteUnits().? * 8,
0, // offset
),
.direct => if (ty.isSlice(zcu)) {
const debug_ptr_type = try o.builder.debugMemberType(
try o.builder.metadataString("ptr"),
null, // file
ty_fwd_ref,
0, // line
try o.getDebugType(pt, ty.slicePtrFieldType(zcu)),
ptr_size * 8,
ptr_align.toByteUnits().? * 8,
0, // offset
);
const debug_len_type = try o.builder.debugMemberType(
try o.builder.metadataString("len"),
null, // file
ty_fwd_ref,
0, // line
try o.getDebugType(pt, .usize),
ptr_size * 8,
ptr_align.toByteUnits().? * 8,
ptr_size * 8,
);
return o.builder.debugStructType(
name,
null, // file
o.debug_compile_unit.unwrap().?, // scope
0, // line
null, // underlying type
ptr_size * 2 * 8,
ptr_align.toByteUnits().? * 8,
try o.builder.metadataTuple(&.{
debug_ptr_type,
debug_len_type,
}),
);
} else return o.builder.debugPointerType(
name,
null, // file
o.debug_compile_unit.unwrap().?, // scope
0, // line
try o.getDebugType(pt, ty.childType(zcu)),
ptr_size * 8,
ptr_align.toByteUnits().? * 8,
0, // offset
),
}
try o.builder.metadataTuple(&.{
debug_ptr_type,
debug_len_type,
}),
);
} else return o.builder.debugPointerType(
name,
null, // file
o.debug_compile_unit.unwrap().?, // scope
0, // line
try o.getDebugType(pt, ty.childType(zcu)),
ptr_size * 8,
ptr_align.toByteUnits().? * 8,
0, // offset
);
},
.array => return o.builder.debugArrayType(
name,
@@ -3121,7 +3131,7 @@ pub const Object = struct {
.empty_tuple,
.none,
=> unreachable,
else => t: switch (ip.indexToKey(t.toIntern())) {
else => switch (ip.indexToKey(t.toIntern())) {
.int_type => |int_type| try o.builder.intType(int_type.bits),
.ptr_type => |ptr_type| type: {
const ptr_ty = try o.builder.ptrType(
@@ -3135,10 +3145,6 @@ pub const Object = struct {
}),
};
},
.restricted_ptr_type => |restricted_ptr_type| switch (t.restrictedRepr(zcu)) {
.indirect => .ptr,
.direct => continue :t .{ .ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type },
},
.array_type => |array_type| o.builder.arrayType(
array_type.lenIncludingSentinel(),
try o.lowerType(.fromInterned(array_type.child)),
@@ -3217,6 +3223,10 @@ pub const Object = struct {
return o.builder.structType(.normal, fields[0..fields_len]);
},
.simple_type => unreachable,
.restricted_type => |restricted_type| switch (t.restrictedRepr(zcu)) {
.indirect => .ptr,
.direct => try o.lowerType(.fromInterned(restricted_type.unrestricted_type)),
},
.struct_type => {
if (o.type_map.get(t.toIntern())) |value| return value;
@@ -3430,6 +3440,7 @@ pub const Object = struct {
.aggregate,
.un,
.bitpack,
.restricted_value,
// memoization, not types
.memoized_call,
=> unreachable,
@@ -3521,13 +3532,13 @@ pub const Object = struct {
return switch (val_key) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.restricted_type,
.struct_type,
.tuple_type,
.union_type,
@@ -3622,27 +3633,7 @@ pub const Object = struct {
128 => try o.builder.fp128Const(val.toFloat(f128, zcu)),
else => unreachable,
},
.ptr => switch (ty.restrictedRepr(zcu)) {
.indirect => {
const restricted_decls = try o.getRestrictedDecls(ty);
const gop = try restricted_decls.values.getOrPut(o.gpa, arg_val);
if (!gop.found_existing) gop.value_ptr.* = try o.lowerValue(try ip.getCoerced(
zcu.gpa,
zcu.comp.io,
.main, // FIXME
arg_val,
ty.unrestrictedType(zcu).?.toIntern(),
));
return o.builder.gepConst(
.inbounds,
.ptr,
restricted_decls.array.toConst(&o.builder),
null,
&.{try o.builder.intConst(.i64, gop.index)},
);
},
.direct => try o.lowerPtr(arg_val, 0),
},
.ptr => try o.lowerPtr(arg_val, 0),
.slice => |slice| return o.builder.structConst(try o.lowerType(ty), &.{
try o.lowerValue(slice.ptr),
try o.lowerValue(slice.len),
@@ -4006,6 +3997,21 @@ pub const Object = struct {
else
union_ty, vals[0..len]);
},
.restricted_value => |restricted_value| switch (ty.restrictedRepr(zcu)) {
.indirect => {
const restricted_decls = try o.getRestrictedDecls(ty);
const gop = try restricted_decls.values.getOrPut(o.gpa, arg_val);
if (!gop.found_existing) gop.value_ptr.* = try o.lowerValue(restricted_value.unrestricted_value);
return o.builder.gepConst(
.inbounds,
gop.value_ptr.typeOf(&o.builder),
restricted_decls.array.toConst(&o.builder),
null,
&.{try o.builder.intConst(.i64, gop.index)},
);
},
.direct => try o.lowerValue(restricted_value.unrestricted_value),
},
.memoized_call => unreachable,
};
}
+40 -19
View File
@@ -3266,6 +3266,8 @@ fn airUnwrapRestricted(fg: *FuncGen, inst: Air.Inst.Index, safety: bool) Allocat
if (safety) {
const restricted_decls = try o.getRestrictedDecls(restricted_ty);
const llvm_usize_ty = restricted_decls.len.typeOf(&o.builder);
const unrestricted_size = unrestricted_ty.abiSize(zcu);
assert(unrestricted_size > 0);
const array = try o.builder.castConst(.ptrtoint, restricted_decls.array.toConst(&o.builder), llvm_usize_ty);
const ptr_diff = try fg.wip.bin(
.sub,
@@ -3273,11 +3275,6 @@ fn airUnwrapRestricted(fg: *FuncGen, inst: Air.Inst.Index, safety: bool) Allocat
array.toValue(),
"unwrap_restricted.ptr_diff",
);
const index = try fg.wip.callIntrinsic(.normal, .none, .fshr, &.{llvm_usize_ty}, &.{
ptr_diff,
ptr_diff,
try o.builder.intValue(llvm_usize_ty, std.math.log2_int(u64, Type.ptrAbiSize(target))),
}, "unwrap_restricted.index");
const len = try fg.wip.load(
.normal,
llvm_usize_ty,
@@ -3285,18 +3282,38 @@ fn airUnwrapRestricted(fg: *FuncGen, inst: Air.Inst.Index, safety: bool) Allocat
Type.ptrAbiAlignment(target).toLlvm(),
"unwrap_restricted.len",
);
const ok = try fg.wip.icmp(.ult, index, len, "unwrap_restricted.ok");
const invalid_block = try fg.wip.block(1, "unwrap_restricted.invalid");
const is_po2_unrestricted_size = std.math.isPowerOfTwo(unrestricted_size);
const check_block = if (is_po2_unrestricted_size) undefined else try fg.wip.block(1, "unwrap_restricted.check");
const invalid_block = try fg.wip.block(if (is_po2_unrestricted_size) 1 else 2, "unwrap_restricted.invalid");
const valid_block = try fg.wip.block(1, "unwrap_restricted.valid");
_ = try fg.wip.brCond(ok, valid_block, invalid_block, .none);
if (is_po2_unrestricted_size) {
const index = if (unrestricted_size == 1)
ptr_diff
else
try fg.wip.callIntrinsic(.normal, .none, .fshr, &.{llvm_usize_ty}, &.{
ptr_diff,
ptr_diff,
try o.builder.intValue(llvm_usize_ty, std.math.log2_int(u64, unrestricted_size)),
}, "unwrap_restricted.index");
const ok = try fg.wip.icmp(.ult, index, len, "unwrap_restricted.ok");
_ = try fg.wip.brCond(ok, valid_block, invalid_block, .none);
} else {
const unrestricted_size_value = try o.builder.intValue(llvm_usize_ty, unrestricted_size);
const misalignment = try fg.wip.bin(.urem, ptr_diff, unrestricted_size_value, "unwrap_restricted.misalignment");
const misaligned = try fg.wip.icmp(.ne, misalignment, try o.builder.intValue(llvm_usize_ty, 0), "unwrap_restricted.misaligned");
_ = try fg.wip.brCond(misaligned, invalid_block, check_block, .none);
fg.wip.cursor = .{ .block = check_block };
const index = try fg.wip.bin(.@"udiv exact", ptr_diff, unrestricted_size_value, "unwrap_restricted.index");
const ok = try fg.wip.icmp(.ult, index, len, "unwrap_restricted.ok");
_ = try fg.wip.brCond(ok, valid_block, invalid_block, .none);
}
fg.wip.cursor = .{ .block = invalid_block };
try fg.buildSimplePanic(.corrupt_restricted_pointer);
try fg.buildSimplePanic(.corrupt_restricted_value);
fg.wip.cursor = .{ .block = valid_block };
}
return fg.wip.load(.normal, .ptr, operand, unrestricted_ty.abiAlignment(zcu).toLlvm(), "unwrap_restricted");
return fg.load(operand, unrestricted_ty, unrestricted_ty.abiAlignment(zcu).toLlvm(), .normal);
},
.direct => return operand,
}
@@ -7275,7 +7292,11 @@ pub fn buildAllocaInner(
/// This is the one source of truth for whether a type is passed around as an LLVM pointer,
/// or as an LLVM value.
pub fn isByRef(ty: Type, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
const unrestricted_ty = if (ty.unrestrictedType(zcu)) |unrestricted_ty| switch (ty.restrictedRepr(zcu)) {
.indirect => return false,
.direct => unrestricted_ty,
} else ty;
return switch (unrestricted_ty.zigTypeTag(zcu)) {
.type,
.comptime_int,
.comptime_float,
@@ -7300,19 +7321,19 @@ pub fn isByRef(ty: Type, zcu: *const Zcu) bool {
.array,
.frame,
=> ty.hasRuntimeBits(zcu),
=> unrestricted_ty.hasRuntimeBits(zcu),
.error_union => ty.errorUnionPayload(zcu).hasRuntimeBits(zcu),
.error_union => unrestricted_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu),
.optional => !ty.optionalReprIsPayload(zcu) and ty.optionalChild(zcu).hasRuntimeBits(zcu),
.optional => !unrestricted_ty.optionalReprIsPayload(zcu) and unrestricted_ty.optionalChild(zcu).hasRuntimeBits(zcu),
.@"struct" => switch (ty.containerLayout(zcu)) {
.@"struct" => switch (unrestricted_ty.containerLayout(zcu)) {
.@"packed" => false,
.auto, .@"extern" => ty.hasRuntimeBits(zcu),
.auto, .@"extern" => unrestricted_ty.hasRuntimeBits(zcu),
},
.@"union" => switch (ty.containerLayout(zcu)) {
.@"union" => switch (unrestricted_ty.containerLayout(zcu)) {
.@"packed" => false,
else => ty.hasRuntimeBits(zcu) and !ty.unionHasAllZeroBitFieldTypes(zcu),
else => unrestricted_ty.hasRuntimeBits(zcu) and !unrestricted_ty.unionHasAllZeroBitFieldTypes(zcu),
},
};
}
+3 -2
View File
@@ -774,13 +774,13 @@ fn constant(cg: *CodeGen, ty: Type, val: Value, repr: Repr) Error!Id {
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.restricted_type,
.struct_type,
.tuple_type,
.union_type,
@@ -990,6 +990,7 @@ fn constant(cg: *CodeGen, ty: Type, val: Value, repr: Repr) Error!Id {
break :cache try cg.constant(int_val.typeOf(zcu), int_val, repr);
},
.restricted_value => return cg.todo("implement restricted values", .{}),
.memoized_call => unreachable,
}
};
@@ -2777,7 +2778,7 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) Error!void {
.wrap_errunion_err => try cg.airWrapErrUnionErr(inst),
.wrap_errunion_payload => try cg.airWrapErrUnionPayload(inst),
.unwrap_restricted => return cg.fail("TODO implement restricted pointers", .{}),
.unwrap_restricted => return cg.todo("implement restricted values", .{}),
.is_null => try cg.airIsNull(inst, false, .is_null),
.is_non_null => try cg.airIsNull(inst, false, .is_non_null),
+2 -1
View File
@@ -4679,13 +4679,13 @@ fn lowerConstant(cg: *CodeGen, val: Value) InnerError!WValue {
switch (ip.indexToKey(val.ip_index)) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.restricted_type,
.struct_type,
.tuple_type,
.union_type,
@@ -4779,6 +4779,7 @@ fn lowerConstant(cg: *CodeGen, val: Value) InnerError!WValue {
},
.un => unreachable, // packed unions use `bitpack`
.bitpack => |bitpack| return cg.lowerConstant(.fromInterned(bitpack.backing_int_val)),
.restricted_value => return cg.fail("Wasm TODO: LowerConstant for restricted value", .{}),
.memoized_call => unreachable,
}
}
+219 -16
View File
@@ -103826,14 +103826,15 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
const unrestricted_ty = ty_op.ty.toType();
const restricted_ty = cg.typeOf(ty_op.operand);
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand}) ++ .{try cg.tempInit(ty_op.ty.toType(), .none)};
const res = res: switch (restricted_ty.restrictedRepr(zcu)) {
.indirect => {
if (zcu.comp.config.use_new_linker) switch (air_tag) {
else => unreachable,
.unwrap_restricted => {},
.unwrap_restricted_safe => cg.select(&.{}, &.{}, &ops, &.{ .{
.required_features = .{ .avx, null, null, null },
.required_features = .{ .avx, .bmi2, null, null },
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
@@ -103843,7 +103844,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_pointer } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
@@ -103857,7 +103858,134 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.none, .add_log2_ptr_size), ._, ._ },
.{ ._, ._rx, .ro, .tmp2p, .tmp2p, .sa(.src1, .add_log2_size), ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.src1, .add_log2_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .avx, null, null, null },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .reg = .rax } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .reg = .rdx } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .sa(.src1, .add_size), ._, ._ },
.{ ._, ._, .xor, .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._, .div, .tmp3p, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ .@"1:", ._, .call, .tmp5d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, .bmi2, null, null },
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._rx, .ro, .tmp2p, .tmp2p, .sa(.src1, .add_log2_size), ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.src1, .add_log2_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
@@ -103872,8 +104000,43 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .reg = .rax } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_pointer } },
.{ .type = .usize, .kind = .{ .reg = .rdx } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._, .mov, .tmp3d, .sa(.src1, .add_size), ._, ._ },
.{ ._, ._, .xor, .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._, .div, .tmp3p, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ .@"1:", ._, .call, .tmp5d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .bmi2, null, null, null },
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
@@ -103887,7 +104050,37 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.none, .add_log2_ptr_size), ._, ._ },
.{ ._, ._rx, .ro, .tmp2p, .tmp2p, .sa(.src1, .add_log2_size), ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.src_constraints = .{ .any, .po2_any, .any },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.src1, .add_log2_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
@@ -103901,10 +104094,10 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .deferred_const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .reg = .rax } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_pointer } },
.unused,
.unused,
.{ .type = .usize, .kind = .{ .reg = .rdx } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_value } },
.unused,
.unused,
.unused,
@@ -103916,10 +104109,14 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.none, .add_log2_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp3d, .sa(.src1, .add_size), ._, ._ },
.{ ._, ._, .xor, .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._, .div, .tmp3p, ._, ._, ._ },
.{ ._, ._, .@"test", .tmp4p, .tmp4p, ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
.{ .@"1:", ._, .call, .tmp5d, ._, ._, ._ },
} },
} }) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {t} {f} {f} {f}", .{
@@ -103935,7 +104132,8 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
},
.direct => ops[0],
};
try res.finish(inst, &.{ty_op.operand}, &ops, cg);
for (ops[1..]) |op| try op.die(cg);
try res.finish(inst, &.{ty_op.operand}, ops[0..1], cg);
},
.struct_field_ptr => {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
@@ -174190,7 +174388,12 @@ fn allocRegOrMemAdvanced(self: *CodeGen, ty: Type, inst: ?Air.Inst.Index, reg_ok
};
if (reg_ok) need_mem: {
if (std.math.isPowerOfTwo(abi_size) and abi_size <= @as(u32, max_abi_size: switch (ty.zigTypeTag(zcu)) {
if (!std.math.isPowerOfTwo(abi_size)) break :need_mem;
const unrestricted_ty: Type = if (ty.unrestrictedType(zcu)) |unrestricted_ty| switch (ty.restrictedRepr(zcu)) {
.indirect => .usize,
.direct => unrestricted_ty,
} else ty;
if (abi_size <= @as(u32, max_abi_size: switch (unrestricted_ty.zigTypeTag(zcu)) {
.float => switch (ty.floatBits(self.target)) {
16, 32, 64, 128 => 16,
80 => break :need_mem,
@@ -189139,9 +189342,9 @@ const Select = struct {
lhs: enum(u6) {
none,
ptr_size,
log2_ptr_size,
ptr_bit_size,
size,
log2_size,
src0_size,
dst0_size,
delta_size,
@@ -189178,7 +189381,6 @@ const Select = struct {
const none: Adjust = .{ .sign = .pos, .lhs = .none, .op = .mul, .rhs = .@"1" };
const add_ptr_size: Adjust = .{ .sign = .pos, .lhs = .ptr_size, .op = .mul, .rhs = .@"1" };
const sub_ptr_size: Adjust = .{ .sign = .neg, .lhs = .ptr_size, .op = .mul, .rhs = .@"1" };
const add_log2_ptr_size: Adjust = .{ .sign = .pos, .lhs = .log2_ptr_size, .op = .mul, .rhs = .@"1" };
const add_ptr_bit_size: Adjust = .{ .sign = .pos, .lhs = .ptr_bit_size, .op = .mul, .rhs = .@"1" };
const add_size: Adjust = .{ .sign = .pos, .lhs = .size, .op = .mul, .rhs = .@"1" };
const add_size_div_4: Adjust = .{ .sign = .pos, .lhs = .size, .op = .div, .rhs = .@"4" };
@@ -189186,6 +189388,7 @@ const Select = struct {
const sub_size_div_8: Adjust = .{ .sign = .neg, .lhs = .size, .op = .div, .rhs = .@"8" };
const sub_size_div_4: Adjust = .{ .sign = .neg, .lhs = .size, .op = .div, .rhs = .@"4" };
const sub_size: Adjust = .{ .sign = .neg, .lhs = .size, .op = .mul, .rhs = .@"1" };
const add_log2_size: Adjust = .{ .sign = .pos, .lhs = .log2_size, .op = .mul, .rhs = .@"1" };
const sub_src0_size_div_8: Adjust = .{ .sign = .neg, .lhs = .src0_size, .op = .div, .rhs = .@"8" };
const sub_src0_size: Adjust = .{ .sign = .neg, .lhs = .src0_size, .op = .mul, .rhs = .@"1" };
const add_src0_size: Adjust = .{ .sign = .pos, .lhs = .src0_size, .op = .mul, .rhs = .@"1" };
@@ -190119,8 +190322,8 @@ const Select = struct {
const lhs: SignedImm = lhs: switch (op.flags.adjust.lhs) {
.none => 0,
.ptr_size => @divExact(s.cg.target.ptrBitWidth(), 8),
.log2_ptr_size => std.math.log2(@divExact(s.cg.target.ptrBitWidth(), 8)),
.ptr_bit_size => s.cg.target.ptrBitWidth(),
.log2_size => std.math.log2_int_ceil(u64, op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu)),
.size => @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu)),
.src0_size => @intCast(Select.Operand.Ref.src0.typeOf(s).abiSize(s.cg.pt.zcu)),
.dst0_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).abiSize(s.cg.pt.zcu)),
+2 -2
View File
@@ -1385,8 +1385,8 @@ fn mergeNeededRestricted(
for (new.keys()) |restricted_key| {
const restricted_ty = switch (ip.indexToKey(restricted_key)) {
else => unreachable,
.restricted_ptr_type => restricted_key,
.ptr => |ptr| ptr.ty,
.restricted_type => restricted_key,
.restricted_value => |restricted_value| restricted_value.ty,
};
const gop = global.getOrPutAssumeCapacity(restricted_ty);
if (!gop.found_existing) gop.value_ptr.* = .empty;
+8 -1
View File
@@ -2173,7 +2173,14 @@ fn flushLazy(coff: *Coff, pt: Zcu.PerThread, lmr: Node.LazyMapRef) !void {
if (structure.modify) |modification| modification.operation.apply(
coff.lazySymbolIfExists(modification.lazy_sym).?.node(coff).slice(&coff.mf),
coff.targetEndian(),
.{
.ptr_bit_width = switch (coff.optionalHeaderStandardPtr().magic) {
else => unreachable,
.PE32 => 32,
.@"PE32+" => 64,
},
.endian = coff.targetEndian(),
},
);
}
+23 -21
View File
@@ -181,8 +181,9 @@ fn update(pool: *ConstPool, pt: Zcu.PerThread, user: User, index: ConstPool.Inde
}
}
fn checkType(pool: *const ConstPool, ty: Type, zcu: *const Zcu) bool {
if (ty.isGenericPoison()) return true;
return switch (ty.zigTypeTag(zcu)) {
const unrestricted_ty = ty.unrestrictedType(zcu) orelse ty;
if (unrestricted_ty.isGenericPoison()) return true;
return switch (unrestricted_ty.zigTypeTag(zcu)) {
.type,
.void,
.bool,
@@ -201,33 +202,34 @@ fn checkType(pool: *const ConstPool, ty: Type, zcu: *const Zcu) bool {
.enum_literal,
=> true,
.array, .vector => pool.checkType(ty.childType(zcu), zcu),
.optional => pool.checkType(ty.optionalChild(zcu), zcu),
.error_union => pool.checkType(ty.errorUnionPayload(zcu), zcu),
.array, .vector => pool.checkType(unrestricted_ty.childType(zcu), zcu),
.optional => pool.checkType(unrestricted_ty.optionalChild(zcu), zcu),
.error_union => pool.checkType(unrestricted_ty.errorUnionPayload(zcu), zcu),
.@"fn" => {
const ip = &zcu.intern_pool;
const func = ip.indexToKey(ty.toIntern()).func_type;
const func = ip.indexToKey(unrestricted_ty.toIntern()).func_type;
for (func.param_types.get(ip)) |param_ty_ip| {
if (!pool.checkType(.fromInterned(param_ty_ip), zcu)) return false;
}
return pool.checkType(.fromInterned(func.return_type), zcu);
},
.@"struct" => if (ty.isTuple(zcu)) {
for (0..ty.structFieldCount(zcu)) |field_index| {
if (!pool.checkType(ty.fieldType(field_index, zcu), zcu)) return false;
.@"struct" => if (unrestricted_ty.isTuple(zcu)) {
for (0..unrestricted_ty.structFieldCount(zcu)) |field_index| {
if (!pool.checkType(unrestricted_ty.fieldType(field_index, zcu), zcu)) return false;
}
return true;
} else {
return pool.complete_containers.contains(ty.toIntern());
return pool.complete_containers.contains(unrestricted_ty.toIntern());
},
.@"union", .@"enum" => {
return pool.complete_containers.contains(ty.toIntern());
return pool.complete_containers.contains(unrestricted_ty.toIntern());
},
};
}
fn registerTypeDeps(pool: *ConstPool, root: Index, ty: Type, zcu: *const Zcu) Allocator.Error!void {
if (ty.isGenericPoison()) return;
switch (ty.zigTypeTag(zcu)) {
const unrestricted_ty = ty.unrestrictedType(zcu) orelse ty;
if (unrestricted_ty.isGenericPoison()) return;
switch (unrestricted_ty.zigTypeTag(zcu)) {
.type,
.void,
.bool,
@@ -246,20 +248,20 @@ fn registerTypeDeps(pool: *ConstPool, root: Index, ty: Type, zcu: *const Zcu) Al
.enum_literal,
=> {},
.array, .vector => try pool.registerTypeDeps(root, ty.childType(zcu), zcu),
.optional => try pool.registerTypeDeps(root, ty.optionalChild(zcu), zcu),
.error_union => try pool.registerTypeDeps(root, ty.errorUnionPayload(zcu), zcu),
.array, .vector => try pool.registerTypeDeps(root, unrestricted_ty.childType(zcu), zcu),
.optional => try pool.registerTypeDeps(root, unrestricted_ty.optionalChild(zcu), zcu),
.error_union => try pool.registerTypeDeps(root, unrestricted_ty.errorUnionPayload(zcu), zcu),
.@"fn" => {
const ip = &zcu.intern_pool;
const func = ip.indexToKey(ty.toIntern()).func_type;
const func = ip.indexToKey(unrestricted_ty.toIntern()).func_type;
for (func.param_types.get(ip)) |param_ty_ip| {
try pool.registerTypeDeps(root, .fromInterned(param_ty_ip), zcu);
}
try pool.registerTypeDeps(root, .fromInterned(func.return_type), zcu);
},
.@"struct", .@"union", .@"enum" => if (ty.isTuple(zcu)) {
for (0..ty.structFieldCount(zcu)) |field_index| {
try pool.registerTypeDeps(root, ty.fieldType(field_index, zcu), zcu);
.@"struct", .@"union", .@"enum" => if (unrestricted_ty.isTuple(zcu)) {
for (0..unrestricted_ty.structFieldCount(zcu)) |field_index| {
try pool.registerTypeDeps(root, unrestricted_ty.fieldType(field_index, zcu), zcu);
}
} else {
// `ty` is a container; register the dependency.
@@ -269,7 +271,7 @@ fn registerTypeDeps(pool: *ConstPool, root: Index, ty: Type, zcu: *const Zcu) Al
try pool.container_dep_entries.ensureUnusedCapacity(gpa, 1);
errdefer comptime unreachable;
const gop = pool.container_deps.getOrPutAssumeCapacity(ty.toIntern());
const gop = pool.container_deps.getOrPutAssumeCapacity(unrestricted_ty.toIntern());
const entry: ContainerDepEntry.Index = @enumFromInt(pool.container_dep_entries.items.len);
pool.container_dep_entries.appendAssumeCapacity(.{
.next = if (gop.found_existing) gop.value_ptr.toOptional() else .none,
+45 -17
View File
@@ -1176,7 +1176,7 @@ pub const Loc = union(enum) {
implicit_pointer: struct {
unit: Unit.Index,
entry: Entry.Index,
offset: i65,
offset: i65 = 0,
},
wasm_ext: union(enum) {
local: u32,
@@ -3060,13 +3060,13 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
} = switch (ip.indexToKey(nav_val.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
.error_union_type,
.anyframe_type,
.simple_type,
.restricted_type,
.tuple_type,
.func_type,
.error_set_type,
@@ -3128,6 +3128,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
.aggregate,
.un,
.bitpack,
.restricted_value,
=> if (nav.resolved.?.@"const") .@"const" else .@"var",
.@"extern" => unreachable,
@@ -3509,12 +3510,15 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
if (value_index == .anyerror_type) return; // handled in `flush` instead
const value_ip_key = ip.indexToKey(value_index);
switch (value_ip_key) {
const value_ip_key: InternPool.Key = switch (ip.indexToKey(value_index)) {
.func => return, // populated by the Nav instead (`updateComptimeNav` or `initWipNav`)
.@"extern" => return, // populated by the Nav instead (`initWipNav`)
else => {},
}
.restricted_value => |restricted_value| switch (Type.restrictedRepr(.fromInterned(restricted_value.ty), zcu)) {
.indirect => .{ .restricted_value = restricted_value },
.direct => ip.indexToKey(restricted_value.unrestricted_value),
},
else => |key| key,
};
switch (value_index) {
.generic_poison_type => log.debug("updateValue(anytype)", .{}),
@@ -3567,7 +3571,7 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
const diw = &wip_nav.debug_info.writer;
var big_int_space: Value.BigIntSpace = undefined;
key: switch (value_ip_key) {
switch (value_ip_key) {
.func => unreachable, // handled above
.@"extern" => unreachable, // handled above
@@ -3630,13 +3634,6 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
try diw.writeUleb128(@intFromEnum(AbbrevCode.null));
},
},
.restricted_ptr_type => |restricted_ptr_type| switch (Type.restrictedReprByZirIndex(restricted_ptr_type.zir_index, zcu)) {
.indirect => continue :key .{ .ptr_type = .{
.child = restricted_ptr_type.unrestricted_ptr_type,
.flags = .{ .is_const = true },
} },
.direct => continue :key .{ .ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type },
},
.array_type => |array_type| {
const array_child_type: Type = .fromInterned(array_type.child);
try wip_nav.abbrevCode(if (array_type.sentinel == .none) .array_type else .array_sentinel_type);
@@ -3847,6 +3844,28 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
.anyerror => unreachable, // already did early return above
.adhoc_inferred_error_set => unreachable,
},
.restricted_type => |restricted_type| {
const repr = Type.restrictedReprByTrackedInst(restricted_type.zir_index, zcu);
try wip_nav.abbrevCode(switch (repr) {
.indirect => .ptr_type,
.direct => .alias_type,
});
try wip_nav.strpFmt("{f}", .{val.toType().fmt(pt)});
switch (repr) {
.indirect => {
try diw.writeByte(@intFromEnum(InternPool.Key.PtrType.AddressSpace.generic));
try wip_nav.infoSectionOffset(
.debug_info,
wip_nav.unit,
wip_nav.entry,
@intCast(diw.end + dwarf.sectionOffsetBytes()),
);
try wip_nav.abbrevCode(.is_const);
},
.direct => {},
}
try wip_nav.refType(.fromInterned(restricted_type.unrestricted_type));
},
.tuple_type => |tuple_type| if (tuple_type.types.len == 0) {
try wip_nav.abbrevCode(.generated_empty_struct_type);
try wip_nav.strpFmt("{f}", .{val.toType().fmt(pt)});
@@ -4265,7 +4284,7 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
if (error_set_type.names.len > 0) try diw.writeUleb128(@intFromEnum(AbbrevCode.null));
},
.inferred_error_set_type => |func| {
try wip_nav.abbrevCode(.inferred_error_set_type);
try wip_nav.abbrevCode(.alias_type);
try wip_nav.strpFmt("{f}", .{val.toType().fmt(pt)});
try wip_nav.refType(.fromInterned(switch (ip.funcIesResolvedUnordered(func)) {
.none => .anyerror_type,
@@ -4648,6 +4667,15 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
}
try diw.writeUleb128(@intFromEnum(AbbrevCode.null));
},
.restricted_value => |restricted_value| { // repr checked above
try wip_nav.abbrevCode(.location_comptime_value);
const unrestricted_unit, const unrestricted_entry =
try wip_nav.getValueEntry(.fromInterned(restricted_value.unrestricted_value));
try wip_nav.infoExprLoc(.{ .implicit_pointer = .{
.unit = unrestricted_unit,
.entry = unrestricted_entry,
} });
},
.memoized_call => unreachable, // not a value
}
try dwarf.debug_info.section.replaceEntry(unit, entry, dwarf, wip_nav.debug_info.written());
@@ -5209,7 +5237,7 @@ const AbbrevCode = enum {
tagged_union_default_field,
void_type,
numeric_type,
inferred_error_set_type,
alias_type,
ptr_type,
ptr_sentinel_type,
ptr_aligned_type,
@@ -5839,7 +5867,7 @@ const AbbrevCode = enum {
.{ .alignment, .udata },
},
},
.inferred_error_set_type = .{
.alias_type = .{
.tag = .typedef,
.attrs = &.{
.{ .name, .strp },
+8 -1
View File
@@ -3349,7 +3349,14 @@ fn flushLazy(elf: *Elf, pt: Zcu.PerThread, lmr: Node.LazyMapRef) !void {
if (structure.modify) |modification| modification.operation.apply(
elf.lazySymbolIfExists(modification.lazy_sym).?.node(elf).slice(&elf.mf),
elf.targetEndian(),
.{
.ptr_bit_width = switch (elf.identClass()) {
.NONE, _ => unreachable,
.@"32" => 32,
.@"64" => 64,
},
.endian = elf.targetEndian(),
},
);
}
+10 -9
View File
@@ -49,7 +49,6 @@ pub fn print(
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
@@ -64,6 +63,7 @@ pub fn print(
.func_type,
.error_set_type,
.inferred_error_set_type,
.restricted_type,
=> try Type.print(val.toType(), writer, pt, null),
.undef => try writer.writeAll("undefined"),
.simple_value => |simple_value| switch (simple_value) {
@@ -88,7 +88,7 @@ pub fn print(
.err_name => |err_name| try writer.print("error.{f}", .{
err_name.fmt(ip),
}),
.payload => |payload| try print(Value.fromInterned(payload), writer, level, pt, opt_sema),
.payload => |payload| try print(.fromInterned(payload), writer, level, pt, opt_sema),
},
.enum_literal => |enum_literal| try writer.print(".{f}", .{
enum_literal.fmt(ip),
@@ -102,7 +102,7 @@ pub fn print(
return writer.writeAll("@enumFromInt(...)");
}
try writer.writeAll("@enumFromInt(");
try print(Value.fromInterned(enum_tag.int), writer, level - 1, pt, opt_sema);
try print(.fromInterned(enum_tag.int), writer, level - 1, pt, opt_sema);
try writer.writeAll(")");
},
.float => |float| switch (float.storage) {
@@ -130,7 +130,7 @@ pub fn print(
if (level == 0) {
try writer.writeAll("(...)");
} else {
try print(Value.fromInterned(slice.len), writer, level - 1, pt, opt_sema);
try print(.fromInterned(slice.len), writer, level - 1, pt, opt_sema);
}
try writer.writeAll("]");
},
@@ -148,7 +148,7 @@ pub fn print(
},
.opt => |opt| switch (opt.val) {
.none => try writer.writeAll("null"),
else => |payload| try print(Value.fromInterned(payload), writer, level, pt, opt_sema),
else => |payload| try print(.fromInterned(payload), writer, level, pt, opt_sema),
},
.aggregate => |aggregate| try printAggregate(val, aggregate, false, writer, level, pt, opt_sema),
.un => |un| {
@@ -159,13 +159,13 @@ pub fn print(
if (un.tag == .none) {
const backing_ty = try val.typeOf(zcu).externUnionBackingType(pt);
try writer.print("@bitCast(@as({f}, ", .{backing_ty.fmt(pt)});
try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try print(.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try writer.writeAll("))");
} else {
try writer.writeAll(".{ ");
try print(Value.fromInterned(un.tag), writer, level - 1, pt, opt_sema);
try print(.fromInterned(un.tag), writer, level - 1, pt, opt_sema);
try writer.writeAll(" = ");
try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try print(.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try writer.writeAll(" }");
}
},
@@ -198,6 +198,7 @@ pub fn print(
else => unreachable,
}
},
.restricted_value => |restricted_value| try print(.fromInterned(restricted_value.unrestricted_value), writer, level, pt, opt_sema),
.memoized_call => unreachable,
}
}
@@ -470,7 +471,7 @@ pub fn printPtrDerivation(
if (x.level == 0) {
try writer.writeAll("...");
} else {
try print(Value.fromInterned(uav.val), writer, x.level - 1, pt, x.opt_sema);
try print(.fromInterned(uav.val), writer, x.level - 1, pt, x.opt_sema);
}
try writer.writeByte(')');
},
+11 -8
View File
@@ -622,14 +622,6 @@ const Writer = struct {
try stream.writeAll(")) ");
try self.writeSrcNode(stream, extra.node);
},
.reify_restricted => {
const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const name_strat: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
try stream.print("{t}, ", .{name_strat});
try self.writeInstRef(stream, extra.operand);
try stream.writeAll(")) ");
try self.writeSrcNode(stream, extra.node);
},
.reify_fn => {
const extra = self.code.extraData(Zir.Inst.ReifyFn, extended.operand).data;
try self.writeInstRef(stream, extra.param_types);
@@ -642,6 +634,17 @@ const Writer = struct {
try stream.writeAll(")) ");
try self.writeSrcNode(stream, extra.node);
},
.reify_restricted => {
const extra = self.code.extraData(Zir.Inst.ReifyRestricted, extended.operand).data;
const name_strat: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
try stream.print("{t}, ", .{name_strat});
try self.writeInstRef(stream, extra.unrestricted_ty);
try stream.writeAll(")) ");
const prev_parent_decl_node = self.parent_decl_node;
self.parent_decl_node = extra.node;
defer self.parent_decl_node = prev_parent_decl_node;
try self.writeSrcNode(stream, .zero);
},
.reify_struct => {
const extra = self.code.extraData(Zir.Inst.ReifyStruct, extended.operand).data;
const name_strat: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
+3 -2
View File
@@ -908,7 +908,7 @@ pub fn zigBackend(target: *const std.Target, use_llvm: bool) std.builtin.Compile
};
}
pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, comptime feature: Feature) bool {
pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, incremental: bool, comptime feature: Feature) bool {
return switch (feature) {
.panic_fn => switch (backend) {
.stage2_aarch64,
@@ -949,7 +949,8 @@ pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, compt
else => true,
},
.restricted_types => switch (backend) {
.stage2_c, .stage2_llvm, .stage2_x86_64 => true,
.stage2_c => true,
.stage2_llvm, .stage2_x86_64 => !incremental,
else => false,
},
};