start implementing restricted types

This commit is contained in:
Jacob Young
2026-04-08 21:30:56 -04:00
parent ec3f362ae9
commit a8428b777c
34 changed files with 919 additions and 182 deletions
+7
View File
@@ -5769,6 +5769,13 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
<p>Returns a {#link|pointer|Pointers#} type with the properties specified by the arguments.</p>
{#header_close#}
{#header_open|@Restricted#}
<pre>{#syntax#}@Restricted(
comptime Pointer: type,
) type{#endsyntax#}</pre>
<p>Returns a restricted pointer type based on the specified pointer type.</p>
{#header_close#}
{#header_open|@Fn#}
<pre>{#syntax#}@Fn(
comptime param_types: []const type,
+4
View File
@@ -201,6 +201,10 @@ pub fn FullPanic(comptime panicFn: fn ([]const u8, ?usize) noreturn) type {
@branchHint(.cold);
call("'noreturn' function returned", @returnAddress());
}
/// Panic triggered when a restricted pointer is found to hold a corrupt
/// (invalid) value; delegates to the user-provided panic function.
pub fn corruptRestrictedPointer() noreturn {
    @branchHint(.cold);
    call("corrupt restricted pointer value", @returnAddress());
}
};
}
+5
View File
@@ -134,3 +134,8 @@ pub fn noreturnReturned() noreturn {
@branchHint(.cold);
@trap();
}
/// Panic triggered when a restricted pointer is found to hold a corrupt
/// (invalid) value. This trapping implementation aborts without a message.
pub fn corruptRestrictedPointer() noreturn {
    @branchHint(.cold);
    @trap();
}
+4
View File
@@ -126,3 +126,7 @@ pub fn memcpyAlias() noreturn {
pub fn noreturnReturned() noreturn {
call("'noreturn' function returned", null);
}
/// Panic triggered when a restricted pointer is found to hold a corrupt
/// (invalid) value; forwards a message (with no return address) to `call`.
pub fn corruptRestrictedPointer() noreturn {
    call("corrupt restricted pointer value", null);
}
+10 -1
View File
@@ -1208,7 +1208,7 @@ fn nameStratExpr(
const builtin_name = tree.tokenSlice(builtin_token);
const info = BuiltinFn.list.get(builtin_name) orelse return null;
switch (info.tag) {
.Enum, .Struct, .Union => {
.Restricted, .Enum, .Struct, .Union => {
var buf: [2]Ast.Node.Index = undefined;
const params = tree.builtinCallParams(&buf, node).?;
return try builtinCall(gz, scope, ri, node, params, false, name_strat);
@@ -9320,6 +9320,15 @@ fn builtinCall(
});
return rvalue(gz, ri, result, node);
},
.Restricted => {
const unrestricted_ptr_ty = try typeExpr(gz, scope, params[0]);
const result = try gz.addExtendedPayloadSmall(
.reify_restricted,
@intFromEnum(reify_name_strat),
Zir.Inst.UnNode{ .node = gz.nodeIndexToRelative(node), .operand = unrestricted_ptr_ty },
);
return rvalue(gz, ri, result, node);
},
.Fn => {
const fn_attrs_ty = try gz.addBuiltinValue(node, .fn_attributes);
const param_types = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_type_type } }, params[0], .fn_param_types);
+1
View File
@@ -903,6 +903,7 @@ fn builtinCall(astrl: *AstRlAnnotate, block: ?*Block, ri: ResultInfo, node: Ast.
.error_name,
.set_runtime_safety,
.Tuple,
.Restricted,
.wasm_memory_size,
.splat,
.set_float_mode,
+8
View File
@@ -110,6 +110,7 @@ pub const Tag = enum {
Int,
Tuple,
Pointer,
Restricted,
Fn,
Struct,
Union,
@@ -943,6 +944,13 @@ pub const list = list: {
.param_count = 4,
},
},
.{
"@Restricted",
.{
.tag = .Restricted,
.param_count = 1,
},
},
.{
"@Fn",
.{
+9 -3
View File
@@ -2062,6 +2062,10 @@ pub const Inst = struct {
/// Implements builtin `@Pointer`.
/// `operand` is payload index to `ReifyPointer`.
reify_pointer,
/// Implements builtin `@Restricted`.
/// `operand` is payload index to `UnNode`.
/// `small` contains `NameStrategy`.
reify_restricted,
/// Implements builtin `@Fn`.
/// `operand` is payload index to `ReifyFn`.
reify_fn,
@@ -4431,15 +4435,16 @@ fn findTrackableInner(
},
// Reifications need tracking.
.reify_enum,
.reify_restricted,
.reify_struct,
.reify_union,
.reify_enum,
=> return contents.other.append(gpa, inst),
// Type declarations need tracking.
.struct_decl,
.union_decl,
.enum_decl,
.union_decl,
.opaque_decl,
=> return contents.type_decls.append(gpa, inst),
}
@@ -5232,9 +5237,10 @@ pub fn assertTrackable(zir: Zir, inst_idx: Zir.Inst.Index) void {
.union_decl,
.enum_decl,
.opaque_decl,
.reify_enum,
.reify_restricted,
.reify_struct,
.reify_union,
.reify_enum,
=> {}, // tracked in order, as the owner instructions of explicit container types
else => unreachable, // assertion failure; not trackable
},
+14
View File
@@ -633,6 +633,16 @@ pub const Inst = struct {
/// wrap from E to E!T
/// Uses the `ty_op` field.
wrap_errunion_err,
/// Converts a runtime restricted pointer into the corresponding unrestricted pointer.
/// Uses the `ty_op` field.
unwrap_restricted,
/// Converts a runtime restricted pointer into the corresponding unrestricted pointer.
/// All invalid pointers are a guaranteed safety panic, which is only applicable
/// when the restricted pointer type belongs to a module with safety enabled.
/// The panic handler function must be populated before lowering AIR
/// that contains this instruction.
/// Uses the `ty_op` field.
unwrap_restricted_safe,
/// Given a pointer to a struct or union and a field index, returns a pointer to the field.
/// Uses the `ty_pl` field, payload is `StructField`.
/// TODO rename to `agg_field_ptr`.
@@ -1681,6 +1691,8 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
.unwrap_errunion_err_ptr,
.wrap_errunion_payload,
.wrap_errunion_err,
.unwrap_restricted,
.unwrap_restricted_safe,
.slice_ptr,
.ptr_slice_len_ptr,
.ptr_slice_ptr_ptr,
@@ -1886,6 +1898,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
.unreach,
.optional_payload_ptr_set,
.errunion_payload_ptr_set,
.unwrap_restricted_safe,
.set_union_tag,
.memset,
.memset_safe,
@@ -2014,6 +2027,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
.unwrap_errunion_payload_ptr,
.wrap_errunion_payload,
.wrap_errunion_err,
.unwrap_restricted,
.struct_field_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
+2
View File
@@ -741,6 +741,8 @@ fn legalizeBody(l: *Legalize, body_start: usize, body_len: usize) Error!void {
.errunion_payload_ptr_set,
.wrap_errunion_payload,
.wrap_errunion_err,
.unwrap_restricted,
.unwrap_restricted_safe,
.struct_field_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
+2
View File
@@ -506,6 +506,8 @@ fn analyzeInst(
.unwrap_errunion_err_ptr,
.wrap_errunion_payload,
.wrap_errunion_err,
.unwrap_restricted,
.unwrap_restricted_safe,
.slice_ptr,
.slice_len,
.ptr_slice_len_ptr,
+2
View File
@@ -96,6 +96,8 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.unwrap_errunion_err_ptr,
.wrap_errunion_payload,
.wrap_errunion_err,
.unwrap_restricted,
.unwrap_restricted_safe,
.slice_ptr,
.slice_len,
.ptr_slice_len_ptr,
+2
View File
@@ -250,6 +250,8 @@ const Writer = struct {
.unwrap_errunion_err_ptr,
.wrap_errunion_payload,
.wrap_errunion_err,
.unwrap_restricted,
.unwrap_restricted_safe,
.slice_ptr,
.slice_len,
.ptr_slice_len_ptr,
+176 -54
View File
@@ -1969,6 +1969,7 @@ pub const CaptureValue = packed struct(u32) {
pub const Key = union(enum) {
int_type: IntType,
ptr_type: PtrType,
restricted_ptr_type: RestrictedPtrType,
array_type: ArrayType,
vector_type: VectorType,
opt_type: Index,
@@ -2094,6 +2095,14 @@ pub const Key = union(enum) {
pub const AddressSpace = std.builtin.AddressSpace;
};
/// Extern layout so it can be hashed with `std.mem.asBytes`.
pub const RestrictedPtrType = extern struct {
/// A `reify_restricted` instruction.
zir_index: TrackedInst.Index,
/// The underlying pointer type.
unrestricted_ptr_type: Index,
};
/// Extern so that hashing can be done via memory reinterpreting.
pub const ArrayType = extern struct {
len: u64,
@@ -2590,6 +2599,7 @@ pub const Key = union(enum) {
return switch (key) {
// TODO: assert no padding in these types
inline .ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
@@ -2606,11 +2616,11 @@ pub const Key = union(enum) {
.un,
=> |x| Hash.hash(seed, asBytes(&x)),
.int_type => |x| Hash.hash(seed + @intFromEnum(x.signedness), asBytes(&x.bits)),
.int_type => |x| Hash.hash(seed | @shlExact(@as(u64, @intFromEnum(x.signedness)), 63), asBytes(&x.bits)),
.error_union => |x| switch (x.val) {
.err_name => |y| Hash.hash(seed + 0, asBytes(&x.ty) ++ asBytes(&y)),
.payload => |y| Hash.hash(seed + 1, asBytes(&x.ty) ++ asBytes(&y)),
.err_name => |y| Hash.hash(seed | @as(u64, 0 << 63), asBytes(&x.ty) ++ asBytes(&y)),
.payload => |y| Hash.hash(seed | @as(u64, 1 << 63), asBytes(&x.ty) ++ asBytes(&y)),
},
.opaque_type,
@@ -2631,10 +2641,7 @@ pub const Key = union(enum) {
std.hash.autoHash(&hasher, cv);
}
},
.reified => |reified| {
std.hash.autoHash(&hasher, reified.zir_index);
std.hash.autoHash(&hasher, reified.type_hash);
},
.reified => |reified| std.hash.autoHash(&hasher, reified),
.generated_union_tag => |union_type| {
std.hash.autoHash(&hasher, union_type);
},
@@ -2672,7 +2679,7 @@ pub const Key = union(enum) {
// Int-to-ptr pointers are hashed separately than decl-referencing pointers.
// This is sound due to pointer provenance rules.
const addr_tag: Key.Ptr.BaseAddr.Tag = ptr.base_addr;
const seed2 = seed + @intFromEnum(addr_tag);
const seed2 = seed | @shlExact(@as(u64, @intFromEnum(addr_tag)), 60);
const big_offset: i128 = ptr.byte_offset;
const common = asBytes(&ptr.ty) ++ asBytes(&big_offset);
return switch (ptr.base_addr) {
@@ -3020,6 +3027,8 @@ pub const Key = union(enum) {
}
},
.restricted_ptr_type => |a_r| return std.meta.eql(a_r, b.restricted_ptr_type),
inline .opaque_type, .enum_type, .union_type, .struct_type => |a_info, a_tag_ct| {
const b_info = @field(b, @tagName(a_tag_ct));
if (std.meta.activeTag(a_info) != b_info) return false;
@@ -3037,11 +3046,7 @@ pub const Key = union(enum) {
};
return std.mem.eql(u32, @ptrCast(a_captures), @ptrCast(b_captures));
},
.reified => |a_r| {
const b_r = b_info.reified;
return a_r.zir_index == b_r.zir_index and
a_r.type_hash == b_r.type_hash;
},
.reified => |a_r| return std.meta.eql(a_r, b_info.reified),
.generated_union_tag => |a_union_ty| return a_union_ty == b_info.generated_union_tag,
}
},
@@ -3125,6 +3130,7 @@ pub const Key = union(enum) {
return switch (key) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
@@ -3172,6 +3178,8 @@ pub const Key = union(enum) {
}
};
pub const LoadedRestrictedType = Tag.TypeRestricted;
pub const LoadedStructType = struct {
/// Index of the `struct_decl` or `reify` ZIR instruction.
zir_index: TrackedInst.Index,
@@ -3499,6 +3507,12 @@ pub const LoadedOpaqueType = struct {
namespace: NamespaceIndex,
};
/// Loads the full `TypeRestricted` payload for an interned restricted pointer type.
pub fn loadRestrictedType(ip: *const InternPool, index: Index) LoadedRestrictedType {
    const unwrapped = index.unwrap(ip);
    const extra_list = unwrapped.getExtra(ip);
    const payload_index = unwrapped.getItem(ip).data;
    return extraData(extra_list, Tag.TypeRestricted, payload_index);
}
pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
const unwrapped_index = index.unwrap(ip);
const extra_list = unwrapped_index.getExtra(ip);
@@ -4173,6 +4187,7 @@ pub const Index = enum(u32) {
type_array_small: struct { data: *Vector },
type_vector: struct { data: *Vector },
type_pointer: struct { data: *Tag.TypePointer },
type_restricted: struct { data: *Tag.TypeRestricted },
type_slice: DataIsIndex,
type_optional: DataIsIndex,
type_anyframe: DataIsIndex,
@@ -4771,6 +4786,9 @@ pub const Tag = enum(u8) {
/// A slice type.
/// data is Index of underlying pointer type.
type_slice,
/// A restricted pointer type.
/// data is payload to `TypeRestricted`.
type_restricted,
/// An optional type.
/// data is the child type.
type_optional,
@@ -5024,13 +5042,6 @@ pub const Tag = enum(u8) {
/// data is extra index to `MemoizedCall`
memoized_call,
const ErrorUnionType = Key.ErrorUnionType;
const TypeValue = Key.TypeValue;
const Error = Key.Error;
const EnumTag = Key.EnumTag;
const Union = Key.Union;
const TypePointer = Key.PtrType;
const struct_packed_encoding = .{
.summary = .@"{.payload.name%summary#\"}",
.payload = TypeStructPacked,
@@ -5116,6 +5127,7 @@ pub const Tag = enum(u8) {
.type_array_small = .{ .summary = .@"[{.payload.len%value}]{.payload.child%summary}", .payload = Vector },
.type_vector = .{ .summary = .@"@Vector({.payload.len%value}, {.payload.child%summary})", .payload = Vector },
.type_pointer = .{ .summary = .@"*... {.payload.child%summary}", .payload = TypePointer },
.type_restricted = .{ .summary = .@"@Restricted({.payload.ptr_type%summary})", .payload = TypeRestricted },
.type_slice = .{ .summary = .@"[]... {.data.unwrapped.payload.child%summary}", .data = Index },
.type_optional = .{ .summary = .@"?{.data%summary}", .data = Index },
.type_anyframe = .{ .summary = .@"anyframe->{.data%summary}", .data = Index },
@@ -5363,6 +5375,25 @@ pub const Tag = enum(u8) {
return @field(encodings, @tagName(tag)).payload;
}
const ErrorUnionType = Key.ErrorUnionType;
const TypeValue = Key.TypeValue;
const Error = Key.Error;
const EnumTag = Key.EnumTag;
const Union = Key.Union;
const TypePointer = Key.PtrType;
const TypeRestricted = struct {
/// Index of the `reify_restricted` ZIR instruction.
zir_index: TrackedInst.Index,
// TODO: the non-fqn will be needed by the new dwarf structure
/// The name of this restricted type.
name: NullTerminatedString,
/// The pointer type this restricted type is based on.
unrestricted_ptr_type: Index,
};
pub const Extern = struct {
// name, is_const, alignment, addrspace come from `owner_nav`.
ty: Index,
@@ -6455,6 +6486,14 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
return .{ .ptr_type = ptr_info };
},
.type_restricted => {
const restricted_ptr_info = extraData(unwrapped_index.getExtra(ip), Tag.TypeRestricted, data);
return .{ .restricted_ptr_type = .{
.zir_index = restricted_ptr_info.zir_index,
.unrestricted_ptr_type = restricted_ptr_info.unrestricted_ptr_type,
} };
},
.type_optional => .{ .opt_type = @enumFromInt(data) },
.type_anyframe => .{ .anyframe_type = @enumFromInt(data) },
@@ -7237,6 +7276,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key:
.data = try addExtra(extra, ptr_type_adjusted),
});
},
.restricted_ptr_type => unreachable, // instead getReifiedRestrictedType
.array_type => |array_type| {
assert(array_type.child != .none);
assert(array_type.sentinel == .none or ip.typeOf(array_type.sentinel) == array_type.child);
@@ -7367,7 +7407,11 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key:
},
.ptr => |ptr| {
const ptr_type = ip.indexToKey(ptr.ty).ptr_type;
const ptr_type = switch (ip.indexToKey(ptr.ty)) {
.ptr_type => |ptr_type| ptr_type,
.restricted_ptr_type => |restricted_ptr_type| ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
else => unreachable,
};
assert(ptr_type.flags.size != .slice);
items.appendAssumeCapacity(switch (ptr.base_addr) {
.nav => |nav| .{
@@ -7959,6 +8003,65 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key:
return gop.put();
}
/// Interns the restricted pointer type produced by a `reify_restricted` ZIR
/// instruction (`@Restricted`). If an identical restricted type (same tracked
/// instruction and same underlying pointer type) already exists, returns
/// `.existing`; otherwise reserves a new `type_restricted` item and returns a
/// `.wip` handle whose `name` field must still be populated via
/// `WipRestrictedType.setName` (or the reservation abandoned via `cancel`).
pub fn getReifiedRestrictedType(
    ip: *InternPool,
    gpa: Allocator,
    io: Io,
    tid: Zcu.PerThread.Id,
    zir_index: TrackedInst.Index,
    unrestricted_ptr_type: Index,
) Allocator.Error!WipRestrictedType.Result {
    // Deduplicate on the key; note the type name is deliberately not part of it.
    var gop = try ip.getOrPutKey(gpa, io, tid, .{ .restricted_ptr_type = .{
        .zir_index = zir_index,
        .unrestricted_ptr_type = unrestricted_ptr_type,
    } });
    defer gop.deinit();
    if (gop == .existing) return .{ .existing = gop.existing };

    const local = ip.getLocal(tid);
    const items = local.getMutableItems(gpa, io);
    const extra = local.getMutableExtra(gpa, io);
    // Reserve capacity up front so the `assumeCapacity` appends below cannot fail.
    try items.ensureUnusedCapacity(1);
    try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeRestricted).@"struct".fields.len);

    const extra_index = addExtraAssumeCapacity(extra, Tag.TypeRestricted{
        .zir_index = zir_index,
        // Filled in later by `WipRestrictedType.setName`.
        .name = undefined,
        .unrestricted_ptr_type = unrestricted_ptr_type,
    });
    items.appendAssumeCapacity(.{
        .tag = .type_restricted,
        .data = extra_index,
    });
    return .{ .wip = .{
        .index = gop.put(),
        .tid = tid,
        // Extra-array slot of the `name` field, so `setName` can store into it directly.
        .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeRestricted, "name").?,
    } };
}
/// Handle for a freshly-reserved restricted pointer type whose `name` field has
/// not yet been written. Produced by `getReifiedRestrictedType`; the caller
/// must finish with `setName` or abandon with `cancel`.
pub const WipRestrictedType = struct {
    /// The interned index of the new `type_restricted` item.
    index: Index,
    /// Thread that owns the local lists this reservation was appended to.
    tid: Zcu.PerThread.Id,
    /// Position in the thread-local extra array where the `name` field lives.
    type_name_index: u32,

    /// Stores `type_name` into the reserved `name` slot of the type's extra data.
    pub fn setName(wip: WipRestrictedType, ip: *InternPool, type_name: NullTerminatedString) void {
        const extra = ip.getLocalShared(wip.tid).extra.acquire();
        const extra_items = extra.view().items(.@"0");
        extra_items[wip.type_name_index] = @intFromEnum(type_name);
    }

    /// Abandons the work-in-progress type, removing it from the pool.
    pub fn cancel(wip: WipRestrictedType, ip: *InternPool, tid: Zcu.PerThread.Id) void {
        ip.remove(tid, wip.index);
    }

    pub const Result = union(enum) {
        wip: WipRestrictedType,
        existing: Index,
    };
};
pub fn getDeclaredStructType(
ip: *InternPool,
gpa: Allocator,
@@ -9925,6 +10028,7 @@ test "basic usage" {
pub fn childType(ip: *const InternPool, i: Index) Index {
return switch (ip.indexToKey(i)) {
.ptr_type => |ptr_type| ptr_type.child,
.restricted_ptr_type => |restricted_ptr_type| ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type.child,
.vector_type => |vector_type| vector_type.child,
.array_type => |array_type| array_type.child,
.opt_type, .anyframe_type => |child| child,
@@ -10007,22 +10111,28 @@ pub fn getCoerced(
.val = .none,
} });
if (ip.isPointerType(new_ty)) switch (ip.indexToKey(new_ty).ptr_type.flags.size) {
.one, .many, .c => return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = .int,
.byte_offset = 0,
} }),
.slice => return ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
.ptr = try ip.get(gpa, io, tid, .{ .ptr = .{
.ty = ip.slicePtrType(new_ty),
new_ty: switch (ip.indexToKey(new_ty)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.one, .many, .c => return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = .int,
.byte_offset = 0,
} }),
.len = .undef_usize,
} }),
};
.slice => return ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
.ptr = try ip.get(gpa, io, tid, .{ .ptr = .{
.ty = ip.slicePtrType(new_ty),
.base_addr = .int,
.byte_offset = 0,
} }),
.len = .undef_usize,
} }),
},
.restricted_ptr_type => |restricted_ptr_type| continue :new_ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => {},
}
},
else => {
const unwrapped_val = val.unwrap(ip);
@@ -10101,28 +10211,40 @@ pub fn getCoerced(
},
else => {},
},
.slice => |slice| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size == .slice)
return ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
.ptr = try ip.getCoerced(gpa, io, tid, slice.ptr, ip.slicePtrType(new_ty)),
.len = slice.len,
} })
else if (ip.isIntegerType(new_ty))
return ip.getCoerced(gpa, io, tid, slice.ptr, new_ty),
.ptr => |ptr| if (ip.isPointerType(new_ty) and ip.indexToKey(new_ty).ptr_type.flags.size != .slice)
return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = ptr.base_addr,
.byte_offset = ptr.byte_offset,
} })
else if (ip.isIntegerType(new_ty))
switch (ptr.base_addr) {
.slice => |slice| new_ty: switch (ip.indexToKey(new_ty)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.one, .many, .c => {},
.slice => return ip.get(gpa, io, tid, .{ .slice = .{
.ty = new_ty,
.ptr = try ip.getCoerced(gpa, io, tid, slice.ptr, ip.slicePtrType(new_ty)),
.len = slice.len,
} }),
},
.restricted_ptr_type => |restricted_ptr_type| continue :new_ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => if (ip.isIntegerType(new_ty)) return ip.getCoerced(gpa, io, tid, slice.ptr, new_ty),
},
.ptr => |ptr| new_ty: switch (ip.indexToKey(new_ty)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.one, .many, .c => return ip.get(gpa, io, tid, .{ .ptr = .{
.ty = new_ty,
.base_addr = ptr.base_addr,
.byte_offset = ptr.byte_offset,
} }),
.slice => {},
},
.restricted_ptr_type => |restricted_ptr_type| continue :new_ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => if (ip.isIntegerType(new_ty)) switch (ptr.base_addr) {
.int => return ip.get(gpa, io, tid, .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = @intCast(ptr.byte_offset) },
} }),
else => {},
},
},
.opt => |opt| switch (ip.indexToKey(new_ty)) {
.ptr_type => |ptr_type| return switch (opt.val) {
.none => switch (ptr_type.flags.size) {
@@ -10397,10 +10519,6 @@ pub fn isFunctionType(ip: *const InternPool, ty: Index) bool {
return ip.indexToKey(ty) == .func_type;
}
pub fn isPointerType(ip: *const InternPool, ty: Index) bool {
return ip.indexToKey(ty) == .ptr_type;
}
pub fn isOptionalType(ip: *const InternPool, ty: Index) bool {
return ip.indexToKey(ty) == .opt_type;
}
@@ -10577,6 +10695,7 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo
.type_array_big => @sizeOf(Array),
.type_vector => @sizeOf(Vector),
.type_pointer => @sizeOf(Tag.TypePointer),
.type_restricted => @sizeOf(Tag.TypeRestricted),
.type_slice => 0,
.type_optional => 0,
.type_anyframe => 0,
@@ -10838,6 +10957,7 @@ fn dumpAllFallible(ip: *const InternPool, w: *Io.Writer) anyerror!void {
.type_array_big,
.type_vector,
.type_pointer,
.type_restricted,
.type_optional,
.type_anyframe,
.type_error_union,
@@ -11576,6 +11696,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.type_array_small,
.type_vector,
.type_pointer,
.type_restricted,
.type_slice,
.type_optional,
.type_anyframe,
@@ -11921,6 +12042,7 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.type_pointer,
.type_slice,
.type_restricted,
=> .pointer,
.type_optional => .optional,
+198 -35
View File
@@ -398,7 +398,7 @@ pub const Block = struct {
/// The name of the current "context" for naming namespace types.
/// The interpretation of this depends on the name strategy in ZIR, but the name
/// is always incorporated into the type name somehow.
/// See `Sema.setTypeName`.
/// See `Sema.computeTypeName`.
type_name_ctx: InternPool.NullTerminatedString,
/// Create a `LazySrcLoc` based on an `Offset` from the code being analyzed in this block.
@@ -1435,6 +1435,7 @@ fn analyzeBodyInner(
.reify_pointer_sentinel_ty => try sema.zirReifyPointerSentinelTy(block, extended),
.reify_tuple => try sema.zirReifyTuple( block, extended),
.reify_pointer => try sema.zirReifyPointer( block, extended),
.reify_restricted => try sema.zirReifyRestricted( block, extended, inst),
.reify_fn => try sema.zirReifyFn( block, extended),
.reify_struct => try sema.zirReifyStruct( block, extended, inst),
.reify_union => try sema.zirReifyUnion( block, extended, inst),
@@ -18956,7 +18957,8 @@ fn structInitAnon(
.existing => |ty| .fromInterned(ty),
.wip => |wip| ty: {
errdefer wip.cancel(ip, pt.tid);
try sema.setTypeName(block, &wip, .anon, "struct", inst);
const type_name, const name_nav = try sema.computeTypeName(block, wip.index, .anon, "struct", inst);
wip.setName(ip, type_name, name_nav);
// Reified structs have field information populated immediately.
@memcpy(wip.field_names.get(ip), names);
@@ -19879,6 +19881,62 @@ fn zirReifyPointer(
}));
}
/// Implements the `@Restricted` builtin: given a (possibly already restricted)
/// pointer type, interns and returns the corresponding restricted pointer type.
fn zirReifyRestricted(
    sema: *Sema,
    block: *Block,
    extended: Zir.Inst.Extended.InstData,
    inst: Zir.Inst.Index,
) CompileError!Air.Inst.Ref {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const io = comp.io;
    const ip = &zcu.intern_pool;
    // `small` carries the ZIR name strategy used when naming the reified type.
    const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
    const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
    // Track the instruction so the resulting type can be re-resolved incrementally.
    const tracked_inst = try block.trackZir(inst);
    const src: LazySrcLoc = .{
        .base_node_inst = tracked_inst,
        .offset = .nodeOffset(.zero),
    };
    // Source location of the single `@Restricted` argument, for error reporting.
    const ptr_type_src: LazySrcLoc = .{
        .base_node_inst = tracked_inst,
        .offset = .{ .node_offset_builtin_call_arg = .{
            .builtin_call_node = extra.node,
            .arg_index = 0,
        } },
    };
    const operand = try sema.resolveType(block, src, extra.operand);
    // Validate the operand: only non-slice pointer types may be restricted.
    // Restricting an already-restricted type reuses its underlying pointer type.
    const unrestricted_ptr_type: Type = switch (ip.indexToKey(operand.toIntern())) {
        else => return sema.fail(block, ptr_type_src, "expected pointer type, found '{f}'", .{operand.fmt(pt)}),
        .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
            .one, .many, .c => operand,
            .slice => return sema.fail(block, ptr_type_src, "slice types cannot be restricted", .{}),
        },
        .restricted_ptr_type => |restricted_ptr_type| .fromInterned(restricted_ptr_type.unrestricted_ptr_type),
    };
    switch (try ip.getReifiedRestrictedType(gpa, io, pt.tid, tracked_inst, unrestricted_ptr_type.toIntern())) {
        .existing => |ty| {
            // An identical restricted type was already interned; just record the reference.
            try sema.addTypeReferenceEntry(src, .fromInterned(ty));
            // No need for `ensureNamespaceUpToDate` because this type doesn't have a namespace.
            return .fromIntern(ty);
        },
        .wip => |wip| {
            errdefer wip.cancel(ip, pt.tid);
            // Compute and attach the type name. Restricted types have no owner
            // nav, so the nav index returned by computeTypeName is discarded.
            const type_name, _ = try sema.computeTypeName(block, wip.index, name_strategy, "restricted", inst);
            wip.setName(ip, type_name);
            if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index);
            try sema.addTypeReferenceEntry(src, .fromInterned(wip.index));
            return .fromIntern(wip.index);
        },
    }
}
fn zirReifyFn(
sema: *Sema,
block: *Block,
@@ -20194,7 +20252,8 @@ fn zirReifyStruct(
},
.wip => |wip| {
errdefer wip.cancel(ip, pt.tid);
try sema.setTypeName(block, &wip, name_strategy, "struct", inst);
const type_name, const name_nav = try sema.computeTypeName(block, wip.index, name_strategy, "struct", inst);
wip.setName(ip, type_name, name_nav);
for (0..fields_len) |field_idx| {
const field_name_val = try field_names_arr.elemValue(pt, field_idx);
const field_attrs_val = try field_attrs_arr.elemValue(pt, field_idx);
@@ -20438,7 +20497,8 @@ fn zirReifyUnion(
},
.wip => |wip| {
errdefer wip.cancel(ip, pt.tid);
try sema.setTypeName(block, &wip, name_strategy, "union", inst);
const type_name, const name_nav = try sema.computeTypeName(block, wip.index, name_strategy, "union", inst);
wip.setName(ip, type_name, name_nav);
for (0..fields_len) |field_idx| {
const field_name_val = try field_names_arr.elemValue(pt, field_idx);
@@ -20603,8 +20663,8 @@ fn zirReifyEnum(
},
.wip => |wip| {
errdefer wip.cancel(ip, pt.tid);
try sema.setTypeName(block, &wip, name_strategy, "enum", inst);
const type_name, const name_nav = try sema.computeTypeName(block, wip.index, name_strategy, "enum", inst);
wip.setName(ip, type_name, name_nav);
// Populate field names and values. Duplicate checking will be handled by type resolution.
for (0..fields_len) |field_index| {
@@ -27736,6 +27796,22 @@ fn coerceExtra(
return sema.coerceCompatiblePtrs(block, dest_ty, slice_ptr, inst_src);
},
}
// Restricted coercions
if (maybe_inst_val != null) {
if (dest_ty.unrestrictedType(zcu)) |dest_unrestricted_ty| {
if (sema.resolveValue(try sema.coerceExtra(block, dest_unrestricted_ty, inst, inst_src, opts))) |inst_val| {
return .fromIntern(try ip.getCoerced(gpa, io, pt.tid, inst_val.toIntern(), dest_ty.toIntern()));
}
}
}
if (inst_ty.unrestrictedType(zcu)) |inst_unrestricted_ty| {
const inst_unrestricted: Air.Inst.Ref = if (maybe_inst_val) |inst_val|
.fromIntern(try ip.getCoerced(gpa, io, pt.tid, inst_val.toIntern(), inst_unrestricted_ty.toIntern()))
else
try sema.unwrapRestrictedPtr(block, inst_unrestricted_ty, inst, inst_src);
return sema.coerceExtra(block, dest_ty, inst_unrestricted, inst_src, opts);
}
},
.int, .comptime_int => switch (inst_ty.zigTypeTag(zcu)) {
.float, .comptime_float => float: {
@@ -28084,6 +28160,7 @@ const InMemoryCoercionResult = union(enum) {
ptr_alignment: AlignPair,
double_ptr_to_anyopaque: Pair,
slice_to_anyopaque: Pair,
ptr_restricted: Pair,
const Pair = struct {
actual: Type,
@@ -28415,6 +28492,15 @@ const InMemoryCoercionResult = union(enum) {
try sema.errNote(src, msg, "consider using '.ptr'", .{});
break;
},
.ptr_restricted => |pair| {
for ([_]Type{ pair.actual, pair.wanted }) |restricted_ptr_type| {
const unrestricted_ptr_type = restricted_ptr_type.unrestrictedType(pt.zcu) orelse continue;
try sema.errNote(src, msg, "restricted type '{f}' is not guaranteed to have the same representation as its unrestricted type '{f}'", .{
restricted_ptr_type.fmt(pt), unrestricted_ptr_type.fmt(pt),
});
}
break;
},
};
}
};
@@ -28479,7 +28565,7 @@ pub fn coerceInMemoryAllowed(
(dest_info.signedness == .signed and src_info.signedness == .unsigned and dest_info.bits <= src_info.bits) or
(dest_info.signedness == .unsigned and src_info.signedness == .signed))
{
return InMemoryCoercionResult{ .int_not_coercible = .{
return .{ .int_not_coercible = .{
.actual_signedness = src_info.signedness,
.wanted_signedness = dest_info.signedness,
.actual_bits = src_info.bits,
@@ -28913,7 +28999,7 @@ fn coerceInMemoryAllowedPtrs(
const ok_ptr_size = src_info.flags.size == dest_info.flags.size or
src_info.flags.size == .c or dest_info.flags.size == .c;
if (!ok_ptr_size) {
return InMemoryCoercionResult{ .ptr_size = .{
return .{ .ptr_size = .{
.actual = src_info.flags.size,
.wanted = dest_info.flags.size,
} };
@@ -29049,13 +29135,19 @@ fn coerceInMemoryAllowedPtrs(
break :a dest_child.abiAlignment(zcu);
} else dest_info.flags.alignment;
if (dest_align.compare(if (dest_is_mut) .neq else .gt, src_align)) {
return InMemoryCoercionResult{ .ptr_alignment = .{
return .{ .ptr_alignment = .{
.actual = src_align,
.wanted = dest_align,
} };
}
}
// Restricted pointers have a different in-memory representation depending on the safety mode of the module that created it.
if (dest_ty.unrestrictedType(zcu) != null or src_ty.unrestrictedType(zcu) != null) return .{ .ptr_restricted = .{
.actual = src_ty,
.wanted = dest_ty,
} };
return .ok;
}
@@ -29202,10 +29294,15 @@ fn storePtr2(
try sema.requireRuntimeBlock(block, src, runtime_src);
const store_inst = if (is_ret)
try block.addBinOp(.store, ptr, operand)
const unrestricted_ptr = if (ptr_ty.unrestrictedType(zcu)) |unrestricted_ptr_ty|
try sema.unwrapRestrictedPtr(block, unrestricted_ptr_ty, ptr, ptr_src)
else
try block.addBinOp(air_tag, ptr, operand);
ptr;
const store_inst = if (is_ret)
try block.addBinOp(.store, unrestricted_ptr, operand)
else
try block.addBinOp(air_tag, unrestricted_ptr, operand);
try sema.checkComptimeKnownStore(block, store_inst, operand_src);
@@ -30282,7 +30379,12 @@ fn analyzeLoad(
break :msg msg;
});
return block.addTyOp(.load, elem_ty, ptr);
const unrestricted_ptr = if (ptr_ty.unrestrictedType(zcu)) |unrestricted_ptr_ty|
try sema.unwrapRestrictedPtr(block, unrestricted_ptr_ty, ptr, ptr_src)
else
ptr;
return block.addTyOp(.load, elem_ty, unrestricted_ptr);
}
fn analyzeSlicePtr(
@@ -31494,6 +31596,19 @@ fn wrapErrorUnionSet(
}
}
/// Emits the AIR instruction that converts a restricted pointer operand into
/// its unrestricted pointer type. When the block wants safety, the panic
/// handler for corrupt restricted pointers is prepared first and the checked
/// `unwrap_restricted_safe` instruction is emitted instead of the plain one.
fn unwrapRestrictedPtr(
    sema: *Sema,
    block: *Block,
    unrestricted_ptr_ty: Type,
    ptr: Air.Inst.Ref,
    ptr_src: LazySrcLoc,
) !Air.Inst.Ref {
    const air_tag: Air.Inst.Tag = tag: {
        if (!block.wantSafety()) break :tag .unwrap_restricted;
        // Safety-checked unwrap panics on invalid values; the handler must be
        // resolved before lowering AIR containing this instruction.
        try sema.preparePanicId(ptr_src, .corrupt_restricted_pointer);
        break :tag .unwrap_restricted_safe;
    };
    return block.addTyOp(air_tag, unrestricted_ptr_ty, ptr);
}
/// Returns the enum tag value for the active tag of a tagged union value.
///
/// Asserts that the type of `un` is a tagged union type.
@@ -34211,13 +34326,59 @@ pub fn analyzeMemoizedState(sema: *Sema, stage: InternPool.MemoizedStateStage) C
fn getExpectedBuiltinFnType(sema: *Sema, decl: Zcu.BuiltinDecl) CompileError!Type {
const pt = sema.pt;
return switch (decl) {
.Signedness,
.AddressSpace,
.CallingConvention,
=> unreachable,
// `noinline fn () void`
.returnError => try pt.funcType(.{
.param_types = &.{},
.return_type = .void_type,
.is_noinline = true,
}),
.StackTrace,
.SourceLocation,
.CallModifier,
.AtomicOrder,
.AtomicRmwOp,
.ReduceOp,
.FloatMode,
.PrefetchOptions,
.ExportOptions,
.ExternOptions,
.BranchHint,
=> unreachable,
.Type,
.@"Type.Fn",
.@"Type.Fn.Param",
.@"Type.Fn.Param.Attributes",
.@"Type.Fn.Attributes",
.@"Type.Int",
.@"Type.Float",
.@"Type.Pointer",
.@"Type.Pointer.Size",
.@"Type.Pointer.Attributes",
.@"Type.Array",
.@"Type.Vector",
.@"Type.Optional",
.@"Type.Error",
.@"Type.ErrorUnion",
.@"Type.EnumField",
.@"Type.Enum",
.@"Type.Enum.Mode",
.@"Type.Union",
.@"Type.UnionField",
.@"Type.UnionField.Attributes",
.@"Type.Struct",
.@"Type.StructField",
.@"Type.StructField.Attributes",
.@"Type.ContainerLayout",
.@"Type.Opaque",
.@"Type.Declaration",
=> unreachable,
.panic => unreachable,
// `fn ([]const u8, ?usize) noreturn`
.@"panic.call" => try pt.funcType(.{
.param_types = &.{
@@ -34226,7 +34387,6 @@ fn getExpectedBuiltinFnType(sema: *Sema, decl: Zcu.BuiltinDecl) CompileError!Typ
},
.return_type = .noreturn_type,
}),
// `fn (anytype, anytype) noreturn`
.@"panic.sentinelMismatch",
.@"panic.inactiveUnionField",
@@ -34234,19 +34394,16 @@ fn getExpectedBuiltinFnType(sema: *Sema, decl: Zcu.BuiltinDecl) CompileError!Typ
.param_types = &.{ .generic_poison_type, .generic_poison_type },
.return_type = .noreturn_type,
}),
// `fn (anyerror) noreturn`
.@"panic.unwrapError" => try pt.funcType(.{
.param_types = &.{.anyerror_type},
.return_type = .noreturn_type,
}),
// `fn (usize) noreturn`
.@"panic.sliceCastLenRemainder" => try pt.funcType(.{
.param_types = &.{.usize_type},
.return_type = .noreturn_type,
}),
// `fn (usize, usize) noreturn`
.@"panic.outOfBounds",
.@"panic.startGreaterThanEnd",
@@ -34254,7 +34411,6 @@ fn getExpectedBuiltinFnType(sema: *Sema, decl: Zcu.BuiltinDecl) CompileError!Typ
.param_types = &.{ .usize_type, .usize_type },
.return_type = .noreturn_type,
}),
// `fn () noreturn`
.@"panic.reachedUnreachable",
.@"panic.unwrapNull",
@@ -34275,23 +34431,28 @@ fn getExpectedBuiltinFnType(sema: *Sema, decl: Zcu.BuiltinDecl) CompileError!Typ
.@"panic.copyLenMismatch",
.@"panic.memcpyAlias",
.@"panic.noreturnReturned",
.@"panic.corruptRestrictedPointer",
=> try pt.funcType(.{
.param_types = &.{},
.return_type = .noreturn_type,
}),
else => unreachable,
.VaList => unreachable,
.assembly,
.@"assembly.Clobbers",
=> unreachable,
};
}
pub fn setTypeName(
pub fn computeTypeName(
sema: *Sema,
block: *Block,
wip: *const InternPool.WipContainerType,
index: InternPool.Index,
name_strategy: Zir.Inst.NameStrategy,
anon_prefix: []const u8,
inst: Zir.Inst.Index,
) CompileError!void {
) CompileError!struct { InternPool.NullTerminatedString, InternPool.Nav.Index.Optional } {
const pt = sema.pt;
const zcu = pt.zcu;
const comp = zcu.comp;
@@ -34308,16 +34469,16 @@ pub fn setTypeName(
// TODO: that would be possible, by detecting line number changes and renaming
// types appropriately. However, `@typeName` becomes a problem then. If we remove
// that builtin from the language, we can consider this.
wip.setName(ip, try ip.getOrPutStringFmt(
return .{ try ip.getOrPutStringFmt(
gpa,
io,
pt.tid,
"{f}__{s}_{d}",
.{ block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(wip.index) },
.{ block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(index) },
.no_embedded_nulls,
), .none);
), .none };
},
.parent => wip.setName(ip, block.type_name_ctx, sema.owner.unwrap().nav_val.toOptional()),
.parent => return .{ block.type_name_ctx, sema.owner.unwrap().nav_val.toOptional() },
.func => {
const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip) orelse return error.AnalysisFail);
const zir_tags = sema.code.instructions.items(.tag);
@@ -34360,8 +34521,7 @@ pub fn setTypeName(
};
w.writeByte(')') catch return error.OutOfMemory;
const name = try ip.getOrPutString(gpa, io, pt.tid, aw.written(), .no_embedded_nulls);
wip.setName(ip, name, .none);
return .{ try ip.getOrPutString(gpa, io, pt.tid, aw.written(), .no_embedded_nulls), .none };
},
.dbg_var => {
// TODO: this logic is questionable. We ideally should be traversing the `Block` rather than relying on the order of AstGen instructions.
@@ -34376,10 +34536,9 @@ pub fn setTypeName(
} else {
continue :strat .anon;
};
const name = try ip.getOrPutStringFmt(gpa, io, pt.tid, "{f}.{s}", .{
return .{ try ip.getOrPutStringFmt(gpa, io, pt.tid, "{f}.{s}", .{
block.type_name_ctx.fmt(ip), var_name,
}, .no_embedded_nulls);
wip.setName(ip, name, .none);
}, .no_embedded_nulls), .none };
},
}
}
@@ -34420,7 +34579,8 @@ fn zirStructDecl(
.existing => |ty| .fromInterned(ty),
.wip => |wip| ty: {
errdefer wip.cancel(ip, pt.tid);
try sema.setTypeName(block, &wip, struct_decl.name_strategy, "struct", inst);
const type_name, const name_nav = try sema.computeTypeName(block, wip.index, struct_decl.name_strategy, "struct", inst);
wip.setName(ip, type_name, name_nav);
const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
.owner_type = wip.index,
@@ -34493,7 +34653,8 @@ fn zirUnionDecl(
.existing => |ty| .fromInterned(ty),
.wip => |wip| ty: {
errdefer wip.cancel(ip, pt.tid);
try sema.setTypeName(block, &wip, union_decl.name_strategy, "union", inst);
const type_name, const name_nav = try sema.computeTypeName(block, wip.index, union_decl.name_strategy, "union", inst);
wip.setName(ip, type_name, name_nav);
const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
.owner_type = wip.index,
@@ -34545,7 +34706,8 @@ fn zirEnumDecl(
.existing => |ty| .fromInterned(ty),
.wip => |wip| ty: {
errdefer wip.cancel(ip, pt.tid);
try sema.setTypeName(block, &wip, enum_decl.name_strategy, "enum", inst);
const type_name, const name_nav = try sema.computeTypeName(block, wip.index, enum_decl.name_strategy, "enum", inst);
wip.setName(ip, type_name, name_nav);
const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
.owner_type = wip.index,
@@ -34594,7 +34756,8 @@ fn zirOpaqueDecl(
.existing => |ty| .fromInterned(ty),
.wip => |wip| ty: {
errdefer wip.cancel(ip, pt.tid);
try sema.setTypeName(block, &wip, opaque_decl.name_strategy, "opaque", inst);
const type_name, const name_nav = try sema.computeTypeName(block, wip.index, opaque_decl.name_strategy, "opaque", inst);
wip.setName(ip, type_name, name_nav);
const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
.parent = block.namespace.toOptional(),
.owner_type = wip.index,
+2 -1
View File
@@ -150,7 +150,8 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
errdefer wip.cancel(ip, pt.tid);
const block = self.block;
const zcu = pt.zcu;
try self.sema.setTypeName(block, &wip, .anon, "struct", self.base_node_inst.resolve(ip).?);
const type_name, const name_nav = try self.sema.computeTypeName(block, wip.index, .anon, "struct", self.base_node_inst.resolve(ip).?);
wip.setName(ip, type_name, name_nav);
// Reified structs have field information populated immediately.
@memcpy(wip.field_values.get(ip), elems);
+1
View File
@@ -239,6 +239,7 @@ const UnpackValueBits = struct {
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
+1
View File
@@ -84,6 +84,7 @@ fn ensureLayoutResolvedInner(sema: *Sema, ty: Type, orig_ty: Type, reason: *cons
switch (ip.indexToKey(ty.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.anyframe_type,
.simple_type,
.opaque_type,
+138 -70
View File
@@ -165,6 +165,7 @@ pub fn classify(start_ty: Type, zcu: *const Zcu) Class {
.error_set_type,
.inferred_error_set_type,
.ptr_type,
.restricted_ptr_type,
.anyframe_type,
=> .runtime,
@@ -373,13 +374,28 @@ pub fn arrayInfo(self: Type, zcu: *const Zcu) ArrayInfo {
}
pub fn ptrInfo(ty: Type, zcu: *const Zcu) InternPool.Key.PtrType {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
return ty.ptrInfoOrNull(&zcu.intern_pool, .{}).?;
}
pub fn ptrInfoOrNull(ty: Type, ip: *const InternPool, comptime opts: struct {
allow_optional: bool = true,
allow_restricted: bool = true,
}) ?InternPool.Key.PtrType {
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |p| p,
.opt_type => |child| switch (zcu.intern_pool.indexToKey(child)) {
.restricted_ptr_type => |rp| if (opts.allow_restricted)
ip.indexToKey(rp.unrestricted_ptr_type).ptr_type
else
null,
.opt_type => |child| if (opts.allow_optional) switch (ip.indexToKey(child)) {
.ptr_type => |p| p,
else => unreachable,
},
else => unreachable,
.restricted_ptr_type => |rp| if (opts.allow_restricted)
ip.indexToKey(rp.unrestricted_ptr_type).ptr_type
else
null,
else => null, // not a pointer type
} else null,
else => null, // not a pointer type
};
}
@@ -488,6 +504,10 @@ pub fn print(ty: Type, writer: *std.Io.Writer, pt: Zcu.PerThread, ctx: ?*Compari
try print(Type.fromInterned(info.child), writer, pt, ctx);
return;
},
.restricted_ptr_type => {
const name = ip.loadRestrictedType(ty.toIntern()).name;
try writer.print("{f}", .{name.fmt(ip)});
},
.array_type => |array_type| {
if (array_type.sentinel == .none) {
try writer.print("[{d}]", .{array_type.len});
@@ -747,6 +767,7 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool {
.vector_type,
=> true,
.restricted_ptr_type,
.error_union_type,
.error_set_type,
.inferred_error_set_type,
@@ -893,22 +914,13 @@ pub fn isNoReturn(ty: Type, zcu: *const Zcu) bool {
/// Never returns `none`. Asserts that all necessary type resolution is already done.
pub fn ptrAlignment(ptr_ty: Type, zcu: *Zcu) Alignment {
const ip = &zcu.intern_pool;
const ptr_key: InternPool.Key.PtrType = switch (ip.indexToKey(ptr_ty.toIntern())) {
.ptr_type => |key| key,
.opt_type => |child| ip.indexToKey(child).ptr_type,
else => unreachable,
};
const ptr_key = ptr_ty.ptrInfo(zcu);
if (ptr_key.flags.alignment != .none) return ptr_key.flags.alignment;
return Type.fromInterned(ptr_key.child).abiAlignment(zcu);
}
pub fn ptrAddressSpace(ty: Type, zcu: *const Zcu) std.builtin.AddressSpace {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.address_space,
.opt_type => |child| zcu.intern_pool.indexToKey(child).ptr_type.flags.address_space,
else => unreachable,
};
return ty.ptrInfo(zcu).flags.address_space;
}
/// Never returns `.none`. Asserts that the layout of `ty` is resolved.
@@ -924,7 +936,7 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
if (int_type.bits == 0) return .@"1";
return .fromByteUnits(std.zig.target.intAlignment(target, int_type.bits));
},
.ptr_type, .anyframe_type => ptrAbiAlignment(target),
.ptr_type, .restricted_ptr_type, .anyframe_type => ptrAbiAlignment(target),
.array_type => |array_type| Type.fromInterned(array_type.child).abiAlignment(zcu),
.vector_type => |vector_type| {
if (vector_type.len == 0) return .@"1";
@@ -1078,7 +1090,7 @@ pub fn abiSize(ty: Type, zcu: *const Zcu) u64 {
.slice => ptrAbiSize(target) * 2,
.one, .many, .c => ptrAbiSize(target),
},
.anyframe_type => ptrAbiSize(target),
.restricted_ptr_type, .anyframe_type => ptrAbiSize(target),
.array_type => |arr| arr.lenIncludingSentinel() * Type.fromInterned(arr.child).abiSize(zcu),
.vector_type => |vec| {
const elem_ty: Type = .fromInterned(vec.child);
@@ -1231,7 +1243,7 @@ pub fn bitSize(ty: Type, zcu: *const Zcu) u64 {
.slice => target.ptrBitWidth() * 2,
else => target.ptrBitWidth(),
},
.anyframe_type => target.ptrBitWidth(),
.restricted_ptr_type, .anyframe_type => target.ptrBitWidth(),
.array_type => |array_type| {
const elem_ty: Type = .fromInterned(array_type.child);
const len = array_type.lenIncludingSentinel();
@@ -1329,13 +1341,31 @@ pub fn bitSize(ty: Type, zcu: *const Zcu) u64 {
};
}
pub fn isSinglePointer(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_info| ptr_info.flags.size == .one,
else => false,
/// If `ty` is a restricted pointer type, returns the unrestricted pointer
/// type it wraps; returns `null` for every other kind of type.
pub fn unrestrictedType(ty: Type, zcu: *const Zcu) ?Type {
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .restricted_ptr_type => |info| return .fromInterned(info.unrestricted_ptr_type),
        else => return null,
    }
}
/// How a restricted pointer is represented at runtime.
const RestrictedRepr = enum { double_pointer, single_pointer };

/// Returns the runtime representation of the restricted pointer type `ty`.
/// Asserts that `ty` is a restricted pointer type.
pub fn restrictedRepr(ty: Type, zcu: *const Zcu) RestrictedRepr {
    return restrictedReprByZirIndex(zcu.intern_pool.indexToKey(ty.toIntern()).restricted_ptr_type.zir_index, zcu);
}

/// Determines the representation from the optimization mode of the module
/// that declared the restricted type: safe modes (`Debug`, `ReleaseSafe`)
/// use a double pointer, release modes use a plain single pointer.
pub fn restrictedReprByZirIndex(zir_index: InternPool.TrackedInst.Index, zcu: *const Zcu) RestrictedRepr {
    return switch (zcu.fileByIndex(zir_index.resolveFile(&zcu.intern_pool)).mod.?.optimize_mode) {
        .Debug, .ReleaseSafe => .double_pointer,
        .ReleaseFast, .ReleaseSmall => .single_pointer,
    };
}
/// Returns whether `ty` is a single-item pointer (`*T`).
/// Restricted pointers are looked through; optional pointers are not.
pub fn isSinglePointer(ty: Type, zcu: *const Zcu) bool {
    if (ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false })) |info| {
        return info.flags.size == .one;
    }
    return false;
}
/// Asserts `ty` is a pointer.
pub fn ptrSize(ty: Type, zcu: *const Zcu) std.builtin.Type.Pointer.Size {
return ty.ptrSizeOrNull(zcu).?;
@@ -1343,24 +1373,27 @@ pub fn ptrSize(ty: Type, zcu: *const Zcu) std.builtin.Type.Pointer.Size {
/// Returns `null` if `ty` is not a pointer.
pub fn ptrSizeOrNull(ty: Type, zcu: *const Zcu) ?std.builtin.Type.Pointer.Size {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_info| ptr_info.flags.size,
else => null,
};
const ptr_info = ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }) orelse return null;
return ptr_info.flags.size;
}
pub fn isSlice(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .slice,
else => false,
};
const ptr_info = ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }) orelse return false;
return ptr_info.flags.size == .slice;
}
pub fn isSliceAtRuntime(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .slice,
.opt_type => |child| switch (zcu.intern_pool.indexToKey(child)) {
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => |child| opt_child: switch (zcu.intern_pool.indexToKey(child)) {
.ptr_type => |ptr_type| !ptr_type.flags.is_allowzero and ptr_type.flags.size == .slice,
.restricted_ptr_type => |restricted_ptr_type| continue :opt_child .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => false,
},
else => false,
@@ -1372,10 +1405,8 @@ pub fn slicePtrFieldType(ty: Type, zcu: *const Zcu) Type {
}
pub fn isConstPtr(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.is_const,
else => false,
};
const ptr_info = ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }) orelse return false;
return ptr_info.flags.is_const;
}
pub fn isVolatilePtr(ty: Type, zcu: *const Zcu) bool {
@@ -1383,38 +1414,45 @@ pub fn isVolatilePtr(ty: Type, zcu: *const Zcu) bool {
}
pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool {
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.is_volatile,
else => false,
};
const ptr_info = ty.ptrInfoOrNull(ip, .{ .allow_optional = false }) orelse return false;
return ptr_info.flags.is_volatile;
}
pub fn isAllowzeroPtr(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.is_allowzero,
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => true,
else => false,
};
}
pub fn isCPtr(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .c,
else => false,
};
const ptr_info = ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }) orelse return false;
return ptr_info.flags.size == .c;
}
pub fn isPtrAtRuntime(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.slice => false,
.one, .many, .c => true,
},
.opt_type => |child| switch (zcu.intern_pool.indexToKey(child)) {
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => |child| opt_child: switch (ip.indexToKey(child)) {
.ptr_type => |p| switch (p.flags.size) {
.slice, .c => false,
.many, .one => !p.flags.is_allowzero,
},
.restricted_ptr_type => |restricted_ptr_type| continue :opt_child .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => false,
},
else => false,
@@ -1429,13 +1467,20 @@ pub fn ptrAllowsZero(ty: Type, zcu: *const Zcu) bool {
/// See also `isPtrLikeOptional`.
pub fn optionalReprIsPayload(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.opt_type => |child_type| child_type == .anyerror_type or switch (zcu.intern_pool.indexToKey(child_type)) {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .c,
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => |child_type| child_type == .anyerror_type or opt_child: switch (ip.indexToKey(child_type)) {
.ptr_type => |ptr_type| ptr_type.flags.size != .c and !ptr_type.flags.is_allowzero,
.restricted_ptr_type => |restricted_ptr_type| continue :opt_child .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.error_set_type, .inferred_error_set_type => true,
else => false,
},
.ptr_type => |ptr_type| ptr_type.flags.size == .c,
else => false,
};
}
@@ -1443,13 +1488,20 @@ pub fn optionalReprIsPayload(ty: Type, zcu: *const Zcu) bool {
/// Returns true if the type is optional and would be lowered to a single pointer
/// address value, using 0 for null. Note that this returns true for C pointers.
pub fn isPtrLikeOptional(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => |ptr_type| ptr_type.flags.size == .c,
.opt_type => |child| switch (zcu.intern_pool.indexToKey(child)) {
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => |child| opt_child: switch (ip.indexToKey(child)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.slice, .c => false,
.many, .one => !ptr_type.flags.is_allowzero,
},
.restricted_ptr_type => |restricted_ptr_type| continue :opt_child .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => false,
},
else => false,
@@ -1486,7 +1538,7 @@ pub fn nullablePtrElem(ty: Type, zcu: *const Zcu) Type {
.pointer => return ty.childType(zcu),
.optional => {
const ptr_ty = ty.childType(zcu);
const ptr_info = zcu.intern_pool.indexToKey(ptr_ty.toIntern()).ptr_type;
const ptr_info = ptr_ty.ptrInfoOrNull(&zcu.intern_pool, .{ .allow_optional = false }).?;
assert(ptr_info.flags.size != .c);
assert(!ptr_info.flags.is_allowzero);
return .fromInterned(ptr_info.child);
@@ -1508,7 +1560,7 @@ pub fn nullablePtrElem(ty: Type, zcu: *const Zcu) Type {
/// * `[*c]T`
pub fn indexableElem(ty: Type, zcu: *const Zcu) Type {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
return ty: switch (ip.indexToKey(ty.toIntern())) {
inline .array_type, .vector_type => |arr| .fromInterned(arr.child),
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.many, .slice, .c => .fromInterned(ptr_type.child),
@@ -1517,6 +1569,9 @@ pub fn indexableElem(ty: Type, zcu: *const Zcu) Type {
else => unreachable,
},
},
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => unreachable,
};
}
@@ -1532,12 +1587,16 @@ pub fn scalarType(ty: Type, zcu: *const Zcu) Type {
/// Asserts that the type is an optional, or a C pointer.
/// For C pointers this returns the type unmodified.
pub fn optionalChild(ty: Type, zcu: *const Zcu) Type {
switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
const ip = &zcu.intern_pool;
ty: switch (ip.indexToKey(ty.toIntern())) {
.opt_type => |child| return .fromInterned(child),
.ptr_type => |ptr_type| {
assert(ptr_type.flags.size == .c);
return ty;
},
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => unreachable,
}
}
@@ -1755,7 +1814,8 @@ pub fn vectorLen(ty: Type, zcu: *const Zcu) u32 {
/// Asserts the type is an array, pointer or vector.
pub fn sentinel(ty: Type, zcu: *const Zcu) ?Value {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
const ip = &zcu.intern_pool;
return ty: switch (ip.indexToKey(ty.toIntern())) {
.vector_type,
.struct_type,
.tuple_type,
@@ -1763,6 +1823,9 @@ pub fn sentinel(ty: Type, zcu: *const Zcu) ?Value {
.array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null,
.ptr_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null,
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
else => unreachable,
};
@@ -1851,6 +1914,7 @@ pub fn intInfo(starting_ty: Type, zcu: *const Zcu) InternPool.Key.IntType {
.tuple_type => unreachable,
.ptr_type => unreachable,
.restricted_ptr_type => unreachable,
.anyframe_type => unreachable,
.array_type => unreachable,
@@ -2021,6 +2085,7 @@ pub fn onePossibleValue(ty: Type, pt: Zcu.PerThread) !?Value {
assertHasLayout(ty, zcu);
return switch (ip.indexToKey(ty.toIntern())) {
.ptr_type,
.restricted_ptr_type, // number of possible values is not known until the end of compilation, so never treated as NPV/OPV
.error_union_type,
.func_type,
.anyframe_type,
@@ -2829,7 +2894,7 @@ pub fn getUnionLayout(loaded_union: InternPool.LoadedUnionType, zcu: *const Zcu)
pub fn elemPtrType(ptr_ty: Type, index: ?u64, pt: Zcu.PerThread) Allocator.Error!Type {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ptr_info = ip.indexToKey(ptr_ty.toIntern()).ptr_type;
const ptr_info = ptr_ty.ptrInfoOrNull(ip, .{ .allow_optional = false }).?;
const elem_ty: Type = switch (ptr_info.flags.size) {
.slice, .many, .c => .fromInterned(ptr_info.child),
.one => switch (ip.indexToKey(ptr_info.child)) {
@@ -2883,7 +2948,7 @@ pub fn elemPtrType(ptr_ty: Type, index: ?u64, pt: Zcu.PerThread) Allocator.Error
pub fn fieldPtrType(ptr_ty: Type, field_index: u32, pt: Zcu.PerThread) Allocator.Error!Type {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ptr_info = ip.indexToKey(ptr_ty.toIntern()).ptr_type;
const ptr_info = ptr_ty.ptrInfoOrNull(ip, .{ .allow_optional = false }).?;
assert(ptr_info.flags.size == .one or ptr_info.flags.size == .c);
const aggregate_ty: Type = .fromInterned(ptr_info.child);
aggregate_ty.assertHasLayout(zcu);
@@ -3011,7 +3076,7 @@ pub fn fieldPtrType(ptr_ty: Type, field_index: u32, pt: Zcu.PerThread) Allocator
.none => switch (ip.indexToKey(aggregate_ty.toIntern())) {
.tuple_type, .union_type => field_ty.abiAlignment(zcu),
.struct_type => field_ty.defaultStructFieldAlignment(.auto, zcu),
.ptr_type => Type.usize.abiAlignment(zcu),
.ptr_type, .restricted_ptr_type => ptrAbiAlignment(zcu.getTarget()),
else => unreachable,
},
else => |a| a,
@@ -3040,6 +3105,7 @@ pub fn fieldPtrType(ptr_ty: Type, field_index: u32, pt: Zcu.PerThread) Allocator
pub fn containerTypeName(ty: Type, ip: *const InternPool) InternPool.NullTerminatedString {
return switch (ip.indexToKey(ty.toIntern())) {
.restricted_ptr_type => ip.loadRestrictedType(ty.toIntern()).name,
.struct_type => ip.loadStructType(ty.toIntern()).name,
.union_type => ip.loadUnionType(ty.toIntern()).name,
.enum_type => ip.loadEnumType(ty.toIntern()).name,
@@ -3247,6 +3313,7 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void {
switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.anyframe_type,
.simple_type,
.opaque_type,
@@ -3315,34 +3382,34 @@ fn collectSubtypes(ty: Type, pt: Zcu.PerThread, visited: *std.AutoArrayHashMapUn
}
switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => try collectSubtypes(Type.fromInterned(ty.ptrInfo(zcu).child), pt, visited),
.array_type => |array_type| try collectSubtypes(Type.fromInterned(array_type.child), pt, visited),
.vector_type => |vector_type| try collectSubtypes(Type.fromInterned(vector_type.child), pt, visited),
.opt_type => |child| try collectSubtypes(Type.fromInterned(child), pt, visited),
.ptr_type => |ptr_type| try collectSubtypes(.fromInterned(ptr_type.child), pt, visited),
.array_type => |array_type| try collectSubtypes(.fromInterned(array_type.child), pt, visited),
.vector_type => |vector_type| try collectSubtypes(.fromInterned(vector_type.child), pt, visited),
.opt_type => |child| try collectSubtypes(.fromInterned(child), pt, visited),
.error_union_type => |error_union_type| {
try collectSubtypes(Type.fromInterned(error_union_type.error_set_type), pt, visited);
try collectSubtypes(.fromInterned(error_union_type.error_set_type), pt, visited);
if (error_union_type.payload_type != .generic_poison_type) {
try collectSubtypes(Type.fromInterned(error_union_type.payload_type), pt, visited);
try collectSubtypes(.fromInterned(error_union_type.payload_type), pt, visited);
}
},
.tuple_type => |tuple| {
for (tuple.types.get(ip)) |field_ty| {
try collectSubtypes(Type.fromInterned(field_ty), pt, visited);
try collectSubtypes(.fromInterned(field_ty), pt, visited);
}
},
.func_type => |fn_info| {
const param_types = fn_info.param_types.get(&zcu.intern_pool);
for (param_types) |param_ty| {
if (param_ty != .generic_poison_type) {
try collectSubtypes(Type.fromInterned(param_ty), pt, visited);
try collectSubtypes(.fromInterned(param_ty), pt, visited);
}
}
if (fn_info.return_type != .generic_poison_type) {
try collectSubtypes(Type.fromInterned(fn_info.return_type), pt, visited);
try collectSubtypes(.fromInterned(fn_info.return_type), pt, visited);
}
},
.anyframe_type => |child| try collectSubtypes(Type.fromInterned(child), pt, visited),
.anyframe_type => |child| try collectSubtypes(.fromInterned(child), pt, visited),
// leaf types
.undef,
@@ -3354,6 +3421,7 @@ fn collectSubtypes(ty: Type, pt: Zcu.PerThread, visited: *std.AutoArrayHashMapUn
.enum_type,
.simple_type,
.int_type,
.restricted_ptr_type,
=> {},
// values, not types
+5 -1
View File
@@ -501,6 +501,7 @@ pub const BuiltinDecl = enum {
@"panic.copyLenMismatch",
@"panic.memcpyAlias",
@"panic.noreturnReturned",
@"panic.corruptRestrictedPointer",
VaList,
@@ -588,6 +589,7 @@ pub const BuiltinDecl = enum {
.@"panic.copyLenMismatch",
.@"panic.memcpyAlias",
.@"panic.noreturnReturned",
.@"panic.corruptRestrictedPointer",
=> .func,
};
}
@@ -661,6 +663,7 @@ pub const SimplePanicId = enum {
copy_len_mismatch,
memcpy_alias,
noreturn_returned,
corrupt_restricted_pointer,
pub fn toBuiltin(id: SimplePanicId) BuiltinDecl {
return switch (id) {
@@ -684,6 +687,7 @@ pub const SimplePanicId = enum {
.copy_len_mismatch => .@"panic.copyLenMismatch",
.memcpy_alias => .@"panic.memcpyAlias",
.noreturn_returned => .@"panic.noreturnReturned",
.corrupt_restricted_pointer => .@"panic.corruptRestrictedPointer",
// zig fmt: on
};
}
@@ -4215,7 +4219,7 @@ fn resolveReferencesInner(zcu: *Zcu) Allocator.Error!std.AutoArrayHashMapUnmanag
// Queue any decls within this type which would be automatically analyzed.
// Keep in sync with analysis queueing logic in `Zcu.PerThread.ScanDeclIter.scanDecl`.
const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap().?;
const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap() orelse continue;
for (zcu.namespacePtr(ns).comptime_decls.items) |cu| {
// `comptime` decls are always analyzed.
const unit: AnalUnit = .wrap(.{ .@"comptime" = cu });
+4
View File
@@ -281,6 +281,9 @@ pub fn generateLazySymbol(
w.writeAll(tag_name) catch unreachable;
w.writeByte(0) catch unreachable;
}
} else if (Type.fromInterned(lazy_sym.ty).unrestrictedType(zcu)) |unrestricted_ptr_ty| {
alignment.* = unrestricted_ptr_ty.abiAlignment(zcu);
try w.splatByteAll(0, @divExact(zcu.getTarget().ptrBitWidth(), 8)); // to be filled in later
} else {
return zcu.codegenFailType(lazy_sym.ty, "TODO implement generateLazySymbol for {s} {f}", .{
@tagName(lazy_sym.kind), Type.fromInterned(lazy_sym.ty).fmt(pt),
@@ -325,6 +328,7 @@ pub fn generateSymbol(
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
+45
View File
@@ -658,6 +658,28 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
air_inst_index = air_body[air_body_index];
continue :air_tag air_tags[@intFromEnum(air_inst_index)];
},
.unwrap_restricted, .unwrap_restricted_safe => {
const ty_op = air_data[@intFromEnum(air_inst_index)].ty_op;
maybe_noop: {
switch (isel.air.typeOf(ty_op.operand, ip).restrictedRepr(zcu)) {
.double_pointer => break :maybe_noop,
.single_pointer => {},
}
if (true) break :maybe_noop;
if (ty_op.operand.toIndex()) |src_air_inst_index| {
if (isel.hints.get(src_air_inst_index)) |hint_vpsi| {
try isel.hints.putNoClobber(gpa, air_inst_index, hint_vpsi);
}
}
}
try isel.analyzeUse(ty_op.operand);
try isel.def_order.putNoClobber(gpa, air_inst_index, {});
air_body_index += 1;
air_inst_index = air_body[air_body_index];
continue :air_tag air_tags[@intFromEnum(air_inst_index)];
},
.struct_field_ptr, .struct_field_val => {
const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
const extra = isel.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -5737,6 +5759,29 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.unwrap_restricted, .unwrap_restricted_safe => |air_tag| {
if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| {
defer dst_vi.value.deref(isel);
const ty_op = air.data(air.inst_index).ty_op;
const unrestricted_ty = ty_op.ty.toType();
const restricted_ty = isel.air.typeOf(ty_op.operand, ip);
switch (restricted_ty.restrictedRepr(zcu)) {
.double_pointer => {
switch (air_tag) {
else => unreachable,
.unwrap_restricted => {},
.unwrap_restricted_safe => {}, // TODO
}
const ptr_vi = try isel.use(ty_op.operand);
const ptr_mat = try ptr_vi.matReg(isel);
_ = try dst_vi.value.load(isel, unrestricted_ty, ptr_mat.ra, .{});
try ptr_mat.finish(isel);
},
.single_pointer => try dst_vi.value.move(isel, ty_op.operand),
}
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.struct_field_ptr => {
if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
defer dst_vi.value.deref(isel);
+36 -1
View File
@@ -896,6 +896,7 @@ pub const DeclGen = struct {
// types, not values
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
@@ -1334,7 +1335,7 @@ pub const DeclGen = struct {
return w.writeByte(')');
},
.bool_type => try w.writeAll(if (safety_on) "0xaa" else "false"),
else => switch (ip.indexToKey(ty.toIntern())) {
else => ty: switch (ip.indexToKey(ty.toIntern())) {
.simple_type, // anyerror, c_char (etc), usize, isize
.int_type,
.enum_type,
@@ -1405,6 +1406,9 @@ pub const DeclGen = struct {
try w.writeByte('}');
},
},
.restricted_ptr_type => |restricted_ptr_type| continue :ty .{
.ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type,
},
.opt_type => |child_type| switch (CType.classifyOptional(ty, zcu)) {
.npv_payload => unreachable, // opv optional
@@ -2840,6 +2844,9 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) Error!void {
.set_err_return_trace => try airSetErrReturnTrace(f, inst),
.save_err_return_trace_index => try airSaveErrReturnTraceIndex(f, inst),
.unwrap_restricted => try airUnwrapRestricted(f, inst, false),
.unwrap_restricted_safe => try airUnwrapRestricted(f, inst, true),
.wasm_memory_size => try airWasmMemorySize(f, inst),
.wasm_memory_grow => try airWasmMemoryGrow(f, inst),
@@ -5533,6 +5540,34 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
/// Lowers the `unwrap_restricted` / `unwrap_restricted_safe` AIR instructions
/// to C: assigns the unrestricted pointer value of a restricted pointer
/// operand to a fresh local. `safety` selects the safety-checked variant.
fn airUnwrapRestricted(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
    const pt = f.dg.pt;
    const zcu = pt.zcu;
    const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
    // Result type of the instruction: the plain (unrestricted) pointer type.
    const unrestricted_ty = ty_op.ty.toType();
    const restricted_ty = f.typeOf(ty_op.operand);
    const operand = try f.resolveInst(ty_op.operand);
    try reap(f, inst, &.{ty_op.operand});
    const w = &f.code.writer;
    const local = try f.allocLocal(inst, unrestricted_ty);
    try f.writeCValue(w, local, .other);
    try w.writeAll(" = ");
    switch (restricted_ty.restrictedRepr(zcu)) {
        .double_pointer => {
            // Double-pointer representation: the operand addresses the real
            // pointer, so emit a dereference to load it.
            _ = safety; // TODO
            try f.writeCValueDeref(w, operand);
        },
        // Single-pointer representation: the operand already is the pointer.
        .single_pointer => try f.writeCValue(w, operand, .other),
    }
    try w.writeByte(';');
    try f.newline();
    return local;
}
fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.dg.pt;
const w = &f.code.writer;
+19 -1
View File
@@ -284,6 +284,20 @@ pub const CType = union(enum) {
.pointer => {
const ptr = cur_ty.ptrInfo(zcu);
if (cur_ty.unrestrictedType(zcu)) |unrestricted_ty| switch (cur_ty.restrictedRepr(zcu)) {
.double_pointer => {
const unrestricted_cty = try lowerInner(unrestricted_ty, true, deps, arena, zcu);
const unrestricted_cty_buf = try arena.create(CType);
unrestricted_cty_buf.* = unrestricted_cty;
return .{ .pointer = .{
.@"const" = true,
.@"volatile" = false,
.elem_ty = unrestricted_cty_buf,
.nonstring = false,
} };
},
.single_pointer => {},
};
switch (ptr.flags.size) {
.slice => {
try deps.addType(gpa, cur_ty, allow_incomplete);
@@ -912,7 +926,10 @@ pub const CType = union(enum) {
.optional => try w.print("opt_{f}", .{fmtZigType(ty.optionalChild(zcu), zcu)}),
.error_union => try w.print("errunion_{f}", .{fmtZigType(ty.errorUnionPayload(zcu), zcu)}),
.pointer => switch (ty.ptrSize(zcu)) {
.pointer => if (ty.unrestrictedType(zcu)) |_| {
const name = ty.containerTypeName(ip).toSlice(ip);
try w.print("{f}", .{@import("../c.zig").fmtIdentUnsolo(name)});
} else switch (ty.ptrSize(zcu)) {
.one, .many, .c => try w.print("ptr_{f}", .{fmtZigType(ty.childType(zcu), zcu)}),
.slice => try w.print("slice_{f}", .{fmtZigType(ty.childType(zcu), zcu)}),
},
@@ -985,6 +1002,7 @@ pub const CType = union(enum) {
return switch (ip.indexToKey(ty.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.anyframe_type,
.simple_type,
.opaque_type,
+2
View File
@@ -3061,6 +3061,7 @@ pub const Object = struct {
}),
};
},
.restricted_ptr_type => @panic("TODO implement restricted pointers"),
.array_type => |array_type| o.builder.arrayType(
array_type.lenIncludingSentinel(),
try o.lowerType(.fromInterned(array_type.child)),
@@ -3443,6 +3444,7 @@ pub const Object = struct {
return switch (val_key) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
+19
View File
@@ -413,6 +413,9 @@ pub fn genBody(self: *FuncGen, body: []const Air.Inst.Index, coverage_point: Air
.wrap_errunion_payload => try self.airWrapErrUnionPayload(body[i..]),
.wrap_errunion_err => try self.airWrapErrUnionErr(body[i..]),
.unwrap_restricted => try self.airUnwrapRestricted(inst, false),
.unwrap_restricted_safe => try self.airUnwrapRestricted(inst, true),
.wasm_memory_size => try self.airWasmMemorySize(inst),
.wasm_memory_grow => try self.airWasmMemoryGrow(inst),
@@ -3250,6 +3253,22 @@ fn airWrapErrUnionErr(self: *FuncGen, body_tail: []const Air.Inst.Index) Allocat
return result_ptr;
}
fn airUnwrapRestricted(self: *FuncGen, inst: Air.Inst.Index, safety: bool) Allocator.Error!Builder.Value {
const o = self.object;
const zcu = o.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const unrestricted_ty = ty_op.ty.toType();
const restricted_ty = self.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
switch (restricted_ty.restrictedRepr(zcu)) {
.double_pointer => {
_ = safety; // TODO
return self.wip.load(.normal, .ptr, operand, unrestricted_ty.abiAlignment(zcu).toLlvm(), "restricted.unwrap");
},
.single_pointer => return operand,
}
}
fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) Allocator.Error!Builder.Value {
const o = self.object;
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+4
View File
@@ -1614,6 +1614,10 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_payload => try func.airWrapErrUnionPayload(inst),
.wrap_errunion_err => try func.airWrapErrUnionErr(inst),
.unwrap_restricted,
.unwrap_restricted_safe,
=> return func.fail("TODO implement restricted pointers", .{}),
.runtime_nav_ptr => try func.airRuntimeNavPtr(inst),
.add_optimized,
+4
View File
@@ -676,6 +676,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_payload => try self.airWrapErrUnionPayload(inst),
.wrap_errunion_err => try self.airWrapErrUnionErr(inst),
.unwrap_restricted,
.unwrap_restricted_safe,
=> return self.fail("TODO implement restricted pointers", .{}),
.add_optimized,
.sub_optimized,
.mul_optimized,
+4 -1
View File
@@ -774,6 +774,7 @@ fn constant(cg: *CodeGen, ty: Type, val: Value, repr: Repr) Error!Id {
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
@@ -2774,7 +2775,9 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) Error!void {
.unwrap_errunion_err => try cg.airErrUnionErr(inst),
.unwrap_errunion_payload => try cg.airErrUnionPayload(inst),
.wrap_errunion_err => try cg.airWrapErrUnionErr(inst),
.wrap_errunion_payload => try cg.airWrapErrUnionPayload(inst),
.wrap_errunion_payload => try cg.airWrapErrUnionPayload(inst),
.unwrap_restricted => return cg.fail("TODO implement restricted pointers", .{}),
.is_null => try cg.airIsNull(inst, false, .is_null),
.is_non_null => try cg.airIsNull(inst, false, .is_non_null),
+20
View File
@@ -1815,6 +1815,9 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.errunion_payload_ptr_set => cg.airErrUnionPayloadPtrSet(inst),
.error_name => cg.airErrorName(inst),
.unwrap_restricted => cg.airUnwrapRestricted(inst, false),
.unwrap_restricted_safe => cg.airUnwrapRestricted(inst, true),
.wasm_memory_size => cg.airWasmMemorySize(inst),
.wasm_memory_grow => cg.airWasmMemoryGrow(inst),
@@ -4676,6 +4679,7 @@ fn lowerConstant(cg: *CodeGen, val: Value) InnerError!WValue {
switch (ip.indexToKey(val.ip_index)) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
@@ -6719,6 +6723,22 @@ fn airErrUnionPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void
return cg.finishAir(inst, result, &.{ty_op.operand});
}
fn airUnwrapRestricted(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
const zcu = cg.pt.zcu;
const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try cg.resolveInst(ty_op.operand);
const unrestricted_ty = ty_op.ty.toType();
const restricted_ty = cg.typeOf(ty_op.operand);
const result = result: switch (restricted_ty.restrictedRepr(zcu)) {
.double_pointer => {
_ = safety; // TODO
break :result try cg.load(operand, unrestricted_ty, 0);
},
.single_pointer => cg.reuseOperand(ty_op.operand, operand),
};
return cg.finishAir(inst, result, &.{ty_op.operand});
}
fn airFieldParentPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pt = cg.pt;
const zcu = pt.zcu;
+153 -13
View File
@@ -103829,6 +103829,121 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
try eu.write(&ops[0], .{ .disp = eu_err_off }, cg);
try eu.finish(inst, &.{ty_op.operand}, &ops, cg);
},
.unwrap_restricted, .unwrap_restricted_safe => |air_tag| {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
const unrestricted_ty = ty_op.ty.toType();
const restricted_ty = cg.typeOf(ty_op.operand);
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
const res = res: switch (restricted_ty.restrictedRepr(zcu)) {
.double_pointer => {
switch (air_tag) {
else => unreachable,
.unwrap_restricted => {},
.unwrap_restricted_safe => cg.select(&.{}, &.{}, &ops, &.{ .{
.required_features = .{ .avx, null, null, null },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"32" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_pointer } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.none, .add_log2_ptr_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.required_features = .{ .sse, null, null, null },
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"16" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_pointer } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.none, .add_log2_ptr_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
}, .{
.patterns = &.{
.{ .src = .{ .mem, .none, .none } },
.{ .src = .{ .to_gpr, .none, .none } },
},
.call_frame = .{ .alignment = .@"8" },
.extra_temps = .{
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .lazy_sym = .{ .kind = .const_data, .ref = .src0 } } },
.{ .type = .usize, .kind = .{ .rc = .general_purpose } },
.{ .type = .usize, .kind = .{ .panic_func = .corrupt_restricted_pointer } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp0p, .leaa(.tmp1, .add_ptr_size), ._, ._ },
.{ ._, ._, .mov, .tmp2p, .src0p, ._, ._ },
.{ ._, ._, .sub, .tmp2p, .tmp0p, ._, ._ },
.{ ._, ._r, .ro, .tmp2p, .sa(.none, .add_log2_ptr_size), ._, ._ },
.{ ._, ._, .cmp, .tmp2p, .leaa(.tmp0p, .sub_ptr_size), ._, ._ },
.{ ._, ._b, .j, .@"0f", ._, ._, ._ },
.{ ._, ._, .call, .tmp3d, ._, ._, ._ },
} },
} }) catch |err| switch (err) {
error.SelectFailed => return cg.fail("failed to select {s} {f} {f} {f}", .{
@tagName(air_tag),
unrestricted_ty.fmt(pt),
restricted_ty.fmt(pt),
ops[0].tracking(cg),
}),
else => |e| return e,
},
}
break :res try ops[0].load(unrestricted_ty, .{}, cg);
},
.single_pointer => ops[0],
};
try res.finish(inst, &.{ty_op.operand}, &ops, cg);
},
.struct_field_ptr => {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const struct_field = cg.air.extraData(Air.StructField, ty_pl.payload).data;
@@ -176216,20 +176331,22 @@ fn genCall(self: *CodeGen, info: union(enum) {
// Due to incremental compilation, how function calls are generated depends
// on linking.
switch (info) {
.air => |callee| if (callee.toInterned()) |func_ip_index| {
const func_key = ip.indexToKey(func_ip_index);
switch (switch (func_key) {
else => func_key,
.ptr => |ptr| if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.nav => |nav| ip.indexToKey(zcu.navValue(nav).toIntern()),
else => func_key,
} else func_key,
.air => |callee| if (callee.toInterned()) |func_ip_index| try self.asmImmediate(
.{ ._, .call },
switch (switch (ip.indexToKey(func_ip_index)) {
else => |func_key| func_key,
.ptr => |ptr| switch (ptr.byte_offset) {
0 => switch (ptr.base_addr) {
.nav => |nav| ip.indexToKey(zcu.navValue(nav).toIntern()),
else => unreachable,
},
else => unreachable,
},
}) {
else => unreachable,
.func => |func| try self.asmImmediate(.{ ._, .call }, .{ .nav = .{ .index = func.owner_nav } }),
.@"extern" => |@"extern"| try self.asmImmediate(.{ ._, .call }, .{ .nav = .{ .index = @"extern".owner_nav } }),
}
} else {
inline .func, .@"extern" => |func| .{ .nav = .{ .index = func.owner_nav } },
},
) else {
assert(self.typeOf(callee).zigTypeTag(zcu) == .pointer);
const scratch_reg = abi.getCAbiLinkerScratchReg(fn_info.cc);
try self.genSetReg(scratch_reg, .usize, .{ .air_ref = callee }, .{});
@@ -188525,6 +188642,7 @@ const Select = struct {
splat_float_mem: struct { ref: Select.Operand.Ref, inside: enum { zero } = .zero, outside: f16 },
frame: FrameIndex,
lazy_sym: struct { kind: link.File.LazySymbol.Kind, ref: Select.Operand.Ref = .none },
panic_func: Zcu.SimplePanicId,
extern_func: [*:0]const u8,
const ConstSpec = struct {
@@ -188986,7 +189104,25 @@ const Select = struct {
},
} }), true };
},
.extern_func => |extern_func_spec| .{ try cg.tempInit(spec.type, .{ .lea_extern_func = try cg.addString(std.mem.span(extern_func_spec)) }), true },
.panic_func => |panic_id| .{ try cg.tempInit(
spec.type,
switch (switch (pt.zcu.intern_pool.indexToKey(pt.zcu.builtin_decl_values.get(panic_id.toBuiltin()))) {
else => |func_key| func_key,
.ptr => |ptr| switch (ptr.byte_offset) {
0 => switch (ptr.base_addr) {
.nav => |nav| pt.zcu.intern_pool.indexToKey(pt.zcu.navValue(nav).toIntern()),
else => unreachable,
},
else => unreachable,
},
}) {
else => unreachable,
inline .func, .@"extern" => |func| .{ .lea_nav = func.owner_nav },
},
), true },
.extern_func => |extern_func_spec| .{ try cg.tempInit(spec.type, .{
.lea_extern_func = try cg.addString(std.mem.span(extern_func_spec)),
}), true },
};
}
@@ -189036,6 +189172,7 @@ const Select = struct {
lhs: enum(u6) {
none,
ptr_size,
log2_ptr_size,
ptr_bit_size,
size,
src0_size,
@@ -189072,7 +189209,9 @@ const Select = struct {
rhs: Memory.Scale,
const none: Adjust = .{ .sign = .pos, .lhs = .none, .op = .mul, .rhs = .@"1" };
const add_ptr_size: Adjust = .{ .sign = .pos, .lhs = .ptr_size, .op = .mul, .rhs = .@"1" };
const sub_ptr_size: Adjust = .{ .sign = .neg, .lhs = .ptr_size, .op = .mul, .rhs = .@"1" };
const add_log2_ptr_size: Adjust = .{ .sign = .pos, .lhs = .log2_ptr_size, .op = .mul, .rhs = .@"1" };
const add_ptr_bit_size: Adjust = .{ .sign = .pos, .lhs = .ptr_bit_size, .op = .mul, .rhs = .@"1" };
const add_size: Adjust = .{ .sign = .pos, .lhs = .size, .op = .mul, .rhs = .@"1" };
const add_size_div_4: Adjust = .{ .sign = .pos, .lhs = .size, .op = .div, .rhs = .@"4" };
@@ -190013,6 +190152,7 @@ const Select = struct {
const lhs: SignedImm = lhs: switch (op.flags.adjust.lhs) {
.none => 0,
.ptr_size => @divExact(s.cg.target.ptrBitWidth(), 8),
.log2_ptr_size => std.math.log2(@divExact(s.cg.target.ptrBitWidth(), 8)),
.ptr_bit_size => s.cg.target.ptrBitWidth(),
.size => @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu)),
.src0_size => @intCast(Select.Operand.Ref.src0.typeOf(s).abiSize(s.cg.pt.zcu)),
+9 -1
View File
@@ -3060,6 +3060,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
} = switch (ip.indexToKey(nav_val.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
@@ -3566,7 +3567,7 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
const diw = &wip_nav.debug_info.writer;
var big_int_space: Value.BigIntSpace = undefined;
switch (value_ip_key) {
key: switch (value_ip_key) {
.func => unreachable, // handled above
.@"extern" => unreachable, // handled above
@@ -3629,6 +3630,13 @@ fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.Co
try diw.writeUleb128(@intFromEnum(AbbrevCode.null));
},
},
.restricted_ptr_type => |restricted_ptr_type| switch (Type.restrictedReprByZirIndex(restricted_ptr_type.zir_index, zcu)) {
.double_pointer => continue :key .{ .ptr_type = .{
.child = restricted_ptr_type.unrestricted_ptr_type,
.flags = .{ .is_const = true },
} },
.single_pointer => continue :key .{ .ptr_type = ip.indexToKey(restricted_ptr_type.unrestricted_ptr_type).ptr_type },
},
.array_type => |array_type| {
const array_child_type: Type = .fromInterned(array_type.child);
try wip_nav.abbrevCode(if (array_type.sentinel == .none) .array_type else .array_sentinel_type);
+1
View File
@@ -49,6 +49,7 @@ pub fn print(
switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.restricted_ptr_type,
.array_type,
.vector_type,
.opt_type,
+8
View File
@@ -622,6 +622,14 @@ const Writer = struct {
try stream.writeAll(")) ");
try self.writeSrcNode(stream, extra.node);
},
.reify_restricted => {
const extra = self.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const name_strat: Zir.Inst.NameStrategy = @enumFromInt(extended.small);
try stream.print("{t}, ", .{name_strat});
try self.writeInstRef(stream, extra.operand);
try stream.writeAll(")) ");
try self.writeSrcNode(stream, extra.node);
},
.reify_fn => {
const extra = self.code.extraData(Zir.Inst.ReifyFn, extended.operand).data;
try self.writeInstRef(stream, extra.param_types);